2 * Copyright 2012 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
30 #include "pipebuffer/pb_slab.h"
31 #include "util/u_blitter.h"
/* Shader-stage counts and per-stage resource slot counts. */
#define SI_NUM_GRAPHICS_SHADERS	(PIPE_SHADER_TESS_EVAL + 1)
#define SI_NUM_SHADERS		(PIPE_SHADER_COMPUTE + 1)

#define SI_NUM_VERTEX_BUFFERS	SI_MAX_ATTRIBS
#define SI_NUM_SAMPLERS		32 /* OpenGL textures units per shader */
#define SI_NUM_CONST_BUFFERS	16
#define SI_NUM_IMAGES		16
#define SI_NUM_SHADER_BUFFERS	16
/* Opaque types declared in other radeonsi headers. */
struct si_shader_ctx_state;
struct si_shader_selector;
49 struct si_state_blend
{
50 struct si_pm4_state pm4
;
51 uint32_t cb_target_mask
;
52 /* Set 0xf or 0x0 (4 bits) per render target if the following is
53 * true. ANDed with spi_shader_col_format.
55 unsigned cb_target_enabled_4bit
;
56 unsigned blend_enable_4bit
;
57 unsigned need_src_alpha_4bit
;
58 unsigned commutative_4bit
;
59 unsigned dcc_msaa_corruption_4bit
;
60 bool alpha_to_coverage
:1;
62 bool dual_src_blend
:1;
63 bool logicop_enable
:1;
66 struct si_state_rasterizer
{
67 struct si_pm4_state pm4
;
68 /* poly offset states for 16-bit, 24-bit, and 32-bit zbuffers */
69 struct si_pm4_state
*pm4_poly_offset
;
70 unsigned pa_sc_line_stipple
;
71 unsigned pa_cl_clip_cntl
;
74 unsigned sprite_coord_enable
:8;
75 unsigned clip_plane_enable
:8;
76 unsigned half_pixel_center
:1;
78 unsigned flatshade_first
:1;
80 unsigned multisample_enable
:1;
81 unsigned force_persample_interp
:1;
82 unsigned line_stipple_enable
:1;
83 unsigned poly_stipple_enable
:1;
84 unsigned line_smooth
:1;
85 unsigned poly_smooth
:1;
86 unsigned uses_poly_offset
:1;
87 unsigned clamp_fragment_color
:1;
88 unsigned clamp_vertex_color
:1;
89 unsigned rasterizer_discard
:1;
90 unsigned scissor_enable
:1;
91 unsigned clip_halfz
:1;
92 unsigned cull_front
:1;
94 unsigned depth_clamp_any
:1;
95 unsigned provoking_vertex_first
:1;
98 struct si_dsa_stencil_ref_part
{
100 uint8_t writemask
[2];
103 struct si_dsa_order_invariance
{
104 /** Whether the final result in Z/S buffers is guaranteed to be
105 * invariant under changes to the order in which fragments arrive. */
108 /** Whether the set of fragments that pass the combined Z/S test is
109 * guaranteed to be invariant under changes to the order in which
110 * fragments arrive. */
113 /** Whether the last fragment that passes the combined Z/S test at each
114 * sample is guaranteed to be invariant under changes to the order in
115 * which fragments arrive. */
119 struct si_state_dsa
{
120 struct si_pm4_state pm4
;
121 struct si_dsa_stencil_ref_part stencil_ref
;
123 /* 0 = without stencil buffer, 1 = when both Z and S buffers are present */
124 struct si_dsa_order_invariance order_invariance
[2];
127 bool depth_enabled
:1;
128 bool depth_write_enabled
:1;
129 bool stencil_enabled
:1;
130 bool stencil_write_enabled
:1;
135 struct si_stencil_ref
{
136 struct pipe_stencil_ref state
;
137 struct si_dsa_stencil_ref_part dsa_part
;
140 struct si_vertex_elements
142 struct si_resource
*instance_divisor_factor_buffer
;
143 uint32_t rsrc_word3
[SI_MAX_ATTRIBS
];
144 uint16_t src_offset
[SI_MAX_ATTRIBS
];
145 uint8_t fix_fetch
[SI_MAX_ATTRIBS
];
146 uint8_t format_size
[SI_MAX_ATTRIBS
];
147 uint8_t vertex_buffer_index
[SI_MAX_ATTRIBS
];
149 /* Bitmask of elements that always need a fixup to be applied. */
150 uint16_t fix_fetch_always
;
152 /* Bitmask of elements whose fetch should always be opencoded. */
153 uint16_t fix_fetch_opencode
;
155 /* Bitmask of elements which need to be opencoded if the vertex buffer
157 uint16_t fix_fetch_unaligned
;
159 /* For elements in fix_fetch_unaligned: whether the effective
160 * element load size as seen by the hardware is a dword (as opposed
163 uint16_t hw_load_is_dword
;
165 /* Bitmask of vertex buffers requiring alignment check */
166 uint16_t vb_alignment_check_mask
;
169 bool uses_instance_divisors
;
171 uint16_t first_vb_use_mask
;
172 /* Vertex buffer descriptor list size aligned for optimal prefetch. */
173 uint16_t desc_list_byte_size
;
174 uint16_t instance_divisor_is_one
; /* bitmask of inputs */
175 uint16_t instance_divisor_is_fetched
; /* bitmask of inputs */
180 struct si_state_blend
*blend
;
181 struct si_state_rasterizer
*rasterizer
;
182 struct si_state_dsa
*dsa
;
183 struct si_pm4_state
*poly_offset
;
184 struct si_pm4_state
*ls
;
185 struct si_pm4_state
*hs
;
186 struct si_pm4_state
*es
;
187 struct si_pm4_state
*gs
;
188 struct si_pm4_state
*vgt_shader_config
;
189 struct si_pm4_state
*vs
;
190 struct si_pm4_state
*ps
;
192 struct si_pm4_state
*array
[0];
/* Index of a pm4 state within union si_state, derived from its field offset. */
#define SI_STATE_IDX(name) \
	(offsetof(union si_state, named.name) / sizeof(struct si_pm4_state *))
/* Bitmask with only the given pm4 state's bit set. */
#define SI_STATE_BIT(name) (1 << SI_STATE_IDX(name))
/* Total number of pm4 state slots in union si_state. */
#define SI_NUM_STATES (sizeof(union si_state) / sizeof(struct si_pm4_state *))
200 static inline unsigned si_states_that_always_roll_context(void)
202 return (SI_STATE_BIT(blend
) |
203 SI_STATE_BIT(rasterizer
) |
205 SI_STATE_BIT(poly_offset
) |
206 SI_STATE_BIT(vgt_shader_config
));
209 union si_state_atoms
{
211 /* The order matters. */
212 struct si_atom render_cond
;
213 struct si_atom streamout_begin
;
214 struct si_atom streamout_enable
; /* must be after streamout_begin */
215 struct si_atom framebuffer
;
216 struct si_atom msaa_sample_locs
;
217 struct si_atom db_render_state
;
218 struct si_atom dpbb_state
;
219 struct si_atom msaa_config
;
220 struct si_atom sample_mask
;
221 struct si_atom cb_render_state
;
222 struct si_atom blend_color
;
223 struct si_atom clip_regs
;
224 struct si_atom clip_state
;
225 struct si_atom shader_pointers
;
226 struct si_atom guardband
;
227 struct si_atom scissors
;
228 struct si_atom viewports
;
229 struct si_atom stencil_ref
;
230 struct si_atom spi_map
;
231 struct si_atom scratch_state
;
232 struct si_atom window_rectangles
;
233 struct si_atom shader_query
;
235 struct si_atom array
[0];
/* Bitmask with only the given atom's bit set; the atom index is derived
 * from its field offset within union si_state_atoms. */
#define SI_ATOM_BIT(name) (1 << (offsetof(union si_state_atoms, s.name) / \
				 sizeof(struct si_atom)))
/* Total number of atoms in union si_state_atoms. */
#define SI_NUM_ATOMS (sizeof(union si_state_atoms)/sizeof(struct si_atom*))
242 static inline unsigned si_atoms_that_always_roll_context(void)
244 return (SI_ATOM_BIT(streamout_begin
) |
245 SI_ATOM_BIT(streamout_enable
) |
246 SI_ATOM_BIT(framebuffer
) |
247 SI_ATOM_BIT(msaa_sample_locs
) |
248 SI_ATOM_BIT(sample_mask
) |
249 SI_ATOM_BIT(blend_color
) |
250 SI_ATOM_BIT(clip_state
) |
251 SI_ATOM_BIT(scissors
) |
252 SI_ATOM_BIT(viewports
) |
253 SI_ATOM_BIT(stencil_ref
) |
254 SI_ATOM_BIT(scratch_state
) |
255 SI_ATOM_BIT(window_rectangles
));
258 struct si_shader_data
{
259 uint32_t sh_base
[SI_NUM_SHADERS
];
/* Bits of PA_CL_VS_OUT_CNTL owned by the VS state; the remaining bits
 * belong to the clip-register state (see the __CL tracked register). */
#define SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK \
	(S_02881C_USE_VTX_POINT_SIZE(1) | \
	 S_02881C_USE_VTX_EDGE_FLAG(1) | \
	 S_02881C_USE_VTX_RENDER_TARGET_INDX(1) | \
	 S_02881C_USE_VTX_VIEWPORT_INDX(1) | \
	 S_02881C_VS_OUT_MISC_VEC_ENA(1) | \
	 S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(1))
270 /* The list of registers whose emitted values are remembered by si_context. */
271 enum si_tracked_reg
{
272 SI_TRACKED_DB_RENDER_CONTROL
, /* 2 consecutive registers */
273 SI_TRACKED_DB_COUNT_CONTROL
,
275 SI_TRACKED_DB_RENDER_OVERRIDE2
,
276 SI_TRACKED_DB_SHADER_CONTROL
,
278 SI_TRACKED_CB_TARGET_MASK
,
279 SI_TRACKED_CB_DCC_CONTROL
,
281 SI_TRACKED_SX_PS_DOWNCONVERT
, /* 3 consecutive registers */
282 SI_TRACKED_SX_BLEND_OPT_EPSILON
,
283 SI_TRACKED_SX_BLEND_OPT_CONTROL
,
285 SI_TRACKED_PA_SC_LINE_CNTL
, /* 2 consecutive registers */
286 SI_TRACKED_PA_SC_AA_CONFIG
,
289 SI_TRACKED_PA_SC_MODE_CNTL_1
,
291 SI_TRACKED_PA_SU_PRIM_FILTER_CNTL
,
292 SI_TRACKED_PA_SU_SMALL_PRIM_FILTER_CNTL
,
294 SI_TRACKED_PA_CL_VS_OUT_CNTL__VS
, /* set with SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK*/
295 SI_TRACKED_PA_CL_VS_OUT_CNTL__CL
, /* set with ~SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK */
296 SI_TRACKED_PA_CL_CLIP_CNTL
,
298 SI_TRACKED_PA_SC_BINNER_CNTL_0
,
299 SI_TRACKED_DB_DFSM_CONTROL
,
301 SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ
, /* 4 consecutive registers */
302 SI_TRACKED_PA_CL_GB_VERT_DISC_ADJ
,
303 SI_TRACKED_PA_CL_GB_HORZ_CLIP_ADJ
,
304 SI_TRACKED_PA_CL_GB_HORZ_DISC_ADJ
,
306 SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET
,
307 SI_TRACKED_PA_SU_VTX_CNTL
,
309 SI_TRACKED_PA_SC_CLIPRECT_RULE
,
311 SI_TRACKED_VGT_ESGS_RING_ITEMSIZE
,
313 SI_TRACKED_VGT_GSVS_RING_OFFSET_1
, /* 3 consecutive registers */
314 SI_TRACKED_VGT_GSVS_RING_OFFSET_2
,
315 SI_TRACKED_VGT_GSVS_RING_OFFSET_3
,
317 SI_TRACKED_VGT_GSVS_RING_ITEMSIZE
,
318 SI_TRACKED_VGT_GS_MAX_VERT_OUT
,
320 SI_TRACKED_VGT_GS_VERT_ITEMSIZE
, /* 4 consecutive registers */
321 SI_TRACKED_VGT_GS_VERT_ITEMSIZE_1
,
322 SI_TRACKED_VGT_GS_VERT_ITEMSIZE_2
,
323 SI_TRACKED_VGT_GS_VERT_ITEMSIZE_3
,
325 SI_TRACKED_VGT_GS_INSTANCE_CNT
,
326 SI_TRACKED_VGT_GS_ONCHIP_CNTL
,
327 SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP
,
328 SI_TRACKED_VGT_GS_MODE
,
329 SI_TRACKED_VGT_PRIMITIVEID_EN
,
330 SI_TRACKED_VGT_REUSE_OFF
,
331 SI_TRACKED_SPI_VS_OUT_CONFIG
,
332 SI_TRACKED_PA_CL_VTE_CNTL
,
333 SI_TRACKED_PA_CL_NGG_CNTL
,
334 SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP
,
335 SI_TRACKED_GE_NGG_SUBGRP_CNTL
,
337 SI_TRACKED_SPI_SHADER_IDX_FORMAT
, /* 2 consecutive registers */
338 SI_TRACKED_SPI_SHADER_POS_FORMAT
,
340 SI_TRACKED_SPI_PS_INPUT_ENA
, /* 2 consecutive registers */
341 SI_TRACKED_SPI_PS_INPUT_ADDR
,
343 SI_TRACKED_SPI_BARYC_CNTL
,
344 SI_TRACKED_SPI_PS_IN_CONTROL
,
346 SI_TRACKED_SPI_SHADER_Z_FORMAT
, /* 2 consecutive registers */
347 SI_TRACKED_SPI_SHADER_COL_FORMAT
,
349 SI_TRACKED_CB_SHADER_MASK
,
350 SI_TRACKED_VGT_TF_PARAM
,
351 SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL
,
356 struct si_tracked_regs
{
358 uint32_t reg_value
[SI_NUM_TRACKED_REGS
];
359 uint32_t spi_ps_input_cntl
[32];
362 /* Private read-write buffer slots. */
369 SI_VS_STREAMOUT_BUF0
,
370 SI_VS_STREAMOUT_BUF1
,
371 SI_VS_STREAMOUT_BUF2
,
372 SI_VS_STREAMOUT_BUF3
,
374 SI_HS_CONST_DEFAULT_TESS_LEVELS
,
375 SI_VS_CONST_INSTANCE_DIVISORS
,
376 SI_VS_CONST_CLIP_PLANES
,
377 SI_PS_CONST_POLY_STIPPLE
,
378 SI_PS_CONST_SAMPLE_POSITIONS
,
380 /* Image descriptor of color buffer 0 for KHR_blend_equation_advanced. */
381 SI_PS_IMAGE_COLORBUF0
,
382 SI_PS_IMAGE_COLORBUF0_HI
,
383 SI_PS_IMAGE_COLORBUF0_FMASK
,
384 SI_PS_IMAGE_COLORBUF0_FMASK_HI
,
391 /* Indices into sctx->descriptors, laid out so that gfx and compute pipelines
395 * 1 - vertex const and shader buffers
396 * 2 - vertex samplers and images
397 * 3 - fragment const and shader buffer
399 * 11 - compute const and shader buffers
400 * 12 - compute samplers and images
403 SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS
,
404 SI_SHADER_DESCS_SAMPLERS_AND_IMAGES
,
/* Indices into sctx->descriptors: slot 0 holds the RW buffers; each
 * shader stage then gets SI_NUM_SHADER_DESCS consecutive slots. */
#define SI_DESCS_RW_BUFFERS	0
#define SI_DESCS_FIRST_SHADER	1
#define SI_DESCS_FIRST_COMPUTE	(SI_DESCS_FIRST_SHADER + \
				 PIPE_SHADER_COMPUTE * SI_NUM_SHADER_DESCS)
#define SI_NUM_DESCS		(SI_DESCS_FIRST_SHADER + \
				 SI_NUM_SHADERS * SI_NUM_SHADER_DESCS)
415 #define SI_DESCS_SHADER_MASK(name) \
416 u_bit_consecutive(SI_DESCS_FIRST_SHADER + \
417 PIPE_SHADER_##name * SI_NUM_SHADER_DESCS, \
420 static inline unsigned
421 si_const_and_shader_buffer_descriptors_idx(unsigned shader
)
423 return SI_DESCS_FIRST_SHADER
+ shader
* SI_NUM_SHADER_DESCS
+
424 SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS
;
427 static inline unsigned
428 si_sampler_and_image_descriptors_idx(unsigned shader
)
430 return SI_DESCS_FIRST_SHADER
+ shader
* SI_NUM_SHADER_DESCS
+
431 SI_SHADER_DESCS_SAMPLERS_AND_IMAGES
;
434 /* This represents descriptors in memory, such as buffer resources,
435 * image resources, and sampler states.
437 struct si_descriptors
{
438 /* The list of descriptors in malloc'd memory. */
440 /* The list in mapped GPU memory. */
443 /* The buffer where the descriptors have been uploaded. */
444 struct si_resource
*buffer
;
445 uint64_t gpu_address
;
447 /* The maximum number of descriptors. */
448 uint32_t num_elements
;
450 /* Slots that are used by currently-bound shaders.
451 * It determines which slots are uploaded.
453 uint32_t first_active_slot
;
454 uint32_t num_active_slots
;
456 /* The SH register offset relative to USER_DATA*_0 where the pointer
457 * to the descriptor array will be stored. */
458 short shader_userdata_offset
;
459 /* The size of one descriptor. */
460 ubyte element_dw_size
;
461 /* If there is only one slot enabled, bind it directly instead of
462 * uploading descriptors. -1 if disabled. */
463 signed char slot_index_to_bind_directly
;
466 struct si_buffer_resources
{
467 struct pipe_resource
**buffers
; /* this has num_buffers elements */
468 unsigned *offsets
; /* this has num_buffers elements */
470 enum radeon_bo_priority priority
:6;
471 enum radeon_bo_priority priority_constbuf
:6;
473 /* The i-th bit is set if that element is enabled (non-NULL resource). */
474 unsigned enabled_mask
;
475 unsigned writable_mask
;
/* True if the queued pm4 state differs from the last emitted one. */
#define si_pm4_state_changed(sctx, member) \
	((sctx)->queued.named.member != (sctx)->emitted.named.member)

/* True if a pm4 state is bound (non-NULL) and has changed since the
 * last emit. */
#define si_pm4_state_enabled_and_changed(sctx, member) \
	((sctx)->queued.named.member && si_pm4_state_changed(sctx, member))
484 #define si_pm4_bind_state(sctx, member, value) \
486 (sctx)->queued.named.member = (value); \
487 (sctx)->dirty_states |= SI_STATE_BIT(member); \
490 #define si_pm4_delete_state(sctx, member, value) \
492 if ((sctx)->queued.named.member == (value)) { \
493 (sctx)->queued.named.member = NULL; \
495 si_pm4_free_state(sctx, (struct si_pm4_state *)(value), \
496 SI_STATE_IDX(member)); \
499 /* si_descriptors.c */
500 void si_set_mutable_tex_desc_fields(struct si_screen
*sscreen
,
501 struct si_texture
*tex
,
502 const struct legacy_surf_level
*base_level_info
,
503 unsigned base_level
, unsigned first_level
,
504 unsigned block_width
, bool is_stencil
,
506 void si_update_ps_colorbuf0_slot(struct si_context
*sctx
);
507 void si_get_pipe_constant_buffer(struct si_context
*sctx
, uint shader
,
508 uint slot
, struct pipe_constant_buffer
*cbuf
);
509 void si_get_shader_buffers(struct si_context
*sctx
,
510 enum pipe_shader_type shader
,
511 uint start_slot
, uint count
,
512 struct pipe_shader_buffer
*sbuf
);
513 void si_set_ring_buffer(struct si_context
*sctx
, uint slot
,
514 struct pipe_resource
*buffer
,
515 unsigned stride
, unsigned num_records
,
516 bool add_tid
, bool swizzle
,
517 unsigned element_size
, unsigned index_stride
, uint64_t offset
);
518 void si_init_all_descriptors(struct si_context
*sctx
);
519 bool si_upload_vertex_buffer_descriptors(struct si_context
*sctx
);
520 bool si_upload_graphics_shader_descriptors(struct si_context
*sctx
);
521 bool si_upload_compute_shader_descriptors(struct si_context
*sctx
);
522 void si_release_all_descriptors(struct si_context
*sctx
);
523 void si_gfx_resources_add_all_to_bo_list(struct si_context
*sctx
);
524 void si_compute_resources_add_all_to_bo_list(struct si_context
*sctx
);
525 void si_all_descriptors_begin_new_cs(struct si_context
*sctx
);
526 void si_upload_const_buffer(struct si_context
*sctx
, struct si_resource
**buf
,
527 const uint8_t *ptr
, unsigned size
, uint32_t *const_offset
);
528 void si_update_all_texture_descriptors(struct si_context
*sctx
);
529 void si_shader_change_notify(struct si_context
*sctx
);
530 void si_update_needs_color_decompress_masks(struct si_context
*sctx
);
531 void si_emit_graphics_shader_pointers(struct si_context
*sctx
);
532 void si_emit_compute_shader_pointers(struct si_context
*sctx
);
533 void si_set_rw_buffer(struct si_context
*sctx
,
534 uint slot
, const struct pipe_constant_buffer
*input
);
535 void si_set_rw_shader_buffer(struct si_context
*sctx
, uint slot
,
536 const struct pipe_shader_buffer
*sbuffer
);
537 void si_set_active_descriptors(struct si_context
*sctx
, unsigned desc_idx
,
538 uint64_t new_active_mask
);
539 void si_set_active_descriptors_for_shader(struct si_context
*sctx
,
540 struct si_shader_selector
*sel
);
541 bool si_bindless_descriptor_can_reclaim_slab(void *priv
,
542 struct pb_slab_entry
*entry
);
543 struct pb_slab
*si_bindless_descriptor_slab_alloc(void *priv
, unsigned heap
,
545 unsigned group_index
);
546 void si_bindless_descriptor_slab_free(void *priv
, struct pb_slab
*pslab
);
547 void si_rebind_buffer(struct si_context
*sctx
, struct pipe_resource
*buf
);
549 void si_init_state_compute_functions(struct si_context
*sctx
);
550 void si_init_state_functions(struct si_context
*sctx
);
551 void si_init_screen_state_functions(struct si_screen
*sscreen
);
553 si_make_buffer_descriptor(struct si_screen
*screen
, struct si_resource
*buf
,
554 enum pipe_format format
,
555 unsigned offset
, unsigned size
,
557 struct pipe_sampler_view
*
558 si_create_sampler_view_custom(struct pipe_context
*ctx
,
559 struct pipe_resource
*texture
,
560 const struct pipe_sampler_view
*state
,
561 unsigned width0
, unsigned height0
,
562 unsigned force_level
);
563 void si_update_fb_dirtiness_after_rendering(struct si_context
*sctx
);
564 void si_update_ps_iter_samples(struct si_context
*sctx
);
565 void si_save_qbo_state(struct si_context
*sctx
, struct si_qbo_state
*st
);
566 void si_restore_qbo_state(struct si_context
*sctx
, struct si_qbo_state
*st
);
567 void si_set_occlusion_query_state(struct si_context
*sctx
,
568 bool old_perfect_enable
);
/* Precomputed parameters for fast 32-bit unsigned division by a
 * runtime-constant divisor (multiply + shift instead of a divide). */
struct si_fast_udiv_info32 {
	unsigned multiplier; /* the "magic number" multiplier */
	unsigned pre_shift; /* shift for the dividend before multiplying */
	unsigned post_shift; /* shift for the dividend after multiplying */
	int increment; /* 0 or 1; if set then increment the numerator, using one of
			the two strategies */
};
578 struct si_fast_udiv_info32
579 si_compute_fast_udiv_info32(uint32_t D
, unsigned num_bits
);
581 /* si_state_binning.c */
582 void si_emit_dpbb_state(struct si_context
*sctx
);
584 /* si_state_shaders.c */
585 void *si_get_ir_binary(struct si_shader_selector
*sel
, bool ngg
, bool es
);
586 bool si_shader_cache_load_shader(struct si_screen
*sscreen
, void *ir_binary
,
587 struct si_shader
*shader
);
588 bool si_shader_cache_insert_shader(struct si_screen
*sscreen
, void *ir_binary
,
589 struct si_shader
*shader
,
590 bool insert_into_disk_cache
);
591 bool si_update_shaders(struct si_context
*sctx
);
592 void si_init_shader_functions(struct si_context
*sctx
);
593 bool si_init_shader_cache(struct si_screen
*sscreen
);
594 void si_destroy_shader_cache(struct si_screen
*sscreen
);
595 void si_schedule_initial_compile(struct si_context
*sctx
, unsigned processor
,
596 struct util_queue_fence
*ready_fence
,
597 struct si_compiler_ctx_state
*compiler_ctx_state
,
598 void *job
, util_queue_execute_func execute
);
599 void si_get_active_slot_masks(const struct tgsi_shader_info
*info
,
600 uint32_t *const_and_shader_buffers
,
601 uint64_t *samplers_and_images
);
602 int si_shader_select_with_key(struct si_screen
*sscreen
,
603 struct si_shader_ctx_state
*state
,
604 struct si_compiler_ctx_state
*compiler_state
,
605 struct si_shader_key
*key
,
607 bool optimized_or_none
);
608 void si_shader_selector_key_vs(struct si_context
*sctx
,
609 struct si_shader_selector
*vs
,
610 struct si_shader_key
*key
,
611 struct si_vs_prolog_bits
*prolog_key
);
612 unsigned si_get_input_prim(const struct si_shader_selector
*gs
);
613 bool si_update_ngg(struct si_context
*sctx
);
615 /* si_state_draw.c */
616 void si_emit_surface_sync(struct si_context
*sctx
, struct radeon_cmdbuf
*cs
,
617 unsigned cp_coher_cntl
);
618 void si_prim_discard_signal_next_compute_ib_start(struct si_context
*sctx
);
619 void gfx10_emit_cache_flush(struct si_context
*sctx
);
620 void si_emit_cache_flush(struct si_context
*sctx
);
621 void si_trace_emit(struct si_context
*sctx
);
622 void si_init_draw_functions(struct si_context
*sctx
);
624 /* si_state_msaa.c */
625 void si_init_msaa_functions(struct si_context
*sctx
);
626 void si_emit_sample_locations(struct radeon_cmdbuf
*cs
, int nr_samples
);
628 /* si_state_streamout.c */
629 void si_streamout_buffers_dirty(struct si_context
*sctx
);
630 void si_emit_streamout_end(struct si_context
*sctx
);
631 void si_update_prims_generated_query_state(struct si_context
*sctx
,
632 unsigned type
, int diff
);
633 void si_init_streamout_functions(struct si_context
*sctx
);
636 static inline unsigned si_get_constbuf_slot(unsigned slot
)
638 /* Constant buffers are in slots [16..31], ascending */
639 return SI_NUM_SHADER_BUFFERS
+ slot
;
642 static inline unsigned si_get_shaderbuf_slot(unsigned slot
)
644 /* shader buffers are in slots [15..0], descending */
645 return SI_NUM_SHADER_BUFFERS
- 1 - slot
;
648 static inline unsigned si_get_sampler_slot(unsigned slot
)
650 /* samplers are in slots [8..39], ascending */
651 return SI_NUM_IMAGES
/ 2 + slot
;
654 static inline unsigned si_get_image_slot(unsigned slot
)
656 /* images are in slots [15..0] (sampler slots [7..0]), descending */
657 return SI_NUM_IMAGES
- 1 - slot
;