radeonsi: use the live shader cache
[mesa.git] src/gallium/drivers/radeonsi/si_state.h
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef SI_STATE_H
#define SI_STATE_H

#include "si_pm4.h"

#include "pipebuffer/pb_slab.h"
#include "util/u_blitter.h"

#define SI_NUM_GRAPHICS_SHADERS (PIPE_SHADER_TESS_EVAL+1)
#define SI_NUM_SHADERS (PIPE_SHADER_COMPUTE+1)

#define SI_NUM_VERTEX_BUFFERS SI_MAX_ATTRIBS
#define SI_NUM_SAMPLERS 32 /* OpenGL texture units per shader */
#define SI_NUM_CONST_BUFFERS 16
#define SI_NUM_IMAGES 16
#define SI_NUM_IMAGE_SLOTS (SI_NUM_IMAGES * 2) /* the second half are FMASK slots */
#define SI_NUM_SHADER_BUFFERS 16

struct si_screen;
struct si_shader;
struct si_shader_ctx_state;
struct si_shader_selector;
struct si_texture;
struct si_qbo_state;

struct si_state_blend {
        struct si_pm4_state pm4;
        uint32_t cb_target_mask;
        /* Set 0xf or 0x0 (4 bits) per render target if the following is
         * true. ANDed with spi_shader_col_format.
         */
        unsigned cb_target_enabled_4bit;
        unsigned blend_enable_4bit;
        unsigned need_src_alpha_4bit;
        unsigned commutative_4bit;
        unsigned dcc_msaa_corruption_4bit;
        bool alpha_to_coverage:1;
        bool alpha_to_one:1;
        bool dual_src_blend:1;
        bool logicop_enable:1;
};
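
/* Illustrative note (not part of the original header): per the comment above,
 * the *_4bit fields hold one nibble per render target. For example, with only
 * RT0 and RT2 enabled, cb_target_enabled_4bit would be 0x00000f0f.
 */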

struct si_state_rasterizer {
        struct si_pm4_state pm4;
        /* poly offset states for 16-bit, 24-bit, and 32-bit zbuffers */
        struct si_pm4_state *pm4_poly_offset;
        unsigned pa_sc_line_stipple;
        unsigned pa_cl_clip_cntl;
        float line_width;
        float max_point_size;
        unsigned sprite_coord_enable:8;
        unsigned clip_plane_enable:8;
        unsigned half_pixel_center:1;
        unsigned flatshade:1;
        unsigned flatshade_first:1;
        unsigned two_side:1;
        unsigned multisample_enable:1;
        unsigned force_persample_interp:1;
        unsigned line_stipple_enable:1;
        unsigned poly_stipple_enable:1;
        unsigned line_smooth:1;
        unsigned poly_smooth:1;
        unsigned uses_poly_offset:1;
        unsigned clamp_fragment_color:1;
        unsigned clamp_vertex_color:1;
        unsigned rasterizer_discard:1;
        unsigned scissor_enable:1;
        unsigned clip_halfz:1;
        unsigned cull_front:1;
        unsigned cull_back:1;
        unsigned depth_clamp_any:1;
        unsigned provoking_vertex_first:1;
        unsigned polygon_mode_enabled:1;
        unsigned polygon_mode_is_lines:1;
};

struct si_dsa_stencil_ref_part {
        uint8_t valuemask[2];
        uint8_t writemask[2];
};

struct si_dsa_order_invariance {
        /** Whether the final result in Z/S buffers is guaranteed to be
         * invariant under changes to the order in which fragments arrive. */
        bool zs:1;

        /** Whether the set of fragments that pass the combined Z/S test is
         * guaranteed to be invariant under changes to the order in which
         * fragments arrive. */
        bool pass_set:1;

        /** Whether the last fragment that passes the combined Z/S test at each
         * sample is guaranteed to be invariant under changes to the order in
         * which fragments arrive. */
        bool pass_last:1;
};

struct si_state_dsa {
        struct si_pm4_state pm4;
        struct si_dsa_stencil_ref_part stencil_ref;

        /* 0 = without stencil buffer, 1 = when both Z and S buffers are present */
        struct si_dsa_order_invariance order_invariance[2];

        ubyte alpha_func:3;
        bool depth_enabled:1;
        bool depth_write_enabled:1;
        bool stencil_enabled:1;
        bool stencil_write_enabled:1;
        bool db_can_write:1;
};

struct si_stencil_ref {
        struct pipe_stencil_ref state;
        struct si_dsa_stencil_ref_part dsa_part;
};

struct si_vertex_elements
{
        struct si_resource *instance_divisor_factor_buffer;
        uint32_t rsrc_word3[SI_MAX_ATTRIBS];
        uint16_t src_offset[SI_MAX_ATTRIBS];
        uint8_t fix_fetch[SI_MAX_ATTRIBS];
        uint8_t format_size[SI_MAX_ATTRIBS];
        uint8_t vertex_buffer_index[SI_MAX_ATTRIBS];

        /* Bitmask of elements that always need a fixup to be applied. */
        uint16_t fix_fetch_always;

        /* Bitmask of elements whose fetch should always be opencoded. */
        uint16_t fix_fetch_opencode;

        /* Bitmask of elements which need to be opencoded if the vertex buffer
         * is unaligned. */
        uint16_t fix_fetch_unaligned;

        /* For elements in fix_fetch_unaligned: whether the effective
         * element load size as seen by the hardware is a dword (as opposed
         * to a short).
         */
        uint16_t hw_load_is_dword;

        /* Bitmask of vertex buffers requiring alignment check */
        uint16_t vb_alignment_check_mask;

        uint8_t count;
        bool uses_instance_divisors;

        uint16_t first_vb_use_mask;
        /* Vertex buffer descriptor list size aligned for optimal prefetch. */
        uint16_t vb_desc_list_alloc_size;
        uint16_t instance_divisor_is_one; /* bitmask of inputs */
        uint16_t instance_divisor_is_fetched; /* bitmask of inputs */
};

union si_state {
        struct {
                struct si_state_blend *blend;
                struct si_state_rasterizer *rasterizer;
                struct si_state_dsa *dsa;
                struct si_pm4_state *poly_offset;
                struct si_pm4_state *ls;
                struct si_pm4_state *hs;
                struct si_pm4_state *es;
                struct si_pm4_state *gs;
                struct si_pm4_state *vgt_shader_config;
                struct si_pm4_state *vs;
                struct si_pm4_state *ps;
        } named;
        struct si_pm4_state *array[0];
};

#define SI_STATE_IDX(name) \
        (offsetof(union si_state, named.name) / sizeof(struct si_pm4_state *))
#define SI_STATE_BIT(name) (1 << SI_STATE_IDX(name))
#define SI_NUM_STATES (sizeof(union si_state) / sizeof(struct si_pm4_state *))
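
/* Illustrative note (not part of the original header): given the union layout
 * above, SI_STATE_IDX(blend) evaluates to 0 and SI_STATE_BIT(blend) to 0x1,
 * while SI_STATE_IDX(ps) is 10; SI_NUM_STATES is 11, so a dirty mask covering
 * every pm4 state fits comfortably in an unsigned bitfield.
 */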

static inline unsigned si_states_that_always_roll_context(void)
{
        return (SI_STATE_BIT(blend) |
                SI_STATE_BIT(rasterizer) |
                SI_STATE_BIT(dsa) |
                SI_STATE_BIT(poly_offset) |
                SI_STATE_BIT(vgt_shader_config));
}

union si_state_atoms {
        struct {
                /* The order matters. */
                struct si_atom render_cond;
                struct si_atom streamout_begin;
                struct si_atom streamout_enable; /* must be after streamout_begin */
                struct si_atom framebuffer;
                struct si_atom msaa_sample_locs;
                struct si_atom db_render_state;
                struct si_atom dpbb_state;
                struct si_atom msaa_config;
                struct si_atom sample_mask;
                struct si_atom cb_render_state;
                struct si_atom blend_color;
                struct si_atom clip_regs;
                struct si_atom clip_state;
                struct si_atom shader_pointers;
                struct si_atom guardband;
                struct si_atom scissors;
                struct si_atom viewports;
                struct si_atom stencil_ref;
                struct si_atom spi_map;
                struct si_atom scratch_state;
                struct si_atom window_rectangles;
                struct si_atom shader_query;
        } s;
        struct si_atom array[0];
};

#define SI_ATOM_BIT(name) (1 << (offsetof(union si_state_atoms, s.name) / \
                                 sizeof(struct si_atom)))
#define SI_NUM_ATOMS (sizeof(union si_state_atoms)/sizeof(struct si_atom*))

static inline unsigned si_atoms_that_always_roll_context(void)
{
        return (SI_ATOM_BIT(streamout_begin) |
                SI_ATOM_BIT(streamout_enable) |
                SI_ATOM_BIT(framebuffer) |
                SI_ATOM_BIT(msaa_sample_locs) |
                SI_ATOM_BIT(sample_mask) |
                SI_ATOM_BIT(blend_color) |
                SI_ATOM_BIT(clip_state) |
                SI_ATOM_BIT(scissors) |
                SI_ATOM_BIT(viewports) |
                SI_ATOM_BIT(stencil_ref) |
                SI_ATOM_BIT(scratch_state) |
                SI_ATOM_BIT(window_rectangles));
}

struct si_shader_data {
        uint32_t sh_base[SI_NUM_SHADERS];
};

#define SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK \
        (S_02881C_USE_VTX_POINT_SIZE(1) | \
         S_02881C_USE_VTX_EDGE_FLAG(1) | \
         S_02881C_USE_VTX_RENDER_TARGET_INDX(1) | \
         S_02881C_USE_VTX_VIEWPORT_INDX(1) | \
         S_02881C_VS_OUT_MISC_VEC_ENA(1) | \
         S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(1))

/* The list of registers whose emitted values are remembered by si_context. */
enum si_tracked_reg {
        SI_TRACKED_DB_RENDER_CONTROL, /* 2 consecutive registers */
        SI_TRACKED_DB_COUNT_CONTROL,

        SI_TRACKED_DB_RENDER_OVERRIDE2,
        SI_TRACKED_DB_SHADER_CONTROL,

        SI_TRACKED_CB_TARGET_MASK,
        SI_TRACKED_CB_DCC_CONTROL,

        SI_TRACKED_SX_PS_DOWNCONVERT, /* 3 consecutive registers */
        SI_TRACKED_SX_BLEND_OPT_EPSILON,
        SI_TRACKED_SX_BLEND_OPT_CONTROL,

        SI_TRACKED_PA_SC_LINE_CNTL, /* 2 consecutive registers */
        SI_TRACKED_PA_SC_AA_CONFIG,

        SI_TRACKED_DB_EQAA,
        SI_TRACKED_PA_SC_MODE_CNTL_1,

        SI_TRACKED_PA_SU_PRIM_FILTER_CNTL,
        SI_TRACKED_PA_SU_SMALL_PRIM_FILTER_CNTL,

        SI_TRACKED_PA_CL_VS_OUT_CNTL__VS, /* set with SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK */
        SI_TRACKED_PA_CL_VS_OUT_CNTL__CL, /* set with ~SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK */
        SI_TRACKED_PA_CL_CLIP_CNTL,

        SI_TRACKED_PA_SC_BINNER_CNTL_0,
        SI_TRACKED_DB_DFSM_CONTROL,

        SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ, /* 4 consecutive registers */
        SI_TRACKED_PA_CL_GB_VERT_DISC_ADJ,
        SI_TRACKED_PA_CL_GB_HORZ_CLIP_ADJ,
        SI_TRACKED_PA_CL_GB_HORZ_DISC_ADJ,

        SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
        SI_TRACKED_PA_SU_VTX_CNTL,

        SI_TRACKED_PA_SC_CLIPRECT_RULE,

        SI_TRACKED_PA_SC_LINE_STIPPLE,

        SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,

        SI_TRACKED_VGT_GSVS_RING_OFFSET_1, /* 3 consecutive registers */
        SI_TRACKED_VGT_GSVS_RING_OFFSET_2,
        SI_TRACKED_VGT_GSVS_RING_OFFSET_3,

        SI_TRACKED_VGT_GSVS_RING_ITEMSIZE,
        SI_TRACKED_VGT_GS_MAX_VERT_OUT,

        SI_TRACKED_VGT_GS_VERT_ITEMSIZE, /* 4 consecutive registers */
        SI_TRACKED_VGT_GS_VERT_ITEMSIZE_1,
        SI_TRACKED_VGT_GS_VERT_ITEMSIZE_2,
        SI_TRACKED_VGT_GS_VERT_ITEMSIZE_3,

        SI_TRACKED_VGT_GS_INSTANCE_CNT,
        SI_TRACKED_VGT_GS_ONCHIP_CNTL,
        SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
        SI_TRACKED_VGT_GS_MODE,
        SI_TRACKED_VGT_PRIMITIVEID_EN,
        SI_TRACKED_VGT_REUSE_OFF,
        SI_TRACKED_SPI_VS_OUT_CONFIG,
        SI_TRACKED_PA_CL_VTE_CNTL,
        SI_TRACKED_PA_CL_NGG_CNTL,
        SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP,
        SI_TRACKED_GE_NGG_SUBGRP_CNTL,

        SI_TRACKED_SPI_SHADER_IDX_FORMAT, /* 2 consecutive registers */
        SI_TRACKED_SPI_SHADER_POS_FORMAT,

        SI_TRACKED_SPI_PS_INPUT_ENA, /* 2 consecutive registers */
        SI_TRACKED_SPI_PS_INPUT_ADDR,

        SI_TRACKED_SPI_BARYC_CNTL,
        SI_TRACKED_SPI_PS_IN_CONTROL,

        SI_TRACKED_SPI_SHADER_Z_FORMAT, /* 2 consecutive registers */
        SI_TRACKED_SPI_SHADER_COL_FORMAT,

        SI_TRACKED_CB_SHADER_MASK,
        SI_TRACKED_VGT_TF_PARAM,
        SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,

        SI_TRACKED_GE_PC_ALLOC,

        SI_NUM_TRACKED_REGS,
};

struct si_tracked_regs {
        uint64_t reg_saved;
        uint32_t reg_value[SI_NUM_TRACKED_REGS];
        uint32_t spi_ps_input_cntl[32];
};
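
/* Illustrative note (an inference, not part of the original header): reg_saved
 * appears to act as a one-bit-per-register "value is known" mask for the enum
 * above, which suggests SI_NUM_TRACKED_REGS is expected to stay at or below 64.
 */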

/* Private read-write buffer slots. */
enum {
        SI_ES_RING_ESGS,
        SI_GS_RING_ESGS,

        SI_RING_GSVS,

        SI_VS_STREAMOUT_BUF0,
        SI_VS_STREAMOUT_BUF1,
        SI_VS_STREAMOUT_BUF2,
        SI_VS_STREAMOUT_BUF3,

        SI_HS_CONST_DEFAULT_TESS_LEVELS,
        SI_VS_CONST_INSTANCE_DIVISORS,
        SI_VS_CONST_CLIP_PLANES,
        SI_PS_CONST_POLY_STIPPLE,
        SI_PS_CONST_SAMPLE_POSITIONS,

        /* Image descriptor of color buffer 0 for KHR_blend_equation_advanced. */
        SI_PS_IMAGE_COLORBUF0,
        SI_PS_IMAGE_COLORBUF0_HI,
        SI_PS_IMAGE_COLORBUF0_FMASK,
        SI_PS_IMAGE_COLORBUF0_FMASK_HI,

        GFX10_GS_QUERY_BUF,

        SI_NUM_RW_BUFFERS,
};

/* Indices into sctx->descriptors, laid out so that gfx and compute pipelines
 * are contiguous:
 *
 *    0 - rw buffers
 *    1 - vertex const and shader buffers
 *    2 - vertex samplers and images
 *    3 - fragment const and shader buffers
 *    ...
 *   11 - compute const and shader buffers
 *   12 - compute samplers and images
 */
enum {
        SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
        SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
        SI_NUM_SHADER_DESCS,
};

#define SI_DESCS_RW_BUFFERS    0
#define SI_DESCS_FIRST_SHADER  1
#define SI_DESCS_FIRST_COMPUTE (SI_DESCS_FIRST_SHADER + \
                                PIPE_SHADER_COMPUTE * SI_NUM_SHADER_DESCS)
#define SI_NUM_DESCS           (SI_DESCS_FIRST_SHADER + \
                                SI_NUM_SHADERS * SI_NUM_SHADER_DESCS)

#define SI_DESCS_SHADER_MASK(name) \
        u_bit_consecutive(SI_DESCS_FIRST_SHADER + \
                          PIPE_SHADER_##name * SI_NUM_SHADER_DESCS, \
                          SI_NUM_SHADER_DESCS)

static inline unsigned
si_const_and_shader_buffer_descriptors_idx(unsigned shader)
{
        return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
               SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS;
}

static inline unsigned
si_sampler_and_image_descriptors_idx(unsigned shader)
{
        return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
               SI_SHADER_DESCS_SAMPLERS_AND_IMAGES;
}
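
/* Illustrative example (not part of the original header): for the fragment
 * stage (PIPE_SHADER_FRAGMENT == 1), si_const_and_shader_buffer_descriptors_idx(1)
 * yields 3 and si_sampler_and_image_descriptors_idx(1) yields 4, matching the
 * layout table above.
 */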

/* This represents descriptors in memory, such as buffer resources,
 * image resources, and sampler states.
 */
struct si_descriptors {
        /* The list of descriptors in malloc'd memory. */
        uint32_t *list;
        /* The list in mapped GPU memory. */
        uint32_t *gpu_list;

        /* The buffer where the descriptors have been uploaded. */
        struct si_resource *buffer;
        uint64_t gpu_address;

        /* The maximum number of descriptors. */
        uint32_t num_elements;

        /* Slots that are used by currently-bound shaders.
         * It determines which slots are uploaded.
         */
        uint32_t first_active_slot;
        uint32_t num_active_slots;

        /* The SH register offset relative to USER_DATA*_0 where the pointer
         * to the descriptor array will be stored. */
        short shader_userdata_offset;
        /* The size of one descriptor. */
        ubyte element_dw_size;
        /* If there is only one slot enabled, bind it directly instead of
         * uploading descriptors. -1 if disabled. */
        signed char slot_index_to_bind_directly;
};

struct si_buffer_resources {
        struct pipe_resource **buffers; /* this has num_buffers elements */
        unsigned *offsets; /* this has num_buffers elements */

        enum radeon_bo_priority priority:6;
        enum radeon_bo_priority priority_constbuf:6;

        /* The i-th bit is set if that element is enabled (non-NULL resource). */
        unsigned enabled_mask;
        unsigned writable_mask;
};

#define si_pm4_state_changed(sctx, member) \
        ((sctx)->queued.named.member != (sctx)->emitted.named.member)

#define si_pm4_state_enabled_and_changed(sctx, member) \
        ((sctx)->queued.named.member && si_pm4_state_changed(sctx, member))

#define si_pm4_bind_state(sctx, member, value) \
        do { \
                (sctx)->queued.named.member = (value); \
                (sctx)->dirty_states |= SI_STATE_BIT(member); \
        } while(0)

#define si_pm4_delete_state(sctx, member, value) \
        do { \
                if ((sctx)->queued.named.member == (value)) { \
                        (sctx)->queued.named.member = NULL; \
                } \
                si_pm4_free_state(sctx, (struct si_pm4_state *)(value), \
                                  SI_STATE_IDX(member)); \
        } while(0)
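
/* Illustrative sketch (assumed callback name, not part of the original header):
 * a typical pipe_context bind callback stores the CSO in the queued state and
 * marks it dirty so the next draw re-emits its pm4 packets, roughly:
 *
 *     static void example_bind_blend_state(struct pipe_context *ctx, void *state)
 *     {
 *             struct si_context *sctx = (struct si_context *)ctx;
 *             si_pm4_bind_state(sctx, blend, (struct si_state_blend *)state);
 *     }
 *
 * The real driver callbacks do additional bookkeeping on top of this.
 */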

/* si_descriptors.c */
void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
                                    struct si_texture *tex,
                                    const struct legacy_surf_level *base_level_info,
                                    unsigned base_level, unsigned first_level,
                                    unsigned block_width, bool is_stencil,
                                    uint32_t *state);
void si_update_ps_colorbuf0_slot(struct si_context *sctx);
void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader,
                                 uint slot, struct pipe_constant_buffer *cbuf);
void si_get_shader_buffers(struct si_context *sctx,
                           enum pipe_shader_type shader,
                           uint start_slot, uint count,
                           struct pipe_shader_buffer *sbuf);
void si_set_ring_buffer(struct si_context *sctx, uint slot,
                        struct pipe_resource *buffer,
                        unsigned stride, unsigned num_records,
                        bool add_tid, bool swizzle,
                        unsigned element_size, unsigned index_stride, uint64_t offset);
void si_init_all_descriptors(struct si_context *sctx);
bool si_upload_vertex_buffer_descriptors(struct si_context *sctx);
bool si_upload_graphics_shader_descriptors(struct si_context *sctx);
bool si_upload_compute_shader_descriptors(struct si_context *sctx);
void si_release_all_descriptors(struct si_context *sctx);
void si_gfx_resources_add_all_to_bo_list(struct si_context *sctx);
void si_compute_resources_add_all_to_bo_list(struct si_context *sctx);
void si_all_descriptors_begin_new_cs(struct si_context *sctx);
void si_upload_const_buffer(struct si_context *sctx, struct si_resource **buf,
                            const uint8_t *ptr, unsigned size, uint32_t *const_offset);
void si_update_all_texture_descriptors(struct si_context *sctx);
void si_shader_change_notify(struct si_context *sctx);
void si_update_needs_color_decompress_masks(struct si_context *sctx);
void si_emit_graphics_shader_pointers(struct si_context *sctx);
void si_emit_compute_shader_pointers(struct si_context *sctx);
void si_set_rw_buffer(struct si_context *sctx,
                      uint slot, const struct pipe_constant_buffer *input);
void si_set_rw_shader_buffer(struct si_context *sctx, uint slot,
                             const struct pipe_shader_buffer *sbuffer);
void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
                               uint64_t new_active_mask);
void si_set_active_descriptors_for_shader(struct si_context *sctx,
                                          struct si_shader_selector *sel);
bool si_bindless_descriptor_can_reclaim_slab(void *priv,
                                             struct pb_slab_entry *entry);
struct pb_slab *si_bindless_descriptor_slab_alloc(void *priv, unsigned heap,
                                                  unsigned entry_size,
                                                  unsigned group_index);
void si_bindless_descriptor_slab_free(void *priv, struct pb_slab *pslab);
void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf);

/* si_state.c */
void si_init_state_compute_functions(struct si_context *sctx);
void si_init_state_functions(struct si_context *sctx);
void si_init_screen_state_functions(struct si_screen *sscreen);
void
si_make_buffer_descriptor(struct si_screen *screen, struct si_resource *buf,
                          enum pipe_format format,
                          unsigned offset, unsigned size,
                          uint32_t *state);
struct pipe_sampler_view *
si_create_sampler_view_custom(struct pipe_context *ctx,
                              struct pipe_resource *texture,
                              const struct pipe_sampler_view *state,
                              unsigned width0, unsigned height0,
                              unsigned force_level);
void si_update_fb_dirtiness_after_rendering(struct si_context *sctx);
void si_update_ps_iter_samples(struct si_context *sctx);
void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st);
void si_restore_qbo_state(struct si_context *sctx, struct si_qbo_state *st);
void si_set_occlusion_query_state(struct si_context *sctx,
                                  bool old_perfect_enable);

struct si_fast_udiv_info32 {
        unsigned multiplier; /* the "magic number" multiplier */
        unsigned pre_shift; /* shift for the dividend before multiplying */
        unsigned post_shift; /* shift for the dividend after multiplying */
        int increment; /* 0 or 1; if set then increment the numerator, using one of
                          the two strategies */
};

struct si_fast_udiv_info32
si_compute_fast_udiv_info32(uint32_t D, unsigned num_bits);
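
/* Minimal illustrative sketch (not part of the original header): how the
 * precomputed parameters above are typically applied to replace a division by
 * a constant D with a multiply-high and shifts. The helper name is made up for
 * this example; the "increment" case uses the add-the-multiplier-to-the-product
 * strategy, which is equivalent to incrementing the (shifted) numerator.
 */
static inline uint32_t
example_fast_udiv32(uint32_t n, struct si_fast_udiv_info32 info)
{
        uint64_t product = (uint64_t)(n >> info.pre_shift) * info.multiplier;

        if (info.increment)
                product += info.multiplier; /* increment the numerator by 1 */

        return (uint32_t)(product >> 32) >> info.post_shift;
}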

/* si_state_binning.c */
void si_emit_dpbb_state(struct si_context *sctx);

/* si_state_shaders.c */
void si_get_ir_cache_key(struct si_shader_selector *sel, bool ngg, bool es,
                         unsigned char ir_sha1_cache_key[20]);
bool si_shader_cache_load_shader(struct si_screen *sscreen,
                                 unsigned char ir_sha1_cache_key[20],
                                 struct si_shader *shader);
void si_shader_cache_insert_shader(struct si_screen *sscreen,
                                   unsigned char ir_sha1_cache_key[20],
                                   struct si_shader *shader,
                                   bool insert_into_disk_cache);
bool si_update_shaders(struct si_context *sctx);
void si_init_screen_live_shader_cache(struct si_screen *sscreen);
void si_init_shader_functions(struct si_context *sctx);
bool si_init_shader_cache(struct si_screen *sscreen);
void si_destroy_shader_cache(struct si_screen *sscreen);
void si_schedule_initial_compile(struct si_context *sctx, unsigned processor,
                                 struct util_queue_fence *ready_fence,
                                 struct si_compiler_ctx_state *compiler_ctx_state,
                                 void *job, util_queue_execute_func execute);
void si_get_active_slot_masks(const struct si_shader_info *info,
                              uint32_t *const_and_shader_buffers,
                              uint64_t *samplers_and_images);
int si_shader_select_with_key(struct si_screen *sscreen,
                              struct si_shader_ctx_state *state,
                              struct si_compiler_ctx_state *compiler_state,
                              struct si_shader_key *key,
                              int thread_index,
                              bool optimized_or_none);
void si_shader_selector_key_vs(struct si_context *sctx,
                               struct si_shader_selector *vs,
                               struct si_shader_key *key,
                               struct si_vs_prolog_bits *prolog_key);
unsigned si_get_input_prim(const struct si_shader_selector *gs);
bool si_update_ngg(struct si_context *sctx);

/* si_state_draw.c */
void si_emit_surface_sync(struct si_context *sctx, struct radeon_cmdbuf *cs,
                          unsigned cp_coher_cntl);
void si_prim_discard_signal_next_compute_ib_start(struct si_context *sctx);
void gfx10_emit_cache_flush(struct si_context *sctx);
void si_emit_cache_flush(struct si_context *sctx);
void si_trace_emit(struct si_context *sctx);
void si_init_draw_functions(struct si_context *sctx);

/* si_state_msaa.c */
void si_init_msaa_functions(struct si_context *sctx);
void si_emit_sample_locations(struct radeon_cmdbuf *cs, int nr_samples);

/* si_state_streamout.c */
void si_streamout_buffers_dirty(struct si_context *sctx);
void si_emit_streamout_end(struct si_context *sctx);
void si_update_prims_generated_query_state(struct si_context *sctx,
                                           unsigned type, int diff);
void si_init_streamout_functions(struct si_context *sctx);

static inline unsigned si_get_constbuf_slot(unsigned slot)
{
        /* Constant buffers are in slots [16..31], ascending */
        return SI_NUM_SHADER_BUFFERS + slot;
}

static inline unsigned si_get_shaderbuf_slot(unsigned slot)
{
        /* Shader buffers are in slots [15..0], descending */
        return SI_NUM_SHADER_BUFFERS - 1 - slot;
}

static inline unsigned si_get_sampler_slot(unsigned slot)
{
        /* 32 samplers are in sampler slots [16..47], 16 dw per slot, ascending */
        /* those are equivalent to image slots [32..95], 8 dw per slot, ascending */
        return SI_NUM_IMAGE_SLOTS / 2 + slot;
}

static inline unsigned si_get_image_slot(unsigned slot)
{
        /* image slots are in [31..0] (sampler slots [15..0]), descending */
        /* images are in slots [31..16], while FMASKs are in slots [15..0] */
        return SI_NUM_IMAGE_SLOTS - 1 - slot;
}
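
/* Illustrative note (not part of the original header): with
 * SI_NUM_SHADER_BUFFERS == 16 and SI_NUM_IMAGE_SLOTS == 32, the helpers above
 * map slot 0 of each kind as follows: si_get_constbuf_slot(0) == 16,
 * si_get_shaderbuf_slot(0) == 15, si_get_sampler_slot(0) == 16 and
 * si_get_image_slot(0) == 31.
 */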

#endif