radeonsi: optimize SET_CONTEXT_REG for VS shaders
[mesa.git] / src/gallium/drivers/radeonsi/si_state.h
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef SI_STATE_H
#define SI_STATE_H

#include "si_pm4.h"

#include "pipebuffer/pb_slab.h"
#include "util/u_blitter.h"

#define SI_NUM_GRAPHICS_SHADERS (PIPE_SHADER_TESS_EVAL+1)
#define SI_NUM_SHADERS (PIPE_SHADER_COMPUTE+1)

#define SI_NUM_VERTEX_BUFFERS SI_MAX_ATTRIBS
#define SI_NUM_SAMPLERS 32 /* OpenGL texture units per shader */
#define SI_NUM_CONST_BUFFERS 16
#define SI_NUM_IMAGES 16
#define SI_NUM_SHADER_BUFFERS 16

struct si_screen;
struct si_shader;
struct si_shader_selector;
struct si_texture;
struct si_qbo_state;

struct si_state_blend {
	struct si_pm4_state pm4;
	uint32_t cb_target_mask;
	/* Set 0xf or 0x0 (4 bits) per render target if the following is
	 * true. ANDed with spi_shader_col_format.
	 */
	unsigned cb_target_enabled_4bit;
	unsigned blend_enable_4bit;
	unsigned need_src_alpha_4bit;
	unsigned commutative_4bit;
	bool alpha_to_coverage:1;
	bool alpha_to_one:1;
	bool dual_src_blend:1;
	bool logicop_enable:1;
};
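
/* Illustration (not part of the original header): how the *_4bit masks above
 * are laid out.  Each render target owns one nibble, so a pipeline with only
 * RT0 and RT2 enabled would have cb_target_enabled_4bit == 0x00000f0f
 * (RT0 -> bits 3:0, RT2 -> bits 11:8), which is why the mask can be ANDed
 * directly with the per-RT nibbles of spi_shader_col_format.
 */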

struct si_state_rasterizer {
	struct si_pm4_state pm4;
	/* poly offset states for 16-bit, 24-bit, and 32-bit zbuffers */
	struct si_pm4_state *pm4_poly_offset;
	unsigned pa_sc_line_stipple;
	unsigned pa_cl_clip_cntl;
	float line_width;
	float max_point_size;
	unsigned sprite_coord_enable:8;
	unsigned clip_plane_enable:8;
	unsigned flatshade:1;
	unsigned two_side:1;
	unsigned multisample_enable:1;
	unsigned force_persample_interp:1;
	unsigned line_stipple_enable:1;
	unsigned poly_stipple_enable:1;
	unsigned line_smooth:1;
	unsigned poly_smooth:1;
	unsigned uses_poly_offset:1;
	unsigned clamp_fragment_color:1;
	unsigned clamp_vertex_color:1;
	unsigned rasterizer_discard:1;
	unsigned scissor_enable:1;
	unsigned clip_halfz:1;
};

struct si_dsa_stencil_ref_part {
	uint8_t valuemask[2];
	uint8_t writemask[2];
};

struct si_dsa_order_invariance {
	/** Whether the final result in Z/S buffers is guaranteed to be
	 * invariant under changes to the order in which fragments arrive. */
	bool zs:1;

	/** Whether the set of fragments that pass the combined Z/S test is
	 * guaranteed to be invariant under changes to the order in which
	 * fragments arrive. */
	bool pass_set:1;

	/** Whether the last fragment that passes the combined Z/S test at each
	 * sample is guaranteed to be invariant under changes to the order in
	 * which fragments arrive. */
	bool pass_last:1;
};

struct si_state_dsa {
	struct si_pm4_state pm4;
	struct si_dsa_stencil_ref_part stencil_ref;

	/* 0 = without stencil buffer, 1 = when both Z and S buffers are present */
	struct si_dsa_order_invariance order_invariance[2];

	ubyte alpha_func:3;
	bool depth_enabled:1;
	bool depth_write_enabled:1;
	bool stencil_enabled:1;
	bool stencil_write_enabled:1;
	bool db_can_write:1;
};

struct si_stencil_ref {
	struct pipe_stencil_ref state;
	struct si_dsa_stencil_ref_part dsa_part;
};

struct si_vertex_elements
{
	uint32_t instance_divisors[SI_MAX_ATTRIBS];
	uint32_t rsrc_word3[SI_MAX_ATTRIBS];
	uint16_t src_offset[SI_MAX_ATTRIBS];
	uint8_t fix_fetch[SI_MAX_ATTRIBS];
	uint8_t format_size[SI_MAX_ATTRIBS];
	uint8_t vertex_buffer_index[SI_MAX_ATTRIBS];

	uint8_t count;
	bool uses_instance_divisors;

	uint16_t first_vb_use_mask;
	/* Vertex buffer descriptor list size aligned for optimal prefetch. */
	uint16_t desc_list_byte_size;
	uint16_t instance_divisor_is_one;     /* bitmask of inputs */
	uint16_t instance_divisor_is_fetched; /* bitmask of inputs */
};

union si_state {
	struct {
		struct si_state_blend *blend;
		struct si_state_rasterizer *rasterizer;
		struct si_state_dsa *dsa;
		struct si_pm4_state *poly_offset;
		struct si_pm4_state *ls;
		struct si_pm4_state *hs;
		struct si_pm4_state *es;
		struct si_pm4_state *gs;
		struct si_pm4_state *vgt_shader_config;
		struct si_pm4_state *vs;
		struct si_pm4_state *ps;
	} named;
	struct si_pm4_state *array[0];
};

#define SI_STATE_IDX(name) \
	(offsetof(union si_state, named.name) / sizeof(struct si_pm4_state *))
#define SI_STATE_BIT(name) (1 << SI_STATE_IDX(name))
#define SI_NUM_STATES (sizeof(union si_state) / sizeof(struct si_pm4_state *))

static inline unsigned si_states_that_roll_context(void)
{
	return (SI_STATE_BIT(blend) |
		SI_STATE_BIT(rasterizer) |
		SI_STATE_BIT(dsa) |
		SI_STATE_BIT(poly_offset) |
		SI_STATE_BIT(es) |
		SI_STATE_BIT(gs) |
		SI_STATE_BIT(vgt_shader_config) |
		SI_STATE_BIT(vs) |
		SI_STATE_BIT(ps));
}
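
/* Sketch of how the named/array aliasing in union si_state is consumed.
 * The real emit loop lives in si_state_draw.c; this version assumes the full
 * struct si_context definition (si_pipe.h) is in scope and that si_pm4_emit()
 * (si_pm4.h) and u_bit_scan() (util/u_math.h) are available.
 */
static inline void si_emit_dirty_states_sketch(struct si_context *sctx)
{
	unsigned mask = sctx->dirty_states;

	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct si_pm4_state *state = sctx->queued.array[i];

		/* Only emit if the queued state differs from the last emitted
		 * one (the same check si_pm4_state_changed below expresses). */
		if (state && state != sctx->emitted.array[i]) {
			si_pm4_emit(sctx, state);
			sctx->emitted.array[i] = state;
		}
	}
	sctx->dirty_states = 0;
}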

union si_state_atoms {
	struct {
		/* The order matters. */
		struct si_atom render_cond;
		struct si_atom streamout_begin;
		struct si_atom streamout_enable; /* must be after streamout_begin */
		struct si_atom framebuffer;
		struct si_atom msaa_sample_locs;
		struct si_atom db_render_state;
		struct si_atom dpbb_state;
		struct si_atom msaa_config;
		struct si_atom sample_mask;
		struct si_atom cb_render_state;
		struct si_atom blend_color;
		struct si_atom clip_regs;
		struct si_atom clip_state;
		struct si_atom shader_pointers;
		struct si_atom guardband;
		struct si_atom scissors;
		struct si_atom viewports;
		struct si_atom stencil_ref;
		struct si_atom spi_map;
		struct si_atom scratch_state;
		struct si_atom window_rectangles;
	} s;
	struct si_atom array[0];
};

#define SI_ATOM_BIT(name) (1 << (offsetof(union si_state_atoms, s.name) / \
				 sizeof(struct si_atom)))
#define SI_NUM_ATOMS (sizeof(union si_state_atoms)/sizeof(struct si_atom*))

static inline unsigned si_atoms_that_roll_context(void)
{
	return (SI_ATOM_BIT(streamout_begin) |
		SI_ATOM_BIT(streamout_enable) |
		SI_ATOM_BIT(framebuffer) |
		SI_ATOM_BIT(msaa_sample_locs) |
		SI_ATOM_BIT(db_render_state) |
		SI_ATOM_BIT(dpbb_state) |
		SI_ATOM_BIT(msaa_config) |
		SI_ATOM_BIT(sample_mask) |
		SI_ATOM_BIT(cb_render_state) |
		SI_ATOM_BIT(blend_color) |
		SI_ATOM_BIT(clip_regs) |
		SI_ATOM_BIT(clip_state) |
		SI_ATOM_BIT(guardband) |
		SI_ATOM_BIT(scissors) |
		SI_ATOM_BIT(viewports) |
		SI_ATOM_BIT(stencil_ref) |
		SI_ATOM_BIT(spi_map) |
		SI_ATOM_BIT(scratch_state));
}
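
/* Sketch of a si_mark_atom_dirty()-style helper showing why the atom bit
 * positions above are simply array indices.  sctx->atoms (a union
 * si_state_atoms) and sctx->dirty_atoms are assumed fields of struct
 * si_context, whose full definition (si_pipe.h) would also need to be in
 * scope for this to compile.
 */
static inline void si_mark_atom_dirty_sketch(struct si_context *sctx,
					     struct si_atom *atom)
{
	unsigned index = atom - sctx->atoms.array;

	sctx->dirty_atoms |= 1u << index;
}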

struct si_shader_data {
	uint32_t sh_base[SI_NUM_SHADERS];
};

/* The list of registers whose emitted values are remembered by si_context. */
enum si_tracked_reg {
	SI_TRACKED_DB_RENDER_CONTROL, /* 2 consecutive registers */
	SI_TRACKED_DB_COUNT_CONTROL,

	SI_TRACKED_DB_RENDER_OVERRIDE2,
	SI_TRACKED_DB_SHADER_CONTROL,

	SI_TRACKED_CB_TARGET_MASK,
	SI_TRACKED_CB_DCC_CONTROL,

	SI_TRACKED_SX_PS_DOWNCONVERT, /* 3 consecutive registers */
	SI_TRACKED_SX_BLEND_OPT_EPSILON,
	SI_TRACKED_SX_BLEND_OPT_CONTROL,

	SI_TRACKED_PA_SC_LINE_CNTL, /* 2 consecutive registers */
	SI_TRACKED_PA_SC_AA_CONFIG,

	SI_TRACKED_DB_EQAA,
	SI_TRACKED_PA_SC_MODE_CNTL_1,

	SI_TRACKED_PA_SU_SMALL_PRIM_FILTER_CNTL,

	SI_TRACKED_PA_CL_VS_OUT_CNTL,
	SI_TRACKED_PA_CL_CLIP_CNTL,

	SI_TRACKED_PA_SC_BINNER_CNTL_0,
	SI_TRACKED_DB_DFSM_CONTROL,

	SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ, /* 4 consecutive registers */
	SI_TRACKED_PA_CL_GB_VERT_DISC_ADJ,
	SI_TRACKED_PA_CL_GB_HORZ_CLIP_ADJ,
	SI_TRACKED_PA_CL_GB_HORZ_DISC_ADJ,

	SI_TRACKED_PA_SC_CLIPRECT_RULE,

	SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,

	SI_TRACKED_VGT_GSVS_RING_OFFSET_1, /* 4 consecutive registers */
	SI_TRACKED_VGT_GSVS_RING_OFFSET_2,
	SI_TRACKED_VGT_GSVS_RING_OFFSET_3,
	SI_TRACKED_VGT_GS_OUT_PRIM_TYPE,

	SI_TRACKED_VGT_GSVS_RING_ITEMSIZE,
	SI_TRACKED_VGT_GS_MAX_VERT_OUT,

	SI_TRACKED_VGT_GS_VERT_ITEMSIZE, /* 4 consecutive registers */
	SI_TRACKED_VGT_GS_VERT_ITEMSIZE_1,
	SI_TRACKED_VGT_GS_VERT_ITEMSIZE_2,
	SI_TRACKED_VGT_GS_VERT_ITEMSIZE_3,

	SI_TRACKED_VGT_GS_INSTANCE_CNT,
	SI_TRACKED_VGT_GS_ONCHIP_CNTL,
	SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
	SI_TRACKED_VGT_GS_MODE,
	SI_TRACKED_VGT_PRIMITIVEID_EN,
	SI_TRACKED_VGT_REUSE_OFF,
	SI_TRACKED_SPI_VS_OUT_CONFIG,
	SI_TRACKED_SPI_SHADER_POS_FORMAT,
	SI_TRACKED_PA_CL_VTE_CNTL,

	SI_NUM_TRACKED_REGS,
};

struct si_tracked_regs {
	uint64_t reg_saved;
	uint32_t reg_value[SI_NUM_TRACKED_REGS];
	uint32_t spi_ps_input_cntl[32];
};
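
/* Sketch of the redundant-SET_CONTEXT_REG filtering that si_tracked_regs
 * enables, which is what the "optimize SET_CONTEXT_REG" change applies to the
 * VS state.  The driver's own helper for this is radeon_opt_set_context_reg()
 * (see si_build_pm4.h); sctx->tracked_regs, sctx->gfx_cs and
 * radeon_set_context_reg() are assumptions about code outside this header,
 * and the full struct si_context definition is assumed to be in scope.
 */
static inline void si_opt_set_context_reg_sketch(struct si_context *sctx,
						 unsigned reg_offset,
						 enum si_tracked_reg reg,
						 uint32_t value)
{
	struct si_tracked_regs *regs = &sctx->tracked_regs;

	/* Emit the packet only if this register has not been written yet in
	 * the current command stream, or if its last emitted value differs. */
	if (!(regs->reg_saved & (1ull << reg)) ||
	    regs->reg_value[reg] != value) {
		radeon_set_context_reg(sctx->gfx_cs, reg_offset, value);
		regs->reg_saved |= 1ull << reg;
		regs->reg_value[reg] = value;
	}
}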

/* Private read-write buffer slots. */
enum {
	SI_ES_RING_ESGS,
	SI_GS_RING_ESGS,

	SI_RING_GSVS,

	SI_VS_STREAMOUT_BUF0,
	SI_VS_STREAMOUT_BUF1,
	SI_VS_STREAMOUT_BUF2,
	SI_VS_STREAMOUT_BUF3,

	SI_HS_CONST_DEFAULT_TESS_LEVELS,
	SI_VS_CONST_INSTANCE_DIVISORS,
	SI_VS_CONST_CLIP_PLANES,
	SI_PS_CONST_POLY_STIPPLE,
	SI_PS_CONST_SAMPLE_POSITIONS,

	/* Image descriptor of color buffer 0 for KHR_blend_equation_advanced. */
	SI_PS_IMAGE_COLORBUF0,
	SI_PS_IMAGE_COLORBUF0_HI,
	SI_PS_IMAGE_COLORBUF0_FMASK,
	SI_PS_IMAGE_COLORBUF0_FMASK_HI,

	SI_NUM_RW_BUFFERS,
};

/* Indices into sctx->descriptors, laid out so that gfx and compute pipelines
 * are contiguous:
 *
 *  0 - rw buffers
 *  1 - vertex const and shader buffers
 *  2 - vertex samplers and images
 *  3 - fragment const and shader buffers
 *  ...
 *  11 - compute const and shader buffers
 *  12 - compute samplers and images
 */
enum {
	SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
	SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
	SI_NUM_SHADER_DESCS,
};

#define SI_DESCS_RW_BUFFERS 0
#define SI_DESCS_FIRST_SHADER 1
#define SI_DESCS_FIRST_COMPUTE (SI_DESCS_FIRST_SHADER + \
				PIPE_SHADER_COMPUTE * SI_NUM_SHADER_DESCS)
#define SI_NUM_DESCS (SI_DESCS_FIRST_SHADER + \
		      SI_NUM_SHADERS * SI_NUM_SHADER_DESCS)

#define SI_DESCS_SHADER_MASK(name) \
	u_bit_consecutive(SI_DESCS_FIRST_SHADER + \
			  PIPE_SHADER_##name * SI_NUM_SHADER_DESCS, \
			  SI_NUM_SHADER_DESCS)

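/* Sketch of how a stage's descriptor-set index is derived from the layout
 * documented above; a helper of this shape lives in si_descriptors.c, and the
 * name used here is illustrative only.
 */
static inline unsigned
si_const_and_shader_buffer_descriptors_idx_sketch(enum pipe_shader_type shader)
{
	return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
	       SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS;
}
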
/* This represents descriptors in memory, such as buffer resources,
 * image resources, and sampler states.
 */
struct si_descriptors {
	/* The list of descriptors in malloc'd memory. */
	uint32_t *list;
	/* The list in mapped GPU memory. */
	uint32_t *gpu_list;

	/* The buffer where the descriptors have been uploaded. */
	struct r600_resource *buffer;
	uint64_t gpu_address;

	/* The maximum number of descriptors. */
	uint32_t num_elements;

	/* Slots that are used by currently-bound shaders.
	 * It determines which slots are uploaded.
	 */
	uint32_t first_active_slot;
	uint32_t num_active_slots;

	/* The SH register offset relative to USER_DATA*_0 where the pointer
	 * to the descriptor array will be stored. */
	short shader_userdata_offset;
	/* The size of one descriptor. */
	ubyte element_dw_size;
	/* If there is only one slot enabled, bind it directly instead of
	 * uploading descriptors. -1 if disabled. */
	signed char slot_index_to_bind_directly;
};

struct si_buffer_resources {
	struct pipe_resource **buffers; /* this has num_buffers elements */

	enum radeon_bo_usage shader_usage:4; /* READ, WRITE, or READWRITE */
	enum radeon_bo_usage shader_usage_constbuf:4;
	enum radeon_bo_priority priority:6;
	enum radeon_bo_priority priority_constbuf:6;

	/* The i-th bit is set if that element is enabled (non-NULL resource). */
	unsigned enabled_mask;
};

#define si_pm4_state_changed(sctx, member) \
	((sctx)->queued.named.member != (sctx)->emitted.named.member)

#define si_pm4_state_enabled_and_changed(sctx, member) \
	((sctx)->queued.named.member && si_pm4_state_changed(sctx, member))

#define si_pm4_bind_state(sctx, member, value) \
	do { \
		(sctx)->queued.named.member = (value); \
		(sctx)->dirty_states |= SI_STATE_BIT(member); \
	} while(0)

#define si_pm4_delete_state(sctx, member, value) \
	do { \
		if ((sctx)->queued.named.member == (value)) { \
			(sctx)->queued.named.member = NULL; \
		} \
		si_pm4_free_state(sctx, (struct si_pm4_state *)(value), \
				  SI_STATE_IDX(member)); \
	} while(0)
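
/* Sketch of how the binding macros above are used by a gallium CSO bind
 * callback (the real callbacks are in si_state.c and do additional dirtying).
 * The cast assumes the usual radeonsi convention that a pipe_context is a
 * si_context, whose full definition (si_pipe.h) must be in scope.
 */
static inline void si_bind_blend_state_sketch(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;

	/* Queue the CSO and mark it dirty; it is emitted on the next draw. */
	si_pm4_bind_state(sctx, blend, (struct si_state_blend *)state);
}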

/* si_descriptors.c */
void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
				    struct si_texture *tex,
				    const struct legacy_surf_level *base_level_info,
				    unsigned base_level, unsigned first_level,
				    unsigned block_width, bool is_stencil,
				    uint32_t *state);
void si_update_ps_colorbuf0_slot(struct si_context *sctx);
void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader,
				 uint slot, struct pipe_constant_buffer *cbuf);
void si_get_shader_buffers(struct si_context *sctx,
			   enum pipe_shader_type shader,
			   uint start_slot, uint count,
			   struct pipe_shader_buffer *sbuf);
void si_set_ring_buffer(struct si_context *sctx, uint slot,
			struct pipe_resource *buffer,
			unsigned stride, unsigned num_records,
			bool add_tid, bool swizzle,
			unsigned element_size, unsigned index_stride, uint64_t offset);
void si_init_all_descriptors(struct si_context *sctx);
bool si_upload_vertex_buffer_descriptors(struct si_context *sctx);
bool si_upload_graphics_shader_descriptors(struct si_context *sctx);
bool si_upload_compute_shader_descriptors(struct si_context *sctx);
void si_release_all_descriptors(struct si_context *sctx);
void si_all_descriptors_begin_new_cs(struct si_context *sctx);
void si_all_resident_buffers_begin_new_cs(struct si_context *sctx);
void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
			    const uint8_t *ptr, unsigned size, uint32_t *const_offset);
void si_update_all_texture_descriptors(struct si_context *sctx);
void si_shader_change_notify(struct si_context *sctx);
void si_update_needs_color_decompress_masks(struct si_context *sctx);
void si_emit_graphics_shader_pointers(struct si_context *sctx);
void si_emit_compute_shader_pointers(struct si_context *sctx);
void si_set_rw_buffer(struct si_context *sctx,
		      uint slot, const struct pipe_constant_buffer *input);
void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
			       uint64_t new_active_mask);
void si_set_active_descriptors_for_shader(struct si_context *sctx,
					  struct si_shader_selector *sel);
bool si_bindless_descriptor_can_reclaim_slab(void *priv,
					     struct pb_slab_entry *entry);
struct pb_slab *si_bindless_descriptor_slab_alloc(void *priv, unsigned heap,
						  unsigned entry_size,
						  unsigned group_index);
void si_bindless_descriptor_slab_free(void *priv, struct pb_slab *pslab);
void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
		      uint64_t old_va);

/* si_state.c */
void si_init_state_functions(struct si_context *sctx);
void si_init_screen_state_functions(struct si_screen *sscreen);
void
si_make_buffer_descriptor(struct si_screen *screen, struct r600_resource *buf,
			  enum pipe_format format,
			  unsigned offset, unsigned size,
			  uint32_t *state);
void
si_make_texture_descriptor(struct si_screen *screen,
			   struct si_texture *tex,
			   bool sampler,
			   enum pipe_texture_target target,
			   enum pipe_format pipe_format,
			   const unsigned char state_swizzle[4],
			   unsigned first_level, unsigned last_level,
			   unsigned first_layer, unsigned last_layer,
			   unsigned width, unsigned height, unsigned depth,
			   uint32_t *state,
			   uint32_t *fmask_state);
struct pipe_sampler_view *
si_create_sampler_view_custom(struct pipe_context *ctx,
			      struct pipe_resource *texture,
			      const struct pipe_sampler_view *state,
			      unsigned width0, unsigned height0,
			      unsigned force_level);
void si_update_fb_dirtiness_after_rendering(struct si_context *sctx);
void si_update_ps_iter_samples(struct si_context *sctx);
void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st);
void si_set_occlusion_query_state(struct si_context *sctx,
				  bool old_perfect_enable);

/* si_state_binning.c */
void si_emit_dpbb_state(struct si_context *sctx);

/* si_state_shaders.c */
void *si_get_ir_binary(struct si_shader_selector *sel);
bool si_shader_cache_load_shader(struct si_screen *sscreen, void *ir_binary,
				 struct si_shader *shader);
bool si_shader_cache_insert_shader(struct si_screen *sscreen, void *ir_binary,
				   struct si_shader *shader,
				   bool insert_into_disk_cache);
bool si_update_shaders(struct si_context *sctx);
void si_init_shader_functions(struct si_context *sctx);
bool si_init_shader_cache(struct si_screen *sscreen);
void si_destroy_shader_cache(struct si_screen *sscreen);
void si_schedule_initial_compile(struct si_context *sctx, unsigned processor,
				 struct util_queue_fence *ready_fence,
				 struct si_compiler_ctx_state *compiler_ctx_state,
				 void *job, util_queue_execute_func execute);
void si_get_active_slot_masks(const struct tgsi_shader_info *info,
			      uint32_t *const_and_shader_buffers,
			      uint64_t *samplers_and_images);

/* si_state_draw.c */
void si_init_ia_multi_vgt_param_table(struct si_context *sctx);
void si_emit_cache_flush(struct si_context *sctx);
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo);
void si_draw_rectangle(struct blitter_context *blitter,
		       void *vertex_elements_cso,
		       blitter_get_vs_func get_vs,
		       int x1, int y1, int x2, int y2,
		       float depth, unsigned num_instances,
		       enum blitter_attrib_type type,
		       const union blitter_attrib *attrib);
void si_trace_emit(struct si_context *sctx);

/* si_state_msaa.c */
void si_init_msaa_functions(struct si_context *sctx);
void si_emit_sample_locations(struct radeon_cmdbuf *cs, int nr_samples);

/* si_state_streamout.c */
void si_streamout_buffers_dirty(struct si_context *sctx);
void si_emit_streamout_end(struct si_context *sctx);
void si_update_prims_generated_query_state(struct si_context *sctx,
					   unsigned type, int diff);
void si_init_streamout_functions(struct si_context *sctx);

static inline unsigned si_get_constbuf_slot(unsigned slot)
{
	/* Constant buffers are in slots [16..31], ascending */
	return SI_NUM_SHADER_BUFFERS + slot;
}

static inline unsigned si_get_shaderbuf_slot(unsigned slot)
{
	/* shader buffers are in slots [15..0], descending */
	return SI_NUM_SHADER_BUFFERS - 1 - slot;
}

static inline unsigned si_get_sampler_slot(unsigned slot)
{
	/* samplers are in slots [8..39], ascending */
	return SI_NUM_IMAGES / 2 + slot;
}

static inline unsigned si_get_image_slot(unsigned slot)
{
	/* images are in slots [15..0] (sampler slots [7..0]), descending */
	return SI_NUM_IMAGES - 1 - slot;
}
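
/* Worked example of the combined slot layout implied by the helpers above,
 * using SI_NUM_SHADER_BUFFERS == 16 and SI_NUM_CONST_BUFFERS == 16 from this
 * file: shader buffers occupy slots [15..0] (descending) and constant buffers
 * occupy slots [16..31] (ascending), e.g. si_get_shaderbuf_slot(0) == 15 and
 * si_get_constbuf_slot(0) == 16, so both kinds share one 32-slot
 * "const and shader buffers" descriptor list.
 */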

#endif