radeonsi/gfx10: set user data base registers
[mesa.git] src/gallium/drivers/radeonsi/si_descriptors.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/* Resource binding slots and sampler states (each described with 8 or
 * 4 dwords) are stored in lists in memory which is accessed by shaders
 * using scalar load instructions.
 *
 * This file is responsible for managing such lists. It keeps a copy of all
 * descriptors in CPU memory and re-uploads a whole list if some slots have
 * been changed.
 *
 * This code is also responsible for updating shader pointers to those lists.
 *
 * Note that CP DMA can't be used for updating the lists, because a GPU hang
 * could leave the list in a mid-IB state and the next IB would get wrong
 * descriptors and the whole context would be unusable at that point.
 * (Note: register shadowing can't be used for the same reason.)
 *
 * Also, uploading descriptors to newly allocated memory doesn't require
 * a KCACHE flush.
 *
 *
 * Possible scenarios for one 16 dword image+sampler slot:
 *
 *          | Image        | w/ FMASK   | Buffer       | NULL
 * [ 0: 3]  | Image[0:3]   | Image[0:3] | Null[0:3]    | Null[0:3]
 * [ 4: 7]  | Image[4:7]   | Image[4:7] | Buffer[0:3]  | 0
 * [ 8:11]  | Null[0:3]    | Fmask[0:3] | Null[0:3]    | Null[0:3]
 * [12:15]  | Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
 *
 * FMASK implies MSAA, therefore no sampler state.
 * Sampler states are never unbound except when FMASK is bound.
 */

#include "si_pipe.h"
#include "sid.h"

#include "util/hash_table.h"
#include "util/u_idalloc.h"
#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

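/* Illustrative sketch (hypothetical helpers, not used by the driver): how a
 * 16-dword combined image+sampler slot from the layout table above breaks
 * down into its three parts. Only the dword offsets are taken from the table;
 * the helper names are made up for illustration.
 */
static inline uint32_t *demo_slot_image_part(uint32_t *slot)
{
	return slot; /* dwords [0:7]: image, or buffer in [4:7] */
}

static inline uint32_t *demo_slot_fmask_part(uint32_t *slot)
{
	return slot + 8; /* dwords [8:11]: FMASK or null descriptor */
}

static inline uint32_t *demo_slot_sampler_part(uint32_t *slot)
{
	return slot + 12; /* dwords [12:15]: sampler state, or FMASK[4:7] */
}
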
/* NULL image and buffer descriptor for textures (alpha = 1) and images
 * (alpha = 0).
 *
 * For images, all fields must be zero except for the swizzle, which
 * supports arbitrary combinations of 0s and 1s. The texture type must be
 * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
 *
 * For buffers, all fields must be zero. If they are not, the hw hangs.
 *
 * This is the only reason why the buffer descriptor must be in words [4:7].
 */
static uint32_t null_texture_descriptor[8] = {
	0,
	0,
	0,
	S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
	S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
	/* the rest must contain zeros, which is also used by the buffer
	 * descriptor */
};

static uint32_t null_image_descriptor[8] = {
	0,
	0,
	0,
	S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
	/* the rest must contain zeros, which is also used by the buffer
	 * descriptor */
};

static uint64_t si_desc_extract_buffer_address(const uint32_t *desc)
{
	uint64_t va = desc[0] |
		      ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);

	/* Sign-extend the 48-bit address. */
	va <<= 16;
	va = (int64_t)va >> 16;
	return va;
}
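
/* Worked example of the sign extension above (assumed values): with
 * desc[0] = 0xfffff000 and BASE_ADDRESS_HI = 0xffff, va starts as
 * 0x0000fffffffff000; shifting left by 16 and then arithmetically right
 * by 16 replicates bit 47, yielding 0xfffffffffffff000. */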

static void si_init_descriptor_list(uint32_t *desc_list,
				    unsigned element_dw_size,
				    unsigned num_elements,
				    const uint32_t *null_descriptor)
{
	int i;

	/* Initialize the array to NULL descriptors if the element size is
	 * a multiple of 8 dwords. */
	if (null_descriptor) {
		assert(element_dw_size % 8 == 0);
		for (i = 0; i < num_elements * element_dw_size / 8; i++)
			memcpy(desc_list + i * 8, null_descriptor, 8 * 4);
	}
}
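
/* For combined sampler+image lists, element_dw_size is 16, so the loop
 * above writes two 8-dword null descriptors per element. */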

static void si_init_descriptors(struct si_descriptors *desc,
				short shader_userdata_rel_index,
				unsigned element_dw_size,
				unsigned num_elements)
{
	desc->list = CALLOC(num_elements, element_dw_size * 4);
	desc->element_dw_size = element_dw_size;
	desc->num_elements = num_elements;
	desc->shader_userdata_offset = shader_userdata_rel_index * 4;
	desc->slot_index_to_bind_directly = -1;
}

static void si_release_descriptors(struct si_descriptors *desc)
{
	si_resource_reference(&desc->buffer, NULL);
	FREE(desc->list);
}

static bool si_upload_descriptors(struct si_context *sctx,
				  struct si_descriptors *desc)
{
	unsigned slot_size = desc->element_dw_size * 4;
	unsigned first_slot_offset = desc->first_active_slot * slot_size;
	unsigned upload_size = desc->num_active_slots * slot_size;

	/* Skip the upload if no shader is using the descriptors. dirty_mask
	 * will stay dirty and the descriptors will be uploaded when there is
	 * a shader using them.
	 */
	if (!upload_size)
		return true;

	/* If there is just one active descriptor, bind it directly. */
	if ((int)desc->first_active_slot == desc->slot_index_to_bind_directly &&
	    desc->num_active_slots == 1) {
		uint32_t *descriptor = &desc->list[desc->slot_index_to_bind_directly *
						   desc->element_dw_size];

		/* The buffer is already in the buffer list. */
		si_resource_reference(&desc->buffer, NULL);
		desc->gpu_list = NULL;
		desc->gpu_address = si_desc_extract_buffer_address(descriptor);
		si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
		return true;
	}

	uint32_t *ptr;
	unsigned buffer_offset;
	u_upload_alloc(sctx->b.const_uploader, first_slot_offset, upload_size,
		       si_optimal_tcc_alignment(sctx, upload_size),
		       &buffer_offset, (struct pipe_resource**)&desc->buffer,
		       (void**)&ptr);
	if (!desc->buffer) {
		desc->gpu_address = 0;
		return false; /* skip the draw call */
	}

	util_memcpy_cpu_to_le32(ptr, (char*)desc->list + first_slot_offset,
				upload_size);
	desc->gpu_list = ptr - first_slot_offset / 4;

	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, desc->buffer,
				  RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);

	/* The shader pointer should point to slot 0. */
	buffer_offset -= first_slot_offset;
	desc->gpu_address = desc->buffer->gpu_address + buffer_offset;

	assert(desc->buffer->flags & RADEON_FLAG_32BIT);
	assert((desc->buffer->gpu_address >> 32) == sctx->screen->info.address32_hi);
	assert((desc->gpu_address >> 32) == sctx->screen->info.address32_hi);

	si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
	return true;
}
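
/* Example of the gpu_list bias above (assumed numbers): with
 * first_active_slot = 2 and element_dw_size = 4, first_slot_offset is 32
 * bytes, so gpu_list = ptr - 8 dwords; gpu_list[2 * 4] then aliases the
 * first uploaded dword, letting callers index by absolute slot number. */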

static void
si_descriptors_begin_new_cs(struct si_context *sctx, struct si_descriptors *desc)
{
	if (!desc->buffer)
		return;

	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, desc->buffer,
				  RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
}

/* SAMPLER VIEWS */

static inline enum radeon_bo_priority
si_get_sampler_view_priority(struct si_resource *res)
{
	if (res->b.b.target == PIPE_BUFFER)
		return RADEON_PRIO_SAMPLER_BUFFER;

	if (res->b.b.nr_samples > 1)
		return RADEON_PRIO_SAMPLER_TEXTURE_MSAA;

	return RADEON_PRIO_SAMPLER_TEXTURE;
}

static struct si_descriptors *
si_sampler_and_image_descriptors(struct si_context *sctx, unsigned shader)
{
	return &sctx->descriptors[si_sampler_and_image_descriptors_idx(shader)];
}

static void si_release_sampler_views(struct si_samplers *samplers)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(samplers->views); i++) {
		pipe_sampler_view_reference(&samplers->views[i], NULL);
	}
}

static void si_sampler_view_add_buffer(struct si_context *sctx,
				       struct pipe_resource *resource,
				       enum radeon_bo_usage usage,
				       bool is_stencil_sampler,
				       bool check_mem)
{
	struct si_texture *tex = (struct si_texture*)resource;
	enum radeon_bo_priority priority;

	if (!resource)
		return;

	/* Use the flushed depth texture if direct sampling is unsupported. */
	if (resource->target != PIPE_BUFFER &&
	    tex->is_depth && !si_can_sample_zs(tex, is_stencil_sampler))
		tex = tex->flushed_depth_texture;

	priority = si_get_sampler_view_priority(&tex->buffer);
	radeon_add_to_gfx_buffer_list_check_mem(sctx, &tex->buffer, usage, priority,
						check_mem);

	if (resource->target == PIPE_BUFFER)
		return;

	/* Add separate DCC. */
	if (tex->dcc_separate_buffer) {
		radeon_add_to_gfx_buffer_list_check_mem(sctx, tex->dcc_separate_buffer,
							usage, RADEON_PRIO_SEPARATE_META, check_mem);
	}
}

static void si_sampler_views_begin_new_cs(struct si_context *sctx,
					  struct si_samplers *samplers)
{
	unsigned mask = samplers->enabled_mask;

	/* Add buffers to the CS. */
	while (mask) {
		int i = u_bit_scan(&mask);
		struct si_sampler_view *sview = (struct si_sampler_view *)samplers->views[i];

		si_sampler_view_add_buffer(sctx, sview->base.texture,
					   RADEON_USAGE_READ,
					   sview->is_stencil_sampler, false);
	}
}

/* Set buffer descriptor fields that can be changed by reallocations. */
static void si_set_buf_desc_address(struct si_resource *buf,
				    uint64_t offset, uint32_t *state)
{
	uint64_t va = buf->gpu_address + offset;

	state[0] = va;
	state[1] &= C_008F04_BASE_ADDRESS_HI;
	state[1] |= S_008F04_BASE_ADDRESS_HI(va >> 32);
}
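
/* Example of the address split above (assumed address): for
 * va = 0x123456789000, state[0] receives the low 32 bits (0x56789000)
 * and BASE_ADDRESS_HI in state[1] receives va >> 32 (0x1234); the other
 * state[1] fields are preserved by the mask. */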

/* Set texture descriptor fields that can be changed by reallocations.
 *
 * \param tex			texture
 * \param base_level_info	information of the level of BASE_ADDRESS
 * \param base_level		the level of BASE_ADDRESS
 * \param first_level		pipe_sampler_view.u.tex.first_level
 * \param block_width		util_format_get_blockwidth()
 * \param is_stencil		select between separate Z & Stencil
 * \param state			descriptor to update
 */
void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
				    struct si_texture *tex,
				    const struct legacy_surf_level *base_level_info,
				    unsigned base_level, unsigned first_level,
				    unsigned block_width, bool is_stencil,
				    uint32_t *state)
{
	uint64_t va, meta_va = 0;

	if (tex->is_depth && !si_can_sample_zs(tex, is_stencil)) {
		tex = tex->flushed_depth_texture;
		is_stencil = false;
	}

	va = tex->buffer.gpu_address;

	if (sscreen->info.chip_class >= GFX9) {
		/* Only stencil_offset needs to be added here. */
		if (is_stencil)
			va += tex->surface.u.gfx9.stencil_offset;
		else
			va += tex->surface.u.gfx9.surf_offset;
	} else {
		va += base_level_info->offset;
	}

	state[0] = va >> 8;
	state[1] &= C_008F14_BASE_ADDRESS_HI;
	state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);

	/* Only macrotiled modes can set tile swizzle.
	 * GFX9 doesn't use (legacy) base_level_info.
	 */
	if (sscreen->info.chip_class >= GFX9 ||
	    base_level_info->mode == RADEON_SURF_MODE_2D)
		state[0] |= tex->surface.tile_swizzle;

	if (sscreen->info.chip_class >= GFX8) {
		state[6] &= C_008F28_COMPRESSION_EN;

		if (vi_dcc_enabled(tex, first_level)) {
			meta_va = (!tex->dcc_separate_buffer ? tex->buffer.gpu_address : 0) +
				  tex->dcc_offset;

			if (sscreen->info.chip_class == GFX8) {
				meta_va += base_level_info->dcc_offset;
				assert(base_level_info->mode == RADEON_SURF_MODE_2D);
			}

			meta_va |= (uint32_t)tex->surface.tile_swizzle << 8;
		} else if (vi_tc_compat_htile_enabled(tex, first_level)) {
			meta_va = tex->buffer.gpu_address + tex->htile_offset;
		}

		if (meta_va)
			state[6] |= S_008F28_COMPRESSION_EN(1);
	}

	if (sscreen->info.chip_class >= GFX8 && sscreen->info.chip_class <= GFX9)
		state[7] = meta_va >> 8;

	if (sscreen->info.chip_class >= GFX10) {
		state[3] &= C_00A00C_SW_MODE;

		if (is_stencil) {
			state[3] |= S_00A00C_SW_MODE(tex->surface.u.gfx9.stencil.swizzle_mode);
		} else {
			state[3] |= S_00A00C_SW_MODE(tex->surface.u.gfx9.surf.swizzle_mode);
		}

		state[6] &= C_00A018_META_DATA_ADDRESS_LO &
			    C_00A018_META_PIPE_ALIGNED;

		if (meta_va) {
			struct gfx9_surf_meta_flags meta;

			if (tex->dcc_offset)
				meta = tex->surface.u.gfx9.dcc;
			else
				meta = tex->surface.u.gfx9.htile;

			state[6] |= S_00A018_META_PIPE_ALIGNED(meta.pipe_aligned) |
				    S_00A018_META_DATA_ADDRESS_LO(meta_va >> 8);
		}

		state[7] = meta_va >> 16;
	} else if (sscreen->info.chip_class >= GFX9) {
		state[3] &= C_008F1C_SW_MODE;
		state[4] &= C_008F20_PITCH;

		if (is_stencil) {
			state[3] |= S_008F1C_SW_MODE(tex->surface.u.gfx9.stencil.swizzle_mode);
			state[4] |= S_008F20_PITCH(tex->surface.u.gfx9.stencil.epitch);
		} else {
			state[3] |= S_008F1C_SW_MODE(tex->surface.u.gfx9.surf.swizzle_mode);
			state[4] |= S_008F20_PITCH(tex->surface.u.gfx9.surf.epitch);
		}

		state[5] &= C_008F24_META_DATA_ADDRESS &
			    C_008F24_META_PIPE_ALIGNED &
			    C_008F24_META_RB_ALIGNED;
		if (meta_va) {
			struct gfx9_surf_meta_flags meta;

			if (tex->dcc_offset)
				meta = tex->surface.u.gfx9.dcc;
			else
				meta = tex->surface.u.gfx9.htile;

			state[5] |= S_008F24_META_DATA_ADDRESS(meta_va >> 40) |
				    S_008F24_META_PIPE_ALIGNED(meta.pipe_aligned) |
				    S_008F24_META_RB_ALIGNED(meta.rb_aligned);
		}
	} else {
		/* GFX6-GFX8 */
		unsigned pitch = base_level_info->nblk_x * block_width;
		unsigned index = si_tile_mode_index(tex, base_level, is_stencil);

		state[3] &= C_008F1C_TILING_INDEX;
		state[3] |= S_008F1C_TILING_INDEX(index);
		state[4] &= C_008F20_PITCH;
		state[4] |= S_008F20_PITCH(pitch - 1);
	}
}

static void si_set_sampler_state_desc(struct si_sampler_state *sstate,
				      struct si_sampler_view *sview,
				      struct si_texture *tex,
				      uint32_t *desc)
{
	if (sview && sview->is_integer)
		memcpy(desc, sstate->integer_val, 4*4);
	else if (tex && tex->upgraded_depth &&
		 (!sview || !sview->is_stencil_sampler))
		memcpy(desc, sstate->upgraded_depth_val, 4*4);
	else
		memcpy(desc, sstate->val, 4*4);
}

static void si_set_sampler_view_desc(struct si_context *sctx,
				     struct si_sampler_view *sview,
				     struct si_sampler_state *sstate,
				     uint32_t *desc)
{
	struct pipe_sampler_view *view = &sview->base;
	struct si_texture *tex = (struct si_texture *)view->texture;
	bool is_buffer = tex->buffer.b.b.target == PIPE_BUFFER;

	if (unlikely(!is_buffer && sview->dcc_incompatible)) {
		if (vi_dcc_enabled(tex, view->u.tex.first_level))
			if (!si_texture_disable_dcc(sctx, tex))
				si_decompress_dcc(sctx, tex);

		sview->dcc_incompatible = false;
	}

	assert(tex); /* views with texture == NULL aren't supported */
	memcpy(desc, sview->state, 8*4);

	if (is_buffer) {
		si_set_buf_desc_address(&tex->buffer,
					sview->base.u.buf.offset,
					desc + 4);
	} else {
		bool is_separate_stencil = tex->db_compatible &&
					   sview->is_stencil_sampler;

		si_set_mutable_tex_desc_fields(sctx->screen, tex,
					       sview->base_level_info,
					       sview->base_level,
					       sview->base.u.tex.first_level,
					       sview->block_width,
					       is_separate_stencil,
					       desc);
	}

	if (!is_buffer && tex->surface.fmask_size) {
		memcpy(desc + 8, sview->fmask_state, 8*4);
	} else {
		/* Disable FMASK and bind sampler state in [12:15]. */
		memcpy(desc + 8, null_texture_descriptor, 4*4);

		if (sstate)
			si_set_sampler_state_desc(sstate, sview,
						  is_buffer ? NULL : tex,
						  desc + 12);
	}
}

static bool color_needs_decompression(struct si_texture *tex)
{
	return tex->surface.fmask_size ||
	       (tex->dirty_level_mask &&
		(tex->cmask_buffer || tex->dcc_offset));
}

static bool depth_needs_decompression(struct si_texture *tex)
{
	/* If the depth/stencil texture is TC-compatible, no decompression
	 * will be done. The decompression function will only flush DB caches
	 * to make it coherent with shaders. That's necessary because the driver
	 * doesn't flush DB caches in any other case.
	 */
	return tex->db_compatible;
}

static void si_set_sampler_view(struct si_context *sctx,
				unsigned shader,
				unsigned slot, struct pipe_sampler_view *view,
				bool disallow_early_out)
{
	struct si_samplers *samplers = &sctx->samplers[shader];
	struct si_sampler_view *sview = (struct si_sampler_view*)view;
	struct si_descriptors *descs = si_sampler_and_image_descriptors(sctx, shader);
	unsigned desc_slot = si_get_sampler_slot(slot);
	uint32_t *desc = descs->list + desc_slot * 16;

	if (samplers->views[slot] == view && !disallow_early_out)
		return;

	if (view) {
		struct si_texture *tex = (struct si_texture *)view->texture;

		si_set_sampler_view_desc(sctx, sview,
					 samplers->sampler_states[slot], desc);

		if (tex->buffer.b.b.target == PIPE_BUFFER) {
			tex->buffer.bind_history |= PIPE_BIND_SAMPLER_VIEW;
			samplers->needs_depth_decompress_mask &= ~(1u << slot);
			samplers->needs_color_decompress_mask &= ~(1u << slot);
		} else {
			if (depth_needs_decompression(tex)) {
				samplers->needs_depth_decompress_mask |= 1u << slot;
			} else {
				samplers->needs_depth_decompress_mask &= ~(1u << slot);
			}
			if (color_needs_decompression(tex)) {
				samplers->needs_color_decompress_mask |= 1u << slot;
			} else {
				samplers->needs_color_decompress_mask &= ~(1u << slot);
			}

			if (tex->dcc_offset &&
			    p_atomic_read(&tex->framebuffers_bound))
				sctx->need_check_render_feedback = true;
		}

		pipe_sampler_view_reference(&samplers->views[slot], view);
		samplers->enabled_mask |= 1u << slot;

		/* Since this can flush, it must be done after enabled_mask is
		 * updated. */
		si_sampler_view_add_buffer(sctx, view->texture,
					   RADEON_USAGE_READ,
					   sview->is_stencil_sampler, true);
	} else {
		pipe_sampler_view_reference(&samplers->views[slot], NULL);
		memcpy(desc, null_texture_descriptor, 8*4);
		/* Only clear the lower dwords of FMASK. */
		memcpy(desc + 8, null_texture_descriptor, 4*4);
		/* Re-set the sampler state if we are transitioning from FMASK. */
		if (samplers->sampler_states[slot])
			si_set_sampler_state_desc(samplers->sampler_states[slot], NULL, NULL,
						  desc + 12);

		samplers->enabled_mask &= ~(1u << slot);
		samplers->needs_depth_decompress_mask &= ~(1u << slot);
		samplers->needs_color_decompress_mask &= ~(1u << slot);
	}

	sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
}

static void si_update_shader_needs_decompress_mask(struct si_context *sctx,
						   unsigned shader)
{
	struct si_samplers *samplers = &sctx->samplers[shader];
	unsigned shader_bit = 1 << shader;

	if (samplers->needs_depth_decompress_mask ||
	    samplers->needs_color_decompress_mask ||
	    sctx->images[shader].needs_color_decompress_mask)
		sctx->shader_needs_decompress_mask |= shader_bit;
	else
		sctx->shader_needs_decompress_mask &= ~shader_bit;
}

static void si_set_sampler_views(struct pipe_context *ctx,
				 enum pipe_shader_type shader, unsigned start,
				 unsigned count,
				 struct pipe_sampler_view **views)
{
	struct si_context *sctx = (struct si_context *)ctx;
	int i;

	if (!count || shader >= SI_NUM_SHADERS)
		return;

	if (views) {
		for (i = 0; i < count; i++)
			si_set_sampler_view(sctx, shader, start + i, views[i], false);
	} else {
		for (i = 0; i < count; i++)
			si_set_sampler_view(sctx, shader, start + i, NULL, false);
	}

	si_update_shader_needs_decompress_mask(sctx, shader);
}

static void
si_samplers_update_needs_color_decompress_mask(struct si_samplers *samplers)
{
	unsigned mask = samplers->enabled_mask;

	while (mask) {
		int i = u_bit_scan(&mask);
		struct pipe_resource *res = samplers->views[i]->texture;

		if (res && res->target != PIPE_BUFFER) {
			struct si_texture *tex = (struct si_texture *)res;

			if (color_needs_decompression(tex)) {
				samplers->needs_color_decompress_mask |= 1u << i;
			} else {
				samplers->needs_color_decompress_mask &= ~(1u << i);
			}
		}
	}
}

/* IMAGE VIEWS */

static void
si_release_image_views(struct si_images *images)
{
	unsigned i;

	for (i = 0; i < SI_NUM_IMAGES; ++i) {
		struct pipe_image_view *view = &images->views[i];

		pipe_resource_reference(&view->resource, NULL);
	}
}

static void
si_image_views_begin_new_cs(struct si_context *sctx, struct si_images *images)
{
	uint mask = images->enabled_mask;

	/* Add buffers to the CS. */
	while (mask) {
		int i = u_bit_scan(&mask);
		struct pipe_image_view *view = &images->views[i];

		assert(view->resource);

		si_sampler_view_add_buffer(sctx, view->resource,
					   RADEON_USAGE_READWRITE, false, false);
	}
}

static void
si_disable_shader_image(struct si_context *ctx, unsigned shader, unsigned slot)
{
	struct si_images *images = &ctx->images[shader];

	if (images->enabled_mask & (1u << slot)) {
		struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
		unsigned desc_slot = si_get_image_slot(slot);

		pipe_resource_reference(&images->views[slot].resource, NULL);
		images->needs_color_decompress_mask &= ~(1 << slot);

		memcpy(descs->list + desc_slot*8, null_image_descriptor, 8*4);
		images->enabled_mask &= ~(1u << slot);
		ctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
	}
}

static void
si_mark_image_range_valid(const struct pipe_image_view *view)
{
	struct si_resource *res = si_resource(view->resource);

	if (res->b.b.target != PIPE_BUFFER)
		return;

	util_range_add(&res->valid_buffer_range,
		       view->u.buf.offset,
		       view->u.buf.offset + view->u.buf.size);
}

static void si_set_shader_image_desc(struct si_context *ctx,
				     const struct pipe_image_view *view,
				     bool skip_decompress,
				     uint32_t *desc, uint32_t *fmask_desc)
{
	struct si_screen *screen = ctx->screen;
	struct si_resource *res;

	res = si_resource(view->resource);

	if (res->b.b.target == PIPE_BUFFER ||
	    view->shader_access & SI_IMAGE_ACCESS_AS_BUFFER) {
		if (view->access & PIPE_IMAGE_ACCESS_WRITE)
			si_mark_image_range_valid(view);

		si_make_buffer_descriptor(screen, res,
					  view->format,
					  view->u.buf.offset,
					  view->u.buf.size, desc);
		si_set_buf_desc_address(res, view->u.buf.offset, desc + 4);
	} else {
		static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
		struct si_texture *tex = (struct si_texture *)res;
		unsigned level = view->u.tex.level;
		unsigned width, height, depth, hw_level;
		bool uses_dcc = vi_dcc_enabled(tex, level);
		unsigned access = view->access;
		/* Clear the write flag when writes can't occur.
		 * Note that DCC_DECOMPRESS for MSAA doesn't work in some cases,
		 * so we don't want to trigger it.
		 */
		if (tex->is_depth ||
		    (!fmask_desc && tex->surface.fmask_size != 0)) {
			assert(!"Z/S and MSAA image stores are not supported");
			access &= ~PIPE_IMAGE_ACCESS_WRITE;
		}

		assert(!tex->is_depth);
		assert(fmask_desc || tex->surface.fmask_size == 0);

		if (uses_dcc && !skip_decompress &&
		    (view->access & PIPE_IMAGE_ACCESS_WRITE ||
		     !vi_dcc_formats_compatible(res->b.b.format, view->format))) {
			/* If DCC can't be disabled, at least decompress it.
			 * The decompression is relatively cheap if the surface
			 * has been decompressed already.
			 */
			if (!si_texture_disable_dcc(ctx, tex))
				si_decompress_dcc(ctx, tex);
		}

		if (ctx->chip_class >= GFX9) {
			/* Always set the base address. The swizzle modes don't
			 * allow setting mipmap level offsets as the base.
			 */
			width = res->b.b.width0;
			height = res->b.b.height0;
			depth = res->b.b.depth0;
			hw_level = level;
		} else {
			/* Always force the base level to the selected level.
			 *
			 * This is required for 3D textures, where otherwise
			 * selecting a single slice for non-layered bindings
			 * fails. It doesn't hurt the other targets.
			 */
			width = u_minify(res->b.b.width0, level);
			height = u_minify(res->b.b.height0, level);
			depth = u_minify(res->b.b.depth0, level);
			hw_level = 0;
		}

		screen->make_texture_descriptor(screen, tex,
						false, res->b.b.target,
						view->format, swizzle,
						hw_level, hw_level,
						view->u.tex.first_layer,
						view->u.tex.last_layer,
						width, height, depth,
						desc, fmask_desc);
		si_set_mutable_tex_desc_fields(screen, tex,
					       &tex->surface.u.legacy.level[level],
					       level, level,
					       util_format_get_blockwidth(view->format),
					       false, desc);
	}
}

static void si_set_shader_image(struct si_context *ctx,
				unsigned shader,
				unsigned slot, const struct pipe_image_view *view,
				bool skip_decompress)
{
	struct si_images *images = &ctx->images[shader];
	struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
	struct si_resource *res;
	unsigned desc_slot = si_get_image_slot(slot);
	uint32_t *desc = descs->list + desc_slot * 8;

	if (!view || !view->resource) {
		si_disable_shader_image(ctx, shader, slot);
		return;
	}

	res = si_resource(view->resource);

	if (&images->views[slot] != view)
		util_copy_image_view(&images->views[slot], view);

	si_set_shader_image_desc(ctx, view, skip_decompress, desc, NULL);

	if (res->b.b.target == PIPE_BUFFER ||
	    view->shader_access & SI_IMAGE_ACCESS_AS_BUFFER) {
		images->needs_color_decompress_mask &= ~(1 << slot);
		res->bind_history |= PIPE_BIND_SHADER_IMAGE;
	} else {
		struct si_texture *tex = (struct si_texture *)res;
		unsigned level = view->u.tex.level;

		if (color_needs_decompression(tex)) {
			images->needs_color_decompress_mask |= 1 << slot;
		} else {
			images->needs_color_decompress_mask &= ~(1 << slot);
		}

		if (vi_dcc_enabled(tex, level) &&
		    p_atomic_read(&tex->framebuffers_bound))
			ctx->need_check_render_feedback = true;
	}

	images->enabled_mask |= 1u << slot;
	ctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);

	/* Since this can flush, it must be done after enabled_mask is updated. */
	si_sampler_view_add_buffer(ctx, &res->b.b,
				   (view->access & PIPE_IMAGE_ACCESS_WRITE) ?
				   RADEON_USAGE_READWRITE : RADEON_USAGE_READ,
				   false, true);
}

static void
si_set_shader_images(struct pipe_context *pipe,
		     enum pipe_shader_type shader,
		     unsigned start_slot, unsigned count,
		     const struct pipe_image_view *views)
{
	struct si_context *ctx = (struct si_context *)pipe;
	unsigned i, slot;

	assert(shader < SI_NUM_SHADERS);

	if (!count)
		return;

	assert(start_slot + count <= SI_NUM_IMAGES);

	if (views) {
		for (i = 0, slot = start_slot; i < count; ++i, ++slot)
			si_set_shader_image(ctx, shader, slot, &views[i], false);
	} else {
		for (i = 0, slot = start_slot; i < count; ++i, ++slot)
			si_set_shader_image(ctx, shader, slot, NULL, false);
	}

	si_update_shader_needs_decompress_mask(ctx, shader);
}

static void
si_images_update_needs_color_decompress_mask(struct si_images *images)
{
	unsigned mask = images->enabled_mask;

	while (mask) {
		int i = u_bit_scan(&mask);
		struct pipe_resource *res = images->views[i].resource;

		if (res && res->target != PIPE_BUFFER) {
			struct si_texture *tex = (struct si_texture *)res;

			if (color_needs_decompression(tex)) {
				images->needs_color_decompress_mask |= 1 << i;
			} else {
				images->needs_color_decompress_mask &= ~(1 << i);
			}
		}
	}
}

void si_update_ps_colorbuf0_slot(struct si_context *sctx)
{
	struct si_buffer_resources *buffers = &sctx->rw_buffers;
	struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
	unsigned slot = SI_PS_IMAGE_COLORBUF0;
	struct pipe_surface *surf = NULL;

	/* si_texture_disable_dcc can get us here again. */
	if (sctx->blitter->running)
		return;

	/* See whether FBFETCH is used and color buffer 0 is set. */
	if (sctx->ps_shader.cso &&
	    sctx->ps_shader.cso->info.opcode_count[TGSI_OPCODE_FBFETCH] &&
	    sctx->framebuffer.state.nr_cbufs &&
	    sctx->framebuffer.state.cbufs[0])
		surf = sctx->framebuffer.state.cbufs[0];

	/* Return if FBFETCH was disabled before and stays disabled. */
	if (!buffers->buffers[slot] && !surf)
		return;

	sctx->ps_uses_fbfetch = surf != NULL;
	si_update_ps_iter_samples(sctx);

	if (surf) {
		struct si_texture *tex = (struct si_texture*)surf->texture;
		struct pipe_image_view view;

		assert(tex);
		assert(!tex->is_depth);

		/* Disable DCC, because the texture is used as both a sampler
		 * and color buffer.
		 */
		si_texture_disable_dcc(sctx, tex);

		if (tex->buffer.b.b.nr_samples <= 1 && tex->cmask_buffer) {
			/* Disable CMASK. */
			assert(tex->cmask_buffer != &tex->buffer);
			si_eliminate_fast_color_clear(sctx, tex);
			si_texture_discard_cmask(sctx->screen, tex);
		}

		view.resource = surf->texture;
		view.format = surf->format;
		view.access = PIPE_IMAGE_ACCESS_READ;
		view.u.tex.first_layer = surf->u.tex.first_layer;
		view.u.tex.last_layer = surf->u.tex.last_layer;
		view.u.tex.level = surf->u.tex.level;

		/* Set the descriptor. */
		uint32_t *desc = descs->list + slot*4;
		memset(desc, 0, 16 * 4);
		si_set_shader_image_desc(sctx, &view, true, desc, desc + 8);

		pipe_resource_reference(&buffers->buffers[slot], &tex->buffer.b.b);
		radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
					  &tex->buffer, RADEON_USAGE_READ,
					  RADEON_PRIO_SHADER_RW_IMAGE);
		buffers->enabled_mask |= 1u << slot;
	} else {
		/* Clear the descriptor. */
		memset(descs->list + slot*4, 0, 8*4);
		pipe_resource_reference(&buffers->buffers[slot], NULL);
		buffers->enabled_mask &= ~(1u << slot);
	}

	sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
}

/* SAMPLER STATES */

static void si_bind_sampler_states(struct pipe_context *ctx,
				   enum pipe_shader_type shader,
				   unsigned start, unsigned count, void **states)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_samplers *samplers = &sctx->samplers[shader];
	struct si_descriptors *desc = si_sampler_and_image_descriptors(sctx, shader);
	struct si_sampler_state **sstates = (struct si_sampler_state**)states;
	int i;

	if (!count || shader >= SI_NUM_SHADERS || !sstates)
		return;

	for (i = 0; i < count; i++) {
		unsigned slot = start + i;
		unsigned desc_slot = si_get_sampler_slot(slot);

		if (!sstates[i] ||
		    sstates[i] == samplers->sampler_states[slot])
			continue;

#ifndef NDEBUG
		assert(sstates[i]->magic == SI_SAMPLER_STATE_MAGIC);
#endif
		samplers->sampler_states[slot] = sstates[i];

		/* If FMASK is bound, don't overwrite it.
		 * The sampler state will be set after FMASK is unbound.
		 */
		struct si_sampler_view *sview =
			(struct si_sampler_view *)samplers->views[slot];

		struct si_texture *tex = NULL;

		if (sview && sview->base.texture &&
		    sview->base.texture->target != PIPE_BUFFER)
			tex = (struct si_texture *)sview->base.texture;

		if (tex && tex->surface.fmask_size)
			continue;

		si_set_sampler_state_desc(sstates[i], sview, tex,
					  desc->list + desc_slot * 16 + 12);

		sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
	}
}

/* BUFFER RESOURCES */

static void si_init_buffer_resources(struct si_buffer_resources *buffers,
				     struct si_descriptors *descs,
				     unsigned num_buffers,
				     short shader_userdata_rel_index,
				     enum radeon_bo_priority priority,
				     enum radeon_bo_priority priority_constbuf)
{
	buffers->priority = priority;
	buffers->priority_constbuf = priority_constbuf;
	buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
	buffers->offsets = CALLOC(num_buffers, sizeof(buffers->offsets[0]));

	si_init_descriptors(descs, shader_userdata_rel_index, 4, num_buffers);
}

static void si_release_buffer_resources(struct si_buffer_resources *buffers,
					struct si_descriptors *descs)
{
	int i;

	for (i = 0; i < descs->num_elements; i++) {
		pipe_resource_reference(&buffers->buffers[i], NULL);
	}

	FREE(buffers->buffers);
	FREE(buffers->offsets);
}

static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
					     struct si_buffer_resources *buffers)
{
	unsigned mask = buffers->enabled_mask;

	/* Add buffers to the CS. */
	while (mask) {
		int i = u_bit_scan(&mask);

		radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
					  si_resource(buffers->buffers[i]),
					  buffers->writable_mask & (1u << i) ? RADEON_USAGE_READWRITE :
									       RADEON_USAGE_READ,
					  i < SI_NUM_SHADER_BUFFERS ? buffers->priority :
								      buffers->priority_constbuf);
	}
}

static void si_get_buffer_from_descriptors(struct si_buffer_resources *buffers,
					   struct si_descriptors *descs,
					   unsigned idx, struct pipe_resource **buf,
					   unsigned *offset, unsigned *size)
{
	pipe_resource_reference(buf, buffers->buffers[idx]);
	if (*buf) {
		struct si_resource *res = si_resource(*buf);
		const uint32_t *desc = descs->list + idx * 4;
		uint64_t va;

		*size = desc[2];

		assert(G_008F04_STRIDE(desc[1]) == 0);
		va = si_desc_extract_buffer_address(desc);

		assert(va >= res->gpu_address && va + *size <= res->gpu_address + res->bo_size);
		*offset = va - res->gpu_address;
	}
}

/* VERTEX BUFFERS */

static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
{
	int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
	int i;

	for (i = 0; i < count; i++) {
		int vb = sctx->vertex_elements->vertex_buffer_index[i];

		if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
			continue;
		if (!sctx->vertex_buffer[vb].buffer.resource)
			continue;

		radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
					  si_resource(sctx->vertex_buffer[vb].buffer.resource),
					  RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
	}

	if (!sctx->vb_descriptors_buffer)
		return;
	radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
				  sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
				  RADEON_PRIO_DESCRIPTORS);
}

bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
{
	struct si_vertex_elements *velems = sctx->vertex_elements;
	unsigned i, count;
	unsigned desc_list_byte_size;
	unsigned first_vb_use_mask;
	uint32_t *ptr;

	if (!sctx->vertex_buffers_dirty || !velems)
		return true;

	count = velems->count;

	if (!count)
		return true;

	desc_list_byte_size = velems->desc_list_byte_size;
	first_vb_use_mask = velems->first_vb_use_mask;

	/* Vertex buffer descriptors are the only ones which are uploaded
	 * directly through a staging buffer and don't go through
	 * the fine-grained upload path.
	 */
	u_upload_alloc(sctx->b.const_uploader, 0,
		       desc_list_byte_size,
		       si_optimal_tcc_alignment(sctx, desc_list_byte_size),
		       &sctx->vb_descriptors_offset,
		       (struct pipe_resource**)&sctx->vb_descriptors_buffer,
		       (void**)&ptr);
	if (!sctx->vb_descriptors_buffer) {
		sctx->vb_descriptors_offset = 0;
		sctx->vb_descriptors_gpu_list = NULL;
		return false;
	}

	sctx->vb_descriptors_gpu_list = ptr;
	radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
				  sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
				  RADEON_PRIO_DESCRIPTORS);

	assert(count <= SI_MAX_ATTRIBS);

	for (i = 0; i < count; i++) {
		struct pipe_vertex_buffer *vb;
		struct si_resource *buf;
		unsigned vbo_index = velems->vertex_buffer_index[i];
		uint32_t *desc = &ptr[i*4];

		vb = &sctx->vertex_buffer[vbo_index];
		buf = si_resource(vb->buffer.resource);
		if (!buf) {
			memset(desc, 0, 16);
			continue;
		}

		int64_t offset = (int64_t)((int)vb->buffer_offset) +
				 velems->src_offset[i];
		uint64_t va = buf->gpu_address + offset;

		int64_t num_records = (int64_t)buf->b.b.width0 - offset;
		if (sctx->chip_class != GFX8 && vb->stride) {
			/* Round up by rounding down and adding 1 */
			num_records = (num_records - velems->format_size[i]) /
				      vb->stride + 1;
		}
		assert(num_records >= 0 && num_records <= UINT_MAX);
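
		/* Worked example of the round-up above (assumed numbers):
		 * width0 = 100, offset = 0, stride = 16, format_size = 8:
		 * (100 - 8) / 16 + 1 = 6 records; the last record starts at
		 * byte 80 and its 8 fetched bytes end at 88 <= 100, while a
		 * 7th record would read past the end of the buffer. */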

		uint32_t rsrc_word3 = velems->rsrc_word3[i];

		/* OOB_SELECT chooses the out-of-bounds check:
		 * - 1: index >= NUM_RECORDS (Structured)
		 * - 3: offset >= NUM_RECORDS (Raw)
		 */
		if (sctx->chip_class >= GFX10)
			rsrc_word3 |= S_008F0C_OOB_SELECT(vb->stride ? 1 : 3);

		desc[0] = va;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(vb->stride);
		desc[2] = num_records;
		desc[3] = rsrc_word3;

		if (first_vb_use_mask & (1 << i)) {
			radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
						  si_resource(vb->buffer.resource),
						  RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
		}
	}

	/* Don't flush the const cache. It would have a very negative effect
	 * on performance (confirmed by testing). New descriptors are always
	 * uploaded to a fresh new buffer, so I don't think flushing the const
	 * cache is needed. */
	si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
	sctx->vertex_buffers_dirty = false;
	sctx->vertex_buffer_pointer_dirty = true;
	sctx->prefetch_L2_mask |= SI_PREFETCH_VBO_DESCRIPTORS;
	return true;
}


/* CONSTANT BUFFERS */

static struct si_descriptors *
si_const_and_shader_buffer_descriptors(struct si_context *sctx, unsigned shader)
{
	return &sctx->descriptors[si_const_and_shader_buffer_descriptors_idx(shader)];
}

void si_upload_const_buffer(struct si_context *sctx, struct si_resource **buf,
			    const uint8_t *ptr, unsigned size, uint32_t *const_offset)
{
	void *tmp;

	u_upload_alloc(sctx->b.const_uploader, 0, size,
		       si_optimal_tcc_alignment(sctx, size),
		       const_offset,
		       (struct pipe_resource**)buf, &tmp);
	if (*buf)
		util_memcpy_cpu_to_le32(tmp, ptr, size);
}

static void si_set_constant_buffer(struct si_context *sctx,
				   struct si_buffer_resources *buffers,
				   unsigned descriptors_idx,
				   uint slot, const struct pipe_constant_buffer *input)
{
	struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
	assert(slot < descs->num_elements);
	pipe_resource_reference(&buffers->buffers[slot], NULL);

	/* GFX7 cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
	 * with a NULL buffer). We need to use a dummy buffer instead. */
	if (sctx->chip_class == GFX7 &&
	    (!input || (!input->buffer && !input->user_buffer)))
		input = &sctx->null_const_buf;

	if (input && (input->buffer || input->user_buffer)) {
		struct pipe_resource *buffer = NULL;
		uint64_t va;
		unsigned buffer_offset;

		/* Upload the user buffer if needed. */
		if (input->user_buffer) {
			si_upload_const_buffer(sctx,
					       (struct si_resource**)&buffer, input->user_buffer,
					       input->buffer_size, &buffer_offset);
			if (!buffer) {
				/* Just unbind on failure. */
				si_set_constant_buffer(sctx, buffers, descriptors_idx, slot, NULL);
				return;
			}
		} else {
			pipe_resource_reference(&buffer, input->buffer);
			buffer_offset = input->buffer_offset;
		}

		va = si_resource(buffer)->gpu_address + buffer_offset;

		/* Set the descriptor. */
		uint32_t *desc = descs->list + slot*4;
		desc[0] = va;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(0);
		desc[2] = input->buffer_size;
		desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

		if (sctx->chip_class >= GFX10) {
			desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
				   S_008F0C_OOB_SELECT(3) |
				   S_008F0C_RESOURCE_LEVEL(1);
		} else {
			desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
				   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
		}

		buffers->buffers[slot] = buffer;
		buffers->offsets[slot] = buffer_offset;
		radeon_add_to_gfx_buffer_list_check_mem(sctx,
							si_resource(buffer),
							RADEON_USAGE_READ,
							buffers->priority_constbuf, true);
		buffers->enabled_mask |= 1u << slot;
	} else {
		/* Clear the descriptor. */
		memset(descs->list + slot*4, 0, sizeof(uint32_t) * 4);
		buffers->enabled_mask &= ~(1u << slot);
	}

	sctx->descriptors_dirty |= 1u << descriptors_idx;
}

static void si_pipe_set_constant_buffer(struct pipe_context *ctx,
					enum pipe_shader_type shader, uint slot,
					const struct pipe_constant_buffer *input)
{
	struct si_context *sctx = (struct si_context *)ctx;

	if (shader >= SI_NUM_SHADERS)
		return;

	if (slot == 0 && input && input->buffer &&
	    !(si_resource(input->buffer)->flags & RADEON_FLAG_32BIT)) {
		assert(!"constant buffer 0 must have a 32-bit VM address, use const_uploader");
		return;
	}

	if (input && input->buffer)
		si_resource(input->buffer)->bind_history |= PIPE_BIND_CONSTANT_BUFFER;

	slot = si_get_constbuf_slot(slot);
	si_set_constant_buffer(sctx, &sctx->const_and_shader_buffers[shader],
			       si_const_and_shader_buffer_descriptors_idx(shader),
			       slot, input);
}

void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader,
				 uint slot, struct pipe_constant_buffer *cbuf)
{
	cbuf->user_buffer = NULL;
	si_get_buffer_from_descriptors(
		&sctx->const_and_shader_buffers[shader],
		si_const_and_shader_buffer_descriptors(sctx, shader),
		si_get_constbuf_slot(slot),
		&cbuf->buffer, &cbuf->buffer_offset, &cbuf->buffer_size);
}

/* SHADER BUFFERS */

static void si_set_shader_buffer(struct si_context *sctx,
				 struct si_buffer_resources *buffers,
				 unsigned descriptors_idx,
				 uint slot, const struct pipe_shader_buffer *sbuffer,
				 bool writable, enum radeon_bo_priority priority)
{
	struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
	uint32_t *desc = descs->list + slot * 4;

	if (!sbuffer || !sbuffer->buffer) {
		pipe_resource_reference(&buffers->buffers[slot], NULL);
		memset(desc, 0, sizeof(uint32_t) * 4);
		buffers->enabled_mask &= ~(1u << slot);
		buffers->writable_mask &= ~(1u << slot);
		sctx->descriptors_dirty |= 1u << descriptors_idx;
		return;
	}

	struct si_resource *buf = si_resource(sbuffer->buffer);
	uint64_t va = buf->gpu_address + sbuffer->buffer_offset;

	desc[0] = va;
	desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
		  S_008F04_STRIDE(0);
	desc[2] = sbuffer->buffer_size;
	desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
		  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
		  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
		  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

	if (sctx->chip_class >= GFX10) {
		desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
			   S_008F0C_OOB_SELECT(3) |
			   S_008F0C_RESOURCE_LEVEL(1);
	} else {
		desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
	}

	pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
	buffers->offsets[slot] = sbuffer->buffer_offset;
	radeon_add_to_gfx_buffer_list_check_mem(sctx, buf,
						writable ? RADEON_USAGE_READWRITE :
							   RADEON_USAGE_READ,
						priority, true);
	if (writable)
		buffers->writable_mask |= 1u << slot;
	else
		buffers->writable_mask &= ~(1u << slot);

	buffers->enabled_mask |= 1u << slot;
	sctx->descriptors_dirty |= 1u << descriptors_idx;

	util_range_add(&buf->valid_buffer_range, sbuffer->buffer_offset,
		       sbuffer->buffer_offset + sbuffer->buffer_size);
}

static void si_set_shader_buffers(struct pipe_context *ctx,
				  enum pipe_shader_type shader,
				  unsigned start_slot, unsigned count,
				  const struct pipe_shader_buffer *sbuffers,
				  unsigned writable_bitmask)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_buffer_resources *buffers = &sctx->const_and_shader_buffers[shader];
	unsigned descriptors_idx = si_const_and_shader_buffer_descriptors_idx(shader);
	unsigned i;

	assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);

	for (i = 0; i < count; ++i) {
		const struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
		unsigned slot = si_get_shaderbuf_slot(start_slot + i);

		if (sbuffer && sbuffer->buffer)
			si_resource(sbuffer->buffer)->bind_history |= PIPE_BIND_SHADER_BUFFER;

		si_set_shader_buffer(sctx, buffers, descriptors_idx, slot, sbuffer,
				     !!(writable_bitmask & (1u << i)),
				     buffers->priority);
	}
}

void si_get_shader_buffers(struct si_context *sctx,
			   enum pipe_shader_type shader,
			   uint start_slot, uint count,
			   struct pipe_shader_buffer *sbuf)
{
	struct si_buffer_resources *buffers = &sctx->const_and_shader_buffers[shader];
	struct si_descriptors *descs = si_const_and_shader_buffer_descriptors(sctx, shader);

	for (unsigned i = 0; i < count; ++i) {
		si_get_buffer_from_descriptors(
			buffers, descs,
			si_get_shaderbuf_slot(start_slot + i),
			&sbuf[i].buffer, &sbuf[i].buffer_offset,
			&sbuf[i].buffer_size);
	}
}

/* RING BUFFERS */

void si_set_rw_buffer(struct si_context *sctx,
		      uint slot, const struct pipe_constant_buffer *input)
{
	si_set_constant_buffer(sctx, &sctx->rw_buffers, SI_DESCS_RW_BUFFERS,
			       slot, input);
}

void si_set_rw_shader_buffer(struct si_context *sctx, uint slot,
			     const struct pipe_shader_buffer *sbuffer)
{
	si_set_shader_buffer(sctx, &sctx->rw_buffers, SI_DESCS_RW_BUFFERS,
			     slot, sbuffer, true, RADEON_PRIO_SHADER_RW_BUFFER);
}

void si_set_ring_buffer(struct si_context *sctx, uint slot,
			struct pipe_resource *buffer,
			unsigned stride, unsigned num_records,
			bool add_tid, bool swizzle,
			unsigned element_size, unsigned index_stride, uint64_t offset)
{
	struct si_buffer_resources *buffers = &sctx->rw_buffers;
	struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];

	/* The stride field in the resource descriptor has 14 bits */
	assert(stride < (1 << 14));

	assert(slot < descs->num_elements);
	pipe_resource_reference(&buffers->buffers[slot], NULL);

	if (buffer) {
		uint64_t va;

		va = si_resource(buffer)->gpu_address + offset;

		switch (element_size) {
		default:
			assert(!"Unsupported ring buffer element size");
		case 0:
		case 2:
			element_size = 0;
			break;
		case 4:
			element_size = 1;
			break;
		case 8:
			element_size = 2;
			break;
		case 16:
			element_size = 3;
			break;
		}

		switch (index_stride) {
		default:
			assert(!"Unsupported ring buffer index stride");
		case 0:
		case 8:
			index_stride = 0;
			break;
		case 16:
			index_stride = 1;
			break;
		case 32:
			index_stride = 2;
			break;
		case 64:
			index_stride = 3;
			break;
		}

		if (sctx->chip_class >= GFX8 && stride)
			num_records *= stride;

		/* Set the descriptor. */
		uint32_t *desc = descs->list + slot*4;
		desc[0] = va;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(stride) |
			  S_008F04_SWIZZLE_ENABLE(swizzle);
		desc[2] = num_records;
		desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			  S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			  S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
			  S_008F0C_INDEX_STRIDE(index_stride) |
			  S_008F0C_ADD_TID_ENABLE(add_tid);

		if (sctx->chip_class >= GFX9)
			assert(!swizzle || element_size == 1); /* always 4 bytes on GFX9 */
		else
			desc[3] |= S_008F0C_ELEMENT_SIZE(element_size);

		pipe_resource_reference(&buffers->buffers[slot], buffer);
		radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
					  si_resource(buffer),
					  RADEON_USAGE_READWRITE, buffers->priority);
		buffers->enabled_mask |= 1u << slot;
	} else {
		/* Clear the descriptor. */
		memset(descs->list + slot*4, 0, sizeof(uint32_t) * 4);
		buffers->enabled_mask &= ~(1u << slot);
	}

	sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
}
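
/* Example of the encodings above (assumed values): element_size = 4 bytes
 * is encoded as 1 and index_stride = 16 as 1; on GFX8+ with stride = 16 and
 * num_records = 256, NUM_RECORDS holds a byte count: 256 * 16 = 4096. */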

/* INTERNAL CONST BUFFERS */

static void si_set_polygon_stipple(struct pipe_context *ctx,
				   const struct pipe_poly_stipple *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct pipe_constant_buffer cb = {};
	unsigned stipple[32];
	int i;

	for (i = 0; i < 32; i++)
		stipple[i] = util_bitreverse(state->stipple[i]);

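	/* Example of the bit reversal above: util_bitreverse(0x00000001) ==
	 * 0x80000000, i.e. bit 0 of the gallium pattern becomes bit 31 of
	 * the value consumed by the hardware. */
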
	cb.user_buffer = stipple;
	cb.buffer_size = sizeof(stipple);

	si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE, &cb);
}

/* TEXTURE METADATA ENABLE/DISABLE */

static void
si_resident_handles_update_needs_color_decompress(struct si_context *sctx)
{
	util_dynarray_clear(&sctx->resident_tex_needs_color_decompress);
	util_dynarray_clear(&sctx->resident_img_needs_color_decompress);

	util_dynarray_foreach(&sctx->resident_tex_handles,
			      struct si_texture_handle *, tex_handle) {
		struct pipe_resource *res = (*tex_handle)->view->texture;
		struct si_texture *tex;

		if (!res || res->target == PIPE_BUFFER)
			continue;

		tex = (struct si_texture *)res;
		if (!color_needs_decompression(tex))
			continue;

		util_dynarray_append(&sctx->resident_tex_needs_color_decompress,
				     struct si_texture_handle *, *tex_handle);
	}

	util_dynarray_foreach(&sctx->resident_img_handles,
			      struct si_image_handle *, img_handle) {
		struct pipe_image_view *view = &(*img_handle)->view;
		struct pipe_resource *res = view->resource;
		struct si_texture *tex;

		if (!res || res->target == PIPE_BUFFER)
			continue;

		tex = (struct si_texture *)res;
		if (!color_needs_decompression(tex))
			continue;

		util_dynarray_append(&sctx->resident_img_needs_color_decompress,
				     struct si_image_handle *, *img_handle);
	}
}

/* CMASK can be enabled (for fast clear) and disabled (for texture export)
 * while the texture is bound, possibly by a different context. In that case,
 * call this function to update needs_*_decompress_masks.
 */
void si_update_needs_color_decompress_masks(struct si_context *sctx)
{
	for (int i = 0; i < SI_NUM_SHADERS; ++i) {
		si_samplers_update_needs_color_decompress_mask(&sctx->samplers[i]);
		si_images_update_needs_color_decompress_mask(&sctx->images[i]);
		si_update_shader_needs_decompress_mask(sctx, i);
	}

	si_resident_handles_update_needs_color_decompress(sctx);
}

/* BUFFER DISCARD/INVALIDATION */

/* Reset descriptors of buffer resources after \p buf has been invalidated.
 * If buf == NULL, reset all descriptors.
 */
static void si_reset_buffer_resources(struct si_context *sctx,
				      struct si_buffer_resources *buffers,
				      unsigned descriptors_idx,
				      unsigned slot_mask,
				      struct pipe_resource *buf,
				      enum radeon_bo_priority priority)
{
	struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
	unsigned mask = buffers->enabled_mask & slot_mask;

	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct pipe_resource *buffer = buffers->buffers[i];

		if (buffer && (!buf || buffer == buf)) {
			si_set_buf_desc_address(si_resource(buffer), buffers->offsets[i],
						descs->list + i*4);
			sctx->descriptors_dirty |= 1u << descriptors_idx;

			radeon_add_to_gfx_buffer_list_check_mem(sctx,
								si_resource(buffer),
								buffers->writable_mask & (1u << i) ?
									RADEON_USAGE_READWRITE :
									RADEON_USAGE_READ,
								priority, true);
		}
	}
}

/* Update all buffer bindings where the buffer is bound, including
 * all resource descriptors. This is invalidate_buffer without
 * the invalidation.
 *
 * If buf == NULL, update all buffer bindings.
 */
void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf)
{
	struct si_resource *buffer = si_resource(buf);
	unsigned i, shader;
	unsigned num_elems = sctx->vertex_elements ?
				     sctx->vertex_elements->count : 0;

	/* We changed the buffer, now we need to bind it where the old one
	 * was bound. This consists of 2 things:
	 *   1) Updating the resource descriptor and dirtying it.
	 *   2) Adding a relocation to the CS, so that it's usable.
	 */

	/* Vertex buffers. */
	if (!buffer) {
		if (num_elems)
			sctx->vertex_buffers_dirty = true;
	} else if (buffer->bind_history & PIPE_BIND_VERTEX_BUFFER) {
		for (i = 0; i < num_elems; i++) {
			int vb = sctx->vertex_elements->vertex_buffer_index[i];

			if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
				continue;
			if (!sctx->vertex_buffer[vb].buffer.resource)
				continue;

			if (sctx->vertex_buffer[vb].buffer.resource == buf) {
				sctx->vertex_buffers_dirty = true;
				break;
			}
		}
	}

	/* Streamout buffers. (other internal buffers can't be invalidated) */
	if (!buffer || buffer->bind_history & PIPE_BIND_STREAM_OUTPUT) {
		for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
			struct si_buffer_resources *buffers = &sctx->rw_buffers;
			struct si_descriptors *descs =
				&sctx->descriptors[SI_DESCS_RW_BUFFERS];
			struct pipe_resource *buffer = buffers->buffers[i];

			if (!buffer || (buf && buffer != buf))
				continue;

			si_set_buf_desc_address(si_resource(buffer), buffers->offsets[i],
						descs->list + i*4);
			sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;

			radeon_add_to_gfx_buffer_list_check_mem(sctx,
								si_resource(buffer),
								RADEON_USAGE_WRITE,
								RADEON_PRIO_SHADER_RW_BUFFER,
								true);

			/* Update the streamout state. */
			if (sctx->streamout.begin_emitted)
				si_emit_streamout_end(sctx);
			sctx->streamout.append_bitmask =
				sctx->streamout.enabled_mask;
			si_streamout_buffers_dirty(sctx);
		}
	}

	/* Constant and shader buffers. */
	if (!buffer || buffer->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
		for (shader = 0; shader < SI_NUM_SHADERS; shader++)
			si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
						  si_const_and_shader_buffer_descriptors_idx(shader),
						  u_bit_consecutive(SI_NUM_SHADER_BUFFERS, SI_NUM_CONST_BUFFERS),
						  buf,
						  sctx->const_and_shader_buffers[shader].priority_constbuf);
	}

	if (!buffer || buffer->bind_history & PIPE_BIND_SHADER_BUFFER) {
		for (shader = 0; shader < SI_NUM_SHADERS; shader++)
			si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
						  si_const_and_shader_buffer_descriptors_idx(shader),
						  u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS),
						  buf,
						  sctx->const_and_shader_buffers[shader].priority);
	}

	if (!buffer || buffer->bind_history & PIPE_BIND_SAMPLER_VIEW) {
		/* Texture buffers - update bindings. */
		for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
			struct si_samplers *samplers = &sctx->samplers[shader];
			struct si_descriptors *descs =
				si_sampler_and_image_descriptors(sctx, shader);
			unsigned mask = samplers->enabled_mask;

			while (mask) {
				unsigned i = u_bit_scan(&mask);
				struct pipe_resource *buffer = samplers->views[i]->texture;

				if (buffer && buffer->target == PIPE_BUFFER &&
				    (!buf || buffer == buf)) {
					unsigned desc_slot = si_get_sampler_slot(i);

					si_set_buf_desc_address(si_resource(buffer),
								samplers->views[i]->u.buf.offset,
								descs->list + desc_slot * 16 + 4);
					sctx->descriptors_dirty |=
						1u << si_sampler_and_image_descriptors_idx(shader);

					radeon_add_to_gfx_buffer_list_check_mem(
						sctx, si_resource(buffer),
						RADEON_USAGE_READ,
						RADEON_PRIO_SAMPLER_BUFFER, true);
				}
			}
		}
	}

	/* Shader images */
	if (!buffer || buffer->bind_history & PIPE_BIND_SHADER_IMAGE) {
		for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
			struct si_images *images = &sctx->images[shader];
			struct si_descriptors *descs =
				si_sampler_and_image_descriptors(sctx, shader);
			unsigned mask = images->enabled_mask;

			while (mask) {
				unsigned i = u_bit_scan(&mask);
				struct pipe_resource *buffer = images->views[i].resource;

				if (buffer && buffer->target == PIPE_BUFFER &&
				    (!buf || buffer == buf)) {
					unsigned desc_slot = si_get_image_slot(i);

					if (images->views[i].access & PIPE_IMAGE_ACCESS_WRITE)
						si_mark_image_range_valid(&images->views[i]);

					si_set_buf_desc_address(si_resource(buffer),
								images->views[i].u.buf.offset,
								descs->list + desc_slot * 8 + 4);
					sctx->descriptors_dirty |=
						1u << si_sampler_and_image_descriptors_idx(shader);

					radeon_add_to_gfx_buffer_list_check_mem(
						sctx, si_resource(buffer),
						RADEON_USAGE_READWRITE,
						RADEON_PRIO_SAMPLER_BUFFER, true);
				}
			}
		}
	}

	/* Bindless texture handles */
	if (!buffer || buffer->texture_handle_allocated) {
		struct si_descriptors *descs = &sctx->bindless_descriptors;

		util_dynarray_foreach(&sctx->resident_tex_handles,
				      struct si_texture_handle *, tex_handle) {
			struct pipe_sampler_view *view = (*tex_handle)->view;
			unsigned desc_slot = (*tex_handle)->desc_slot;
			struct pipe_resource *buffer = view->texture;

			if (buffer && buffer->target == PIPE_BUFFER &&
			    (!buf || buffer == buf)) {
				si_set_buf_desc_address(si_resource(buffer),
							view->u.buf.offset,
							descs->list +
							desc_slot * 16 + 4);

				(*tex_handle)->desc_dirty = true;
				sctx->bindless_descriptors_dirty = true;

				radeon_add_to_gfx_buffer_list_check_mem(
					sctx, si_resource(buffer),
					RADEON_USAGE_READ,
					RADEON_PRIO_SAMPLER_BUFFER, true);
			}
		}
	}

	/* Bindless image handles */
	if (!buffer || buffer->image_handle_allocated) {
		struct si_descriptors *descs = &sctx->bindless_descriptors;

		util_dynarray_foreach(&sctx->resident_img_handles,
				      struct si_image_handle *, img_handle) {
			struct pipe_image_view *view = &(*img_handle)->view;
1843 unsigned desc_slot = (*img_handle)->desc_slot;
1844 struct pipe_resource *buffer = view->resource;
1845
1846 if (buffer && buffer->target == PIPE_BUFFER &&
1847 (!buf || buffer == buf)) {
1848 if (view->access & PIPE_IMAGE_ACCESS_WRITE)
1849 si_mark_image_range_valid(view);
1850
1851 si_set_buf_desc_address(si_resource(buffer),
1852 view->u.buf.offset,
1853 descs->list +
1854 desc_slot * 16 + 4);
1855
1856 (*img_handle)->desc_dirty = true;
1857 sctx->bindless_descriptors_dirty = true;
1858
1859 radeon_add_to_gfx_buffer_list_check_mem(
1860 sctx, si_resource(buffer),
1861 RADEON_USAGE_READWRITE,
1862 RADEON_PRIO_SAMPLER_BUFFER, true);
1863 }
1864 }
1865 }
1866
1867 if (buffer) {
1868 /* Do the same for other contexts. They will invoke this function
1869 * with buffer == NULL.
1870 */
1871 unsigned new_counter = p_atomic_inc_return(&sctx->screen->dirty_buf_counter);
1872
1873 /* Skip the update for the current context, because we have already updated
1874 * the buffer bindings.
1875 */
1876 if (new_counter == sctx->last_dirty_buf_counter + 1)
1877 sctx->last_dirty_buf_counter = new_counter;
1878 }
1879 }
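/* A sketch of the cross-context protocol above (hedged; the exact check
 * lives outside this file): each context remembers the last value of
 * screen->dirty_buf_counter it has seen and, when it later notices the
 * counter has advanced, calls si_rebind_buffer(sctx, NULL) to refresh all
 * of its buffer bindings. Incrementing the counter here is therefore
 * enough to make every other context re-validate lazily. */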
1880
1881 static void si_upload_bindless_descriptor(struct si_context *sctx,
1882 unsigned desc_slot,
1883 unsigned num_dwords)
1884 {
1885 struct si_descriptors *desc = &sctx->bindless_descriptors;
1886 unsigned desc_slot_offset = desc_slot * 16;
1887 uint32_t *data;
1888 uint64_t va;
1889
1890 data = desc->list + desc_slot_offset;
1891 va = desc->gpu_address + desc_slot_offset * 4;
1892
1893 si_cp_write_data(sctx, desc->buffer, va - desc->buffer->gpu_address,
1894 num_dwords * 4, V_370_TC_L2, V_370_ME, data);
1895 }
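/* For reference, si_cp_write_data ends up emitting a WRITE_DATA packet;
 * a sketch of the relevant fields under the parameters used above (not
 * the full encoding):
 *   engine  = V_370_ME    -> the write is performed by the micro engine
 *   dst_sel = V_370_TC_L2 -> the write goes through the L2 cache
 *   payload = num_dwords of the CPU copy at the slot's byte offset
 *             (desc_slot * 16 dwords * 4 bytes)
 * Writing the slot in place avoids re-uploading the whole bindless array
 * for a single dirty descriptor. */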
1896
1897 static void si_upload_bindless_descriptors(struct si_context *sctx)
1898 {
1899 if (!sctx->bindless_descriptors_dirty)
1900 return;
1901
1902 /* Wait for graphics/compute to be idle before updating the resident
1903 * descriptors directly in memory, in case the GPU is using them.
1904 */
1905 sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
1906 SI_CONTEXT_CS_PARTIAL_FLUSH;
1907 sctx->emit_cache_flush(sctx);
1908
1909 util_dynarray_foreach(&sctx->resident_tex_handles,
1910 struct si_texture_handle *, tex_handle) {
1911 unsigned desc_slot = (*tex_handle)->desc_slot;
1912
1913 if (!(*tex_handle)->desc_dirty)
1914 continue;
1915
1916 si_upload_bindless_descriptor(sctx, desc_slot, 16);
1917 (*tex_handle)->desc_dirty = false;
1918 }
1919
1920 util_dynarray_foreach(&sctx->resident_img_handles,
1921 struct si_image_handle *, img_handle) {
1922 unsigned desc_slot = (*img_handle)->desc_slot;
1923
1924 if (!(*img_handle)->desc_dirty)
1925 continue;
1926
1927 si_upload_bindless_descriptor(sctx, desc_slot, 8);
1928 (*img_handle)->desc_dirty = false;
1929 }
1930
1931 /* Invalidate L1 because it doesn't know that L2 changed. */
1932 sctx->flags |= SI_CONTEXT_INV_SCACHE;
1933 sctx->emit_cache_flush(sctx);
1934
1935 sctx->bindless_descriptors_dirty = false;
1936 }
1937
1938 /* Update mutable image descriptor fields of all resident textures. */
1939 static void si_update_bindless_texture_descriptor(struct si_context *sctx,
1940 struct si_texture_handle *tex_handle)
1941 {
1942 struct si_sampler_view *sview = (struct si_sampler_view *)tex_handle->view;
1943 struct si_descriptors *desc = &sctx->bindless_descriptors;
1944 unsigned desc_slot_offset = tex_handle->desc_slot * 16;
1945 uint32_t desc_list[16];
1946
1947 if (sview->base.texture->target == PIPE_BUFFER)
1948 return;
1949
1950 memcpy(desc_list, desc->list + desc_slot_offset, sizeof(desc_list));
1951 si_set_sampler_view_desc(sctx, sview, &tex_handle->sstate,
1952 desc->list + desc_slot_offset);
1953
1954 if (memcmp(desc_list, desc->list + desc_slot_offset,
1955 sizeof(desc_list))) {
1956 tex_handle->desc_dirty = true;
1957 sctx->bindless_descriptors_dirty = true;
1958 }
1959 }
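/* Note on the memcmp above: the descriptor is rebuilt unconditionally,
 * but only a byte-for-byte difference marks the slot dirty, so resident
 * textures whose mutable fields didn't actually change cost nothing to
 * re-upload. */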
1960
1961 static void si_update_bindless_image_descriptor(struct si_context *sctx,
1962 struct si_image_handle *img_handle)
1963 {
1964 struct si_descriptors *desc = &sctx->bindless_descriptors;
1965 unsigned desc_slot_offset = img_handle->desc_slot * 16;
1966 struct pipe_image_view *view = &img_handle->view;
1967 uint32_t desc_list[8];
1968
1969 if (view->resource->target == PIPE_BUFFER)
1970 return;
1971
1972 memcpy(desc_list, desc->list + desc_slot_offset,
1973 sizeof(desc_list));
1974 si_set_shader_image_desc(sctx, view, true,
1975 desc->list + desc_slot_offset, NULL);
1976
1977 if (memcmp(desc_list, desc->list + desc_slot_offset,
1978 sizeof(desc_list))) {
1979 img_handle->desc_dirty = true;
1980 sctx->bindless_descriptors_dirty = true;
1981 }
1982 }
1983
1984 static void si_update_all_resident_texture_descriptors(struct si_context *sctx)
1985 {
1986 util_dynarray_foreach(&sctx->resident_tex_handles,
1987 struct si_texture_handle *, tex_handle) {
1988 si_update_bindless_texture_descriptor(sctx, *tex_handle);
1989 }
1990
1991 util_dynarray_foreach(&sctx->resident_img_handles,
1992 struct si_image_handle *, img_handle) {
1993 si_update_bindless_image_descriptor(sctx, *img_handle);
1994 }
1995
1996 si_upload_bindless_descriptors(sctx);
1997 }
1998
1999 /* Update mutable image descriptor fields of all bound textures. */
2000 void si_update_all_texture_descriptors(struct si_context *sctx)
2001 {
2002 unsigned shader;
2003
2004 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
2005 struct si_samplers *samplers = &sctx->samplers[shader];
2006 struct si_images *images = &sctx->images[shader];
2007 unsigned mask;
2008
2009 /* Images. */
2010 mask = images->enabled_mask;
2011 while (mask) {
2012 unsigned i = u_bit_scan(&mask);
2013 struct pipe_image_view *view = &images->views[i];
2014
2015 if (!view->resource ||
2016 view->resource->target == PIPE_BUFFER)
2017 continue;
2018
2019 si_set_shader_image(sctx, shader, i, view, true);
2020 }
2021
2022 /* Sampler views. */
2023 mask = samplers->enabled_mask;
2024 while (mask) {
2025 unsigned i = u_bit_scan(&mask);
2026 struct pipe_sampler_view *view = samplers->views[i];
2027
2028 if (!view ||
2029 !view->texture ||
2030 view->texture->target == PIPE_BUFFER)
2031 continue;
2032
2033 si_set_sampler_view(sctx, shader, i,
2034 samplers->views[i], true);
2035 }
2036
2037 si_update_shader_needs_decompress_mask(sctx, shader);
2038 }
2039
2040 si_update_all_resident_texture_descriptors(sctx);
2041 si_update_ps_colorbuf0_slot(sctx);
2042 }
2043
2044 /* SHADER USER DATA */
2045
2046 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
2047 unsigned shader)
2048 {
2049 sctx->shader_pointers_dirty |=
2050 u_bit_consecutive(SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS,
2051 SI_NUM_SHADER_DESCS);
2052
2053 if (shader == PIPE_SHADER_VERTEX)
2054 sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
2055
2056 si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
2057 }
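/* A worked example of the bit layout above, assuming the usual two
 * descriptor sets per stage (SI_NUM_SHADER_DESCS == 2): shader stage s
 * owns bits [SI_DESCS_FIRST_SHADER + 2*s, SI_DESCS_FIRST_SHADER + 2*s + 1]
 * of shader_pointers_dirty, i.e. one bit for its const/shader buffers
 * and one for its samplers/images. */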
2058
2059 static void si_shader_pointers_begin_new_cs(struct si_context *sctx)
2060 {
2061 sctx->shader_pointers_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
2062 sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
2063 si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
2064 sctx->graphics_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
2065 sctx->compute_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
2066 }
2067
2068 /* Set a base register address for user data constants in the given shader.
2069 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
2070 */
2071 static void si_set_user_data_base(struct si_context *sctx,
2072 unsigned shader, uint32_t new_base)
2073 {
2074 uint32_t *base = &sctx->shader_pointers.sh_base[shader];
2075
2076 if (*base != new_base) {
2077 *base = new_base;
2078
2079 if (new_base)
2080 si_mark_shader_pointers_dirty(sctx, shader);
2081
2082 /* Any change in enabled shader stages requires re-emitting
2083 * the VS state SGPR, because it contains the clamp_vertex_color
2084 * state, which can be applied by VS, TES, or GS.
2085 */
2086 sctx->last_vs_state = ~0;
2087 }
2088 }
2089
2090 /* This must be called when any of these is toggled between enabled and disabled:
2091 * - geometry shader
2092 * - tessellation evaluation shader
2093 * - NGG
2094 */
2095 void si_shader_change_notify(struct si_context *sctx)
2096 {
2097 /* VS can be bound as VS, ES, or LS. */
2098 if (sctx->tes_shader.cso) {
2099 if (sctx->chip_class >= GFX10) {
2100 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2101 R_00B430_SPI_SHADER_USER_DATA_HS_0);
2102 } else if (sctx->chip_class >= GFX9) {
2103 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2104 R_00B430_SPI_SHADER_USER_DATA_LS_0);
2105 } else {
2106 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2107 R_00B530_SPI_SHADER_USER_DATA_LS_0);
2108 }
2109 } else if (sctx->chip_class >= GFX10) {
2110 if (sctx->ngg || sctx->gs_shader.cso) {
2111 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2112 R_00B230_SPI_SHADER_USER_DATA_GS_0);
2113 } else {
2114 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2115 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2116 }
2117 } else if (sctx->gs_shader.cso) {
2118 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2119 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2120 } else {
2121 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2122 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2123 }
2124
2125 /* TES can be bound as ES, VS, or not bound. */
2126 if (sctx->tes_shader.cso) {
2127 if (sctx->chip_class >= GFX10) {
2128 if (sctx->ngg || sctx->gs_shader.cso) {
2129 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
2130 R_00B230_SPI_SHADER_USER_DATA_GS_0);
2131 } else {
2132 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
2133 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2134 }
2135 } else if (sctx->gs_shader.cso) {
2136 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
2137 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2138 } else {
2139 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
2140 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2141 }
2142 } else {
2143 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
2144 }
2145 }
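/* Background for the mapping above (a summary of known HW behavior, not
 * new policy): GFX9+ merges LS+HS and ES+GS into single HW stages, so an
 * API VS or TES is remapped to the user data registers of the merged
 * stage it actually runs in; which register block that is (LS/HS/ES/GS/VS)
 * depends on the chip generation, as the branches above spell out. */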
2146
2147 static void si_emit_shader_pointer_head(struct radeon_cmdbuf *cs,
2148 unsigned sh_offset,
2149 unsigned pointer_count)
2150 {
2151 radeon_emit(cs, PKT3(PKT3_SET_SH_REG, pointer_count, 0));
2152 radeon_emit(cs, (sh_offset - SI_SH_REG_OFFSET) >> 2);
2153 }
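/* The two dwords emitted above form the head of a SET_SH_REG packet;
 * a sketch of the layout (field encoding per sid.h):
 *   dw0: PKT3(PKT3_SET_SH_REG, pointer_count, 0)
 *   dw1: (sh_offset - SI_SH_REG_OFFSET) >> 2   -- register dword index
 *   dw2..dw(1+pointer_count): register values, emitted by the callers */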
2154
2155 static void si_emit_shader_pointer_body(struct si_screen *sscreen,
2156 struct radeon_cmdbuf *cs,
2157 uint64_t va)
2158 {
2159 radeon_emit(cs, va);
2160
2161 assert(va == 0 || (va >> 32) == sscreen->info.address32_hi);
2162 }
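/* Only the low 32 bits of the address are emitted: the user SGPR holds a
 * 32-bit pointer and the shader supplies the high half from the constant
 * sscreen->info.address32_hi, which works because descriptor buffers are
 * allocated in the 32-bit address heap. That contract is exactly what the
 * assert above checks. */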
2163
2164 static void si_emit_shader_pointer(struct si_context *sctx,
2165 struct si_descriptors *desc,
2166 unsigned sh_base)
2167 {
2168 struct radeon_cmdbuf *cs = sctx->gfx_cs;
2169 unsigned sh_offset = sh_base + desc->shader_userdata_offset;
2170
2171 si_emit_shader_pointer_head(cs, sh_offset, 1);
2172 si_emit_shader_pointer_body(sctx->screen, cs, desc->gpu_address);
2173 }
2174
2175 static void si_emit_consecutive_shader_pointers(struct si_context *sctx,
2176 unsigned pointer_mask,
2177 unsigned sh_base)
2178 {
2179 if (!sh_base)
2180 return;
2181
2182 struct radeon_cmdbuf *cs = sctx->gfx_cs;
2183 unsigned mask = sctx->shader_pointers_dirty & pointer_mask;
2184
2185 while (mask) {
2186 int start, count;
2187 u_bit_scan_consecutive_range(&mask, &start, &count);
2188
2189 struct si_descriptors *descs = &sctx->descriptors[start];
2190 unsigned sh_offset = sh_base + descs->shader_userdata_offset;
2191
2192 si_emit_shader_pointer_head(cs, sh_offset, count);
2193 for (int i = 0; i < count; i++)
2194 si_emit_shader_pointer_body(sctx->screen, cs,
2195 descs[i].gpu_address);
2196 }
2197 }
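/* Example of the packing above: if both descriptor sets of a stage are
 * dirty and occupy consecutive user data slots, the loop finds one range
 * (start, count = 2) and emits a single SET_SH_REG packet carrying two
 * pointers instead of two separate packets. */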
2198
2199 static void si_emit_global_shader_pointers(struct si_context *sctx,
2200 struct si_descriptors *descs)
2201 {
2202 if (sctx->chip_class == GFX9) {
2203 /* Broadcast it to all shader stages. */
2204 si_emit_shader_pointer(sctx, descs,
2205 R_00B530_SPI_SHADER_USER_DATA_COMMON_0);
2206 return;
2207 }
2208
2209 si_emit_shader_pointer(sctx, descs,
2210 R_00B030_SPI_SHADER_USER_DATA_PS_0);
2211 si_emit_shader_pointer(sctx, descs,
2212 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2213 si_emit_shader_pointer(sctx, descs,
2214 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2215 si_emit_shader_pointer(sctx, descs,
2216 R_00B230_SPI_SHADER_USER_DATA_GS_0);
2217 si_emit_shader_pointer(sctx, descs,
2218 R_00B430_SPI_SHADER_USER_DATA_HS_0);
2219 si_emit_shader_pointer(sctx, descs,
2220 R_00B530_SPI_SHADER_USER_DATA_LS_0);
2221 }
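/* On chips without the GFX9 COMMON register, the same pointer must be
 * written once per possible HW stage (PS/VS/ES/GS/HS/LS), which is what
 * the six writes above do. */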
2222
2223 void si_emit_graphics_shader_pointers(struct si_context *sctx)
2224 {
2225 uint32_t *sh_base = sctx->shader_pointers.sh_base;
2226
2227 if (sctx->shader_pointers_dirty & (1 << SI_DESCS_RW_BUFFERS)) {
2228 si_emit_global_shader_pointers(sctx,
2229 &sctx->descriptors[SI_DESCS_RW_BUFFERS]);
2230 }
2231
2232 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(VERTEX),
2233 sh_base[PIPE_SHADER_VERTEX]);
2234 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(TESS_EVAL),
2235 sh_base[PIPE_SHADER_TESS_EVAL]);
2236 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(FRAGMENT),
2237 sh_base[PIPE_SHADER_FRAGMENT]);
2238 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(TESS_CTRL),
2239 sh_base[PIPE_SHADER_TESS_CTRL]);
2240 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(GEOMETRY),
2241 sh_base[PIPE_SHADER_GEOMETRY]);
2242
2243 sctx->shader_pointers_dirty &=
2244 ~u_bit_consecutive(SI_DESCS_RW_BUFFERS, SI_DESCS_FIRST_COMPUTE);
2245
2246 if (sctx->vertex_buffer_pointer_dirty) {
2247 struct radeon_cmdbuf *cs = sctx->gfx_cs;
2248
2249 /* Find the location of the VB descriptor pointer. */
2250 /* TODO: In the future, the pointer will be packed in unused
2251 * bits of the first 2 VB descriptors. */
2252 unsigned sh_dw_offset = SI_VS_NUM_USER_SGPR;
2253 if (sctx->chip_class >= GFX9) {
2254 if (sctx->tes_shader.cso)
2255 sh_dw_offset = GFX9_TCS_NUM_USER_SGPR;
2256 else if (sctx->gs_shader.cso)
2257 sh_dw_offset = GFX9_VSGS_NUM_USER_SGPR;
2258 }
2259
2260 unsigned sh_offset = sh_base[PIPE_SHADER_VERTEX] + sh_dw_offset * 4;
2261 si_emit_shader_pointer_head(cs, sh_offset, 1);
2262 si_emit_shader_pointer_body(sctx->screen, cs,
2263 sctx->vb_descriptors_buffer->gpu_address +
2264 sctx->vb_descriptors_offset);
2265 sctx->vertex_buffer_pointer_dirty = false;
2266 }
2267
2268 if (sctx->graphics_bindless_pointer_dirty) {
2269 si_emit_global_shader_pointers(sctx,
2270 &sctx->bindless_descriptors);
2271 sctx->graphics_bindless_pointer_dirty = false;
2272 }
2273 }
2274
2275 void si_emit_compute_shader_pointers(struct si_context *sctx)
2276 {
2277 unsigned base = R_00B900_COMPUTE_USER_DATA_0;
2278
2279 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(COMPUTE),
2280 base);
2281 sctx->shader_pointers_dirty &= ~SI_DESCS_SHADER_MASK(COMPUTE);
2282
2283 if (sctx->compute_bindless_pointer_dirty) {
2284 si_emit_shader_pointer(sctx, &sctx->bindless_descriptors, base);
2285 sctx->compute_bindless_pointer_dirty = false;
2286 }
2287 }
2288
2289 /* BINDLESS */
2290
2291 static void si_init_bindless_descriptors(struct si_context *sctx,
2292 struct si_descriptors *desc,
2293 short shader_userdata_rel_index,
2294 unsigned num_elements)
2295 {
2296 MAYBE_UNUSED unsigned desc_slot;
2297
2298 si_init_descriptors(desc, shader_userdata_rel_index, 16, num_elements);
2299 sctx->bindless_descriptors.num_active_slots = num_elements;
2300
2301 /* The first bindless descriptor is stored at slot 1, because 0 is not
2302 * considered to be a valid handle.
2303 */
2304 sctx->num_bindless_descriptors = 1;
2305
2306 /* Track which bindless slots are used (or not). */
2307 util_idalloc_init(&sctx->bindless_used_slots);
2308 util_idalloc_resize(&sctx->bindless_used_slots, num_elements);
2309
2310 /* Reserve slot 0 because it's an invalid handle for bindless. */
2311 desc_slot = util_idalloc_alloc(&sctx->bindless_used_slots);
2312 assert(desc_slot == 0);
2313 }
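/* A usage note: util_idalloc is expected to hand out the lowest free id,
 * so the allocation above returns 0, which then stays permanently
 * reserved. The first real bindless handle is therefore slot 1, matching
 * the "0 is an invalid handle" convention asserted here. */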
2314
2315 static void si_release_bindless_descriptors(struct si_context *sctx)
2316 {
2317 si_release_descriptors(&sctx->bindless_descriptors);
2318 util_idalloc_fini(&sctx->bindless_used_slots);
2319 }
2320
2321 static unsigned si_get_first_free_bindless_slot(struct si_context *sctx)
2322 {
2323 struct si_descriptors *desc = &sctx->bindless_descriptors;
2324 unsigned desc_slot;
2325
2326 desc_slot = util_idalloc_alloc(&sctx->bindless_used_slots);
2327 if (desc_slot >= desc->num_elements) {
2328 /* The array of bindless descriptors is full; resize it. */
2329 unsigned slot_size = desc->element_dw_size * 4;
2330 unsigned new_num_elements = desc->num_elements * 2;
2331
2332 desc->list = REALLOC(desc->list, desc->num_elements * slot_size,
2333 new_num_elements * slot_size);
2334 desc->num_elements = new_num_elements;
2335 desc->num_active_slots = new_num_elements;
2336 }
2337
2338 assert(desc_slot);
2339 return desc_slot;
2340 }
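/* Growth arithmetic for the resize above, as a worked example with the
 * initial size: element_dw_size is 16, so slot_size = 16 * 4 = 64 bytes,
 * and the first overflow grows the list from 1024 to 2048 slots, i.e.
 * from 64 KB to 128 KB of CPU-side descriptor storage. */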
2341
2342 static unsigned
2343 si_create_bindless_descriptor(struct si_context *sctx, uint32_t *desc_list,
2344 unsigned size)
2345 {
2346 struct si_descriptors *desc = &sctx->bindless_descriptors;
2347 unsigned desc_slot, desc_slot_offset;
2348
2349 /* Find a free slot. */
2350 desc_slot = si_get_first_free_bindless_slot(sctx);
2351
2352 /* For simplicity, sampler and image bindless descriptors use fixed
2353 * 16-dword slots for now. Image descriptors only need 8 dwords, but this
2354 * doesn't really matter because no real apps use image handles.
2355 */
2356 desc_slot_offset = desc_slot * 16;
2357
2358 /* Copy the descriptor into the array. */
2359 memcpy(desc->list + desc_slot_offset, desc_list, size);
2360
2361 /* Re-upload the whole array of bindless descriptors into a new buffer.
2362 */
2363 if (!si_upload_descriptors(sctx, desc))
2364 return 0;
2365
2366 /* Make sure to re-emit the shader pointers for all stages. */
2367 sctx->graphics_bindless_pointer_dirty = true;
2368 sctx->compute_bindless_pointer_dirty = true;
2369
2370 return desc_slot;
2371 }
2372
2373 static void si_update_bindless_buffer_descriptor(struct si_context *sctx,
2374 unsigned desc_slot,
2375 struct pipe_resource *resource,
2376 uint64_t offset,
2377 bool *desc_dirty)
2378 {
2379 struct si_descriptors *desc = &sctx->bindless_descriptors;
2380 struct si_resource *buf = si_resource(resource);
2381 unsigned desc_slot_offset = desc_slot * 16;
2382 uint32_t *desc_list = desc->list + desc_slot_offset + 4;
2383 uint64_t old_desc_va;
2384
2385 assert(resource->target == PIPE_BUFFER);
2386
2387 /* Retrieve the old buffer addr from the descriptor. */
2388 old_desc_va = si_desc_extract_buffer_address(desc_list);
2389
2390 if (old_desc_va != buf->gpu_address + offset) {
2391 /* The buffer was invalidated while the handle wasn't
2392 * resident; update the descriptor and the dirty flag.
2393 */
2394 si_set_buf_desc_address(buf, offset, &desc_list[0]);
2395
2396 *desc_dirty = true;
2397 }
2398 }
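/* The "+ 4" in desc_list above is not arbitrary: within a 16-dword
 * bindless slot, the 4-dword buffer descriptor lives in dwords [4..7]
 * (the same placement used for texture buffers elsewhere in this file),
 * and si_desc_extract_buffer_address reads the old VA back from it. */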
2399
2400 static uint64_t si_create_texture_handle(struct pipe_context *ctx,
2401 struct pipe_sampler_view *view,
2402 const struct pipe_sampler_state *state)
2403 {
2404 struct si_sampler_view *sview = (struct si_sampler_view *)view;
2405 struct si_context *sctx = (struct si_context *)ctx;
2406 struct si_texture_handle *tex_handle;
2407 struct si_sampler_state *sstate;
2408 uint32_t desc_list[16];
2409 uint64_t handle;
2410
2411 tex_handle = CALLOC_STRUCT(si_texture_handle);
2412 if (!tex_handle)
2413 return 0;
2414
2415 memset(desc_list, 0, sizeof(desc_list));
2416 si_init_descriptor_list(&desc_list[0], 16, 1, null_texture_descriptor);
2417
2418 sstate = ctx->create_sampler_state(ctx, state);
2419 if (!sstate) {
2420 FREE(tex_handle);
2421 return 0;
2422 }
2423
2424 si_set_sampler_view_desc(sctx, sview, sstate, &desc_list[0]);
2425 memcpy(&tex_handle->sstate, sstate, sizeof(*sstate));
2426 ctx->delete_sampler_state(ctx, sstate);
2427
2428 tex_handle->desc_slot = si_create_bindless_descriptor(sctx, desc_list,
2429 sizeof(desc_list));
2430 if (!tex_handle->desc_slot) {
2431 FREE(tex_handle);
2432 return 0;
2433 }
2434
2435 handle = tex_handle->desc_slot;
2436
2437 if (!_mesa_hash_table_insert(sctx->tex_handles,
2438 (void *)(uintptr_t)handle,
2439 tex_handle)) {
2440 FREE(tex_handle);
2441 return 0;
2442 }
2443
2444 pipe_sampler_view_reference(&tex_handle->view, view);
2445
2446 si_resource(sview->base.texture)->texture_handle_allocated = true;
2447
2448 return handle;
2449 }
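/* A note on the returned value: the 64-bit API handle is simply the
 * descriptor slot index widened to uint64_t. Since slot 0 is reserved as
 * invalid, returning 0 doubles as the error path above, and lookups go
 * through sctx->tex_handles keyed by that same value. */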
2450
2451 static void si_delete_texture_handle(struct pipe_context *ctx, uint64_t handle)
2452 {
2453 struct si_context *sctx = (struct si_context *)ctx;
2454 struct si_texture_handle *tex_handle;
2455 struct hash_entry *entry;
2456
2457 entry = _mesa_hash_table_search(sctx->tex_handles,
2458 (void *)(uintptr_t)handle);
2459 if (!entry)
2460 return;
2461
2462 tex_handle = (struct si_texture_handle *)entry->data;
2463
2464 /* Allow this descriptor slot to be re-used. */
2465 util_idalloc_free(&sctx->bindless_used_slots, tex_handle->desc_slot);
2466
2467 pipe_sampler_view_reference(&tex_handle->view, NULL);
2468 _mesa_hash_table_remove(sctx->tex_handles, entry);
2469 FREE(tex_handle);
2470 }
2471
2472 static void si_make_texture_handle_resident(struct pipe_context *ctx,
2473 uint64_t handle, bool resident)
2474 {
2475 struct si_context *sctx = (struct si_context *)ctx;
2476 struct si_texture_handle *tex_handle;
2477 struct si_sampler_view *sview;
2478 struct hash_entry *entry;
2479
2480 entry = _mesa_hash_table_search(sctx->tex_handles,
2481 (void *)(uintptr_t)handle);
2482 if (!entry)
2483 return;
2484
2485 tex_handle = (struct si_texture_handle *)entry->data;
2486 sview = (struct si_sampler_view *)tex_handle->view;
2487
2488 if (resident) {
2489 if (sview->base.texture->target != PIPE_BUFFER) {
2490 struct si_texture *tex =
2491 (struct si_texture *)sview->base.texture;
2492
2493 if (depth_needs_decompression(tex)) {
2494 util_dynarray_append(
2495 &sctx->resident_tex_needs_depth_decompress,
2496 struct si_texture_handle *,
2497 tex_handle);
2498 }
2499
2500 if (color_needs_decompression(tex)) {
2501 util_dynarray_append(
2502 &sctx->resident_tex_needs_color_decompress,
2503 struct si_texture_handle *,
2504 tex_handle);
2505 }
2506
2507 if (tex->dcc_offset &&
2508 p_atomic_read(&tex->framebuffers_bound))
2509 sctx->need_check_render_feedback = true;
2510
2511 si_update_bindless_texture_descriptor(sctx, tex_handle);
2512 } else {
2513 si_update_bindless_buffer_descriptor(sctx,
2514 tex_handle->desc_slot,
2515 sview->base.texture,
2516 sview->base.u.buf.offset,
2517 &tex_handle->desc_dirty);
2518 }
2519
2520 /* Re-upload the descriptor if it has been updated while it
2521 * wasn't resident.
2522 */
2523 if (tex_handle->desc_dirty)
2524 sctx->bindless_descriptors_dirty = true;
2525
2526 /* Add the texture handle to the per-context list. */
2527 util_dynarray_append(&sctx->resident_tex_handles,
2528 struct si_texture_handle *, tex_handle);
2529
2530 /* Add the buffers to the current CS in case si_begin_new_cs()
2531 * is not going to be called.
2532 */
2533 si_sampler_view_add_buffer(sctx, sview->base.texture,
2534 RADEON_USAGE_READ,
2535 sview->is_stencil_sampler, false);
2536 } else {
2537 /* Remove the texture handle from the per-context list. */
2538 util_dynarray_delete_unordered(&sctx->resident_tex_handles,
2539 struct si_texture_handle *,
2540 tex_handle);
2541
2542 if (sview->base.texture->target != PIPE_BUFFER) {
2543 util_dynarray_delete_unordered(
2544 &sctx->resident_tex_needs_depth_decompress,
2545 struct si_texture_handle *, tex_handle);
2546
2547 util_dynarray_delete_unordered(
2548 &sctx->resident_tex_needs_color_decompress,
2549 struct si_texture_handle *, tex_handle);
2550 }
2551 }
2552 }
2553
2554 static uint64_t si_create_image_handle(struct pipe_context *ctx,
2555 const struct pipe_image_view *view)
2556 {
2557 struct si_context *sctx = (struct si_context *)ctx;
2558 struct si_image_handle *img_handle;
2559 uint32_t desc_list[8];
2560 uint64_t handle;
2561
2562 if (!view || !view->resource)
2563 return 0;
2564
2565 img_handle = CALLOC_STRUCT(si_image_handle);
2566 if (!img_handle)
2567 return 0;
2568
2569 memset(desc_list, 0, sizeof(desc_list));
2570 si_init_descriptor_list(&desc_list[0], 8, 1, null_image_descriptor);
2571
2572 si_set_shader_image_desc(sctx, view, false, &desc_list[0], NULL);
2573
2574 img_handle->desc_slot = si_create_bindless_descriptor(sctx, desc_list,
2575 sizeof(desc_list));
2576 if (!img_handle->desc_slot) {
2577 FREE(img_handle);
2578 return 0;
2579 }
2580
2581 handle = img_handle->desc_slot;
2582
2583 if (!_mesa_hash_table_insert(sctx->img_handles,
2584 (void *)(uintptr_t)handle,
2585 img_handle)) {
2586 FREE(img_handle);
2587 return 0;
2588 }
2589
2590 util_copy_image_view(&img_handle->view, view);
2591
2592 si_resource(view->resource)->image_handle_allocated = true;
2593
2594 return handle;
2595 }
2596
2597 static void si_delete_image_handle(struct pipe_context *ctx, uint64_t handle)
2598 {
2599 struct si_context *sctx = (struct si_context *)ctx;
2600 struct si_image_handle *img_handle;
2601 struct hash_entry *entry;
2602
2603 entry = _mesa_hash_table_search(sctx->img_handles,
2604 (void *)(uintptr_t)handle);
2605 if (!entry)
2606 return;
2607
2608 img_handle = (struct si_image_handle *)entry->data;
2609
2610 util_copy_image_view(&img_handle->view, NULL);
2611 _mesa_hash_table_remove(sctx->img_handles, entry);
2612 FREE(img_handle);
2613 }
2614
2615 static void si_make_image_handle_resident(struct pipe_context *ctx,
2616 uint64_t handle, unsigned access,
2617 bool resident)
2618 {
2619 struct si_context *sctx = (struct si_context *)ctx;
2620 struct si_image_handle *img_handle;
2621 struct pipe_image_view *view;
2622 struct si_resource *res;
2623 struct hash_entry *entry;
2624
2625 entry = _mesa_hash_table_search(sctx->img_handles,
2626 (void *)(uintptr_t)handle);
2627 if (!entry)
2628 return;
2629
2630 img_handle = (struct si_image_handle *)entry->data;
2631 view = &img_handle->view;
2632 res = si_resource(view->resource);
2633
2634 if (resident) {
2635 if (res->b.b.target != PIPE_BUFFER) {
2636 struct si_texture *tex = (struct si_texture *)res;
2637 unsigned level = view->u.tex.level;
2638
2639 if (color_needs_decompression(tex)) {
2640 util_dynarray_append(
2641 &sctx->resident_img_needs_color_decompress,
2642 struct si_image_handle *,
2643 img_handle);
2644 }
2645
2646 if (vi_dcc_enabled(tex, level) &&
2647 p_atomic_read(&tex->framebuffers_bound))
2648 sctx->need_check_render_feedback = true;
2649
2650 si_update_bindless_image_descriptor(sctx, img_handle);
2651 } else {
2652 si_update_bindless_buffer_descriptor(sctx,
2653 img_handle->desc_slot,
2654 view->resource,
2655 view->u.buf.offset,
2656 &img_handle->desc_dirty);
2657 }
2658
2659 /* Re-upload the descriptor if it has been updated while it
2660 * wasn't resident.
2661 */
2662 if (img_handle->desc_dirty)
2663 sctx->bindless_descriptors_dirty = true;
2664
2665 /* Add the image handle to the per-context list. */
2666 util_dynarray_append(&sctx->resident_img_handles,
2667 struct si_image_handle *, img_handle);
2668
2669 /* Add the buffers to the current CS in case si_begin_new_cs()
2670 * is not going to be called.
2671 */
2672 si_sampler_view_add_buffer(sctx, view->resource,
2673 (access & PIPE_IMAGE_ACCESS_WRITE) ?
2674 RADEON_USAGE_READWRITE :
2675 RADEON_USAGE_READ, false, false);
2676 } else {
2677 /* Remove the image handle from the per-context list. */
2678 util_dynarray_delete_unordered(&sctx->resident_img_handles,
2679 struct si_image_handle *,
2680 img_handle);
2681
2682 if (res->b.b.target != PIPE_BUFFER) {
2683 util_dynarray_delete_unordered(
2684 &sctx->resident_img_needs_color_decompress,
2685 struct si_image_handle *,
2686 img_handle);
2687 }
2688 }
2689 }
2690
2691 static void si_resident_buffers_add_all_to_bo_list(struct si_context *sctx)
2692 {
2693 unsigned num_resident_tex_handles, num_resident_img_handles;
2694
2695 num_resident_tex_handles = sctx->resident_tex_handles.size /
2696 sizeof(struct si_texture_handle *);
2697 num_resident_img_handles = sctx->resident_img_handles.size /
2698 sizeof(struct si_image_handle *);
2699
2700 /* Add all resident texture handles. */
2701 util_dynarray_foreach(&sctx->resident_tex_handles,
2702 struct si_texture_handle *, tex_handle) {
2703 struct si_sampler_view *sview =
2704 (struct si_sampler_view *)(*tex_handle)->view;
2705
2706 si_sampler_view_add_buffer(sctx, sview->base.texture,
2707 RADEON_USAGE_READ,
2708 sview->is_stencil_sampler, false);
2709 }
2710
2711 /* Add all resident image handles. */
2712 util_dynarray_foreach(&sctx->resident_img_handles,
2713 struct si_image_handle *, img_handle) {
2714 struct pipe_image_view *view = &(*img_handle)->view;
2715
2716 si_sampler_view_add_buffer(sctx, view->resource,
2717 RADEON_USAGE_READWRITE,
2718 false, false);
2719 }
2720
2721 sctx->num_resident_handles += num_resident_tex_handles +
2722 num_resident_img_handles;
2723 assert(sctx->bo_list_add_all_resident_resources);
2724 sctx->bo_list_add_all_resident_resources = false;
2725 }
2726
2727 /* INIT/DEINIT/UPLOAD */
2728
2729 void si_init_all_descriptors(struct si_context *sctx)
2730 {
2731 int i;
2732 unsigned first_shader =
2733 sctx->has_graphics ? 0 : PIPE_SHADER_COMPUTE;
2734
2735 for (i = first_shader; i < SI_NUM_SHADERS; i++) {
2736 bool is_2nd = sctx->chip_class >= GFX9 &&
2737 (i == PIPE_SHADER_TESS_CTRL ||
2738 i == PIPE_SHADER_GEOMETRY);
2739 unsigned num_sampler_slots = SI_NUM_IMAGES / 2 + SI_NUM_SAMPLERS;
2740 unsigned num_buffer_slots = SI_NUM_SHADER_BUFFERS + SI_NUM_CONST_BUFFERS;
2741 int rel_dw_offset;
2742 struct si_descriptors *desc;
2743
2744 if (is_2nd) {
2745 if (i == PIPE_SHADER_TESS_CTRL) {
2746 rel_dw_offset = (R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS -
2747 R_00B430_SPI_SHADER_USER_DATA_LS_0) / 4;
2748 } else { /* PIPE_SHADER_GEOMETRY */
2749 rel_dw_offset = (R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS -
2750 R_00B330_SPI_SHADER_USER_DATA_ES_0) / 4;
2751 }
2752 } else {
2753 rel_dw_offset = SI_SGPR_CONST_AND_SHADER_BUFFERS;
2754 }
2755 desc = si_const_and_shader_buffer_descriptors(sctx, i);
2756 si_init_buffer_resources(&sctx->const_and_shader_buffers[i], desc,
2757 num_buffer_slots, rel_dw_offset,
2758 RADEON_PRIO_SHADER_RW_BUFFER,
2759 RADEON_PRIO_CONST_BUFFER);
2760 desc->slot_index_to_bind_directly = si_get_constbuf_slot(0);
2761
2762 if (is_2nd) {
2763 if (i == PIPE_SHADER_TESS_CTRL) {
2764 rel_dw_offset = (R_00B40C_SPI_SHADER_USER_DATA_ADDR_HI_HS -
2765 R_00B430_SPI_SHADER_USER_DATA_LS_0) / 4;
2766 } else { /* PIPE_SHADER_GEOMETRY */
2767 rel_dw_offset = (R_00B20C_SPI_SHADER_USER_DATA_ADDR_HI_GS -
2768 R_00B330_SPI_SHADER_USER_DATA_ES_0) / 4;
2769 }
2770 } else {
2771 rel_dw_offset = SI_SGPR_SAMPLERS_AND_IMAGES;
2772 }
2773
2774 desc = si_sampler_and_image_descriptors(sctx, i);
2775 si_init_descriptors(desc, rel_dw_offset, 16, num_sampler_slots);
2776
2777 int j;
2778 for (j = 0; j < SI_NUM_IMAGES; j++)
2779 memcpy(desc->list + j * 8, null_image_descriptor, 8 * 4);
2780 for (; j < SI_NUM_IMAGES + SI_NUM_SAMPLERS * 2; j++)
2781 memcpy(desc->list + j * 8, null_texture_descriptor, 8 * 4);
2782 }
2783
2784 si_init_buffer_resources(&sctx->rw_buffers,
2785 &sctx->descriptors[SI_DESCS_RW_BUFFERS],
2786 SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
2787 /* The second priority is used by
2788 * const buffers in RW buffer slots. */
2789 RADEON_PRIO_SHADER_RINGS, RADEON_PRIO_CONST_BUFFER);
2790 sctx->descriptors[SI_DESCS_RW_BUFFERS].num_active_slots = SI_NUM_RW_BUFFERS;
2791
2792 /* Initialize an array of 1024 bindless descriptors; when the limit is
2793 * reached, the array is simply enlarged and re-uploaded as a whole.
2794 */
2795 si_init_bindless_descriptors(sctx, &sctx->bindless_descriptors,
2796 SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES,
2797 1024);
2798
2799 sctx->descriptors_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
2800
2801 /* Set pipe_context functions. */
2802 sctx->b.bind_sampler_states = si_bind_sampler_states;
2803 sctx->b.set_shader_images = si_set_shader_images;
2804 sctx->b.set_constant_buffer = si_pipe_set_constant_buffer;
2805 sctx->b.set_shader_buffers = si_set_shader_buffers;
2806 sctx->b.set_sampler_views = si_set_sampler_views;
2807 sctx->b.create_texture_handle = si_create_texture_handle;
2808 sctx->b.delete_texture_handle = si_delete_texture_handle;
2809 sctx->b.make_texture_handle_resident = si_make_texture_handle_resident;
2810 sctx->b.create_image_handle = si_create_image_handle;
2811 sctx->b.delete_image_handle = si_delete_image_handle;
2812 sctx->b.make_image_handle_resident = si_make_image_handle_resident;
2813
2814 if (!sctx->has_graphics)
2815 return;
2816
2817 sctx->b.set_polygon_stipple = si_set_polygon_stipple;
2818
2819 /* Shader user data. */
2820 sctx->atoms.s.shader_pointers.emit = si_emit_graphics_shader_pointers;
2821
2822 /* Set default and immutable mappings. */
2823 if (sctx->ngg) {
2824 assert(sctx->chip_class >= GFX10);
2825 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B230_SPI_SHADER_USER_DATA_GS_0);
2826 } else {
2827 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
2828 }
2829
2830 if (sctx->chip_class == GFX9) {
2831 si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL,
2832 R_00B430_SPI_SHADER_USER_DATA_LS_0);
2833 si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY,
2834 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2835 } else {
2836 si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL,
2837 R_00B430_SPI_SHADER_USER_DATA_HS_0);
2838 si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY,
2839 R_00B230_SPI_SHADER_USER_DATA_GS_0);
2840 }
2841 si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
2842 }
2843
2844 static bool si_upload_shader_descriptors(struct si_context *sctx, unsigned mask)
2845 {
2846 unsigned dirty = sctx->descriptors_dirty & mask;
2847
2848 /* Assume nothing will go wrong: */
2849 sctx->shader_pointers_dirty |= dirty;
2850
2851 while (dirty) {
2852 unsigned i = u_bit_scan(&dirty);
2853
2854 if (!si_upload_descriptors(sctx, &sctx->descriptors[i]))
2855 return false;
2856 }
2857
2858 sctx->descriptors_dirty &= ~mask;
2859
2860 si_upload_bindless_descriptors(sctx);
2861
2862 return true;
2863 }
2864
2865 bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
2866 {
2867 const unsigned mask = u_bit_consecutive(0, SI_DESCS_FIRST_COMPUTE);
2868 return si_upload_shader_descriptors(sctx, mask);
2869 }
2870
2871 bool si_upload_compute_shader_descriptors(struct si_context *sctx)
2872 {
2873 /* This does not update rw_buffers, because they are not needed for
2874 * compute shaders and the input buffer uses the same SGPRs anyway.
2875 */
2876 const unsigned mask = u_bit_consecutive(SI_DESCS_FIRST_COMPUTE,
2877 SI_NUM_DESCS - SI_DESCS_FIRST_COMPUTE);
2878 return si_upload_shader_descriptors(sctx, mask);
2879 }
2880
2881 void si_release_all_descriptors(struct si_context *sctx)
2882 {
2883 int i;
2884
2885 for (i = 0; i < SI_NUM_SHADERS; i++) {
2886 si_release_buffer_resources(&sctx->const_and_shader_buffers[i],
2887 si_const_and_shader_buffer_descriptors(sctx, i));
2888 si_release_sampler_views(&sctx->samplers[i]);
2889 si_release_image_views(&sctx->images[i]);
2890 }
2891 si_release_buffer_resources(&sctx->rw_buffers,
2892 &sctx->descriptors[SI_DESCS_RW_BUFFERS]);
2893 for (i = 0; i < SI_NUM_VERTEX_BUFFERS; i++)
2894 pipe_vertex_buffer_unreference(&sctx->vertex_buffer[i]);
2895
2896 for (i = 0; i < SI_NUM_DESCS; ++i)
2897 si_release_descriptors(&sctx->descriptors[i]);
2898
2899 si_resource_reference(&sctx->vb_descriptors_buffer, NULL);
2900 sctx->vb_descriptors_gpu_list = NULL; /* points into a mapped buffer */
2901
2902 si_release_bindless_descriptors(sctx);
2903 }
2904
2905 void si_gfx_resources_add_all_to_bo_list(struct si_context *sctx)
2906 {
2907 for (unsigned i = 0; i < SI_NUM_GRAPHICS_SHADERS; i++) {
2908 si_buffer_resources_begin_new_cs(sctx, &sctx->const_and_shader_buffers[i]);
2909 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i]);
2910 si_image_views_begin_new_cs(sctx, &sctx->images[i]);
2911 }
2912 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
2913 si_vertex_buffers_begin_new_cs(sctx);
2914
2915 if (sctx->bo_list_add_all_resident_resources)
2916 si_resident_buffers_add_all_to_bo_list(sctx);
2917
2918 assert(sctx->bo_list_add_all_gfx_resources);
2919 sctx->bo_list_add_all_gfx_resources = false;
2920 }
2921
2922 void si_compute_resources_add_all_to_bo_list(struct si_context *sctx)
2923 {
2924 unsigned sh = PIPE_SHADER_COMPUTE;
2925
2926 si_buffer_resources_begin_new_cs(sctx, &sctx->const_and_shader_buffers[sh]);
2927 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[sh]);
2928 si_image_views_begin_new_cs(sctx, &sctx->images[sh]);
2929 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
2930
2931 if (sctx->bo_list_add_all_resident_resources)
2932 si_resident_buffers_add_all_to_bo_list(sctx);
2933
2934 assert(sctx->bo_list_add_all_compute_resources);
2935 sctx->bo_list_add_all_compute_resources = false;
2936 }
2937
2938 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
2939 {
2940 for (unsigned i = 0; i < SI_NUM_DESCS; ++i)
2941 si_descriptors_begin_new_cs(sctx, &sctx->descriptors[i]);
2942 si_descriptors_begin_new_cs(sctx, &sctx->bindless_descriptors);
2943
2944 si_shader_pointers_begin_new_cs(sctx);
2945
2946 sctx->bo_list_add_all_resident_resources = true;
2947 sctx->bo_list_add_all_gfx_resources = true;
2948 sctx->bo_list_add_all_compute_resources = true;
2949 }
2950
2951 void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
2952 uint64_t new_active_mask)
2953 {
2954 struct si_descriptors *desc = &sctx->descriptors[desc_idx];
2955
2956 /* Ignore no-op updates and updates that disable all slots. */
2957 if (!new_active_mask ||
2958 new_active_mask == u_bit_consecutive64(desc->first_active_slot,
2959 desc->num_active_slots))
2960 return;
2961
2962 int first, count;
2963 u_bit_scan_consecutive_range64(&new_active_mask, &first, &count);
2964 assert(new_active_mask == 0);
2965
2966 /* Upload/dump descriptors if slots are being enabled. */
2967 if (first < desc->first_active_slot ||
2968 first + count > desc->first_active_slot + desc->num_active_slots)
2969 sctx->descriptors_dirty |= 1u << desc_idx;
2970
2971 desc->first_active_slot = first;
2972 desc->num_active_slots = count;
2973 }
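/* Example of the contract enforced by the assert above: the new mask
 * must be one consecutive run of bits, e.g. 0b0111100 yields first = 2,
 * count = 4 and leaves new_active_mask == 0; a mask with two separate
 * runs would trip the assertion. */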
2974
2975 void si_set_active_descriptors_for_shader(struct si_context *sctx,
2976 struct si_shader_selector *sel)
2977 {
2978 if (!sel)
2979 return;
2980
2981 si_set_active_descriptors(sctx,
2982 si_const_and_shader_buffer_descriptors_idx(sel->type),
2983 sel->active_const_and_shader_buffers);
2984 si_set_active_descriptors(sctx,
2985 si_sampler_and_image_descriptors_idx(sel->type),
2986 sel->active_samplers_and_images);
2987 }