/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/* Resource binding slots and sampler states (each described with 8 or
 * 4 dwords) are stored in lists in memory which is accessed by shaders
 * using scalar load instructions.
 *
 * This file is responsible for managing such lists. It keeps a copy of all
 * descriptors in CPU memory and re-uploads a whole list if some slots have
 * been changed.
 *
 * This code is also responsible for updating shader pointers to those lists.
 *
 * Note that CP DMA can't be used for updating the lists, because a GPU hang
 * could leave the list in a mid-IB state and the next IB would get wrong
 * descriptors and the whole context would be unusable at that point.
 * (Note: Register shadowing can't be used for the same reason.)
 *
 * Also, uploading descriptors to newly allocated memory doesn't require
 * a KCACHE flush.
 *
 *
 * Possible scenarios for one 16 dword image+sampler slot:
 *
 *         | Image        | w/ FMASK   | Buffer       | NULL
 * [ 0: 3] | Image[0:3]   | Image[0:3] | Null[0:3]    | Null[0:3]
 * [ 4: 7] | Image[4:7]   | Image[4:7] | Buffer[0:3]  | 0
 * [ 8:11] | Null[0:3]    | Fmask[0:3] | Null[0:3]    | Null[0:3]
 * [12:15] | Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
 *
 * FMASK implies MSAA, therefore no sampler state.
 * Sampler states are never unbound except when FMASK is bound.
 */

#include "si_pipe.h"
#include "sid.h"

#include "util/hash_table.h"
#include "util/u_idalloc.h"
#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"


/* NULL image and buffer descriptor for textures (alpha = 1) and images
 * (alpha = 0).
 *
 * For images, all fields must be zero except for the swizzle, which
 * supports arbitrary combinations of 0s and 1s. The texture type must be
 * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
 *
 * For buffers, all fields must be zero. If they are not, the hw hangs.
 *
 * This is the only reason why the buffer descriptor must be in words [4:7].
 */
static uint32_t null_texture_descriptor[8] = {
	0,
	0,
	0,
	S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
	S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
	/* the rest must contain zeros, which is also used by the buffer
	 * descriptor */
};

static uint32_t null_image_descriptor[8] = {
	0,
	0,
	0,
	S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
	/* the rest must contain zeros, which is also used by the buffer
	 * descriptor */
};
static uint64_t si_desc_extract_buffer_address(const uint32_t *desc)
{
	uint64_t va = desc[0] |
		      ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);

	/* Sign-extend the 48-bit address. */
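	/* (Shifting left by 16 moves bit 47 into the sign bit; the arithmetic
	 * shift right then replicates it into bits [63:48], e.g.
	 * 0x0000_8000_0000_0000 becomes 0xffff_8000_0000_0000.)
	 */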
	va <<= 16;
	va = (int64_t)va >> 16;
	return va;
}

static void si_init_descriptor_list(uint32_t *desc_list,
				    unsigned element_dw_size,
				    unsigned num_elements,
				    const uint32_t *null_descriptor)
{
	int i;

	/* Initialize the array to NULL descriptors if the element size is
	 * a multiple of 8 dwords. */
	if (null_descriptor) {
		assert(element_dw_size % 8 == 0);
		for (i = 0; i < num_elements * element_dw_size / 8; i++)
			memcpy(desc_list + i * 8, null_descriptor, 8 * 4);
	}
}

static void si_init_descriptors(struct si_descriptors *desc,
				short shader_userdata_rel_index,
				unsigned element_dw_size,
				unsigned num_elements)
{
	desc->list = CALLOC(num_elements, element_dw_size * 4);
	desc->element_dw_size = element_dw_size;
	desc->num_elements = num_elements;
	desc->shader_userdata_offset = shader_userdata_rel_index * 4;
	desc->slot_index_to_bind_directly = -1;
}

static void si_release_descriptors(struct si_descriptors *desc)
{
	si_resource_reference(&desc->buffer, NULL);
	FREE(desc->list);
}

static bool si_upload_descriptors(struct si_context *sctx,
				  struct si_descriptors *desc)
{
	unsigned slot_size = desc->element_dw_size * 4;
	unsigned first_slot_offset = desc->first_active_slot * slot_size;
	unsigned upload_size = desc->num_active_slots * slot_size;

	/* Skip the upload if no shader is using the descriptors. dirty_mask
	 * will stay dirty and the descriptors will be uploaded when there is
	 * a shader using them.
	 */
	if (!upload_size)
		return true;

	/* If there is just one active descriptor, bind it directly. */
	if ((int)desc->first_active_slot == desc->slot_index_to_bind_directly &&
	    desc->num_active_slots == 1) {
		uint32_t *descriptor = &desc->list[desc->slot_index_to_bind_directly *
						   desc->element_dw_size];

		/* The buffer is already in the buffer list. */
		si_resource_reference(&desc->buffer, NULL);
		desc->gpu_list = NULL;
		desc->gpu_address = si_desc_extract_buffer_address(descriptor);
		si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
		return true;
	}

	uint32_t *ptr;
	unsigned buffer_offset;
	u_upload_alloc(sctx->b.const_uploader, first_slot_offset, upload_size,
		       si_optimal_tcc_alignment(sctx, upload_size),
		       &buffer_offset, (struct pipe_resource**)&desc->buffer,
		       (void**)&ptr);
	if (!desc->buffer) {
		desc->gpu_address = 0;
		return false; /* skip the draw call */
	}

	util_memcpy_cpu_to_le32(ptr, (char*)desc->list + first_slot_offset,
				upload_size);
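	/* Rebase the GPU-visible pointer to virtual slot 0 so it can be
	 * indexed with the same slot numbers as desc->list, even though only
	 * the active slot range was actually uploaded.
	 */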
	desc->gpu_list = ptr - first_slot_offset / 4;

	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, desc->buffer,
				  RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);

	/* The shader pointer should point to slot 0. */
	buffer_offset -= first_slot_offset;
	desc->gpu_address = desc->buffer->gpu_address + buffer_offset;

	assert(desc->buffer->flags & RADEON_FLAG_32BIT);
	assert((desc->buffer->gpu_address >> 32) == sctx->screen->info.address32_hi);
	assert((desc->gpu_address >> 32) == sctx->screen->info.address32_hi);

	si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
	return true;
}

static void
si_descriptors_begin_new_cs(struct si_context *sctx, struct si_descriptors *desc)
{
	if (!desc->buffer)
		return;

	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, desc->buffer,
				  RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
}

/* SAMPLER VIEWS */

static inline enum radeon_bo_priority
si_get_sampler_view_priority(struct si_resource *res)
{
	if (res->b.b.target == PIPE_BUFFER)
		return RADEON_PRIO_SAMPLER_BUFFER;

	if (res->b.b.nr_samples > 1)
		return RADEON_PRIO_SAMPLER_TEXTURE_MSAA;

	return RADEON_PRIO_SAMPLER_TEXTURE;
}

static struct si_descriptors *
si_sampler_and_image_descriptors(struct si_context *sctx, unsigned shader)
{
	return &sctx->descriptors[si_sampler_and_image_descriptors_idx(shader)];
}

static void si_release_sampler_views(struct si_samplers *samplers)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(samplers->views); i++) {
		pipe_sampler_view_reference(&samplers->views[i], NULL);
	}
}

static void si_sampler_view_add_buffer(struct si_context *sctx,
				       struct pipe_resource *resource,
				       enum radeon_bo_usage usage,
				       bool is_stencil_sampler,
				       bool check_mem)
{
	struct si_texture *tex = (struct si_texture*)resource;
	enum radeon_bo_priority priority;

	if (!resource)
		return;

	/* Use the flushed depth texture if direct sampling is unsupported. */
	if (resource->target != PIPE_BUFFER &&
	    tex->is_depth && !si_can_sample_zs(tex, is_stencil_sampler))
		tex = tex->flushed_depth_texture;

	priority = si_get_sampler_view_priority(&tex->buffer);
	radeon_add_to_gfx_buffer_list_check_mem(sctx, &tex->buffer, usage, priority,
						check_mem);

	if (resource->target == PIPE_BUFFER)
		return;

	/* Add separate DCC. */
	if (tex->dcc_separate_buffer) {
		radeon_add_to_gfx_buffer_list_check_mem(sctx, tex->dcc_separate_buffer,
							usage, RADEON_PRIO_SEPARATE_META, check_mem);
	}
}

static void si_sampler_views_begin_new_cs(struct si_context *sctx,
					  struct si_samplers *samplers)
{
	unsigned mask = samplers->enabled_mask;

	/* Add buffers to the CS. */
	while (mask) {
		int i = u_bit_scan(&mask);
		struct si_sampler_view *sview = (struct si_sampler_view *)samplers->views[i];

		si_sampler_view_add_buffer(sctx, sview->base.texture,
					   RADEON_USAGE_READ,
					   sview->is_stencil_sampler, false);
	}
}

/* Set buffer descriptor fields that can be changed by reallocations. */
static void si_set_buf_desc_address(struct si_resource *buf,
				    uint64_t offset, uint32_t *state)
{
	uint64_t va = buf->gpu_address + offset;

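	/* Dword 0 holds the low 32 bits of the address; the upper bits go in
	 * the BASE_ADDRESS_HI field of dword 1, so only that field is
	 * rewritten and the other bits of dword 1 are preserved.
	 */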
	state[0] = va;
	state[1] &= C_008F04_BASE_ADDRESS_HI;
	state[1] |= S_008F04_BASE_ADDRESS_HI(va >> 32);
}

/* Set texture descriptor fields that can be changed by reallocations.
 *
 * \param tex			texture
 * \param base_level_info	information of the level of BASE_ADDRESS
 * \param base_level		the level of BASE_ADDRESS
 * \param first_level		pipe_sampler_view.u.tex.first_level
 * \param block_width		util_format_get_blockwidth()
 * \param is_stencil		select between separate Z & Stencil
 * \param state			descriptor to update
 */
void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
				    struct si_texture *tex,
				    const struct legacy_surf_level *base_level_info,
				    unsigned base_level, unsigned first_level,
				    unsigned block_width, bool is_stencil,
				    uint32_t *state)
{
	uint64_t va, meta_va = 0;

	if (tex->is_depth && !si_can_sample_zs(tex, is_stencil)) {
		tex = tex->flushed_depth_texture;
		is_stencil = false;
	}

	va = tex->buffer.gpu_address;

	if (sscreen->info.chip_class >= GFX9) {
		/* Only stencil_offset needs to be added here. */
		if (is_stencil)
			va += tex->surface.u.gfx9.stencil_offset;
		else
			va += tex->surface.u.gfx9.surf_offset;
	} else {
		va += base_level_info->offset;
	}

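	/* Image descriptor base addresses are 256-byte aligned: dword 0 holds
	 * address bits [39:8] and BASE_ADDRESS_HI holds bits [47:40].
	 */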
	state[0] = va >> 8;
	state[1] &= C_008F14_BASE_ADDRESS_HI;
	state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);

	/* Only macrotiled modes can set tile swizzle.
	 * GFX9 doesn't use (legacy) base_level_info.
	 */
	if (sscreen->info.chip_class >= GFX9 ||
	    base_level_info->mode == RADEON_SURF_MODE_2D)
		state[0] |= tex->surface.tile_swizzle;

	if (sscreen->info.chip_class >= GFX8) {
		state[6] &= C_008F28_COMPRESSION_EN;

		if (vi_dcc_enabled(tex, first_level)) {
			meta_va = (!tex->dcc_separate_buffer ? tex->buffer.gpu_address : 0) +
				  tex->dcc_offset;

			if (sscreen->info.chip_class == GFX8) {
				meta_va += base_level_info->dcc_offset;
				assert(base_level_info->mode == RADEON_SURF_MODE_2D);
			}

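			/* Bake the tile swizzle into the DCC address. Masking
			 * with dcc_alignment - 1 keeps the swizzle bits inside
			 * the alignment padding of the DCC surface.
			 */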
			unsigned dcc_tile_swizzle = tex->surface.tile_swizzle << 8;
			dcc_tile_swizzle &= tex->surface.dcc_alignment - 1;
			meta_va |= dcc_tile_swizzle;
		} else if (vi_tc_compat_htile_enabled(tex, first_level,
						      is_stencil ? PIPE_MASK_S : PIPE_MASK_Z)) {
			meta_va = tex->buffer.gpu_address + tex->htile_offset;
		}

		if (meta_va)
			state[6] |= S_008F28_COMPRESSION_EN(1);
	}

	if (sscreen->info.chip_class >= GFX8 && sscreen->info.chip_class <= GFX9)
		state[7] = meta_va >> 8;

	if (sscreen->info.chip_class >= GFX10) {
		state[3] &= C_00A00C_SW_MODE;

		if (is_stencil) {
			state[3] |= S_00A00C_SW_MODE(tex->surface.u.gfx9.stencil.swizzle_mode);
		} else {
			state[3] |= S_00A00C_SW_MODE(tex->surface.u.gfx9.surf.swizzle_mode);
		}

		state[6] &= C_00A018_META_DATA_ADDRESS_LO &
			    C_00A018_META_PIPE_ALIGNED;

		if (meta_va) {
			struct gfx9_surf_meta_flags meta;

			if (tex->dcc_offset)
				meta = tex->surface.u.gfx9.dcc;
			else
				meta = tex->surface.u.gfx9.htile;

			state[6] |= S_00A018_META_PIPE_ALIGNED(meta.pipe_aligned) |
				    S_00A018_META_DATA_ADDRESS_LO(meta_va >> 8);
		}

		state[7] = meta_va >> 16;
	} else if (sscreen->info.chip_class == GFX9) {
		state[3] &= C_008F1C_SW_MODE;
		state[4] &= C_008F20_PITCH;

		if (is_stencil) {
			state[3] |= S_008F1C_SW_MODE(tex->surface.u.gfx9.stencil.swizzle_mode);
			state[4] |= S_008F20_PITCH(tex->surface.u.gfx9.stencil.epitch);
		} else {
			state[3] |= S_008F1C_SW_MODE(tex->surface.u.gfx9.surf.swizzle_mode);
			state[4] |= S_008F20_PITCH(tex->surface.u.gfx9.surf.epitch);
		}

		state[5] &= C_008F24_META_DATA_ADDRESS &
			    C_008F24_META_PIPE_ALIGNED &
			    C_008F24_META_RB_ALIGNED;
		if (meta_va) {
			struct gfx9_surf_meta_flags meta;

			if (tex->dcc_offset)
				meta = tex->surface.u.gfx9.dcc;
			else
				meta = tex->surface.u.gfx9.htile;

			state[5] |= S_008F24_META_DATA_ADDRESS(meta_va >> 40) |
				    S_008F24_META_PIPE_ALIGNED(meta.pipe_aligned) |
				    S_008F24_META_RB_ALIGNED(meta.rb_aligned);
		}
	} else {
		/* GFX6-GFX8 */
		unsigned pitch = base_level_info->nblk_x * block_width;
		unsigned index = si_tile_mode_index(tex, base_level, is_stencil);

		state[3] &= C_008F1C_TILING_INDEX;
		state[3] |= S_008F1C_TILING_INDEX(index);
		state[4] &= C_008F20_PITCH;
		state[4] |= S_008F20_PITCH(pitch - 1);
	}
}

static void si_set_sampler_state_desc(struct si_sampler_state *sstate,
				      struct si_sampler_view *sview,
				      struct si_texture *tex,
				      uint32_t *desc)
{
	if (sview && sview->is_integer)
		memcpy(desc, sstate->integer_val, 4*4);
	else if (tex && tex->upgraded_depth &&
		 (!sview || !sview->is_stencil_sampler))
		memcpy(desc, sstate->upgraded_depth_val, 4*4);
	else
		memcpy(desc, sstate->val, 4*4);
}

static void si_set_sampler_view_desc(struct si_context *sctx,
				     struct si_sampler_view *sview,
				     struct si_sampler_state *sstate,
				     uint32_t *desc)
{
	struct pipe_sampler_view *view = &sview->base;
	struct si_texture *tex = (struct si_texture *)view->texture;
	bool is_buffer = tex->buffer.b.b.target == PIPE_BUFFER;

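	/* The view was created with a format that DCC can't decode; either
	 * disable DCC for the texture or decompress it once so plain reads
	 * return correct data.
	 */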
	if (unlikely(!is_buffer && sview->dcc_incompatible)) {
		if (vi_dcc_enabled(tex, view->u.tex.first_level))
			if (!si_texture_disable_dcc(sctx, tex))
				si_decompress_dcc(sctx, tex);

		sview->dcc_incompatible = false;
	}

	assert(tex); /* views with texture == NULL aren't supported */
	memcpy(desc, sview->state, 8*4);

	if (is_buffer) {
		si_set_buf_desc_address(&tex->buffer,
					sview->base.u.buf.offset,
					desc + 4);
	} else {
		bool is_separate_stencil = tex->db_compatible &&
					   sview->is_stencil_sampler;

		si_set_mutable_tex_desc_fields(sctx->screen, tex,
					       sview->base_level_info,
					       sview->base_level,
					       sview->base.u.tex.first_level,
					       sview->block_width,
					       is_separate_stencil,
					       desc);
	}

	if (!is_buffer && tex->surface.fmask_size) {
		memcpy(desc + 8, sview->fmask_state, 8*4);
	} else {
		/* Disable FMASK and bind sampler state in [12:15]. */
		memcpy(desc + 8, null_texture_descriptor, 4*4);

		if (sstate)
			si_set_sampler_state_desc(sstate, sview,
						  is_buffer ? NULL : tex,
						  desc + 12);
	}
}

static bool color_needs_decompression(struct si_texture *tex)
{
	return tex->surface.fmask_size ||
	       (tex->dirty_level_mask &&
		(tex->cmask_buffer || tex->dcc_offset));
}

static bool depth_needs_decompression(struct si_texture *tex)
{
	/* If the depth/stencil texture is TC-compatible, no decompression
	 * will be done. The decompression function will only flush DB caches
	 * to make it coherent with shaders. That's necessary because the driver
	 * doesn't flush DB caches in any other case.
	 */
	return tex->db_compatible;
}

static void si_set_sampler_view(struct si_context *sctx,
				unsigned shader,
				unsigned slot, struct pipe_sampler_view *view,
				bool disallow_early_out)
{
	struct si_samplers *samplers = &sctx->samplers[shader];
	struct si_sampler_view *sview = (struct si_sampler_view*)view;
	struct si_descriptors *descs = si_sampler_and_image_descriptors(sctx, shader);
	unsigned desc_slot = si_get_sampler_slot(slot);
	uint32_t *desc = descs->list + desc_slot * 16;

	if (samplers->views[slot] == view && !disallow_early_out)
		return;

	if (view) {
		struct si_texture *tex = (struct si_texture *)view->texture;

		si_set_sampler_view_desc(sctx, sview,
					 samplers->sampler_states[slot], desc);

		if (tex->buffer.b.b.target == PIPE_BUFFER) {
			tex->buffer.bind_history |= PIPE_BIND_SAMPLER_VIEW;
			samplers->needs_depth_decompress_mask &= ~(1u << slot);
			samplers->needs_color_decompress_mask &= ~(1u << slot);
		} else {
			if (depth_needs_decompression(tex)) {
				samplers->needs_depth_decompress_mask |= 1u << slot;
			} else {
				samplers->needs_depth_decompress_mask &= ~(1u << slot);
			}
			if (color_needs_decompression(tex)) {
				samplers->needs_color_decompress_mask |= 1u << slot;
			} else {
				samplers->needs_color_decompress_mask &= ~(1u << slot);
			}

			if (tex->dcc_offset &&
			    p_atomic_read(&tex->framebuffers_bound))
				sctx->need_check_render_feedback = true;
		}

		pipe_sampler_view_reference(&samplers->views[slot], view);
		samplers->enabled_mask |= 1u << slot;

		/* Since this can flush, it must be done after enabled_mask is
		 * updated. */
		si_sampler_view_add_buffer(sctx, view->texture,
					   RADEON_USAGE_READ,
					   sview->is_stencil_sampler, true);
	} else {
		pipe_sampler_view_reference(&samplers->views[slot], NULL);
		memcpy(desc, null_texture_descriptor, 8*4);
		/* Only clear the lower dwords of FMASK. */
		memcpy(desc + 8, null_texture_descriptor, 4*4);
		/* Re-set the sampler state if we are transitioning from FMASK. */
		if (samplers->sampler_states[slot])
			si_set_sampler_state_desc(samplers->sampler_states[slot], NULL, NULL,
						  desc + 12);

		samplers->enabled_mask &= ~(1u << slot);
		samplers->needs_depth_decompress_mask &= ~(1u << slot);
		samplers->needs_color_decompress_mask &= ~(1u << slot);
	}

	sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
}

static void si_update_shader_needs_decompress_mask(struct si_context *sctx,
						   unsigned shader)
{
	struct si_samplers *samplers = &sctx->samplers[shader];
	unsigned shader_bit = 1 << shader;

	if (samplers->needs_depth_decompress_mask ||
	    samplers->needs_color_decompress_mask ||
	    sctx->images[shader].needs_color_decompress_mask)
		sctx->shader_needs_decompress_mask |= shader_bit;
	else
		sctx->shader_needs_decompress_mask &= ~shader_bit;
}

static void si_set_sampler_views(struct pipe_context *ctx,
				 enum pipe_shader_type shader, unsigned start,
				 unsigned count,
				 struct pipe_sampler_view **views)
{
	struct si_context *sctx = (struct si_context *)ctx;
	int i;

	if (!count || shader >= SI_NUM_SHADERS)
		return;

	if (views) {
		for (i = 0; i < count; i++)
			si_set_sampler_view(sctx, shader, start + i, views[i], false);
	} else {
		for (i = 0; i < count; i++)
			si_set_sampler_view(sctx, shader, start + i, NULL, false);
	}

	si_update_shader_needs_decompress_mask(sctx, shader);
}

static void
si_samplers_update_needs_color_decompress_mask(struct si_samplers *samplers)
{
	unsigned mask = samplers->enabled_mask;

	while (mask) {
		int i = u_bit_scan(&mask);
		struct pipe_resource *res = samplers->views[i]->texture;

		if (res && res->target != PIPE_BUFFER) {
			struct si_texture *tex = (struct si_texture *)res;

			if (color_needs_decompression(tex)) {
				samplers->needs_color_decompress_mask |= 1u << i;
			} else {
				samplers->needs_color_decompress_mask &= ~(1u << i);
			}
		}
	}
}

/* IMAGE VIEWS */

static void
si_release_image_views(struct si_images *images)
{
	unsigned i;

	for (i = 0; i < SI_NUM_IMAGES; ++i) {
		struct pipe_image_view *view = &images->views[i];

		pipe_resource_reference(&view->resource, NULL);
	}
}

static void
si_image_views_begin_new_cs(struct si_context *sctx, struct si_images *images)
{
	uint mask = images->enabled_mask;

	/* Add buffers to the CS. */
	while (mask) {
		int i = u_bit_scan(&mask);
		struct pipe_image_view *view = &images->views[i];

		assert(view->resource);

		si_sampler_view_add_buffer(sctx, view->resource,
					   RADEON_USAGE_READWRITE, false, false);
	}
}

static void
si_disable_shader_image(struct si_context *ctx, unsigned shader, unsigned slot)
{
	struct si_images *images = &ctx->images[shader];

	if (images->enabled_mask & (1u << slot)) {
		struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
		unsigned desc_slot = si_get_image_slot(slot);

		pipe_resource_reference(&images->views[slot].resource, NULL);
		images->needs_color_decompress_mask &= ~(1 << slot);

		memcpy(descs->list + desc_slot*8, null_image_descriptor, 8*4);
		images->enabled_mask &= ~(1u << slot);
		ctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
	}
}

static void
si_mark_image_range_valid(const struct pipe_image_view *view)
{
	struct si_resource *res = si_resource(view->resource);

	if (res->b.b.target != PIPE_BUFFER)
		return;

	util_range_add(&res->valid_buffer_range,
		       view->u.buf.offset,
		       view->u.buf.offset + view->u.buf.size);
}

static void si_set_shader_image_desc(struct si_context *ctx,
				     const struct pipe_image_view *view,
				     bool skip_decompress,
				     uint32_t *desc, uint32_t *fmask_desc)
{
	struct si_screen *screen = ctx->screen;
	struct si_resource *res;

	res = si_resource(view->resource);

	if (res->b.b.target == PIPE_BUFFER ||
	    view->shader_access & SI_IMAGE_ACCESS_AS_BUFFER) {
		if (view->access & PIPE_IMAGE_ACCESS_WRITE)
			si_mark_image_range_valid(view);

		si_make_buffer_descriptor(screen, res,
					  view->format,
					  view->u.buf.offset,
					  view->u.buf.size, desc);
		si_set_buf_desc_address(res, view->u.buf.offset, desc + 4);
	} else {
		static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
		struct si_texture *tex = (struct si_texture *)res;
		unsigned level = view->u.tex.level;
		unsigned width, height, depth, hw_level;
		bool uses_dcc = vi_dcc_enabled(tex, level);
		unsigned access = view->access;

		/* Clear the write flag when writes can't occur.
		 * Note that DCC_DECOMPRESS for MSAA doesn't work in some cases,
		 * so we don't want to trigger it.
		 */
		if (tex->is_depth ||
		    (!fmask_desc && tex->surface.fmask_size != 0)) {
			assert(!"Z/S and MSAA image stores are not supported");
			access &= ~PIPE_IMAGE_ACCESS_WRITE;
		}

		assert(!tex->is_depth);
		assert(fmask_desc || tex->surface.fmask_size == 0);

		if (uses_dcc && !skip_decompress &&
		    (view->access & PIPE_IMAGE_ACCESS_WRITE ||
		     !vi_dcc_formats_compatible(screen, res->b.b.format, view->format))) {
			/* If DCC can't be disabled, at least decompress it.
			 * The decompression is relatively cheap if the surface
			 * has been decompressed already.
			 */
			if (!si_texture_disable_dcc(ctx, tex))
				si_decompress_dcc(ctx, tex);
		}

		if (ctx->chip_class >= GFX9) {
			/* Always set the base address. The swizzle modes don't
			 * allow setting mipmap level offsets as the base.
			 */
			width = res->b.b.width0;
			height = res->b.b.height0;
			depth = res->b.b.depth0;
			hw_level = level;
		} else {
			/* Always force the base level to the selected level.
			 *
			 * This is required for 3D textures, where otherwise
			 * selecting a single slice for non-layered bindings
			 * fails. It doesn't hurt the other targets.
			 */
			width = u_minify(res->b.b.width0, level);
			height = u_minify(res->b.b.height0, level);
			depth = u_minify(res->b.b.depth0, level);
			hw_level = 0;
		}

		screen->make_texture_descriptor(screen, tex,
						false, res->b.b.target,
						view->format, swizzle,
						hw_level, hw_level,
						view->u.tex.first_layer,
						view->u.tex.last_layer,
						width, height, depth,
						desc, fmask_desc);
		si_set_mutable_tex_desc_fields(screen, tex,
					       &tex->surface.u.legacy.level[level],
					       level, level,
					       util_format_get_blockwidth(view->format),
					       false, desc);
	}
}

static void si_set_shader_image(struct si_context *ctx,
				unsigned shader,
				unsigned slot, const struct pipe_image_view *view,
				bool skip_decompress)
{
	struct si_images *images = &ctx->images[shader];
	struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
	struct si_resource *res;
	unsigned desc_slot = si_get_image_slot(slot);
	uint32_t *desc = descs->list + desc_slot * 8;

	if (!view || !view->resource) {
		si_disable_shader_image(ctx, shader, slot);
		return;
	}

	res = si_resource(view->resource);

	if (&images->views[slot] != view)
		util_copy_image_view(&images->views[slot], view);

	si_set_shader_image_desc(ctx, view, skip_decompress, desc, NULL);

	if (res->b.b.target == PIPE_BUFFER ||
	    view->shader_access & SI_IMAGE_ACCESS_AS_BUFFER) {
		images->needs_color_decompress_mask &= ~(1 << slot);
		res->bind_history |= PIPE_BIND_SHADER_IMAGE;
	} else {
		struct si_texture *tex = (struct si_texture *)res;
		unsigned level = view->u.tex.level;

		if (color_needs_decompression(tex)) {
			images->needs_color_decompress_mask |= 1 << slot;
		} else {
			images->needs_color_decompress_mask &= ~(1 << slot);
		}

		if (vi_dcc_enabled(tex, level) &&
		    p_atomic_read(&tex->framebuffers_bound))
			ctx->need_check_render_feedback = true;
	}

	images->enabled_mask |= 1u << slot;
	ctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);

	/* Since this can flush, it must be done after enabled_mask is updated. */
	si_sampler_view_add_buffer(ctx, &res->b.b,
				   (view->access & PIPE_IMAGE_ACCESS_WRITE) ?
				   RADEON_USAGE_READWRITE : RADEON_USAGE_READ,
				   false, true);
}

static void
si_set_shader_images(struct pipe_context *pipe,
		     enum pipe_shader_type shader,
		     unsigned start_slot, unsigned count,
		     const struct pipe_image_view *views)
{
	struct si_context *ctx = (struct si_context *)pipe;
	unsigned i, slot;

	assert(shader < SI_NUM_SHADERS);

	if (!count)
		return;

	assert(start_slot + count <= SI_NUM_IMAGES);

	if (views) {
		for (i = 0, slot = start_slot; i < count; ++i, ++slot)
			si_set_shader_image(ctx, shader, slot, &views[i], false);
	} else {
		for (i = 0, slot = start_slot; i < count; ++i, ++slot)
			si_set_shader_image(ctx, shader, slot, NULL, false);
	}

	si_update_shader_needs_decompress_mask(ctx, shader);
}

static void
si_images_update_needs_color_decompress_mask(struct si_images *images)
{
	unsigned mask = images->enabled_mask;

	while (mask) {
		int i = u_bit_scan(&mask);
		struct pipe_resource *res = images->views[i].resource;

		if (res && res->target != PIPE_BUFFER) {
			struct si_texture *tex = (struct si_texture *)res;

			if (color_needs_decompression(tex)) {
				images->needs_color_decompress_mask |= 1 << i;
			} else {
				images->needs_color_decompress_mask &= ~(1 << i);
			}
		}
	}
}

void si_update_ps_colorbuf0_slot(struct si_context *sctx)
{
	struct si_buffer_resources *buffers = &sctx->rw_buffers;
	struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
	unsigned slot = SI_PS_IMAGE_COLORBUF0;
	struct pipe_surface *surf = NULL;

	/* si_texture_disable_dcc can get us here again. */
	if (sctx->blitter->running)
		return;

	/* See whether FBFETCH is used and color buffer 0 is set. */
	if (sctx->ps_shader.cso &&
	    sctx->ps_shader.cso->info.uses_fbfetch &&
	    sctx->framebuffer.state.nr_cbufs &&
	    sctx->framebuffer.state.cbufs[0])
		surf = sctx->framebuffer.state.cbufs[0];

	/* Return if FBFETCH transitions from disabled to disabled. */
	if (!buffers->buffers[slot] && !surf)
		return;

	sctx->ps_uses_fbfetch = surf != NULL;
	si_update_ps_iter_samples(sctx);

	if (surf) {
		struct si_texture *tex = (struct si_texture*)surf->texture;
		struct pipe_image_view view;

		assert(tex);
		assert(!tex->is_depth);

		/* Disable DCC, because the texture is used as both a sampler
		 * and color buffer.
		 */
		si_texture_disable_dcc(sctx, tex);

		if (tex->buffer.b.b.nr_samples <= 1 && tex->cmask_buffer) {
			/* Disable CMASK. */
			assert(tex->cmask_buffer != &tex->buffer);
			si_eliminate_fast_color_clear(sctx, tex);
			si_texture_discard_cmask(sctx->screen, tex);
		}

		view.resource = surf->texture;
		view.format = surf->format;
		view.access = PIPE_IMAGE_ACCESS_READ;
		view.u.tex.first_layer = surf->u.tex.first_layer;
		view.u.tex.last_layer = surf->u.tex.last_layer;
		view.u.tex.level = surf->u.tex.level;

		/* Set the descriptor. */
		uint32_t *desc = descs->list + slot*4;
		memset(desc, 0, 16 * 4);
		si_set_shader_image_desc(sctx, &view, true, desc, desc + 8);

		pipe_resource_reference(&buffers->buffers[slot], &tex->buffer.b.b);
		radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
					  &tex->buffer, RADEON_USAGE_READ,
					  RADEON_PRIO_SHADER_RW_IMAGE);
		buffers->enabled_mask |= 1u << slot;
	} else {
		/* Clear the descriptor. */
		memset(descs->list + slot*4, 0, 8*4);
		pipe_resource_reference(&buffers->buffers[slot], NULL);
		buffers->enabled_mask &= ~(1u << slot);
	}

	sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
}

/* SAMPLER STATES */

static void si_bind_sampler_states(struct pipe_context *ctx,
				   enum pipe_shader_type shader,
				   unsigned start, unsigned count, void **states)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_samplers *samplers = &sctx->samplers[shader];
	struct si_descriptors *desc = si_sampler_and_image_descriptors(sctx, shader);
	struct si_sampler_state **sstates = (struct si_sampler_state**)states;
	int i;

	if (!count || shader >= SI_NUM_SHADERS || !sstates)
		return;

	for (i = 0; i < count; i++) {
		unsigned slot = start + i;
		unsigned desc_slot = si_get_sampler_slot(slot);

		if (!sstates[i] ||
		    sstates[i] == samplers->sampler_states[slot])
			continue;

#ifndef NDEBUG
		assert(sstates[i]->magic == SI_SAMPLER_STATE_MAGIC);
#endif
		samplers->sampler_states[slot] = sstates[i];

		/* If FMASK is bound, don't overwrite it.
		 * The sampler state will be set after FMASK is unbound.
		 */
		struct si_sampler_view *sview =
			(struct si_sampler_view *)samplers->views[slot];

		struct si_texture *tex = NULL;

		if (sview && sview->base.texture &&
		    sview->base.texture->target != PIPE_BUFFER)
			tex = (struct si_texture *)sview->base.texture;

		if (tex && tex->surface.fmask_size)
			continue;

		si_set_sampler_state_desc(sstates[i], sview, tex,
					  desc->list + desc_slot * 16 + 12);

		sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
	}
}

/* BUFFER RESOURCES */

static void si_init_buffer_resources(struct si_buffer_resources *buffers,
				     struct si_descriptors *descs,
				     unsigned num_buffers,
				     short shader_userdata_rel_index,
				     enum radeon_bo_priority priority,
				     enum radeon_bo_priority priority_constbuf)
{
	buffers->priority = priority;
	buffers->priority_constbuf = priority_constbuf;
	buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
	buffers->offsets = CALLOC(num_buffers, sizeof(buffers->offsets[0]));

	si_init_descriptors(descs, shader_userdata_rel_index, 4, num_buffers);
}

static void si_release_buffer_resources(struct si_buffer_resources *buffers,
					struct si_descriptors *descs)
{
	int i;

	for (i = 0; i < descs->num_elements; i++) {
		pipe_resource_reference(&buffers->buffers[i], NULL);
	}

	FREE(buffers->buffers);
	FREE(buffers->offsets);
}

static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
					     struct si_buffer_resources *buffers)
{
	unsigned mask = buffers->enabled_mask;

	/* Add buffers to the CS. */
	while (mask) {
		int i = u_bit_scan(&mask);

		radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
					  si_resource(buffers->buffers[i]),
					  buffers->writable_mask & (1u << i) ? RADEON_USAGE_READWRITE :
									       RADEON_USAGE_READ,
					  i < SI_NUM_SHADER_BUFFERS ? buffers->priority :
								      buffers->priority_constbuf);
	}
}

static void si_get_buffer_from_descriptors(struct si_buffer_resources *buffers,
					   struct si_descriptors *descs,
					   unsigned idx, struct pipe_resource **buf,
					   unsigned *offset, unsigned *size)
{
	pipe_resource_reference(buf, buffers->buffers[idx]);
	if (*buf) {
		struct si_resource *res = si_resource(*buf);
		const uint32_t *desc = descs->list + idx * 4;
		uint64_t va;

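		/* Recover the binding parameters from the live descriptor:
		 * dword 2 is NUM_RECORDS, which equals the size in bytes for
		 * the stride-0 descriptors built by this file, and the base
		 * address yields the offset relative to the resource.
		 */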
		*size = desc[2];

		assert(G_008F04_STRIDE(desc[1]) == 0);
		va = si_desc_extract_buffer_address(desc);

		assert(va >= res->gpu_address && va + *size <= res->gpu_address + res->bo_size);
		*offset = va - res->gpu_address;
	}
}

/* VERTEX BUFFERS */

static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
{
	int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
	int i;

	for (i = 0; i < count; i++) {
		int vb = sctx->vertex_elements->vertex_buffer_index[i];

		if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
			continue;
		if (!sctx->vertex_buffer[vb].buffer.resource)
			continue;

		radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
					  si_resource(sctx->vertex_buffer[vb].buffer.resource),
					  RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
	}

	if (!sctx->vb_descriptors_buffer)
		return;
	radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
				  sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
				  RADEON_PRIO_DESCRIPTORS);
}

bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
{
	struct si_vertex_elements *velems = sctx->vertex_elements;
	unsigned i, count;
	unsigned desc_list_byte_size;
	unsigned first_vb_use_mask;
	uint32_t *ptr;

	if (!sctx->vertex_buffers_dirty || !velems)
		return true;

	count = velems->count;

	if (!count)
		return true;

	desc_list_byte_size = velems->desc_list_byte_size;
	first_vb_use_mask = velems->first_vb_use_mask;

	/* Vertex buffer descriptors are the only ones which are uploaded
	 * directly through a staging buffer and don't go through
	 * the fine-grained upload path.
	 */
	u_upload_alloc(sctx->b.const_uploader, 0,
		       desc_list_byte_size,
		       si_optimal_tcc_alignment(sctx, desc_list_byte_size),
		       &sctx->vb_descriptors_offset,
		       (struct pipe_resource**)&sctx->vb_descriptors_buffer,
		       (void**)&ptr);
	if (!sctx->vb_descriptors_buffer) {
		sctx->vb_descriptors_offset = 0;
		sctx->vb_descriptors_gpu_list = NULL;
		return false;
	}

	sctx->vb_descriptors_gpu_list = ptr;
	radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
				  sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
				  RADEON_PRIO_DESCRIPTORS);

	assert(count <= SI_MAX_ATTRIBS);

	for (i = 0; i < count; i++) {
		struct pipe_vertex_buffer *vb;
		struct si_resource *buf;
		unsigned vbo_index = velems->vertex_buffer_index[i];
		uint32_t *desc = &ptr[i*4];

		vb = &sctx->vertex_buffer[vbo_index];
		buf = si_resource(vb->buffer.resource);
		if (!buf) {
			memset(desc, 0, 16);
			continue;
		}

		int64_t offset = (int64_t)((int)vb->buffer_offset) +
				 velems->src_offset[i];

		if (offset >= buf->b.b.width0) {
			assert(offset < buf->b.b.width0);
			memset(desc, 0, 16);
			continue;
		}

		uint64_t va = buf->gpu_address + offset;

		int64_t num_records = (int64_t)buf->b.b.width0 - offset;
		if (sctx->chip_class != GFX8 && vb->stride) {
			/* Round up by rounding down and adding 1 */
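			/* (e.g. 100 bytes left, format_size 12, stride 16:
			 * (100 - 12) / 16 + 1 = 6 records; the 6th record
			 * starts at offset 80 and its 12 bytes still fit.)
			 */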
			num_records = (num_records - velems->format_size[i]) /
				      vb->stride + 1;
		}
		assert(num_records >= 0 && num_records <= UINT_MAX);

		uint32_t rsrc_word3 = velems->rsrc_word3[i];

		/* OOB_SELECT chooses the out-of-bounds check:
		 * - 1: index >= NUM_RECORDS (Structured)
		 * - 3: offset >= NUM_RECORDS (Raw)
		 */
		if (sctx->chip_class >= GFX10)
			rsrc_word3 |= S_008F0C_OOB_SELECT(vb->stride ? 1 : 3);

		desc[0] = va;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(vb->stride);
		desc[2] = num_records;
		desc[3] = rsrc_word3;

		if (first_vb_use_mask & (1 << i)) {
			radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
						  si_resource(vb->buffer.resource),
						  RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
		}
	}

	/* Don't flush the const cache. It would have a very negative effect
	 * on performance (confirmed by testing). New descriptors are always
	 * uploaded to a fresh new buffer, so I don't think flushing the const
	 * cache is needed. */
	si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
	sctx->vertex_buffers_dirty = false;
	sctx->vertex_buffer_pointer_dirty = true;
	sctx->prefetch_L2_mask |= SI_PREFETCH_VBO_DESCRIPTORS;
	return true;
}


/* CONSTANT BUFFERS */

static struct si_descriptors *
si_const_and_shader_buffer_descriptors(struct si_context *sctx, unsigned shader)
{
	return &sctx->descriptors[si_const_and_shader_buffer_descriptors_idx(shader)];
}

void si_upload_const_buffer(struct si_context *sctx, struct si_resource **buf,
			    const uint8_t *ptr, unsigned size, uint32_t *const_offset)
{
	void *tmp;

	u_upload_alloc(sctx->b.const_uploader, 0, size,
		       si_optimal_tcc_alignment(sctx, size),
		       const_offset,
		       (struct pipe_resource**)buf, &tmp);
	if (*buf)
		util_memcpy_cpu_to_le32(tmp, ptr, size);
}

static void si_set_constant_buffer(struct si_context *sctx,
				   struct si_buffer_resources *buffers,
				   unsigned descriptors_idx,
				   uint slot, const struct pipe_constant_buffer *input)
{
	struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
	assert(slot < descs->num_elements);
	pipe_resource_reference(&buffers->buffers[slot], NULL);

	/* GFX7 cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
	 * with a NULL buffer). We need to use a dummy buffer instead. */
	if (sctx->chip_class == GFX7 &&
	    (!input || (!input->buffer && !input->user_buffer)))
		input = &sctx->null_const_buf;

	if (input && (input->buffer || input->user_buffer)) {
		struct pipe_resource *buffer = NULL;
		uint64_t va;
		unsigned buffer_offset;

		/* Upload the user buffer if needed. */
		if (input->user_buffer) {
			si_upload_const_buffer(sctx,
					       (struct si_resource**)&buffer, input->user_buffer,
					       input->buffer_size, &buffer_offset);
			if (!buffer) {
				/* Just unbind on failure. */
				si_set_constant_buffer(sctx, buffers, descriptors_idx, slot, NULL);
				return;
			}
		} else {
			pipe_resource_reference(&buffer, input->buffer);
			buffer_offset = input->buffer_offset;
		}

		va = si_resource(buffer)->gpu_address + buffer_offset;

		/* Set the descriptor. */
		uint32_t *desc = descs->list + slot*4;
		desc[0] = va;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(0);
		desc[2] = input->buffer_size;
		desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

		if (sctx->chip_class >= GFX10) {
			desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
				   S_008F0C_OOB_SELECT(3) |
				   S_008F0C_RESOURCE_LEVEL(1);
		} else {
			desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
				   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
		}

		buffers->buffers[slot] = buffer;
		buffers->offsets[slot] = buffer_offset;
		radeon_add_to_gfx_buffer_list_check_mem(sctx,
							si_resource(buffer),
							RADEON_USAGE_READ,
							buffers->priority_constbuf, true);
		buffers->enabled_mask |= 1u << slot;
	} else {
		/* Clear the descriptor. */
		memset(descs->list + slot*4, 0, sizeof(uint32_t) * 4);
		buffers->enabled_mask &= ~(1u << slot);
	}

	sctx->descriptors_dirty |= 1u << descriptors_idx;
}

static void si_pipe_set_constant_buffer(struct pipe_context *ctx,
					enum pipe_shader_type shader, uint slot,
					const struct pipe_constant_buffer *input)
{
	struct si_context *sctx = (struct si_context *)ctx;

	if (shader >= SI_NUM_SHADERS)
		return;

	if (slot == 0 && input && input->buffer &&
	    !(si_resource(input->buffer)->flags & RADEON_FLAG_32BIT)) {
		assert(!"constant buffer 0 must have a 32-bit VM address, use const_uploader");
		return;
	}

	if (input && input->buffer)
		si_resource(input->buffer)->bind_history |= PIPE_BIND_CONSTANT_BUFFER;

	slot = si_get_constbuf_slot(slot);
	si_set_constant_buffer(sctx, &sctx->const_and_shader_buffers[shader],
			       si_const_and_shader_buffer_descriptors_idx(shader),
			       slot, input);
}

void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader,
				 uint slot, struct pipe_constant_buffer *cbuf)
{
	cbuf->user_buffer = NULL;
	si_get_buffer_from_descriptors(
		&sctx->const_and_shader_buffers[shader],
		si_const_and_shader_buffer_descriptors(sctx, shader),
		si_get_constbuf_slot(slot),
		&cbuf->buffer, &cbuf->buffer_offset, &cbuf->buffer_size);
}

/* SHADER BUFFERS */

static void si_set_shader_buffer(struct si_context *sctx,
				 struct si_buffer_resources *buffers,
				 unsigned descriptors_idx,
				 uint slot, const struct pipe_shader_buffer *sbuffer,
				 bool writable, enum radeon_bo_priority priority)
{
	struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
	uint32_t *desc = descs->list + slot * 4;

	if (!sbuffer || !sbuffer->buffer) {
		pipe_resource_reference(&buffers->buffers[slot], NULL);
		memset(desc, 0, sizeof(uint32_t) * 4);
		buffers->enabled_mask &= ~(1u << slot);
		buffers->writable_mask &= ~(1u << slot);
		sctx->descriptors_dirty |= 1u << descriptors_idx;
		return;
	}

	struct si_resource *buf = si_resource(sbuffer->buffer);
	uint64_t va = buf->gpu_address + sbuffer->buffer_offset;

	desc[0] = va;
	desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
		  S_008F04_STRIDE(0);
	desc[2] = sbuffer->buffer_size;
	desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
		  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
		  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
		  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

	if (sctx->chip_class >= GFX10) {
		desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
			   S_008F0C_OOB_SELECT(3) |
			   S_008F0C_RESOURCE_LEVEL(1);
	} else {
		desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
	}

	pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
	buffers->offsets[slot] = sbuffer->buffer_offset;
	radeon_add_to_gfx_buffer_list_check_mem(sctx, buf,
						writable ? RADEON_USAGE_READWRITE :
							   RADEON_USAGE_READ,
						priority, true);
	if (writable)
		buffers->writable_mask |= 1u << slot;
	else
		buffers->writable_mask &= ~(1u << slot);

	buffers->enabled_mask |= 1u << slot;
	sctx->descriptors_dirty |= 1u << descriptors_idx;

	util_range_add(&buf->valid_buffer_range, sbuffer->buffer_offset,
		       sbuffer->buffer_offset + sbuffer->buffer_size);
}

static void si_set_shader_buffers(struct pipe_context *ctx,
				  enum pipe_shader_type shader,
				  unsigned start_slot, unsigned count,
				  const struct pipe_shader_buffer *sbuffers,
				  unsigned writable_bitmask)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_buffer_resources *buffers = &sctx->const_and_shader_buffers[shader];
	unsigned descriptors_idx = si_const_and_shader_buffer_descriptors_idx(shader);
	unsigned i;

	assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);

	for (i = 0; i < count; ++i) {
		const struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
		unsigned slot = si_get_shaderbuf_slot(start_slot + i);

		if (sbuffer && sbuffer->buffer)
			si_resource(sbuffer->buffer)->bind_history |= PIPE_BIND_SHADER_BUFFER;

		si_set_shader_buffer(sctx, buffers, descriptors_idx, slot, sbuffer,
				     !!(writable_bitmask & (1u << i)),
				     buffers->priority);
	}
}

void si_get_shader_buffers(struct si_context *sctx,
			   enum pipe_shader_type shader,
			   uint start_slot, uint count,
			   struct pipe_shader_buffer *sbuf)
{
	struct si_buffer_resources *buffers = &sctx->const_and_shader_buffers[shader];
	struct si_descriptors *descs = si_const_and_shader_buffer_descriptors(sctx, shader);

	for (unsigned i = 0; i < count; ++i) {
		si_get_buffer_from_descriptors(
			buffers, descs,
			si_get_shaderbuf_slot(start_slot + i),
			&sbuf[i].buffer, &sbuf[i].buffer_offset,
			&sbuf[i].buffer_size);
	}
}

/* RING BUFFERS */

void si_set_rw_buffer(struct si_context *sctx,
		      uint slot, const struct pipe_constant_buffer *input)
{
	si_set_constant_buffer(sctx, &sctx->rw_buffers, SI_DESCS_RW_BUFFERS,
			       slot, input);
}

void si_set_rw_shader_buffer(struct si_context *sctx, uint slot,
			     const struct pipe_shader_buffer *sbuffer)
{
	si_set_shader_buffer(sctx, &sctx->rw_buffers, SI_DESCS_RW_BUFFERS,
			     slot, sbuffer, true, RADEON_PRIO_SHADER_RW_BUFFER);
}

void si_set_ring_buffer(struct si_context *sctx, uint slot,
			struct pipe_resource *buffer,
			unsigned stride, unsigned num_records,
			bool add_tid, bool swizzle,
			unsigned element_size, unsigned index_stride, uint64_t offset)
{
	struct si_buffer_resources *buffers = &sctx->rw_buffers;
	struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];

	/* The stride field in the resource descriptor has 14 bits */
	assert(stride < (1 << 14));

	assert(slot < descs->num_elements);
	pipe_resource_reference(&buffers->buffers[slot], NULL);

	if (buffer) {
		uint64_t va;

		va = si_resource(buffer)->gpu_address + offset;

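		/* Convert the byte sizes below to the hardware encodings used
		 * by the resource descriptor's element-size and index-stride
		 * fields.
		 */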
		switch (element_size) {
		default:
			assert(!"Unsupported ring buffer element size");
		case 0:
		case 2:
			element_size = 0;
			break;
		case 4:
			element_size = 1;
			break;
		case 8:
			element_size = 2;
			break;
		case 16:
			element_size = 3;
			break;
		}

		switch (index_stride) {
		default:
			assert(!"Unsupported ring buffer index stride");
		case 0:
		case 8:
			index_stride = 0;
			break;
		case 16:
			index_stride = 1;
			break;
		case 32:
			index_stride = 2;
			break;
		case 64:
			index_stride = 3;
			break;
		}

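		/* On GFX8 and later, NUM_RECORDS is interpreted in bytes for
		 * these swizzled ring buffers, so convert records to bytes.
		 */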
		if (sctx->chip_class >= GFX8 && stride)
			num_records *= stride;

		/* Set the descriptor. */
		uint32_t *desc = descs->list + slot*4;
		desc[0] = va;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(stride) |
			  S_008F04_SWIZZLE_ENABLE(swizzle);
		desc[2] = num_records;
		desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			  S_008F0C_INDEX_STRIDE(index_stride) |
			  S_008F0C_ADD_TID_ENABLE(add_tid);

		if (sctx->chip_class >= GFX9)
			assert(!swizzle || element_size == 1); /* always 4 bytes on GFX9 */
		else
			desc[3] |= S_008F0C_ELEMENT_SIZE(element_size);

		if (sctx->chip_class >= GFX10) {
			desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
				   S_008F0C_OOB_SELECT(2) |
				   S_008F0C_RESOURCE_LEVEL(1);
		} else {
			desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
				   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
		}

		pipe_resource_reference(&buffers->buffers[slot], buffer);
		radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
					  si_resource(buffer),
					  RADEON_USAGE_READWRITE, buffers->priority);
		buffers->enabled_mask |= 1u << slot;
	} else {
		/* Clear the descriptor. */
		memset(descs->list + slot*4, 0, sizeof(uint32_t) * 4);
		buffers->enabled_mask &= ~(1u << slot);
	}

	sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
}

/* INTERNAL CONST BUFFERS */

static void si_set_polygon_stipple(struct pipe_context *ctx,
				   const struct pipe_poly_stipple *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct pipe_constant_buffer cb = {};
	unsigned stipple[32];
	int i;

	for (i = 0; i < 32; i++)
		stipple[i] = util_bitreverse(state->stipple[i]);

	cb.user_buffer = stipple;
	cb.buffer_size = sizeof(stipple);

	si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE, &cb);
}

/* TEXTURE METADATA ENABLE/DISABLE */

static void
si_resident_handles_update_needs_color_decompress(struct si_context *sctx)
{
	util_dynarray_clear(&sctx->resident_tex_needs_color_decompress);
	util_dynarray_clear(&sctx->resident_img_needs_color_decompress);

	util_dynarray_foreach(&sctx->resident_tex_handles,
			      struct si_texture_handle *, tex_handle) {
		struct pipe_resource *res = (*tex_handle)->view->texture;
		struct si_texture *tex;

		if (!res || res->target == PIPE_BUFFER)
			continue;

		tex = (struct si_texture *)res;
		if (!color_needs_decompression(tex))
			continue;

		util_dynarray_append(&sctx->resident_tex_needs_color_decompress,
				     struct si_texture_handle *, *tex_handle);
	}

	util_dynarray_foreach(&sctx->resident_img_handles,
			      struct si_image_handle *, img_handle) {
		struct pipe_image_view *view = &(*img_handle)->view;
		struct pipe_resource *res = view->resource;
		struct si_texture *tex;

		if (!res || res->target == PIPE_BUFFER)
			continue;

		tex = (struct si_texture *)res;
		if (!color_needs_decompression(tex))
			continue;

		util_dynarray_append(&sctx->resident_img_needs_color_decompress,
				     struct si_image_handle *, *img_handle);
	}
}

/* CMASK can be enabled (for fast clear) and disabled (for texture export)
 * while the texture is bound, possibly by a different context. In that case,
 * call this function to update needs_*_decompress_masks.
 */
void si_update_needs_color_decompress_masks(struct si_context *sctx)
{
	for (int i = 0; i < SI_NUM_SHADERS; ++i) {
		si_samplers_update_needs_color_decompress_mask(&sctx->samplers[i]);
		si_images_update_needs_color_decompress_mask(&sctx->images[i]);
		si_update_shader_needs_decompress_mask(sctx, i);
	}

	si_resident_handles_update_needs_color_decompress(sctx);
}

/* BUFFER DISCARD/INVALIDATION */

/* Reset descriptors of buffer resources after \p buf has been invalidated.
 * If buf == NULL, reset all descriptors.
 */
static void si_reset_buffer_resources(struct si_context *sctx,
				      struct si_buffer_resources *buffers,
				      unsigned descriptors_idx,
				      unsigned slot_mask,
				      struct pipe_resource *buf,
				      enum radeon_bo_priority priority)
{
	struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
	unsigned mask = buffers->enabled_mask & slot_mask;

	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct pipe_resource *buffer = buffers->buffers[i];

		if (buffer && (!buf || buffer == buf)) {
			si_set_buf_desc_address(si_resource(buffer), buffers->offsets[i],
						descs->list + i*4);
			sctx->descriptors_dirty |= 1u << descriptors_idx;

			radeon_add_to_gfx_buffer_list_check_mem(sctx,
								si_resource(buffer),
								buffers->writable_mask & (1u << i) ?
									RADEON_USAGE_READWRITE :
									RADEON_USAGE_READ,
								priority, true);
		}
	}
}

/* Update all buffer bindings where the buffer is bound, including
 * all resource descriptors. This is invalidate_buffer without
 * the invalidation.
 *
 * If buf == NULL, update all buffer bindings.
 */
void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf)
{
	struct si_resource *buffer = si_resource(buf);
	unsigned i, shader;
	unsigned num_elems = sctx->vertex_elements ?
				     sctx->vertex_elements->count : 0;

	/* We changed the buffer, now we need to bind it where the old one
	 * was bound. This consists of 2 things:
	 *   1) Updating the resource descriptor and dirtying it.
	 *   2) Adding a relocation to the CS, so that it's usable.
	 */

	/* Vertex buffers. */
	if (!buffer) {
		if (num_elems)
			sctx->vertex_buffers_dirty = true;
	} else if (buffer->bind_history & PIPE_BIND_VERTEX_BUFFER) {
		for (i = 0; i < num_elems; i++) {
			int vb = sctx->vertex_elements->vertex_buffer_index[i];

			if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
				continue;
			if (!sctx->vertex_buffer[vb].buffer.resource)
				continue;

			if (sctx->vertex_buffer[vb].buffer.resource == buf) {
				sctx->vertex_buffers_dirty = true;
				break;
			}
		}
	}

	/* Streamout buffers. (other internal buffers can't be invalidated) */
	if (!buffer || buffer->bind_history & PIPE_BIND_STREAM_OUTPUT) {
		for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
			struct si_buffer_resources *buffers = &sctx->rw_buffers;
			struct si_descriptors *descs =
				&sctx->descriptors[SI_DESCS_RW_BUFFERS];
			struct pipe_resource *buffer = buffers->buffers[i];

			if (!buffer || (buf && buffer != buf))
				continue;

			si_set_buf_desc_address(si_resource(buffer), buffers->offsets[i],
						descs->list + i*4);
			sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;

			radeon_add_to_gfx_buffer_list_check_mem(sctx,
								si_resource(buffer),
								RADEON_USAGE_WRITE,
								RADEON_PRIO_SHADER_RW_BUFFER,
								true);

			/* Update the streamout state. */
			if (sctx->streamout.begin_emitted)
				si_emit_streamout_end(sctx);
			sctx->streamout.append_bitmask =
				sctx->streamout.enabled_mask;
			si_streamout_buffers_dirty(sctx);
		}
	}

	/* Constant and shader buffers. */
	if (!buffer || buffer->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
		for (shader = 0; shader < SI_NUM_SHADERS; shader++)
			si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
						  si_const_and_shader_buffer_descriptors_idx(shader),
						  u_bit_consecutive(SI_NUM_SHADER_BUFFERS, SI_NUM_CONST_BUFFERS),
						  buf,
						  sctx->const_and_shader_buffers[shader].priority_constbuf);
	}

	if (!buffer || buffer->bind_history & PIPE_BIND_SHADER_BUFFER) {
		for (shader = 0; shader < SI_NUM_SHADERS; shader++)
			si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
						  si_const_and_shader_buffer_descriptors_idx(shader),
						  u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS),
						  buf,
						  sctx->const_and_shader_buffers[shader].priority);
	}

	if (!buffer || buffer->bind_history & PIPE_BIND_SAMPLER_VIEW) {
		/* Texture buffers - update bindings. */
		for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
			struct si_samplers *samplers = &sctx->samplers[shader];
			struct si_descriptors *descs =
				si_sampler_and_image_descriptors(sctx, shader);
			unsigned mask = samplers->enabled_mask;

			while (mask) {
				unsigned i = u_bit_scan(&mask);
				struct pipe_resource *buffer = samplers->views[i]->texture;

				if (buffer && buffer->target == PIPE_BUFFER &&
				    (!buf || buffer == buf)) {
					unsigned desc_slot = si_get_sampler_slot(i);

					si_set_buf_desc_address(si_resource(buffer),
								samplers->views[i]->u.buf.offset,
								descs->list + desc_slot * 16 + 4);
					sctx->descriptors_dirty |=
						1u << si_sampler_and_image_descriptors_idx(shader);

					radeon_add_to_gfx_buffer_list_check_mem(
						sctx, si_resource(buffer),
						RADEON_USAGE_READ,
						RADEON_PRIO_SAMPLER_BUFFER, true);
				}
			}
		}
	}

	/* Shader images */
	if (!buffer || buffer->bind_history & PIPE_BIND_SHADER_IMAGE) {
		for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
			struct si_images *images = &sctx->images[shader];
			struct si_descriptors *descs =
				si_sampler_and_image_descriptors(sctx, shader);
			unsigned mask = images->enabled_mask;

			while (mask) {
				unsigned i = u_bit_scan(&mask);
				struct pipe_resource *buffer = images->views[i].resource;

				if (buffer && buffer->target == PIPE_BUFFER &&
				    (!buf || buffer == buf)) {
					unsigned desc_slot = si_get_image_slot(i);

					if (images->views[i].access & PIPE_IMAGE_ACCESS_WRITE)
						si_mark_image_range_valid(&images->views[i]);

					si_set_buf_desc_address(si_resource(buffer),
								images->views[i].u.buf.offset,
								descs->list + desc_slot * 8 + 4);
					sctx->descriptors_dirty |=
						1u << si_sampler_and_image_descriptors_idx(shader);

					radeon_add_to_gfx_buffer_list_check_mem(
						sctx, si_resource(buffer),
						RADEON_USAGE_READWRITE,
						RADEON_PRIO_SAMPLER_BUFFER, true);
				}
			}
		}
	}

	/* Bindless texture handles */
	if (!buffer || buffer->texture_handle_allocated) {
		struct si_descriptors *descs = &sctx->bindless_descriptors;

		util_dynarray_foreach(&sctx->resident_tex_handles,
				      struct si_texture_handle *, tex_handle) {
			struct pipe_sampler_view *view = (*tex_handle)->view;
			unsigned desc_slot = (*tex_handle)->desc_slot;
			struct pipe_resource *buffer = view->texture;

			if (buffer && buffer->target == PIPE_BUFFER &&
			    (!buf || buffer == buf)) {
				si_set_buf_desc_address(si_resource(buffer),
							view->u.buf.offset,
							descs->list +
							desc_slot * 16 + 4);

				(*tex_handle)->desc_dirty = true;
				sctx->bindless_descriptors_dirty = true;

				radeon_add_to_gfx_buffer_list_check_mem(
					sctx, si_resource(buffer),
1847 RADEON_USAGE_READ,
1848 RADEON_PRIO_SAMPLER_BUFFER, true);
1849 }
1850 }
1851 }
1852
1853 /* Bindless image handles */
1854 if (!buffer || buffer->image_handle_allocated) {
1855 struct si_descriptors *descs = &sctx->bindless_descriptors;
1856
1857 util_dynarray_foreach(&sctx->resident_img_handles,
1858 struct si_image_handle *, img_handle) {
1859 struct pipe_image_view *view = &(*img_handle)->view;
1860 unsigned desc_slot = (*img_handle)->desc_slot;
1861 struct pipe_resource *buffer = view->resource;
1862
1863 if (buffer && buffer->target == PIPE_BUFFER &&
1864 (!buf || buffer == buf)) {
1865 if (view->access & PIPE_IMAGE_ACCESS_WRITE)
1866 si_mark_image_range_valid(view);
1867
1868 si_set_buf_desc_address(si_resource(buffer),
1869 view->u.buf.offset,
1870 descs->list +
1871 desc_slot * 16 + 4);
1872
1873 (*img_handle)->desc_dirty = true;
1874 sctx->bindless_descriptors_dirty = true;
1875
1876 radeon_add_to_gfx_buffer_list_check_mem(
1877 sctx, si_resource(buffer),
1878 RADEON_USAGE_READWRITE,
1879 RADEON_PRIO_SAMPLER_BUFFER, true);
1880 }
1881 }
1882 }
1883
1884 if (buffer) {
1885 /* Do the same for other contexts. They will invoke this function
1886 * with buffer == NULL.
1887 */
1888 unsigned new_counter = p_atomic_inc_return(&sctx->screen->dirty_buf_counter);
1889
1890 /* Skip the update for the current context, because we have already updated
1891 * the buffer bindings.
1892 */
1893 if (new_counter == sctx->last_dirty_buf_counter + 1)
1894 sctx->last_dirty_buf_counter = new_counter;
1895 }
1896 }
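/* Sketch of the cross-context protocol above: each context is expected
 * to compare its last_dirty_buf_counter against
 * screen->dirty_buf_counter (the exact check site is outside this file)
 * and, on a mismatch, call:
 *
 *   si_rebind_buffer(sctx, NULL);
 *
 * which walks all bindings because buf == NULL.
 */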
1897
1898 static void si_upload_bindless_descriptor(struct si_context *sctx,
1899 unsigned desc_slot,
1900 unsigned num_dwords)
1901 {
1902 struct si_descriptors *desc = &sctx->bindless_descriptors;
1903 unsigned desc_slot_offset = desc_slot * 16;
1904 uint32_t *data;
1905 uint64_t va;
1906
1907 data = desc->list + desc_slot_offset;
1908 va = desc->gpu_address + desc_slot_offset * 4;
1909
1910 si_cp_write_data(sctx, desc->buffer, va - desc->buffer->gpu_address,
1911 num_dwords * 4, V_370_TC_L2, V_370_ME, data);
1912 }
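/* Example of the addressing above (illustrative numbers): bindless slots
 * are 16 dwords, so for desc_slot == 5:
 *
 *   desc_slot_offset = 5 * 16;              // dword offset 80
 *   va = desc->gpu_address + 80 * 4;        // byte offset 320
 *
 * The CP write thus patches just that slot in place instead of
 * re-uploading the whole array.
 */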
1913
1914 static void si_upload_bindless_descriptors(struct si_context *sctx)
1915 {
1916 if (!sctx->bindless_descriptors_dirty)
1917 return;
1918
1919 /* Wait for graphics/compute to be idle before updating the resident
1920 * descriptors directly in memory, in case the GPU is using them.
1921 */
1922 sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
1923 SI_CONTEXT_CS_PARTIAL_FLUSH;
1924 sctx->emit_cache_flush(sctx);
1925
1926 util_dynarray_foreach(&sctx->resident_tex_handles,
1927 struct si_texture_handle *, tex_handle) {
1928 unsigned desc_slot = (*tex_handle)->desc_slot;
1929
1930 if (!(*tex_handle)->desc_dirty)
1931 continue;
1932
1933 si_upload_bindless_descriptor(sctx, desc_slot, 16);
1934 (*tex_handle)->desc_dirty = false;
1935 }
1936
1937 util_dynarray_foreach(&sctx->resident_img_handles,
1938 struct si_image_handle *, img_handle) {
1939 unsigned desc_slot = (*img_handle)->desc_slot;
1940
1941 if (!(*img_handle)->desc_dirty)
1942 continue;
1943
1944 si_upload_bindless_descriptor(sctx, desc_slot, 8);
1945 (*img_handle)->desc_dirty = false;
1946 }
1947
1948 /* Invalidate L1 because it doesn't know that L2 changed. */
1949 sctx->flags |= SI_CONTEXT_INV_SCACHE;
1950 sctx->emit_cache_flush(sctx);
1951
1952 sctx->bindless_descriptors_dirty = false;
1953 }
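/* Ordering sketch for the function above:
 *
 *   PS/CS_PARTIAL_FLUSH  ->  CP WRITE_DATA (through TC L2)  ->  INV_SCACHE
 *
 * i.e. wait for in-flight waves, patch the dirty slots in memory, then
 * invalidate the scalar cache so new waves re-read the updated
 * descriptors.
 */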
1954
1955 /* Update mutable image descriptor fields of a resident texture. */
1956 static void si_update_bindless_texture_descriptor(struct si_context *sctx,
1957 struct si_texture_handle *tex_handle)
1958 {
1959 struct si_sampler_view *sview = (struct si_sampler_view *)tex_handle->view;
1960 struct si_descriptors *desc = &sctx->bindless_descriptors;
1961 unsigned desc_slot_offset = tex_handle->desc_slot * 16;
1962 uint32_t desc_list[16];
1963
1964 if (sview->base.texture->target == PIPE_BUFFER)
1965 return;
1966
1967 memcpy(desc_list, desc->list + desc_slot_offset, sizeof(desc_list));
1968 si_set_sampler_view_desc(sctx, sview, &tex_handle->sstate,
1969 desc->list + desc_slot_offset);
1970
1971 if (memcmp(desc_list, desc->list + desc_slot_offset,
1972 sizeof(desc_list))) {
1973 tex_handle->desc_dirty = true;
1974 sctx->bindless_descriptors_dirty = true;
1975 }
1976 }
1977
1978 static void si_update_bindless_image_descriptor(struct si_context *sctx,
1979 struct si_image_handle *img_handle)
1980 {
1981 struct si_descriptors *desc = &sctx->bindless_descriptors;
1982 unsigned desc_slot_offset = img_handle->desc_slot * 16;
1983 struct pipe_image_view *view = &img_handle->view;
1984 uint32_t desc_list[8];
1985
1986 if (view->resource->target == PIPE_BUFFER)
1987 return;
1988
1989 memcpy(desc_list, desc->list + desc_slot_offset,
1990 sizeof(desc_list));
1991 si_set_shader_image_desc(sctx, view, true,
1992 desc->list + desc_slot_offset, NULL);
1993
1994 if (memcmp(desc_list, desc->list + desc_slot_offset,
1995 sizeof(desc_list))) {
1996 img_handle->desc_dirty = true;
1997 sctx->bindless_descriptors_dirty = true;
1998 }
1999 }
2000
2001 static void si_update_all_resident_texture_descriptors(struct si_context *sctx)
2002 {
2003 util_dynarray_foreach(&sctx->resident_tex_handles,
2004 struct si_texture_handle *, tex_handle) {
2005 si_update_bindless_texture_descriptor(sctx, *tex_handle);
2006 }
2007
2008 util_dynarray_foreach(&sctx->resident_img_handles,
2009 struct si_image_handle *, img_handle) {
2010 si_update_bindless_image_descriptor(sctx, *img_handle);
2011 }
2012
2013 si_upload_bindless_descriptors(sctx);
2014 }
2015
2016 /* Update mutable image descriptor fields of all bound textures. */
2017 void si_update_all_texture_descriptors(struct si_context *sctx)
2018 {
2019 unsigned shader;
2020
2021 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
2022 struct si_samplers *samplers = &sctx->samplers[shader];
2023 struct si_images *images = &sctx->images[shader];
2024 unsigned mask;
2025
2026 /* Images. */
2027 mask = images->enabled_mask;
2028 while (mask) {
2029 unsigned i = u_bit_scan(&mask);
2030 struct pipe_image_view *view = &images->views[i];
2031
2032 if (!view->resource ||
2033 view->resource->target == PIPE_BUFFER)
2034 continue;
2035
2036 si_set_shader_image(sctx, shader, i, view, true);
2037 }
2038
2039 /* Sampler views. */
2040 mask = samplers->enabled_mask;
2041 while (mask) {
2042 unsigned i = u_bit_scan(&mask);
2043 struct pipe_sampler_view *view = samplers->views[i];
2044
2045 if (!view ||
2046 !view->texture ||
2047 view->texture->target == PIPE_BUFFER)
2048 continue;
2049
2050 si_set_sampler_view(sctx, shader, i,
2051 samplers->views[i], true);
2052 }
2053
2054 si_update_shader_needs_decompress_mask(sctx, shader);
2055 }
2056
2057 si_update_all_resident_texture_descriptors(sctx);
2058 si_update_ps_colorbuf0_slot(sctx);
2059 }
2060
2061 /* SHADER USER DATA */
2062
2063 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
2064 unsigned shader)
2065 {
2066 sctx->shader_pointers_dirty |=
2067 u_bit_consecutive(SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS,
2068 SI_NUM_SHADER_DESCS);
2069
2070 if (shader == PIPE_SHADER_VERTEX)
2071 sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
2072
2073 si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
2074 }
2075
2076 static void si_shader_pointers_begin_new_cs(struct si_context *sctx)
2077 {
2078 sctx->shader_pointers_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
2079 sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
2080 si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
2081 sctx->graphics_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
2082 sctx->compute_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
2083 }
2084
2085 /* Set a base register address for user data constants in the given shader.
2086 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
2087 */
2088 static void si_set_user_data_base(struct si_context *sctx,
2089 unsigned shader, uint32_t new_base)
2090 {
2091 uint32_t *base = &sctx->shader_pointers.sh_base[shader];
2092
2093 if (*base != new_base) {
2094 *base = new_base;
2095
2096 if (new_base)
2097 si_mark_shader_pointers_dirty(sctx, shader);
2098
2099 /* Any change in enabled shader stages requires re-emitting
2100 * the VS state SGPR, because it contains the clamp_vertex_color
2101 			 * state, which can be applied by the VS, TES, or GS stage.
2102 */
2103 sctx->last_vs_state = ~0;
2104 }
2105 }
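/* Example mapping (taken from the callers below): on pre-GFX10 chips
 * with neither tessellation nor geometry active, the API vertex shader
 * runs as a HW VS, so the callers do:
 *
 *   si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
 *                         R_00B130_SPI_SHADER_USER_DATA_VS_0);
 */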
2106
2107 /* This must be called when these are changed between enabled and disabled
2108 * - geometry shader
2109 * - tessellation evaluation shader
2110 * - NGG
2111 */
2112 void si_shader_change_notify(struct si_context *sctx)
2113 {
2114 /* VS can be bound as VS, ES, or LS. */
2115 if (sctx->tes_shader.cso) {
2116 if (sctx->chip_class >= GFX10) {
2117 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2118 R_00B430_SPI_SHADER_USER_DATA_HS_0);
2119 } else if (sctx->chip_class == GFX9) {
2120 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2121 R_00B430_SPI_SHADER_USER_DATA_LS_0);
2122 } else {
2123 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2124 R_00B530_SPI_SHADER_USER_DATA_LS_0);
2125 }
2126 } else if (sctx->chip_class >= GFX10) {
2127 if (sctx->ngg || sctx->gs_shader.cso) {
2128 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2129 R_00B230_SPI_SHADER_USER_DATA_GS_0);
2130 } else {
2131 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2132 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2133 }
2134 } else if (sctx->gs_shader.cso) {
2135 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2136 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2137 } else {
2138 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2139 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2140 }
2141
2142 /* TES can be bound as ES, VS, or not bound. */
2143 if (sctx->tes_shader.cso) {
2144 if (sctx->chip_class >= GFX10) {
2145 if (sctx->ngg || sctx->gs_shader.cso) {
2146 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
2147 R_00B230_SPI_SHADER_USER_DATA_GS_0);
2148 } else {
2149 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
2150 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2151 }
2152 } else if (sctx->gs_shader.cso) {
2153 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
2154 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2155 } else {
2156 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
2157 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2158 }
2159 } else {
2160 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
2161 }
2162 }
2163
2164 static void si_emit_shader_pointer_head(struct radeon_cmdbuf *cs,
2165 unsigned sh_offset,
2166 unsigned pointer_count)
2167 {
2168 radeon_emit(cs, PKT3(PKT3_SET_SH_REG, pointer_count, 0));
2169 radeon_emit(cs, (sh_offset - SI_SH_REG_OFFSET) >> 2);
2170 }
2171
2172 static void si_emit_shader_pointer_body(struct si_screen *sscreen,
2173 struct radeon_cmdbuf *cs,
2174 uint64_t va)
2175 {
2176 radeon_emit(cs, va);
2177
2178 assert(va == 0 || (va >> 32) == sscreen->info.address32_hi);
2179 }
2180
2181 static void si_emit_shader_pointer(struct si_context *sctx,
2182 struct si_descriptors *desc,
2183 unsigned sh_base)
2184 {
2185 struct radeon_cmdbuf *cs = sctx->gfx_cs;
2186 unsigned sh_offset = sh_base + desc->shader_userdata_offset;
2187
2188 si_emit_shader_pointer_head(cs, sh_offset, 1);
2189 si_emit_shader_pointer_body(sctx->screen, cs, desc->gpu_address);
2190 }
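/* Sketch of the emitted PM4 (one pointer, so pointer_count == 1):
 *
 *   DW0: PKT3(PKT3_SET_SH_REG, 1, 0)            // header
 *   DW1: (sh_offset - SI_SH_REG_OFFSET) >> 2    // SH register dword index
 *   DW2: low 32 bits of the descriptor VA       // high bits are fixed;
 *                                               // see the assert above
 */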
2191
2192 static void si_emit_consecutive_shader_pointers(struct si_context *sctx,
2193 unsigned pointer_mask,
2194 unsigned sh_base)
2195 {
2196 if (!sh_base)
2197 return;
2198
2199 struct radeon_cmdbuf *cs = sctx->gfx_cs;
2200 unsigned mask = sctx->shader_pointers_dirty & pointer_mask;
2201
2202 while (mask) {
2203 int start, count;
2204 u_bit_scan_consecutive_range(&mask, &start, &count);
2205
2206 struct si_descriptors *descs = &sctx->descriptors[start];
2207 unsigned sh_offset = sh_base + descs->shader_userdata_offset;
2208
2209 si_emit_shader_pointer_head(cs, sh_offset, count);
2210 for (int i = 0; i < count; i++)
2211 si_emit_shader_pointer_body(sctx->screen, cs,
2212 descs[i].gpu_address);
2213 }
2214 }
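/* Sketch: if descriptors 3 and 4 are dirty for a stage, the scan yields
 * one consecutive range and a single packet covers both pointers:
 *
 *   unsigned mask = 0x18;                       // bits 3 and 4 set
 *   int start, count;
 *   u_bit_scan_consecutive_range(&mask, &start, &count);
 *   // start == 3, count == 2, mask == 0
 *
 * This batching relies on consecutive descriptor slots mapping to
 * consecutive user-data SGPRs (shader_userdata_offset).
 */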
2215
2216 static void si_emit_global_shader_pointers(struct si_context *sctx,
2217 struct si_descriptors *descs)
2218 {
2219 if (sctx->chip_class >= GFX10) {
2220 si_emit_shader_pointer(sctx, descs,
2221 R_00B030_SPI_SHADER_USER_DATA_PS_0);
2222 /* HW VS stage only used in non-NGG mode. */
2223 si_emit_shader_pointer(sctx, descs,
2224 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2225 si_emit_shader_pointer(sctx, descs,
2226 R_00B230_SPI_SHADER_USER_DATA_GS_0);
2227 si_emit_shader_pointer(sctx, descs,
2228 R_00B430_SPI_SHADER_USER_DATA_HS_0);
2229 return;
2230 } else if (sctx->chip_class == GFX9) {
2231 /* Broadcast it to all shader stages. */
2232 si_emit_shader_pointer(sctx, descs,
2233 R_00B530_SPI_SHADER_USER_DATA_COMMON_0);
2234 return;
2235 }
2236
2237 si_emit_shader_pointer(sctx, descs,
2238 R_00B030_SPI_SHADER_USER_DATA_PS_0);
2239 si_emit_shader_pointer(sctx, descs,
2240 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2241 si_emit_shader_pointer(sctx, descs,
2242 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2243 si_emit_shader_pointer(sctx, descs,
2244 R_00B230_SPI_SHADER_USER_DATA_GS_0);
2245 si_emit_shader_pointer(sctx, descs,
2246 R_00B430_SPI_SHADER_USER_DATA_HS_0);
2247 si_emit_shader_pointer(sctx, descs,
2248 R_00B530_SPI_SHADER_USER_DATA_LS_0);
2249 }
2250
2251 void si_emit_graphics_shader_pointers(struct si_context *sctx)
2252 {
2253 uint32_t *sh_base = sctx->shader_pointers.sh_base;
2254
2255 if (sctx->shader_pointers_dirty & (1 << SI_DESCS_RW_BUFFERS)) {
2256 si_emit_global_shader_pointers(sctx,
2257 &sctx->descriptors[SI_DESCS_RW_BUFFERS]);
2258 }
2259
2260 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(VERTEX),
2261 sh_base[PIPE_SHADER_VERTEX]);
2262 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(TESS_EVAL),
2263 sh_base[PIPE_SHADER_TESS_EVAL]);
2264 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(FRAGMENT),
2265 sh_base[PIPE_SHADER_FRAGMENT]);
2266 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(TESS_CTRL),
2267 sh_base[PIPE_SHADER_TESS_CTRL]);
2268 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(GEOMETRY),
2269 sh_base[PIPE_SHADER_GEOMETRY]);
2270
2271 sctx->shader_pointers_dirty &=
2272 ~u_bit_consecutive(SI_DESCS_RW_BUFFERS, SI_DESCS_FIRST_COMPUTE);
2273
2274 if (sctx->vertex_buffer_pointer_dirty) {
2275 struct radeon_cmdbuf *cs = sctx->gfx_cs;
2276
2277 /* Find the location of the VB descriptor pointer. */
2278 /* TODO: In the future, the pointer will be packed in unused
2279 * bits of the first 2 VB descriptors. */
2280 unsigned sh_dw_offset = SI_VS_NUM_USER_SGPR;
2281 if (sctx->chip_class >= GFX9) {
2282 if (sctx->tes_shader.cso)
2283 sh_dw_offset = GFX9_TCS_NUM_USER_SGPR;
2284 else if (sctx->gs_shader.cso)
2285 sh_dw_offset = GFX9_VSGS_NUM_USER_SGPR;
2286 }
2287
2288 unsigned sh_offset = sh_base[PIPE_SHADER_VERTEX] + sh_dw_offset * 4;
2289 si_emit_shader_pointer_head(cs, sh_offset, 1);
2290 si_emit_shader_pointer_body(sctx->screen, cs,
2291 sctx->vb_descriptors_buffer->gpu_address +
2292 sctx->vb_descriptors_offset);
2293 sctx->vertex_buffer_pointer_dirty = false;
2294 }
2295
2296 if (sctx->graphics_bindless_pointer_dirty) {
2297 si_emit_global_shader_pointers(sctx,
2298 &sctx->bindless_descriptors);
2299 sctx->graphics_bindless_pointer_dirty = false;
2300 }
2301 }
2302
2303 void si_emit_compute_shader_pointers(struct si_context *sctx)
2304 {
2305 unsigned base = R_00B900_COMPUTE_USER_DATA_0;
2306
2307 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(COMPUTE),
2308 					    base);
2309 sctx->shader_pointers_dirty &= ~SI_DESCS_SHADER_MASK(COMPUTE);
2310
2311 if (sctx->compute_bindless_pointer_dirty) {
2312 si_emit_shader_pointer(sctx, &sctx->bindless_descriptors, base);
2313 sctx->compute_bindless_pointer_dirty = false;
2314 }
2315 }
2316
2317 /* BINDLESS */
2318
2319 static void si_init_bindless_descriptors(struct si_context *sctx,
2320 struct si_descriptors *desc,
2321 short shader_userdata_rel_index,
2322 unsigned num_elements)
2323 {
2324 MAYBE_UNUSED unsigned desc_slot;
2325
2326 si_init_descriptors(desc, shader_userdata_rel_index, 16, num_elements);
2327 sctx->bindless_descriptors.num_active_slots = num_elements;
2328
2329 /* The first bindless descriptor is stored at slot 1, because 0 is not
2330 * considered to be a valid handle.
2331 */
2332 sctx->num_bindless_descriptors = 1;
2333
2334 /* Track which bindless slots are used (or not). */
2335 util_idalloc_init(&sctx->bindless_used_slots);
2336 util_idalloc_resize(&sctx->bindless_used_slots, num_elements);
2337
2338 /* Reserve slot 0 because it's an invalid handle for bindless. */
2339 desc_slot = util_idalloc_alloc(&sctx->bindless_used_slots);
2340 assert(desc_slot == 0);
2341 }
2342
2343 static void si_release_bindless_descriptors(struct si_context *sctx)
2344 {
2345 si_release_descriptors(&sctx->bindless_descriptors);
2346 util_idalloc_fini(&sctx->bindless_used_slots);
2347 }
2348
2349 static unsigned si_get_first_free_bindless_slot(struct si_context *sctx)
2350 {
2351 struct si_descriptors *desc = &sctx->bindless_descriptors;
2352 unsigned desc_slot;
2353
2354 desc_slot = util_idalloc_alloc(&sctx->bindless_used_slots);
2355 if (desc_slot >= desc->num_elements) {
2356 		/* The array of bindless descriptors is full; resize it. */
2357 unsigned slot_size = desc->element_dw_size * 4;
2358 unsigned new_num_elements = desc->num_elements * 2;
2359
2360 desc->list = REALLOC(desc->list, desc->num_elements * slot_size,
2361 new_num_elements * slot_size);
2362 desc->num_elements = new_num_elements;
2363 desc->num_active_slots = new_num_elements;
2364 }
2365
2366 assert(desc_slot);
2367 return desc_slot;
2368 }
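/* Example of the growth above (illustrative numbers): with 16-dword
 * slots, slot_size is 64 bytes, so growing from 1024 to 2048 elements
 * reallocates the CPU list from 64 KiB to 128 KiB; the GPU copy is
 * refreshed by the next si_upload_descriptors() call.
 */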
2369
2370 static unsigned
2371 si_create_bindless_descriptor(struct si_context *sctx, uint32_t *desc_list,
2372 unsigned size)
2373 {
2374 struct si_descriptors *desc = &sctx->bindless_descriptors;
2375 unsigned desc_slot, desc_slot_offset;
2376
2377 /* Find a free slot. */
2378 desc_slot = si_get_first_free_bindless_slot(sctx);
2379
2380 /* For simplicity, sampler and image bindless descriptors use fixed
2381 	 * 16-dword slots for now. Image descriptors only need 8 dwords, but this
2382 * doesn't really matter because no real apps use image handles.
2383 */
2384 desc_slot_offset = desc_slot * 16;
2385
2386 /* Copy the descriptor into the array. */
2387 memcpy(desc->list + desc_slot_offset, desc_list, size);
2388
2389 /* Re-upload the whole array of bindless descriptors into a new buffer.
2390 */
2391 if (!si_upload_descriptors(sctx, desc))
2392 return 0;
2393
2394 /* Make sure to re-emit the shader pointers for all stages. */
2395 sctx->graphics_bindless_pointer_dirty = true;
2396 sctx->compute_bindless_pointer_dirty = true;
2397
2398 return desc_slot;
2399 }
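/* Usage sketch: a returned value of 0 always means failure, which works
 * because slot 0 was reserved at init time (see
 * si_init_bindless_descriptors above). Callers therefore do:
 *
 *   handle->desc_slot = si_create_bindless_descriptor(sctx, list, size);
 *   if (!handle->desc_slot) {
 *           // allocation or upload failed; bail out
 *   }
 */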
2400
2401 static void si_update_bindless_buffer_descriptor(struct si_context *sctx,
2402 unsigned desc_slot,
2403 struct pipe_resource *resource,
2404 uint64_t offset,
2405 bool *desc_dirty)
2406 {
2407 struct si_descriptors *desc = &sctx->bindless_descriptors;
2408 struct si_resource *buf = si_resource(resource);
2409 unsigned desc_slot_offset = desc_slot * 16;
2410 uint32_t *desc_list = desc->list + desc_slot_offset + 4;
2411 uint64_t old_desc_va;
2412
2413 assert(resource->target == PIPE_BUFFER);
2414
2415 	/* Retrieve the old buffer address from the descriptor. */
2416 old_desc_va = si_desc_extract_buffer_address(desc_list);
2417
2418 if (old_desc_va != buf->gpu_address + offset) {
2419 		/* The buffer was invalidated while the handle wasn't
2420 		 * resident; update the descriptor and the dirty flag.
2421 */
2422 si_set_buf_desc_address(buf, offset, &desc_list[0]);
2423
2424 *desc_dirty = true;
2425 }
2426 }
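/* Sketch of the comparison above: the buffer descriptor lives in dwords
 * [4..7] of the 16-dword slot, so:
 *
 *   uint32_t *buf_desc = desc->list + desc_slot * 16 + 4;
 *   uint64_t  old_va   = si_desc_extract_buffer_address(buf_desc);
 *
 * If old_va != buf->gpu_address + offset, the storage moved while the
 * handle wasn't resident and the descriptor is rewritten.
 */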
2427
2428 static uint64_t si_create_texture_handle(struct pipe_context *ctx,
2429 struct pipe_sampler_view *view,
2430 const struct pipe_sampler_state *state)
2431 {
2432 struct si_sampler_view *sview = (struct si_sampler_view *)view;
2433 struct si_context *sctx = (struct si_context *)ctx;
2434 struct si_texture_handle *tex_handle;
2435 struct si_sampler_state *sstate;
2436 uint32_t desc_list[16];
2437 uint64_t handle;
2438
2439 tex_handle = CALLOC_STRUCT(si_texture_handle);
2440 if (!tex_handle)
2441 return 0;
2442
2443 memset(desc_list, 0, sizeof(desc_list));
2444 si_init_descriptor_list(&desc_list[0], 16, 1, null_texture_descriptor);
2445
2446 sstate = ctx->create_sampler_state(ctx, state);
2447 if (!sstate) {
2448 FREE(tex_handle);
2449 return 0;
2450 }
2451
2452 si_set_sampler_view_desc(sctx, sview, sstate, &desc_list[0]);
2453 memcpy(&tex_handle->sstate, sstate, sizeof(*sstate));
2454 ctx->delete_sampler_state(ctx, sstate);
2455
2456 tex_handle->desc_slot = si_create_bindless_descriptor(sctx, desc_list,
2457 sizeof(desc_list));
2458 if (!tex_handle->desc_slot) {
2459 FREE(tex_handle);
2460 return 0;
2461 }
2462
2463 handle = tex_handle->desc_slot;
2464
2465 if (!_mesa_hash_table_insert(sctx->tex_handles,
2466 (void *)(uintptr_t)handle,
2467 tex_handle)) {
2468 FREE(tex_handle);
2469 return 0;
2470 }
2471
2472 pipe_sampler_view_reference(&tex_handle->view, view);
2473
2474 si_resource(sview->base.texture)->texture_handle_allocated = true;
2475
2476 return handle;
2477 }
2478
2479 static void si_delete_texture_handle(struct pipe_context *ctx, uint64_t handle)
2480 {
2481 struct si_context *sctx = (struct si_context *)ctx;
2482 struct si_texture_handle *tex_handle;
2483 struct hash_entry *entry;
2484
2485 entry = _mesa_hash_table_search(sctx->tex_handles,
2486 (void *)(uintptr_t)handle);
2487 if (!entry)
2488 return;
2489
2490 tex_handle = (struct si_texture_handle *)entry->data;
2491
2492 /* Allow this descriptor slot to be re-used. */
2493 util_idalloc_free(&sctx->bindless_used_slots, tex_handle->desc_slot);
2494
2495 pipe_sampler_view_reference(&tex_handle->view, NULL);
2496 _mesa_hash_table_remove(sctx->tex_handles, entry);
2497 FREE(tex_handle);
2498 }
2499
2500 static void si_make_texture_handle_resident(struct pipe_context *ctx,
2501 uint64_t handle, bool resident)
2502 {
2503 struct si_context *sctx = (struct si_context *)ctx;
2504 struct si_texture_handle *tex_handle;
2505 struct si_sampler_view *sview;
2506 struct hash_entry *entry;
2507
2508 entry = _mesa_hash_table_search(sctx->tex_handles,
2509 (void *)(uintptr_t)handle);
2510 if (!entry)
2511 return;
2512
2513 tex_handle = (struct si_texture_handle *)entry->data;
2514 sview = (struct si_sampler_view *)tex_handle->view;
2515
2516 if (resident) {
2517 if (sview->base.texture->target != PIPE_BUFFER) {
2518 struct si_texture *tex =
2519 (struct si_texture *)sview->base.texture;
2520
2521 if (depth_needs_decompression(tex)) {
2522 util_dynarray_append(
2523 &sctx->resident_tex_needs_depth_decompress,
2524 struct si_texture_handle *,
2525 tex_handle);
2526 }
2527
2528 if (color_needs_decompression(tex)) {
2529 util_dynarray_append(
2530 &sctx->resident_tex_needs_color_decompress,
2531 struct si_texture_handle *,
2532 tex_handle);
2533 }
2534
2535 if (tex->dcc_offset &&
2536 p_atomic_read(&tex->framebuffers_bound))
2537 sctx->need_check_render_feedback = true;
2538
2539 si_update_bindless_texture_descriptor(sctx, tex_handle);
2540 } else {
2541 si_update_bindless_buffer_descriptor(sctx,
2542 tex_handle->desc_slot,
2543 sview->base.texture,
2544 sview->base.u.buf.offset,
2545 &tex_handle->desc_dirty);
2546 }
2547
2548 /* Re-upload the descriptor if it has been updated while it
2549 * wasn't resident.
2550 */
2551 if (tex_handle->desc_dirty)
2552 sctx->bindless_descriptors_dirty = true;
2553
2554 /* Add the texture handle to the per-context list. */
2555 util_dynarray_append(&sctx->resident_tex_handles,
2556 struct si_texture_handle *, tex_handle);
2557
2558 /* Add the buffers to the current CS in case si_begin_new_cs()
2559 * is not going to be called.
2560 */
2561 si_sampler_view_add_buffer(sctx, sview->base.texture,
2562 RADEON_USAGE_READ,
2563 sview->is_stencil_sampler, false);
2564 } else {
2565 /* Remove the texture handle from the per-context list. */
2566 util_dynarray_delete_unordered(&sctx->resident_tex_handles,
2567 struct si_texture_handle *,
2568 tex_handle);
2569
2570 if (sview->base.texture->target != PIPE_BUFFER) {
2571 util_dynarray_delete_unordered(
2572 &sctx->resident_tex_needs_depth_decompress,
2573 struct si_texture_handle *, tex_handle);
2574
2575 util_dynarray_delete_unordered(
2576 &sctx->resident_tex_needs_color_decompress,
2577 struct si_texture_handle *, tex_handle);
2578 }
2579 }
2580 }
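/* API-level usage sketch (GL_ARB_bindless_texture; assuming a GL state
 * tracker on top of this driver):
 *
 *   GLuint64 h = glGetTextureSamplerHandleARB(tex, sampler);
 *   glMakeTextureHandleResidentARB(h);     // reaches this function with
 *                                          // resident = true
 *   ...
 *   glMakeTextureHandleNonResidentARB(h);  // resident = false
 */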
2581
2582 static uint64_t si_create_image_handle(struct pipe_context *ctx,
2583 const struct pipe_image_view *view)
2584 {
2585 struct si_context *sctx = (struct si_context *)ctx;
2586 struct si_image_handle *img_handle;
2587 uint32_t desc_list[8];
2588 uint64_t handle;
2589
2590 if (!view || !view->resource)
2591 return 0;
2592
2593 img_handle = CALLOC_STRUCT(si_image_handle);
2594 if (!img_handle)
2595 return 0;
2596
2597 memset(desc_list, 0, sizeof(desc_list));
2598 si_init_descriptor_list(&desc_list[0], 8, 1, null_image_descriptor);
2599
2600 si_set_shader_image_desc(sctx, view, false, &desc_list[0], NULL);
2601
2602 img_handle->desc_slot = si_create_bindless_descriptor(sctx, desc_list,
2603 sizeof(desc_list));
2604 if (!img_handle->desc_slot) {
2605 FREE(img_handle);
2606 return 0;
2607 }
2608
2609 handle = img_handle->desc_slot;
2610
2611 if (!_mesa_hash_table_insert(sctx->img_handles,
2612 (void *)(uintptr_t)handle,
2613 img_handle)) {
2614 FREE(img_handle);
2615 return 0;
2616 }
2617
2618 util_copy_image_view(&img_handle->view, view);
2619
2620 si_resource(view->resource)->image_handle_allocated = true;
2621
2622 return handle;
2623 }
2624
2625 static void si_delete_image_handle(struct pipe_context *ctx, uint64_t handle)
2626 {
2627 struct si_context *sctx = (struct si_context *)ctx;
2628 struct si_image_handle *img_handle;
2629 struct hash_entry *entry;
2630
2631 entry = _mesa_hash_table_search(sctx->img_handles,
2632 (void *)(uintptr_t)handle);
2633 if (!entry)
2634 return;
2635
2636 img_handle = (struct si_image_handle *)entry->data;
2637
2638 util_copy_image_view(&img_handle->view, NULL);
2639 _mesa_hash_table_remove(sctx->img_handles, entry);
2640 FREE(img_handle);
2641 }
2642
2643 static void si_make_image_handle_resident(struct pipe_context *ctx,
2644 uint64_t handle, unsigned access,
2645 bool resident)
2646 {
2647 struct si_context *sctx = (struct si_context *)ctx;
2648 struct si_image_handle *img_handle;
2649 struct pipe_image_view *view;
2650 struct si_resource *res;
2651 struct hash_entry *entry;
2652
2653 entry = _mesa_hash_table_search(sctx->img_handles,
2654 (void *)(uintptr_t)handle);
2655 if (!entry)
2656 return;
2657
2658 img_handle = (struct si_image_handle *)entry->data;
2659 view = &img_handle->view;
2660 res = si_resource(view->resource);
2661
2662 if (resident) {
2663 if (res->b.b.target != PIPE_BUFFER) {
2664 struct si_texture *tex = (struct si_texture *)res;
2665 unsigned level = view->u.tex.level;
2666
2667 if (color_needs_decompression(tex)) {
2668 util_dynarray_append(
2669 &sctx->resident_img_needs_color_decompress,
2670 struct si_image_handle *,
2671 img_handle);
2672 }
2673
2674 if (vi_dcc_enabled(tex, level) &&
2675 p_atomic_read(&tex->framebuffers_bound))
2676 sctx->need_check_render_feedback = true;
2677
2678 si_update_bindless_image_descriptor(sctx, img_handle);
2679 } else {
2680 si_update_bindless_buffer_descriptor(sctx,
2681 img_handle->desc_slot,
2682 view->resource,
2683 view->u.buf.offset,
2684 &img_handle->desc_dirty);
2685 }
2686
2687 /* Re-upload the descriptor if it has been updated while it
2688 * wasn't resident.
2689 */
2690 if (img_handle->desc_dirty)
2691 sctx->bindless_descriptors_dirty = true;
2692
2693 /* Add the image handle to the per-context list. */
2694 util_dynarray_append(&sctx->resident_img_handles,
2695 struct si_image_handle *, img_handle);
2696
2697 /* Add the buffers to the current CS in case si_begin_new_cs()
2698 * is not going to be called.
2699 */
2700 si_sampler_view_add_buffer(sctx, view->resource,
2701 (access & PIPE_IMAGE_ACCESS_WRITE) ?
2702 RADEON_USAGE_READWRITE :
2703 RADEON_USAGE_READ, false, false);
2704 } else {
2705 /* Remove the image handle from the per-context list. */
2706 util_dynarray_delete_unordered(&sctx->resident_img_handles,
2707 struct si_image_handle *,
2708 img_handle);
2709
2710 if (res->b.b.target != PIPE_BUFFER) {
2711 util_dynarray_delete_unordered(
2712 &sctx->resident_img_needs_color_decompress,
2713 struct si_image_handle *,
2714 img_handle);
2715 }
2716 }
2717 }
2718
2719 static void si_resident_buffers_add_all_to_bo_list(struct si_context *sctx)
2720 {
2721 unsigned num_resident_tex_handles, num_resident_img_handles;
2722
2723 num_resident_tex_handles = sctx->resident_tex_handles.size /
2724 sizeof(struct si_texture_handle *);
2725 num_resident_img_handles = sctx->resident_img_handles.size /
2726 sizeof(struct si_image_handle *);
2727
2728 /* Add all resident texture handles. */
2729 util_dynarray_foreach(&sctx->resident_tex_handles,
2730 struct si_texture_handle *, tex_handle) {
2731 struct si_sampler_view *sview =
2732 (struct si_sampler_view *)(*tex_handle)->view;
2733
2734 si_sampler_view_add_buffer(sctx, sview->base.texture,
2735 RADEON_USAGE_READ,
2736 sview->is_stencil_sampler, false);
2737 }
2738
2739 /* Add all resident image handles. */
2740 util_dynarray_foreach(&sctx->resident_img_handles,
2741 struct si_image_handle *, img_handle) {
2742 struct pipe_image_view *view = &(*img_handle)->view;
2743
2744 si_sampler_view_add_buffer(sctx, view->resource,
2745 RADEON_USAGE_READWRITE,
2746 false, false);
2747 }
2748
2749 sctx->num_resident_handles += num_resident_tex_handles +
2750 num_resident_img_handles;
2751 assert(sctx->bo_list_add_all_resident_resources);
2752 sctx->bo_list_add_all_resident_resources = false;
2753 }
2754
2755 /* INIT/DEINIT/UPLOAD */
2756
2757 void si_init_all_descriptors(struct si_context *sctx)
2758 {
2759 int i;
2760 unsigned first_shader =
2761 sctx->has_graphics ? 0 : PIPE_SHADER_COMPUTE;
2762
2763 for (i = first_shader; i < SI_NUM_SHADERS; i++) {
2764 bool is_2nd = sctx->chip_class >= GFX9 &&
2765 (i == PIPE_SHADER_TESS_CTRL ||
2766 i == PIPE_SHADER_GEOMETRY);
2767 unsigned num_sampler_slots = SI_NUM_IMAGES / 2 + SI_NUM_SAMPLERS;
2768 unsigned num_buffer_slots = SI_NUM_SHADER_BUFFERS + SI_NUM_CONST_BUFFERS;
2769 int rel_dw_offset;
2770 struct si_descriptors *desc;
2771
2772 if (is_2nd) {
2773 if (i == PIPE_SHADER_TESS_CTRL) {
2774 rel_dw_offset = (R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS -
2775 R_00B430_SPI_SHADER_USER_DATA_LS_0) / 4;
2776 } else if (sctx->chip_class >= GFX10) { /* PIPE_SHADER_GEOMETRY */
2777 rel_dw_offset = (R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS -
2778 R_00B230_SPI_SHADER_USER_DATA_GS_0) / 4;
2779 } else {
2780 rel_dw_offset = (R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS -
2781 R_00B330_SPI_SHADER_USER_DATA_ES_0) / 4;
2782 }
2783 } else {
2784 rel_dw_offset = SI_SGPR_CONST_AND_SHADER_BUFFERS;
2785 }
2786 desc = si_const_and_shader_buffer_descriptors(sctx, i);
2787 si_init_buffer_resources(&sctx->const_and_shader_buffers[i], desc,
2788 num_buffer_slots, rel_dw_offset,
2789 RADEON_PRIO_SHADER_RW_BUFFER,
2790 RADEON_PRIO_CONST_BUFFER);
2791 desc->slot_index_to_bind_directly = si_get_constbuf_slot(0);
2792
2793 if (is_2nd) {
2794 if (i == PIPE_SHADER_TESS_CTRL) {
2795 rel_dw_offset = (R_00B40C_SPI_SHADER_USER_DATA_ADDR_HI_HS -
2796 R_00B430_SPI_SHADER_USER_DATA_LS_0) / 4;
2797 } else if (sctx->chip_class >= GFX10) { /* PIPE_SHADER_GEOMETRY */
2798 rel_dw_offset = (R_00B20C_SPI_SHADER_USER_DATA_ADDR_HI_GS -
2799 R_00B230_SPI_SHADER_USER_DATA_GS_0) / 4;
2800 } else {
2801 rel_dw_offset = (R_00B20C_SPI_SHADER_USER_DATA_ADDR_HI_GS -
2802 R_00B330_SPI_SHADER_USER_DATA_ES_0) / 4;
2803 }
2804 } else {
2805 rel_dw_offset = SI_SGPR_SAMPLERS_AND_IMAGES;
2806 }
2807
2808 desc = si_sampler_and_image_descriptors(sctx, i);
2809 si_init_descriptors(desc, rel_dw_offset, 16, num_sampler_slots);
2810
2811 int j;
2812 for (j = 0; j < SI_NUM_IMAGES; j++)
2813 memcpy(desc->list + j * 8, null_image_descriptor, 8 * 4);
2814 for (; j < SI_NUM_IMAGES + SI_NUM_SAMPLERS * 2; j++)
2815 memcpy(desc->list + j * 8, null_texture_descriptor, 8 * 4);
2816 }
2817
2818 si_init_buffer_resources(&sctx->rw_buffers,
2819 &sctx->descriptors[SI_DESCS_RW_BUFFERS],
2820 SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
2821 /* The second priority is used by
2822 * const buffers in RW buffer slots. */
2823 RADEON_PRIO_SHADER_RINGS, RADEON_PRIO_CONST_BUFFER);
2824 sctx->descriptors[SI_DESCS_RW_BUFFERS].num_active_slots = SI_NUM_RW_BUFFERS;
2825
2826 	/* Initialize an array of 1024 bindless descriptors; when the limit is
2827 	 * reached, make it larger and re-upload the whole array.
2828 */
2829 si_init_bindless_descriptors(sctx, &sctx->bindless_descriptors,
2830 SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES,
2831 1024);
2832
2833 sctx->descriptors_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
2834
2835 /* Set pipe_context functions. */
2836 sctx->b.bind_sampler_states = si_bind_sampler_states;
2837 sctx->b.set_shader_images = si_set_shader_images;
2838 sctx->b.set_constant_buffer = si_pipe_set_constant_buffer;
2839 sctx->b.set_shader_buffers = si_set_shader_buffers;
2840 sctx->b.set_sampler_views = si_set_sampler_views;
2841 sctx->b.create_texture_handle = si_create_texture_handle;
2842 sctx->b.delete_texture_handle = si_delete_texture_handle;
2843 sctx->b.make_texture_handle_resident = si_make_texture_handle_resident;
2844 sctx->b.create_image_handle = si_create_image_handle;
2845 sctx->b.delete_image_handle = si_delete_image_handle;
2846 sctx->b.make_image_handle_resident = si_make_image_handle_resident;
2847
2848 if (!sctx->has_graphics)
2849 return;
2850
2851 sctx->b.set_polygon_stipple = si_set_polygon_stipple;
2852
2853 /* Shader user data. */
2854 sctx->atoms.s.shader_pointers.emit = si_emit_graphics_shader_pointers;
2855
2856 /* Set default and immutable mappings. */
2857 if (sctx->ngg) {
2858 assert(sctx->chip_class >= GFX10);
2859 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B230_SPI_SHADER_USER_DATA_GS_0);
2860 } else {
2861 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
2862 }
2863
2864 if (sctx->chip_class == GFX9) {
2865 si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL,
2866 R_00B430_SPI_SHADER_USER_DATA_LS_0);
2867 si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY,
2868 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2869 } else {
2870 si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL,
2871 R_00B430_SPI_SHADER_USER_DATA_HS_0);
2872 si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY,
2873 R_00B230_SPI_SHADER_USER_DATA_GS_0);
2874 }
2875 si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
2876 }
2877
2878 static bool si_upload_shader_descriptors(struct si_context *sctx, unsigned mask)
2879 {
2880 unsigned dirty = sctx->descriptors_dirty & mask;
2881
2882 /* Assume nothing will go wrong: */
2883 sctx->shader_pointers_dirty |= dirty;
2884
2885 while (dirty) {
2886 unsigned i = u_bit_scan(&dirty);
2887
2888 if (!si_upload_descriptors(sctx, &sctx->descriptors[i]))
2889 return false;
2890 }
2891
2892 sctx->descriptors_dirty &= ~mask;
2893
2894 si_upload_bindless_descriptors(sctx);
2895
2896 return true;
2897 }
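/* Usage sketch: the two wrappers below build the mask from the
 * descriptor index layout, e.g. for graphics:
 *
 *   unsigned mask = u_bit_consecutive(0, SI_DESCS_FIRST_COMPUTE);
 *   si_upload_shader_descriptors(sctx, mask);
 */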
2898
2899 bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
2900 {
2901 const unsigned mask = u_bit_consecutive(0, SI_DESCS_FIRST_COMPUTE);
2902 return si_upload_shader_descriptors(sctx, mask);
2903 }
2904
2905 bool si_upload_compute_shader_descriptors(struct si_context *sctx)
2906 {
2907 	/* Does not update rw_buffers, as that is not needed for compute shaders;
2908 	 * the input buffer uses the same SGPRs anyway.
2909 */
2910 const unsigned mask = u_bit_consecutive(SI_DESCS_FIRST_COMPUTE,
2911 SI_NUM_DESCS - SI_DESCS_FIRST_COMPUTE);
2912 return si_upload_shader_descriptors(sctx, mask);
2913 }
2914
2915 void si_release_all_descriptors(struct si_context *sctx)
2916 {
2917 int i;
2918
2919 for (i = 0; i < SI_NUM_SHADERS; i++) {
2920 si_release_buffer_resources(&sctx->const_and_shader_buffers[i],
2921 si_const_and_shader_buffer_descriptors(sctx, i));
2922 si_release_sampler_views(&sctx->samplers[i]);
2923 si_release_image_views(&sctx->images[i]);
2924 }
2925 si_release_buffer_resources(&sctx->rw_buffers,
2926 &sctx->descriptors[SI_DESCS_RW_BUFFERS]);
2927 for (i = 0; i < SI_NUM_VERTEX_BUFFERS; i++)
2928 pipe_vertex_buffer_unreference(&sctx->vertex_buffer[i]);
2929
2930 for (i = 0; i < SI_NUM_DESCS; ++i)
2931 si_release_descriptors(&sctx->descriptors[i]);
2932
2933 si_resource_reference(&sctx->vb_descriptors_buffer, NULL);
2934 sctx->vb_descriptors_gpu_list = NULL; /* points into a mapped buffer */
2935
2936 si_release_bindless_descriptors(sctx);
2937 }
2938
2939 void si_gfx_resources_add_all_to_bo_list(struct si_context *sctx)
2940 {
2941 for (unsigned i = 0; i < SI_NUM_GRAPHICS_SHADERS; i++) {
2942 si_buffer_resources_begin_new_cs(sctx, &sctx->const_and_shader_buffers[i]);
2943 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i]);
2944 si_image_views_begin_new_cs(sctx, &sctx->images[i]);
2945 }
2946 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
2947 si_vertex_buffers_begin_new_cs(sctx);
2948
2949 if (sctx->bo_list_add_all_resident_resources)
2950 si_resident_buffers_add_all_to_bo_list(sctx);
2951
2952 assert(sctx->bo_list_add_all_gfx_resources);
2953 sctx->bo_list_add_all_gfx_resources = false;
2954 }
2955
2956 void si_compute_resources_add_all_to_bo_list(struct si_context *sctx)
2957 {
2958 unsigned sh = PIPE_SHADER_COMPUTE;
2959
2960 si_buffer_resources_begin_new_cs(sctx, &sctx->const_and_shader_buffers[sh]);
2961 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[sh]);
2962 si_image_views_begin_new_cs(sctx, &sctx->images[sh]);
2963 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
2964
2965 if (sctx->bo_list_add_all_resident_resources)
2966 si_resident_buffers_add_all_to_bo_list(sctx);
2967
2968 assert(sctx->bo_list_add_all_compute_resources);
2969 sctx->bo_list_add_all_compute_resources = false;
2970 }
2971
2972 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
2973 {
2974 for (unsigned i = 0; i < SI_NUM_DESCS; ++i)
2975 si_descriptors_begin_new_cs(sctx, &sctx->descriptors[i]);
2976 si_descriptors_begin_new_cs(sctx, &sctx->bindless_descriptors);
2977
2978 si_shader_pointers_begin_new_cs(sctx);
2979
2980 sctx->bo_list_add_all_resident_resources = true;
2981 sctx->bo_list_add_all_gfx_resources = true;
2982 sctx->bo_list_add_all_compute_resources = true;
2983 }
2984
2985 void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
2986 uint64_t new_active_mask)
2987 {
2988 struct si_descriptors *desc = &sctx->descriptors[desc_idx];
2989
2990 /* Ignore no-op updates and updates that disable all slots. */
2991 if (!new_active_mask ||
2992 new_active_mask == u_bit_consecutive64(desc->first_active_slot,
2993 desc->num_active_slots))
2994 return;
2995
2996 int first, count;
2997 u_bit_scan_consecutive_range64(&new_active_mask, &first, &count);
2998 assert(new_active_mask == 0);
2999
3000 	/* Mark descriptors dirty if slots are being enabled, so they're re-uploaded. */
3001 if (first < desc->first_active_slot ||
3002 first + count > desc->first_active_slot + desc->num_active_slots)
3003 sctx->descriptors_dirty |= 1u << desc_idx;
3004
3005 desc->first_active_slot = first;
3006 desc->num_active_slots = count;
3007 }
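/* Example: a shader whose only used slots are constant buffers 0 and 1
 * (which sit right after the shader buffer slots in this layout) would
 * pass:
 *
 *   uint64_t active = u_bit_consecutive64(SI_NUM_SHADER_BUFFERS, 2);
 *
 * shrinking the uploaded window to those two slots. The mask must be a
 * single consecutive range, as the assert above enforces.
 */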
3008
3009 void si_set_active_descriptors_for_shader(struct si_context *sctx,
3010 struct si_shader_selector *sel)
3011 {
3012 if (!sel)
3013 return;
3014
3015 si_set_active_descriptors(sctx,
3016 si_const_and_shader_buffer_descriptors_idx(sel->type),
3017 sel->active_const_and_shader_buffers);
3018 si_set_active_descriptors(sctx,
3019 si_sampler_and_image_descriptors_idx(sel->type),
3020 sel->active_samplers_and_images);
3021 }