src/gallium/drivers/radeonsi/si_descriptors.c
1 /*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Marek Olšák <marek.olsak@amd.com>
25 */
26
27 /* Resource binding slots and sampler states (each described with 8 or
28 * 4 dwords) are stored in lists in memory which is accessed by shaders
29 * using scalar load instructions.
30 *
31 * This file is responsible for managing such lists. It keeps a copy of all
32 * descriptors in CPU memory and re-uploads a whole list if some slots have
33 * been changed.
34 *
35 * This code is also responsible for updating shader pointers to those lists.
36 *
37 * Note that CP DMA can't be used for updating the lists, because a GPU hang
38 * could leave the list in a mid-IB state and the next IB would get wrong
39 * descriptors and the whole context would be unusable at that point.
40 * (Note: register shadowing can't be used for the same reason.)
41 *
42 * Also, uploading descriptors to newly allocated memory doesn't require
43 * a KCACHE flush.
44 *
45 *
46 * Possible scenarios for one 16 dword image+sampler slot:
47 *
48 * | Image | w/ FMASK | Buffer | NULL
49 * [ 0: 3] Image[0:3] | Image[0:3] | Null[0:3] | Null[0:3]
50 * [ 4: 7] Image[4:7] | Image[4:7] | Buffer[0:3] | 0
51 * [ 8:11] Null[0:3] | Fmask[0:3] | Null[0:3] | Null[0:3]
52 * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
53 *
54 * FMASK implies MSAA, therefore no sampler state.
55 * Sampler states are never unbound except when FMASK is bound.
56 */
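/* Illustration (editor's sketch, not driver code): with the layout above,
 * the dword offsets within one combined 16-dword slot work out to:
 *
 *   uint32_t *slot    = desc_list + desc_slot * 16; // one slot = 16 dwords
 *   uint32_t *image   = slot + 0;   // image/texture descriptor [0:7]
 *   uint32_t *fmask   = slot + 8;   // FMASK or null descriptor [8:11]
 *   uint32_t *sampler = slot + 12;  // sampler state            [12:15]
 *
 * Buffer views reuse dwords [4:7], which is why dwords [0:3] must always
 * form a valid (null) texture descriptor.
 */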
57
58 #include "radeon/r600_cs.h"
59 #include "si_pipe.h"
60 #include "sid.h"
61 #include "gfx9d.h"
62
63 #include "util/hash_table.h"
64 #include "util/u_format.h"
65 #include "util/u_memory.h"
66 #include "util/u_upload_mgr.h"
67
68
69 /* NULL image and buffer descriptor for textures (alpha = 1) and images
70 * (alpha = 0).
71 *
72 * For images, all fields must be zero except for the swizzle, which
73 * supports arbitrary combinations of 0s and 1s. The texture type must be
74 * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
75 *
76 * For buffers, all fields must be zero. If they are not, the hw hangs.
77 *
78 * This is the only reason why the buffer descriptor must be in words [4:7].
79 */
80 static uint32_t null_texture_descriptor[8] = {
81 0,
82 0,
83 0,
84 S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
85 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
86 /* the rest must contain zeros, which also serves as the null
87 * buffer descriptor */
88 };
89
90 static uint32_t null_image_descriptor[8] = {
91 0,
92 0,
93 0,
94 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
95 /* the rest must contain zeros, which also serves as the null
96 * buffer descriptor */
97 };
98
99 static uint16_t si_ce_ram_size(struct si_context *sctx)
100 {
101 return sctx->b.chip_class >= GFX9 ? 4096 : 32768;
102 }
103
104 static void si_init_descriptor_list(uint32_t *desc_list,
105 unsigned element_dw_size,
106 unsigned num_elements,
107 const uint32_t *null_descriptor)
108 {
109 int i;
110
111 /* Initialize the array to NULL descriptors if the element size is a multiple of 8. */
112 if (null_descriptor) {
113 assert(element_dw_size % 8 == 0);
114 for (i = 0; i < num_elements * element_dw_size / 8; i++)
115 memcpy(desc_list + i * 8, null_descriptor, 8 * 4);
116 }
117 }
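/* Usage sketch (illustrative, assuming a combined sampler+image list):
 *
 *   uint32_t list[2 * 16];
 *   si_init_descriptor_list(list, 16, 2, null_texture_descriptor);
 *
 * With element_dw_size = 16 and num_elements = 2, the loop runs
 * 2 * 16 / 8 = 4 times and seeds each 8-dword half of both slots with
 * the null texture descriptor.
 */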
118
119 static void si_init_descriptors(struct si_context *sctx,
120 struct si_descriptors *desc,
121 unsigned shader_userdata_index,
122 unsigned element_dw_size,
123 unsigned num_elements,
124 unsigned first_ce_slot,
125 unsigned num_ce_slots,
126 unsigned *ce_offset)
127 {
128 desc->list = CALLOC(num_elements, element_dw_size * 4);
129 desc->element_dw_size = element_dw_size;
130 desc->num_elements = num_elements;
131 desc->first_ce_slot = sctx->ce_ib ? first_ce_slot : 0;
132 desc->num_ce_slots = sctx->ce_ib ? num_ce_slots : 0;
133 desc->dirty_mask = 0;
134 desc->shader_userdata_offset = shader_userdata_index * 4;
135
136 if (desc->num_ce_slots) {
137 assert(num_elements <= sizeof(desc->dirty_mask)*8);
138
139 desc->uses_ce = true;
140 desc->ce_offset = *ce_offset;
141 desc->dirty_mask = u_bit_consecutive64(0, num_elements);
142
143 *ce_offset += element_dw_size * desc->num_ce_slots * 4;
144 }
145 }
146
147 static void si_release_descriptors(struct si_descriptors *desc)
148 {
149 r600_resource_reference(&desc->buffer, NULL);
150 FREE(desc->list);
151 }
152
153 static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
154 unsigned *out_offset, struct r600_resource **out_buf)
155 {
156 uint64_t va;
157 unsigned cache_line_size = sctx->screen->b.info.tcc_cache_line_size;
158
159 /* The base and size should be aligned to the L2 cache line size
160 * for optimal performance, so that every dump rewrites whole
161 * cache lines. */
162 size = align(size, cache_line_size);
163
164 (void)si_ce_ram_size; /* silence an "unused" warning */
165 assert(ce_offset + size <= si_ce_ram_size(sctx));
166
167 u_suballocator_alloc(sctx->ce_suballocator, size, cache_line_size,
168 out_offset, (struct pipe_resource**)out_buf);
169 if (!*out_buf)
170 return false;
171
172 va = (*out_buf)->gpu_address + *out_offset;
173
174 radeon_emit(sctx->ce_ib, PKT3(PKT3_DUMP_CONST_RAM, 3, 0));
175 radeon_emit(sctx->ce_ib, ce_offset);
176 radeon_emit(sctx->ce_ib, size / 4);
177 radeon_emit(sctx->ce_ib, va);
178 radeon_emit(sctx->ce_ib, va >> 32);
179
180 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, *out_buf,
181 RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
182
183 sctx->ce_need_synchronization = true;
184 return true;
185 }
186
187 void si_ce_save_all_descriptors_at_ib_end(struct si_context* sctx)
188 {
189 bool success = si_ce_upload(sctx, 0, sctx->total_ce_ram_allocated,
190 &sctx->ce_ram_saved_offset,
191 &sctx->ce_ram_saved_buffer);
192 (void)success;
193 assert(success);
194 }
195
196 void si_ce_restore_all_descriptors_at_ib_start(struct si_context *sctx)
197 {
198 if (!sctx->ce_ram_saved_buffer)
199 return;
200
201 struct radeon_winsys_cs *ib = sctx->ce_preamble_ib;
202 if (!ib)
203 ib = sctx->ce_ib;
204
205 uint64_t va = sctx->ce_ram_saved_buffer->gpu_address +
206 sctx->ce_ram_saved_offset;
207
208 radeon_emit(ib, PKT3(PKT3_LOAD_CONST_RAM, 3, 0));
209 radeon_emit(ib, va);
210 radeon_emit(ib, va >> 32);
211 radeon_emit(ib, sctx->total_ce_ram_allocated / 4);
212 radeon_emit(ib, 0);
213
214 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
215 sctx->ce_ram_saved_buffer,
216 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
217 }
218
219 void si_ce_enable_loads(struct radeon_winsys_cs *ib)
220 {
221 radeon_emit(ib, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
222 radeon_emit(ib, CONTEXT_CONTROL_LOAD_ENABLE(1) |
223 CONTEXT_CONTROL_LOAD_CE_RAM(1));
224 radeon_emit(ib, CONTEXT_CONTROL_SHADOW_ENABLE(1));
225 }
226
227 static bool si_upload_descriptors(struct si_context *sctx,
228 struct si_descriptors *desc,
229 struct r600_atom * atom)
230 {
231 unsigned slot_size = desc->element_dw_size * 4;
232 unsigned first_slot_offset = desc->first_active_slot * slot_size;
233 unsigned upload_size = desc->num_active_slots * slot_size;
234
235 /* Skip the upload if no shader is using the descriptors. The dirty
236 * bits will remain set, and the descriptors will be uploaded once
237 * a shader starts using them.
238 */
239 if (!upload_size)
240 return true;
241
242 if (desc->uses_ce) {
243 const uint32_t *list = desc->list +
244 desc->first_ce_slot * desc->element_dw_size;
245 uint64_t mask = (desc->dirty_mask >> desc->first_ce_slot) &
246 u_bit_consecutive64(0, desc->num_ce_slots);
247
248
249 while (mask) {
250 int begin, count;
251 u_bit_scan_consecutive_range64(&mask, &begin, &count);
252
253 begin *= desc->element_dw_size;
254 count *= desc->element_dw_size;
255
256 radeon_emit(sctx->ce_ib,
257 PKT3(PKT3_WRITE_CONST_RAM, count, 0));
258 radeon_emit(sctx->ce_ib, desc->ce_offset + begin * 4);
259 radeon_emit_array(sctx->ce_ib, list + begin, count);
260 }
261
262 if (!si_ce_upload(sctx,
263 desc->ce_offset +
264 (first_slot_offset - desc->first_ce_slot * slot_size),
265 upload_size, (unsigned*)&desc->buffer_offset,
266 &desc->buffer))
267 return false;
268 } else {
269 uint32_t *ptr;
270
271 u_upload_alloc(sctx->b.b.const_uploader, 0, upload_size,
272 si_optimal_tcc_alignment(sctx, upload_size),
273 (unsigned*)&desc->buffer_offset,
274 (struct pipe_resource**)&desc->buffer,
275 (void**)&ptr);
276 if (!desc->buffer)
277 return false; /* skip the draw call */
278
279 util_memcpy_cpu_to_le32(ptr, (char*)desc->list + first_slot_offset,
280 upload_size);
281 desc->gpu_list = ptr - first_slot_offset / 4;
282
283 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
284 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
285 }
286
287 /* The shader pointer should point to slot 0. */
288 desc->buffer_offset -= first_slot_offset;
289
290 desc->dirty_mask = 0;
291
292 if (atom)
293 si_mark_atom_dirty(sctx, atom);
294
295 return true;
296 }
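/* Worked example for the buffer_offset rebase above (illustrative): with
 * 4-dword elements (slot_size = 16 bytes) and first_active_slot = 4,
 * first_slot_offset = 64. The upload writes slot 4 at the allocated
 * buffer_offset; subtracting 64 afterwards lets the shader pointer address
 * slot N at buffer_offset + N * 16, exactly as if the list started at
 * slot 0.
 */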
297
298 static void
299 si_descriptors_begin_new_cs(struct si_context *sctx, struct si_descriptors *desc)
300 {
301 if (!desc->buffer)
302 return;
303
304 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
305 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
306 }
307
308 /* SAMPLER VIEWS */
309
310 static unsigned
311 si_sampler_and_image_descriptors_idx(unsigned shader)
312 {
313 return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
314 SI_SHADER_DESCS_SAMPLERS_AND_IMAGES;
315 }
316
317 static struct si_descriptors *
318 si_sampler_and_image_descriptors(struct si_context *sctx, unsigned shader)
319 {
320 return &sctx->descriptors[si_sampler_and_image_descriptors_idx(shader)];
321 }
322
323 static void si_release_sampler_views(struct si_sampler_views *views)
324 {
325 int i;
326
327 for (i = 0; i < ARRAY_SIZE(views->views); i++) {
328 pipe_sampler_view_reference(&views->views[i], NULL);
329 }
330 }
331
332 static void si_sampler_view_add_buffer(struct si_context *sctx,
333 struct pipe_resource *resource,
334 enum radeon_bo_usage usage,
335 bool is_stencil_sampler,
336 bool check_mem)
337 {
338 struct r600_resource *rres;
339 struct r600_texture *rtex;
340 enum radeon_bo_priority priority;
341
342 if (!resource)
343 return;
344
345 if (resource->target != PIPE_BUFFER) {
346 struct r600_texture *tex = (struct r600_texture*)resource;
347
348 if (tex->is_depth && !r600_can_sample_zs(tex, is_stencil_sampler))
349 resource = &tex->flushed_depth_texture->resource.b.b;
350 }
351
352 rres = (struct r600_resource*)resource;
353 priority = r600_get_sampler_view_priority(rres);
354
355 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
356 rres, usage, priority,
357 check_mem);
358
359 if (resource->target == PIPE_BUFFER)
360 return;
361
362 /* Now add separate DCC or HTILE. */
363 rtex = (struct r600_texture*)resource;
364 if (rtex->dcc_separate_buffer) {
365 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
366 rtex->dcc_separate_buffer, usage,
367 RADEON_PRIO_DCC, check_mem);
368 }
369 }
370
371 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
372 struct si_sampler_views *views)
373 {
374 unsigned mask = views->enabled_mask;
375
376 /* Add buffers to the CS. */
377 while (mask) {
378 int i = u_bit_scan(&mask);
379 struct si_sampler_view *sview = (struct si_sampler_view *)views->views[i];
380
381 si_sampler_view_add_buffer(sctx, sview->base.texture,
382 RADEON_USAGE_READ,
383 sview->is_stencil_sampler, false);
384 }
385 }
386
387 /* Set buffer descriptor fields that can be changed by reallocations. */
388 static void si_set_buf_desc_address(struct r600_resource *buf,
389 uint64_t offset, uint32_t *state)
390 {
391 uint64_t va = buf->gpu_address + offset;
392
393 state[0] = va;
394 state[1] &= C_008F04_BASE_ADDRESS_HI;
395 state[1] |= S_008F04_BASE_ADDRESS_HI(va >> 32);
396 }
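/* Example (illustrative): for va = 0x123456780, state[0] gets the low
 * 32 bits (0x23456780) and BASE_ADDRESS_HI gets va >> 32 (0x1). The
 * read-modify-write of state[1] preserves the unrelated fields (e.g.
 * STRIDE) packed into the same dword.
 */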
397
398 /* Set texture descriptor fields that can be changed by reallocations.
399 *
400 * \param tex texture
401 * \param base_level_info information of the level of BASE_ADDRESS
402 * \param base_level the level of BASE_ADDRESS
403 * \param first_level pipe_sampler_view.u.tex.first_level
404 * \param block_width util_format_get_blockwidth()
405 * \param is_stencil select between separate Z & Stencil
406 * \param state descriptor to update
407 */
408 void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
409 struct r600_texture *tex,
410 const struct legacy_surf_level *base_level_info,
411 unsigned base_level, unsigned first_level,
412 unsigned block_width, bool is_stencil,
413 uint32_t *state)
414 {
415 uint64_t va, meta_va = 0;
416
417 if (tex->is_depth && !r600_can_sample_zs(tex, is_stencil)) {
418 tex = tex->flushed_depth_texture;
419 is_stencil = false;
420 }
421
422 va = tex->resource.gpu_address;
423
424 if (sscreen->b.chip_class >= GFX9) {
425 /* Only stencil_offset needs to be added here. */
426 if (is_stencil)
427 va += tex->surface.u.gfx9.stencil_offset;
428 else
429 va += tex->surface.u.gfx9.surf_offset;
430 } else {
431 va += base_level_info->offset;
432 }
433
434 state[0] = va >> 8;
435 state[1] &= C_008F14_BASE_ADDRESS_HI;
436 state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);
437
438 /* Only macrotiled modes can set tile swizzle.
439 * GFX9 doesn't use (legacy) base_level_info.
440 */
441 if (sscreen->b.chip_class >= GFX9 ||
442 base_level_info->mode == RADEON_SURF_MODE_2D)
443 state[0] |= tex->surface.tile_swizzle;
444
445 if (sscreen->b.chip_class >= VI) {
446 state[6] &= C_008F28_COMPRESSION_EN;
447 state[7] = 0;
448
449 if (vi_dcc_enabled(tex, first_level)) {
450 meta_va = (!tex->dcc_separate_buffer ? tex->resource.gpu_address : 0) +
451 tex->dcc_offset;
452
453 if (sscreen->b.chip_class == VI) {
454 meta_va += base_level_info->dcc_offset;
455 assert(base_level_info->mode == RADEON_SURF_MODE_2D);
456 }
457
458 meta_va |= (uint32_t)tex->surface.tile_swizzle << 8;
459 } else if (tex->tc_compatible_htile && first_level == 0) {
460 meta_va = tex->resource.gpu_address + tex->htile_offset;
461 }
462
463 if (meta_va) {
464 state[6] |= S_008F28_COMPRESSION_EN(1);
465 state[7] = meta_va >> 8;
466 }
467 }
468
469 if (sscreen->b.chip_class >= GFX9) {
470 state[3] &= C_008F1C_SW_MODE;
471 state[4] &= C_008F20_PITCH_GFX9;
472
473 if (is_stencil) {
474 state[3] |= S_008F1C_SW_MODE(tex->surface.u.gfx9.stencil.swizzle_mode);
475 state[4] |= S_008F20_PITCH_GFX9(tex->surface.u.gfx9.stencil.epitch);
476 } else {
477 state[3] |= S_008F1C_SW_MODE(tex->surface.u.gfx9.surf.swizzle_mode);
478 state[4] |= S_008F20_PITCH_GFX9(tex->surface.u.gfx9.surf.epitch);
479 }
480
481 state[5] &= C_008F24_META_DATA_ADDRESS &
482 C_008F24_META_PIPE_ALIGNED &
483 C_008F24_META_RB_ALIGNED;
484 if (meta_va) {
485 struct gfx9_surf_meta_flags meta;
486
487 if (tex->dcc_offset)
488 meta = tex->surface.u.gfx9.dcc;
489 else
490 meta = tex->surface.u.gfx9.htile;
491
492 state[5] |= S_008F24_META_DATA_ADDRESS(meta_va >> 40) |
493 S_008F24_META_PIPE_ALIGNED(meta.pipe_aligned) |
494 S_008F24_META_RB_ALIGNED(meta.rb_aligned);
495 }
496 } else {
497 /* SI-CI-VI */
498 unsigned pitch = base_level_info->nblk_x * block_width;
499 unsigned index = si_tile_mode_index(tex, base_level, is_stencil);
500
501 state[3] &= C_008F1C_TILING_INDEX;
502 state[3] |= S_008F1C_TILING_INDEX(index);
503 state[4] &= C_008F20_PITCH_GFX6;
504 state[4] |= S_008F20_PITCH_GFX6(pitch - 1);
505 }
506 }
507
508 static void si_set_sampler_view_desc(struct si_context *sctx,
509 struct si_sampler_view *sview,
510 struct si_sampler_state *sstate,
511 uint32_t *desc)
512 {
513 struct pipe_sampler_view *view = &sview->base;
514 struct r600_texture *rtex = (struct r600_texture *)view->texture;
515 bool is_buffer = rtex->resource.b.b.target == PIPE_BUFFER;
516
517 if (unlikely(!is_buffer && sview->dcc_incompatible)) {
518 if (vi_dcc_enabled(rtex, view->u.tex.first_level))
519 if (!r600_texture_disable_dcc(&sctx->b, rtex))
520 sctx->b.decompress_dcc(&sctx->b.b, rtex);
521
522 sview->dcc_incompatible = false;
523 }
524
525 assert(rtex); /* views with texture == NULL aren't supported */
526 memcpy(desc, sview->state, 8*4);
527
528 if (is_buffer) {
529 si_set_buf_desc_address(&rtex->resource,
530 sview->base.u.buf.offset,
531 desc + 4);
532 } else {
533 bool is_separate_stencil = rtex->db_compatible &&
534 sview->is_stencil_sampler;
535
536 si_set_mutable_tex_desc_fields(sctx->screen, rtex,
537 sview->base_level_info,
538 sview->base_level,
539 sview->base.u.tex.first_level,
540 sview->block_width,
541 is_separate_stencil,
542 desc);
543 }
544
545 if (!is_buffer && rtex->fmask.size) {
546 memcpy(desc + 8, sview->fmask_state, 8*4);
547 } else {
548 /* Disable FMASK and bind sampler state in [12:15]. */
549 memcpy(desc + 8, null_texture_descriptor, 4*4);
550
551 if (sstate)
552 memcpy(desc + 12, sstate->val, 4*4);
553 }
554 }
555
556 static void si_set_sampler_view(struct si_context *sctx,
557 unsigned shader,
558 unsigned slot, struct pipe_sampler_view *view,
559 bool disallow_early_out)
560 {
561 struct si_sampler_views *views = &sctx->samplers[shader].views;
562 struct si_sampler_view *rview = (struct si_sampler_view*)view;
563 struct si_descriptors *descs = si_sampler_and_image_descriptors(sctx, shader);
564 unsigned desc_slot = si_get_sampler_slot(slot);
565 uint32_t *desc = descs->list + desc_slot * 16;
566
567 if (views->views[slot] == view && !disallow_early_out)
568 return;
569
570 if (view) {
571 struct r600_texture *rtex = (struct r600_texture *)view->texture;
572
573 si_set_sampler_view_desc(sctx, rview,
574 views->sampler_states[slot], desc);
575
576 if (rtex->resource.b.b.target == PIPE_BUFFER)
577 rtex->resource.bind_history |= PIPE_BIND_SAMPLER_VIEW;
578
579 pipe_sampler_view_reference(&views->views[slot], view);
580 views->enabled_mask |= 1u << slot;
581
582 /* Since this can flush, it must be done after enabled_mask is
583 * updated. */
584 si_sampler_view_add_buffer(sctx, view->texture,
585 RADEON_USAGE_READ,
586 rview->is_stencil_sampler, true);
587 } else {
588 pipe_sampler_view_reference(&views->views[slot], NULL);
589 memcpy(desc, null_texture_descriptor, 8*4);
590 /* Only clear the lower dwords of FMASK. */
591 memcpy(desc + 8, null_texture_descriptor, 4*4);
592 /* Re-set the sampler state if we are transitioning from FMASK. */
593 if (views->sampler_states[slot])
594 memcpy(desc + 12,
595 views->sampler_states[slot]->val, 4*4);
596
597 views->enabled_mask &= ~(1u << slot);
598 }
599
600 descs->dirty_mask |= 1ull << desc_slot;
601 sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
602 }
603
604 static bool color_needs_decompression(struct r600_texture *rtex)
605 {
606 return rtex->fmask.size ||
607 (rtex->dirty_level_mask &&
608 (rtex->cmask.size || rtex->dcc_offset));
609 }
610
611 static bool depth_needs_decompression(struct r600_texture *rtex)
612 {
613 /* If the depth/stencil texture is TC-compatible, no decompression
614 * will be done. The decompression function will only flush DB caches
615 * to make it coherent with shaders. That's necessary because the driver
616 * doesn't flush DB caches in any other case.
617 */
618 return rtex->db_compatible;
619 }
620
621 static void si_update_shader_needs_decompress_mask(struct si_context *sctx,
622 unsigned shader)
623 {
624 struct si_textures_info *samplers = &sctx->samplers[shader];
625 unsigned shader_bit = 1 << shader;
626
627 if (samplers->needs_depth_decompress_mask ||
628 samplers->needs_color_decompress_mask ||
629 sctx->images[shader].needs_color_decompress_mask)
630 sctx->shader_needs_decompress_mask |= shader_bit;
631 else
632 sctx->shader_needs_decompress_mask &= ~shader_bit;
633 }
634
635 static void si_set_sampler_views(struct pipe_context *ctx,
636 enum pipe_shader_type shader, unsigned start,
637 unsigned count,
638 struct pipe_sampler_view **views)
639 {
640 struct si_context *sctx = (struct si_context *)ctx;
641 struct si_textures_info *samplers = &sctx->samplers[shader];
642 int i;
643
644 if (!count || shader >= SI_NUM_SHADERS)
645 return;
646
647 for (i = 0; i < count; i++) {
648 unsigned slot = start + i;
649
650 if (!views || !views[i]) {
651 samplers->needs_depth_decompress_mask &= ~(1u << slot);
652 samplers->needs_color_decompress_mask &= ~(1u << slot);
653 si_set_sampler_view(sctx, shader, slot, NULL, false);
654 continue;
655 }
656
657 si_set_sampler_view(sctx, shader, slot, views[i], false);
658
659 if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
660 struct r600_texture *rtex =
661 (struct r600_texture*)views[i]->texture;
662
663 if (depth_needs_decompression(rtex)) {
664 samplers->needs_depth_decompress_mask |= 1u << slot;
665 } else {
666 samplers->needs_depth_decompress_mask &= ~(1u << slot);
667 }
668 if (color_needs_decompression(rtex)) {
669 samplers->needs_color_decompress_mask |= 1u << slot;
670 } else {
671 samplers->needs_color_decompress_mask &= ~(1u << slot);
672 }
673
674 if (rtex->dcc_offset &&
675 p_atomic_read(&rtex->framebuffers_bound))
676 sctx->need_check_render_feedback = true;
677 } else {
678 samplers->needs_depth_decompress_mask &= ~(1u << slot);
679 samplers->needs_color_decompress_mask &= ~(1u << slot);
680 }
681 }
682
683 si_update_shader_needs_decompress_mask(sctx, shader);
684 }
685
686 static void
687 si_samplers_update_needs_color_decompress_mask(struct si_textures_info *samplers)
688 {
689 unsigned mask = samplers->views.enabled_mask;
690
691 while (mask) {
692 int i = u_bit_scan(&mask);
693 struct pipe_resource *res = samplers->views.views[i]->texture;
694
695 if (res && res->target != PIPE_BUFFER) {
696 struct r600_texture *rtex = (struct r600_texture *)res;
697
698 if (color_needs_decompression(rtex)) {
699 samplers->needs_color_decompress_mask |= 1u << i;
700 } else {
701 samplers->needs_color_decompress_mask &= ~(1u << i);
702 }
703 }
704 }
705 }
706
707 /* IMAGE VIEWS */
708
709 static void
710 si_release_image_views(struct si_images_info *images)
711 {
712 unsigned i;
713
714 for (i = 0; i < SI_NUM_IMAGES; ++i) {
715 struct pipe_image_view *view = &images->views[i];
716
717 pipe_resource_reference(&view->resource, NULL);
718 }
719 }
720
721 static void
722 si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
723 {
724 uint mask = images->enabled_mask;
725
726 /* Add buffers to the CS. */
727 while (mask) {
728 int i = u_bit_scan(&mask);
729 struct pipe_image_view *view = &images->views[i];
730
731 assert(view->resource);
732
733 si_sampler_view_add_buffer(sctx, view->resource,
734 RADEON_USAGE_READWRITE, false, false);
735 }
736 }
737
738 static void
739 si_disable_shader_image(struct si_context *ctx, unsigned shader, unsigned slot)
740 {
741 struct si_images_info *images = &ctx->images[shader];
742
743 if (images->enabled_mask & (1u << slot)) {
744 struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
745 unsigned desc_slot = si_get_image_slot(slot);
746
747 pipe_resource_reference(&images->views[slot].resource, NULL);
748 images->needs_color_decompress_mask &= ~(1 << slot);
749
750 memcpy(descs->list + desc_slot*8, null_image_descriptor, 8*4);
751 images->enabled_mask &= ~(1u << slot);
752 /* two 8-dword image descriptors share one 16-dword slot */
753 descs->dirty_mask |= 1u << (desc_slot / 2);
754 ctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
755 }
756 }
757
758 static void
759 si_mark_image_range_valid(const struct pipe_image_view *view)
760 {
761 struct r600_resource *res = (struct r600_resource *)view->resource;
762
763 assert(res && res->b.b.target == PIPE_BUFFER);
764
765 util_range_add(&res->valid_buffer_range,
766 view->u.buf.offset,
767 view->u.buf.offset + view->u.buf.size);
768 }
769
770 static void si_set_shader_image_desc(struct si_context *ctx,
771 const struct pipe_image_view *view,
772 bool skip_decompress,
773 uint32_t *desc)
774 {
775 struct si_screen *screen = ctx->screen;
776 struct r600_resource *res;
777
778 res = (struct r600_resource *)view->resource;
779
780 if (res->b.b.target == PIPE_BUFFER) {
781 if (view->access & PIPE_IMAGE_ACCESS_WRITE)
782 si_mark_image_range_valid(view);
783
784 si_make_buffer_descriptor(screen, res,
785 view->format,
786 view->u.buf.offset,
787 view->u.buf.size, desc);
788 si_set_buf_desc_address(res, view->u.buf.offset, desc + 4);
789 } else {
790 static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
791 struct r600_texture *tex = (struct r600_texture *)res;
792 unsigned level = view->u.tex.level;
793 unsigned width, height, depth, hw_level;
794 bool uses_dcc = vi_dcc_enabled(tex, level);
795
796 assert(!tex->is_depth);
797 assert(tex->fmask.size == 0);
798
799 if (uses_dcc && !skip_decompress &&
800 (view->access & PIPE_IMAGE_ACCESS_WRITE ||
801 !vi_dcc_formats_compatible(res->b.b.format, view->format))) {
802 /* If DCC can't be disabled, at least decompress it.
803 * The decompression is relatively cheap if the surface
804 * has been decompressed already.
805 */
806 if (!r600_texture_disable_dcc(&ctx->b, tex))
807 ctx->b.decompress_dcc(&ctx->b.b, tex);
808 }
809
810 if (ctx->b.chip_class >= GFX9) {
811 /* Always set the base address. The swizzle modes don't
812 * allow setting mipmap level offsets as the base.
813 */
814 width = res->b.b.width0;
815 height = res->b.b.height0;
816 depth = res->b.b.depth0;
817 hw_level = level;
818 } else {
819 /* Always force the base level to the selected level.
820 *
821 * This is required for 3D textures, where otherwise
822 * selecting a single slice for non-layered bindings
823 * fails. It doesn't hurt the other targets.
824 */
825 width = u_minify(res->b.b.width0, level);
826 height = u_minify(res->b.b.height0, level);
827 depth = u_minify(res->b.b.depth0, level);
828 hw_level = 0;
829 }
830
831 si_make_texture_descriptor(screen, tex,
832 false, res->b.b.target,
833 view->format, swizzle,
834 hw_level, hw_level,
835 view->u.tex.first_layer,
836 view->u.tex.last_layer,
837 width, height, depth,
838 desc, NULL);
839 si_set_mutable_tex_desc_fields(screen, tex,
840 &tex->surface.u.legacy.level[level],
841 level, level,
842 util_format_get_blockwidth(view->format),
843 false, desc);
844 }
845 }
846
847 static void si_set_shader_image(struct si_context *ctx,
848 unsigned shader,
849 unsigned slot, const struct pipe_image_view *view,
850 bool skip_decompress)
851 {
852 struct si_images_info *images = &ctx->images[shader];
853 struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
854 struct r600_resource *res;
855 unsigned desc_slot = si_get_image_slot(slot);
856 uint32_t *desc = descs->list + desc_slot * 8;
857
858 if (!view || !view->resource) {
859 si_disable_shader_image(ctx, shader, slot);
860 return;
861 }
862
863 res = (struct r600_resource *)view->resource;
864
865 if (&images->views[slot] != view)
866 util_copy_image_view(&images->views[slot], view);
867
868 si_set_shader_image_desc(ctx, view, skip_decompress, desc);
869
870 if (res->b.b.target == PIPE_BUFFER) {
871 images->needs_color_decompress_mask &= ~(1 << slot);
872 res->bind_history |= PIPE_BIND_SHADER_IMAGE;
873 } else {
874 struct r600_texture *tex = (struct r600_texture *)res;
875 unsigned level = view->u.tex.level;
876
877 if (color_needs_decompression(tex)) {
878 images->needs_color_decompress_mask |= 1 << slot;
879 } else {
880 images->needs_color_decompress_mask &= ~(1 << slot);
881 }
882
883 if (vi_dcc_enabled(tex, level) &&
884 p_atomic_read(&tex->framebuffers_bound))
885 ctx->need_check_render_feedback = true;
886 }
887
888 images->enabled_mask |= 1u << slot;
889 /* two 8-dword image descriptors share one 16-dword slot */
890 descs->dirty_mask |= 1u << (desc_slot / 2);
891 ctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
892
893 /* Since this can flush, it must be done after enabled_mask is updated. */
894 si_sampler_view_add_buffer(ctx, &res->b.b,
895 (view->access & PIPE_IMAGE_ACCESS_WRITE) ?
896 RADEON_USAGE_READWRITE : RADEON_USAGE_READ,
897 false, true);
898 }
899
900 static void
901 si_set_shader_images(struct pipe_context *pipe,
902 enum pipe_shader_type shader,
903 unsigned start_slot, unsigned count,
904 const struct pipe_image_view *views)
905 {
906 struct si_context *ctx = (struct si_context *)pipe;
907 unsigned i, slot;
908
909 assert(shader < SI_NUM_SHADERS);
910
911 if (!count)
912 return;
913
914 assert(start_slot + count <= SI_NUM_IMAGES);
915
916 if (views) {
917 for (i = 0, slot = start_slot; i < count; ++i, ++slot)
918 si_set_shader_image(ctx, shader, slot, &views[i], false);
919 } else {
920 for (i = 0, slot = start_slot; i < count; ++i, ++slot)
921 si_set_shader_image(ctx, shader, slot, NULL, false);
922 }
923
924 si_update_shader_needs_decompress_mask(ctx, shader);
925 }
926
927 static void
928 si_images_update_needs_color_decompress_mask(struct si_images_info *images)
929 {
930 unsigned mask = images->enabled_mask;
931
932 while (mask) {
933 int i = u_bit_scan(&mask);
934 struct pipe_resource *res = images->views[i].resource;
935
936 if (res && res->target != PIPE_BUFFER) {
937 struct r600_texture *rtex = (struct r600_texture *)res;
938
939 if (color_needs_decompression(rtex)) {
940 images->needs_color_decompress_mask |= 1 << i;
941 } else {
942 images->needs_color_decompress_mask &= ~(1 << i);
943 }
944 }
945 }
946 }
947
948 /* SAMPLER STATES */
949
950 static void si_bind_sampler_states(struct pipe_context *ctx,
951 enum pipe_shader_type shader,
952 unsigned start, unsigned count, void **states)
953 {
954 struct si_context *sctx = (struct si_context *)ctx;
955 struct si_textures_info *samplers = &sctx->samplers[shader];
956 struct si_descriptors *desc = si_sampler_and_image_descriptors(sctx, shader);
957 struct si_sampler_state **sstates = (struct si_sampler_state**)states;
958 int i;
959
960 if (!count || shader >= SI_NUM_SHADERS)
961 return;
962
963 for (i = 0; i < count; i++) {
964 unsigned slot = start + i;
965 unsigned desc_slot = si_get_sampler_slot(slot);
966
967 if (!sstates[i] ||
968 sstates[i] == samplers->views.sampler_states[slot])
969 continue;
970
971 #ifdef DEBUG
972 assert(sstates[i]->magic == SI_SAMPLER_STATE_MAGIC);
973 #endif
974 samplers->views.sampler_states[slot] = sstates[i];
975
976 /* If FMASK is bound, don't overwrite it.
977 * The sampler state will be set after FMASK is unbound.
978 */
979 if (samplers->views.views[slot] &&
980 samplers->views.views[slot]->texture &&
981 samplers->views.views[slot]->texture->target != PIPE_BUFFER &&
982 ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
983 continue;
984
985 memcpy(desc->list + desc_slot * 16 + 12, sstates[i]->val, 4*4);
986 desc->dirty_mask |= 1ull << desc_slot;
987 sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
988 }
989 }
990
991 /* BUFFER RESOURCES */
992
993 static void si_init_buffer_resources(struct si_context *sctx,
994 struct si_buffer_resources *buffers,
995 struct si_descriptors *descs,
996 unsigned num_buffers,
997 unsigned first_ce_slot,
998 unsigned num_ce_slots,
999 unsigned shader_userdata_index,
1000 enum radeon_bo_usage shader_usage,
1001 enum radeon_bo_usage shader_usage_constbuf,
1002 enum radeon_bo_priority priority,
1003 enum radeon_bo_priority priority_constbuf,
1004 unsigned *ce_offset)
1005 {
1006 buffers->shader_usage = shader_usage;
1007 buffers->shader_usage_constbuf = shader_usage_constbuf;
1008 buffers->priority = priority;
1009 buffers->priority_constbuf = priority_constbuf;
1010 buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
1011
1012 si_init_descriptors(sctx, descs, shader_userdata_index, 4, num_buffers,
1013 first_ce_slot, num_ce_slots, ce_offset);
1014 }
1015
1016 static void si_release_buffer_resources(struct si_buffer_resources *buffers,
1017 struct si_descriptors *descs)
1018 {
1019 int i;
1020
1021 for (i = 0; i < descs->num_elements; i++) {
1022 pipe_resource_reference(&buffers->buffers[i], NULL);
1023 }
1024
1025 FREE(buffers->buffers);
1026 }
1027
1028 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
1029 struct si_buffer_resources *buffers)
1030 {
1031 unsigned mask = buffers->enabled_mask;
1032
1033 /* Add buffers to the CS. */
1034 while (mask) {
1035 int i = u_bit_scan(&mask);
1036
1037 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1038 r600_resource(buffers->buffers[i]),
1039 i < SI_NUM_SHADER_BUFFERS ? buffers->shader_usage :
1040 buffers->shader_usage_constbuf,
1041 i < SI_NUM_SHADER_BUFFERS ? buffers->priority :
1042 buffers->priority_constbuf);
1043 }
1044 }
1045
1046 static void si_get_buffer_from_descriptors(struct si_buffer_resources *buffers,
1047 struct si_descriptors *descs,
1048 unsigned idx, struct pipe_resource **buf,
1049 unsigned *offset, unsigned *size)
1050 {
1051 pipe_resource_reference(buf, buffers->buffers[idx]);
1052 if (*buf) {
1053 struct r600_resource *res = r600_resource(*buf);
1054 const uint32_t *desc = descs->list + idx * 4;
1055 uint64_t va;
1056
1057 *size = desc[2];
1058
1059 assert(G_008F04_STRIDE(desc[1]) == 0);
1060 va = ((uint64_t)desc[1] << 32) | desc[0];
1061
1062 assert(va >= res->gpu_address && va + *size <= res->gpu_address + res->bo_size);
1063 *offset = va - res->gpu_address;
1064 }
1065 }
1066
1067 /* VERTEX BUFFERS */
1068
1069 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
1070 {
1071 struct si_descriptors *desc = &sctx->vertex_buffers;
1072 int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
1073 int i;
1074
1075 for (i = 0; i < count; i++) {
1076 int vb = sctx->vertex_elements->vertex_buffer_index[i];
1077
1078 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
1079 continue;
1080 if (!sctx->vertex_buffer[vb].buffer.resource)
1081 continue;
1082
1083 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1084 (struct r600_resource*)sctx->vertex_buffer[vb].buffer.resource,
1085 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
1086 }
1087
1088 if (!desc->buffer)
1089 return;
1090 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1091 desc->buffer, RADEON_USAGE_READ,
1092 RADEON_PRIO_DESCRIPTORS);
1093 }
1094
1095 bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
1096 {
1097 struct si_vertex_elements *velems = sctx->vertex_elements;
1098 struct si_descriptors *desc = &sctx->vertex_buffers;
1099 unsigned i, count;
1100 unsigned desc_list_byte_size;
1101 unsigned first_vb_use_mask;
1102 uint64_t va;
1103 uint32_t *ptr;
1104
1105 if (!sctx->vertex_buffers_dirty || !velems)
1106 return true;
1107
1108 count = velems->count;
1109
1110 if (!count)
1111 return true;
1112
1113 desc_list_byte_size = velems->desc_list_byte_size;
1114 first_vb_use_mask = velems->first_vb_use_mask;
1115
1116 /* Vertex buffer descriptors are the only ones which are uploaded
1117 * directly through a staging buffer and don't go through
1118 * the fine-grained upload path.
1119 */
1120 u_upload_alloc(sctx->b.b.const_uploader, 0,
1121 desc_list_byte_size,
1122 si_optimal_tcc_alignment(sctx, desc_list_byte_size),
1123 (unsigned*)&desc->buffer_offset,
1124 (struct pipe_resource**)&desc->buffer, (void**)&ptr);
1125 if (!desc->buffer)
1126 return false;
1127
1128 desc->list = ptr;
1129 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1130 desc->buffer, RADEON_USAGE_READ,
1131 RADEON_PRIO_DESCRIPTORS);
1132
1133 assert(count <= SI_MAX_ATTRIBS);
1134
1135 for (i = 0; i < count; i++) {
1136 struct pipe_vertex_buffer *vb;
1137 struct r600_resource *rbuffer;
1138 unsigned offset;
1139 unsigned vbo_index = velems->vertex_buffer_index[i];
1140 uint32_t *desc = &ptr[i*4];
1141
1142 vb = &sctx->vertex_buffer[vbo_index];
1143 rbuffer = (struct r600_resource*)vb->buffer.resource;
1144 if (!rbuffer) {
1145 memset(desc, 0, 16);
1146 continue;
1147 }
1148
1149 offset = vb->buffer_offset + velems->src_offset[i];
1150 va = rbuffer->gpu_address + offset;
1151
1152 /* Fill in T# buffer resource description */
1153 desc[0] = va;
1154 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1155 S_008F04_STRIDE(vb->stride);
1156
1157 if (sctx->b.chip_class != VI && vb->stride) {
1158 /* Round up by rounding down and adding 1 */
1159 desc[2] = (vb->buffer.resource->width0 - offset -
1160 velems->format_size[i]) /
1161 vb->stride + 1;
1162 } else {
1163 desc[2] = vb->buffer.resource->width0 - offset;
1164 }
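/* Worked example of the round-up above (illustrative): with
 * width0 = 100, offset = 0, format_size[i] = 4 and stride = 12,
 * the last fetchable vertex starts at byte 96, giving
 * (100 - 0 - 4) / 12 + 1 = 9 records. */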
1165
1166 desc[3] = velems->rsrc_word3[i];
1167
1168 if (first_vb_use_mask & (1 << i)) {
1169 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1170 (struct r600_resource*)vb->buffer.resource,
1171 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
1172 }
1173 }
1174
1175 /* Don't flush the const cache. It would have a very negative effect
1176 * on performance (confirmed by testing). New descriptors are always
1177 * uploaded to a fresh new buffer, so I don't think flushing the const
1178 * cache is needed. */
1179 si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
1180 sctx->vertex_buffers_dirty = false;
1181 sctx->vertex_buffer_pointer_dirty = true;
1182 sctx->prefetch_L2_mask |= SI_PREFETCH_VBO_DESCRIPTORS;
1183 return true;
1184 }
1185
1186
1187 /* CONSTANT BUFFERS */
1188
1189 static unsigned
1190 si_const_and_shader_buffer_descriptors_idx(unsigned shader)
1191 {
1192 return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
1193 SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS;
1194 }
1195
1196 static struct si_descriptors *
1197 si_const_and_shader_buffer_descriptors(struct si_context *sctx, unsigned shader)
1198 {
1199 return &sctx->descriptors[si_const_and_shader_buffer_descriptors_idx(shader)];
1200 }
1201
1202 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
1203 const uint8_t *ptr, unsigned size, uint32_t *const_offset)
1204 {
1205 void *tmp;
1206
1207 u_upload_alloc(sctx->b.b.const_uploader, 0, size,
1208 si_optimal_tcc_alignment(sctx, size),
1209 const_offset,
1210 (struct pipe_resource**)rbuffer, &tmp);
1211 if (*rbuffer)
1212 util_memcpy_cpu_to_le32(tmp, ptr, size);
1213 }
1214
1215 static void si_set_constant_buffer(struct si_context *sctx,
1216 struct si_buffer_resources *buffers,
1217 unsigned descriptors_idx,
1218 uint slot, const struct pipe_constant_buffer *input)
1219 {
1220 struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
1221 assert(slot < descs->num_elements);
1222 pipe_resource_reference(&buffers->buffers[slot], NULL);
1223
1224 /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
1225 * with a NULL buffer). We need to use a dummy buffer instead. */
1226 if (sctx->b.chip_class == CIK &&
1227 (!input || (!input->buffer && !input->user_buffer)))
1228 input = &sctx->null_const_buf;
1229
1230 if (input && (input->buffer || input->user_buffer)) {
1231 struct pipe_resource *buffer = NULL;
1232 uint64_t va;
1233
1234 /* Upload the user buffer if needed. */
1235 if (input->user_buffer) {
1236 unsigned buffer_offset;
1237
1238 si_upload_const_buffer(sctx,
1239 (struct r600_resource**)&buffer, input->user_buffer,
1240 input->buffer_size, &buffer_offset);
1241 if (!buffer) {
1242 /* Just unbind on failure. */
1243 si_set_constant_buffer(sctx, buffers, descriptors_idx, slot, NULL);
1244 return;
1245 }
1246 va = r600_resource(buffer)->gpu_address + buffer_offset;
1247 } else {
1248 pipe_resource_reference(&buffer, input->buffer);
1249 va = r600_resource(buffer)->gpu_address + input->buffer_offset;
1250 /* Only track usage for non-user buffers. */
1251 r600_resource(buffer)->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
1252 }
1253
1254 /* Set the descriptor. */
1255 uint32_t *desc = descs->list + slot*4;
1256 desc[0] = va;
1257 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1258 S_008F04_STRIDE(0);
1259 desc[2] = input->buffer_size;
1260 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1261 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1262 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1263 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1264 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1265 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1266
1267 buffers->buffers[slot] = buffer;
1268 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1269 (struct r600_resource*)buffer,
1270 buffers->shader_usage_constbuf,
1271 buffers->priority_constbuf, true);
1272 buffers->enabled_mask |= 1u << slot;
1273 } else {
1274 /* Clear the descriptor. */
1275 memset(descs->list + slot*4, 0, sizeof(uint32_t) * 4);
1276 buffers->enabled_mask &= ~(1u << slot);
1277 }
1278
1279 descs->dirty_mask |= 1u << slot;
1280 sctx->descriptors_dirty |= 1u << descriptors_idx;
1281 }
1282
1283 void si_set_rw_buffer(struct si_context *sctx,
1284 uint slot, const struct pipe_constant_buffer *input)
1285 {
1286 si_set_constant_buffer(sctx, &sctx->rw_buffers,
1287 SI_DESCS_RW_BUFFERS, slot, input);
1288 }
1289
1290 static void si_pipe_set_constant_buffer(struct pipe_context *ctx,
1291 enum pipe_shader_type shader, uint slot,
1292 const struct pipe_constant_buffer *input)
1293 {
1294 struct si_context *sctx = (struct si_context *)ctx;
1295
1296 if (shader >= SI_NUM_SHADERS)
1297 return;
1298
1299 slot = si_get_constbuf_slot(slot);
1300 si_set_constant_buffer(sctx, &sctx->const_and_shader_buffers[shader],
1301 si_const_and_shader_buffer_descriptors_idx(shader),
1302 slot, input);
1303 }
1304
1305 void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader,
1306 uint slot, struct pipe_constant_buffer *cbuf)
1307 {
1308 cbuf->user_buffer = NULL;
1309 si_get_buffer_from_descriptors(
1310 &sctx->const_and_shader_buffers[shader],
1311 si_const_and_shader_buffer_descriptors(sctx, shader),
1312 si_get_constbuf_slot(slot),
1313 &cbuf->buffer, &cbuf->buffer_offset, &cbuf->buffer_size);
1314 }
1315
1316 /* SHADER BUFFERS */
1317
1318 static void si_set_shader_buffers(struct pipe_context *ctx,
1319 enum pipe_shader_type shader,
1320 unsigned start_slot, unsigned count,
1321 const struct pipe_shader_buffer *sbuffers)
1322 {
1323 struct si_context *sctx = (struct si_context *)ctx;
1324 struct si_buffer_resources *buffers = &sctx->const_and_shader_buffers[shader];
1325 struct si_descriptors *descs = si_const_and_shader_buffer_descriptors(sctx, shader);
1326 unsigned i;
1327
1328 assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
1329
1330 for (i = 0; i < count; ++i) {
1331 const struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
1332 struct r600_resource *buf;
1333 unsigned slot = si_get_shaderbuf_slot(start_slot + i);
1334 uint32_t *desc = descs->list + slot * 4;
1335 uint64_t va;
1336
1337 if (!sbuffer || !sbuffer->buffer) {
1338 pipe_resource_reference(&buffers->buffers[slot], NULL);
1339 memset(desc, 0, sizeof(uint32_t) * 4);
1340 buffers->enabled_mask &= ~(1u << slot);
1341 descs->dirty_mask |= 1u << slot;
1342 sctx->descriptors_dirty |=
1343 1u << si_const_and_shader_buffer_descriptors_idx(shader);
1344 continue;
1345 }
1346
1347 buf = (struct r600_resource *)sbuffer->buffer;
1348 va = buf->gpu_address + sbuffer->buffer_offset;
1349
1350 desc[0] = va;
1351 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1352 S_008F04_STRIDE(0);
1353 desc[2] = sbuffer->buffer_size;
1354 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1355 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1356 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1357 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1358 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1359 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1360
1361 pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
1362 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx, buf,
1363 buffers->shader_usage,
1364 buffers->priority, true);
1365 buf->bind_history |= PIPE_BIND_SHADER_BUFFER;
1366
1367 buffers->enabled_mask |= 1u << slot;
1368 descs->dirty_mask |= 1u << slot;
1369 sctx->descriptors_dirty |=
1370 1u << si_const_and_shader_buffer_descriptors_idx(shader);
1371
1372 util_range_add(&buf->valid_buffer_range, sbuffer->buffer_offset,
1373 sbuffer->buffer_offset + sbuffer->buffer_size);
1374 }
1375 }
1376
1377 void si_get_shader_buffers(struct si_context *sctx,
1378 enum pipe_shader_type shader,
1379 uint start_slot, uint count,
1380 struct pipe_shader_buffer *sbuf)
1381 {
1382 struct si_buffer_resources *buffers = &sctx->const_and_shader_buffers[shader];
1383 struct si_descriptors *descs = si_const_and_shader_buffer_descriptors(sctx, shader);
1384
1385 for (unsigned i = 0; i < count; ++i) {
1386 si_get_buffer_from_descriptors(
1387 buffers, descs,
1388 si_get_shaderbuf_slot(start_slot + i),
1389 &sbuf[i].buffer, &sbuf[i].buffer_offset,
1390 &sbuf[i].buffer_size);
1391 }
1392 }
1393
1394 /* RING BUFFERS */
1395
1396 void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
1397 struct pipe_resource *buffer,
1398 unsigned stride, unsigned num_records,
1399 bool add_tid, bool swizzle,
1400 unsigned element_size, unsigned index_stride, uint64_t offset)
1401 {
1402 struct si_context *sctx = (struct si_context *)ctx;
1403 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1404 struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1405
1406 /* The stride field in the resource descriptor has 14 bits */
1407 assert(stride < (1 << 14));
1408
1409 assert(slot < descs->num_elements);
1410 pipe_resource_reference(&buffers->buffers[slot], NULL);
1411
1412 if (buffer) {
1413 uint64_t va;
1414
1415 va = r600_resource(buffer)->gpu_address + offset;
1416
1417 switch (element_size) {
1418 default:
1419 assert(!"Unsupported ring buffer element size");
1420 case 0:
1421 case 2:
1422 element_size = 0;
1423 break;
1424 case 4:
1425 element_size = 1;
1426 break;
1427 case 8:
1428 element_size = 2;
1429 break;
1430 case 16:
1431 element_size = 3;
1432 break;
1433 }
1434
1435 switch (index_stride) {
1436 default:
1437 assert(!"Unsupported ring buffer index stride");
1438 case 0:
1439 case 8:
1440 index_stride = 0;
1441 break;
1442 case 16:
1443 index_stride = 1;
1444 break;
1445 case 32:
1446 index_stride = 2;
1447 break;
1448 case 64:
1449 index_stride = 3;
1450 break;
1451 }
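/* Editorial note: both switches above implement log2-style encodings of
 * the SQ buffer resource fields, e.g.:
 *
 *   element_size bytes: 4 -> 1, 8 -> 2, 16 -> 3        (log2(size) - 1)
 *   index_stride:       8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3  (log2 - 3)
 */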
1452
1453 if (sctx->b.chip_class >= VI && stride)
1454 num_records *= stride;
1455
1456 /* Set the descriptor. */
1457 uint32_t *desc = descs->list + slot*4;
1458 desc[0] = va;
1459 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1460 S_008F04_STRIDE(stride) |
1461 S_008F04_SWIZZLE_ENABLE(swizzle);
1462 desc[2] = num_records;
1463 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1464 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1465 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1466 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1467 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1468 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1469 S_008F0C_INDEX_STRIDE(index_stride) |
1470 S_008F0C_ADD_TID_ENABLE(add_tid);
1471
1472 if (sctx->b.chip_class >= GFX9)
1473 assert(!swizzle || element_size == 1); /* always 4 bytes on GFX9 */
1474 else
1475 desc[3] |= S_008F0C_ELEMENT_SIZE(element_size);
1476
1477 pipe_resource_reference(&buffers->buffers[slot], buffer);
1478 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1479 (struct r600_resource*)buffer,
1480 buffers->shader_usage, buffers->priority);
1481 buffers->enabled_mask |= 1u << slot;
1482 } else {
1483 /* Clear the descriptor. */
1484 memset(descs->list + slot*4, 0, sizeof(uint32_t) * 4);
1485 buffers->enabled_mask &= ~(1u << slot);
1486 }
1487
1488 descs->dirty_mask |= 1u << slot;
1489 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
1490 }
1491
1492 /* STREAMOUT BUFFERS */
1493
1494 static void si_set_streamout_targets(struct pipe_context *ctx,
1495 unsigned num_targets,
1496 struct pipe_stream_output_target **targets,
1497 const unsigned *offsets)
1498 {
1499 struct si_context *sctx = (struct si_context *)ctx;
1500 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1501 struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1502 unsigned old_num_targets = sctx->b.streamout.num_targets;
1503 unsigned i, bufidx;
1504
1505 /* We are going to unbind the buffers. Mark which caches need to be flushed. */
1506 if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
1507 /* Since streamout uses vector writes which go through TC L2
1508 * and most other clients can use TC L2 as well, we don't need
1509 * to flush it.
1510 *
1511 * The only cases that require flushing it are VGT DMA index
1512 * fetching (on <= CIK) and indirect draw data, which are rare
1513 * cases. Thus, flag the TC L2 dirtiness in the resource and
1514 * handle it at draw call time.
1515 */
1516 for (i = 0; i < sctx->b.streamout.num_targets; i++)
1517 if (sctx->b.streamout.targets[i])
1518 r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
1519
1520 /* Invalidate the scalar cache in case a streamout buffer is
1521 * going to be used as a constant buffer.
1522 *
1523 * Invalidate TC L1, because streamout bypasses it (done by
1524 * setting GLC=1 in the store instruction), but it can contain
1525 * outdated data of streamout buffers.
1526 *
1527 * VS_PARTIAL_FLUSH is required if the buffers are going to be
1528 * used as an input immediately.
1529 */
1530 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
1531 SI_CONTEXT_INV_VMEM_L1 |
1532 SI_CONTEXT_VS_PARTIAL_FLUSH;
1533 }
1534
1535 /* All readers of the streamout targets need to be finished before we can
1536 * start writing to the targets.
1537 */
1538 if (num_targets)
1539 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
1540 SI_CONTEXT_CS_PARTIAL_FLUSH;
1541
1542 /* Streamout buffers must be bound in 2 places:
1543 * 1) in VGT by setting the VGT_STRMOUT registers
1544 * 2) as shader resources
1545 */
1546
1547 /* Set the VGT regs. */
1548 r600_set_streamout_targets(ctx, num_targets, targets, offsets);
1549
1550 /* Set the shader resources. */
1551 for (i = 0; i < num_targets; i++) {
1552 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1553
1554 if (targets[i]) {
1555 struct pipe_resource *buffer = targets[i]->buffer;
1556 uint64_t va = r600_resource(buffer)->gpu_address;
1557
1558 /* Set the descriptor.
1559 *
1560 * On VI, the format must be non-INVALID, otherwise
1561 * the buffer will be considered not bound and store
1562 * instructions will be no-ops.
1563 */
1564 uint32_t *desc = descs->list + bufidx*4;
1565 desc[0] = va;
1566 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
1567 desc[2] = 0xffffffff;
1568 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1569 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1570 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1571 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1572 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1573
1574 /* Set the resource. */
1575 pipe_resource_reference(&buffers->buffers[bufidx],
1576 buffer);
1577 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1578 (struct r600_resource*)buffer,
1579 buffers->shader_usage,
1580 RADEON_PRIO_SHADER_RW_BUFFER,
1581 true);
1582 r600_resource(buffer)->bind_history |= PIPE_BIND_STREAM_OUTPUT;
1583
1584 buffers->enabled_mask |= 1u << bufidx;
1585 } else {
1586 /* Clear the descriptor and unset the resource. */
1587 memset(descs->list + bufidx*4, 0,
1588 sizeof(uint32_t) * 4);
1589 pipe_resource_reference(&buffers->buffers[bufidx],
1590 NULL);
1591 buffers->enabled_mask &= ~(1u << bufidx);
1592 }
1593 descs->dirty_mask |= 1u << bufidx;
1594 }
1595 for (; i < old_num_targets; i++) {
1596 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1597 /* Clear the descriptor and unset the resource. */
1598 memset(descs->list + bufidx*4, 0, sizeof(uint32_t) * 4);
1599 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
1600 buffers->enabled_mask &= ~(1u << bufidx);
1601 descs->dirty_mask |= 1u << bufidx;
1602 }
1603
1604 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
1605 }
1606
1607 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
1608 uint32_t *desc, uint64_t old_buf_va,
1609 struct pipe_resource *new_buf)
1610 {
1611 /* Retrieve the buffer offset from the descriptor. */
1612 uint64_t old_desc_va =
1613 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
1614
1615 assert(old_buf_va <= old_desc_va);
1616 uint64_t offset_within_buffer = old_desc_va - old_buf_va;
1617
1618 /* Update the descriptor. */
1619 si_set_buf_desc_address(r600_resource(new_buf), offset_within_buffer,
1620 desc);
1621 }
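/* Example (illustrative): if the old buffer was at old_buf_va = 0x10000
 * and the descriptor held old_desc_va = 0x10100, the binding covered
 * offset 0x100 into the buffer, so the descriptor is rewritten to
 * new_buf->gpu_address + 0x100. */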
1622
1623 /* INTERNAL CONST BUFFERS */
1624
1625 static void si_set_polygon_stipple(struct pipe_context *ctx,
1626 const struct pipe_poly_stipple *state)
1627 {
1628 struct si_context *sctx = (struct si_context *)ctx;
1629 struct pipe_constant_buffer cb = {};
1630 unsigned stipple[32];
1631 int i;
1632
1633 for (i = 0; i < 32; i++)
1634 stipple[i] = util_bitreverse(state->stipple[i]);
1635
1636 cb.user_buffer = stipple;
1637 cb.buffer_size = sizeof(stipple);
1638
1639 si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE, &cb);
1640 }
1641
1642 /* TEXTURE METADATA ENABLE/DISABLE */
1643
1644 static void
1645 si_resident_handles_update_needs_color_decompress(struct si_context *sctx)
1646 {
1647 util_dynarray_clear(&sctx->resident_tex_needs_color_decompress);
1648 util_dynarray_clear(&sctx->resident_img_needs_color_decompress);
1649
1650 util_dynarray_foreach(&sctx->resident_tex_handles,
1651 struct si_texture_handle *, tex_handle) {
1652 struct pipe_resource *res = (*tex_handle)->view->texture;
1653 struct r600_texture *rtex;
1654
1655 if (!res || res->target == PIPE_BUFFER)
1656 continue;
1657
1658 rtex = (struct r600_texture *)res;
1659 if (!color_needs_decompression(rtex))
1660 continue;
1661
1662 util_dynarray_append(&sctx->resident_tex_needs_color_decompress,
1663 struct si_texture_handle *, *tex_handle);
1664 }
1665
1666 util_dynarray_foreach(&sctx->resident_img_handles,
1667 struct si_image_handle *, img_handle) {
1668 struct pipe_image_view *view = &(*img_handle)->view;
1669 struct pipe_resource *res = view->resource;
1670 struct r600_texture *rtex;
1671
1672 if (!res || res->target == PIPE_BUFFER)
1673 continue;
1674
1675 rtex = (struct r600_texture *)res;
1676 if (!color_needs_decompression(rtex))
1677 continue;
1678
1679 util_dynarray_append(&sctx->resident_img_needs_color_decompress,
1680 struct si_image_handle *, *img_handle);
1681 }
1682 }
1683
1684 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
1685 * while the texture is bound, possibly by a different context. In that case,
1686 * call this function to update needs_*_decompress_masks.
1687 */
1688 void si_update_needs_color_decompress_masks(struct si_context *sctx)
1689 {
1690 for (int i = 0; i < SI_NUM_SHADERS; ++i) {
1691 si_samplers_update_needs_color_decompress_mask(&sctx->samplers[i]);
1692 si_images_update_needs_color_decompress_mask(&sctx->images[i]);
1693 si_update_shader_needs_decompress_mask(sctx, i);
1694 }
1695
1696 si_resident_handles_update_needs_color_decompress(sctx);
1697 }
1698
1699 /* BUFFER DISCARD/INVALIDATION */
1700
1701 /** Reset descriptors of buffer resources after \p buf has been invalidated. */
1702 static void si_reset_buffer_resources(struct si_context *sctx,
1703 struct si_buffer_resources *buffers,
1704 unsigned descriptors_idx,
1705 unsigned slot_mask,
1706 struct pipe_resource *buf,
1707 uint64_t old_va,
1708 enum radeon_bo_usage usage,
1709 enum radeon_bo_priority priority)
1710 {
1711 struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
1712 unsigned mask = buffers->enabled_mask & slot_mask;
1713
1714 while (mask) {
1715 unsigned i = u_bit_scan(&mask);
1716 if (buffers->buffers[i] == buf) {
1717 si_desc_reset_buffer_offset(&sctx->b.b,
1718 descs->list + i*4,
1719 old_va, buf);
1720 descs->dirty_mask |= 1u << i;
1721 sctx->descriptors_dirty |= 1u << descriptors_idx;
1722
1723 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1724 (struct r600_resource *)buf,
1725 usage, priority, true);
1726 }
1727 }
1728 }
1729
1730 static void si_rebind_buffer(struct pipe_context *ctx, struct pipe_resource *buf,
1731 uint64_t old_va)
1732 {
1733 struct si_context *sctx = (struct si_context*)ctx;
1734 struct r600_resource *rbuffer = r600_resource(buf);
1735 unsigned i, shader;
1736 unsigned num_elems = sctx->vertex_elements ?
1737 sctx->vertex_elements->count : 0;
1738
1739 /* We changed the buffer; now we need to bind it where the old one
1740 * was bound. This consists of two things:
1741 * 1) Updating the resource descriptor and dirtying it.
1742 * 2) Adding a relocation to the CS, so that it's usable.
1743 */
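/* Every branch below repeats the same per-slot pattern (a sketch):
 *
 *     if (bound buffer == buf) {
 *             si_desc_reset_buffer_offset(...);          // patch the VA
 *             mark the descriptor and the list dirty;
 *             radeon_add_to_buffer_list_check_mem(...);  // add a relocation
 *     }
 */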
1744
1745 /* Vertex buffers. */
1746 if (rbuffer->bind_history & PIPE_BIND_VERTEX_BUFFER) {
1747 for (i = 0; i < num_elems; i++) {
1748 int vb = sctx->vertex_elements->vertex_buffer_index[i];
1749
1750 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
1751 continue;
1752 if (!sctx->vertex_buffer[vb].buffer.resource)
1753 continue;
1754
1755 if (sctx->vertex_buffer[vb].buffer.resource == buf) {
1756 sctx->vertex_buffers_dirty = true;
1757 break;
1758 }
1759 }
1760 }
1761
1762 /* Streamout buffers. (other internal buffers can't be invalidated) */
1763 if (rbuffer->bind_history & PIPE_BIND_STREAM_OUTPUT) {
1764 for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
1765 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1766 struct si_descriptors *descs =
1767 &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1768
1769 if (buffers->buffers[i] != buf)
1770 continue;
1771
1772 si_desc_reset_buffer_offset(ctx, descs->list + i*4,
1773 old_va, buf);
1774 descs->dirty_mask |= 1u << i;
1775 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
1776
1777 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1778 rbuffer, buffers->shader_usage,
1779 RADEON_PRIO_SHADER_RW_BUFFER,
1780 true);
1781
1782 /* Update the streamout state. */
1783 if (sctx->b.streamout.begin_emitted)
1784 r600_emit_streamout_end(&sctx->b);
1785 sctx->b.streamout.append_bitmask =
1786 sctx->b.streamout.enabled_mask;
1787 r600_streamout_buffers_dirty(&sctx->b);
1788 }
1789 }
1790
1791 /* Constant and shader buffers. */
1792 if (rbuffer->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
1793 for (shader = 0; shader < SI_NUM_SHADERS; shader++)
1794 si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
1795 si_const_and_shader_buffer_descriptors_idx(shader),
1796 u_bit_consecutive(SI_NUM_SHADER_BUFFERS, SI_NUM_CONST_BUFFERS),
1797 buf, old_va,
1798 sctx->const_and_shader_buffers[shader].shader_usage_constbuf,
1799 sctx->const_and_shader_buffers[shader].priority_constbuf);
1800 }
1801
1802 if (rbuffer->bind_history & PIPE_BIND_SHADER_BUFFER) {
1803 for (shader = 0; shader < SI_NUM_SHADERS; shader++)
1804 si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
1805 si_const_and_shader_buffer_descriptors_idx(shader),
1806 u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS),
1807 buf, old_va,
1808 sctx->const_and_shader_buffers[shader].shader_usage,
1809 sctx->const_and_shader_buffers[shader].priority);
1810 }
1811
1812 if (rbuffer->bind_history & PIPE_BIND_SAMPLER_VIEW) {
1813 /* Texture buffers - update bindings. */
1814 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1815 struct si_sampler_views *views = &sctx->samplers[shader].views;
1816 struct si_descriptors *descs =
1817 si_sampler_and_image_descriptors(sctx, shader);
1818 unsigned mask = views->enabled_mask;
1819
1820 while (mask) {
1821 unsigned i = u_bit_scan(&mask);
1822 if (views->views[i]->texture == buf) {
1823 unsigned desc_slot = si_get_sampler_slot(i);
1824
1825 si_desc_reset_buffer_offset(ctx,
1826 descs->list +
1827 desc_slot * 16 + 4,
1828 old_va, buf);
1829 descs->dirty_mask |= 1ull << desc_slot;
1830 sctx->descriptors_dirty |=
1831 1u << si_sampler_and_image_descriptors_idx(shader);
1832
1833 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1834 rbuffer, RADEON_USAGE_READ,
1835 RADEON_PRIO_SAMPLER_BUFFER,
1836 true);
1837 }
1838 }
1839 }
1840 }
1841
1842 /* Shader images */
1843 if (rbuffer->bind_history & PIPE_BIND_SHADER_IMAGE) {
1844 for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
1845 struct si_images_info *images = &sctx->images[shader];
1846 struct si_descriptors *descs =
1847 si_sampler_and_image_descriptors(sctx, shader);
1848 unsigned mask = images->enabled_mask;
1849
1850 while (mask) {
1851 unsigned i = u_bit_scan(&mask);
1852
1853 if (images->views[i].resource == buf) {
1854 unsigned desc_slot = si_get_image_slot(i);
1855
1856 if (images->views[i].access & PIPE_IMAGE_ACCESS_WRITE)
1857 si_mark_image_range_valid(&images->views[i]);
1858
1859 si_desc_reset_buffer_offset(
1860 ctx, descs->list + desc_slot * 8 + 4,
1861 old_va, buf);
1862 /* two 8-dword image descriptors share one 16-dword slot */
1863 descs->dirty_mask |= 1u << (desc_slot / 2);
1864 sctx->descriptors_dirty |=
1865 1u << si_sampler_and_image_descriptors_idx(shader);
1866
1867 radeon_add_to_buffer_list_check_mem(
1868 &sctx->b, &sctx->b.gfx, rbuffer,
1869 RADEON_USAGE_READWRITE,
1870 RADEON_PRIO_SAMPLER_BUFFER, true);
1871 }
1872 }
1873 }
1874 }
1875
1876 /* Bindless texture handles */
1877 if (rbuffer->texture_handle_allocated) {
1878 struct si_descriptors *descs = &sctx->bindless_descriptors;
1879
1880 util_dynarray_foreach(&sctx->resident_tex_handles,
1881 struct si_texture_handle *, tex_handle) {
1882 struct pipe_sampler_view *view = (*tex_handle)->view;
1883 unsigned desc_slot = (*tex_handle)->desc_slot;
1884
1885 if (view->texture == buf) {
1886 si_set_buf_desc_address(rbuffer,
1887 view->u.buf.offset,
1888 descs->list +
1889 desc_slot * 16 + 4);
1890
1891 (*tex_handle)->desc_dirty = true;
1892 sctx->bindless_descriptors_dirty = true;
1893
1894 radeon_add_to_buffer_list_check_mem(
1895 &sctx->b, &sctx->b.gfx, rbuffer,
1896 RADEON_USAGE_READ,
1897 RADEON_PRIO_SAMPLER_BUFFER, true);
1898 }
1899 }
1900 }
1901
1902 /* Bindless image handles */
1903 if (rbuffer->image_handle_allocated) {
1904 struct si_descriptors *descs = &sctx->bindless_descriptors;
1905
1906 util_dynarray_foreach(&sctx->resident_img_handles,
1907 struct si_image_handle *, img_handle) {
1908 struct pipe_image_view *view = &(*img_handle)->view;
1909 unsigned desc_slot = (*img_handle)->desc_slot;
1910
1911 if (view->resource == buf) {
1912 if (view->access & PIPE_IMAGE_ACCESS_WRITE)
1913 si_mark_image_range_valid(view);
1914
1915 si_set_buf_desc_address(rbuffer,
1916 view->u.buf.offset,
1917 descs->list +
1918 desc_slot * 16 + 4);
1919
1920 (*img_handle)->desc_dirty = true;
1921 sctx->bindless_descriptors_dirty = true;
1922
1923 radeon_add_to_buffer_list_check_mem(
1924 &sctx->b, &sctx->b.gfx, rbuffer,
1925 RADEON_USAGE_READWRITE,
1926 RADEON_PRIO_SAMPLER_BUFFER, true);
1927 }
1928 }
1929 }
1930 }
1931
1932 /* Reallocate a buffer and update all resource bindings where the buffer is
1933 * bound.
1934 *
1935 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
1936 * idle by discarding its contents. Apps usually tell us when to do this using
1937 * map_buffer flags, for example.
1938 */
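/* A typical trigger (a sketch of one path, not the only one): the state
 * tracker maps the buffer with PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE while
 * the GPU may still be using it, and the transfer code calls
 * invalidate_buffer() instead of stalling.
 */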
1939 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
1940 {
1941 struct si_context *sctx = (struct si_context*)ctx;
1942 struct r600_resource *rbuffer = r600_resource(buf);
1943 uint64_t old_va = rbuffer->gpu_address;
1944
1945 /* Reallocate the buffer in the same pipe_resource. */
1946 r600_alloc_resource(&sctx->screen->b, rbuffer);
1947
1948 si_rebind_buffer(ctx, buf, old_va);
1949 }
1950
1951 static void si_upload_bindless_descriptor(struct si_context *sctx,
1952 unsigned desc_slot,
1953 unsigned num_dwords)
1954 {
1955 struct si_descriptors *desc = &sctx->bindless_descriptors;
1956 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
1957 unsigned desc_slot_offset = desc_slot * 16;
1958 uint32_t *data;
1959 uint64_t va;
1960
1961 data = desc->list + desc_slot_offset;
1962
1963 va = desc->buffer->gpu_address + desc->buffer_offset +
1964 desc_slot_offset * 4;
1965
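/* Resulting WRITE_DATA packet (a sketch of the layout emitted below):
 *   header: PKT3(WRITE_DATA, 2 + num_dwords, 0)
 *           (count = control dword + 2 VA dwords + num_dwords - 1)
 *   body:   control (DST_SEL=TC_L2, WR_CONFIRM, ENGINE=ME),
 *           VA low, VA high, then the descriptor dwords
 */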
1966 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + num_dwords, 0));
1967 radeon_emit(cs, S_370_DST_SEL(V_370_TC_L2) |
1968 S_370_WR_CONFIRM(1) |
1969 S_370_ENGINE_SEL(V_370_ME));
1970 radeon_emit(cs, va);
1971 radeon_emit(cs, va >> 32);
1972 radeon_emit_array(cs, data, num_dwords);
1973 }
1974
1975 static void si_upload_bindless_descriptors(struct si_context *sctx)
1976 {
1977 if (!sctx->bindless_descriptors_dirty)
1978 return;
1979
1980 /* Wait for graphics/compute to be idle before updating the resident
1981 * descriptors directly in memory, in case the GPU is using them.
1982 */
1983 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
1984 SI_CONTEXT_CS_PARTIAL_FLUSH;
1985 si_emit_cache_flush(sctx);
1986
1987 util_dynarray_foreach(&sctx->resident_tex_handles,
1988 struct si_texture_handle *, tex_handle) {
1989 unsigned desc_slot = (*tex_handle)->desc_slot;
1990
1991 if (!(*tex_handle)->desc_dirty)
1992 continue;
1993
1994 si_upload_bindless_descriptor(sctx, desc_slot, 16);
1995 (*tex_handle)->desc_dirty = false;
1996 }
1997
1998 util_dynarray_foreach(&sctx->resident_img_handles,
1999 struct si_image_handle *, img_handle) {
2000 unsigned desc_slot = (*img_handle)->desc_slot;
2001
2002 if (!(*img_handle)->desc_dirty)
2003 continue;
2004
2005 si_upload_bindless_descriptor(sctx, desc_slot, 8);
2006 (*img_handle)->desc_dirty = false;
2007 }
2008
2009 /* Invalidate L1 because it doesn't know that L2 changed. */
2010 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1;
2011 si_emit_cache_flush(sctx);
2012
2013 sctx->bindless_descriptors_dirty = false;
2014 }
2015
2016 /* Update the mutable image descriptor fields of a resident texture. */
2017 static void si_update_resident_texture_descriptor(struct si_context *sctx,
2018 struct si_texture_handle *tex_handle)
2019 {
2020 struct si_sampler_view *sview = (struct si_sampler_view *)tex_handle->view;
2021 struct si_descriptors *desc = &sctx->bindless_descriptors;
2022 unsigned desc_slot_offset = tex_handle->desc_slot * 16;
2023 uint32_t desc_list[16];
2024
2025 if (sview->base.texture->target == PIPE_BUFFER)
2026 return;
2027
2028 memcpy(desc_list, desc->list + desc_slot_offset, sizeof(desc_list));
2029 si_set_sampler_view_desc(sctx, sview, &tex_handle->sstate,
2030 desc->list + desc_slot_offset);
2031
2032 if (memcmp(desc_list, desc->list + desc_slot_offset,
2033 sizeof(desc_list))) {
2034 tex_handle->desc_dirty = true;
2035 sctx->bindless_descriptors_dirty = true;
2036 }
2037 }
2038
2039 static void si_update_resident_image_descriptor(struct si_context *sctx,
2040 struct si_image_handle *img_handle)
2041 {
2042 struct si_descriptors *desc = &sctx->bindless_descriptors;
2043 unsigned desc_slot_offset = img_handle->desc_slot * 16;
2044 struct pipe_image_view *view = &img_handle->view;
2045 uint32_t desc_list[8];
2046
2047 if (view->resource->target == PIPE_BUFFER)
2048 return;
2049
2050 memcpy(desc_list, desc->list + desc_slot_offset,
2051 sizeof(desc_list));
2052 si_set_shader_image_desc(sctx, view, true,
2053 desc->list + desc_slot_offset);
2054
2055 if (memcmp(desc_list, desc->list + desc_slot_offset,
2056 sizeof(desc_list))) {
2057 img_handle->desc_dirty = true;
2058 sctx->bindless_descriptors_dirty = true;
2059 }
2060 }
2061
2062 static void si_update_all_resident_texture_descriptors(struct si_context *sctx)
2063 {
2064 util_dynarray_foreach(&sctx->resident_tex_handles,
2065 struct si_texture_handle *, tex_handle) {
2066 si_update_resident_texture_descriptor(sctx, *tex_handle);
2067 }
2068
2069 util_dynarray_foreach(&sctx->resident_img_handles,
2070 struct si_image_handle *, img_handle) {
2071 si_update_resident_image_descriptor(sctx, *img_handle);
2072 }
2073
2074 si_upload_bindless_descriptors(sctx);
2075 }
2076
2077 /* Update mutable image descriptor fields of all bound textures. */
2078 void si_update_all_texture_descriptors(struct si_context *sctx)
2079 {
2080 unsigned shader;
2081
2082 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
2083 struct si_sampler_views *samplers = &sctx->samplers[shader].views;
2084 struct si_images_info *images = &sctx->images[shader];
2085 unsigned mask;
2086
2087 /* Images. */
2088 mask = images->enabled_mask;
2089 while (mask) {
2090 unsigned i = u_bit_scan(&mask);
2091 struct pipe_image_view *view = &images->views[i];
2092
2093 if (!view->resource ||
2094 view->resource->target == PIPE_BUFFER)
2095 continue;
2096
2097 si_set_shader_image(sctx, shader, i, view, true);
2098 }
2099
2100 /* Sampler views. */
2101 mask = samplers->enabled_mask;
2102 while (mask) {
2103 unsigned i = u_bit_scan(&mask);
2104 struct pipe_sampler_view *view = samplers->views[i];
2105
2106 if (!view ||
2107 !view->texture ||
2108 view->texture->target == PIPE_BUFFER)
2109 continue;
2110
2111 si_set_sampler_view(sctx, shader, i,
2112 samplers->views[i], true);
2113 }
2114
2115 si_update_shader_needs_decompress_mask(sctx, shader);
2116 }
2117
2118 si_update_all_resident_texture_descriptors(sctx);
2119 }
2120
2121 /* SHADER USER DATA */
2122
2123 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
2124 unsigned shader)
2125 {
2126 sctx->shader_pointers_dirty |=
2127 u_bit_consecutive(SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS,
2128 SI_NUM_SHADER_DESCS);
2129
2130 if (shader == PIPE_SHADER_VERTEX)
2131 sctx->vertex_buffer_pointer_dirty = sctx->vertex_buffers.buffer != NULL;
2132
2133 si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
2134 }
2135
2136 static void si_shader_pointers_begin_new_cs(struct si_context *sctx)
2137 {
2138 sctx->shader_pointers_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
2139 sctx->vertex_buffer_pointer_dirty = sctx->vertex_buffers.buffer != NULL;
2140 si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
2141 sctx->graphics_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
2142 sctx->compute_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
2143 }
2144
2145 /* Set a base register address for user data constants in the given shader.
2146 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
2147 */
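/* E.g. si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
 * R_00B330_SPI_SHADER_USER_DATA_ES_0) redirects all VS user-data pointers
 * to the ES registers, which is what si_shader_change_notify() below does
 * when a geometry shader is bound.
 */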
2148 static void si_set_user_data_base(struct si_context *sctx,
2149 unsigned shader, uint32_t new_base)
2150 {
2151 uint32_t *base = &sctx->shader_pointers.sh_base[shader];
2152
2153 if (*base != new_base) {
2154 *base = new_base;
2155
2156 if (new_base) {
2157 si_mark_shader_pointers_dirty(sctx, shader);
2158
2159 if (shader == PIPE_SHADER_VERTEX)
2160 sctx->last_vs_state = ~0;
2161 }
2162 }
2163 }
2164
2165 /* This must be called when these shaders are changed from non-NULL to NULL
2166 * and vice versa:
2167 * - geometry shader
2168 * - tessellation control shader
2169 * - tessellation evaluation shader
2170 */
2171 void si_shader_change_notify(struct si_context *sctx)
2172 {
2173 /* VS can be bound as VS, ES, or LS. */
2174 if (sctx->tes_shader.cso) {
2175 if (sctx->b.chip_class >= GFX9) {
2176 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2177 R_00B430_SPI_SHADER_USER_DATA_LS_0);
2178 } else {
2179 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2180 R_00B530_SPI_SHADER_USER_DATA_LS_0);
2181 }
2182 } else if (sctx->gs_shader.cso) {
2183 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2184 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2185 } else {
2186 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2187 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2188 }
2189
2190 /* TES can be bound as ES, VS, or not bound. */
2191 if (sctx->tes_shader.cso) {
2192 if (sctx->gs_shader.cso)
2193 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
2194 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2195 else
2196 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
2197 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2198 } else {
2199 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
2200 }
2201 }
2202
2203 static void si_emit_shader_pointer(struct si_context *sctx,
2204 struct si_descriptors *desc,
2205 unsigned sh_base)
2206 {
2207 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
2208 uint64_t va;
2209
2210 if (!desc->buffer)
2211 return; /* the pointer is not used by current shaders */
2212
2213 va = desc->buffer->gpu_address +
2214 desc->buffer_offset;
2215
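/* SET_SH_REG takes a dword register index relative to SI_SH_REG_OFFSET,
 * hence the subtraction and the byte-to-dword shift (>> 2) below; the
 * 64-bit list address follows as two dwords.
 */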
2216 radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
2217 radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
2218 radeon_emit(cs, va);
2219 radeon_emit(cs, va >> 32);
2220 }
2221
2222 static void si_emit_global_shader_pointers(struct si_context *sctx,
2223 struct si_descriptors *descs)
2224 {
2225 si_emit_shader_pointer(sctx, descs,
2226 R_00B030_SPI_SHADER_USER_DATA_PS_0);
2227 si_emit_shader_pointer(sctx, descs,
2228 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2229
2230 if (sctx->b.chip_class >= GFX9) {
2231 /* GFX9 merged LS-HS and ES-GS. */
2232 if (descs == &sctx->descriptors[SI_DESCS_RW_BUFFERS]) {
2233 /* Set RW_BUFFERS in the special registers, so that
2234 * it's preloaded into s[0:1] instead of s[8:9].
2235 */
2236 si_emit_shader_pointer(sctx, descs,
2237 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS);
2238 si_emit_shader_pointer(sctx, descs,
2239 R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS);
2240 } else {
2241 /* Set BINDLESS_SAMPLERS_AND_IMAGES into s[10:11];
2242 * s[8:9] remains unused for now.
2243 */
2244 assert(descs == &sctx->bindless_descriptors);
2245 si_emit_shader_pointer(sctx, descs,
2246 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2247 si_emit_shader_pointer(sctx, descs,
2248 R_00B430_SPI_SHADER_USER_DATA_LS_0);
2249 }
2250 } else {
2251 si_emit_shader_pointer(sctx, descs,
2252 R_00B230_SPI_SHADER_USER_DATA_GS_0);
2253 si_emit_shader_pointer(sctx, descs,
2254 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2255 si_emit_shader_pointer(sctx, descs,
2256 R_00B430_SPI_SHADER_USER_DATA_HS_0);
2257 si_emit_shader_pointer(sctx, descs,
2258 R_00B530_SPI_SHADER_USER_DATA_LS_0);
2259 }
2260 }
2261
2262 void si_emit_graphics_shader_pointers(struct si_context *sctx,
2263 struct r600_atom *atom)
2264 {
2265 unsigned mask;
2266 uint32_t *sh_base = sctx->shader_pointers.sh_base;
2267 struct si_descriptors *descs;
2268
2269 descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
2270
2271 if (sctx->shader_pointers_dirty & (1 << SI_DESCS_RW_BUFFERS))
2272 si_emit_global_shader_pointers(sctx, descs);
2273
2274 mask = sctx->shader_pointers_dirty &
2275 u_bit_consecutive(SI_DESCS_FIRST_SHADER,
2276 SI_DESCS_FIRST_COMPUTE - SI_DESCS_FIRST_SHADER);
2277
2278 while (mask) {
2279 unsigned i = u_bit_scan(&mask);
2280 unsigned shader = (i - SI_DESCS_FIRST_SHADER) / SI_NUM_SHADER_DESCS;
2281 unsigned base = sh_base[shader];
2282
2283 if (base)
2284 si_emit_shader_pointer(sctx, descs + i, base);
2285 }
2286 sctx->shader_pointers_dirty &=
2287 ~u_bit_consecutive(SI_DESCS_RW_BUFFERS, SI_DESCS_FIRST_COMPUTE);
2288
2289 if (sctx->vertex_buffer_pointer_dirty) {
2290 si_emit_shader_pointer(sctx, &sctx->vertex_buffers,
2291 sh_base[PIPE_SHADER_VERTEX]);
2292 sctx->vertex_buffer_pointer_dirty = false;
2293 }
2294
2295 if (sctx->graphics_bindless_pointer_dirty) {
2296 si_emit_global_shader_pointers(sctx,
2297 &sctx->bindless_descriptors);
2298 sctx->graphics_bindless_pointer_dirty = false;
2299 }
2300 }
2301
2302 void si_emit_compute_shader_pointers(struct si_context *sctx)
2303 {
2304 unsigned base = R_00B900_COMPUTE_USER_DATA_0;
2305 struct si_descriptors *descs = sctx->descriptors;
2306 unsigned compute_mask =
2307 u_bit_consecutive(SI_DESCS_FIRST_COMPUTE, SI_NUM_SHADER_DESCS);
2308 unsigned mask = sctx->shader_pointers_dirty & compute_mask;
2309
2310 while (mask) {
2311 unsigned i = u_bit_scan(&mask);
2312
2313 si_emit_shader_pointer(sctx, descs + i, base);
2314 }
2315 sctx->shader_pointers_dirty &= ~compute_mask;
2316
2317 if (sctx->compute_bindless_pointer_dirty) {
2318 si_emit_shader_pointer(sctx, &sctx->bindless_descriptors, base);
2319 sctx->compute_bindless_pointer_dirty = false;
2320 }
2321 }
2322
2323 /* BINDLESS */
2324
2325 static void si_init_bindless_descriptors(struct si_context *sctx,
2326 struct si_descriptors *desc,
2327 unsigned shader_userdata_index,
2328 unsigned num_elements)
2329 {
2330 si_init_descriptors(sctx, desc, shader_userdata_index, 16, num_elements,
2331 0, 0, NULL);
2332 sctx->bindless_descriptors.num_active_slots = num_elements;
2333
2334 /* The first bindless descriptor is stored at slot 1, because 0 is not
2335 * considered to be a valid handle.
2336 */
2337 sctx->num_bindless_descriptors = 1;
2338 }
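/* Because slot 0 is never handed out, callers can treat a handle of 0 as
 * the failure code, e.g. (a sketch):
 *
 *     handle = si_create_bindless_descriptor(sctx, list, size);
 *     if (!handle)
 *             return 0; // allocation or upload failed
 */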
2339
2340 static void si_release_bindless_descriptors(struct si_context *sctx)
2341 {
2342 si_release_descriptors(&sctx->bindless_descriptors);
2343 }
2344
2345 static unsigned
2346 si_create_bindless_descriptor(struct si_context *sctx, uint32_t *desc_list,
2347 unsigned size)
2348 {
2349 struct si_descriptors *desc = &sctx->bindless_descriptors;
2350 unsigned desc_slot, desc_slot_offset;
2351
2352 /* Reserve a new slot for this bindless descriptor. */
2353 desc_slot = sctx->num_bindless_descriptors++;
2354
2355 if (desc_slot >= desc->num_elements) {
2356 /* The array of bindless descriptors is full; resize it. */
2357 unsigned slot_size = desc->element_dw_size * 4;
2358 unsigned new_num_elements = desc->num_elements * 2;
2359
2360 desc->list = REALLOC(desc->list, desc->num_elements * slot_size,
2361 new_num_elements * slot_size);
2362 desc->num_elements = new_num_elements;
2363 desc->num_active_slots = new_num_elements;
2364 }
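/* Growth example (a sketch): 1024 -> 2048 slots. REALLOC preserves the
 * old CPU copy, and si_upload_descriptors() below re-uploads the whole,
 * larger array into a new buffer.
 */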
2365
2366 /* For simplicity, sampler and image bindless descriptors use fixed
2367 * 16-dword slots for now. Image descriptors only need 8 dwords, but this
2368 * doesn't really matter because no real apps use image handles.
2369 */
2370 desc_slot_offset = desc_slot * 16;
2371
2372 /* Copy the descriptor into the array. */
2373 memcpy(desc->list + desc_slot_offset, desc_list, size);
2374
2375 /* Re-upload the whole array of bindless descriptors into a new buffer.
2376 */
2377 if (!si_upload_descriptors(sctx, desc, &sctx->shader_pointers.atom))
2378 return 0;
2379
2380 /* Make sure to re-emit the shader pointers for all stages. */
2381 sctx->graphics_bindless_pointer_dirty = true;
2382 sctx->compute_bindless_pointer_dirty = true;
2383
2384 return desc_slot;
2385 }
2386
2387 static void si_invalidate_bindless_buf_desc(struct si_context *sctx,
2388 unsigned desc_slot,
2389 struct pipe_resource *resource,
2390 uint64_t offset,
2391 bool *desc_dirty)
2392 {
2393 struct si_descriptors *desc = &sctx->bindless_descriptors;
2394 struct r600_resource *buf = r600_resource(resource);
2395 unsigned desc_slot_offset = desc_slot * 16;
2396 uint32_t *desc_list = desc->list + desc_slot_offset + 4;
2397 uint64_t old_desc_va;
2398
2399 assert(resource->target == PIPE_BUFFER);
2400
2401 /* Retrieve the old buffer addr from the descriptor. */
2402 old_desc_va = desc_list[0];
2403 old_desc_va |= ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc_list[1]) << 32);
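/* The buffer VA is split across the first two descriptor dwords: dword 0
 * holds the low 32 bits, and the BASE_ADDRESS_HI field of dword 1 holds
 * the high bits; the two lines above reassemble it.
 */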
2404
2405 if (old_desc_va != buf->gpu_address + offset) {
2406 /* The buffer was invalidated while the handle wasn't
2407 * resident; update the descriptor and the dirty flag.
2408 */
2409 si_set_buf_desc_address(buf, offset, &desc_list[0]);
2410
2411 *desc_dirty = true;
2412 sctx->bindless_descriptors_dirty = true;
2413 }
2414 }
2415
2416 static uint64_t si_create_texture_handle(struct pipe_context *ctx,
2417 struct pipe_sampler_view *view,
2418 const struct pipe_sampler_state *state)
2419 {
2420 struct si_sampler_view *sview = (struct si_sampler_view *)view;
2421 struct si_context *sctx = (struct si_context *)ctx;
2422 struct si_texture_handle *tex_handle;
2423 struct si_sampler_state *sstate;
2424 uint32_t desc_list[16];
2425 uint64_t handle;
2426
2427 tex_handle = CALLOC_STRUCT(si_texture_handle);
2428 if (!tex_handle)
2429 return 0;
2430
2431 memset(desc_list, 0, sizeof(desc_list));
2432 si_init_descriptor_list(&desc_list[0], 16, 1, null_texture_descriptor);
2433
2434 sstate = ctx->create_sampler_state(ctx, state);
2435 if (!sstate) {
2436 FREE(tex_handle);
2437 return 0;
2438 }
2439
2440 si_set_sampler_view_desc(sctx, sview, sstate, &desc_list[0]);
2441 memcpy(&tex_handle->sstate, sstate, sizeof(*sstate));
2442 ctx->delete_sampler_state(ctx, sstate);
2443
2444 tex_handle->desc_slot = si_create_bindless_descriptor(sctx, desc_list,
2445 sizeof(desc_list));
2446 if (!tex_handle->desc_slot) {
2447 FREE(tex_handle);
2448 return 0;
2449 }
2450
2451 handle = tex_handle->desc_slot;
2452
2453 if (!_mesa_hash_table_insert(sctx->tex_handles, (void *)handle,
2454 tex_handle)) {
2455 FREE(tex_handle);
2456 return 0;
2457 }
2458
2459 pipe_sampler_view_reference(&tex_handle->view, view);
2460
2461 r600_resource(sview->base.texture)->texture_handle_allocated = true;
2462
2463 return handle;
2464 }
2465
2466 static void si_delete_texture_handle(struct pipe_context *ctx, uint64_t handle)
2467 {
2468 struct si_context *sctx = (struct si_context *)ctx;
2469 struct si_texture_handle *tex_handle;
2470 struct hash_entry *entry;
2471
2472 entry = _mesa_hash_table_search(sctx->tex_handles, (void *)handle);
2473 if (!entry)
2474 return;
2475
2476 tex_handle = (struct si_texture_handle *)entry->data;
2477
2478 pipe_sampler_view_reference(&tex_handle->view, NULL);
2479 _mesa_hash_table_remove(sctx->tex_handles, entry);
2480 FREE(tex_handle);
2481 }
2482
2483 static void si_make_texture_handle_resident(struct pipe_context *ctx,
2484 uint64_t handle, bool resident)
2485 {
2486 struct si_context *sctx = (struct si_context *)ctx;
2487 struct si_texture_handle *tex_handle;
2488 struct si_sampler_view *sview;
2489 struct hash_entry *entry;
2490
2491 entry = _mesa_hash_table_search(sctx->tex_handles, (void *)handle);
2492 if (!entry)
2493 return;
2494
2495 tex_handle = (struct si_texture_handle *)entry->data;
2496 sview = (struct si_sampler_view *)tex_handle->view;
2497
2498 if (resident) {
2499 if (sview->base.texture->target != PIPE_BUFFER) {
2500 struct r600_texture *rtex =
2501 (struct r600_texture *)sview->base.texture;
2502
2503 if (depth_needs_decompression(rtex)) {
2504 util_dynarray_append(
2505 &sctx->resident_tex_needs_depth_decompress,
2506 struct si_texture_handle *,
2507 tex_handle);
2508 }
2509
2510 if (color_needs_decompression(rtex)) {
2511 util_dynarray_append(
2512 &sctx->resident_tex_needs_color_decompress,
2513 struct si_texture_handle *,
2514 tex_handle);
2515 }
2516
2517 if (rtex->dcc_offset &&
2518 p_atomic_read(&rtex->framebuffers_bound))
2519 sctx->need_check_render_feedback = true;
2520
2521 /* Re-upload the descriptor if it has been updated
2522 * while it wasn't resident.
2523 */
2524 si_update_resident_texture_descriptor(sctx, tex_handle);
2525 if (tex_handle->desc_dirty)
2526 sctx->bindless_descriptors_dirty = true;
2527 } else {
2528 si_invalidate_bindless_buf_desc(sctx,
2529 tex_handle->desc_slot,
2530 sview->base.texture,
2531 sview->base.u.buf.offset,
2532 &tex_handle->desc_dirty);
2533 }
2534
2535 /* Add the texture handle to the per-context list. */
2536 util_dynarray_append(&sctx->resident_tex_handles,
2537 struct si_texture_handle *, tex_handle);
2538
2539 /* Add the buffers to the current CS in case si_begin_new_cs()
2540 * is not going to be called.
2541 */
2542 si_sampler_view_add_buffer(sctx, sview->base.texture,
2543 RADEON_USAGE_READ,
2544 sview->is_stencil_sampler, false);
2545 } else {
2546 /* Remove the texture handle from the per-context list. */
2547 util_dynarray_delete_unordered(&sctx->resident_tex_handles,
2548 struct si_texture_handle *,
2549 tex_handle);
2550
2551 if (sview->base.texture->target != PIPE_BUFFER) {
2552 util_dynarray_delete_unordered(
2553 &sctx->resident_tex_needs_depth_decompress,
2554 struct si_texture_handle *, tex_handle);
2555
2556 util_dynarray_delete_unordered(
2557 &sctx->resident_tex_needs_color_decompress,
2558 struct si_texture_handle *, tex_handle);
2559 }
2560 }
2561 }
2562
2563 static uint64_t si_create_image_handle(struct pipe_context *ctx,
2564 const struct pipe_image_view *view)
2565 {
2566 struct si_context *sctx = (struct si_context *)ctx;
2567 struct si_image_handle *img_handle;
2568 uint32_t desc_list[8];
2569 uint64_t handle;
2570
2571 if (!view || !view->resource)
2572 return 0;
2573
2574 img_handle = CALLOC_STRUCT(si_image_handle);
2575 if (!img_handle)
2576 return 0;
2577
2578 memset(desc_list, 0, sizeof(desc_list));
2579 si_init_descriptor_list(&desc_list[0], 8, 1, null_image_descriptor);
2580
2581 si_set_shader_image_desc(sctx, view, false, &desc_list[0]);
2582
2583 img_handle->desc_slot = si_create_bindless_descriptor(sctx, desc_list,
2584 sizeof(desc_list));
2585 if (!img_handle->desc_slot) {
2586 FREE(img_handle);
2587 return 0;
2588 }
2589
2590 handle = img_handle->desc_slot;
2591
2592 if (!_mesa_hash_table_insert(sctx->img_handles, (void *)handle,
2593 img_handle)) {
2594 FREE(img_handle);
2595 return 0;
2596 }
2597
2598 util_copy_image_view(&img_handle->view, view);
2599
2600 r600_resource(view->resource)->image_handle_allocated = true;
2601
2602 return handle;
2603 }
2604
2605 static void si_delete_image_handle(struct pipe_context *ctx, uint64_t handle)
2606 {
2607 struct si_context *sctx = (struct si_context *)ctx;
2608 struct si_image_handle *img_handle;
2609 struct hash_entry *entry;
2610
2611 entry = _mesa_hash_table_search(sctx->img_handles, (void *)handle);
2612 if (!entry)
2613 return;
2614
2615 img_handle = (struct si_image_handle *)entry->data;
2616
2617 util_copy_image_view(&img_handle->view, NULL);
2618 _mesa_hash_table_remove(sctx->img_handles, entry);
2619 FREE(img_handle);
2620 }
2621
2622 static void si_make_image_handle_resident(struct pipe_context *ctx,
2623 uint64_t handle, unsigned access,
2624 bool resident)
2625 {
2626 struct si_context *sctx = (struct si_context *)ctx;
2627 struct si_image_handle *img_handle;
2628 struct pipe_image_view *view;
2629 struct r600_resource *res;
2630 struct hash_entry *entry;
2631
2632 entry = _mesa_hash_table_search(sctx->img_handles, (void *)handle);
2633 if (!entry)
2634 return;
2635
2636 img_handle = (struct si_image_handle *)entry->data;
2637 view = &img_handle->view;
2638 res = (struct r600_resource *)view->resource;
2639
2640 if (resident) {
2641 if (res->b.b.target != PIPE_BUFFER) {
2642 struct r600_texture *rtex = (struct r600_texture *)res;
2643 unsigned level = view->u.tex.level;
2644
2645 if (color_needs_decompression(rtex)) {
2646 util_dynarray_append(
2647 &sctx->resident_img_needs_color_decompress,
2648 struct si_image_handle *,
2649 img_handle);
2650 }
2651
2652 if (vi_dcc_enabled(rtex, level) &&
2653 p_atomic_read(&rtex->framebuffers_bound))
2654 sctx->need_check_render_feedback = true;
2655
2656 /* Re-upload the descriptor if it has been updated
2657 * while it wasn't resident.
2658 */
2659 si_update_resident_image_descriptor(sctx, img_handle);
2660 if (img_handle->desc_dirty)
2661 sctx->bindless_descriptors_dirty = true;
2662
2663 } else {
2664 si_invalidate_bindless_buf_desc(sctx,
2665 img_handle->desc_slot,
2666 view->resource,
2667 view->u.buf.offset,
2668 &img_handle->desc_dirty);
2669 }
2670
2671 /* Add the image handle to the per-context list. */
2672 util_dynarray_append(&sctx->resident_img_handles,
2673 struct si_image_handle *, img_handle);
2674
2675 /* Add the buffers to the current CS in case si_begin_new_cs()
2676 * is not going to be called.
2677 */
2678 si_sampler_view_add_buffer(sctx, view->resource,
2679 (access & PIPE_IMAGE_ACCESS_WRITE) ?
2680 RADEON_USAGE_READWRITE :
2681 RADEON_USAGE_READ, false, false);
2682 } else {
2683 /* Remove the image handle from the per-context list. */
2684 util_dynarray_delete_unordered(&sctx->resident_img_handles,
2685 struct si_image_handle *,
2686 img_handle);
2687
2688 if (res->b.b.target != PIPE_BUFFER) {
2689 util_dynarray_delete_unordered(
2690 &sctx->resident_img_needs_color_decompress,
2691 struct si_image_handle *,
2692 img_handle);
2693 }
2694 }
2695 }
2696
2697
2698 void si_all_resident_buffers_begin_new_cs(struct si_context *sctx)
2699 {
2700 unsigned num_resident_tex_handles, num_resident_img_handles;
2701
2702 num_resident_tex_handles = sctx->resident_tex_handles.size /
2703 sizeof(struct si_texture_handle *);
2704 num_resident_img_handles = sctx->resident_img_handles.size /
2705 sizeof(struct si_image_handle *);
2706
2707 /* Add all resident texture handles. */
2708 util_dynarray_foreach(&sctx->resident_tex_handles,
2709 struct si_texture_handle *, tex_handle) {
2710 struct si_sampler_view *sview =
2711 (struct si_sampler_view *)(*tex_handle)->view;
2712
2713 si_sampler_view_add_buffer(sctx, sview->base.texture,
2714 RADEON_USAGE_READ,
2715 sview->is_stencil_sampler, false);
2716 }
2717
2718 /* Add all resident image handles. */
2719 util_dynarray_foreach(&sctx->resident_img_handles,
2720 struct si_image_handle *, img_handle) {
2721 struct pipe_image_view *view = &(*img_handle)->view;
2722
2723 si_sampler_view_add_buffer(sctx, view->resource,
2724 RADEON_USAGE_READWRITE,
2725 false, false);
2726 }
2727
2728 sctx->b.num_resident_handles += num_resident_tex_handles +
2729 num_resident_img_handles;
2730 }
2731
2732 /* INIT/DEINIT/UPLOAD */
2733
2734 /* GFX9 has only 4KB of CE RAM, while previous chips had 32KB. In order
2735 * to make CE RAM as useful as possible, this defines limits
2736 * for the number of slots that can be in CE RAM on GFX9. If a shader
2737 * is using more, descriptors will be uploaded to memory directly and
2738 * CE won't be used.
2739 *
2740 * These numbers are based on shader-db.
2741 */
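/* Rough budget check (a sketch): one 16-dword sampler slot is 64 bytes,
 * so the 24 fragment-shader samplers alone occupy 24 * 64 = 1536 of the
 * 4096 bytes of CE RAM on GFX9.
 */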
2742 static unsigned gfx9_max_ce_samplers[SI_NUM_SHADERS] = {
2743 [PIPE_SHADER_VERTEX] = 0,
2744 [PIPE_SHADER_TESS_CTRL] = 0,
2745 [PIPE_SHADER_TESS_EVAL] = 1,
2746 [PIPE_SHADER_GEOMETRY] = 0,
2747 [PIPE_SHADER_FRAGMENT] = 24,
2748 [PIPE_SHADER_COMPUTE] = 16,
2749 };
2750 static unsigned gfx9_max_ce_images[SI_NUM_SHADERS] = {
2751 /* these must be even due to slot alignment */
2752 [PIPE_SHADER_VERTEX] = 0,
2753 [PIPE_SHADER_TESS_CTRL] = 0,
2754 [PIPE_SHADER_TESS_EVAL] = 0,
2755 [PIPE_SHADER_GEOMETRY] = 0,
2756 [PIPE_SHADER_FRAGMENT] = 2,
2757 [PIPE_SHADER_COMPUTE] = 8,
2758 };
2759 static unsigned gfx9_max_ce_const_buffers[SI_NUM_SHADERS] = {
2760 [PIPE_SHADER_VERTEX] = 9,
2761 [PIPE_SHADER_TESS_CTRL] = 3,
2762 [PIPE_SHADER_TESS_EVAL] = 5,
2763 [PIPE_SHADER_GEOMETRY] = 0,
2764 [PIPE_SHADER_FRAGMENT] = 8,
2765 [PIPE_SHADER_COMPUTE] = 6,
2766 };
2767 static unsigned gfx9_max_ce_shader_buffers[SI_NUM_SHADERS] = {
2768 [PIPE_SHADER_VERTEX] = 0,
2769 [PIPE_SHADER_TESS_CTRL] = 0,
2770 [PIPE_SHADER_TESS_EVAL] = 0,
2771 [PIPE_SHADER_GEOMETRY] = 0,
2772 [PIPE_SHADER_FRAGMENT] = 12,
2773 [PIPE_SHADER_COMPUTE] = 13,
2774 };
2775
2776 void si_init_all_descriptors(struct si_context *sctx)
2777 {
2778 int i;
2779 unsigned ce_offset = 0;
2780
2781 STATIC_ASSERT(GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS % 2 == 0);
2782 STATIC_ASSERT(GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS % 2 == 0);
2783
2784 for (i = 0; i < SI_NUM_SHADERS; i++) {
2785 bool gfx9_tcs = false;
2786 bool gfx9_gs = false;
2787 unsigned num_sampler_slots = SI_NUM_IMAGES / 2 + SI_NUM_SAMPLERS;
2788 unsigned num_buffer_slots = SI_NUM_SHADER_BUFFERS + SI_NUM_CONST_BUFFERS;
2789
2790 unsigned first_sampler_ce_slot = 0;
2791 unsigned num_sampler_ce_slots = num_sampler_slots;
2792
2793 unsigned first_buffer_ce_slot = 0;
2794 unsigned num_buffer_ce_slots = num_buffer_slots;
2795
2796 /* Adjust CE slot ranges based on GFX9 CE RAM limits. */
2797 if (sctx->b.chip_class >= GFX9) {
2798 gfx9_tcs = i == PIPE_SHADER_TESS_CTRL;
2799 gfx9_gs = i == PIPE_SHADER_GEOMETRY;
2800
2801 first_sampler_ce_slot =
2802 si_get_image_slot(gfx9_max_ce_images[i] - 1) / 2;
2803 num_sampler_ce_slots = gfx9_max_ce_images[i] / 2 +
2804 gfx9_max_ce_samplers[i];
2805
2806 first_buffer_ce_slot =
2807 si_get_shaderbuf_slot(gfx9_max_ce_shader_buffers[i] - 1);
2808 num_buffer_ce_slots = gfx9_max_ce_shader_buffers[i] +
2809 gfx9_max_ce_const_buffers[i];
2810 }
2811
2812 si_init_buffer_resources(sctx, &sctx->const_and_shader_buffers[i],
2813 si_const_and_shader_buffer_descriptors(sctx, i),
2814 num_buffer_slots,
2815 first_buffer_ce_slot, num_buffer_ce_slots,
2816 gfx9_tcs ? GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS :
2817 gfx9_gs ? GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS :
2818 SI_SGPR_CONST_AND_SHADER_BUFFERS,
2819 RADEON_USAGE_READWRITE,
2820 RADEON_USAGE_READ,
2821 RADEON_PRIO_SHADER_RW_BUFFER,
2822 RADEON_PRIO_CONST_BUFFER,
2823 &ce_offset);
2824
2825 struct si_descriptors *desc = si_sampler_and_image_descriptors(sctx, i);
2826 si_init_descriptors(sctx, desc,
2827 gfx9_tcs ? GFX9_SGPR_TCS_SAMPLERS_AND_IMAGES :
2828 gfx9_gs ? GFX9_SGPR_GS_SAMPLERS_AND_IMAGES :
2829 SI_SGPR_SAMPLERS_AND_IMAGES,
2830 16, num_sampler_slots,
2831 first_sampler_ce_slot, num_sampler_ce_slots,
2832 &ce_offset);
2833
2834 int j;
2835 for (j = 0; j < SI_NUM_IMAGES; j++)
2836 memcpy(desc->list + j * 8, null_image_descriptor, 8 * 4);
2837 for (; j < SI_NUM_IMAGES + SI_NUM_SAMPLERS * 2; j++)
2838 memcpy(desc->list + j * 8, null_texture_descriptor, 8 * 4);
2839 }
2840
2841 si_init_buffer_resources(sctx, &sctx->rw_buffers,
2842 &sctx->descriptors[SI_DESCS_RW_BUFFERS],
2843 SI_NUM_RW_BUFFERS, 0, SI_NUM_RW_BUFFERS,
2844 SI_SGPR_RW_BUFFERS,
2845 /* The second set of usage/priority is used by
2846 * const buffers in RW buffer slots. */
2847 RADEON_USAGE_READWRITE, RADEON_USAGE_READ,
2848 RADEON_PRIO_SHADER_RINGS, RADEON_PRIO_CONST_BUFFER,
2849 &ce_offset);
2850 sctx->descriptors[SI_DESCS_RW_BUFFERS].num_active_slots = SI_NUM_RW_BUFFERS;
2851
2852 si_init_descriptors(sctx, &sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
2853 4, SI_NUM_VERTEX_BUFFERS, 0, 0, NULL);
2854 FREE(sctx->vertex_buffers.list); /* not used */
2855 sctx->vertex_buffers.list = NULL;
2856
2857 /* Initialize an array of 1024 bindless descriptors; when the limit is
2858 * reached, make it larger and re-upload the whole array.
2859 */
2860 si_init_bindless_descriptors(sctx, &sctx->bindless_descriptors,
2861 SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES,
2862 1024);
2863
2864 sctx->descriptors_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
2865 sctx->total_ce_ram_allocated = ce_offset;
2866
2867 assert(ce_offset <= si_ce_ram_size(sctx));
2868
2869 /* Set pipe_context functions. */
2870 sctx->b.b.bind_sampler_states = si_bind_sampler_states;
2871 sctx->b.b.set_shader_images = si_set_shader_images;
2872 sctx->b.b.set_constant_buffer = si_pipe_set_constant_buffer;
2873 sctx->b.b.set_polygon_stipple = si_set_polygon_stipple;
2874 sctx->b.b.set_shader_buffers = si_set_shader_buffers;
2875 sctx->b.b.set_sampler_views = si_set_sampler_views;
2876 sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
2877 sctx->b.b.create_texture_handle = si_create_texture_handle;
2878 sctx->b.b.delete_texture_handle = si_delete_texture_handle;
2879 sctx->b.b.make_texture_handle_resident = si_make_texture_handle_resident;
2880 sctx->b.b.create_image_handle = si_create_image_handle;
2881 sctx->b.b.delete_image_handle = si_delete_image_handle;
2882 sctx->b.b.make_image_handle_resident = si_make_image_handle_resident;
2883 sctx->b.invalidate_buffer = si_invalidate_buffer;
2884 sctx->b.rebind_buffer = si_rebind_buffer;
2885
2886 /* Shader user data. */
2887 si_init_atom(sctx, &sctx->shader_pointers.atom, &sctx->atoms.s.shader_pointers,
2888 si_emit_graphics_shader_pointers);
2889
2890 /* Set default and immutable mappings. */
2891 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
2892
2893 if (sctx->b.chip_class >= GFX9) {
2894 si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL,
2895 R_00B430_SPI_SHADER_USER_DATA_LS_0);
2896 si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY,
2897 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2898 } else {
2899 si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL,
2900 R_00B430_SPI_SHADER_USER_DATA_HS_0);
2901 si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY,
2902 R_00B230_SPI_SHADER_USER_DATA_GS_0);
2903 }
2904 si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
2905 }
2906
2907 static bool si_upload_shader_descriptors(struct si_context *sctx, unsigned mask)
2908 {
2909 unsigned dirty = sctx->descriptors_dirty & mask;
2910
2911 /* Assume nothing will go wrong: */
2912 sctx->shader_pointers_dirty |= dirty;
2913
2914 while (dirty) {
2915 unsigned i = u_bit_scan(&dirty);
2916
2917 if (!si_upload_descriptors(sctx, &sctx->descriptors[i],
2918 &sctx->shader_pointers.atom))
2919 return false;
2920 }
2921
2922 sctx->descriptors_dirty &= ~mask;
2923
2924 si_upload_bindless_descriptors(sctx);
2925
2926 return true;
2927 }
2928
2929 bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
2930 {
2931 const unsigned mask = u_bit_consecutive(0, SI_DESCS_FIRST_COMPUTE);
2932 return si_upload_shader_descriptors(sctx, mask);
2933 }
2934
2935 bool si_upload_compute_shader_descriptors(struct si_context *sctx)
2936 {
2937 /* This doesn't update rw_buffers, because that isn't needed for compute
2938 * shaders and the input buffer uses the same SGPRs anyway.
2939 */
2940 const unsigned mask = u_bit_consecutive(SI_DESCS_FIRST_COMPUTE,
2941 SI_NUM_DESCS - SI_DESCS_FIRST_COMPUTE);
2942 return si_upload_shader_descriptors(sctx, mask);
2943 }
2944
2945 void si_release_all_descriptors(struct si_context *sctx)
2946 {
2947 int i;
2948
2949 for (i = 0; i < SI_NUM_SHADERS; i++) {
2950 si_release_buffer_resources(&sctx->const_and_shader_buffers[i],
2951 si_const_and_shader_buffer_descriptors(sctx, i));
2952 si_release_sampler_views(&sctx->samplers[i].views);
2953 si_release_image_views(&sctx->images[i]);
2954 }
2955 si_release_buffer_resources(&sctx->rw_buffers,
2956 &sctx->descriptors[SI_DESCS_RW_BUFFERS]);
2957 for (i = 0; i < SI_NUM_VERTEX_BUFFERS; i++)
2958 pipe_vertex_buffer_unreference(&sctx->vertex_buffer[i]);
2959
2960 for (i = 0; i < SI_NUM_DESCS; ++i)
2961 si_release_descriptors(&sctx->descriptors[i]);
2962
2963 sctx->vertex_buffers.list = NULL; /* points into a mapped buffer */
2964 si_release_descriptors(&sctx->vertex_buffers);
2965 si_release_bindless_descriptors(sctx);
2966 }
2967
2968 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
2969 {
2970 int i;
2971
2972 for (i = 0; i < SI_NUM_SHADERS; i++) {
2973 si_buffer_resources_begin_new_cs(sctx, &sctx->const_and_shader_buffers[i]);
2974 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
2975 si_image_views_begin_new_cs(sctx, &sctx->images[i]);
2976 }
2977 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
2978 si_vertex_buffers_begin_new_cs(sctx);
2979
2980 for (i = 0; i < SI_NUM_DESCS; ++i)
2981 si_descriptors_begin_new_cs(sctx, &sctx->descriptors[i]);
2982 si_descriptors_begin_new_cs(sctx, &sctx->bindless_descriptors);
2983
2984 si_shader_pointers_begin_new_cs(sctx);
2985 }
2986
2987 void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
2988 uint64_t new_active_mask)
2989 {
2990 struct si_descriptors *desc = &sctx->descriptors[desc_idx];
2991
2992 /* Ignore no-op updates and updates that disable all slots. */
2993 if (!new_active_mask ||
2994 new_active_mask == u_bit_consecutive64(desc->first_active_slot,
2995 desc->num_active_slots))
2996 return;
2997
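/* Example (a sketch): new_active_mask = 0x3c yields first = 2, count = 4.
 * The scan consumes the bits, so the assert verifies that the mask
 * contained a single consecutive range.
 */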
2998 int first, count;
2999 u_bit_scan_consecutive_range64(&new_active_mask, &first, &count);
3000 assert(new_active_mask == 0);
3001
3002 /* Upload/dump descriptors if slots are being enabled. */
3003 if (first < desc->first_active_slot ||
3004 first + count > desc->first_active_slot + desc->num_active_slots)
3005 sctx->descriptors_dirty |= 1u << desc_idx;
3006
3007 /* Enable or disable CE for this descriptor array. */
3008 bool used_ce = desc->uses_ce;
3009 desc->uses_ce = desc->first_ce_slot <= first &&
3010 desc->first_ce_slot + desc->num_ce_slots >= first + count;
3011
3012 if (desc->uses_ce != used_ce) {
3013 /* Upload or dump descriptors if we're disabling or enabling CE,
3014 * respectively. */
3015 sctx->descriptors_dirty |= 1u << desc_idx;
3016
3017 /* If we're enabling CE, re-upload all descriptors to CE RAM.
3018 * When CE was disabled, uploads to CE RAM stopped.
3019 */
3020 if (desc->uses_ce) {
3021 desc->dirty_mask |=
3022 u_bit_consecutive64(desc->first_ce_slot,
3023 desc->num_ce_slots);
3024 }
3025 }
3026
3027 desc->first_active_slot = first;
3028 desc->num_active_slots = count;
3029 }
3030
3031 void si_set_active_descriptors_for_shader(struct si_context *sctx,
3032 struct si_shader_selector *sel)
3033 {
3034 if (!sel)
3035 return;
3036
3037 si_set_active_descriptors(sctx,
3038 si_const_and_shader_buffer_descriptors_idx(sel->type),
3039 sel->active_const_and_shader_buffers);
3040 si_set_active_descriptors(sctx,
3041 si_sampler_and_image_descriptors_idx(sel->type),
3042 sel->active_samplers_and_images);
3043 }