radeonsi/gfx10: implement si_set_{constant,shader}_buffer
src/gallium/drivers/radeonsi/si_descriptors.c
1 /*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 /* Resource binding slots and sampler states (each described with 8 or
26 * 4 dwords) are stored in lists in memory which is accessed by shaders
27 * using scalar load instructions.
28 *
29 * This file is responsible for managing such lists. It keeps a copy of all
30 * descriptors in CPU memory and re-uploads a whole list if some slots have
31 * been changed.
32 *
33 * This code is also responsible for updating shader pointers to those lists.
34 *
35 * Note that CP DMA can't be used for updating the lists, because a GPU hang
36 * could leave the list in a mid-IB state and the next IB would get wrong
37 * descriptors, making the whole context unusable at that point.
38 * (Register shadowing can't be used for the same reason.)
39 *
40 * Also, uploading descriptors to newly allocated memory doesn't require
41 * a KCACHE flush.
42 *
43 *
44 * Possible scenarios for one 16 dword image+sampler slot:
45 *
46 * | Image | w/ FMASK | Buffer | NULL
47 * [ 0: 3] Image[0:3] | Image[0:3] | Null[0:3] | Null[0:3]
48 * [ 4: 7] Image[4:7] | Image[4:7] | Buffer[0:3] | 0
49 * [ 8:11] Null[0:3] | Fmask[0:3] | Null[0:3] | Null[0:3]
50 * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
51 *
52 * FMASK implies MSAA, therefore no sampler state.
53 * Sampler states are never unbound except when FMASK is bound.
54 */
55
56 #include "si_pipe.h"
57 #include "sid.h"
58
59 #include "util/hash_table.h"
60 #include "util/u_idalloc.h"
61 #include "util/u_format.h"
62 #include "util/u_memory.h"
63 #include "util/u_upload_mgr.h"
64
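/* Illustrative sketch (not part of the driver): given the 16-dword
 * image+sampler slot layout described at the top of this file, the
 * sub-descriptors of one combined slot sit at fixed dword offsets in the
 * CPU copy of the list (si_descriptors::list). The helper names are
 * hypothetical.
 */
static inline uint32_t *example_image_part(uint32_t *list, unsigned desc_slot)
{
	return list + desc_slot * 16;      /* dwords [0:7]: image or buffer */
}

static inline uint32_t *example_fmask_part(uint32_t *list, unsigned desc_slot)
{
	return list + desc_slot * 16 + 8;  /* dwords [8:11], and [12:15] w/ FMASK */
}

static inline uint32_t *example_sampler_part(uint32_t *list, unsigned desc_slot)
{
	return list + desc_slot * 16 + 12; /* dwords [12:15]: sampler state */
}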
65
66 /* NULL image and buffer descriptor for textures (alpha = 1) and images
67 * (alpha = 0).
68 *
69 * For images, all fields must be zero except for the swizzle, which
70 * supports arbitrary combinations of 0s and 1s. The texture type must be
71 * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
72 *
73 * For buffers, all fields must be zero. If they are not, the hw hangs.
74 *
75 * This is the only reason why the buffer descriptor must be in words [4:7].
76 */
77 static uint32_t null_texture_descriptor[8] = {
78 0,
79 0,
80 0,
81 S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
82 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
83 /* the rest must contain zeros, which is also used by the buffer
84 * descriptor */
85 };
86
87 static uint32_t null_image_descriptor[8] = {
88 0,
89 0,
90 0,
91 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
92 /* the rest must contain zeros, which is also used by the buffer
93 * descriptor */
94 };
95
96 static uint64_t si_desc_extract_buffer_address(const uint32_t *desc)
97 {
98 uint64_t va = desc[0] |
99 ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
100
101 /* Sign-extend the 48-bit address. */
102 va <<= 16;
103 va = (int64_t)va >> 16;
104 return va;
105 }
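
/* Worked example (illustrative): desc[0] = 0x00001000 with a
 * BASE_ADDRESS_HI field of 0xFFFF packs to va = 0x0000FFFF00001000;
 * "va <<= 16" yields 0xFFFF000010000000 and the arithmetic ">> 16"
 * sign-extends it to 0xFFFFFFFF00001000. */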
106
107 static void si_init_descriptor_list(uint32_t *desc_list,
108 unsigned element_dw_size,
109 unsigned num_elements,
110 const uint32_t *null_descriptor)
111 {
112 int i;
113
114 /* Initialize the array to NULL descriptors if the element size is 8. */
115 if (null_descriptor) {
116 assert(element_dw_size % 8 == 0);
117 for (i = 0; i < num_elements * element_dw_size / 8; i++)
118 memcpy(desc_list + i * 8, null_descriptor, 8 * 4);
119 }
120 }
121
122 static void si_init_descriptors(struct si_descriptors *desc,
123 short shader_userdata_rel_index,
124 unsigned element_dw_size,
125 unsigned num_elements)
126 {
127 desc->list = CALLOC(num_elements, element_dw_size * 4);
128 desc->element_dw_size = element_dw_size;
129 desc->num_elements = num_elements;
130 desc->shader_userdata_offset = shader_userdata_rel_index * 4;
131 desc->slot_index_to_bind_directly = -1;
132 }
133
134 static void si_release_descriptors(struct si_descriptors *desc)
135 {
136 si_resource_reference(&desc->buffer, NULL);
137 FREE(desc->list);
138 }
139
140 static bool si_upload_descriptors(struct si_context *sctx,
141 struct si_descriptors *desc)
142 {
143 unsigned slot_size = desc->element_dw_size * 4;
144 unsigned first_slot_offset = desc->first_active_slot * slot_size;
145 unsigned upload_size = desc->num_active_slots * slot_size;
146
147 /* Skip the upload if no shader is using the descriptors. dirty_mask
148 * will stay dirty and the descriptors will be uploaded when there is
149 * a shader using them.
150 */
151 if (!upload_size)
152 return true;
153
154 /* If there is just one active descriptor, bind it directly. */
155 if ((int)desc->first_active_slot == desc->slot_index_to_bind_directly &&
156 desc->num_active_slots == 1) {
157 uint32_t *descriptor = &desc->list[desc->slot_index_to_bind_directly *
158 desc->element_dw_size];
159
160 /* The buffer is already in the buffer list. */
161 si_resource_reference(&desc->buffer, NULL);
162 desc->gpu_list = NULL;
163 desc->gpu_address = si_desc_extract_buffer_address(descriptor);
164 si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
165 return true;
166 }
167
168 uint32_t *ptr;
169 unsigned buffer_offset;
170 u_upload_alloc(sctx->b.const_uploader, first_slot_offset, upload_size,
171 si_optimal_tcc_alignment(sctx, upload_size),
172 &buffer_offset, (struct pipe_resource**)&desc->buffer,
173 (void**)&ptr);
174 if (!desc->buffer) {
175 desc->gpu_address = 0;
176 return false; /* skip the draw call */
177 }
178
179 util_memcpy_cpu_to_le32(ptr, (char*)desc->list + first_slot_offset,
180 upload_size);
181 	desc->gpu_list = ptr - first_slot_offset / 4; /* make gpu_list indexable like desc->list (slot 0 based) */
182
183 radeon_add_to_buffer_list(sctx, sctx->gfx_cs, desc->buffer,
184 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
185
186 /* The shader pointer should point to slot 0. */
187 buffer_offset -= first_slot_offset;
188 desc->gpu_address = desc->buffer->gpu_address + buffer_offset;
189
190 assert(desc->buffer->flags & RADEON_FLAG_32BIT);
191 assert((desc->buffer->gpu_address >> 32) == sctx->screen->info.address32_hi);
192 assert((desc->gpu_address >> 32) == sctx->screen->info.address32_hi);
193
194 si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
195 return true;
196 }
197
198 static void
199 si_descriptors_begin_new_cs(struct si_context *sctx, struct si_descriptors *desc)
200 {
201 if (!desc->buffer)
202 return;
203
204 radeon_add_to_buffer_list(sctx, sctx->gfx_cs, desc->buffer,
205 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
206 }
207
208 /* SAMPLER VIEWS */
209
210 static inline enum radeon_bo_priority
211 si_get_sampler_view_priority(struct si_resource *res)
212 {
213 if (res->b.b.target == PIPE_BUFFER)
214 return RADEON_PRIO_SAMPLER_BUFFER;
215
216 if (res->b.b.nr_samples > 1)
217 return RADEON_PRIO_SAMPLER_TEXTURE_MSAA;
218
219 return RADEON_PRIO_SAMPLER_TEXTURE;
220 }
221
222 static struct si_descriptors *
223 si_sampler_and_image_descriptors(struct si_context *sctx, unsigned shader)
224 {
225 return &sctx->descriptors[si_sampler_and_image_descriptors_idx(shader)];
226 }
227
228 static void si_release_sampler_views(struct si_samplers *samplers)
229 {
230 int i;
231
232 for (i = 0; i < ARRAY_SIZE(samplers->views); i++) {
233 pipe_sampler_view_reference(&samplers->views[i], NULL);
234 }
235 }
236
237 static void si_sampler_view_add_buffer(struct si_context *sctx,
238 struct pipe_resource *resource,
239 enum radeon_bo_usage usage,
240 bool is_stencil_sampler,
241 bool check_mem)
242 {
243 struct si_texture *tex = (struct si_texture*)resource;
244 enum radeon_bo_priority priority;
245
246 if (!resource)
247 return;
248
249 /* Use the flushed depth texture if direct sampling is unsupported. */
250 if (resource->target != PIPE_BUFFER &&
251 tex->is_depth && !si_can_sample_zs(tex, is_stencil_sampler))
252 tex = tex->flushed_depth_texture;
253
254 priority = si_get_sampler_view_priority(&tex->buffer);
255 radeon_add_to_gfx_buffer_list_check_mem(sctx, &tex->buffer, usage, priority,
256 check_mem);
257
258 if (resource->target == PIPE_BUFFER)
259 return;
260
261 /* Add separate DCC. */
262 if (tex->dcc_separate_buffer) {
263 radeon_add_to_gfx_buffer_list_check_mem(sctx, tex->dcc_separate_buffer,
264 usage, RADEON_PRIO_SEPARATE_META, check_mem);
265 }
266 }
267
268 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
269 struct si_samplers *samplers)
270 {
271 unsigned mask = samplers->enabled_mask;
272
273 /* Add buffers to the CS. */
274 while (mask) {
275 int i = u_bit_scan(&mask);
276 struct si_sampler_view *sview = (struct si_sampler_view *)samplers->views[i];
277
278 si_sampler_view_add_buffer(sctx, sview->base.texture,
279 RADEON_USAGE_READ,
280 sview->is_stencil_sampler, false);
281 }
282 }
283
284 /* Set buffer descriptor fields that can be changed by reallocations. */
285 static void si_set_buf_desc_address(struct si_resource *buf,
286 uint64_t offset, uint32_t *state)
287 {
288 uint64_t va = buf->gpu_address + offset;
289
290 state[0] = va;
291 state[1] &= C_008F04_BASE_ADDRESS_HI;
292 state[1] |= S_008F04_BASE_ADDRESS_HI(va >> 32);
293 }
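
/* Worked example (illustrative): for va = 0x123456789000,
 * state[0] = 0x56789000 (address bits [31:0]) and the BASE_ADDRESS_HI
 * field of state[1] becomes 0x1234 (bits [47:32]). */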
294
295 /* Set texture descriptor fields that can be changed by reallocations.
296 *
297 * \param tex texture
298 * \param base_level_info information of the level of BASE_ADDRESS
299 * \param base_level the level of BASE_ADDRESS
300 * \param first_level pipe_sampler_view.u.tex.first_level
301 * \param block_width util_format_get_blockwidth()
302 * \param is_stencil select between separate Z & Stencil
303 * \param state descriptor to update
304 */
305 void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
306 struct si_texture *tex,
307 const struct legacy_surf_level *base_level_info,
308 unsigned base_level, unsigned first_level,
309 unsigned block_width, bool is_stencil,
310 uint32_t *state)
311 {
312 uint64_t va, meta_va = 0;
313
314 if (tex->is_depth && !si_can_sample_zs(tex, is_stencil)) {
315 tex = tex->flushed_depth_texture;
316 is_stencil = false;
317 }
318
319 va = tex->buffer.gpu_address;
320
321 if (sscreen->info.chip_class >= GFX9) {
322 /* Only stencil_offset needs to be added here. */
323 if (is_stencil)
324 va += tex->surface.u.gfx9.stencil_offset;
325 else
326 va += tex->surface.u.gfx9.surf_offset;
327 } else {
328 va += base_level_info->offset;
329 }
330
331 	state[0] = va >> 8; /* the hw takes the image base address in 256-byte units */
332 state[1] &= C_008F14_BASE_ADDRESS_HI;
333 state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);
334
335 /* Only macrotiled modes can set tile swizzle.
336 * GFX9 doesn't use (legacy) base_level_info.
337 */
338 if (sscreen->info.chip_class >= GFX9 ||
339 base_level_info->mode == RADEON_SURF_MODE_2D)
340 state[0] |= tex->surface.tile_swizzle;
341
342 if (sscreen->info.chip_class >= GFX8) {
343 state[6] &= C_008F28_COMPRESSION_EN;
344
345 if (vi_dcc_enabled(tex, first_level)) {
346 meta_va = (!tex->dcc_separate_buffer ? tex->buffer.gpu_address : 0) +
347 tex->dcc_offset;
348
349 if (sscreen->info.chip_class == GFX8) {
350 meta_va += base_level_info->dcc_offset;
351 assert(base_level_info->mode == RADEON_SURF_MODE_2D);
352 }
353
354 meta_va |= (uint32_t)tex->surface.tile_swizzle << 8;
355 } else if (vi_tc_compat_htile_enabled(tex, first_level)) {
356 meta_va = tex->buffer.gpu_address + tex->htile_offset;
357 }
358
359 if (meta_va)
360 state[6] |= S_008F28_COMPRESSION_EN(1);
361 }
362
363 if (sscreen->info.chip_class >= GFX8 && sscreen->info.chip_class <= GFX9)
364 state[7] = meta_va >> 8;
365
366 if (sscreen->info.chip_class >= GFX10) {
367 state[3] &= C_00A00C_SW_MODE;
368
369 if (is_stencil) {
370 state[3] |= S_00A00C_SW_MODE(tex->surface.u.gfx9.stencil.swizzle_mode);
371 } else {
372 state[3] |= S_00A00C_SW_MODE(tex->surface.u.gfx9.surf.swizzle_mode);
373 }
374
375 state[6] &= C_00A018_META_DATA_ADDRESS_LO &
376 C_00A018_META_PIPE_ALIGNED;
377
378 if (meta_va) {
379 struct gfx9_surf_meta_flags meta;
380
381 if (tex->dcc_offset)
382 meta = tex->surface.u.gfx9.dcc;
383 else
384 meta = tex->surface.u.gfx9.htile;
385
386 state[6] |= S_00A018_META_PIPE_ALIGNED(meta.pipe_aligned) |
387 S_00A018_META_DATA_ADDRESS_LO(meta_va >> 8);
388 }
389
390 state[7] = meta_va >> 16;
391 } else if (sscreen->info.chip_class >= GFX9) {
392 state[3] &= C_008F1C_SW_MODE;
393 state[4] &= C_008F20_PITCH;
394
395 if (is_stencil) {
396 state[3] |= S_008F1C_SW_MODE(tex->surface.u.gfx9.stencil.swizzle_mode);
397 state[4] |= S_008F20_PITCH(tex->surface.u.gfx9.stencil.epitch);
398 } else {
399 state[3] |= S_008F1C_SW_MODE(tex->surface.u.gfx9.surf.swizzle_mode);
400 state[4] |= S_008F20_PITCH(tex->surface.u.gfx9.surf.epitch);
401 }
402
403 state[5] &= C_008F24_META_DATA_ADDRESS &
404 C_008F24_META_PIPE_ALIGNED &
405 C_008F24_META_RB_ALIGNED;
406 if (meta_va) {
407 struct gfx9_surf_meta_flags meta;
408
409 if (tex->dcc_offset)
410 meta = tex->surface.u.gfx9.dcc;
411 else
412 meta = tex->surface.u.gfx9.htile;
413
414 state[5] |= S_008F24_META_DATA_ADDRESS(meta_va >> 40) |
415 S_008F24_META_PIPE_ALIGNED(meta.pipe_aligned) |
416 S_008F24_META_RB_ALIGNED(meta.rb_aligned);
417 }
418 } else {
419 /* GFX6-GFX8 */
420 unsigned pitch = base_level_info->nblk_x * block_width;
421 unsigned index = si_tile_mode_index(tex, base_level, is_stencil);
422
423 state[3] &= C_008F1C_TILING_INDEX;
424 state[3] |= S_008F1C_TILING_INDEX(index);
425 state[4] &= C_008F20_PITCH;
426 state[4] |= S_008F20_PITCH(pitch - 1);
427 }
428 }
429
430 static void si_set_sampler_state_desc(struct si_sampler_state *sstate,
431 struct si_sampler_view *sview,
432 struct si_texture *tex,
433 uint32_t *desc)
434 {
435 if (sview && sview->is_integer)
436 memcpy(desc, sstate->integer_val, 4*4);
437 else if (tex && tex->upgraded_depth &&
438 (!sview || !sview->is_stencil_sampler))
439 memcpy(desc, sstate->upgraded_depth_val, 4*4);
440 else
441 memcpy(desc, sstate->val, 4*4);
442 }
443
444 static void si_set_sampler_view_desc(struct si_context *sctx,
445 struct si_sampler_view *sview,
446 struct si_sampler_state *sstate,
447 uint32_t *desc)
448 {
449 struct pipe_sampler_view *view = &sview->base;
450 struct si_texture *tex = (struct si_texture *)view->texture;
451 bool is_buffer = tex->buffer.b.b.target == PIPE_BUFFER;
452
453 if (unlikely(!is_buffer && sview->dcc_incompatible)) {
454 if (vi_dcc_enabled(tex, view->u.tex.first_level))
455 if (!si_texture_disable_dcc(sctx, tex))
456 si_decompress_dcc(sctx, tex);
457
458 sview->dcc_incompatible = false;
459 }
460
461 assert(tex); /* views with texture == NULL aren't supported */
462 memcpy(desc, sview->state, 8*4);
463
464 if (is_buffer) {
465 si_set_buf_desc_address(&tex->buffer,
466 sview->base.u.buf.offset,
467 desc + 4);
468 } else {
469 bool is_separate_stencil = tex->db_compatible &&
470 sview->is_stencil_sampler;
471
472 si_set_mutable_tex_desc_fields(sctx->screen, tex,
473 sview->base_level_info,
474 sview->base_level,
475 sview->base.u.tex.first_level,
476 sview->block_width,
477 is_separate_stencil,
478 desc);
479 }
480
481 if (!is_buffer && tex->surface.fmask_size) {
482 memcpy(desc + 8, sview->fmask_state, 8*4);
483 } else {
484 /* Disable FMASK and bind sampler state in [12:15]. */
485 memcpy(desc + 8, null_texture_descriptor, 4*4);
486
487 if (sstate)
488 si_set_sampler_state_desc(sstate, sview,
489 is_buffer ? NULL : tex,
490 desc + 12);
491 }
492 }
493
494 static bool color_needs_decompression(struct si_texture *tex)
495 {
496 return tex->surface.fmask_size ||
497 (tex->dirty_level_mask &&
498 (tex->cmask_buffer || tex->dcc_offset));
499 }
500
501 static bool depth_needs_decompression(struct si_texture *tex)
502 {
503 /* If the depth/stencil texture is TC-compatible, no decompression
504 * will be done. The decompression function will only flush DB caches
505 * to make it coherent with shaders. That's necessary because the driver
506 * doesn't flush DB caches in any other case.
507 */
508 return tex->db_compatible;
509 }
510
511 static void si_set_sampler_view(struct si_context *sctx,
512 unsigned shader,
513 unsigned slot, struct pipe_sampler_view *view,
514 bool disallow_early_out)
515 {
516 struct si_samplers *samplers = &sctx->samplers[shader];
517 struct si_sampler_view *sview = (struct si_sampler_view*)view;
518 struct si_descriptors *descs = si_sampler_and_image_descriptors(sctx, shader);
519 unsigned desc_slot = si_get_sampler_slot(slot);
520 uint32_t *desc = descs->list + desc_slot * 16;
521
522 if (samplers->views[slot] == view && !disallow_early_out)
523 return;
524
525 if (view) {
526 struct si_texture *tex = (struct si_texture *)view->texture;
527
528 si_set_sampler_view_desc(sctx, sview,
529 samplers->sampler_states[slot], desc);
530
531 if (tex->buffer.b.b.target == PIPE_BUFFER) {
532 tex->buffer.bind_history |= PIPE_BIND_SAMPLER_VIEW;
533 samplers->needs_depth_decompress_mask &= ~(1u << slot);
534 samplers->needs_color_decompress_mask &= ~(1u << slot);
535 } else {
536 if (depth_needs_decompression(tex)) {
537 samplers->needs_depth_decompress_mask |= 1u << slot;
538 } else {
539 samplers->needs_depth_decompress_mask &= ~(1u << slot);
540 }
541 if (color_needs_decompression(tex)) {
542 samplers->needs_color_decompress_mask |= 1u << slot;
543 } else {
544 samplers->needs_color_decompress_mask &= ~(1u << slot);
545 }
546
547 if (tex->dcc_offset &&
548 p_atomic_read(&tex->framebuffers_bound))
549 sctx->need_check_render_feedback = true;
550 }
551
552 pipe_sampler_view_reference(&samplers->views[slot], view);
553 samplers->enabled_mask |= 1u << slot;
554
555 /* Since this can flush, it must be done after enabled_mask is
556 * updated. */
557 si_sampler_view_add_buffer(sctx, view->texture,
558 RADEON_USAGE_READ,
559 sview->is_stencil_sampler, true);
560 } else {
561 pipe_sampler_view_reference(&samplers->views[slot], NULL);
562 memcpy(desc, null_texture_descriptor, 8*4);
563 /* Only clear the lower dwords of FMASK. */
564 memcpy(desc + 8, null_texture_descriptor, 4*4);
565 /* Re-set the sampler state if we are transitioning from FMASK. */
566 if (samplers->sampler_states[slot])
567 si_set_sampler_state_desc(samplers->sampler_states[slot], NULL, NULL,
568 desc + 12);
569
570 samplers->enabled_mask &= ~(1u << slot);
571 samplers->needs_depth_decompress_mask &= ~(1u << slot);
572 samplers->needs_color_decompress_mask &= ~(1u << slot);
573 }
574
575 sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
576 }
577
578 static void si_update_shader_needs_decompress_mask(struct si_context *sctx,
579 unsigned shader)
580 {
581 struct si_samplers *samplers = &sctx->samplers[shader];
582 unsigned shader_bit = 1 << shader;
583
584 if (samplers->needs_depth_decompress_mask ||
585 samplers->needs_color_decompress_mask ||
586 sctx->images[shader].needs_color_decompress_mask)
587 sctx->shader_needs_decompress_mask |= shader_bit;
588 else
589 sctx->shader_needs_decompress_mask &= ~shader_bit;
590 }
591
592 static void si_set_sampler_views(struct pipe_context *ctx,
593 enum pipe_shader_type shader, unsigned start,
594 unsigned count,
595 struct pipe_sampler_view **views)
596 {
597 struct si_context *sctx = (struct si_context *)ctx;
598 int i;
599
600 if (!count || shader >= SI_NUM_SHADERS)
601 return;
602
603 if (views) {
604 for (i = 0; i < count; i++)
605 si_set_sampler_view(sctx, shader, start + i, views[i], false);
606 } else {
607 for (i = 0; i < count; i++)
608 si_set_sampler_view(sctx, shader, start + i, NULL, false);
609 }
610
611 si_update_shader_needs_decompress_mask(sctx, shader);
612 }
613
614 static void
615 si_samplers_update_needs_color_decompress_mask(struct si_samplers *samplers)
616 {
617 unsigned mask = samplers->enabled_mask;
618
619 while (mask) {
620 int i = u_bit_scan(&mask);
621 struct pipe_resource *res = samplers->views[i]->texture;
622
623 if (res && res->target != PIPE_BUFFER) {
624 struct si_texture *tex = (struct si_texture *)res;
625
626 if (color_needs_decompression(tex)) {
627 samplers->needs_color_decompress_mask |= 1u << i;
628 } else {
629 samplers->needs_color_decompress_mask &= ~(1u << i);
630 }
631 }
632 }
633 }
634
635 /* IMAGE VIEWS */
636
637 static void
638 si_release_image_views(struct si_images *images)
639 {
640 unsigned i;
641
642 for (i = 0; i < SI_NUM_IMAGES; ++i) {
643 struct pipe_image_view *view = &images->views[i];
644
645 pipe_resource_reference(&view->resource, NULL);
646 }
647 }
648
649 static void
650 si_image_views_begin_new_cs(struct si_context *sctx, struct si_images *images)
651 {
652 uint mask = images->enabled_mask;
653
654 /* Add buffers to the CS. */
655 while (mask) {
656 int i = u_bit_scan(&mask);
657 struct pipe_image_view *view = &images->views[i];
658
659 assert(view->resource);
660
661 si_sampler_view_add_buffer(sctx, view->resource,
662 RADEON_USAGE_READWRITE, false, false);
663 }
664 }
665
666 static void
667 si_disable_shader_image(struct si_context *ctx, unsigned shader, unsigned slot)
668 {
669 struct si_images *images = &ctx->images[shader];
670
671 if (images->enabled_mask & (1u << slot)) {
672 struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
673 unsigned desc_slot = si_get_image_slot(slot);
674
675 pipe_resource_reference(&images->views[slot].resource, NULL);
676 images->needs_color_decompress_mask &= ~(1 << slot);
677
678 memcpy(descs->list + desc_slot*8, null_image_descriptor, 8*4);
679 images->enabled_mask &= ~(1u << slot);
680 ctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
681 }
682 }
683
684 static void
685 si_mark_image_range_valid(const struct pipe_image_view *view)
686 {
687 struct si_resource *res = si_resource(view->resource);
688
689 if (res->b.b.target != PIPE_BUFFER)
690 return;
691
692 util_range_add(&res->valid_buffer_range,
693 view->u.buf.offset,
694 view->u.buf.offset + view->u.buf.size);
695 }
696
697 static void si_set_shader_image_desc(struct si_context *ctx,
698 const struct pipe_image_view *view,
699 bool skip_decompress,
700 uint32_t *desc, uint32_t *fmask_desc)
701 {
702 struct si_screen *screen = ctx->screen;
703 struct si_resource *res;
704
705 res = si_resource(view->resource);
706
707 if (res->b.b.target == PIPE_BUFFER ||
708 view->shader_access & SI_IMAGE_ACCESS_AS_BUFFER) {
709 if (view->access & PIPE_IMAGE_ACCESS_WRITE)
710 si_mark_image_range_valid(view);
711
712 si_make_buffer_descriptor(screen, res,
713 view->format,
714 view->u.buf.offset,
715 view->u.buf.size, desc);
716 si_set_buf_desc_address(res, view->u.buf.offset, desc + 4);
717 } else {
718 static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
719 struct si_texture *tex = (struct si_texture *)res;
720 unsigned level = view->u.tex.level;
721 unsigned width, height, depth, hw_level;
722 bool uses_dcc = vi_dcc_enabled(tex, level);
723 unsigned access = view->access;
724
725 /* Clear the write flag when writes can't occur.
726 * Note that DCC_DECOMPRESS for MSAA doesn't work in some cases,
727 	 * so we don't want to trigger it.
728 */
729 if (tex->is_depth ||
730 (!fmask_desc && tex->surface.fmask_size != 0)) {
731 assert(!"Z/S and MSAA image stores are not supported");
732 access &= ~PIPE_IMAGE_ACCESS_WRITE;
733 }
734
735 	/* Any unsupported Z/S or MSAA store was downgraded to read-only
736 	 * access above, so no further checks are needed here. */
737
738 if (uses_dcc && !skip_decompress &&
739 (view->access & PIPE_IMAGE_ACCESS_WRITE ||
740 !vi_dcc_formats_compatible(res->b.b.format, view->format))) {
741 /* If DCC can't be disabled, at least decompress it.
742 * The decompression is relatively cheap if the surface
743 * has been decompressed already.
744 */
745 if (!si_texture_disable_dcc(ctx, tex))
746 si_decompress_dcc(ctx, tex);
747 }
748
749 if (ctx->chip_class >= GFX9) {
750 /* Always set the base address. The swizzle modes don't
751 * allow setting mipmap level offsets as the base.
752 */
753 width = res->b.b.width0;
754 height = res->b.b.height0;
755 depth = res->b.b.depth0;
756 hw_level = level;
757 } else {
758 /* Always force the base level to the selected level.
759 *
760 * This is required for 3D textures, where otherwise
761 * selecting a single slice for non-layered bindings
762 * fails. It doesn't hurt the other targets.
763 */
764 width = u_minify(res->b.b.width0, level);
765 height = u_minify(res->b.b.height0, level);
766 depth = u_minify(res->b.b.depth0, level);
767 hw_level = 0;
768 }
769
770 screen->make_texture_descriptor(screen, tex,
771 false, res->b.b.target,
772 view->format, swizzle,
773 hw_level, hw_level,
774 view->u.tex.first_layer,
775 view->u.tex.last_layer,
776 width, height, depth,
777 desc, fmask_desc);
778 si_set_mutable_tex_desc_fields(screen, tex,
779 &tex->surface.u.legacy.level[level],
780 level, level,
781 util_format_get_blockwidth(view->format),
782 false, desc);
783 }
784 }
785
786 static void si_set_shader_image(struct si_context *ctx,
787 unsigned shader,
788 unsigned slot, const struct pipe_image_view *view,
789 bool skip_decompress)
790 {
791 struct si_images *images = &ctx->images[shader];
792 struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
793 struct si_resource *res;
794 unsigned desc_slot = si_get_image_slot(slot);
795 uint32_t *desc = descs->list + desc_slot * 8;
796
797 if (!view || !view->resource) {
798 si_disable_shader_image(ctx, shader, slot);
799 return;
800 }
801
802 res = si_resource(view->resource);
803
804 if (&images->views[slot] != view)
805 util_copy_image_view(&images->views[slot], view);
806
807 si_set_shader_image_desc(ctx, view, skip_decompress, desc, NULL);
808
809 if (res->b.b.target == PIPE_BUFFER ||
810 view->shader_access & SI_IMAGE_ACCESS_AS_BUFFER) {
811 images->needs_color_decompress_mask &= ~(1 << slot);
812 res->bind_history |= PIPE_BIND_SHADER_IMAGE;
813 } else {
814 struct si_texture *tex = (struct si_texture *)res;
815 unsigned level = view->u.tex.level;
816
817 if (color_needs_decompression(tex)) {
818 images->needs_color_decompress_mask |= 1 << slot;
819 } else {
820 images->needs_color_decompress_mask &= ~(1 << slot);
821 }
822
823 if (vi_dcc_enabled(tex, level) &&
824 p_atomic_read(&tex->framebuffers_bound))
825 ctx->need_check_render_feedback = true;
826 }
827
828 images->enabled_mask |= 1u << slot;
829 ctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
830
831 /* Since this can flush, it must be done after enabled_mask is updated. */
832 si_sampler_view_add_buffer(ctx, &res->b.b,
833 (view->access & PIPE_IMAGE_ACCESS_WRITE) ?
834 RADEON_USAGE_READWRITE : RADEON_USAGE_READ,
835 false, true);
836 }
837
838 static void
839 si_set_shader_images(struct pipe_context *pipe,
840 enum pipe_shader_type shader,
841 unsigned start_slot, unsigned count,
842 const struct pipe_image_view *views)
843 {
844 struct si_context *ctx = (struct si_context *)pipe;
845 unsigned i, slot;
846
847 assert(shader < SI_NUM_SHADERS);
848
849 if (!count)
850 return;
851
852 assert(start_slot + count <= SI_NUM_IMAGES);
853
854 if (views) {
855 for (i = 0, slot = start_slot; i < count; ++i, ++slot)
856 si_set_shader_image(ctx, shader, slot, &views[i], false);
857 } else {
858 for (i = 0, slot = start_slot; i < count; ++i, ++slot)
859 si_set_shader_image(ctx, shader, slot, NULL, false);
860 }
861
862 si_update_shader_needs_decompress_mask(ctx, shader);
863 }
864
865 static void
866 si_images_update_needs_color_decompress_mask(struct si_images *images)
867 {
868 unsigned mask = images->enabled_mask;
869
870 while (mask) {
871 int i = u_bit_scan(&mask);
872 struct pipe_resource *res = images->views[i].resource;
873
874 if (res && res->target != PIPE_BUFFER) {
875 struct si_texture *tex = (struct si_texture *)res;
876
877 if (color_needs_decompression(tex)) {
878 images->needs_color_decompress_mask |= 1 << i;
879 } else {
880 images->needs_color_decompress_mask &= ~(1 << i);
881 }
882 }
883 }
884 }
885
886 void si_update_ps_colorbuf0_slot(struct si_context *sctx)
887 {
888 struct si_buffer_resources *buffers = &sctx->rw_buffers;
889 struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
890 unsigned slot = SI_PS_IMAGE_COLORBUF0;
891 struct pipe_surface *surf = NULL;
892
893 /* si_texture_disable_dcc can get us here again. */
894 if (sctx->blitter->running)
895 return;
896
897 /* See whether FBFETCH is used and color buffer 0 is set. */
898 if (sctx->ps_shader.cso &&
899 sctx->ps_shader.cso->info.opcode_count[TGSI_OPCODE_FBFETCH] &&
900 sctx->framebuffer.state.nr_cbufs &&
901 sctx->framebuffer.state.cbufs[0])
902 surf = sctx->framebuffer.state.cbufs[0];
903
904 /* Return if FBFETCH transitions from disabled to disabled. */
905 if (!buffers->buffers[slot] && !surf)
906 return;
907
908 sctx->ps_uses_fbfetch = surf != NULL;
909 si_update_ps_iter_samples(sctx);
910
911 if (surf) {
912 struct si_texture *tex = (struct si_texture*)surf->texture;
913 struct pipe_image_view view;
914
915 assert(tex);
916 assert(!tex->is_depth);
917
918 /* Disable DCC, because the texture is used as both a sampler
919 * and color buffer.
920 */
921 si_texture_disable_dcc(sctx, tex);
922
923 if (tex->buffer.b.b.nr_samples <= 1 && tex->cmask_buffer) {
924 /* Disable CMASK. */
925 assert(tex->cmask_buffer != &tex->buffer);
926 si_eliminate_fast_color_clear(sctx, tex);
927 si_texture_discard_cmask(sctx->screen, tex);
928 }
929
930 view.resource = surf->texture;
931 view.format = surf->format;
932 view.access = PIPE_IMAGE_ACCESS_READ;
933 view.u.tex.first_layer = surf->u.tex.first_layer;
934 view.u.tex.last_layer = surf->u.tex.last_layer;
935 view.u.tex.level = surf->u.tex.level;
936
937 /* Set the descriptor. */
938 uint32_t *desc = descs->list + slot*4;
939 memset(desc, 0, 16 * 4);
940 si_set_shader_image_desc(sctx, &view, true, desc, desc + 8);
941
942 pipe_resource_reference(&buffers->buffers[slot], &tex->buffer.b.b);
943 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
944 &tex->buffer, RADEON_USAGE_READ,
945 RADEON_PRIO_SHADER_RW_IMAGE);
946 buffers->enabled_mask |= 1u << slot;
947 } else {
948 /* Clear the descriptor. */
949 memset(descs->list + slot*4, 0, 8*4);
950 pipe_resource_reference(&buffers->buffers[slot], NULL);
951 buffers->enabled_mask &= ~(1u << slot);
952 }
953
954 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
955 }
956
957 /* SAMPLER STATES */
958
959 static void si_bind_sampler_states(struct pipe_context *ctx,
960 enum pipe_shader_type shader,
961 unsigned start, unsigned count, void **states)
962 {
963 struct si_context *sctx = (struct si_context *)ctx;
964 struct si_samplers *samplers = &sctx->samplers[shader];
965 struct si_descriptors *desc = si_sampler_and_image_descriptors(sctx, shader);
966 struct si_sampler_state **sstates = (struct si_sampler_state**)states;
967 int i;
968
969 if (!count || shader >= SI_NUM_SHADERS || !sstates)
970 return;
971
972 for (i = 0; i < count; i++) {
973 unsigned slot = start + i;
974 unsigned desc_slot = si_get_sampler_slot(slot);
975
976 if (!sstates[i] ||
977 sstates[i] == samplers->sampler_states[slot])
978 continue;
979
980 #ifndef NDEBUG
981 assert(sstates[i]->magic == SI_SAMPLER_STATE_MAGIC);
982 #endif
983 samplers->sampler_states[slot] = sstates[i];
984
985 /* If FMASK is bound, don't overwrite it.
986 * The sampler state will be set after FMASK is unbound.
987 */
988 struct si_sampler_view *sview =
989 (struct si_sampler_view *)samplers->views[slot];
990
991 struct si_texture *tex = NULL;
992
993 if (sview && sview->base.texture &&
994 sview->base.texture->target != PIPE_BUFFER)
995 tex = (struct si_texture *)sview->base.texture;
996
997 if (tex && tex->surface.fmask_size)
998 continue;
999
1000 si_set_sampler_state_desc(sstates[i], sview, tex,
1001 desc->list + desc_slot * 16 + 12);
1002
1003 sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
1004 }
1005 }
1006
1007 /* BUFFER RESOURCES */
1008
1009 static void si_init_buffer_resources(struct si_buffer_resources *buffers,
1010 struct si_descriptors *descs,
1011 unsigned num_buffers,
1012 short shader_userdata_rel_index,
1013 enum radeon_bo_priority priority,
1014 enum radeon_bo_priority priority_constbuf)
1015 {
1016 buffers->priority = priority;
1017 buffers->priority_constbuf = priority_constbuf;
1018 buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
1019 buffers->offsets = CALLOC(num_buffers, sizeof(buffers->offsets[0]));
1020
1021 si_init_descriptors(descs, shader_userdata_rel_index, 4, num_buffers);
1022 }
1023
1024 static void si_release_buffer_resources(struct si_buffer_resources *buffers,
1025 struct si_descriptors *descs)
1026 {
1027 int i;
1028
1029 for (i = 0; i < descs->num_elements; i++) {
1030 pipe_resource_reference(&buffers->buffers[i], NULL);
1031 }
1032
1033 FREE(buffers->buffers);
1034 FREE(buffers->offsets);
1035 }
1036
1037 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
1038 struct si_buffer_resources *buffers)
1039 {
1040 unsigned mask = buffers->enabled_mask;
1041
1042 /* Add buffers to the CS. */
1043 while (mask) {
1044 int i = u_bit_scan(&mask);
1045
1046 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
1047 si_resource(buffers->buffers[i]),
1048 buffers->writable_mask & (1u << i) ? RADEON_USAGE_READWRITE :
1049 RADEON_USAGE_READ,
1050 i < SI_NUM_SHADER_BUFFERS ? buffers->priority :
1051 buffers->priority_constbuf);
1052 }
1053 }
1054
1055 static void si_get_buffer_from_descriptors(struct si_buffer_resources *buffers,
1056 struct si_descriptors *descs,
1057 unsigned idx, struct pipe_resource **buf,
1058 unsigned *offset, unsigned *size)
1059 {
1060 pipe_resource_reference(buf, buffers->buffers[idx]);
1061 if (*buf) {
1062 struct si_resource *res = si_resource(*buf);
1063 const uint32_t *desc = descs->list + idx * 4;
1064 uint64_t va;
1065
1066 *size = desc[2];
1067
1068 assert(G_008F04_STRIDE(desc[1]) == 0);
1069 va = si_desc_extract_buffer_address(desc);
1070
1071 assert(va >= res->gpu_address && va + *size <= res->gpu_address + res->bo_size);
1072 *offset = va - res->gpu_address;
1073 }
1074 }
1075
1076 /* VERTEX BUFFERS */
1077
1078 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
1079 {
1080 int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
1081 int i;
1082
1083 for (i = 0; i < count; i++) {
1084 int vb = sctx->vertex_elements->vertex_buffer_index[i];
1085
1086 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
1087 continue;
1088 if (!sctx->vertex_buffer[vb].buffer.resource)
1089 continue;
1090
1091 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
1092 si_resource(sctx->vertex_buffer[vb].buffer.resource),
1093 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
1094 }
1095
1096 if (!sctx->vb_descriptors_buffer)
1097 return;
1098 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
1099 sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
1100 RADEON_PRIO_DESCRIPTORS);
1101 }
1102
1103 bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
1104 {
1105 struct si_vertex_elements *velems = sctx->vertex_elements;
1106 unsigned i, count;
1107 unsigned desc_list_byte_size;
1108 unsigned first_vb_use_mask;
1109 uint32_t *ptr;
1110
1111 if (!sctx->vertex_buffers_dirty || !velems)
1112 return true;
1113
1114 count = velems->count;
1115
1116 if (!count)
1117 return true;
1118
1119 desc_list_byte_size = velems->desc_list_byte_size;
1120 first_vb_use_mask = velems->first_vb_use_mask;
1121
1122 /* Vertex buffer descriptors are the only ones which are uploaded
1123 * directly through a staging buffer and don't go through
1124 * the fine-grained upload path.
1125 */
1126 u_upload_alloc(sctx->b.const_uploader, 0,
1127 desc_list_byte_size,
1128 si_optimal_tcc_alignment(sctx, desc_list_byte_size),
1129 &sctx->vb_descriptors_offset,
1130 (struct pipe_resource**)&sctx->vb_descriptors_buffer,
1131 (void**)&ptr);
1132 if (!sctx->vb_descriptors_buffer) {
1133 sctx->vb_descriptors_offset = 0;
1134 sctx->vb_descriptors_gpu_list = NULL;
1135 return false;
1136 }
1137
1138 sctx->vb_descriptors_gpu_list = ptr;
1139 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
1140 sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
1141 RADEON_PRIO_DESCRIPTORS);
1142
1143 assert(count <= SI_MAX_ATTRIBS);
1144
1145 for (i = 0; i < count; i++) {
1146 struct pipe_vertex_buffer *vb;
1147 struct si_resource *buf;
1148 unsigned vbo_index = velems->vertex_buffer_index[i];
1149 uint32_t *desc = &ptr[i*4];
1150
1151 vb = &sctx->vertex_buffer[vbo_index];
1152 buf = si_resource(vb->buffer.resource);
1153 if (!buf) {
1154 memset(desc, 0, 16);
1155 continue;
1156 }
1157
1158 int64_t offset = (int64_t)((int)vb->buffer_offset) +
1159 velems->src_offset[i];
1160 uint64_t va = buf->gpu_address + offset;
1161
1162 int64_t num_records = (int64_t)buf->b.b.width0 - offset;
1163 if (sctx->chip_class != GFX8 && vb->stride) {
1164 /* Round up by rounding down and adding 1 */
1165 num_records = (num_records - velems->format_size[i]) /
1166 vb->stride + 1;
1167 }
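		/* Worked example (illustrative numbers): with stride = 16,
		 * format_size[i] = 12 and 100 bytes left in the buffer,
		 * num_records = (100 - 12) / 16 + 1 = 6; the last record
		 * starts at byte 80 and its 12-byte fetch ends at byte 92,
		 * still inside the buffer. */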
1168 assert(num_records >= 0 && num_records <= UINT_MAX);
1169
1170 desc[0] = va;
1171 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1172 S_008F04_STRIDE(vb->stride);
1173 desc[2] = num_records;
1174 desc[3] = velems->rsrc_word3[i];
1175
1176 if (first_vb_use_mask & (1 << i)) {
1177 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
1178 si_resource(vb->buffer.resource),
1179 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
1180 }
1181 }
1182
1183 /* Don't flush the const cache. It would have a very negative effect
1184 * on performance (confirmed by testing). New descriptors are always
1185 * uploaded to a fresh new buffer, so I don't think flushing the const
1186 * cache is needed. */
1187 si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
1188 sctx->vertex_buffers_dirty = false;
1189 sctx->vertex_buffer_pointer_dirty = true;
1190 sctx->prefetch_L2_mask |= SI_PREFETCH_VBO_DESCRIPTORS;
1191 return true;
1192 }
1193
1194
1195 /* CONSTANT BUFFERS */
1196
1197 static struct si_descriptors *
1198 si_const_and_shader_buffer_descriptors(struct si_context *sctx, unsigned shader)
1199 {
1200 return &sctx->descriptors[si_const_and_shader_buffer_descriptors_idx(shader)];
1201 }
1202
1203 void si_upload_const_buffer(struct si_context *sctx, struct si_resource **buf,
1204 const uint8_t *ptr, unsigned size, uint32_t *const_offset)
1205 {
1206 void *tmp;
1207
1208 u_upload_alloc(sctx->b.const_uploader, 0, size,
1209 si_optimal_tcc_alignment(sctx, size),
1210 const_offset,
1211 (struct pipe_resource**)buf, &tmp);
1212 if (*buf)
1213 util_memcpy_cpu_to_le32(tmp, ptr, size);
1214 }
1215
1216 static void si_set_constant_buffer(struct si_context *sctx,
1217 struct si_buffer_resources *buffers,
1218 unsigned descriptors_idx,
1219 uint slot, const struct pipe_constant_buffer *input)
1220 {
1221 struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
1222 assert(slot < descs->num_elements);
1223 pipe_resource_reference(&buffers->buffers[slot], NULL);
1224
1225 /* GFX7 cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
1226 * with a NULL buffer). We need to use a dummy buffer instead. */
1227 if (sctx->chip_class == GFX7 &&
1228 (!input || (!input->buffer && !input->user_buffer)))
1229 input = &sctx->null_const_buf;
1230
1231 if (input && (input->buffer || input->user_buffer)) {
1232 struct pipe_resource *buffer = NULL;
1233 uint64_t va;
1234 unsigned buffer_offset;
1235
1236 /* Upload the user buffer if needed. */
1237 if (input->user_buffer) {
1238 si_upload_const_buffer(sctx,
1239 (struct si_resource**)&buffer, input->user_buffer,
1240 input->buffer_size, &buffer_offset);
1241 if (!buffer) {
1242 /* Just unbind on failure. */
1243 si_set_constant_buffer(sctx, buffers, descriptors_idx, slot, NULL);
1244 return;
1245 }
1246 } else {
1247 pipe_resource_reference(&buffer, input->buffer);
1248 buffer_offset = input->buffer_offset;
1249 }
1250
1251 va = si_resource(buffer)->gpu_address + buffer_offset;
1252
1253 /* Set the descriptor. */
1254 uint32_t *desc = descs->list + slot*4;
1255 desc[0] = va;
1256 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1257 S_008F04_STRIDE(0);
1258 desc[2] = input->buffer_size;
1259 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1260 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1261 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1262 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
1263
1264 if (sctx->chip_class >= GFX10) {
1265 desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
1266 S_008F0C_OOB_SELECT(3) |
1267 S_008F0C_RESOURCE_LEVEL(1);
1268 } else {
1269 desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1270 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1271 }
1272
1273 buffers->buffers[slot] = buffer;
1274 buffers->offsets[slot] = buffer_offset;
1275 radeon_add_to_gfx_buffer_list_check_mem(sctx,
1276 si_resource(buffer),
1277 RADEON_USAGE_READ,
1278 buffers->priority_constbuf, true);
1279 buffers->enabled_mask |= 1u << slot;
1280 } else {
1281 /* Clear the descriptor. */
1282 memset(descs->list + slot*4, 0, sizeof(uint32_t) * 4);
1283 buffers->enabled_mask &= ~(1u << slot);
1284 }
1285
1286 sctx->descriptors_dirty |= 1u << descriptors_idx;
1287 }
1288
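/* Usage sketch (illustrative, not driver code): roughly how a gallium
 * state tracker reaches si_pipe_set_constant_buffer below, binding a
 * small user constant buffer to slot 1 of the fragment shader. The
 * function name is hypothetical.
 */
static void example_bind_constants(struct pipe_context *ctx)
{
	static const float consts[4] = { 0.0f, 0.25f, 0.5f, 1.0f };
	struct pipe_constant_buffer cb = {};

	cb.user_buffer = consts;         /* copied via si_upload_const_buffer */
	cb.buffer_size = sizeof(consts);
	ctx->set_constant_buffer(ctx, PIPE_SHADER_FRAGMENT, 1, &cb);
}
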
1289 static void si_pipe_set_constant_buffer(struct pipe_context *ctx,
1290 enum pipe_shader_type shader, uint slot,
1291 const struct pipe_constant_buffer *input)
1292 {
1293 struct si_context *sctx = (struct si_context *)ctx;
1294
1295 if (shader >= SI_NUM_SHADERS)
1296 return;
1297
1298 if (slot == 0 && input && input->buffer &&
1299 !(si_resource(input->buffer)->flags & RADEON_FLAG_32BIT)) {
1300 assert(!"constant buffer 0 must have a 32-bit VM address, use const_uploader");
1301 return;
1302 }
1303
1304 if (input && input->buffer)
1305 si_resource(input->buffer)->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
1306
1307 slot = si_get_constbuf_slot(slot);
1308 si_set_constant_buffer(sctx, &sctx->const_and_shader_buffers[shader],
1309 si_const_and_shader_buffer_descriptors_idx(shader),
1310 slot, input);
1311 }
1312
1313 void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader,
1314 uint slot, struct pipe_constant_buffer *cbuf)
1315 {
1316 cbuf->user_buffer = NULL;
1317 si_get_buffer_from_descriptors(
1318 &sctx->const_and_shader_buffers[shader],
1319 si_const_and_shader_buffer_descriptors(sctx, shader),
1320 si_get_constbuf_slot(slot),
1321 &cbuf->buffer, &cbuf->buffer_offset, &cbuf->buffer_size);
1322 }
1323
1324 /* SHADER BUFFERS */
1325
1326 static void si_set_shader_buffer(struct si_context *sctx,
1327 struct si_buffer_resources *buffers,
1328 unsigned descriptors_idx,
1329 uint slot, const struct pipe_shader_buffer *sbuffer,
1330 bool writable, enum radeon_bo_priority priority)
1331 {
1332 struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
1333 uint32_t *desc = descs->list + slot * 4;
1334
1335 if (!sbuffer || !sbuffer->buffer) {
1336 pipe_resource_reference(&buffers->buffers[slot], NULL);
1337 memset(desc, 0, sizeof(uint32_t) * 4);
1338 buffers->enabled_mask &= ~(1u << slot);
1339 buffers->writable_mask &= ~(1u << slot);
1340 sctx->descriptors_dirty |= 1u << descriptors_idx;
1341 return;
1342 }
1343
1344 struct si_resource *buf = si_resource(sbuffer->buffer);
1345 uint64_t va = buf->gpu_address + sbuffer->buffer_offset;
1346
1347 desc[0] = va;
1348 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1349 S_008F04_STRIDE(0);
1350 desc[2] = sbuffer->buffer_size;
1351 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1352 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1353 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1354 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
1355
1356 if (sctx->chip_class >= GFX10) {
1357 desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
1358 S_008F0C_OOB_SELECT(3) |
1359 S_008F0C_RESOURCE_LEVEL(1);
1360 } else {
1361 desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1362 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1363 }
1364
1365 pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
1366 buffers->offsets[slot] = sbuffer->buffer_offset;
1367 radeon_add_to_gfx_buffer_list_check_mem(sctx, buf,
1368 writable ? RADEON_USAGE_READWRITE :
1369 RADEON_USAGE_READ,
1370 priority, true);
1371 if (writable)
1372 buffers->writable_mask |= 1u << slot;
1373 else
1374 buffers->writable_mask &= ~(1u << slot);
1375
1376 buffers->enabled_mask |= 1u << slot;
1377 sctx->descriptors_dirty |= 1u << descriptors_idx;
1378
1379 util_range_add(&buf->valid_buffer_range, sbuffer->buffer_offset,
1380 sbuffer->buffer_offset + sbuffer->buffer_size);
1381 }
1382
1383 static void si_set_shader_buffers(struct pipe_context *ctx,
1384 enum pipe_shader_type shader,
1385 unsigned start_slot, unsigned count,
1386 const struct pipe_shader_buffer *sbuffers,
1387 unsigned writable_bitmask)
1388 {
1389 struct si_context *sctx = (struct si_context *)ctx;
1390 struct si_buffer_resources *buffers = &sctx->const_and_shader_buffers[shader];
1391 unsigned descriptors_idx = si_const_and_shader_buffer_descriptors_idx(shader);
1392 unsigned i;
1393
1394 assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
1395
1396 for (i = 0; i < count; ++i) {
1397 const struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
1398 unsigned slot = si_get_shaderbuf_slot(start_slot + i);
1399
1400 if (sbuffer && sbuffer->buffer)
1401 si_resource(sbuffer->buffer)->bind_history |= PIPE_BIND_SHADER_BUFFER;
1402
1403 si_set_shader_buffer(sctx, buffers, descriptors_idx, slot, sbuffer,
1404 !!(writable_bitmask & (1u << i)),
1405 buffers->priority);
1406 }
1407 }
1408
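/* Usage sketch (illustrative, not driver code): binding one writable
 * shader buffer through the gallium interface; bit i of writable_bitmask
 * marks buffers[i] as writable. The function name and "ssbo" argument
 * are hypothetical.
 */
static void example_bind_ssbo(struct pipe_context *ctx,
			      struct pipe_resource *ssbo)
{
	struct pipe_shader_buffer sb = {};

	sb.buffer = ssbo;
	sb.buffer_offset = 0;
	sb.buffer_size = ssbo->width0;
	ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 1, &sb, 0x1);
}
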
1409 void si_get_shader_buffers(struct si_context *sctx,
1410 enum pipe_shader_type shader,
1411 uint start_slot, uint count,
1412 struct pipe_shader_buffer *sbuf)
1413 {
1414 struct si_buffer_resources *buffers = &sctx->const_and_shader_buffers[shader];
1415 struct si_descriptors *descs = si_const_and_shader_buffer_descriptors(sctx, shader);
1416
1417 for (unsigned i = 0; i < count; ++i) {
1418 si_get_buffer_from_descriptors(
1419 buffers, descs,
1420 si_get_shaderbuf_slot(start_slot + i),
1421 &sbuf[i].buffer, &sbuf[i].buffer_offset,
1422 &sbuf[i].buffer_size);
1423 }
1424 }
1425
1426 /* RING BUFFERS */
1427
1428 void si_set_rw_buffer(struct si_context *sctx,
1429 uint slot, const struct pipe_constant_buffer *input)
1430 {
1431 si_set_constant_buffer(sctx, &sctx->rw_buffers, SI_DESCS_RW_BUFFERS,
1432 slot, input);
1433 }
1434
1435 void si_set_rw_shader_buffer(struct si_context *sctx, uint slot,
1436 const struct pipe_shader_buffer *sbuffer)
1437 {
1438 si_set_shader_buffer(sctx, &sctx->rw_buffers, SI_DESCS_RW_BUFFERS,
1439 slot, sbuffer, true, RADEON_PRIO_SHADER_RW_BUFFER);
1440 }
1441
1442 void si_set_ring_buffer(struct si_context *sctx, uint slot,
1443 struct pipe_resource *buffer,
1444 unsigned stride, unsigned num_records,
1445 bool add_tid, bool swizzle,
1446 unsigned element_size, unsigned index_stride, uint64_t offset)
1447 {
1448 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1449 struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1450
1451 /* The stride field in the resource descriptor has 14 bits */
1452 assert(stride < (1 << 14));
1453
1454 assert(slot < descs->num_elements);
1455 pipe_resource_reference(&buffers->buffers[slot], NULL);
1456
1457 if (buffer) {
1458 uint64_t va;
1459
1460 va = si_resource(buffer)->gpu_address + offset;
1461
1462 switch (element_size) {
1463 default:
1464 assert(!"Unsupported ring buffer element size");
1465 case 0:
1466 case 2:
1467 element_size = 0;
1468 break;
1469 case 4:
1470 element_size = 1;
1471 break;
1472 case 8:
1473 element_size = 2;
1474 break;
1475 case 16:
1476 element_size = 3;
1477 break;
1478 }
1479
1480 switch (index_stride) {
1481 default:
1482 assert(!"Unsupported ring buffer index stride");
1483 case 0:
1484 case 8:
1485 index_stride = 0;
1486 break;
1487 case 16:
1488 index_stride = 1;
1489 break;
1490 case 32:
1491 index_stride = 2;
1492 break;
1493 case 64:
1494 index_stride = 3;
1495 break;
1496 }
1497
1498 if (sctx->chip_class >= GFX8 && stride)
1499 num_records *= stride;
1500
1501 /* Set the descriptor. */
1502 uint32_t *desc = descs->list + slot*4;
1503 desc[0] = va;
1504 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1505 S_008F04_STRIDE(stride) |
1506 S_008F04_SWIZZLE_ENABLE(swizzle);
1507 desc[2] = num_records;
1508 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1509 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1510 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1511 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1512 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1513 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1514 S_008F0C_INDEX_STRIDE(index_stride) |
1515 S_008F0C_ADD_TID_ENABLE(add_tid);
1516
1517 if (sctx->chip_class >= GFX9)
1518 assert(!swizzle || element_size == 1); /* always 4 bytes on GFX9 */
1519 else
1520 desc[3] |= S_008F0C_ELEMENT_SIZE(element_size);
1521
1522 pipe_resource_reference(&buffers->buffers[slot], buffer);
1523 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
1524 si_resource(buffer),
1525 RADEON_USAGE_READWRITE, buffers->priority);
1526 buffers->enabled_mask |= 1u << slot;
1527 } else {
1528 /* Clear the descriptor. */
1529 memset(descs->list + slot*4, 0, sizeof(uint32_t) * 4);
1530 buffers->enabled_mask &= ~(1u << slot);
1531 }
1532
1533 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
1534 }
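
/* Encoding note (worked example): in the switches above, element_size
 * 16 bytes encodes as 3 and index_stride 64 encodes as 3 in the 2-bit
 * descriptor fields; on GFX8 and newer, NUM_RECORDS ends up holding a
 * byte count (num_records * stride) rather than a record count. */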
1535
1536 /* INTERNAL CONST BUFFERS */
1537
1538 static void si_set_polygon_stipple(struct pipe_context *ctx,
1539 const struct pipe_poly_stipple *state)
1540 {
1541 struct si_context *sctx = (struct si_context *)ctx;
1542 struct pipe_constant_buffer cb = {};
1543 unsigned stipple[32];
1544 int i;
1545
1546 for (i = 0; i < 32; i++)
1547 stipple[i] = util_bitreverse(state->stipple[i]);
1548
1549 cb.user_buffer = stipple;
1550 cb.buffer_size = sizeof(stipple);
1551
1552 si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE, &cb);
1553 }
1554
1555 /* TEXTURE METADATA ENABLE/DISABLE */
1556
1557 static void
1558 si_resident_handles_update_needs_color_decompress(struct si_context *sctx)
1559 {
1560 util_dynarray_clear(&sctx->resident_tex_needs_color_decompress);
1561 util_dynarray_clear(&sctx->resident_img_needs_color_decompress);
1562
1563 util_dynarray_foreach(&sctx->resident_tex_handles,
1564 struct si_texture_handle *, tex_handle) {
1565 struct pipe_resource *res = (*tex_handle)->view->texture;
1566 struct si_texture *tex;
1567
1568 if (!res || res->target == PIPE_BUFFER)
1569 continue;
1570
1571 tex = (struct si_texture *)res;
1572 if (!color_needs_decompression(tex))
1573 continue;
1574
1575 util_dynarray_append(&sctx->resident_tex_needs_color_decompress,
1576 struct si_texture_handle *, *tex_handle);
1577 }
1578
1579 util_dynarray_foreach(&sctx->resident_img_handles,
1580 struct si_image_handle *, img_handle) {
1581 struct pipe_image_view *view = &(*img_handle)->view;
1582 struct pipe_resource *res = view->resource;
1583 struct si_texture *tex;
1584
1585 if (!res || res->target == PIPE_BUFFER)
1586 continue;
1587
1588 tex = (struct si_texture *)res;
1589 if (!color_needs_decompression(tex))
1590 continue;
1591
1592 util_dynarray_append(&sctx->resident_img_needs_color_decompress,
1593 struct si_image_handle *, *img_handle);
1594 }
1595 }
1596
1597 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
1598 * while the texture is bound, possibly by a different context. In that case,
1599 * call this function to update needs_*_decompress_masks.
1600 */
1601 void si_update_needs_color_decompress_masks(struct si_context *sctx)
1602 {
1603 for (int i = 0; i < SI_NUM_SHADERS; ++i) {
1604 si_samplers_update_needs_color_decompress_mask(&sctx->samplers[i]);
1605 si_images_update_needs_color_decompress_mask(&sctx->images[i]);
1606 si_update_shader_needs_decompress_mask(sctx, i);
1607 }
1608
1609 si_resident_handles_update_needs_color_decompress(sctx);
1610 }
1611
1612 /* BUFFER DISCARD/INVALIDATION */
1613
1614 /* Reset descriptors of buffer resources after \p buf has been invalidated.
1615 * If buf == NULL, reset all descriptors.
1616 */
1617 static void si_reset_buffer_resources(struct si_context *sctx,
1618 struct si_buffer_resources *buffers,
1619 unsigned descriptors_idx,
1620 unsigned slot_mask,
1621 struct pipe_resource *buf,
1622 enum radeon_bo_priority priority)
1623 {
1624 struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
1625 unsigned mask = buffers->enabled_mask & slot_mask;
1626
1627 while (mask) {
1628 unsigned i = u_bit_scan(&mask);
1629 struct pipe_resource *buffer = buffers->buffers[i];
1630
1631 if (buffer && (!buf || buffer == buf)) {
1632 si_set_buf_desc_address(si_resource(buffer), buffers->offsets[i],
1633 descs->list + i*4);
1634 sctx->descriptors_dirty |= 1u << descriptors_idx;
1635
1636 radeon_add_to_gfx_buffer_list_check_mem(sctx,
1637 si_resource(buffer),
1638 buffers->writable_mask & (1u << i) ?
1639 RADEON_USAGE_READWRITE :
1640 RADEON_USAGE_READ,
1641 priority, true);
1642 }
1643 }
1644 }
1645
1646 /* Update all buffer bindings where the buffer is bound, including
1647 * all resource descriptors. This is invalidate_buffer without
1648 * the invalidation.
1649 *
1650 * If buf == NULL, update all buffer bindings.
1651 */
1652 void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf)
1653 {
1654 struct si_resource *buffer = si_resource(buf);
1655 unsigned i, shader;
1656 unsigned num_elems = sctx->vertex_elements ?
1657 sctx->vertex_elements->count : 0;
1658
1659 /* We changed the buffer, now we need to bind it where the old one
1660 * was bound. This consists of 2 things:
1661 * 1) Updating the resource descriptor and dirtying it.
1662 * 2) Adding a relocation to the CS, so that it's usable.
1663 */
1664
1665 /* Vertex buffers. */
1666 if (!buffer) {
1667 if (num_elems)
1668 sctx->vertex_buffers_dirty = true;
1669 } else if (buffer->bind_history & PIPE_BIND_VERTEX_BUFFER) {
1670 for (i = 0; i < num_elems; i++) {
1671 int vb = sctx->vertex_elements->vertex_buffer_index[i];
1672
1673 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
1674 continue;
1675 if (!sctx->vertex_buffer[vb].buffer.resource)
1676 continue;
1677
1678 if (sctx->vertex_buffer[vb].buffer.resource == buf) {
1679 sctx->vertex_buffers_dirty = true;
1680 break;
1681 }
1682 }
1683 }
1684
1685 /* Streamout buffers. (other internal buffers can't be invalidated) */
1686 if (!buffer || buffer->bind_history & PIPE_BIND_STREAM_OUTPUT) {
1687 for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
1688 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1689 struct si_descriptors *descs =
1690 &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1691 struct pipe_resource *buffer = buffers->buffers[i];
1692
1693 if (!buffer || (buf && buffer != buf))
1694 continue;
1695
1696 si_set_buf_desc_address(si_resource(buffer), buffers->offsets[i],
1697 descs->list + i*4);
1698 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
1699
1700 radeon_add_to_gfx_buffer_list_check_mem(sctx,
1701 si_resource(buffer),
1702 RADEON_USAGE_WRITE,
1703 RADEON_PRIO_SHADER_RW_BUFFER,
1704 true);
1705
1706 /* Update the streamout state. */
1707 if (sctx->streamout.begin_emitted)
1708 si_emit_streamout_end(sctx);
1709 sctx->streamout.append_bitmask =
1710 sctx->streamout.enabled_mask;
1711 si_streamout_buffers_dirty(sctx);
1712 }
1713 }
1714
1715 /* Constant and shader buffers. */
1716 if (!buffer || buffer->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
1717 for (shader = 0; shader < SI_NUM_SHADERS; shader++)
1718 si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
1719 si_const_and_shader_buffer_descriptors_idx(shader),
1720 u_bit_consecutive(SI_NUM_SHADER_BUFFERS, SI_NUM_CONST_BUFFERS),
1721 buf,
1722 sctx->const_and_shader_buffers[shader].priority_constbuf);
1723 }
1724
1725 if (!buffer || buffer->bind_history & PIPE_BIND_SHADER_BUFFER) {
1726 for (shader = 0; shader < SI_NUM_SHADERS; shader++)
1727 si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
1728 si_const_and_shader_buffer_descriptors_idx(shader),
1729 u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS),
1730 buf,
1731 sctx->const_and_shader_buffers[shader].priority);
1732 }
1733
1734 if (!buffer || buffer->bind_history & PIPE_BIND_SAMPLER_VIEW) {
1735 /* Texture buffers - update bindings. */
1736 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1737 struct si_samplers *samplers = &sctx->samplers[shader];
1738 struct si_descriptors *descs =
1739 si_sampler_and_image_descriptors(sctx, shader);
1740 unsigned mask = samplers->enabled_mask;
1741
1742 while (mask) {
1743 unsigned i = u_bit_scan(&mask);
1744 struct pipe_resource *buffer = samplers->views[i]->texture;
1745
1746 if (buffer && buffer->target == PIPE_BUFFER &&
1747 (!buf || buffer == buf)) {
1748 unsigned desc_slot = si_get_sampler_slot(i);
1749
1750 si_set_buf_desc_address(si_resource(buffer),
1751 samplers->views[i]->u.buf.offset,
1752 descs->list + desc_slot * 16 + 4);
1753 sctx->descriptors_dirty |=
1754 1u << si_sampler_and_image_descriptors_idx(shader);
1755
1756 radeon_add_to_gfx_buffer_list_check_mem(
1757 sctx, si_resource(buffer),
1758 RADEON_USAGE_READ,
1759 RADEON_PRIO_SAMPLER_BUFFER, true);
1760 }
1761 }
1762 }
1763 }
1764
1765 /* Shader images */
1766 if (!buffer || buffer->bind_history & PIPE_BIND_SHADER_IMAGE) {
1767 for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
1768 struct si_images *images = &sctx->images[shader];
1769 struct si_descriptors *descs =
1770 si_sampler_and_image_descriptors(sctx, shader);
1771 unsigned mask = images->enabled_mask;
1772
1773 while (mask) {
1774 unsigned i = u_bit_scan(&mask);
1775 struct pipe_resource *buffer = images->views[i].resource;
1776
1777 if (buffer && buffer->target == PIPE_BUFFER &&
1778 (!buf || buffer == buf)) {
1779 unsigned desc_slot = si_get_image_slot(i);
1780
1781 if (images->views[i].access & PIPE_IMAGE_ACCESS_WRITE)
1782 si_mark_image_range_valid(&images->views[i]);
1783
1784 si_set_buf_desc_address(si_resource(buffer),
1785 images->views[i].u.buf.offset,
1786 descs->list + desc_slot * 8 + 4);
1787 sctx->descriptors_dirty |=
1788 1u << si_sampler_and_image_descriptors_idx(shader);
1789
1790 radeon_add_to_gfx_buffer_list_check_mem(
1791 sctx, si_resource(buffer),
1792 RADEON_USAGE_READWRITE,
1793 RADEON_PRIO_SAMPLER_BUFFER, true);
1794 }
1795 }
1796 }
1797 }
1798
1799 /* Bindless texture handles */
1800 if (!buffer || buffer->texture_handle_allocated) {
1801 struct si_descriptors *descs = &sctx->bindless_descriptors;
1802
1803 util_dynarray_foreach(&sctx->resident_tex_handles,
1804 struct si_texture_handle *, tex_handle) {
1805 struct pipe_sampler_view *view = (*tex_handle)->view;
1806 unsigned desc_slot = (*tex_handle)->desc_slot;
1807 struct pipe_resource *buffer = view->texture;
1808
1809 if (buffer && buffer->target == PIPE_BUFFER &&
1810 (!buf || buffer == buf)) {
1811 si_set_buf_desc_address(si_resource(buffer),
1812 view->u.buf.offset,
1813 descs->list +
1814 desc_slot * 16 + 4);
1815
1816 (*tex_handle)->desc_dirty = true;
1817 sctx->bindless_descriptors_dirty = true;
1818
1819 radeon_add_to_gfx_buffer_list_check_mem(
1820 sctx, si_resource(buffer),
1821 RADEON_USAGE_READ,
1822 RADEON_PRIO_SAMPLER_BUFFER, true);
1823 }
1824 }
1825 }
1826
1827 /* Bindless image handles */
1828 if (!buffer || buffer->image_handle_allocated) {
1829 struct si_descriptors *descs = &sctx->bindless_descriptors;
1830
1831 util_dynarray_foreach(&sctx->resident_img_handles,
1832 struct si_image_handle *, img_handle) {
1833 struct pipe_image_view *view = &(*img_handle)->view;
1834 unsigned desc_slot = (*img_handle)->desc_slot;
1835 struct pipe_resource *buffer = view->resource;
1836
1837 if (buffer && buffer->target == PIPE_BUFFER &&
1838 (!buf || buffer == buf)) {
1839 if (view->access & PIPE_IMAGE_ACCESS_WRITE)
1840 si_mark_image_range_valid(view);
1841
1842 si_set_buf_desc_address(si_resource(buffer),
1843 view->u.buf.offset,
1844 descs->list +
1845 desc_slot * 16 + 4);
1846
1847 (*img_handle)->desc_dirty = true;
1848 sctx->bindless_descriptors_dirty = true;
1849
1850 radeon_add_to_gfx_buffer_list_check_mem(
1851 sctx, si_resource(buffer),
1852 RADEON_USAGE_READWRITE,
1853 RADEON_PRIO_SAMPLER_BUFFER, true);
1854 }
1855 }
1856 }
1857
1858 if (buffer) {
1859 /* Do the same for other contexts. They will invoke this function
1860 * with buffer == NULL.
1861 */
1862 unsigned new_counter = p_atomic_inc_return(&sctx->screen->dirty_buf_counter);
1863
1864 /* Skip the update for the current context, because we have already updated
1865 * the buffer bindings.
1866 */
1867 if (new_counter == sctx->last_dirty_buf_counter + 1)
1868 sctx->last_dirty_buf_counter = new_counter;
1869 }
1870 }
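
/* A sketch of the cross-context flow above: the context that reallocated
 * the buffer rebinds it directly (buf != NULL) and bumps
 * screen->dirty_buf_counter; each other context later notices that its
 * last_dirty_buf_counter fell behind and calls si_rebind_buffer(sctx, NULL)
 * to refresh all of its buffer bindings.
 */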
1871
1872 static void si_upload_bindless_descriptor(struct si_context *sctx,
1873 unsigned desc_slot,
1874 unsigned num_dwords)
1875 {
1876 struct si_descriptors *desc = &sctx->bindless_descriptors;
1877 unsigned desc_slot_offset = desc_slot * 16;
1878 uint32_t *data;
1879 uint64_t va;
1880
1881 data = desc->list + desc_slot_offset;
1882 va = desc->gpu_address + desc_slot_offset * 4;
1883
1884 si_cp_write_data(sctx, desc->buffer, va - desc->buffer->gpu_address,
1885 num_dwords * 4, V_370_TC_L2, V_370_ME, data);
1886 }
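
/* Worked example for the addressing above: bindless slots are 16 dwords
 * wide, so desc_slot = 2 gives desc_slot_offset = 32 dwords, i.e. the
 * write lands at byte offset 32 * 4 = 128 from desc->gpu_address and
 * covers num_dwords * 4 bytes, written by the ME and landing in L2.
 */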
1887
1888 static void si_upload_bindless_descriptors(struct si_context *sctx)
1889 {
1890 if (!sctx->bindless_descriptors_dirty)
1891 return;
1892
1893 /* Wait for graphics/compute to be idle before updating the resident
1894 * descriptors directly in memory, in case the GPU is using them.
1895 */
1896 sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
1897 SI_CONTEXT_CS_PARTIAL_FLUSH;
1898 si_emit_cache_flush(sctx);
1899
1900 util_dynarray_foreach(&sctx->resident_tex_handles,
1901 struct si_texture_handle *, tex_handle) {
1902 unsigned desc_slot = (*tex_handle)->desc_slot;
1903
1904 if (!(*tex_handle)->desc_dirty)
1905 continue;
1906
1907 si_upload_bindless_descriptor(sctx, desc_slot, 16);
1908 (*tex_handle)->desc_dirty = false;
1909 }
1910
1911 util_dynarray_foreach(&sctx->resident_img_handles,
1912 struct si_image_handle *, img_handle) {
1913 unsigned desc_slot = (*img_handle)->desc_slot;
1914
1915 if (!(*img_handle)->desc_dirty)
1916 continue;
1917
1918 si_upload_bindless_descriptor(sctx, desc_slot, 8);
1919 (*img_handle)->desc_dirty = false;
1920 }
1921
1922 /* Invalidate L1 because it doesn't know that L2 changed. */
1923 sctx->flags |= SI_CONTEXT_INV_SCACHE;
1924 si_emit_cache_flush(sctx);
1925
1926 sctx->bindless_descriptors_dirty = false;
1927 }
1928
1929 /* Update mutable image descriptor fields of a resident texture. */
1930 static void si_update_bindless_texture_descriptor(struct si_context *sctx,
1931 struct si_texture_handle *tex_handle)
1932 {
1933 struct si_sampler_view *sview = (struct si_sampler_view *)tex_handle->view;
1934 struct si_descriptors *desc = &sctx->bindless_descriptors;
1935 unsigned desc_slot_offset = tex_handle->desc_slot * 16;
1936 uint32_t desc_list[16];
1937
1938 if (sview->base.texture->target == PIPE_BUFFER)
1939 return;
1940
1941 memcpy(desc_list, desc->list + desc_slot_offset, sizeof(desc_list));
1942 si_set_sampler_view_desc(sctx, sview, &tex_handle->sstate,
1943 desc->list + desc_slot_offset);
1944
1945 if (memcmp(desc_list, desc->list + desc_slot_offset,
1946 sizeof(desc_list))) {
1947 tex_handle->desc_dirty = true;
1948 sctx->bindless_descriptors_dirty = true;
1949 }
1950 }
1951
1952 static void si_update_bindless_image_descriptor(struct si_context *sctx,
1953 struct si_image_handle *img_handle)
1954 {
1955 struct si_descriptors *desc = &sctx->bindless_descriptors;
1956 unsigned desc_slot_offset = img_handle->desc_slot * 16;
1957 struct pipe_image_view *view = &img_handle->view;
1958 uint32_t desc_list[8];
1959
1960 if (view->resource->target == PIPE_BUFFER)
1961 return;
1962
1963 memcpy(desc_list, desc->list + desc_slot_offset,
1964 sizeof(desc_list));
1965 si_set_shader_image_desc(sctx, view, true,
1966 desc->list + desc_slot_offset, NULL);
1967
1968 if (memcmp(desc_list, desc->list + desc_slot_offset,
1969 sizeof(desc_list))) {
1970 img_handle->desc_dirty = true;
1971 sctx->bindless_descriptors_dirty = true;
1972 }
1973 }
1974
1975 static void si_update_all_resident_texture_descriptors(struct si_context *sctx)
1976 {
1977 util_dynarray_foreach(&sctx->resident_tex_handles,
1978 struct si_texture_handle *, tex_handle) {
1979 si_update_bindless_texture_descriptor(sctx, *tex_handle);
1980 }
1981
1982 util_dynarray_foreach(&sctx->resident_img_handles,
1983 struct si_image_handle *, img_handle) {
1984 si_update_bindless_image_descriptor(sctx, *img_handle);
1985 }
1986
1987 si_upload_bindless_descriptors(sctx);
1988 }
1989
1990 /* Update mutable image descriptor fields of all bound textures. */
1991 void si_update_all_texture_descriptors(struct si_context *sctx)
1992 {
1993 unsigned shader;
1994
1995 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1996 struct si_samplers *samplers = &sctx->samplers[shader];
1997 struct si_images *images = &sctx->images[shader];
1998 unsigned mask;
1999
2000 /* Images. */
2001 mask = images->enabled_mask;
2002 while (mask) {
2003 unsigned i = u_bit_scan(&mask);
2004 struct pipe_image_view *view = &images->views[i];
2005
2006 if (!view->resource ||
2007 view->resource->target == PIPE_BUFFER)
2008 continue;
2009
2010 si_set_shader_image(sctx, shader, i, view, true);
2011 }
2012
2013 /* Sampler views. */
2014 mask = samplers->enabled_mask;
2015 while (mask) {
2016 unsigned i = u_bit_scan(&mask);
2017 struct pipe_sampler_view *view = samplers->views[i];
2018
2019 if (!view ||
2020 !view->texture ||
2021 view->texture->target == PIPE_BUFFER)
2022 continue;
2023
2024 si_set_sampler_view(sctx, shader, i,
2025 samplers->views[i], true);
2026 }
2027
2028 si_update_shader_needs_decompress_mask(sctx, shader);
2029 }
2030
2031 si_update_all_resident_texture_descriptors(sctx);
2032 si_update_ps_colorbuf0_slot(sctx);
2033 }
2034
2035 /* SHADER USER DATA */
2036
2037 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
2038 unsigned shader)
2039 {
2040 sctx->shader_pointers_dirty |=
2041 u_bit_consecutive(SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS,
2042 SI_NUM_SHADER_DESCS);
2043
2044 if (shader == PIPE_SHADER_VERTEX)
2045 sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
2046
2047 si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
2048 }
2049
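/* Begin-new-CS hook: SET_SH_REG state does not persist across command
 * streams, so every descriptor pointer (plus the vertex buffer and
 * bindless pointers, when their buffers exist) is marked dirty here and
 * re-emitted by the next draw/dispatch.
 */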
2050 static void si_shader_pointers_begin_new_cs(struct si_context *sctx)
2051 {
2052 sctx->shader_pointers_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
2053 sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
2054 si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
2055 sctx->graphics_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
2056 sctx->compute_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
2057 }
2058
2059 /* Set a base register address for user data constants in the given shader.
2060 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
2061 */
2062 static void si_set_user_data_base(struct si_context *sctx,
2063 unsigned shader, uint32_t new_base)
2064 {
2065 uint32_t *base = &sctx->shader_pointers.sh_base[shader];
2066
2067 if (*base != new_base) {
2068 *base = new_base;
2069
2070 if (new_base)
2071 si_mark_shader_pointers_dirty(sctx, shader);
2072
2073 /* Any change in enabled shader stages requires re-emitting
2074 * the VS state SGPR, because it contains the clamp_vertex_color
2075 * state, which can be applied in VS, TES, and GS.
2076 */
2077 sctx->last_vs_state = ~0;
2078 }
2079 }
2080
2081 /* This must be called when these shaders are changed from non-NULL to NULL
2082 * and vice versa:
2083 * - geometry shader
2084 * - tessellation control shader
2085 * - tessellation evaluation shader
2086 */
2087 void si_shader_change_notify(struct si_context *sctx)
2088 {
2089 /* VS can be bound as VS, ES, or LS. */
2090 if (sctx->tes_shader.cso) {
2091 if (sctx->chip_class >= GFX9) {
2092 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2093 R_00B430_SPI_SHADER_USER_DATA_LS_0);
2094 } else {
2095 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2096 R_00B530_SPI_SHADER_USER_DATA_LS_0);
2097 }
2098 } else if (sctx->gs_shader.cso) {
2099 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2100 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2101 } else {
2102 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2103 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2104 }
2105
2106 /* TES can be bound as ES, VS, or not bound. */
2107 if (sctx->tes_shader.cso) {
2108 if (sctx->gs_shader.cso)
2109 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
2110 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2111 else
2112 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
2113 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2114 } else {
2115 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
2116 }
2117 }
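
/* Summary of the stage mapping chosen above (an informal recap, not new
 * behavior):
 *   tess enabled:      VS is bound as LS (merged LS-HS user data on GFX9)
 *   GS only:           VS is bound as ES
 *   neither:           VS is bound as hardware VS
 * TES, when present, is ES if GS is enabled and hardware VS otherwise.
 */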
2118
2119 static void si_emit_shader_pointer_head(struct radeon_cmdbuf *cs,
2120 unsigned sh_offset,
2121 unsigned pointer_count)
2122 {
2123 radeon_emit(cs, PKT3(PKT3_SET_SH_REG, pointer_count, 0));
2124 radeon_emit(cs, (sh_offset - SI_SH_REG_OFFSET) >> 2);
2125 }
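
/* The packet built above is: one PKT3_SET_SH_REG header dword, one dword
 * with the register offset (in dwords, relative to SI_SH_REG_OFFSET), and
 * then "pointer_count" data dwords supplied by the callers.
 */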
2126
2127 static void si_emit_shader_pointer_body(struct si_screen *sscreen,
2128 struct radeon_cmdbuf *cs,
2129 uint64_t va)
2130 {
2131 radeon_emit(cs, va);
2132
2133 assert(va == 0 || (va >> 32) == sscreen->info.address32_hi);
2134 }
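
/* Only the low 32 bits of the descriptor address are emitted. The high
 * 32 bits are the same for all descriptor uploads (the assert checks them
 * against sscreen->info.address32_hi), so the shader side can assume them
 * instead of spending a second SGPR per pointer.
 */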
2135
2136 static void si_emit_shader_pointer(struct si_context *sctx,
2137 struct si_descriptors *desc,
2138 unsigned sh_base)
2139 {
2140 struct radeon_cmdbuf *cs = sctx->gfx_cs;
2141 unsigned sh_offset = sh_base + desc->shader_userdata_offset;
2142
2143 si_emit_shader_pointer_head(cs, sh_offset, 1);
2144 si_emit_shader_pointer_body(sctx->screen, cs, desc->gpu_address);
2145 }
2146
2147 static void si_emit_consecutive_shader_pointers(struct si_context *sctx,
2148 unsigned pointer_mask,
2149 unsigned sh_base)
2150 {
2151 if (!sh_base)
2152 return;
2153
2154 struct radeon_cmdbuf *cs = sctx->gfx_cs;
2155 unsigned mask = sctx->shader_pointers_dirty & pointer_mask;
2156
2157 while (mask) {
2158 int start, count;
2159 u_bit_scan_consecutive_range(&mask, &start, &count);
2160
2161 struct si_descriptors *descs = &sctx->descriptors[start];
2162 unsigned sh_offset = sh_base + descs->shader_userdata_offset;
2163
2164 si_emit_shader_pointer_head(cs, sh_offset, count);
2165 for (int i = 0; i < count; i++)
2166 si_emit_shader_pointer_body(sctx->screen, cs,
2167 descs[i].gpu_address);
2168 }
2169 }
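
/* Example: if descriptor sets 3..5 are dirty in pointer_mask, a single
 * SET_SH_REG packet with three data dwords is emitted. Note that this
 * relies on consecutive sctx->descriptors[] entries having consecutive
 * shader_userdata_offset values, since only the first entry's offset goes
 * into the packet header.
 */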
2170
2171 static void si_emit_global_shader_pointers(struct si_context *sctx,
2172 struct si_descriptors *descs)
2173 {
2174 if (sctx->chip_class == GFX9) {
2175 /* Broadcast it to all shader stages. */
2176 si_emit_shader_pointer(sctx, descs,
2177 R_00B530_SPI_SHADER_USER_DATA_COMMON_0);
2178 return;
2179 }
2180
2181 si_emit_shader_pointer(sctx, descs,
2182 R_00B030_SPI_SHADER_USER_DATA_PS_0);
2183 si_emit_shader_pointer(sctx, descs,
2184 R_00B130_SPI_SHADER_USER_DATA_VS_0);
2185 si_emit_shader_pointer(sctx, descs,
2186 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2187 si_emit_shader_pointer(sctx, descs,
2188 R_00B230_SPI_SHADER_USER_DATA_GS_0);
2189 si_emit_shader_pointer(sctx, descs,
2190 R_00B430_SPI_SHADER_USER_DATA_HS_0);
2191 si_emit_shader_pointer(sctx, descs,
2192 R_00B530_SPI_SHADER_USER_DATA_LS_0);
2193 }
2194
2195 void si_emit_graphics_shader_pointers(struct si_context *sctx)
2196 {
2197 uint32_t *sh_base = sctx->shader_pointers.sh_base;
2198
2199 if (sctx->shader_pointers_dirty & (1 << SI_DESCS_RW_BUFFERS)) {
2200 si_emit_global_shader_pointers(sctx,
2201 &sctx->descriptors[SI_DESCS_RW_BUFFERS]);
2202 }
2203
2204 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(VERTEX),
2205 sh_base[PIPE_SHADER_VERTEX]);
2206 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(TESS_EVAL),
2207 sh_base[PIPE_SHADER_TESS_EVAL]);
2208 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(FRAGMENT),
2209 sh_base[PIPE_SHADER_FRAGMENT]);
2210 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(TESS_CTRL),
2211 sh_base[PIPE_SHADER_TESS_CTRL]);
2212 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(GEOMETRY),
2213 sh_base[PIPE_SHADER_GEOMETRY]);
2214
2215 sctx->shader_pointers_dirty &=
2216 ~u_bit_consecutive(SI_DESCS_RW_BUFFERS, SI_DESCS_FIRST_COMPUTE);
2217
2218 if (sctx->vertex_buffer_pointer_dirty) {
2219 struct radeon_cmdbuf *cs = sctx->gfx_cs;
2220
2221 /* Find the location of the VB descriptor pointer. */
2222 /* TODO: In the future, the pointer will be packed in unused
2223 * bits of the first 2 VB descriptors. */
2224 unsigned sh_dw_offset = SI_VS_NUM_USER_SGPR;
2225 if (sctx->chip_class >= GFX9) {
2226 if (sctx->tes_shader.cso)
2227 sh_dw_offset = GFX9_TCS_NUM_USER_SGPR;
2228 else if (sctx->gs_shader.cso)
2229 sh_dw_offset = GFX9_VSGS_NUM_USER_SGPR;
2230 }
2231
2232 unsigned sh_offset = sh_base[PIPE_SHADER_VERTEX] + sh_dw_offset * 4;
2233 si_emit_shader_pointer_head(cs, sh_offset, 1);
2234 si_emit_shader_pointer_body(sctx->screen, cs,
2235 sctx->vb_descriptors_buffer->gpu_address +
2236 sctx->vb_descriptors_offset);
2237 sctx->vertex_buffer_pointer_dirty = false;
2238 }
2239
2240 if (sctx->graphics_bindless_pointer_dirty) {
2241 si_emit_global_shader_pointers(sctx,
2242 &sctx->bindless_descriptors);
2243 sctx->graphics_bindless_pointer_dirty = false;
2244 }
2245 }
2246
2247 void si_emit_compute_shader_pointers(struct si_context *sctx)
2248 {
2249 unsigned base = R_00B900_COMPUTE_USER_DATA_0;
2250
2251 si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(COMPUTE),
2252 base);
2253 sctx->shader_pointers_dirty &= ~SI_DESCS_SHADER_MASK(COMPUTE);
2254
2255 if (sctx->compute_bindless_pointer_dirty) {
2256 si_emit_shader_pointer(sctx, &sctx->bindless_descriptors, base);
2257 sctx->compute_bindless_pointer_dirty = false;
2258 }
2259 }
2260
2261 /* BINDLESS */
2262
2263 static void si_init_bindless_descriptors(struct si_context *sctx,
2264 struct si_descriptors *desc,
2265 short shader_userdata_rel_index,
2266 unsigned num_elements)
2267 {
2268 MAYBE_UNUSED unsigned desc_slot;
2269
2270 si_init_descriptors(desc, shader_userdata_rel_index, 16, num_elements);
2271 sctx->bindless_descriptors.num_active_slots = num_elements;
2272
2273 /* The first bindless descriptor is stored at slot 1, because 0 is not
2274 * considered to be a valid handle.
2275 */
2276 sctx->num_bindless_descriptors = 1;
2277
2278 /* Track which bindless slots are used (or not). */
2279 util_idalloc_init(&sctx->bindless_used_slots);
2280 util_idalloc_resize(&sctx->bindless_used_slots, num_elements);
2281
2282 /* Reserve slot 0 because it's an invalid handle for bindless. */
2283 desc_slot = util_idalloc_alloc(&sctx->bindless_used_slots);
2284 assert(desc_slot == 0);
2285 }
2286
2287 static void si_release_bindless_descriptors(struct si_context *sctx)
2288 {
2289 si_release_descriptors(&sctx->bindless_descriptors);
2290 util_idalloc_fini(&sctx->bindless_used_slots);
2291 }
2292
2293 static unsigned si_get_first_free_bindless_slot(struct si_context *sctx)
2294 {
2295 struct si_descriptors *desc = &sctx->bindless_descriptors;
2296 unsigned desc_slot;
2297
2298 desc_slot = util_idalloc_alloc(&sctx->bindless_used_slots);
2299 if (desc_slot >= desc->num_elements) {
2300 /* The array of bindless descriptors is full, resize it. */
2301 unsigned slot_size = desc->element_dw_size * 4;
2302 unsigned new_num_elements = desc->num_elements * 2;
2303
2304 desc->list = REALLOC(desc->list, desc->num_elements * slot_size,
2305 new_num_elements * slot_size);
2306 desc->num_elements = new_num_elements;
2307 desc->num_active_slots = new_num_elements;
2308 }
2309
2310 assert(desc_slot);
2311 return desc_slot;
2312 }
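
/* Example growth sequence, assuming the initial 1024 slots allocated in
 * si_init_all_descriptors(): 1024 -> 2048 -> 4096 -> ... REALLOC preserves
 * the existing descriptors, and num_active_slots is bumped so the whole
 * enlarged list keeps being uploaded.
 */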
2313
2314 static unsigned
2315 si_create_bindless_descriptor(struct si_context *sctx, uint32_t *desc_list,
2316 unsigned size)
2317 {
2318 struct si_descriptors *desc = &sctx->bindless_descriptors;
2319 unsigned desc_slot, desc_slot_offset;
2320
2321 /* Find a free slot. */
2322 desc_slot = si_get_first_free_bindless_slot(sctx);
2323
2324 /* For simplicity, sampler and image bindless descriptors use fixed
2325 * 16-dword slots for now. Image descriptors only need 8 dwords, but this
2326 * doesn't really matter because no real apps use image handles.
2327 */
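/* Example: desc_slot = 3 maps to desc_slot_offset = 48, i.e. dwords
 * [48..63] of desc->list; an 8-dword image descriptor fills only the
 * first half of that range.
 */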
2328 desc_slot_offset = desc_slot * 16;
2329
2330 /* Copy the descriptor into the array. */
2331 memcpy(desc->list + desc_slot_offset, desc_list, size);
2332
2333 /* Re-upload the whole array of bindless descriptors into a new buffer.
2334 */
2335 if (!si_upload_descriptors(sctx, desc))
2336 return 0;
2337
2338 /* Make sure to re-emit the shader pointers for all stages. */
2339 sctx->graphics_bindless_pointer_dirty = true;
2340 sctx->compute_bindless_pointer_dirty = true;
2341
2342 return desc_slot;
2343 }
2344
2345 static void si_update_bindless_buffer_descriptor(struct si_context *sctx,
2346 unsigned desc_slot,
2347 struct pipe_resource *resource,
2348 uint64_t offset,
2349 bool *desc_dirty)
2350 {
2351 struct si_descriptors *desc = &sctx->bindless_descriptors;
2352 struct si_resource *buf = si_resource(resource);
2353 unsigned desc_slot_offset = desc_slot * 16;
2354 uint32_t *desc_list = desc->list + desc_slot_offset + 4;
2355 uint64_t old_desc_va;
2356
2357 assert(resource->target == PIPE_BUFFER);
2358
2359 /* Retrieve the old buffer addr from the descriptor. */
2360 old_desc_va = si_desc_extract_buffer_address(desc_list);
2361
2362 if (old_desc_va != buf->gpu_address + offset) {
2363 /* The buffer has been invalidated while the handle wasn't
2364 * resident; update the descriptor and the dirty flag.
2365 */
2366 si_set_buf_desc_address(buf, offset, &desc_list[0]);
2367
2368 *desc_dirty = true;
2369 }
2370 }
2371
2372 static uint64_t si_create_texture_handle(struct pipe_context *ctx,
2373 struct pipe_sampler_view *view,
2374 const struct pipe_sampler_state *state)
2375 {
2376 struct si_sampler_view *sview = (struct si_sampler_view *)view;
2377 struct si_context *sctx = (struct si_context *)ctx;
2378 struct si_texture_handle *tex_handle;
2379 struct si_sampler_state *sstate;
2380 uint32_t desc_list[16];
2381 uint64_t handle;
2382
2383 tex_handle = CALLOC_STRUCT(si_texture_handle);
2384 if (!tex_handle)
2385 return 0;
2386
2387 memset(desc_list, 0, sizeof(desc_list));
2388 si_init_descriptor_list(&desc_list[0], 16, 1, null_texture_descriptor);
2389
2390 sstate = ctx->create_sampler_state(ctx, state);
2391 if (!sstate) {
2392 FREE(tex_handle);
2393 return 0;
2394 }
2395
2396 si_set_sampler_view_desc(sctx, sview, sstate, &desc_list[0]);
2397 memcpy(&tex_handle->sstate, sstate, sizeof(*sstate));
2398 ctx->delete_sampler_state(ctx, sstate);
2399
2400 tex_handle->desc_slot = si_create_bindless_descriptor(sctx, desc_list,
2401 sizeof(desc_list));
2402 if (!tex_handle->desc_slot) {
2403 FREE(tex_handle);
2404 return 0;
2405 }
2406
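/* The 64-bit handle exposed to the API is simply the descriptor slot
 * index; slot 0 is reserved as the invalid handle, so valid handles start
 * at 1 and 0 doubles as the error return value.
 */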
2407 handle = tex_handle->desc_slot;
2408
2409 if (!_mesa_hash_table_insert(sctx->tex_handles,
2410 (void *)(uintptr_t)handle,
2411 tex_handle)) {
2412 FREE(tex_handle);
2413 return 0;
2414 }
2415
2416 pipe_sampler_view_reference(&tex_handle->view, view);
2417
2418 si_resource(sview->base.texture)->texture_handle_allocated = true;
2419
2420 return handle;
2421 }
2422
2423 static void si_delete_texture_handle(struct pipe_context *ctx, uint64_t handle)
2424 {
2425 struct si_context *sctx = (struct si_context *)ctx;
2426 struct si_texture_handle *tex_handle;
2427 struct hash_entry *entry;
2428
2429 entry = _mesa_hash_table_search(sctx->tex_handles,
2430 (void *)(uintptr_t)handle);
2431 if (!entry)
2432 return;
2433
2434 tex_handle = (struct si_texture_handle *)entry->data;
2435
2436 /* Allow this descriptor slot to be re-used. */
2437 util_idalloc_free(&sctx->bindless_used_slots, tex_handle->desc_slot);
2438
2439 pipe_sampler_view_reference(&tex_handle->view, NULL);
2440 _mesa_hash_table_remove(sctx->tex_handles, entry);
2441 FREE(tex_handle);
2442 }
2443
2444 static void si_make_texture_handle_resident(struct pipe_context *ctx,
2445 uint64_t handle, bool resident)
2446 {
2447 struct si_context *sctx = (struct si_context *)ctx;
2448 struct si_texture_handle *tex_handle;
2449 struct si_sampler_view *sview;
2450 struct hash_entry *entry;
2451
2452 entry = _mesa_hash_table_search(sctx->tex_handles,
2453 (void *)(uintptr_t)handle);
2454 if (!entry)
2455 return;
2456
2457 tex_handle = (struct si_texture_handle *)entry->data;
2458 sview = (struct si_sampler_view *)tex_handle->view;
2459
2460 if (resident) {
2461 if (sview->base.texture->target != PIPE_BUFFER) {
2462 struct si_texture *tex =
2463 (struct si_texture *)sview->base.texture;
2464
2465 if (depth_needs_decompression(tex)) {
2466 util_dynarray_append(
2467 &sctx->resident_tex_needs_depth_decompress,
2468 struct si_texture_handle *,
2469 tex_handle);
2470 }
2471
2472 if (color_needs_decompression(tex)) {
2473 util_dynarray_append(
2474 &sctx->resident_tex_needs_color_decompress,
2475 struct si_texture_handle *,
2476 tex_handle);
2477 }
2478
2479 if (tex->dcc_offset &&
2480 p_atomic_read(&tex->framebuffers_bound))
2481 sctx->need_check_render_feedback = true;
2482
2483 si_update_bindless_texture_descriptor(sctx, tex_handle);
2484 } else {
2485 si_update_bindless_buffer_descriptor(sctx,
2486 tex_handle->desc_slot,
2487 sview->base.texture,
2488 sview->base.u.buf.offset,
2489 &tex_handle->desc_dirty);
2490 }
2491
2492 /* Re-upload the descriptor if it has been updated while it
2493 * wasn't resident.
2494 */
2495 if (tex_handle->desc_dirty)
2496 sctx->bindless_descriptors_dirty = true;
2497
2498 /* Add the texture handle to the per-context list. */
2499 util_dynarray_append(&sctx->resident_tex_handles,
2500 struct si_texture_handle *, tex_handle);
2501
2502 /* Add the buffers to the current CS in case si_begin_new_cs()
2503 * is not going to be called.
2504 */
2505 si_sampler_view_add_buffer(sctx, sview->base.texture,
2506 RADEON_USAGE_READ,
2507 sview->is_stencil_sampler, false);
2508 } else {
2509 /* Remove the texture handle from the per-context list. */
2510 util_dynarray_delete_unordered(&sctx->resident_tex_handles,
2511 struct si_texture_handle *,
2512 tex_handle);
2513
2514 if (sview->base.texture->target != PIPE_BUFFER) {
2515 util_dynarray_delete_unordered(
2516 &sctx->resident_tex_needs_depth_decompress,
2517 struct si_texture_handle *, tex_handle);
2518
2519 util_dynarray_delete_unordered(
2520 &sctx->resident_tex_needs_color_decompress,
2521 struct si_texture_handle *, tex_handle);
2522 }
2523 }
2524 }
2525
2526 static uint64_t si_create_image_handle(struct pipe_context *ctx,
2527 const struct pipe_image_view *view)
2528 {
2529 struct si_context *sctx = (struct si_context *)ctx;
2530 struct si_image_handle *img_handle;
2531 uint32_t desc_list[8];
2532 uint64_t handle;
2533
2534 if (!view || !view->resource)
2535 return 0;
2536
2537 img_handle = CALLOC_STRUCT(si_image_handle);
2538 if (!img_handle)
2539 return 0;
2540
2541 memset(desc_list, 0, sizeof(desc_list));
2542 si_init_descriptor_list(&desc_list[0], 8, 1, null_image_descriptor);
2543
2544 si_set_shader_image_desc(sctx, view, false, &desc_list[0], NULL);
2545
2546 img_handle->desc_slot = si_create_bindless_descriptor(sctx, desc_list,
2547 sizeof(desc_list));
2548 if (!img_handle->desc_slot) {
2549 FREE(img_handle);
2550 return 0;
2551 }
2552
2553 handle = img_handle->desc_slot;
2554
2555 if (!_mesa_hash_table_insert(sctx->img_handles,
2556 (void *)(uintptr_t)handle,
2557 img_handle)) {
2558 FREE(img_handle);
2559 return 0;
2560 }
2561
2562 util_copy_image_view(&img_handle->view, view);
2563
2564 si_resource(view->resource)->image_handle_allocated = true;
2565
2566 return handle;
2567 }
2568
2569 static void si_delete_image_handle(struct pipe_context *ctx, uint64_t handle)
2570 {
2571 struct si_context *sctx = (struct si_context *)ctx;
2572 struct si_image_handle *img_handle;
2573 struct hash_entry *entry;
2574
2575 entry = _mesa_hash_table_search(sctx->img_handles,
2576 (void *)(uintptr_t)handle);
2577 if (!entry)
2578 return;
2579
2580 img_handle = (struct si_image_handle *)entry->data;
2581
2582 util_copy_image_view(&img_handle->view, NULL);
2583 _mesa_hash_table_remove(sctx->img_handles, entry);
2584 FREE(img_handle);
2585 }
2586
2587 static void si_make_image_handle_resident(struct pipe_context *ctx,
2588 uint64_t handle, unsigned access,
2589 bool resident)
2590 {
2591 struct si_context *sctx = (struct si_context *)ctx;
2592 struct si_image_handle *img_handle;
2593 struct pipe_image_view *view;
2594 struct si_resource *res;
2595 struct hash_entry *entry;
2596
2597 entry = _mesa_hash_table_search(sctx->img_handles,
2598 (void *)(uintptr_t)handle);
2599 if (!entry)
2600 return;
2601
2602 img_handle = (struct si_image_handle *)entry->data;
2603 view = &img_handle->view;
2604 res = si_resource(view->resource);
2605
2606 if (resident) {
2607 if (res->b.b.target != PIPE_BUFFER) {
2608 struct si_texture *tex = (struct si_texture *)res;
2609 unsigned level = view->u.tex.level;
2610
2611 if (color_needs_decompression(tex)) {
2612 util_dynarray_append(
2613 &sctx->resident_img_needs_color_decompress,
2614 struct si_image_handle *,
2615 img_handle);
2616 }
2617
2618 if (vi_dcc_enabled(tex, level) &&
2619 p_atomic_read(&tex->framebuffers_bound))
2620 sctx->need_check_render_feedback = true;
2621
2622 si_update_bindless_image_descriptor(sctx, img_handle);
2623 } else {
2624 si_update_bindless_buffer_descriptor(sctx,
2625 img_handle->desc_slot,
2626 view->resource,
2627 view->u.buf.offset,
2628 &img_handle->desc_dirty);
2629 }
2630
2631 /* Re-upload the descriptor if it has been updated while it
2632 * wasn't resident.
2633 */
2634 if (img_handle->desc_dirty)
2635 sctx->bindless_descriptors_dirty = true;
2636
2637 /* Add the image handle to the per-context list. */
2638 util_dynarray_append(&sctx->resident_img_handles,
2639 struct si_image_handle *, img_handle);
2640
2641 /* Add the buffers to the current CS in case si_begin_new_cs()
2642 * is not going to be called.
2643 */
2644 si_sampler_view_add_buffer(sctx, view->resource,
2645 (access & PIPE_IMAGE_ACCESS_WRITE) ?
2646 RADEON_USAGE_READWRITE :
2647 RADEON_USAGE_READ, false, false);
2648 } else {
2649 /* Remove the image handle from the per-context list. */
2650 util_dynarray_delete_unordered(&sctx->resident_img_handles,
2651 struct si_image_handle *,
2652 img_handle);
2653
2654 if (res->b.b.target != PIPE_BUFFER) {
2655 util_dynarray_delete_unordered(
2656 &sctx->resident_img_needs_color_decompress,
2657 struct si_image_handle *,
2658 img_handle);
2659 }
2660 }
2661 }
2662
2663 static void si_resident_buffers_add_all_to_bo_list(struct si_context *sctx)
2664 {
2665 unsigned num_resident_tex_handles, num_resident_img_handles;
2666
2667 num_resident_tex_handles = sctx->resident_tex_handles.size /
2668 sizeof(struct si_texture_handle *);
2669 num_resident_img_handles = sctx->resident_img_handles.size /
2670 sizeof(struct si_image_handle *);
2671
2672 /* Add all resident texture handles. */
2673 util_dynarray_foreach(&sctx->resident_tex_handles,
2674 struct si_texture_handle *, tex_handle) {
2675 struct si_sampler_view *sview =
2676 (struct si_sampler_view *)(*tex_handle)->view;
2677
2678 si_sampler_view_add_buffer(sctx, sview->base.texture,
2679 RADEON_USAGE_READ,
2680 sview->is_stencil_sampler, false);
2681 }
2682
2683 /* Add all resident image handles. */
2684 util_dynarray_foreach(&sctx->resident_img_handles,
2685 struct si_image_handle *, img_handle) {
2686 struct pipe_image_view *view = &(*img_handle)->view;
2687
2688 si_sampler_view_add_buffer(sctx, view->resource,
2689 RADEON_USAGE_READWRITE,
2690 false, false);
2691 }
2692
2693 sctx->num_resident_handles += num_resident_tex_handles +
2694 num_resident_img_handles;
2695 assert(sctx->bo_list_add_all_resident_resources);
2696 sctx->bo_list_add_all_resident_resources = false;
2697 }
2698
2699 /* INIT/DEINIT/UPLOAD */
2700
2701 void si_init_all_descriptors(struct si_context *sctx)
2702 {
2703 int i;
2704 unsigned first_shader =
2705 sctx->has_graphics ? 0 : PIPE_SHADER_COMPUTE;
2706
2707 for (i = first_shader; i < SI_NUM_SHADERS; i++) {
2708 bool is_2nd = sctx->chip_class >= GFX9 &&
2709 (i == PIPE_SHADER_TESS_CTRL ||
2710 i == PIPE_SHADER_GEOMETRY);
2711 unsigned num_sampler_slots = SI_NUM_IMAGES / 2 + SI_NUM_SAMPLERS;
2712 unsigned num_buffer_slots = SI_NUM_SHADER_BUFFERS + SI_NUM_CONST_BUFFERS;
2713 int rel_dw_offset;
2714 struct si_descriptors *desc;
2715
2716 if (is_2nd) {
2717 if (i == PIPE_SHADER_TESS_CTRL) {
2718 rel_dw_offset = (R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS -
2719 R_00B430_SPI_SHADER_USER_DATA_LS_0) / 4;
2720 } else { /* PIPE_SHADER_GEOMETRY */
2721 rel_dw_offset = (R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS -
2722 R_00B330_SPI_SHADER_USER_DATA_ES_0) / 4;
2723 }
2724 } else {
2725 rel_dw_offset = SI_SGPR_CONST_AND_SHADER_BUFFERS;
2726 }
2727 desc = si_const_and_shader_buffer_descriptors(sctx, i);
2728 si_init_buffer_resources(&sctx->const_and_shader_buffers[i], desc,
2729 num_buffer_slots, rel_dw_offset,
2730 RADEON_PRIO_SHADER_RW_BUFFER,
2731 RADEON_PRIO_CONST_BUFFER);
2732 desc->slot_index_to_bind_directly = si_get_constbuf_slot(0);
2733
2734 if (is_2nd) {
2735 if (i == PIPE_SHADER_TESS_CTRL) {
2736 rel_dw_offset = (R_00B40C_SPI_SHADER_USER_DATA_ADDR_HI_HS -
2737 R_00B430_SPI_SHADER_USER_DATA_LS_0) / 4;
2738 } else { /* PIPE_SHADER_GEOMETRY */
2739 rel_dw_offset = (R_00B20C_SPI_SHADER_USER_DATA_ADDR_HI_GS -
2740 R_00B330_SPI_SHADER_USER_DATA_ES_0) / 4;
2741 }
2742 } else {
2743 rel_dw_offset = SI_SGPR_SAMPLERS_AND_IMAGES;
2744 }
2745
2746 desc = si_sampler_and_image_descriptors(sctx, i);
2747 si_init_descriptors(desc, rel_dw_offset, 16, num_sampler_slots);
2748
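/* The list is filled in 8-dword halves: images take half of a 16-dword
 * slot (two images per slot), while each texture+sampler slot is a full
 * 16 dwords, written here as two consecutive 8-dword null texture
 * descriptors.
 */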
2749 int j;
2750 for (j = 0; j < SI_NUM_IMAGES; j++)
2751 memcpy(desc->list + j * 8, null_image_descriptor, 8 * 4);
2752 for (; j < SI_NUM_IMAGES + SI_NUM_SAMPLERS * 2; j++)
2753 memcpy(desc->list + j * 8, null_texture_descriptor, 8 * 4);
2754 }
2755
2756 si_init_buffer_resources(&sctx->rw_buffers,
2757 &sctx->descriptors[SI_DESCS_RW_BUFFERS],
2758 SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
2759 /* The second priority is used by
2760 * const buffers in RW buffer slots. */
2761 RADEON_PRIO_SHADER_RINGS, RADEON_PRIO_CONST_BUFFER);
2762 sctx->descriptors[SI_DESCS_RW_BUFFERS].num_active_slots = SI_NUM_RW_BUFFERS;
2763
2764 /* Initialize an array of 1024 bindless descriptors; when the limit is
2765 * reached, just make it larger and re-upload the whole array.
2766 */
2767 si_init_bindless_descriptors(sctx, &sctx->bindless_descriptors,
2768 SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES,
2769 1024);
2770
2771 sctx->descriptors_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
2772
2773 /* Set pipe_context functions. */
2774 sctx->b.bind_sampler_states = si_bind_sampler_states;
2775 sctx->b.set_shader_images = si_set_shader_images;
2776 sctx->b.set_constant_buffer = si_pipe_set_constant_buffer;
2777 sctx->b.set_shader_buffers = si_set_shader_buffers;
2778 sctx->b.set_sampler_views = si_set_sampler_views;
2779 sctx->b.create_texture_handle = si_create_texture_handle;
2780 sctx->b.delete_texture_handle = si_delete_texture_handle;
2781 sctx->b.make_texture_handle_resident = si_make_texture_handle_resident;
2782 sctx->b.create_image_handle = si_create_image_handle;
2783 sctx->b.delete_image_handle = si_delete_image_handle;
2784 sctx->b.make_image_handle_resident = si_make_image_handle_resident;
2785
2786 if (!sctx->has_graphics)
2787 return;
2788
2789 sctx->b.set_polygon_stipple = si_set_polygon_stipple;
2790
2791 /* Shader user data. */
2792 sctx->atoms.s.shader_pointers.emit = si_emit_graphics_shader_pointers;
2793
2794 /* Set default and immutable mappings. */
2795 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
2796
2797 if (sctx->chip_class >= GFX9) {
2798 si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL,
2799 R_00B430_SPI_SHADER_USER_DATA_LS_0);
2800 si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY,
2801 R_00B330_SPI_SHADER_USER_DATA_ES_0);
2802 } else {
2803 si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL,
2804 R_00B430_SPI_SHADER_USER_DATA_HS_0);
2805 si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY,
2806 R_00B230_SPI_SHADER_USER_DATA_GS_0);
2807 }
2808 si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
2809 }
2810
2811 static bool si_upload_shader_descriptors(struct si_context *sctx, unsigned mask)
2812 {
2813 unsigned dirty = sctx->descriptors_dirty & mask;
2814
2815 /* Assume nothing will go wrong: */
2816 sctx->shader_pointers_dirty |= dirty;
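/* (Each successful upload below places the list at a new GPU address,
 * which is why the corresponding shader pointers are marked dirty up
 * front.)
 */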
2817
2818 while (dirty) {
2819 unsigned i = u_bit_scan(&dirty);
2820
2821 if (!si_upload_descriptors(sctx, &sctx->descriptors[i]))
2822 return false;
2823 }
2824
2825 sctx->descriptors_dirty &= ~mask;
2826
2827 si_upload_bindless_descriptors(sctx);
2828
2829 return true;
2830 }
2831
2832 bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
2833 {
2834 const unsigned mask = u_bit_consecutive(0, SI_DESCS_FIRST_COMPUTE);
2835 return si_upload_shader_descriptors(sctx, mask);
2836 }
2837
2838 bool si_upload_compute_shader_descriptors(struct si_context *sctx)
2839 {
2840 /* Does not update rw_buffers, as that is not needed for compute shaders
2841 * and the input buffer uses the same SGPRs anyway.
2842 */
2843 const unsigned mask = u_bit_consecutive(SI_DESCS_FIRST_COMPUTE,
2844 SI_NUM_DESCS - SI_DESCS_FIRST_COMPUTE);
2845 return si_upload_shader_descriptors(sctx, mask);
2846 }
2847
2848 void si_release_all_descriptors(struct si_context *sctx)
2849 {
2850 int i;
2851
2852 for (i = 0; i < SI_NUM_SHADERS; i++) {
2853 si_release_buffer_resources(&sctx->const_and_shader_buffers[i],
2854 si_const_and_shader_buffer_descriptors(sctx, i));
2855 si_release_sampler_views(&sctx->samplers[i]);
2856 si_release_image_views(&sctx->images[i]);
2857 }
2858 si_release_buffer_resources(&sctx->rw_buffers,
2859 &sctx->descriptors[SI_DESCS_RW_BUFFERS]);
2860 for (i = 0; i < SI_NUM_VERTEX_BUFFERS; i++)
2861 pipe_vertex_buffer_unreference(&sctx->vertex_buffer[i]);
2862
2863 for (i = 0; i < SI_NUM_DESCS; ++i)
2864 si_release_descriptors(&sctx->descriptors[i]);
2865
2866 si_resource_reference(&sctx->vb_descriptors_buffer, NULL);
2867 sctx->vb_descriptors_gpu_list = NULL; /* points into a mapped buffer */
2868
2869 si_release_bindless_descriptors(sctx);
2870 }
2871
2872 void si_gfx_resources_add_all_to_bo_list(struct si_context *sctx)
2873 {
2874 for (unsigned i = 0; i < SI_NUM_GRAPHICS_SHADERS; i++) {
2875 si_buffer_resources_begin_new_cs(sctx, &sctx->const_and_shader_buffers[i]);
2876 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i]);
2877 si_image_views_begin_new_cs(sctx, &sctx->images[i]);
2878 }
2879 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
2880 si_vertex_buffers_begin_new_cs(sctx);
2881
2882 if (sctx->bo_list_add_all_resident_resources)
2883 si_resident_buffers_add_all_to_bo_list(sctx);
2884
2885 assert(sctx->bo_list_add_all_gfx_resources);
2886 sctx->bo_list_add_all_gfx_resources = false;
2887 }
2888
2889 void si_compute_resources_add_all_to_bo_list(struct si_context *sctx)
2890 {
2891 unsigned sh = PIPE_SHADER_COMPUTE;
2892
2893 si_buffer_resources_begin_new_cs(sctx, &sctx->const_and_shader_buffers[sh]);
2894 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[sh]);
2895 si_image_views_begin_new_cs(sctx, &sctx->images[sh]);
2896 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
2897
2898 if (sctx->bo_list_add_all_resident_resources)
2899 si_resident_buffers_add_all_to_bo_list(sctx);
2900
2901 assert(sctx->bo_list_add_all_compute_resources);
2902 sctx->bo_list_add_all_compute_resources = false;
2903 }
2904
2905 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
2906 {
2907 for (unsigned i = 0; i < SI_NUM_DESCS; ++i)
2908 si_descriptors_begin_new_cs(sctx, &sctx->descriptors[i]);
2909 si_descriptors_begin_new_cs(sctx, &sctx->bindless_descriptors);
2910
2911 si_shader_pointers_begin_new_cs(sctx);
2912
2913 sctx->bo_list_add_all_resident_resources = true;
2914 sctx->bo_list_add_all_gfx_resources = true;
2915 sctx->bo_list_add_all_compute_resources = true;
2916 }
2917
2918 void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
2919 uint64_t new_active_mask)
2920 {
2921 struct si_descriptors *desc = &sctx->descriptors[desc_idx];
2922
2923 /* Ignore no-op updates and updates that disable all slots. */
2924 if (!new_active_mask ||
2925 new_active_mask == u_bit_consecutive64(desc->first_active_slot,
2926 desc->num_active_slots))
2927 return;
2928
2929 int first, count;
2930 u_bit_scan_consecutive_range64(&new_active_mask, &first, &count);
2931 assert(new_active_mask == 0);
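/* The assert implies that the active mask must be one contiguous range of
 * bits, e.g. 0x3c yields first = 2, count = 4; a mask with holes would
 * leave bits set after the scan.
 */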
2932
2933 /* Upload/dump descriptors if slots are being enabled. */
2934 if (first < desc->first_active_slot ||
2935 first + count > desc->first_active_slot + desc->num_active_slots)
2936 sctx->descriptors_dirty |= 1u << desc_idx;
2937
2938 desc->first_active_slot = first;
2939 desc->num_active_slots = count;
2940 }
2941
2942 void si_set_active_descriptors_for_shader(struct si_context *sctx,
2943 struct si_shader_selector *sel)
2944 {
2945 if (!sel)
2946 return;
2947
2948 si_set_active_descriptors(sctx,
2949 si_const_and_shader_buffer_descriptors_idx(sel->type),
2950 sel->active_const_and_shader_buffers);
2951 si_set_active_descriptors(sctx,
2952 si_sampler_and_image_descriptors_idx(sel->type),
2953 sel->active_samplers_and_images);
2954 }