2 * Copyright 2013 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * Marek Olšák <marek.olsak@amd.com>
27 /* Resource binding slots and sampler states (each described with 8 or
28 * 4 dwords) are stored in lists in memory which is accessed by shaders
29 * using scalar load instructions.
31 * This file is responsible for managing such lists. It keeps a copy of all
32 * descriptors in CPU memory and re-uploads a whole list if some slots have
35 * This code is also reponsible for updating shader pointers to those lists.
37 * Note that CP DMA can't be used for updating the lists, because a GPU hang
38 * could leave the list in a mid-IB state and the next IB would get wrong
39 * descriptors and the whole context would be unusable at that point.
40 * (Note: The register shadowing can't be used due to the same reason)
42 * Also, uploading descriptors to newly allocated memory doesn't require
46 * Possible scenarios for one 16 dword image+sampler slot:
48 * | Image | w/ FMASK | Buffer | NULL
49 * [ 0: 3] Image[0:3] | Image[0:3] | Null[0:3] | Null[0:3]
50 * [ 4: 7] Image[4:7] | Image[4:7] | Buffer[0:3] | 0
51 * [ 8:11] Null[0:3] | Fmask[0:3] | Null[0:3] | Null[0:3]
52 * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
54 * FMASK implies MSAA, therefore no sampler state.
55 * Sampler states are never unbound except when FMASK is bound.
58 #include "radeon/r600_cs.h"
63 #include "util/hash_table.h"
64 #include "util/u_idalloc.h"
65 #include "util/u_format.h"
66 #include "util/u_memory.h"
67 #include "util/u_upload_mgr.h"
70 /* NULL image and buffer descriptor for textures (alpha = 1) and images
73 * For images, all fields must be zero except for the swizzle, which
74 * supports arbitrary combinations of 0s and 1s. The texture type must be
75 * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
77 * For buffers, all fields must be zero. If they are not, the hw hangs.
79 * This is the only reason why the buffer descriptor must be in words [4:7].
81 static uint32_t null_texture_descriptor
[8] = {
85 S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1
) |
86 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D
)
87 /* the rest must contain zeros, which is also used by the buffer
91 static uint32_t null_image_descriptor
[8] = {
95 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D
)
96 /* the rest must contain zeros, which is also used by the buffer
100 static void si_init_descriptor_list(uint32_t *desc_list
,
101 unsigned element_dw_size
,
102 unsigned num_elements
,
103 const uint32_t *null_descriptor
)
107 /* Initialize the array to NULL descriptors if the element size is 8. */
108 if (null_descriptor
) {
109 assert(element_dw_size
% 8 == 0);
110 for (i
= 0; i
< num_elements
* element_dw_size
/ 8; i
++)
111 memcpy(desc_list
+ i
* 8, null_descriptor
, 8 * 4);
115 static void si_init_descriptors(struct si_descriptors
*desc
,
116 unsigned shader_userdata_index
,
117 unsigned element_dw_size
,
118 unsigned num_elements
)
120 desc
->list
= CALLOC(num_elements
, element_dw_size
* 4);
121 desc
->element_dw_size
= element_dw_size
;
122 desc
->num_elements
= num_elements
;
123 desc
->shader_userdata_offset
= shader_userdata_index
* 4;
126 static void si_release_descriptors(struct si_descriptors
*desc
)
128 r600_resource_reference(&desc
->buffer
, NULL
);
132 static bool si_upload_descriptors(struct si_context
*sctx
,
133 struct si_descriptors
*desc
,
134 struct r600_atom
* atom
)
136 unsigned slot_size
= desc
->element_dw_size
* 4;
137 unsigned first_slot_offset
= desc
->first_active_slot
* slot_size
;
138 unsigned upload_size
= desc
->num_active_slots
* slot_size
;
140 /* Skip the upload if no shader is using the descriptors. dirty_mask
141 * will stay dirty and the descriptors will be uploaded when there is
142 * a shader using them.
148 u_upload_alloc(sctx
->b
.b
.const_uploader
, 0, upload_size
,
149 si_optimal_tcc_alignment(sctx
, upload_size
),
150 (unsigned*)&desc
->buffer_offset
,
151 (struct pipe_resource
**)&desc
->buffer
,
154 return false; /* skip the draw call */
156 util_memcpy_cpu_to_le32(ptr
, (char*)desc
->list
+ first_slot_offset
,
158 desc
->gpu_list
= ptr
- first_slot_offset
/ 4;
160 radeon_add_to_buffer_list(&sctx
->b
, &sctx
->b
.gfx
, desc
->buffer
,
161 RADEON_USAGE_READ
, RADEON_PRIO_DESCRIPTORS
);
163 /* The shader pointer should point to slot 0. */
164 desc
->buffer_offset
-= first_slot_offset
;
167 si_mark_atom_dirty(sctx
, atom
);
173 si_descriptors_begin_new_cs(struct si_context
*sctx
, struct si_descriptors
*desc
)
178 radeon_add_to_buffer_list(&sctx
->b
, &sctx
->b
.gfx
, desc
->buffer
,
179 RADEON_USAGE_READ
, RADEON_PRIO_DESCRIPTORS
);
185 si_sampler_and_image_descriptors_idx(unsigned shader
)
187 return SI_DESCS_FIRST_SHADER
+ shader
* SI_NUM_SHADER_DESCS
+
188 SI_SHADER_DESCS_SAMPLERS_AND_IMAGES
;
191 static struct si_descriptors
*
192 si_sampler_and_image_descriptors(struct si_context
*sctx
, unsigned shader
)
194 return &sctx
->descriptors
[si_sampler_and_image_descriptors_idx(shader
)];
197 static void si_release_sampler_views(struct si_sampler_views
*views
)
201 for (i
= 0; i
< ARRAY_SIZE(views
->views
); i
++) {
202 pipe_sampler_view_reference(&views
->views
[i
], NULL
);
206 static void si_sampler_view_add_buffer(struct si_context
*sctx
,
207 struct pipe_resource
*resource
,
208 enum radeon_bo_usage usage
,
209 bool is_stencil_sampler
,
212 struct r600_resource
*rres
;
213 struct r600_texture
*rtex
;
214 enum radeon_bo_priority priority
;
219 if (resource
->target
!= PIPE_BUFFER
) {
220 struct r600_texture
*tex
= (struct r600_texture
*)resource
;
222 if (tex
->is_depth
&& !r600_can_sample_zs(tex
, is_stencil_sampler
))
223 resource
= &tex
->flushed_depth_texture
->resource
.b
.b
;
226 rres
= (struct r600_resource
*)resource
;
227 priority
= r600_get_sampler_view_priority(rres
);
229 radeon_add_to_buffer_list_check_mem(&sctx
->b
, &sctx
->b
.gfx
,
230 rres
, usage
, priority
,
233 if (resource
->target
== PIPE_BUFFER
)
236 /* Now add separate DCC or HTILE. */
237 rtex
= (struct r600_texture
*)resource
;
238 if (rtex
->dcc_separate_buffer
) {
239 radeon_add_to_buffer_list_check_mem(&sctx
->b
, &sctx
->b
.gfx
,
240 rtex
->dcc_separate_buffer
, usage
,
241 RADEON_PRIO_DCC
, check_mem
);
245 static void si_sampler_views_begin_new_cs(struct si_context
*sctx
,
246 struct si_sampler_views
*views
)
248 unsigned mask
= views
->enabled_mask
;
250 /* Add buffers to the CS. */
252 int i
= u_bit_scan(&mask
);
253 struct si_sampler_view
*sview
= (struct si_sampler_view
*)views
->views
[i
];
255 si_sampler_view_add_buffer(sctx
, sview
->base
.texture
,
257 sview
->is_stencil_sampler
, false);
261 /* Set buffer descriptor fields that can be changed by reallocations. */
262 static void si_set_buf_desc_address(struct r600_resource
*buf
,
263 uint64_t offset
, uint32_t *state
)
265 uint64_t va
= buf
->gpu_address
+ offset
;
268 state
[1] &= C_008F04_BASE_ADDRESS_HI
;
269 state
[1] |= S_008F04_BASE_ADDRESS_HI(va
>> 32);
272 /* Set texture descriptor fields that can be changed by reallocations.
275 * \param base_level_info information of the level of BASE_ADDRESS
276 * \param base_level the level of BASE_ADDRESS
277 * \param first_level pipe_sampler_view.u.tex.first_level
278 * \param block_width util_format_get_blockwidth()
279 * \param is_stencil select between separate Z & Stencil
280 * \param state descriptor to update
282 void si_set_mutable_tex_desc_fields(struct si_screen
*sscreen
,
283 struct r600_texture
*tex
,
284 const struct legacy_surf_level
*base_level_info
,
285 unsigned base_level
, unsigned first_level
,
286 unsigned block_width
, bool is_stencil
,
289 uint64_t va
, meta_va
= 0;
291 if (tex
->is_depth
&& !r600_can_sample_zs(tex
, is_stencil
)) {
292 tex
= tex
->flushed_depth_texture
;
296 va
= tex
->resource
.gpu_address
;
298 if (sscreen
->b
.chip_class
>= GFX9
) {
299 /* Only stencil_offset needs to be added here. */
301 va
+= tex
->surface
.u
.gfx9
.stencil_offset
;
303 va
+= tex
->surface
.u
.gfx9
.surf_offset
;
305 va
+= base_level_info
->offset
;
309 state
[1] &= C_008F14_BASE_ADDRESS_HI
;
310 state
[1] |= S_008F14_BASE_ADDRESS_HI(va
>> 40);
312 /* Only macrotiled modes can set tile swizzle.
313 * GFX9 doesn't use (legacy) base_level_info.
315 if (sscreen
->b
.chip_class
>= GFX9
||
316 base_level_info
->mode
== RADEON_SURF_MODE_2D
)
317 state
[0] |= tex
->surface
.tile_swizzle
;
319 if (sscreen
->b
.chip_class
>= VI
) {
320 state
[6] &= C_008F28_COMPRESSION_EN
;
323 if (vi_dcc_enabled(tex
, first_level
)) {
324 meta_va
= (!tex
->dcc_separate_buffer
? tex
->resource
.gpu_address
: 0) +
327 if (sscreen
->b
.chip_class
== VI
) {
328 meta_va
+= base_level_info
->dcc_offset
;
329 assert(base_level_info
->mode
== RADEON_SURF_MODE_2D
);
332 meta_va
|= (uint32_t)tex
->surface
.tile_swizzle
<< 8;
333 } else if (vi_tc_compat_htile_enabled(tex
, first_level
)) {
334 meta_va
= tex
->resource
.gpu_address
+ tex
->htile_offset
;
338 state
[6] |= S_008F28_COMPRESSION_EN(1);
339 state
[7] = meta_va
>> 8;
343 if (sscreen
->b
.chip_class
>= GFX9
) {
344 state
[3] &= C_008F1C_SW_MODE
;
345 state
[4] &= C_008F20_PITCH_GFX9
;
348 state
[3] |= S_008F1C_SW_MODE(tex
->surface
.u
.gfx9
.stencil
.swizzle_mode
);
349 state
[4] |= S_008F20_PITCH_GFX9(tex
->surface
.u
.gfx9
.stencil
.epitch
);
351 state
[3] |= S_008F1C_SW_MODE(tex
->surface
.u
.gfx9
.surf
.swizzle_mode
);
352 state
[4] |= S_008F20_PITCH_GFX9(tex
->surface
.u
.gfx9
.surf
.epitch
);
355 state
[5] &= C_008F24_META_DATA_ADDRESS
&
356 C_008F24_META_PIPE_ALIGNED
&
357 C_008F24_META_RB_ALIGNED
;
359 struct gfx9_surf_meta_flags meta
;
362 meta
= tex
->surface
.u
.gfx9
.dcc
;
364 meta
= tex
->surface
.u
.gfx9
.htile
;
366 state
[5] |= S_008F24_META_DATA_ADDRESS(meta_va
>> 40) |
367 S_008F24_META_PIPE_ALIGNED(meta
.pipe_aligned
) |
368 S_008F24_META_RB_ALIGNED(meta
.rb_aligned
);
372 unsigned pitch
= base_level_info
->nblk_x
* block_width
;
373 unsigned index
= si_tile_mode_index(tex
, base_level
, is_stencil
);
375 state
[3] &= C_008F1C_TILING_INDEX
;
376 state
[3] |= S_008F1C_TILING_INDEX(index
);
377 state
[4] &= C_008F20_PITCH_GFX6
;
378 state
[4] |= S_008F20_PITCH_GFX6(pitch
- 1);
382 static void si_set_sampler_view_desc(struct si_context
*sctx
,
383 struct si_sampler_view
*sview
,
384 struct si_sampler_state
*sstate
,
387 struct pipe_sampler_view
*view
= &sview
->base
;
388 struct r600_texture
*rtex
= (struct r600_texture
*)view
->texture
;
389 bool is_buffer
= rtex
->resource
.b
.b
.target
== PIPE_BUFFER
;
391 if (unlikely(!is_buffer
&& sview
->dcc_incompatible
)) {
392 if (vi_dcc_enabled(rtex
, view
->u
.tex
.first_level
))
393 if (!r600_texture_disable_dcc(&sctx
->b
, rtex
))
394 sctx
->b
.decompress_dcc(&sctx
->b
.b
, rtex
);
396 sview
->dcc_incompatible
= false;
399 assert(rtex
); /* views with texture == NULL aren't supported */
400 memcpy(desc
, sview
->state
, 8*4);
403 si_set_buf_desc_address(&rtex
->resource
,
404 sview
->base
.u
.buf
.offset
,
407 bool is_separate_stencil
= rtex
->db_compatible
&&
408 sview
->is_stencil_sampler
;
410 si_set_mutable_tex_desc_fields(sctx
->screen
, rtex
,
411 sview
->base_level_info
,
413 sview
->base
.u
.tex
.first_level
,
419 if (!is_buffer
&& rtex
->fmask
.size
) {
420 memcpy(desc
+ 8, sview
->fmask_state
, 8*4);
422 /* Disable FMASK and bind sampler state in [12:15]. */
423 memcpy(desc
+ 8, null_texture_descriptor
, 4*4);
426 memcpy(desc
+ 12, sstate
->val
, 4*4);
430 static void si_set_sampler_view(struct si_context
*sctx
,
432 unsigned slot
, struct pipe_sampler_view
*view
,
433 bool disallow_early_out
)
435 struct si_sampler_views
*views
= &sctx
->samplers
[shader
].views
;
436 struct si_sampler_view
*rview
= (struct si_sampler_view
*)view
;
437 struct si_descriptors
*descs
= si_sampler_and_image_descriptors(sctx
, shader
);
438 unsigned desc_slot
= si_get_sampler_slot(slot
);
439 uint32_t *desc
= descs
->list
+ desc_slot
* 16;
441 if (views
->views
[slot
] == view
&& !disallow_early_out
)
445 struct r600_texture
*rtex
= (struct r600_texture
*)view
->texture
;
447 si_set_sampler_view_desc(sctx
, rview
,
448 views
->sampler_states
[slot
], desc
);
450 if (rtex
->resource
.b
.b
.target
== PIPE_BUFFER
)
451 rtex
->resource
.bind_history
|= PIPE_BIND_SAMPLER_VIEW
;
453 pipe_sampler_view_reference(&views
->views
[slot
], view
);
454 views
->enabled_mask
|= 1u << slot
;
456 /* Since this can flush, it must be done after enabled_mask is
458 si_sampler_view_add_buffer(sctx
, view
->texture
,
460 rview
->is_stencil_sampler
, true);
462 pipe_sampler_view_reference(&views
->views
[slot
], NULL
);
463 memcpy(desc
, null_texture_descriptor
, 8*4);
464 /* Only clear the lower dwords of FMASK. */
465 memcpy(desc
+ 8, null_texture_descriptor
, 4*4);
466 /* Re-set the sampler state if we are transitioning from FMASK. */
467 if (views
->sampler_states
[slot
])
469 views
->sampler_states
[slot
]->val
, 4*4);
471 views
->enabled_mask
&= ~(1u << slot
);
474 sctx
->descriptors_dirty
|= 1u << si_sampler_and_image_descriptors_idx(shader
);
477 static bool color_needs_decompression(struct r600_texture
*rtex
)
479 return rtex
->fmask
.size
||
480 (rtex
->dirty_level_mask
&&
481 (rtex
->cmask
.size
|| rtex
->dcc_offset
));
484 static bool depth_needs_decompression(struct r600_texture
*rtex
)
486 /* If the depth/stencil texture is TC-compatible, no decompression
487 * will be done. The decompression function will only flush DB caches
488 * to make it coherent with shaders. That's necessary because the driver
489 * doesn't flush DB caches in any other case.
491 return rtex
->db_compatible
;
494 static void si_update_shader_needs_decompress_mask(struct si_context
*sctx
,
497 struct si_textures_info
*samplers
= &sctx
->samplers
[shader
];
498 unsigned shader_bit
= 1 << shader
;
500 if (samplers
->needs_depth_decompress_mask
||
501 samplers
->needs_color_decompress_mask
||
502 sctx
->images
[shader
].needs_color_decompress_mask
)
503 sctx
->shader_needs_decompress_mask
|= shader_bit
;
505 sctx
->shader_needs_decompress_mask
&= ~shader_bit
;
508 static void si_set_sampler_views(struct pipe_context
*ctx
,
509 enum pipe_shader_type shader
, unsigned start
,
511 struct pipe_sampler_view
**views
)
513 struct si_context
*sctx
= (struct si_context
*)ctx
;
514 struct si_textures_info
*samplers
= &sctx
->samplers
[shader
];
517 if (!count
|| shader
>= SI_NUM_SHADERS
)
520 for (i
= 0; i
< count
; i
++) {
521 unsigned slot
= start
+ i
;
523 if (!views
|| !views
[i
]) {
524 samplers
->needs_depth_decompress_mask
&= ~(1u << slot
);
525 samplers
->needs_color_decompress_mask
&= ~(1u << slot
);
526 si_set_sampler_view(sctx
, shader
, slot
, NULL
, false);
530 si_set_sampler_view(sctx
, shader
, slot
, views
[i
], false);
532 if (views
[i
]->texture
&& views
[i
]->texture
->target
!= PIPE_BUFFER
) {
533 struct r600_texture
*rtex
=
534 (struct r600_texture
*)views
[i
]->texture
;
536 if (depth_needs_decompression(rtex
)) {
537 samplers
->needs_depth_decompress_mask
|= 1u << slot
;
539 samplers
->needs_depth_decompress_mask
&= ~(1u << slot
);
541 if (color_needs_decompression(rtex
)) {
542 samplers
->needs_color_decompress_mask
|= 1u << slot
;
544 samplers
->needs_color_decompress_mask
&= ~(1u << slot
);
547 if (rtex
->dcc_offset
&&
548 p_atomic_read(&rtex
->framebuffers_bound
))
549 sctx
->need_check_render_feedback
= true;
551 samplers
->needs_depth_decompress_mask
&= ~(1u << slot
);
552 samplers
->needs_color_decompress_mask
&= ~(1u << slot
);
556 si_update_shader_needs_decompress_mask(sctx
, shader
);
560 si_samplers_update_needs_color_decompress_mask(struct si_textures_info
*samplers
)
562 unsigned mask
= samplers
->views
.enabled_mask
;
565 int i
= u_bit_scan(&mask
);
566 struct pipe_resource
*res
= samplers
->views
.views
[i
]->texture
;
568 if (res
&& res
->target
!= PIPE_BUFFER
) {
569 struct r600_texture
*rtex
= (struct r600_texture
*)res
;
571 if (color_needs_decompression(rtex
)) {
572 samplers
->needs_color_decompress_mask
|= 1u << i
;
574 samplers
->needs_color_decompress_mask
&= ~(1u << i
);
583 si_release_image_views(struct si_images_info
*images
)
587 for (i
= 0; i
< SI_NUM_IMAGES
; ++i
) {
588 struct pipe_image_view
*view
= &images
->views
[i
];
590 pipe_resource_reference(&view
->resource
, NULL
);
595 si_image_views_begin_new_cs(struct si_context
*sctx
, struct si_images_info
*images
)
597 uint mask
= images
->enabled_mask
;
599 /* Add buffers to the CS. */
601 int i
= u_bit_scan(&mask
);
602 struct pipe_image_view
*view
= &images
->views
[i
];
604 assert(view
->resource
);
606 si_sampler_view_add_buffer(sctx
, view
->resource
,
607 RADEON_USAGE_READWRITE
, false, false);
612 si_disable_shader_image(struct si_context
*ctx
, unsigned shader
, unsigned slot
)
614 struct si_images_info
*images
= &ctx
->images
[shader
];
616 if (images
->enabled_mask
& (1u << slot
)) {
617 struct si_descriptors
*descs
= si_sampler_and_image_descriptors(ctx
, shader
);
618 unsigned desc_slot
= si_get_image_slot(slot
);
620 pipe_resource_reference(&images
->views
[slot
].resource
, NULL
);
621 images
->needs_color_decompress_mask
&= ~(1 << slot
);
623 memcpy(descs
->list
+ desc_slot
*8, null_image_descriptor
, 8*4);
624 images
->enabled_mask
&= ~(1u << slot
);
625 ctx
->descriptors_dirty
|= 1u << si_sampler_and_image_descriptors_idx(shader
);
630 si_mark_image_range_valid(const struct pipe_image_view
*view
)
632 struct r600_resource
*res
= (struct r600_resource
*)view
->resource
;
634 assert(res
&& res
->b
.b
.target
== PIPE_BUFFER
);
636 util_range_add(&res
->valid_buffer_range
,
638 view
->u
.buf
.offset
+ view
->u
.buf
.size
);
641 static void si_set_shader_image_desc(struct si_context
*ctx
,
642 const struct pipe_image_view
*view
,
643 bool skip_decompress
,
646 struct si_screen
*screen
= ctx
->screen
;
647 struct r600_resource
*res
;
649 res
= (struct r600_resource
*)view
->resource
;
651 if (res
->b
.b
.target
== PIPE_BUFFER
) {
652 if (view
->access
& PIPE_IMAGE_ACCESS_WRITE
)
653 si_mark_image_range_valid(view
);
655 si_make_buffer_descriptor(screen
, res
,
658 view
->u
.buf
.size
, desc
);
659 si_set_buf_desc_address(res
, view
->u
.buf
.offset
, desc
+ 4);
661 static const unsigned char swizzle
[4] = { 0, 1, 2, 3 };
662 struct r600_texture
*tex
= (struct r600_texture
*)res
;
663 unsigned level
= view
->u
.tex
.level
;
664 unsigned width
, height
, depth
, hw_level
;
665 bool uses_dcc
= vi_dcc_enabled(tex
, level
);
667 assert(!tex
->is_depth
);
668 assert(tex
->fmask
.size
== 0);
670 if (uses_dcc
&& !skip_decompress
&&
671 (view
->access
& PIPE_IMAGE_ACCESS_WRITE
||
672 !vi_dcc_formats_compatible(res
->b
.b
.format
, view
->format
))) {
673 /* If DCC can't be disabled, at least decompress it.
674 * The decompression is relatively cheap if the surface
675 * has been decompressed already.
677 if (!r600_texture_disable_dcc(&ctx
->b
, tex
))
678 ctx
->b
.decompress_dcc(&ctx
->b
.b
, tex
);
681 if (ctx
->b
.chip_class
>= GFX9
) {
682 /* Always set the base address. The swizzle modes don't
683 * allow setting mipmap level offsets as the base.
685 width
= res
->b
.b
.width0
;
686 height
= res
->b
.b
.height0
;
687 depth
= res
->b
.b
.depth0
;
690 /* Always force the base level to the selected level.
692 * This is required for 3D textures, where otherwise
693 * selecting a single slice for non-layered bindings
694 * fails. It doesn't hurt the other targets.
696 width
= u_minify(res
->b
.b
.width0
, level
);
697 height
= u_minify(res
->b
.b
.height0
, level
);
698 depth
= u_minify(res
->b
.b
.depth0
, level
);
702 si_make_texture_descriptor(screen
, tex
,
703 false, res
->b
.b
.target
,
704 view
->format
, swizzle
,
706 view
->u
.tex
.first_layer
,
707 view
->u
.tex
.last_layer
,
708 width
, height
, depth
,
710 si_set_mutable_tex_desc_fields(screen
, tex
,
711 &tex
->surface
.u
.legacy
.level
[level
],
713 util_format_get_blockwidth(view
->format
),
718 static void si_set_shader_image(struct si_context
*ctx
,
720 unsigned slot
, const struct pipe_image_view
*view
,
721 bool skip_decompress
)
723 struct si_images_info
*images
= &ctx
->images
[shader
];
724 struct si_descriptors
*descs
= si_sampler_and_image_descriptors(ctx
, shader
);
725 struct r600_resource
*res
;
726 unsigned desc_slot
= si_get_image_slot(slot
);
727 uint32_t *desc
= descs
->list
+ desc_slot
* 8;
729 if (!view
|| !view
->resource
) {
730 si_disable_shader_image(ctx
, shader
, slot
);
734 res
= (struct r600_resource
*)view
->resource
;
736 if (&images
->views
[slot
] != view
)
737 util_copy_image_view(&images
->views
[slot
], view
);
739 si_set_shader_image_desc(ctx
, view
, skip_decompress
, desc
);
741 if (res
->b
.b
.target
== PIPE_BUFFER
) {
742 images
->needs_color_decompress_mask
&= ~(1 << slot
);
743 res
->bind_history
|= PIPE_BIND_SHADER_IMAGE
;
745 struct r600_texture
*tex
= (struct r600_texture
*)res
;
746 unsigned level
= view
->u
.tex
.level
;
748 if (color_needs_decompression(tex
)) {
749 images
->needs_color_decompress_mask
|= 1 << slot
;
751 images
->needs_color_decompress_mask
&= ~(1 << slot
);
754 if (vi_dcc_enabled(tex
, level
) &&
755 p_atomic_read(&tex
->framebuffers_bound
))
756 ctx
->need_check_render_feedback
= true;
759 images
->enabled_mask
|= 1u << slot
;
760 ctx
->descriptors_dirty
|= 1u << si_sampler_and_image_descriptors_idx(shader
);
762 /* Since this can flush, it must be done after enabled_mask is updated. */
763 si_sampler_view_add_buffer(ctx
, &res
->b
.b
,
764 (view
->access
& PIPE_IMAGE_ACCESS_WRITE
) ?
765 RADEON_USAGE_READWRITE
: RADEON_USAGE_READ
,
770 si_set_shader_images(struct pipe_context
*pipe
,
771 enum pipe_shader_type shader
,
772 unsigned start_slot
, unsigned count
,
773 const struct pipe_image_view
*views
)
775 struct si_context
*ctx
= (struct si_context
*)pipe
;
778 assert(shader
< SI_NUM_SHADERS
);
783 assert(start_slot
+ count
<= SI_NUM_IMAGES
);
786 for (i
= 0, slot
= start_slot
; i
< count
; ++i
, ++slot
)
787 si_set_shader_image(ctx
, shader
, slot
, &views
[i
], false);
789 for (i
= 0, slot
= start_slot
; i
< count
; ++i
, ++slot
)
790 si_set_shader_image(ctx
, shader
, slot
, NULL
, false);
793 si_update_shader_needs_decompress_mask(ctx
, shader
);
797 si_images_update_needs_color_decompress_mask(struct si_images_info
*images
)
799 unsigned mask
= images
->enabled_mask
;
802 int i
= u_bit_scan(&mask
);
803 struct pipe_resource
*res
= images
->views
[i
].resource
;
805 if (res
&& res
->target
!= PIPE_BUFFER
) {
806 struct r600_texture
*rtex
= (struct r600_texture
*)res
;
808 if (color_needs_decompression(rtex
)) {
809 images
->needs_color_decompress_mask
|= 1 << i
;
811 images
->needs_color_decompress_mask
&= ~(1 << i
);
819 static void si_bind_sampler_states(struct pipe_context
*ctx
,
820 enum pipe_shader_type shader
,
821 unsigned start
, unsigned count
, void **states
)
823 struct si_context
*sctx
= (struct si_context
*)ctx
;
824 struct si_textures_info
*samplers
= &sctx
->samplers
[shader
];
825 struct si_descriptors
*desc
= si_sampler_and_image_descriptors(sctx
, shader
);
826 struct si_sampler_state
**sstates
= (struct si_sampler_state
**)states
;
829 if (!count
|| shader
>= SI_NUM_SHADERS
)
832 for (i
= 0; i
< count
; i
++) {
833 unsigned slot
= start
+ i
;
834 unsigned desc_slot
= si_get_sampler_slot(slot
);
837 sstates
[i
] == samplers
->views
.sampler_states
[slot
])
841 assert(sstates
[i
]->magic
== SI_SAMPLER_STATE_MAGIC
);
843 samplers
->views
.sampler_states
[slot
] = sstates
[i
];
845 /* If FMASK is bound, don't overwrite it.
846 * The sampler state will be set after FMASK is unbound.
848 if (samplers
->views
.views
[slot
] &&
849 samplers
->views
.views
[slot
]->texture
&&
850 samplers
->views
.views
[slot
]->texture
->target
!= PIPE_BUFFER
&&
851 ((struct r600_texture
*)samplers
->views
.views
[slot
]->texture
)->fmask
.size
)
854 memcpy(desc
->list
+ desc_slot
* 16 + 12, sstates
[i
]->val
, 4*4);
855 sctx
->descriptors_dirty
|= 1u << si_sampler_and_image_descriptors_idx(shader
);
859 /* BUFFER RESOURCES */
861 static void si_init_buffer_resources(struct si_buffer_resources
*buffers
,
862 struct si_descriptors
*descs
,
863 unsigned num_buffers
,
864 unsigned shader_userdata_index
,
865 enum radeon_bo_usage shader_usage
,
866 enum radeon_bo_usage shader_usage_constbuf
,
867 enum radeon_bo_priority priority
,
868 enum radeon_bo_priority priority_constbuf
)
870 buffers
->shader_usage
= shader_usage
;
871 buffers
->shader_usage_constbuf
= shader_usage_constbuf
;
872 buffers
->priority
= priority
;
873 buffers
->priority_constbuf
= priority_constbuf
;
874 buffers
->buffers
= CALLOC(num_buffers
, sizeof(struct pipe_resource
*));
876 si_init_descriptors(descs
, shader_userdata_index
, 4, num_buffers
);
879 static void si_release_buffer_resources(struct si_buffer_resources
*buffers
,
880 struct si_descriptors
*descs
)
884 for (i
= 0; i
< descs
->num_elements
; i
++) {
885 pipe_resource_reference(&buffers
->buffers
[i
], NULL
);
888 FREE(buffers
->buffers
);
891 static void si_buffer_resources_begin_new_cs(struct si_context
*sctx
,
892 struct si_buffer_resources
*buffers
)
894 unsigned mask
= buffers
->enabled_mask
;
896 /* Add buffers to the CS. */
898 int i
= u_bit_scan(&mask
);
900 radeon_add_to_buffer_list(&sctx
->b
, &sctx
->b
.gfx
,
901 r600_resource(buffers
->buffers
[i
]),
902 i
< SI_NUM_SHADER_BUFFERS
? buffers
->shader_usage
:
903 buffers
->shader_usage_constbuf
,
904 i
< SI_NUM_SHADER_BUFFERS
? buffers
->priority
:
905 buffers
->priority_constbuf
);
909 static void si_get_buffer_from_descriptors(struct si_buffer_resources
*buffers
,
910 struct si_descriptors
*descs
,
911 unsigned idx
, struct pipe_resource
**buf
,
912 unsigned *offset
, unsigned *size
)
914 pipe_resource_reference(buf
, buffers
->buffers
[idx
]);
916 struct r600_resource
*res
= r600_resource(*buf
);
917 const uint32_t *desc
= descs
->list
+ idx
* 4;
922 assert(G_008F04_STRIDE(desc
[1]) == 0);
923 va
= ((uint64_t)desc
[1] << 32) | desc
[0];
925 assert(va
>= res
->gpu_address
&& va
+ *size
<= res
->gpu_address
+ res
->bo_size
);
926 *offset
= va
- res
->gpu_address
;
932 static void si_vertex_buffers_begin_new_cs(struct si_context
*sctx
)
934 struct si_descriptors
*desc
= &sctx
->vertex_buffers
;
935 int count
= sctx
->vertex_elements
? sctx
->vertex_elements
->count
: 0;
938 for (i
= 0; i
< count
; i
++) {
939 int vb
= sctx
->vertex_elements
->vertex_buffer_index
[i
];
941 if (vb
>= ARRAY_SIZE(sctx
->vertex_buffer
))
943 if (!sctx
->vertex_buffer
[vb
].buffer
.resource
)
946 radeon_add_to_buffer_list(&sctx
->b
, &sctx
->b
.gfx
,
947 (struct r600_resource
*)sctx
->vertex_buffer
[vb
].buffer
.resource
,
948 RADEON_USAGE_READ
, RADEON_PRIO_VERTEX_BUFFER
);
953 radeon_add_to_buffer_list(&sctx
->b
, &sctx
->b
.gfx
,
954 desc
->buffer
, RADEON_USAGE_READ
,
955 RADEON_PRIO_DESCRIPTORS
);
958 bool si_upload_vertex_buffer_descriptors(struct si_context
*sctx
)
960 struct si_vertex_elements
*velems
= sctx
->vertex_elements
;
961 struct si_descriptors
*desc
= &sctx
->vertex_buffers
;
963 unsigned desc_list_byte_size
;
964 unsigned first_vb_use_mask
;
968 if (!sctx
->vertex_buffers_dirty
|| !velems
)
971 count
= velems
->count
;
976 desc_list_byte_size
= velems
->desc_list_byte_size
;
977 first_vb_use_mask
= velems
->first_vb_use_mask
;
979 /* Vertex buffer descriptors are the only ones which are uploaded
980 * directly through a staging buffer and don't go through
981 * the fine-grained upload path.
983 u_upload_alloc(sctx
->b
.b
.const_uploader
, 0,
985 si_optimal_tcc_alignment(sctx
, desc_list_byte_size
),
986 (unsigned*)&desc
->buffer_offset
,
987 (struct pipe_resource
**)&desc
->buffer
, (void**)&ptr
);
992 radeon_add_to_buffer_list(&sctx
->b
, &sctx
->b
.gfx
,
993 desc
->buffer
, RADEON_USAGE_READ
,
994 RADEON_PRIO_DESCRIPTORS
);
996 assert(count
<= SI_MAX_ATTRIBS
);
998 for (i
= 0; i
< count
; i
++) {
999 struct pipe_vertex_buffer
*vb
;
1000 struct r600_resource
*rbuffer
;
1002 unsigned vbo_index
= velems
->vertex_buffer_index
[i
];
1003 uint32_t *desc
= &ptr
[i
*4];
1005 vb
= &sctx
->vertex_buffer
[vbo_index
];
1006 rbuffer
= (struct r600_resource
*)vb
->buffer
.resource
;
1008 memset(desc
, 0, 16);
1012 offset
= vb
->buffer_offset
+ velems
->src_offset
[i
];
1013 va
= rbuffer
->gpu_address
+ offset
;
1015 /* Fill in T# buffer resource description */
1017 desc
[1] = S_008F04_BASE_ADDRESS_HI(va
>> 32) |
1018 S_008F04_STRIDE(vb
->stride
);
1020 if (sctx
->b
.chip_class
!= VI
&& vb
->stride
) {
1021 /* Round up by rounding down and adding 1 */
1022 desc
[2] = (vb
->buffer
.resource
->width0
- offset
-
1023 velems
->format_size
[i
]) /
1026 desc
[2] = vb
->buffer
.resource
->width0
- offset
;
1029 desc
[3] = velems
->rsrc_word3
[i
];
1031 if (first_vb_use_mask
& (1 << i
)) {
1032 radeon_add_to_buffer_list(&sctx
->b
, &sctx
->b
.gfx
,
1033 (struct r600_resource
*)vb
->buffer
.resource
,
1034 RADEON_USAGE_READ
, RADEON_PRIO_VERTEX_BUFFER
);
1038 /* Don't flush the const cache. It would have a very negative effect
1039 * on performance (confirmed by testing). New descriptors are always
1040 * uploaded to a fresh new buffer, so I don't think flushing the const
1041 * cache is needed. */
1042 si_mark_atom_dirty(sctx
, &sctx
->shader_pointers
.atom
);
1043 sctx
->vertex_buffers_dirty
= false;
1044 sctx
->vertex_buffer_pointer_dirty
= true;
1045 sctx
->prefetch_L2_mask
|= SI_PREFETCH_VBO_DESCRIPTORS
;
1050 /* CONSTANT BUFFERS */
1053 si_const_and_shader_buffer_descriptors_idx(unsigned shader
)
1055 return SI_DESCS_FIRST_SHADER
+ shader
* SI_NUM_SHADER_DESCS
+
1056 SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS
;
1059 static struct si_descriptors
*
1060 si_const_and_shader_buffer_descriptors(struct si_context
*sctx
, unsigned shader
)
1062 return &sctx
->descriptors
[si_const_and_shader_buffer_descriptors_idx(shader
)];
1065 void si_upload_const_buffer(struct si_context
*sctx
, struct r600_resource
**rbuffer
,
1066 const uint8_t *ptr
, unsigned size
, uint32_t *const_offset
)
1070 u_upload_alloc(sctx
->b
.b
.const_uploader
, 0, size
,
1071 si_optimal_tcc_alignment(sctx
, size
),
1073 (struct pipe_resource
**)rbuffer
, &tmp
);
1075 util_memcpy_cpu_to_le32(tmp
, ptr
, size
);
1078 static void si_set_constant_buffer(struct si_context
*sctx
,
1079 struct si_buffer_resources
*buffers
,
1080 unsigned descriptors_idx
,
1081 uint slot
, const struct pipe_constant_buffer
*input
)
1083 struct si_descriptors
*descs
= &sctx
->descriptors
[descriptors_idx
];
1084 assert(slot
< descs
->num_elements
);
1085 pipe_resource_reference(&buffers
->buffers
[slot
], NULL
);
1087 /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
1088 * with a NULL buffer). We need to use a dummy buffer instead. */
1089 if (sctx
->b
.chip_class
== CIK
&&
1090 (!input
|| (!input
->buffer
&& !input
->user_buffer
)))
1091 input
= &sctx
->null_const_buf
;
1093 if (input
&& (input
->buffer
|| input
->user_buffer
)) {
1094 struct pipe_resource
*buffer
= NULL
;
1097 /* Upload the user buffer if needed. */
1098 if (input
->user_buffer
) {
1099 unsigned buffer_offset
;
1101 si_upload_const_buffer(sctx
,
1102 (struct r600_resource
**)&buffer
, input
->user_buffer
,
1103 input
->buffer_size
, &buffer_offset
);
1105 /* Just unbind on failure. */
1106 si_set_constant_buffer(sctx
, buffers
, descriptors_idx
, slot
, NULL
);
1109 va
= r600_resource(buffer
)->gpu_address
+ buffer_offset
;
1111 pipe_resource_reference(&buffer
, input
->buffer
);
1112 va
= r600_resource(buffer
)->gpu_address
+ input
->buffer_offset
;
1113 /* Only track usage for non-user buffers. */
1114 r600_resource(buffer
)->bind_history
|= PIPE_BIND_CONSTANT_BUFFER
;
1117 /* Set the descriptor. */
1118 uint32_t *desc
= descs
->list
+ slot
*4;
1120 desc
[1] = S_008F04_BASE_ADDRESS_HI(va
>> 32) |
1122 desc
[2] = input
->buffer_size
;
1123 desc
[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X
) |
1124 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y
) |
1125 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z
) |
1126 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W
) |
1127 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT
) |
1128 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32
);
1130 buffers
->buffers
[slot
] = buffer
;
1131 radeon_add_to_buffer_list_check_mem(&sctx
->b
, &sctx
->b
.gfx
,
1132 (struct r600_resource
*)buffer
,
1133 buffers
->shader_usage_constbuf
,
1134 buffers
->priority_constbuf
, true);
1135 buffers
->enabled_mask
|= 1u << slot
;
1137 /* Clear the descriptor. */
1138 memset(descs
->list
+ slot
*4, 0, sizeof(uint32_t) * 4);
1139 buffers
->enabled_mask
&= ~(1u << slot
);
1142 sctx
->descriptors_dirty
|= 1u << descriptors_idx
;
1145 void si_set_rw_buffer(struct si_context
*sctx
,
1146 uint slot
, const struct pipe_constant_buffer
*input
)
1148 si_set_constant_buffer(sctx
, &sctx
->rw_buffers
,
1149 SI_DESCS_RW_BUFFERS
, slot
, input
);
1152 static void si_pipe_set_constant_buffer(struct pipe_context
*ctx
,
1153 enum pipe_shader_type shader
, uint slot
,
1154 const struct pipe_constant_buffer
*input
)
1156 struct si_context
*sctx
= (struct si_context
*)ctx
;
1158 if (shader
>= SI_NUM_SHADERS
)
1161 slot
= si_get_constbuf_slot(slot
);
1162 si_set_constant_buffer(sctx
, &sctx
->const_and_shader_buffers
[shader
],
1163 si_const_and_shader_buffer_descriptors_idx(shader
),
1167 void si_get_pipe_constant_buffer(struct si_context
*sctx
, uint shader
,
1168 uint slot
, struct pipe_constant_buffer
*cbuf
)
1170 cbuf
->user_buffer
= NULL
;
1171 si_get_buffer_from_descriptors(
1172 &sctx
->const_and_shader_buffers
[shader
],
1173 si_const_and_shader_buffer_descriptors(sctx
, shader
),
1174 si_get_constbuf_slot(slot
),
1175 &cbuf
->buffer
, &cbuf
->buffer_offset
, &cbuf
->buffer_size
);
1178 /* SHADER BUFFERS */
1180 static void si_set_shader_buffers(struct pipe_context
*ctx
,
1181 enum pipe_shader_type shader
,
1182 unsigned start_slot
, unsigned count
,
1183 const struct pipe_shader_buffer
*sbuffers
)
1185 struct si_context
*sctx
= (struct si_context
*)ctx
;
1186 struct si_buffer_resources
*buffers
= &sctx
->const_and_shader_buffers
[shader
];
1187 struct si_descriptors
*descs
= si_const_and_shader_buffer_descriptors(sctx
, shader
);
1190 assert(start_slot
+ count
<= SI_NUM_SHADER_BUFFERS
);
1192 for (i
= 0; i
< count
; ++i
) {
1193 const struct pipe_shader_buffer
*sbuffer
= sbuffers
? &sbuffers
[i
] : NULL
;
1194 struct r600_resource
*buf
;
1195 unsigned slot
= si_get_shaderbuf_slot(start_slot
+ i
);
1196 uint32_t *desc
= descs
->list
+ slot
* 4;
1199 if (!sbuffer
|| !sbuffer
->buffer
) {
1200 pipe_resource_reference(&buffers
->buffers
[slot
], NULL
);
1201 memset(desc
, 0, sizeof(uint32_t) * 4);
1202 buffers
->enabled_mask
&= ~(1u << slot
);
1203 sctx
->descriptors_dirty
|=
1204 1u << si_const_and_shader_buffer_descriptors_idx(shader
);
1208 buf
= (struct r600_resource
*)sbuffer
->buffer
;
1209 va
= buf
->gpu_address
+ sbuffer
->buffer_offset
;
1212 desc
[1] = S_008F04_BASE_ADDRESS_HI(va
>> 32) |
1214 desc
[2] = sbuffer
->buffer_size
;
1215 desc
[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X
) |
1216 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y
) |
1217 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z
) |
1218 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W
) |
1219 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT
) |
1220 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32
);
1222 pipe_resource_reference(&buffers
->buffers
[slot
], &buf
->b
.b
);
1223 radeon_add_to_buffer_list_check_mem(&sctx
->b
, &sctx
->b
.gfx
, buf
,
1224 buffers
->shader_usage
,
1225 buffers
->priority
, true);
1226 buf
->bind_history
|= PIPE_BIND_SHADER_BUFFER
;
1228 buffers
->enabled_mask
|= 1u << slot
;
1229 sctx
->descriptors_dirty
|=
1230 1u << si_const_and_shader_buffer_descriptors_idx(shader
);
1232 util_range_add(&buf
->valid_buffer_range
, sbuffer
->buffer_offset
,
1233 sbuffer
->buffer_offset
+ sbuffer
->buffer_size
);
1237 void si_get_shader_buffers(struct si_context
*sctx
,
1238 enum pipe_shader_type shader
,
1239 uint start_slot
, uint count
,
1240 struct pipe_shader_buffer
*sbuf
)
1242 struct si_buffer_resources
*buffers
= &sctx
->const_and_shader_buffers
[shader
];
1243 struct si_descriptors
*descs
= si_const_and_shader_buffer_descriptors(sctx
, shader
);
1245 for (unsigned i
= 0; i
< count
; ++i
) {
1246 si_get_buffer_from_descriptors(
1248 si_get_shaderbuf_slot(start_slot
+ i
),
1249 &sbuf
[i
].buffer
, &sbuf
[i
].buffer_offset
,
1250 &sbuf
[i
].buffer_size
);
1256 void si_set_ring_buffer(struct pipe_context
*ctx
, uint slot
,
1257 struct pipe_resource
*buffer
,
1258 unsigned stride
, unsigned num_records
,
1259 bool add_tid
, bool swizzle
,
1260 unsigned element_size
, unsigned index_stride
, uint64_t offset
)
1262 struct si_context
*sctx
= (struct si_context
*)ctx
;
1263 struct si_buffer_resources
*buffers
= &sctx
->rw_buffers
;
1264 struct si_descriptors
*descs
= &sctx
->descriptors
[SI_DESCS_RW_BUFFERS
];
1266 /* The stride field in the resource descriptor has 14 bits */
1267 assert(stride
< (1 << 14));
1269 assert(slot
< descs
->num_elements
);
1270 pipe_resource_reference(&buffers
->buffers
[slot
], NULL
);
1275 va
= r600_resource(buffer
)->gpu_address
+ offset
;
1277 switch (element_size
) {
1279 assert(!"Unsupported ring buffer element size");
1295 switch (index_stride
) {
1297 assert(!"Unsupported ring buffer index stride");
1313 if (sctx
->b
.chip_class
>= VI
&& stride
)
1314 num_records
*= stride
;
1316 /* Set the descriptor. */
1317 uint32_t *desc
= descs
->list
+ slot
*4;
1319 desc
[1] = S_008F04_BASE_ADDRESS_HI(va
>> 32) |
1320 S_008F04_STRIDE(stride
) |
1321 S_008F04_SWIZZLE_ENABLE(swizzle
);
1322 desc
[2] = num_records
;
1323 desc
[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X
) |
1324 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y
) |
1325 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z
) |
1326 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W
) |
1327 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT
) |
1328 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32
) |
1329 S_008F0C_INDEX_STRIDE(index_stride
) |
1330 S_008F0C_ADD_TID_ENABLE(add_tid
);
1332 if (sctx
->b
.chip_class
>= GFX9
)
1333 assert(!swizzle
|| element_size
== 1); /* always 4 bytes on GFX9 */
1335 desc
[3] |= S_008F0C_ELEMENT_SIZE(element_size
);
1337 pipe_resource_reference(&buffers
->buffers
[slot
], buffer
);
1338 radeon_add_to_buffer_list(&sctx
->b
, &sctx
->b
.gfx
,
1339 (struct r600_resource
*)buffer
,
1340 buffers
->shader_usage
, buffers
->priority
);
1341 buffers
->enabled_mask
|= 1u << slot
;
1343 /* Clear the descriptor. */
1344 memset(descs
->list
+ slot
*4, 0, sizeof(uint32_t) * 4);
1345 buffers
->enabled_mask
&= ~(1u << slot
);
1348 sctx
->descriptors_dirty
|= 1u << SI_DESCS_RW_BUFFERS
;
1351 /* STREAMOUT BUFFERS */
1353 static void si_set_streamout_targets(struct pipe_context
*ctx
,
1354 unsigned num_targets
,
1355 struct pipe_stream_output_target
**targets
,
1356 const unsigned *offsets
)
1358 struct si_context
*sctx
= (struct si_context
*)ctx
;
1359 struct si_buffer_resources
*buffers
= &sctx
->rw_buffers
;
1360 struct si_descriptors
*descs
= &sctx
->descriptors
[SI_DESCS_RW_BUFFERS
];
1361 unsigned old_num_targets
= sctx
->b
.streamout
.num_targets
;
1364 /* We are going to unbind the buffers. Mark which caches need to be flushed. */
1365 if (sctx
->b
.streamout
.num_targets
&& sctx
->b
.streamout
.begin_emitted
) {
1366 /* Since streamout uses vector writes which go through TC L2
1367 * and most other clients can use TC L2 as well, we don't need
1370 * The only cases which requires flushing it is VGT DMA index
1371 * fetching (on <= CIK) and indirect draw data, which are rare
1372 * cases. Thus, flag the TC L2 dirtiness in the resource and
1373 * handle it at draw call time.
1375 for (i
= 0; i
< sctx
->b
.streamout
.num_targets
; i
++)
1376 if (sctx
->b
.streamout
.targets
[i
])
1377 r600_resource(sctx
->b
.streamout
.targets
[i
]->b
.buffer
)->TC_L2_dirty
= true;
1379 /* Invalidate the scalar cache in case a streamout buffer is
1380 * going to be used as a constant buffer.
1382 * Invalidate TC L1, because streamout bypasses it (done by
1383 * setting GLC=1 in the store instruction), but it can contain
1384 * outdated data of streamout buffers.
1386 * VS_PARTIAL_FLUSH is required if the buffers are going to be
1387 * used as an input immediately.
1389 sctx
->b
.flags
|= SI_CONTEXT_INV_SMEM_L1
|
1390 SI_CONTEXT_INV_VMEM_L1
|
1391 SI_CONTEXT_VS_PARTIAL_FLUSH
;
1394 /* All readers of the streamout targets need to be finished before we can
1395 * start writing to the targets.
1398 sctx
->b
.flags
|= SI_CONTEXT_PS_PARTIAL_FLUSH
|
1399 SI_CONTEXT_CS_PARTIAL_FLUSH
;
1401 /* Streamout buffers must be bound in 2 places:
1402 * 1) in VGT by setting the VGT_STRMOUT registers
1403 * 2) as shader resources
1406 /* Set the VGT regs. */
1407 r600_set_streamout_targets(ctx
, num_targets
, targets
, offsets
);
1409 /* Set the shader resources.*/
1410 for (i
= 0; i
< num_targets
; i
++) {
1411 bufidx
= SI_VS_STREAMOUT_BUF0
+ i
;
1414 struct pipe_resource
*buffer
= targets
[i
]->buffer
;
1415 uint64_t va
= r600_resource(buffer
)->gpu_address
;
1417 /* Set the descriptor.
1419 * On VI, the format must be non-INVALID, otherwise
1420 * the buffer will be considered not bound and store
1421 * instructions will be no-ops.
1423 uint32_t *desc
= descs
->list
+ bufidx
*4;
1425 desc
[1] = S_008F04_BASE_ADDRESS_HI(va
>> 32);
1426 desc
[2] = 0xffffffff;
1427 desc
[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X
) |
1428 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y
) |
1429 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z
) |
1430 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W
) |
1431 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32
);
1433 /* Set the resource. */
1434 pipe_resource_reference(&buffers
->buffers
[bufidx
],
1436 radeon_add_to_buffer_list_check_mem(&sctx
->b
, &sctx
->b
.gfx
,
1437 (struct r600_resource
*)buffer
,
1438 buffers
->shader_usage
,
1439 RADEON_PRIO_SHADER_RW_BUFFER
,
1441 r600_resource(buffer
)->bind_history
|= PIPE_BIND_STREAM_OUTPUT
;
1443 buffers
->enabled_mask
|= 1u << bufidx
;
1445 /* Clear the descriptor and unset the resource. */
1446 memset(descs
->list
+ bufidx
*4, 0,
1447 sizeof(uint32_t) * 4);
1448 pipe_resource_reference(&buffers
->buffers
[bufidx
],
1450 buffers
->enabled_mask
&= ~(1u << bufidx
);
1453 for (; i
< old_num_targets
; i
++) {
1454 bufidx
= SI_VS_STREAMOUT_BUF0
+ i
;
1455 /* Clear the descriptor and unset the resource. */
1456 memset(descs
->list
+ bufidx
*4, 0, sizeof(uint32_t) * 4);
1457 pipe_resource_reference(&buffers
->buffers
[bufidx
], NULL
);
1458 buffers
->enabled_mask
&= ~(1u << bufidx
);
1461 sctx
->descriptors_dirty
|= 1u << SI_DESCS_RW_BUFFERS
;
1464 static void si_desc_reset_buffer_offset(struct pipe_context
*ctx
,
1465 uint32_t *desc
, uint64_t old_buf_va
,
1466 struct pipe_resource
*new_buf
)
1468 /* Retrieve the buffer offset from the descriptor. */
1469 uint64_t old_desc_va
=
1470 desc
[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc
[1]) << 32);
1472 assert(old_buf_va
<= old_desc_va
);
1473 uint64_t offset_within_buffer
= old_desc_va
- old_buf_va
;
1475 /* Update the descriptor. */
1476 si_set_buf_desc_address(r600_resource(new_buf
), offset_within_buffer
,
1480 /* INTERNAL CONST BUFFERS */
1482 static void si_set_polygon_stipple(struct pipe_context
*ctx
,
1483 const struct pipe_poly_stipple
*state
)
1485 struct si_context
*sctx
= (struct si_context
*)ctx
;
1486 struct pipe_constant_buffer cb
= {};
1487 unsigned stipple
[32];
1490 for (i
= 0; i
< 32; i
++)
1491 stipple
[i
] = util_bitreverse(state
->stipple
[i
]);
1493 cb
.user_buffer
= stipple
;
1494 cb
.buffer_size
= sizeof(stipple
);
1496 si_set_rw_buffer(sctx
, SI_PS_CONST_POLY_STIPPLE
, &cb
);
1499 /* TEXTURE METADATA ENABLE/DISABLE */
1502 si_resident_handles_update_needs_color_decompress(struct si_context
*sctx
)
1504 util_dynarray_clear(&sctx
->resident_tex_needs_color_decompress
);
1505 util_dynarray_clear(&sctx
->resident_img_needs_color_decompress
);
1507 util_dynarray_foreach(&sctx
->resident_tex_handles
,
1508 struct si_texture_handle
*, tex_handle
) {
1509 struct pipe_resource
*res
= (*tex_handle
)->view
->texture
;
1510 struct r600_texture
*rtex
;
1512 if (!res
|| res
->target
== PIPE_BUFFER
)
1515 rtex
= (struct r600_texture
*)res
;
1516 if (!color_needs_decompression(rtex
))
1519 util_dynarray_append(&sctx
->resident_tex_needs_color_decompress
,
1520 struct si_texture_handle
*, *tex_handle
);
1523 util_dynarray_foreach(&sctx
->resident_img_handles
,
1524 struct si_image_handle
*, img_handle
) {
1525 struct pipe_image_view
*view
= &(*img_handle
)->view
;
1526 struct pipe_resource
*res
= view
->resource
;
1527 struct r600_texture
*rtex
;
1529 if (!res
|| res
->target
== PIPE_BUFFER
)
1532 rtex
= (struct r600_texture
*)res
;
1533 if (!color_needs_decompression(rtex
))
1536 util_dynarray_append(&sctx
->resident_img_needs_color_decompress
,
1537 struct si_image_handle
*, *img_handle
);
1541 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
1542 * while the texture is bound, possibly by a different context. In that case,
1543 * call this function to update needs_*_decompress_masks.
1545 void si_update_needs_color_decompress_masks(struct si_context
*sctx
)
1547 for (int i
= 0; i
< SI_NUM_SHADERS
; ++i
) {
1548 si_samplers_update_needs_color_decompress_mask(&sctx
->samplers
[i
]);
1549 si_images_update_needs_color_decompress_mask(&sctx
->images
[i
]);
1550 si_update_shader_needs_decompress_mask(sctx
, i
);
1553 si_resident_handles_update_needs_color_decompress(sctx
);
1556 /* BUFFER DISCARD/INVALIDATION */
1558 /** Reset descriptors of buffer resources after \p buf has been invalidated. */
1559 static void si_reset_buffer_resources(struct si_context
*sctx
,
1560 struct si_buffer_resources
*buffers
,
1561 unsigned descriptors_idx
,
1563 struct pipe_resource
*buf
,
1565 enum radeon_bo_usage usage
,
1566 enum radeon_bo_priority priority
)
1568 struct si_descriptors
*descs
= &sctx
->descriptors
[descriptors_idx
];
1569 unsigned mask
= buffers
->enabled_mask
& slot_mask
;
1572 unsigned i
= u_bit_scan(&mask
);
1573 if (buffers
->buffers
[i
] == buf
) {
1574 si_desc_reset_buffer_offset(&sctx
->b
.b
,
1577 sctx
->descriptors_dirty
|= 1u << descriptors_idx
;
1579 radeon_add_to_buffer_list_check_mem(&sctx
->b
, &sctx
->b
.gfx
,
1580 (struct r600_resource
*)buf
,
1581 usage
, priority
, true);
1586 static void si_rebind_buffer(struct pipe_context
*ctx
, struct pipe_resource
*buf
,
1589 struct si_context
*sctx
= (struct si_context
*)ctx
;
1590 struct r600_resource
*rbuffer
= r600_resource(buf
);
1592 unsigned num_elems
= sctx
->vertex_elements
?
1593 sctx
->vertex_elements
->count
: 0;
1595 /* We changed the buffer, now we need to bind it where the old one
1596 * was bound. This consists of 2 things:
1597 * 1) Updating the resource descriptor and dirtying it.
1598 * 2) Adding a relocation to the CS, so that it's usable.
1601 /* Vertex buffers. */
1602 if (rbuffer
->bind_history
& PIPE_BIND_VERTEX_BUFFER
) {
1603 for (i
= 0; i
< num_elems
; i
++) {
1604 int vb
= sctx
->vertex_elements
->vertex_buffer_index
[i
];
1606 if (vb
>= ARRAY_SIZE(sctx
->vertex_buffer
))
1608 if (!sctx
->vertex_buffer
[vb
].buffer
.resource
)
1611 if (sctx
->vertex_buffer
[vb
].buffer
.resource
== buf
) {
1612 sctx
->vertex_buffers_dirty
= true;
1618 /* Streamout buffers. (other internal buffers can't be invalidated) */
1619 if (rbuffer
->bind_history
& PIPE_BIND_STREAM_OUTPUT
) {
1620 for (i
= SI_VS_STREAMOUT_BUF0
; i
<= SI_VS_STREAMOUT_BUF3
; i
++) {
1621 struct si_buffer_resources
*buffers
= &sctx
->rw_buffers
;
1622 struct si_descriptors
*descs
=
1623 &sctx
->descriptors
[SI_DESCS_RW_BUFFERS
];
1625 if (buffers
->buffers
[i
] != buf
)
1628 si_desc_reset_buffer_offset(ctx
, descs
->list
+ i
*4,
1630 sctx
->descriptors_dirty
|= 1u << SI_DESCS_RW_BUFFERS
;
1632 radeon_add_to_buffer_list_check_mem(&sctx
->b
, &sctx
->b
.gfx
,
1633 rbuffer
, buffers
->shader_usage
,
1634 RADEON_PRIO_SHADER_RW_BUFFER
,
1637 /* Update the streamout state. */
1638 if (sctx
->b
.streamout
.begin_emitted
)
1639 r600_emit_streamout_end(&sctx
->b
);
1640 sctx
->b
.streamout
.append_bitmask
=
1641 sctx
->b
.streamout
.enabled_mask
;
1642 r600_streamout_buffers_dirty(&sctx
->b
);
1646 /* Constant and shader buffers. */
1647 if (rbuffer
->bind_history
& PIPE_BIND_CONSTANT_BUFFER
) {
1648 for (shader
= 0; shader
< SI_NUM_SHADERS
; shader
++)
1649 si_reset_buffer_resources(sctx
, &sctx
->const_and_shader_buffers
[shader
],
1650 si_const_and_shader_buffer_descriptors_idx(shader
),
1651 u_bit_consecutive(SI_NUM_SHADER_BUFFERS
, SI_NUM_CONST_BUFFERS
),
1653 sctx
->const_and_shader_buffers
[shader
].shader_usage_constbuf
,
1654 sctx
->const_and_shader_buffers
[shader
].priority_constbuf
);
1657 if (rbuffer
->bind_history
& PIPE_BIND_SHADER_BUFFER
) {
1658 for (shader
= 0; shader
< SI_NUM_SHADERS
; shader
++)
1659 si_reset_buffer_resources(sctx
, &sctx
->const_and_shader_buffers
[shader
],
1660 si_const_and_shader_buffer_descriptors_idx(shader
),
1661 u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS
),
1663 sctx
->const_and_shader_buffers
[shader
].shader_usage
,
1664 sctx
->const_and_shader_buffers
[shader
].priority
);
1667 if (rbuffer
->bind_history
& PIPE_BIND_SAMPLER_VIEW
) {
1668 /* Texture buffers - update bindings. */
1669 for (shader
= 0; shader
< SI_NUM_SHADERS
; shader
++) {
1670 struct si_sampler_views
*views
= &sctx
->samplers
[shader
].views
;
1671 struct si_descriptors
*descs
=
1672 si_sampler_and_image_descriptors(sctx
, shader
);
1673 unsigned mask
= views
->enabled_mask
;
1676 unsigned i
= u_bit_scan(&mask
);
1677 if (views
->views
[i
]->texture
== buf
) {
1678 unsigned desc_slot
= si_get_sampler_slot(i
);
1680 si_desc_reset_buffer_offset(ctx
,
1684 sctx
->descriptors_dirty
|=
1685 1u << si_sampler_and_image_descriptors_idx(shader
);
1687 radeon_add_to_buffer_list_check_mem(&sctx
->b
, &sctx
->b
.gfx
,
1688 rbuffer
, RADEON_USAGE_READ
,
1689 RADEON_PRIO_SAMPLER_BUFFER
,
1697 if (rbuffer
->bind_history
& PIPE_BIND_SHADER_IMAGE
) {
1698 for (shader
= 0; shader
< SI_NUM_SHADERS
; ++shader
) {
1699 struct si_images_info
*images
= &sctx
->images
[shader
];
1700 struct si_descriptors
*descs
=
1701 si_sampler_and_image_descriptors(sctx
, shader
);
1702 unsigned mask
= images
->enabled_mask
;
1705 unsigned i
= u_bit_scan(&mask
);
1707 if (images
->views
[i
].resource
== buf
) {
1708 unsigned desc_slot
= si_get_image_slot(i
);
1710 if (images
->views
[i
].access
& PIPE_IMAGE_ACCESS_WRITE
)
1711 si_mark_image_range_valid(&images
->views
[i
]);
1713 si_desc_reset_buffer_offset(
1714 ctx
, descs
->list
+ desc_slot
* 8 + 4,
1716 sctx
->descriptors_dirty
|=
1717 1u << si_sampler_and_image_descriptors_idx(shader
);
1719 radeon_add_to_buffer_list_check_mem(
1720 &sctx
->b
, &sctx
->b
.gfx
, rbuffer
,
1721 RADEON_USAGE_READWRITE
,
1722 RADEON_PRIO_SAMPLER_BUFFER
, true);
1728 /* Bindless texture handles */
1729 if (rbuffer
->texture_handle_allocated
) {
1730 struct si_descriptors
*descs
= &sctx
->bindless_descriptors
;
1732 util_dynarray_foreach(&sctx
->resident_tex_handles
,
1733 struct si_texture_handle
*, tex_handle
) {
1734 struct pipe_sampler_view
*view
= (*tex_handle
)->view
;
1735 unsigned desc_slot
= (*tex_handle
)->desc_slot
;
1737 if (view
->texture
== buf
) {
1738 si_set_buf_desc_address(rbuffer
,
1741 desc_slot
* 16 + 4);
1743 (*tex_handle
)->desc_dirty
= true;
1744 sctx
->bindless_descriptors_dirty
= true;
1746 radeon_add_to_buffer_list_check_mem(
1747 &sctx
->b
, &sctx
->b
.gfx
, rbuffer
,
1749 RADEON_PRIO_SAMPLER_BUFFER
, true);
1754 /* Bindless image handles */
1755 if (rbuffer
->image_handle_allocated
) {
1756 struct si_descriptors
*descs
= &sctx
->bindless_descriptors
;
1758 util_dynarray_foreach(&sctx
->resident_img_handles
,
1759 struct si_image_handle
*, img_handle
) {
1760 struct pipe_image_view
*view
= &(*img_handle
)->view
;
1761 unsigned desc_slot
= (*img_handle
)->desc_slot
;
1763 if (view
->resource
== buf
) {
1764 if (view
->access
& PIPE_IMAGE_ACCESS_WRITE
)
1765 si_mark_image_range_valid(view
);
1767 si_set_buf_desc_address(rbuffer
,
1770 desc_slot
* 16 + 4);
1772 (*img_handle
)->desc_dirty
= true;
1773 sctx
->bindless_descriptors_dirty
= true;
1775 radeon_add_to_buffer_list_check_mem(
1776 &sctx
->b
, &sctx
->b
.gfx
, rbuffer
,
1777 RADEON_USAGE_READWRITE
,
1778 RADEON_PRIO_SAMPLER_BUFFER
, true);
1784 /* Reallocate a buffer a update all resource bindings where the buffer is
1787 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
1788 * idle by discarding its contents. Apps usually tell us when to do this using
1789 * map_buffer flags, for example.
1791 static void si_invalidate_buffer(struct pipe_context
*ctx
, struct pipe_resource
*buf
)
1793 struct si_context
*sctx
= (struct si_context
*)ctx
;
1794 struct r600_resource
*rbuffer
= r600_resource(buf
);
1795 uint64_t old_va
= rbuffer
->gpu_address
;
1797 /* Reallocate the buffer in the same pipe_resource. */
1798 r600_alloc_resource(&sctx
->screen
->b
, rbuffer
);
1800 si_rebind_buffer(ctx
, buf
, old_va
);
1803 static void si_upload_bindless_descriptor(struct si_context
*sctx
,
1805 unsigned num_dwords
)
1807 struct si_descriptors
*desc
= &sctx
->bindless_descriptors
;
1808 struct radeon_winsys_cs
*cs
= sctx
->b
.gfx
.cs
;
1809 unsigned desc_slot_offset
= desc_slot
* 16;
1813 data
= desc
->list
+ desc_slot_offset
;
1815 va
= desc
->buffer
->gpu_address
+ desc
->buffer_offset
+
1816 desc_slot_offset
* 4;
1818 radeon_emit(cs
, PKT3(PKT3_WRITE_DATA
, 2 + num_dwords
, 0));
1819 radeon_emit(cs
, S_370_DST_SEL(V_370_TC_L2
) |
1820 S_370_WR_CONFIRM(1) |
1821 S_370_ENGINE_SEL(V_370_ME
));
1822 radeon_emit(cs
, va
);
1823 radeon_emit(cs
, va
>> 32);
1824 radeon_emit_array(cs
, data
, num_dwords
);
1827 static void si_upload_bindless_descriptors(struct si_context
*sctx
)
1829 if (!sctx
->bindless_descriptors_dirty
)
1832 /* Wait for graphics/compute to be idle before updating the resident
1833 * descriptors directly in memory, in case the GPU is using them.
1835 sctx
->b
.flags
|= SI_CONTEXT_PS_PARTIAL_FLUSH
|
1836 SI_CONTEXT_CS_PARTIAL_FLUSH
;
1837 si_emit_cache_flush(sctx
);
1839 util_dynarray_foreach(&sctx
->resident_tex_handles
,
1840 struct si_texture_handle
*, tex_handle
) {
1841 unsigned desc_slot
= (*tex_handle
)->desc_slot
;
1843 if (!(*tex_handle
)->desc_dirty
)
1846 si_upload_bindless_descriptor(sctx
, desc_slot
, 16);
1847 (*tex_handle
)->desc_dirty
= false;
1850 util_dynarray_foreach(&sctx
->resident_img_handles
,
1851 struct si_image_handle
*, img_handle
) {
1852 unsigned desc_slot
= (*img_handle
)->desc_slot
;
1854 if (!(*img_handle
)->desc_dirty
)
1857 si_upload_bindless_descriptor(sctx
, desc_slot
, 8);
1858 (*img_handle
)->desc_dirty
= false;
1861 /* Invalidate L1 because it doesn't know that L2 changed. */
1862 sctx
->b
.flags
|= SI_CONTEXT_INV_SMEM_L1
;
1863 si_emit_cache_flush(sctx
);
1865 sctx
->bindless_descriptors_dirty
= false;
1868 /* Update mutable image descriptor fields of all resident textures. */
1869 static void si_update_bindless_texture_descriptor(struct si_context
*sctx
,
1870 struct si_texture_handle
*tex_handle
)
1872 struct si_sampler_view
*sview
= (struct si_sampler_view
*)tex_handle
->view
;
1873 struct si_descriptors
*desc
= &sctx
->bindless_descriptors
;
1874 unsigned desc_slot_offset
= tex_handle
->desc_slot
* 16;
1875 uint32_t desc_list
[16];
1877 if (sview
->base
.texture
->target
== PIPE_BUFFER
)
1880 memcpy(desc_list
, desc
->list
+ desc_slot_offset
, sizeof(desc_list
));
1881 si_set_sampler_view_desc(sctx
, sview
, &tex_handle
->sstate
,
1882 desc
->list
+ desc_slot_offset
);
1884 if (memcmp(desc_list
, desc
->list
+ desc_slot_offset
,
1885 sizeof(desc_list
))) {
1886 tex_handle
->desc_dirty
= true;
1887 sctx
->bindless_descriptors_dirty
= true;
static void si_update_bindless_image_descriptor(struct si_context *sctx,
						struct si_image_handle *img_handle)
{
	struct si_descriptors *desc = &sctx->bindless_descriptors;
	unsigned desc_slot_offset = img_handle->desc_slot * 16;
	struct pipe_image_view *view = &img_handle->view;
	uint32_t desc_list[8];

	if (view->resource->target == PIPE_BUFFER)
		return;

	memcpy(desc_list, desc->list + desc_slot_offset,
	       sizeof(desc_list));
	si_set_shader_image_desc(sctx, view, true,
				 desc->list + desc_slot_offset);

	if (memcmp(desc_list, desc->list + desc_slot_offset,
		   sizeof(desc_list))) {
		img_handle->desc_dirty = true;
		sctx->bindless_descriptors_dirty = true;
	}
}
static void si_update_all_resident_texture_descriptors(struct si_context *sctx)
{
	util_dynarray_foreach(&sctx->resident_tex_handles,
			      struct si_texture_handle *, tex_handle) {
		si_update_bindless_texture_descriptor(sctx, *tex_handle);
	}

	util_dynarray_foreach(&sctx->resident_img_handles,
			      struct si_image_handle *, img_handle) {
		si_update_bindless_image_descriptor(sctx, *img_handle);
	}

	si_upload_bindless_descriptors(sctx);
}
/* Update mutable image descriptor fields of all bound textures. */
void si_update_all_texture_descriptors(struct si_context *sctx)
{
	unsigned shader;

	for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
		struct si_sampler_views *samplers = &sctx->samplers[shader].views;
		struct si_images_info *images = &sctx->images[shader];
		unsigned mask;

		/* Images. */
		mask = images->enabled_mask;
		while (mask) {
			unsigned i = u_bit_scan(&mask);
			struct pipe_image_view *view = &images->views[i];

			if (!view->resource ||
			    view->resource->target == PIPE_BUFFER)
				continue;

			si_set_shader_image(sctx, shader, i, view, true);
		}

		/* Sampler views. */
		mask = samplers->enabled_mask;
		while (mask) {
			unsigned i = u_bit_scan(&mask);
			struct pipe_sampler_view *view = samplers->views[i];

			if (!view ||
			    !view->texture ||
			    view->texture->target == PIPE_BUFFER)
				continue;

			si_set_sampler_view(sctx, shader, i,
					    samplers->views[i], true);
		}

		si_update_shader_needs_decompress_mask(sctx, shader);
	}

	si_update_all_resident_texture_descriptors(sctx);
}
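/* The mask walks above use the common u_bit_scan() idiom: for example,
 * enabled_mask = 0b1010 visits slot 1 and then slot 3, clearing each bit
 * from the local copy as it is returned.
 */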
/* SHADER USER DATA */

static void si_mark_shader_pointers_dirty(struct si_context *sctx,
					  unsigned shader)
{
	sctx->shader_pointers_dirty |=
		u_bit_consecutive(SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS,
				  SI_NUM_SHADER_DESCS);

	if (shader == PIPE_SHADER_VERTEX)
		sctx->vertex_buffer_pointer_dirty = sctx->vertex_buffers.buffer != NULL;

	si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
}
static void si_shader_pointers_begin_new_cs(struct si_context *sctx)
{
	sctx->shader_pointers_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
	sctx->vertex_buffer_pointer_dirty = sctx->vertex_buffers.buffer != NULL;
	si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
	sctx->graphics_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
	sctx->compute_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
}
/* Set a base register address for user data constants in the given shader.
 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
 */
static void si_set_user_data_base(struct si_context *sctx,
				  unsigned shader, uint32_t new_base)
{
	uint32_t *base = &sctx->shader_pointers.sh_base[shader];

	if (*base != new_base) {
		*base = new_base;

		si_mark_shader_pointers_dirty(sctx, shader);

		if (shader == PIPE_SHADER_VERTEX)
			sctx->last_vs_state = ~0;
	}
}
/* This must be called when these shaders are changed from non-NULL to NULL
 * and vice versa:
 * - geometry shader
 * - tessellation control shader
 * - tessellation evaluation shader
 */
void si_shader_change_notify(struct si_context *sctx)
{
	/* VS can be bound as VS, ES, or LS. */
	if (sctx->tes_shader.cso) {
		if (sctx->b.chip_class >= GFX9) {
			si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
					      R_00B430_SPI_SHADER_USER_DATA_LS_0);
		} else {
			si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
					      R_00B530_SPI_SHADER_USER_DATA_LS_0);
		}
	} else if (sctx->gs_shader.cso) {
		si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
				      R_00B330_SPI_SHADER_USER_DATA_ES_0);
	} else {
		si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
				      R_00B130_SPI_SHADER_USER_DATA_VS_0);
	}

	/* TES can be bound as ES, VS, or not bound. */
	if (sctx->tes_shader.cso) {
		if (sctx->gs_shader.cso)
			si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
					      R_00B330_SPI_SHADER_USER_DATA_ES_0);
		else
			si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
					      R_00B130_SPI_SHADER_USER_DATA_VS_0);
	} else {
		si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
	}
}
static void si_emit_shader_pointer(struct si_context *sctx,
				   struct si_descriptors *desc,
				   unsigned sh_base)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	uint64_t va;

	if (!desc->buffer)
		return; /* the pointer is not used by current shaders */

	va = desc->buffer->gpu_address +
	     desc->buffer_offset;

	radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
	radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
}
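/* Worked example (illustrative values): for the VS stage, sh_base is
 * R_00B130_SPI_SHADER_USER_DATA_VS_0 = 0xB130. A descriptor list whose
 * shader_userdata_offset is 8 bytes (user-data SGPR pair #2) is written at
 * dword register offset (0xB130 + 8 - 0xB000) >> 2 = 0x4E, and the two
 * emitted VA dwords fill two consecutive USER_DATA registers.
 */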
static void si_emit_global_shader_pointers(struct si_context *sctx,
					   struct si_descriptors *descs)
{
	si_emit_shader_pointer(sctx, descs,
			       R_00B030_SPI_SHADER_USER_DATA_PS_0);
	si_emit_shader_pointer(sctx, descs,
			       R_00B130_SPI_SHADER_USER_DATA_VS_0);

	if (sctx->b.chip_class >= GFX9) {
		/* GFX9 merged LS-HS and ES-GS. */
		if (descs == &sctx->descriptors[SI_DESCS_RW_BUFFERS]) {
			/* Set RW_BUFFERS in the special registers, so that
			 * it's preloaded into s[0:1] instead of s[8:9].
			 */
			si_emit_shader_pointer(sctx, descs,
					       R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS);
			si_emit_shader_pointer(sctx, descs,
					       R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS);
		} else {
			/* Set BINDLESS_SAMPLERS_AND_IMAGES into s[10:11],
			 * s[8:9] remains unused for now.
			 */
			assert(descs == &sctx->bindless_descriptors);
			si_emit_shader_pointer(sctx, descs,
					       R_00B330_SPI_SHADER_USER_DATA_ES_0);
			si_emit_shader_pointer(sctx, descs,
					       R_00B430_SPI_SHADER_USER_DATA_LS_0);
		}
	} else {
		si_emit_shader_pointer(sctx, descs,
				       R_00B230_SPI_SHADER_USER_DATA_GS_0);
		si_emit_shader_pointer(sctx, descs,
				       R_00B330_SPI_SHADER_USER_DATA_ES_0);
		si_emit_shader_pointer(sctx, descs,
				       R_00B430_SPI_SHADER_USER_DATA_HS_0);
		si_emit_shader_pointer(sctx, descs,
				       R_00B530_SPI_SHADER_USER_DATA_LS_0);
	}
}
void si_emit_graphics_shader_pointers(struct si_context *sctx,
				      struct r600_atom *atom)
{
	uint32_t *sh_base = sctx->shader_pointers.sh_base;
	struct si_descriptors *descs;
	unsigned mask;

	descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];

	if (sctx->shader_pointers_dirty & (1 << SI_DESCS_RW_BUFFERS))
		si_emit_global_shader_pointers(sctx, descs);

	mask = sctx->shader_pointers_dirty &
	       u_bit_consecutive(SI_DESCS_FIRST_SHADER,
				 SI_DESCS_FIRST_COMPUTE - SI_DESCS_FIRST_SHADER);

	while (mask) {
		unsigned i = u_bit_scan(&mask);
		unsigned shader = (i - SI_DESCS_FIRST_SHADER) / SI_NUM_SHADER_DESCS;
		unsigned base = sh_base[shader];

		if (base)
			si_emit_shader_pointer(sctx, descs + i, base);
	}
	sctx->shader_pointers_dirty &=
		~u_bit_consecutive(SI_DESCS_RW_BUFFERS, SI_DESCS_FIRST_COMPUTE);

	if (sctx->vertex_buffer_pointer_dirty) {
		si_emit_shader_pointer(sctx, &sctx->vertex_buffers,
				       sh_base[PIPE_SHADER_VERTEX]);
		sctx->vertex_buffer_pointer_dirty = false;
	}

	if (sctx->graphics_bindless_pointer_dirty) {
		si_emit_global_shader_pointers(sctx,
					       &sctx->bindless_descriptors);
		sctx->graphics_bindless_pointer_dirty = false;
	}
}
void si_emit_compute_shader_pointers(struct si_context *sctx)
{
	unsigned base = R_00B900_COMPUTE_USER_DATA_0;
	struct si_descriptors *descs = sctx->descriptors;
	unsigned compute_mask =
		u_bit_consecutive(SI_DESCS_FIRST_COMPUTE, SI_NUM_SHADER_DESCS);
	unsigned mask = sctx->shader_pointers_dirty & compute_mask;

	while (mask) {
		unsigned i = u_bit_scan(&mask);

		si_emit_shader_pointer(sctx, descs + i, base);
	}
	sctx->shader_pointers_dirty &= ~compute_mask;

	if (sctx->compute_bindless_pointer_dirty) {
		si_emit_shader_pointer(sctx, &sctx->bindless_descriptors, base);
		sctx->compute_bindless_pointer_dirty = false;
	}
}
/* BINDLESS DESCRIPTORS */

static void si_init_bindless_descriptors(struct si_context *sctx,
					 struct si_descriptors *desc,
					 unsigned shader_userdata_index,
					 unsigned num_elements)
{
	MAYBE_UNUSED unsigned desc_slot;

	si_init_descriptors(desc, shader_userdata_index, 16, num_elements);
	sctx->bindless_descriptors.num_active_slots = num_elements;

	/* The first bindless descriptor is stored at slot 1, because 0 is not
	 * considered to be a valid handle.
	 */
	sctx->num_bindless_descriptors = 1;

	/* Track which bindless slots are used (or not). */
	util_idalloc_init(&sctx->bindless_used_slots);
	util_idalloc_resize(&sctx->bindless_used_slots, num_elements);

	/* Reserve slot 0 because it's an invalid handle for bindless. */
	desc_slot = util_idalloc_alloc(&sctx->bindless_used_slots);
	assert(desc_slot == 0); /* the first allocation must return the reserved slot */
}
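/* Consequence of the reservation above: the first real allocation returns
 * desc_slot = 1, and since API handles are the slot indices themselves
 * (see si_create_texture_handle below), a valid handle can never be 0.
 */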
static void si_release_bindless_descriptors(struct si_context *sctx)
{
	si_release_descriptors(&sctx->bindless_descriptors);
	util_idalloc_fini(&sctx->bindless_used_slots);
}
static unsigned si_get_first_free_bindless_slot(struct si_context *sctx)
{
	struct si_descriptors *desc = &sctx->bindless_descriptors;
	unsigned desc_slot;

	desc_slot = util_idalloc_alloc(&sctx->bindless_used_slots);
	if (desc_slot >= desc->num_elements) {
		/* The array of bindless descriptors is full, resize it. */
		unsigned slot_size = desc->element_dw_size * 4;
		unsigned new_num_elements = desc->num_elements * 2;

		desc->list = REALLOC(desc->list, desc->num_elements * slot_size,
				     new_num_elements * slot_size);
		desc->num_elements = new_num_elements;
		desc->num_active_slots = new_num_elements;
	}

	assert(desc_slot);
	return desc_slot;
}
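/* Growth sketch (assuming the 1024-slot initial size requested in
 * si_init_all_descriptors below): each 16-dword slot is 64 bytes, so the
 * list grows 64 KB -> 128 KB -> 256 KB by doubling, and previously handed
 * out slots keep their indices because the list only ever grows.
 */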
static unsigned
si_create_bindless_descriptor(struct si_context *sctx, uint32_t *desc_list,
			      unsigned size)
{
	struct si_descriptors *desc = &sctx->bindless_descriptors;
	unsigned desc_slot, desc_slot_offset;

	/* Find a free slot. */
	desc_slot = si_get_first_free_bindless_slot(sctx);

	/* For simplicity, sampler and image bindless descriptors use fixed
	 * 16-dword slots for now. Image descriptors only need 8 dwords, but
	 * this doesn't really matter because no real apps use image handles.
	 */
	desc_slot_offset = desc_slot * 16;

	/* Copy the descriptor into the array. */
	memcpy(desc->list + desc_slot_offset, desc_list, size);

	/* Re-upload the whole array of bindless descriptors into a new buffer.
	 */
	if (!si_upload_descriptors(sctx, desc, &sctx->shader_pointers.atom))
		return 0;

	/* Make sure to re-emit the shader pointers for all stages. */
	sctx->graphics_bindless_pointer_dirty = true;
	sctx->compute_bindless_pointer_dirty = true;

	return desc_slot;
}
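/* Slot usage sketch: si_create_texture_handle() below copies a full 16-dword
 * list (image plus sampler words) into its slot, while
 * si_create_image_handle() copies only 8 dwords and leaves the upper half of
 * the slot untouched.
 */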
static void si_update_bindless_buffer_descriptor(struct si_context *sctx,
						 unsigned desc_slot,
						 struct pipe_resource *resource,
						 uint64_t offset,
						 bool *desc_dirty)
{
	struct si_descriptors *desc = &sctx->bindless_descriptors;
	struct r600_resource *buf = r600_resource(resource);
	unsigned desc_slot_offset = desc_slot * 16;
	uint32_t *desc_list = desc->list + desc_slot_offset + 4;
	uint64_t old_desc_va;

	assert(resource->target == PIPE_BUFFER);

	/* Retrieve the old buffer addr from the descriptor. */
	old_desc_va = desc_list[0];
	old_desc_va |= ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc_list[1]) << 32);

	if (old_desc_va != buf->gpu_address + offset) {
		/* The buffer has been invalidated when the handle wasn't
		 * resident, update the descriptor and the dirty flag.
		 */
		si_set_buf_desc_address(buf, offset, &desc_list[0]);

		*desc_dirty = true;
	}
}
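/* Unpacking example (hypothetical VA): for a buffer at GPU address
 * 0x123456000 with offset 0, slot dword 4 holds 0x23456000 and the
 * G_008F04_BASE_ADDRESS_HI field of dword 5 holds 0x1; the code above
 * reassembles these into the full old_desc_va before comparing it against
 * the buffer's current address.
 */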
static uint64_t si_create_texture_handle(struct pipe_context *ctx,
					 struct pipe_sampler_view *view,
					 const struct pipe_sampler_state *state)
{
	struct si_sampler_view *sview = (struct si_sampler_view *)view;
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_texture_handle *tex_handle;
	struct si_sampler_state *sstate;
	uint32_t desc_list[16];
	uint64_t handle;

	tex_handle = CALLOC_STRUCT(si_texture_handle);
	if (!tex_handle)
		return 0;

	memset(desc_list, 0, sizeof(desc_list));
	si_init_descriptor_list(&desc_list[0], 16, 1, null_texture_descriptor);

	sstate = ctx->create_sampler_state(ctx, state);
	if (!sstate) {
		FREE(tex_handle);
		return 0;
	}

	si_set_sampler_view_desc(sctx, sview, sstate, &desc_list[0]);
	memcpy(&tex_handle->sstate, sstate, sizeof(*sstate));
	ctx->delete_sampler_state(ctx, sstate);

	tex_handle->desc_slot = si_create_bindless_descriptor(sctx, desc_list,
							      sizeof(desc_list));
	if (!tex_handle->desc_slot) {
		FREE(tex_handle);
		return 0;
	}

	handle = tex_handle->desc_slot;

	if (!_mesa_hash_table_insert(sctx->tex_handles, (void *)handle,
				     tex_handle)) {
		FREE(tex_handle);
		return 0;
	}

	pipe_sampler_view_reference(&tex_handle->view, view);

	r600_resource(sview->base.texture)->texture_handle_allocated = true;

	return handle;
}
static void si_delete_texture_handle(struct pipe_context *ctx, uint64_t handle)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_texture_handle *tex_handle;
	struct hash_entry *entry;

	entry = _mesa_hash_table_search(sctx->tex_handles, (void *)handle);
	if (!entry)
		return;

	tex_handle = (struct si_texture_handle *)entry->data;

	/* Allow this descriptor slot to be re-used. */
	util_idalloc_free(&sctx->bindless_used_slots, tex_handle->desc_slot);

	pipe_sampler_view_reference(&tex_handle->view, NULL);
	_mesa_hash_table_remove(sctx->tex_handles, entry);
	FREE(tex_handle);
}
static void si_make_texture_handle_resident(struct pipe_context *ctx,
					    uint64_t handle, bool resident)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_texture_handle *tex_handle;
	struct si_sampler_view *sview;
	struct hash_entry *entry;

	entry = _mesa_hash_table_search(sctx->tex_handles, (void *)handle);
	if (!entry)
		return;

	tex_handle = (struct si_texture_handle *)entry->data;
	sview = (struct si_sampler_view *)tex_handle->view;

	if (resident) {
		if (sview->base.texture->target != PIPE_BUFFER) {
			struct r600_texture *rtex =
				(struct r600_texture *)sview->base.texture;

			if (depth_needs_decompression(rtex)) {
				util_dynarray_append(
					&sctx->resident_tex_needs_depth_decompress,
					struct si_texture_handle *,
					tex_handle);
			}

			if (color_needs_decompression(rtex)) {
				util_dynarray_append(
					&sctx->resident_tex_needs_color_decompress,
					struct si_texture_handle *,
					tex_handle);
			}

			if (rtex->dcc_offset &&
			    p_atomic_read(&rtex->framebuffers_bound))
				sctx->need_check_render_feedback = true;

			si_update_bindless_texture_descriptor(sctx, tex_handle);
		} else {
			si_update_bindless_buffer_descriptor(sctx,
							     tex_handle->desc_slot,
							     sview->base.texture,
							     sview->base.u.buf.offset,
							     &tex_handle->desc_dirty);
		}

		/* Re-upload the descriptor if it has been updated while it
		 * wasn't resident.
		 */
		if (tex_handle->desc_dirty)
			sctx->bindless_descriptors_dirty = true;

		/* Add the texture handle to the per-context list. */
		util_dynarray_append(&sctx->resident_tex_handles,
				     struct si_texture_handle *, tex_handle);

		/* Add the buffers to the current CS in case si_begin_new_cs()
		 * is not going to be called.
		 */
		si_sampler_view_add_buffer(sctx, sview->base.texture,
					   RADEON_USAGE_READ,
					   sview->is_stencil_sampler, false);
	} else {
		/* Remove the texture handle from the per-context list. */
		util_dynarray_delete_unordered(&sctx->resident_tex_handles,
					       struct si_texture_handle *,
					       tex_handle);

		if (sview->base.texture->target != PIPE_BUFFER) {
			util_dynarray_delete_unordered(
				&sctx->resident_tex_needs_depth_decompress,
				struct si_texture_handle *, tex_handle);

			util_dynarray_delete_unordered(
				&sctx->resident_tex_needs_color_decompress,
				struct si_texture_handle *, tex_handle);
		}
	}
}
static uint64_t si_create_image_handle(struct pipe_context *ctx,
				       const struct pipe_image_view *view)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_image_handle *img_handle;
	uint32_t desc_list[8];
	uint64_t handle;

	if (!view || !view->resource)
		return 0;

	img_handle = CALLOC_STRUCT(si_image_handle);
	if (!img_handle)
		return 0;

	memset(desc_list, 0, sizeof(desc_list));
	si_init_descriptor_list(&desc_list[0], 8, 1, null_image_descriptor);

	si_set_shader_image_desc(sctx, view, false, &desc_list[0]);

	img_handle->desc_slot = si_create_bindless_descriptor(sctx, desc_list,
							      sizeof(desc_list));
	if (!img_handle->desc_slot) {
		FREE(img_handle);
		return 0;
	}

	handle = img_handle->desc_slot;

	if (!_mesa_hash_table_insert(sctx->img_handles, (void *)handle,
				     img_handle)) {
		FREE(img_handle);
		return 0;
	}

	util_copy_image_view(&img_handle->view, view);

	r600_resource(view->resource)->image_handle_allocated = true;

	return handle;
}
static void si_delete_image_handle(struct pipe_context *ctx, uint64_t handle)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_image_handle *img_handle;
	struct hash_entry *entry;

	entry = _mesa_hash_table_search(sctx->img_handles, (void *)handle);
	if (!entry)
		return;

	img_handle = (struct si_image_handle *)entry->data;

	util_copy_image_view(&img_handle->view, NULL);
	_mesa_hash_table_remove(sctx->img_handles, entry);
	FREE(img_handle);
}
static void si_make_image_handle_resident(struct pipe_context *ctx,
					  uint64_t handle, unsigned access,
					  bool resident)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_image_handle *img_handle;
	struct pipe_image_view *view;
	struct r600_resource *res;
	struct hash_entry *entry;

	entry = _mesa_hash_table_search(sctx->img_handles, (void *)handle);
	if (!entry)
		return;

	img_handle = (struct si_image_handle *)entry->data;
	view = &img_handle->view;
	res = (struct r600_resource *)view->resource;

	if (resident) {
		if (res->b.b.target != PIPE_BUFFER) {
			struct r600_texture *rtex = (struct r600_texture *)res;
			unsigned level = view->u.tex.level;

			if (color_needs_decompression(rtex)) {
				util_dynarray_append(
					&sctx->resident_img_needs_color_decompress,
					struct si_image_handle *,
					img_handle);
			}

			if (vi_dcc_enabled(rtex, level) &&
			    p_atomic_read(&rtex->framebuffers_bound))
				sctx->need_check_render_feedback = true;

			si_update_bindless_image_descriptor(sctx, img_handle);
		} else {
			si_update_bindless_buffer_descriptor(sctx,
							     img_handle->desc_slot,
							     view->resource,
							     view->u.buf.offset,
							     &img_handle->desc_dirty);
		}

		/* Re-upload the descriptor if it has been updated while it
		 * wasn't resident.
		 */
		if (img_handle->desc_dirty)
			sctx->bindless_descriptors_dirty = true;

		/* Add the image handle to the per-context list. */
		util_dynarray_append(&sctx->resident_img_handles,
				     struct si_image_handle *, img_handle);

		/* Add the buffers to the current CS in case si_begin_new_cs()
		 * is not going to be called.
		 */
		si_sampler_view_add_buffer(sctx, view->resource,
					   (access & PIPE_IMAGE_ACCESS_WRITE) ?
					   RADEON_USAGE_READWRITE :
					   RADEON_USAGE_READ, false, false);
	} else {
		/* Remove the image handle from the per-context list. */
		util_dynarray_delete_unordered(&sctx->resident_img_handles,
					       struct si_image_handle *,
					       img_handle);

		if (res->b.b.target != PIPE_BUFFER) {
			util_dynarray_delete_unordered(
				&sctx->resident_img_needs_color_decompress,
				struct si_image_handle *,
				img_handle);
		}
	}
}
void si_all_resident_buffers_begin_new_cs(struct si_context *sctx)
{
	unsigned num_resident_tex_handles, num_resident_img_handles;

	num_resident_tex_handles = sctx->resident_tex_handles.size /
				   sizeof(struct si_texture_handle *);
	num_resident_img_handles = sctx->resident_img_handles.size /
				   sizeof(struct si_image_handle *);

	/* Add all resident texture handles. */
	util_dynarray_foreach(&sctx->resident_tex_handles,
			      struct si_texture_handle *, tex_handle) {
		struct si_sampler_view *sview =
			(struct si_sampler_view *)(*tex_handle)->view;

		si_sampler_view_add_buffer(sctx, sview->base.texture,
					   RADEON_USAGE_READ,
					   sview->is_stencil_sampler, false);
	}

	/* Add all resident image handles. */
	util_dynarray_foreach(&sctx->resident_img_handles,
			      struct si_image_handle *, img_handle) {
		struct pipe_image_view *view = &(*img_handle)->view;

		si_sampler_view_add_buffer(sctx, view->resource,
					   RADEON_USAGE_READWRITE,
					   false, false);
	}

	sctx->b.num_resident_handles += num_resident_tex_handles +
					num_resident_img_handles;
}
/* INIT/DEINIT/UPLOAD */

void si_init_all_descriptors(struct si_context *sctx)
{
	int i;

	STATIC_ASSERT(GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS % 2 == 0);
	STATIC_ASSERT(GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS % 2 == 0);

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		bool gfx9_tcs = false;
		bool gfx9_gs = false;
		unsigned num_sampler_slots = SI_NUM_IMAGES / 2 + SI_NUM_SAMPLERS;
		unsigned num_buffer_slots = SI_NUM_SHADER_BUFFERS + SI_NUM_CONST_BUFFERS;

		if (sctx->b.chip_class >= GFX9) {
			gfx9_tcs = i == PIPE_SHADER_TESS_CTRL;
			gfx9_gs = i == PIPE_SHADER_GEOMETRY;
		}

		si_init_buffer_resources(&sctx->const_and_shader_buffers[i],
					 si_const_and_shader_buffer_descriptors(sctx, i),
					 num_buffer_slots,
					 gfx9_tcs ? GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS :
					 gfx9_gs ? GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS :
						   SI_SGPR_CONST_AND_SHADER_BUFFERS,
					 RADEON_USAGE_READWRITE,
					 RADEON_USAGE_READ,
					 RADEON_PRIO_SHADER_RW_BUFFER,
					 RADEON_PRIO_CONST_BUFFER);

		struct si_descriptors *desc = si_sampler_and_image_descriptors(sctx, i);
		si_init_descriptors(desc,
				    gfx9_tcs ? GFX9_SGPR_TCS_SAMPLERS_AND_IMAGES :
				    gfx9_gs ? GFX9_SGPR_GS_SAMPLERS_AND_IMAGES :
					      SI_SGPR_SAMPLERS_AND_IMAGES,
				    16, num_sampler_slots);

		int j;
		for (j = 0; j < SI_NUM_IMAGES; j++)
			memcpy(desc->list + j * 8, null_image_descriptor, 8 * 4);
		for (; j < SI_NUM_IMAGES + SI_NUM_SAMPLERS * 2; j++)
			memcpy(desc->list + j * 8, null_texture_descriptor, 8 * 4);
	}

	si_init_buffer_resources(&sctx->rw_buffers,
				 &sctx->descriptors[SI_DESCS_RW_BUFFERS],
				 SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
				 /* The second set of usage/priority is used by
				  * const buffers in RW buffer slots. */
				 RADEON_USAGE_READWRITE, RADEON_USAGE_READ,
				 RADEON_PRIO_SHADER_RINGS, RADEON_PRIO_CONST_BUFFER);
	sctx->descriptors[SI_DESCS_RW_BUFFERS].num_active_slots = SI_NUM_RW_BUFFERS;

	si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
			    4, SI_NUM_VERTEX_BUFFERS);
	FREE(sctx->vertex_buffers.list); /* not used */
	sctx->vertex_buffers.list = NULL;

	/* Initialize an array of 1024 bindless descriptors; when the limit is
	 * reached, just make it larger and re-upload the whole array.
	 */
	si_init_bindless_descriptors(sctx, &sctx->bindless_descriptors,
				     SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES,
				     1024);

	sctx->descriptors_dirty = u_bit_consecutive(0, SI_NUM_DESCS);

	/* Set pipe_context functions. */
	sctx->b.b.bind_sampler_states = si_bind_sampler_states;
	sctx->b.b.set_shader_images = si_set_shader_images;
	sctx->b.b.set_constant_buffer = si_pipe_set_constant_buffer;
	sctx->b.b.set_polygon_stipple = si_set_polygon_stipple;
	sctx->b.b.set_shader_buffers = si_set_shader_buffers;
	sctx->b.b.set_sampler_views = si_set_sampler_views;
	sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
	sctx->b.b.create_texture_handle = si_create_texture_handle;
	sctx->b.b.delete_texture_handle = si_delete_texture_handle;
	sctx->b.b.make_texture_handle_resident = si_make_texture_handle_resident;
	sctx->b.b.create_image_handle = si_create_image_handle;
	sctx->b.b.delete_image_handle = si_delete_image_handle;
	sctx->b.b.make_image_handle_resident = si_make_image_handle_resident;
	sctx->b.invalidate_buffer = si_invalidate_buffer;
	sctx->b.rebind_buffer = si_rebind_buffer;

	/* Shader user data. */
	si_init_atom(sctx, &sctx->shader_pointers.atom, &sctx->atoms.s.shader_pointers,
		     si_emit_graphics_shader_pointers);

	/* Set default and immutable mappings. */
	si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);

	if (sctx->b.chip_class >= GFX9) {
		si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL,
				      R_00B430_SPI_SHADER_USER_DATA_LS_0);
		si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY,
				      R_00B330_SPI_SHADER_USER_DATA_ES_0);
	} else {
		si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL,
				      R_00B430_SPI_SHADER_USER_DATA_HS_0);
		si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY,
				      R_00B230_SPI_SHADER_USER_DATA_GS_0);
	}
	si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
}
static bool si_upload_shader_descriptors(struct si_context *sctx, unsigned mask)
{
	unsigned dirty = sctx->descriptors_dirty & mask;

	/* Assume nothing will go wrong: */
	sctx->shader_pointers_dirty |= dirty;

	while (dirty) {
		unsigned i = u_bit_scan(&dirty);

		if (!si_upload_descriptors(sctx, &sctx->descriptors[i],
					   &sctx->shader_pointers.atom))
			return false;
	}

	sctx->descriptors_dirty &= ~mask;

	si_upload_bindless_descriptors(sctx);

	return true;
}
bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
{
	const unsigned mask = u_bit_consecutive(0, SI_DESCS_FIRST_COMPUTE);
	return si_upload_shader_descriptors(sctx, mask);
}

bool si_upload_compute_shader_descriptors(struct si_context *sctx)
{
	/* Does not update rw_buffers as that is not needed for compute shaders
	 * and the input buffer is using the same SGPRs anyway.
	 */
	const unsigned mask = u_bit_consecutive(SI_DESCS_FIRST_COMPUTE,
						SI_NUM_DESCS - SI_DESCS_FIRST_COMPUTE);
	return si_upload_shader_descriptors(sctx, mask);
}
void si_release_all_descriptors(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_release_buffer_resources(&sctx->const_and_shader_buffers[i],
					    si_const_and_shader_buffer_descriptors(sctx, i));
		si_release_sampler_views(&sctx->samplers[i].views);
		si_release_image_views(&sctx->images[i]);
	}
	si_release_buffer_resources(&sctx->rw_buffers,
				    &sctx->descriptors[SI_DESCS_RW_BUFFERS]);
	for (i = 0; i < SI_NUM_VERTEX_BUFFERS; i++)
		pipe_vertex_buffer_unreference(&sctx->vertex_buffer[i]);

	for (i = 0; i < SI_NUM_DESCS; ++i)
		si_release_descriptors(&sctx->descriptors[i]);

	sctx->vertex_buffers.list = NULL; /* points into a mapped buffer */
	si_release_descriptors(&sctx->vertex_buffers);
	si_release_bindless_descriptors(sctx);
}
void si_all_descriptors_begin_new_cs(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_buffer_resources_begin_new_cs(sctx, &sctx->const_and_shader_buffers[i]);
		si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
		si_image_views_begin_new_cs(sctx, &sctx->images[i]);
	}
	si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
	si_vertex_buffers_begin_new_cs(sctx);

	for (i = 0; i < SI_NUM_DESCS; ++i)
		si_descriptors_begin_new_cs(sctx, &sctx->descriptors[i]);
	si_descriptors_begin_new_cs(sctx, &sctx->bindless_descriptors);

	si_shader_pointers_begin_new_cs(sctx);
}
void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
			       uint64_t new_active_mask)
{
	struct si_descriptors *desc = &sctx->descriptors[desc_idx];

	/* Ignore no-op updates and updates that disable all slots. */
	if (!new_active_mask ||
	    new_active_mask == u_bit_consecutive64(desc->first_active_slot,
						   desc->num_active_slots))
		return;

	int first, count;
	u_bit_scan_consecutive_range64(&new_active_mask, &first, &count);
	assert(new_active_mask == 0);

	/* Upload/dump descriptors if slots are being enabled. */
	if (first < desc->first_active_slot ||
	    first + count > desc->first_active_slot + desc->num_active_slots)
		sctx->descriptors_dirty |= 1u << desc_idx;

	desc->first_active_slot = first;
	desc->num_active_slots = count;
}
void si_set_active_descriptors_for_shader(struct si_context *sctx,
					  struct si_shader_selector *sel)
{
	if (!sel)
		return;

	si_set_active_descriptors(sctx,
		si_const_and_shader_buffer_descriptors_idx(sel->type),
		sel->active_const_and_shader_buffers);
	si_set_active_descriptors(sctx,
		si_sampler_and_image_descriptors_idx(sel->type),
		sel->active_samplers_and_images);
}