gallium: Use enum pipe_shader_type in bind_sampler_states() (v2)
[mesa.git] / src / gallium / drivers / radeonsi / si_descriptors.c
1 /*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Marek Olšák <marek.olsak@amd.com>
25 */
26
27 /* Resource binding slots and sampler states (each described with 8 or
28 * 4 dwords) are stored in lists in memory which is accessed by shaders
29 * using scalar load instructions.
30 *
31 * This file is responsible for managing such lists. It keeps a copy of all
32 * descriptors in CPU memory and re-uploads a whole list if some slots have
33 * been changed.
34 *
35 * This code is also responsible for updating shader pointers to those lists.
36 *
37 * Note that CP DMA can't be used for updating the lists, because a GPU hang
38 * could leave the list in a mid-IB state and the next IB would get wrong
39 * descriptors and the whole context would be unusable at that point.
40 * (Note: register shadowing can't be used for the same reason.)
41 *
42 * Also, uploading descriptors to newly allocated memory doesn't require
43 * a KCACHE flush.
44 *
45 *
46 * Possible scenarios for one 16 dword image+sampler slot:
47 *
48 * | Image | w/ FMASK | Buffer | NULL
49 * [ 0: 3] Image[0:3] | Image[0:3] | Null[0:3] | Null[0:3]
50 * [ 4: 7] Image[4:7] | Image[4:7] | Buffer[0:3] | 0
51 * [ 8:11] Null[0:3] | Fmask[0:3] | Null[0:3] | Null[0:3]
52 * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
53 *
54 * FMASK implies MSAA, therefore no sampler state.
55 * Sampler states are never unbound except when FMASK is bound.
56 */
57
58 #include "radeon/r600_cs.h"
59 #include "si_pipe.h"
60 #include "si_shader.h"
61 #include "sid.h"
62
63 #include "util/u_format.h"
64 #include "util/u_math.h"
65 #include "util/u_memory.h"
66 #include "util/u_suballoc.h"
67 #include "util/u_upload_mgr.h"
68
69
70 /* NULL image and buffer descriptor for textures (alpha = 1) and images
71 * (alpha = 0).
72 *
73 * For images, all fields must be zero except for the swizzle, which
74 * supports arbitrary combinations of 0s and 1s. The texture type must be
75 * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
76 *
77 * For buffers, all fields must be zero. If they are not, the hw hangs.
78 *
79 * This is the only reason why the buffer descriptor must be in words [4:7].
80 */
81 static uint32_t null_texture_descriptor[8] = {
82 0,
83 0,
84 0,
85 S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
86 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
87 /* the rest must contain zeros, which also serves as the null buffer
88 * descriptor */
89 };
90
91 static uint32_t null_image_descriptor[8] = {
92 0,
93 0,
94 0,
95 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
96 /* the rest must contain zeros, which also serves as the null buffer
97 * descriptor */
98 };
99
100 static void si_init_descriptors(struct si_descriptors *desc,
101 unsigned shader_userdata_index,
102 unsigned element_dw_size,
103 unsigned num_elements,
104 const uint32_t *null_descriptor,
105 unsigned *ce_offset)
106 {
107 int i;
108
109 assert(num_elements <= sizeof(desc->dirty_mask)*8);
110
111 desc->list = CALLOC(num_elements, element_dw_size * 4);
112 desc->element_dw_size = element_dw_size;
113 desc->num_elements = num_elements;
114 desc->dirty_mask = num_elements == 32 ? ~0u : (1u << num_elements) - 1;
115 desc->shader_userdata_offset = shader_userdata_index * 4;
116
117 if (ce_offset) {
118 desc->ce_offset = *ce_offset;
119
120 /* make sure that ce_offset stays 32-byte aligned */
121 *ce_offset += align(element_dw_size * num_elements * 4, 32);
122 }
123
124 /* Initialize the array to NULL descriptors if the element size is a multiple of 8 dwords. */
125 if (null_descriptor) {
126 assert(element_dw_size % 8 == 0);
127 for (i = 0; i < num_elements * element_dw_size / 8; i++)
128 memcpy(desc->list + i * 8, null_descriptor,
129 8 * 4);
130 }
131 }
132
133 static void si_release_descriptors(struct si_descriptors *desc)
134 {
135 r600_resource_reference(&desc->buffer, NULL);
136 FREE(desc->list);
137 }
138
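/* Suballocate a GPU-visible buffer and copy the given CE RAM range into it
 * with DUMP_CONST_RAM, so shaders can read the descriptors through the
 * returned buffer/offset pair. */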
139 static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
140 unsigned *out_offset, struct r600_resource **out_buf) {
141 uint64_t va;
142
143 u_suballocator_alloc(sctx->ce_suballocator, size, 64, out_offset,
144 (struct pipe_resource**)out_buf);
145 if (!*out_buf)
146 return false;
147
148 va = (*out_buf)->gpu_address + *out_offset;
149
150 radeon_emit(sctx->ce_ib, PKT3(PKT3_DUMP_CONST_RAM, 3, 0));
151 radeon_emit(sctx->ce_ib, ce_offset);
152 radeon_emit(sctx->ce_ib, size / 4);
153 radeon_emit(sctx->ce_ib, va);
154 radeon_emit(sctx->ce_ib, va >> 32);
155
156 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, *out_buf,
157 RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
158
159 sctx->ce_need_synchronization = true;
160 return true;
161 }
162
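/* Reload the CE RAM copy of a descriptor list from its last uploaded buffer
 * (LOAD_CONST_RAM), e.g. at the start of a new CS where the CE RAM contents
 * are no longer valid. */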
163 static void si_ce_reinitialize_descriptors(struct si_context *sctx,
164 struct si_descriptors *desc)
165 {
166 if (desc->buffer) {
167 struct r600_resource *buffer = (struct r600_resource*)desc->buffer;
168 unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
169 uint64_t va = buffer->gpu_address + desc->buffer_offset;
170 struct radeon_winsys_cs *ib = sctx->ce_preamble_ib;
171
172 if (!ib)
173 ib = sctx->ce_ib;
174
175 list_size = align(list_size, 32);
176
177 radeon_emit(ib, PKT3(PKT3_LOAD_CONST_RAM, 3, 0));
178 radeon_emit(ib, va);
179 radeon_emit(ib, va >> 32);
180 radeon_emit(ib, list_size / 4);
181 radeon_emit(ib, desc->ce_offset);
182
183 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
184 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
185 }
186 desc->ce_ram_dirty = false;
187 }
188
189 void si_ce_reinitialize_all_descriptors(struct si_context *sctx)
190 {
191 int i;
192
193 for (i = 0; i < SI_NUM_DESCS; ++i)
194 si_ce_reinitialize_descriptors(sctx, &sctx->descriptors[i]);
195 }
196
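/* Emit CONTEXT_CONTROL to enable loads (including CE RAM loads) and register
 * shadowing for this IB. */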
197 void si_ce_enable_loads(struct radeon_winsys_cs *ib)
198 {
199 radeon_emit(ib, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
200 radeon_emit(ib, CONTEXT_CONTROL_LOAD_ENABLE(1) |
201 CONTEXT_CONTROL_LOAD_CE_RAM(1));
202 radeon_emit(ib, CONTEXT_CONTROL_SHADOW_ENABLE(1));
203 }
204
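/* Upload a dirty descriptor list: on the CE path, write only the dirty slots
 * into CE RAM and dump the whole list into a fresh suballocation; on the
 * non-CE path, copy the complete list into a new upload buffer. */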
205 static bool si_upload_descriptors(struct si_context *sctx,
206 struct si_descriptors *desc,
207 struct r600_atom * atom)
208 {
209 unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
210
211 if (!desc->dirty_mask)
212 return true;
213
214 if (sctx->ce_ib) {
215 uint32_t const* list = (uint32_t const*)desc->list;
216
217 if (desc->ce_ram_dirty)
218 si_ce_reinitialize_descriptors(sctx, desc);
219
220 while(desc->dirty_mask) {
221 int begin, count;
222 u_bit_scan_consecutive_range(&desc->dirty_mask, &begin,
223 &count);
224
225 begin *= desc->element_dw_size;
226 count *= desc->element_dw_size;
227
228 radeon_emit(sctx->ce_ib,
229 PKT3(PKT3_WRITE_CONST_RAM, count, 0));
230 radeon_emit(sctx->ce_ib, desc->ce_offset + begin * 4);
231 radeon_emit_array(sctx->ce_ib, list + begin, count);
232 }
233
234 if (!si_ce_upload(sctx, desc->ce_offset, list_size,
235 &desc->buffer_offset, &desc->buffer))
236 return false;
237 } else {
238 void *ptr;
239
240 u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
241 &desc->buffer_offset,
242 (struct pipe_resource**)&desc->buffer, &ptr);
243 if (!desc->buffer)
244 return false; /* skip the draw call */
245
246 util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
247
248 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
249 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
250 }
251 desc->pointer_dirty = true;
252 desc->dirty_mask = 0;
253
254 if (atom)
255 si_mark_atom_dirty(sctx, atom);
256
257 return true;
258 }
259
260 static void
261 si_descriptors_begin_new_cs(struct si_context *sctx, struct si_descriptors *desc)
262 {
263 desc->ce_ram_dirty = true;
264
265 if (!desc->buffer)
266 return;
267
268 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
269 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
270 }
271
272 /* SAMPLER VIEWS */
273
274 static unsigned
275 si_sampler_descriptors_idx(unsigned shader)
276 {
277 return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
278 SI_SHADER_DESCS_SAMPLERS;
279 }
280
281 static struct si_descriptors *
282 si_sampler_descriptors(struct si_context *sctx, unsigned shader)
283 {
284 return &sctx->descriptors[si_sampler_descriptors_idx(shader)];
285 }
286
287 static void si_release_sampler_views(struct si_sampler_views *views)
288 {
289 int i;
290
291 for (i = 0; i < ARRAY_SIZE(views->views); i++) {
292 pipe_sampler_view_reference(&views->views[i], NULL);
293 }
294 }
295
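/* Add the buffer backing a sampler/image view to the current CS, switching to
 * the flushed depth texture when depth can't be sampled directly and also
 * adding the separate DCC buffer if present. */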
296 static void si_sampler_view_add_buffer(struct si_context *sctx,
297 struct pipe_resource *resource,
298 enum radeon_bo_usage usage,
299 bool is_stencil_sampler,
300 bool check_mem)
301 {
302 struct r600_resource *rres;
303 struct r600_texture *rtex;
304 enum radeon_bo_priority priority;
305
306 if (!resource)
307 return;
308
309 if (resource->target != PIPE_BUFFER) {
310 struct r600_texture *tex = (struct r600_texture*)resource;
311
312 if (tex->is_depth && !r600_can_sample_zs(tex, is_stencil_sampler))
313 resource = &tex->flushed_depth_texture->resource.b.b;
314 }
315
316 rres = (struct r600_resource*)resource;
317 priority = r600_get_sampler_view_priority(rres);
318
319 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
320 rres, usage, priority,
321 check_mem);
322
323 if (resource->target == PIPE_BUFFER)
324 return;
325
326 /* Now add separate DCC if it's present. */
327 rtex = (struct r600_texture*)resource;
328 if (!rtex->dcc_separate_buffer)
329 return;
330
331 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
332 rtex->dcc_separate_buffer, usage,
333 RADEON_PRIO_DCC, check_mem);
334 }
335
336 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
337 struct si_sampler_views *views)
338 {
339 unsigned mask = views->enabled_mask;
340
341 /* Add buffers to the CS. */
342 while (mask) {
343 int i = u_bit_scan(&mask);
344 struct si_sampler_view *sview = (struct si_sampler_view *)views->views[i];
345
346 si_sampler_view_add_buffer(sctx, sview->base.texture,
347 RADEON_USAGE_READ,
348 sview->is_stencil_sampler, false);
349 }
350 }
351
352 /* Set texture descriptor fields that can be changed by reallocations.
353 *
354 * \param tex texture
355 * \param base_level_info information of the level of BASE_ADDRESS
356 * \param base_level the level of BASE_ADDRESS
357 * \param first_level pipe_sampler_view.u.tex.first_level
358 * \param block_width util_format_get_blockwidth()
359 * \param is_stencil select between separate Z & Stencil
360 * \param state descriptor to update
361 */
362 void si_set_mutable_tex_desc_fields(struct r600_texture *tex,
363 const struct radeon_surf_level *base_level_info,
364 unsigned base_level, unsigned first_level,
365 unsigned block_width, bool is_stencil,
366 uint32_t *state)
367 {
368 uint64_t va;
369 unsigned pitch = base_level_info->nblk_x * block_width;
370
371 if (tex->is_depth && !r600_can_sample_zs(tex, is_stencil)) {
372 tex = tex->flushed_depth_texture;
373 is_stencil = false;
374 }
375
376 va = tex->resource.gpu_address + base_level_info->offset;
377
378 state[1] &= C_008F14_BASE_ADDRESS_HI;
379 state[3] &= C_008F1C_TILING_INDEX;
380 state[4] &= C_008F20_PITCH;
381 state[6] &= C_008F28_COMPRESSION_EN;
382
383 state[0] = va >> 8;
384 state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);
385 state[3] |= S_008F1C_TILING_INDEX(si_tile_mode_index(tex, base_level,
386 is_stencil));
387 state[4] |= S_008F20_PITCH(pitch - 1);
388
389 if (tex->dcc_offset && tex->surface.level[first_level].dcc_enabled) {
390 state[6] |= S_008F28_COMPRESSION_EN(1);
391 state[7] = ((!tex->dcc_separate_buffer ? tex->resource.gpu_address : 0) +
392 tex->dcc_offset +
393 base_level_info->dcc_offset) >> 8;
394 }
395 }
396
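/* Bind or unbind one sampler view slot: reference the view, fill in the image
 * (and FMASK or sampler state) words of the 16-dword slot, and mark the
 * descriptor list dirty. */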
397 static void si_set_sampler_view(struct si_context *sctx,
398 unsigned shader,
399 unsigned slot, struct pipe_sampler_view *view,
400 bool disallow_early_out)
401 {
402 struct si_sampler_views *views = &sctx->samplers[shader].views;
403 struct si_sampler_view *rview = (struct si_sampler_view*)view;
404 struct si_descriptors *descs = si_sampler_descriptors(sctx, shader);
405
406 if (views->views[slot] == view && !disallow_early_out)
407 return;
408
409 if (view) {
410 struct r600_texture *rtex = (struct r600_texture *)view->texture;
411 uint32_t *desc = descs->list + slot * 16;
412
413 si_sampler_view_add_buffer(sctx, view->texture,
414 RADEON_USAGE_READ,
415 rview->is_stencil_sampler, true);
416
417 pipe_sampler_view_reference(&views->views[slot], view);
418 memcpy(desc, rview->state, 8*4);
419
420 if (view->texture && view->texture->target != PIPE_BUFFER) {
421 bool is_separate_stencil =
422 rtex->db_compatible &&
423 rview->is_stencil_sampler;
424
425 si_set_mutable_tex_desc_fields(rtex,
426 rview->base_level_info,
427 rview->base_level,
428 rview->base.u.tex.first_level,
429 rview->block_width,
430 is_separate_stencil,
431 desc);
432 }
433
434 if (view->texture && view->texture->target != PIPE_BUFFER &&
435 rtex->fmask.size) {
436 memcpy(desc + 8,
437 rview->fmask_state, 8*4);
438 } else {
439 /* Disable FMASK and bind sampler state in [12:15]. */
440 memcpy(desc + 8,
441 null_texture_descriptor, 4*4);
442
443 if (views->sampler_states[slot])
444 memcpy(desc + 12,
445 views->sampler_states[slot], 4*4);
446 }
447
448 views->enabled_mask |= 1u << slot;
449 } else {
450 pipe_sampler_view_reference(&views->views[slot], NULL);
451 memcpy(descs->list + slot*16, null_texture_descriptor, 8*4);
452 /* Only clear the lower dwords of FMASK. */
453 memcpy(descs->list + slot*16 + 8, null_texture_descriptor, 4*4);
454 views->enabled_mask &= ~(1u << slot);
455 }
456
457 descs->dirty_mask |= 1u << slot;
458 sctx->descriptors_dirty |= 1u << si_sampler_descriptors_idx(shader);
459 }
460
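/* A color texture is considered compressed if it has CMASK or FMASK metadata,
 * or DCC with levels that still need decompression before texturing. */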
461 static bool is_compressed_colortex(struct r600_texture *rtex)
462 {
463 return rtex->cmask.size || rtex->fmask.size ||
464 (rtex->dcc_offset && rtex->dirty_level_mask);
465 }
466
467 static void si_set_sampler_views(struct pipe_context *ctx,
468 unsigned shader, unsigned start,
469 unsigned count,
470 struct pipe_sampler_view **views)
471 {
472 struct si_context *sctx = (struct si_context *)ctx;
473 struct si_textures_info *samplers = &sctx->samplers[shader];
474 int i;
475
476 if (!count || shader >= SI_NUM_SHADERS)
477 return;
478
479 for (i = 0; i < count; i++) {
480 unsigned slot = start + i;
481
482 if (!views || !views[i]) {
483 samplers->depth_texture_mask &= ~(1u << slot);
484 samplers->compressed_colortex_mask &= ~(1u << slot);
485 si_set_sampler_view(sctx, shader, slot, NULL, false);
486 continue;
487 }
488
489 si_set_sampler_view(sctx, shader, slot, views[i], false);
490
491 if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
492 struct r600_texture *rtex =
493 (struct r600_texture*)views[i]->texture;
494
495 if (rtex->db_compatible) {
496 samplers->depth_texture_mask |= 1u << slot;
497 } else {
498 samplers->depth_texture_mask &= ~(1u << slot);
499 }
500 if (is_compressed_colortex(rtex)) {
501 samplers->compressed_colortex_mask |= 1u << slot;
502 } else {
503 samplers->compressed_colortex_mask &= ~(1u << slot);
504 }
505
506 if (rtex->dcc_offset &&
507 p_atomic_read(&rtex->framebuffers_bound))
508 sctx->need_check_render_feedback = true;
509 } else {
510 samplers->depth_texture_mask &= ~(1u << slot);
511 samplers->compressed_colortex_mask &= ~(1u << slot);
512 }
513 }
514 }
515
516 static void
517 si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
518 {
519 unsigned mask = samplers->views.enabled_mask;
520
521 while (mask) {
522 int i = u_bit_scan(&mask);
523 struct pipe_resource *res = samplers->views.views[i]->texture;
524
525 if (res && res->target != PIPE_BUFFER) {
526 struct r600_texture *rtex = (struct r600_texture *)res;
527
528 if (is_compressed_colortex(rtex)) {
529 samplers->compressed_colortex_mask |= 1u << i;
530 } else {
531 samplers->compressed_colortex_mask &= ~(1u << i);
532 }
533 }
534 }
535 }
536
537 /* IMAGE VIEWS */
538
539 static unsigned
540 si_image_descriptors_idx(unsigned shader)
541 {
542 return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
543 SI_SHADER_DESCS_IMAGES;
544 }
545
546 static struct si_descriptors*
547 si_image_descriptors(struct si_context *sctx, unsigned shader)
548 {
549 return &sctx->descriptors[si_image_descriptors_idx(shader)];
550 }
551
552 static void
553 si_release_image_views(struct si_images_info *images)
554 {
555 unsigned i;
556
557 for (i = 0; i < SI_NUM_IMAGES; ++i) {
558 struct pipe_image_view *view = &images->views[i];
559
560 pipe_resource_reference(&view->resource, NULL);
561 }
562 }
563
564 static void
565 si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
566 {
567 uint mask = images->enabled_mask;
568
569 /* Add buffers to the CS. */
570 while (mask) {
571 int i = u_bit_scan(&mask);
572 struct pipe_image_view *view = &images->views[i];
573
574 assert(view->resource);
575
576 si_sampler_view_add_buffer(sctx, view->resource,
577 RADEON_USAGE_READWRITE, false, false);
578 }
579 }
580
581 static void
582 si_disable_shader_image(struct si_context *ctx, unsigned shader, unsigned slot)
583 {
584 struct si_images_info *images = &ctx->images[shader];
585
586 if (images->enabled_mask & (1u << slot)) {
587 struct si_descriptors *descs = si_image_descriptors(ctx, shader);
588
589 pipe_resource_reference(&images->views[slot].resource, NULL);
590 images->compressed_colortex_mask &= ~(1 << slot);
591
592 memcpy(descs->list + slot*8, null_image_descriptor, 8*4);
593 images->enabled_mask &= ~(1u << slot);
594 descs->dirty_mask |= 1u << slot;
595 ctx->descriptors_dirty |= 1u << si_image_descriptors_idx(shader);
596 }
597 }
598
599 static void
600 si_mark_image_range_valid(const struct pipe_image_view *view)
601 {
602 struct r600_resource *res = (struct r600_resource *)view->resource;
603
604 assert(res && res->b.b.target == PIPE_BUFFER);
605
606 util_range_add(&res->valid_buffer_range,
607 view->u.buf.offset,
608 view->u.buf.offset + view->u.buf.size);
609 }
610
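/* Bind one image slot: build a buffer or texture descriptor for the view,
 * decompressing or disabling DCC for writable images, and update the
 * compressed_colortex mask and dirty bits. */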
611 static void si_set_shader_image(struct si_context *ctx,
612 unsigned shader,
613 unsigned slot, const struct pipe_image_view *view)
614 {
615 struct si_screen *screen = ctx->screen;
616 struct si_images_info *images = &ctx->images[shader];
617 struct si_descriptors *descs = si_image_descriptors(ctx, shader);
618 struct r600_resource *res;
619
620 if (!view || !view->resource) {
621 si_disable_shader_image(ctx, shader, slot);
622 return;
623 }
624
625 res = (struct r600_resource *)view->resource;
626
627 if (&images->views[slot] != view)
628 util_copy_image_view(&images->views[slot], view);
629
630 si_sampler_view_add_buffer(ctx, &res->b.b,
631 RADEON_USAGE_READWRITE, false, true);
632
633 if (res->b.b.target == PIPE_BUFFER) {
634 if (view->access & PIPE_IMAGE_ACCESS_WRITE)
635 si_mark_image_range_valid(view);
636
637 si_make_buffer_descriptor(screen, res,
638 view->format,
639 view->u.buf.offset,
640 view->u.buf.size,
641 descs->list + slot * 8);
642 images->compressed_colortex_mask &= ~(1 << slot);
643 } else {
644 static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
645 struct r600_texture *tex = (struct r600_texture *)res;
646 unsigned level = view->u.tex.level;
647 unsigned width, height, depth;
648 uint32_t *desc = descs->list + slot * 8;
649 bool uses_dcc = tex->dcc_offset &&
650 tex->surface.level[level].dcc_enabled;
651
652 assert(!tex->is_depth);
653 assert(tex->fmask.size == 0);
654
655 if (uses_dcc &&
656 view->access & PIPE_IMAGE_ACCESS_WRITE) {
657 /* If DCC can't be disabled, at least decompress it.
658 * The decompression is relatively cheap if the surface
659 * has been decompressed already.
660 */
661 if (r600_texture_disable_dcc(&ctx->b, tex))
662 uses_dcc = false;
663 else
664 ctx->b.decompress_dcc(&ctx->b.b, tex);
665 }
666
667 if (is_compressed_colortex(tex)) {
668 images->compressed_colortex_mask |= 1 << slot;
669 } else {
670 images->compressed_colortex_mask &= ~(1 << slot);
671 }
672
673 if (uses_dcc &&
674 p_atomic_read(&tex->framebuffers_bound))
675 ctx->need_check_render_feedback = true;
676
677 /* Always force the base level to the selected level.
678 *
679 * This is required for 3D textures, where otherwise
680 * selecting a single slice for non-layered bindings
681 * fails. It doesn't hurt the other targets.
682 */
683 width = u_minify(res->b.b.width0, level);
684 height = u_minify(res->b.b.height0, level);
685 depth = u_minify(res->b.b.depth0, level);
686
687 si_make_texture_descriptor(screen, tex,
688 false, res->b.b.target,
689 view->format, swizzle,
690 0, 0,
691 view->u.tex.first_layer,
692 view->u.tex.last_layer,
693 width, height, depth,
694 desc, NULL);
695 si_set_mutable_tex_desc_fields(tex, &tex->surface.level[level],
696 level, level,
697 util_format_get_blockwidth(view->format),
698 false, desc);
699 }
700
701 images->enabled_mask |= 1u << slot;
702 descs->dirty_mask |= 1u << slot;
703 ctx->descriptors_dirty |= 1u << si_image_descriptors_idx(shader);
704 }
705
706 static void
707 si_set_shader_images(struct pipe_context *pipe, unsigned shader,
708 unsigned start_slot, unsigned count,
709 const struct pipe_image_view *views)
710 {
711 struct si_context *ctx = (struct si_context *)pipe;
712 unsigned i, slot;
713
714 assert(shader < SI_NUM_SHADERS);
715
716 if (!count)
717 return;
718
719 assert(start_slot + count <= SI_NUM_IMAGES);
720
721 if (views) {
722 for (i = 0, slot = start_slot; i < count; ++i, ++slot)
723 si_set_shader_image(ctx, shader, slot, &views[i]);
724 } else {
725 for (i = 0, slot = start_slot; i < count; ++i, ++slot)
726 si_set_shader_image(ctx, shader, slot, NULL);
727 }
728 }
729
730 static void
731 si_images_update_compressed_colortex_mask(struct si_images_info *images)
732 {
733 unsigned mask = images->enabled_mask;
734
735 while (mask) {
736 int i = u_bit_scan(&mask);
737 struct pipe_resource *res = images->views[i].resource;
738
739 if (res && res->target != PIPE_BUFFER) {
740 struct r600_texture *rtex = (struct r600_texture *)res;
741
742 if (is_compressed_colortex(rtex)) {
743 images->compressed_colortex_mask |= 1 << i;
744 } else {
745 images->compressed_colortex_mask &= ~(1 << i);
746 }
747 }
748 }
749 }
750
751 /* SAMPLER STATES */
752
753 static void si_bind_sampler_states(struct pipe_context *ctx,
754 enum pipe_shader_type shader,
755 unsigned start, unsigned count, void **states)
756 {
757 struct si_context *sctx = (struct si_context *)ctx;
758 struct si_textures_info *samplers = &sctx->samplers[shader];
759 struct si_descriptors *desc = si_sampler_descriptors(sctx, shader);
760 struct si_sampler_state **sstates = (struct si_sampler_state**)states;
761 int i;
762
763 if (!count || shader >= SI_NUM_SHADERS)
764 return;
765
766 for (i = 0; i < count; i++) {
767 unsigned slot = start + i;
768
769 if (!sstates[i] ||
770 sstates[i] == samplers->views.sampler_states[slot])
771 continue;
772
773 samplers->views.sampler_states[slot] = sstates[i];
774
775 /* If FMASK is bound, don't overwrite it.
776 * The sampler state will be set after FMASK is unbound.
777 */
778 if (samplers->views.views[slot] &&
779 samplers->views.views[slot]->texture &&
780 samplers->views.views[slot]->texture->target != PIPE_BUFFER &&
781 ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
782 continue;
783
784 memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
785 desc->dirty_mask |= 1u << slot;
786 sctx->descriptors_dirty |= 1u << si_sampler_descriptors_idx(shader);
787 }
788 }
789
790 /* BUFFER RESOURCES */
791
792 static void si_init_buffer_resources(struct si_buffer_resources *buffers,
793 struct si_descriptors *descs,
794 unsigned num_buffers,
795 unsigned shader_userdata_index,
796 enum radeon_bo_usage shader_usage,
797 enum radeon_bo_priority priority,
798 unsigned *ce_offset)
799 {
800 buffers->shader_usage = shader_usage;
801 buffers->priority = priority;
802 buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
803
804 si_init_descriptors(descs, shader_userdata_index, 4,
805 num_buffers, NULL, ce_offset);
806 }
807
808 static void si_release_buffer_resources(struct si_buffer_resources *buffers,
809 struct si_descriptors *descs)
810 {
811 int i;
812
813 for (i = 0; i < descs->num_elements; i++) {
814 pipe_resource_reference(&buffers->buffers[i], NULL);
815 }
816
817 FREE(buffers->buffers);
818 }
819
820 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
821 struct si_buffer_resources *buffers)
822 {
823 unsigned mask = buffers->enabled_mask;
824
825 /* Add buffers to the CS. */
826 while (mask) {
827 int i = u_bit_scan(&mask);
828
829 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
830 (struct r600_resource*)buffers->buffers[i],
831 buffers->shader_usage, buffers->priority);
832 }
833 }
834
835 /* VERTEX BUFFERS */
836
837 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
838 {
839 struct si_descriptors *desc = &sctx->vertex_buffers;
840 int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
841 int i;
842
843 for (i = 0; i < count; i++) {
844 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
845
846 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
847 continue;
848 if (!sctx->vertex_buffer[vb].buffer)
849 continue;
850
851 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
852 (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
853 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
854 }
855
856 if (!desc->buffer)
857 return;
858 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
859 desc->buffer, RADEON_USAGE_READ,
860 RADEON_PRIO_DESCRIPTORS);
861 }
862
863 bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
864 {
865 struct si_descriptors *desc = &sctx->vertex_buffers;
866 bool bound[SI_NUM_VERTEX_BUFFERS] = {};
867 unsigned i, count = sctx->vertex_elements->count;
868 uint64_t va;
869 uint32_t *ptr;
870
871 if (!sctx->vertex_buffers_dirty)
872 return true;
873 if (!count || !sctx->vertex_elements)
874 return true;
875
876 /* Vertex buffer descriptors are the only ones which are uploaded
877 * directly through a staging buffer and don't go through
878 * the fine-grained upload path.
879 */
880 u_upload_alloc(sctx->b.uploader, 0, count * 16, 256, &desc->buffer_offset,
881 (struct pipe_resource**)&desc->buffer, (void**)&ptr);
882 if (!desc->buffer)
883 return false;
884
885 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
886 desc->buffer, RADEON_USAGE_READ,
887 RADEON_PRIO_DESCRIPTORS);
888
889 assert(count <= SI_NUM_VERTEX_BUFFERS);
890
891 for (i = 0; i < count; i++) {
892 struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
893 struct pipe_vertex_buffer *vb;
894 struct r600_resource *rbuffer;
895 unsigned offset;
896 uint32_t *desc = &ptr[i*4];
897
898 if (ve->vertex_buffer_index >= ARRAY_SIZE(sctx->vertex_buffer)) {
899 memset(desc, 0, 16);
900 continue;
901 }
902
903 vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
904 rbuffer = (struct r600_resource*)vb->buffer;
905 if (!rbuffer) {
906 memset(desc, 0, 16);
907 continue;
908 }
909
910 offset = vb->buffer_offset + ve->src_offset;
911 va = rbuffer->gpu_address + offset;
912
913 /* Fill in T# buffer resource description */
914 desc[0] = va;
915 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
916 S_008F04_STRIDE(vb->stride);
917
918 if (sctx->b.chip_class <= CIK && vb->stride)
919 /* Round up by rounding down and adding 1 */
920 desc[2] = (vb->buffer->width0 - offset -
921 sctx->vertex_elements->format_size[i]) /
922 vb->stride + 1;
923 else
924 desc[2] = vb->buffer->width0 - offset;
925
926 desc[3] = sctx->vertex_elements->rsrc_word3[i];
927
928 if (!bound[ve->vertex_buffer_index]) {
929 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
930 (struct r600_resource*)vb->buffer,
931 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
932 bound[ve->vertex_buffer_index] = true;
933 }
934 }
935
936 /* Don't flush the const cache. It would have a very negative effect
937 * on performance (confirmed by testing). New descriptors are always
938 * uploaded to a fresh buffer, so I don't think flushing the const
939 * cache is needed. */
940 desc->pointer_dirty = true;
941 si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
942 sctx->vertex_buffers_dirty = false;
943 return true;
944 }
945
946
947 /* CONSTANT BUFFERS */
948
949 static unsigned
950 si_const_buffer_descriptors_idx(unsigned shader)
951 {
952 return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
953 SI_SHADER_DESCS_CONST_BUFFERS;
954 }
955
956 static struct si_descriptors *
957 si_const_buffer_descriptors(struct si_context *sctx, unsigned shader)
958 {
959 return &sctx->descriptors[si_const_buffer_descriptors_idx(shader)];
960 }
961
962 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
963 const uint8_t *ptr, unsigned size, uint32_t *const_offset)
964 {
965 void *tmp;
966
967 u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
968 (struct pipe_resource**)rbuffer, &tmp);
969 if (*rbuffer)
970 util_memcpy_cpu_to_le32(tmp, ptr, size);
971 }
972
973 static void si_set_constant_buffer(struct si_context *sctx,
974 struct si_buffer_resources *buffers,
975 unsigned descriptors_idx,
976 uint slot, const struct pipe_constant_buffer *input)
977 {
978 struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
979 assert(slot < descs->num_elements);
980 pipe_resource_reference(&buffers->buffers[slot], NULL);
981
982 /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
983 * with a NULL buffer). We need to use a dummy buffer instead. */
984 if (sctx->b.chip_class == CIK &&
985 (!input || (!input->buffer && !input->user_buffer)))
986 input = &sctx->null_const_buf;
987
988 if (input && (input->buffer || input->user_buffer)) {
989 struct pipe_resource *buffer = NULL;
990 uint64_t va;
991
992 /* Upload the user buffer if needed. */
993 if (input->user_buffer) {
994 unsigned buffer_offset;
995
996 si_upload_const_buffer(sctx,
997 (struct r600_resource**)&buffer, input->user_buffer,
998 input->buffer_size, &buffer_offset);
999 if (!buffer) {
1000 /* Just unbind on failure. */
1001 si_set_constant_buffer(sctx, buffers, descriptors_idx, slot, NULL);
1002 return;
1003 }
1004 va = r600_resource(buffer)->gpu_address + buffer_offset;
1005 } else {
1006 pipe_resource_reference(&buffer, input->buffer);
1007 va = r600_resource(buffer)->gpu_address + input->buffer_offset;
1008 }
1009
1010 /* Set the descriptor. */
1011 uint32_t *desc = descs->list + slot*4;
1012 desc[0] = va;
1013 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1014 S_008F04_STRIDE(0);
1015 desc[2] = input->buffer_size;
1016 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1017 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1018 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1019 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1020 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1021 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1022
1023 buffers->buffers[slot] = buffer;
1024 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1025 (struct r600_resource*)buffer,
1026 buffers->shader_usage,
1027 buffers->priority, true);
1028 buffers->enabled_mask |= 1u << slot;
1029 } else {
1030 /* Clear the descriptor. */
1031 memset(descs->list + slot*4, 0, sizeof(uint32_t) * 4);
1032 buffers->enabled_mask &= ~(1u << slot);
1033 }
1034
1035 descs->dirty_mask |= 1u << slot;
1036 sctx->descriptors_dirty |= 1u << descriptors_idx;
1037 }
1038
1039 void si_set_rw_buffer(struct si_context *sctx,
1040 uint slot, const struct pipe_constant_buffer *input)
1041 {
1042 si_set_constant_buffer(sctx, &sctx->rw_buffers,
1043 SI_DESCS_RW_BUFFERS, slot, input);
1044 }
1045
1046 static void si_pipe_set_constant_buffer(struct pipe_context *ctx,
1047 uint shader, uint slot,
1048 const struct pipe_constant_buffer *input)
1049 {
1050 struct si_context *sctx = (struct si_context *)ctx;
1051
1052 if (shader >= SI_NUM_SHADERS)
1053 return;
1054
1055 si_set_constant_buffer(sctx, &sctx->const_buffers[shader],
1056 si_const_buffer_descriptors_idx(shader),
1057 slot, input);
1058 }
1059
1060 /* SHADER BUFFERS */
1061
1062 static unsigned
1063 si_shader_buffer_descriptors_idx(unsigned shader)
1064 {
1065 return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
1066 SI_SHADER_DESCS_SHADER_BUFFERS;
1067 }
1068
1069 static struct si_descriptors *
1070 si_shader_buffer_descriptors(struct si_context *sctx, unsigned shader)
1071 {
1072 return &sctx->descriptors[si_shader_buffer_descriptors_idx(shader)];
1073 }
1074
1075 static void si_set_shader_buffers(struct pipe_context *ctx, unsigned shader,
1076 unsigned start_slot, unsigned count,
1077 const struct pipe_shader_buffer *sbuffers)
1078 {
1079 struct si_context *sctx = (struct si_context *)ctx;
1080 struct si_buffer_resources *buffers = &sctx->shader_buffers[shader];
1081 struct si_descriptors *descs = si_shader_buffer_descriptors(sctx, shader);
1082 unsigned i;
1083
1084 assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
1085
1086 for (i = 0; i < count; ++i) {
1087 const struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
1088 struct r600_resource *buf;
1089 unsigned slot = start_slot + i;
1090 uint32_t *desc = descs->list + slot * 4;
1091 uint64_t va;
1092
1093 if (!sbuffer || !sbuffer->buffer) {
1094 pipe_resource_reference(&buffers->buffers[slot], NULL);
1095 memset(desc, 0, sizeof(uint32_t) * 4);
1096 buffers->enabled_mask &= ~(1u << slot);
1097 descs->dirty_mask |= 1u << slot;
1098 sctx->descriptors_dirty |=
1099 1u << si_shader_buffer_descriptors_idx(shader);
1100 continue;
1101 }
1102
1103 buf = (struct r600_resource *)sbuffer->buffer;
1104 va = buf->gpu_address + sbuffer->buffer_offset;
1105
1106 desc[0] = va;
1107 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1108 S_008F04_STRIDE(0);
1109 desc[2] = sbuffer->buffer_size;
1110 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1111 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1112 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1113 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1114 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1115 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1116
1117 pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
1118 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx, buf,
1119 buffers->shader_usage,
1120 buffers->priority, true);
1121 buffers->enabled_mask |= 1u << slot;
1122 descs->dirty_mask |= 1u << slot;
1123 sctx->descriptors_dirty |=
1124 1u << si_shader_buffer_descriptors_idx(shader);
1125 }
1126 }
1127
1128 /* RING BUFFERS */
1129
1130 void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
1131 struct pipe_resource *buffer,
1132 unsigned stride, unsigned num_records,
1133 bool add_tid, bool swizzle,
1134 unsigned element_size, unsigned index_stride, uint64_t offset)
1135 {
1136 struct si_context *sctx = (struct si_context *)ctx;
1137 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1138 struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1139
1140 /* The stride field in the resource descriptor has 14 bits */
1141 assert(stride < (1 << 14));
1142
1143 assert(slot < descs->num_elements);
1144 pipe_resource_reference(&buffers->buffers[slot], NULL);
1145
1146 if (buffer) {
1147 uint64_t va;
1148
1149 va = r600_resource(buffer)->gpu_address + offset;
1150
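/* Translate the byte sizes below into the hardware encodings stored in the
 * ELEMENT_SIZE and INDEX_STRIDE fields of the resource descriptor. */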
1151 switch (element_size) {
1152 default:
1153 assert(!"Unsupported ring buffer element size");
1154 case 0:
1155 case 2:
1156 element_size = 0;
1157 break;
1158 case 4:
1159 element_size = 1;
1160 break;
1161 case 8:
1162 element_size = 2;
1163 break;
1164 case 16:
1165 element_size = 3;
1166 break;
1167 }
1168
1169 switch (index_stride) {
1170 default:
1171 assert(!"Unsupported ring buffer index stride");
1172 case 0:
1173 case 8:
1174 index_stride = 0;
1175 break;
1176 case 16:
1177 index_stride = 1;
1178 break;
1179 case 32:
1180 index_stride = 2;
1181 break;
1182 case 64:
1183 index_stride = 3;
1184 break;
1185 }
1186
1187 if (sctx->b.chip_class >= VI && stride)
1188 num_records *= stride;
1189
1190 /* Set the descriptor. */
1191 uint32_t *desc = descs->list + slot*4;
1192 desc[0] = va;
1193 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1194 S_008F04_STRIDE(stride) |
1195 S_008F04_SWIZZLE_ENABLE(swizzle);
1196 desc[2] = num_records;
1197 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1198 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1199 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1200 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1201 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1202 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1203 S_008F0C_ELEMENT_SIZE(element_size) |
1204 S_008F0C_INDEX_STRIDE(index_stride) |
1205 S_008F0C_ADD_TID_ENABLE(add_tid);
1206
1207 pipe_resource_reference(&buffers->buffers[slot], buffer);
1208 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1209 (struct r600_resource*)buffer,
1210 buffers->shader_usage, buffers->priority);
1211 buffers->enabled_mask |= 1u << slot;
1212 } else {
1213 /* Clear the descriptor. */
1214 memset(descs->list + slot*4, 0, sizeof(uint32_t) * 4);
1215 buffers->enabled_mask &= ~(1u << slot);
1216 }
1217
1218 descs->dirty_mask |= 1u << slot;
1219 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
1220 }
1221
1222 /* STREAMOUT BUFFERS */
1223
1224 static void si_set_streamout_targets(struct pipe_context *ctx,
1225 unsigned num_targets,
1226 struct pipe_stream_output_target **targets,
1227 const unsigned *offsets)
1228 {
1229 struct si_context *sctx = (struct si_context *)ctx;
1230 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1231 struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1232 unsigned old_num_targets = sctx->b.streamout.num_targets;
1233 unsigned i, bufidx;
1234
1235 /* We are going to unbind the buffers. Mark which caches need to be flushed. */
1236 if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
1237 /* Since streamout uses vector writes which go through TC L2
1238 * and most other clients can use TC L2 as well, we don't need
1239 * to flush it.
1240 *
1241 * The only cases which require flushing it are VGT DMA index
1242 * fetching (on <= CIK) and indirect draw data, which are rare
1243 * cases. Thus, flag the TC L2 dirtiness in the resource and
1244 * handle it at draw call time.
1245 */
1246 for (i = 0; i < sctx->b.streamout.num_targets; i++)
1247 if (sctx->b.streamout.targets[i])
1248 r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
1249
1250 /* Invalidate the scalar cache in case a streamout buffer is
1251 * going to be used as a constant buffer.
1252 *
1253 * Invalidate TC L1, because streamout bypasses it (done by
1254 * setting GLC=1 in the store instruction), but it can contain
1255 * outdated data of streamout buffers.
1256 *
1257 * VS_PARTIAL_FLUSH is required if the buffers are going to be
1258 * used as an input immediately.
1259 */
1260 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
1261 SI_CONTEXT_INV_VMEM_L1 |
1262 SI_CONTEXT_VS_PARTIAL_FLUSH;
1263 }
1264
1265 /* All readers of the streamout targets need to be finished before we can
1266 * start writing to the targets.
1267 */
1268 if (num_targets)
1269 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
1270 SI_CONTEXT_CS_PARTIAL_FLUSH;
1271
1272 /* Streamout buffers must be bound in 2 places:
1273 * 1) in VGT by setting the VGT_STRMOUT registers
1274 * 2) as shader resources
1275 */
1276
1277 /* Set the VGT regs. */
1278 r600_set_streamout_targets(ctx, num_targets, targets, offsets);
1279
1280 /* Set the shader resources. */
1281 for (i = 0; i < num_targets; i++) {
1282 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1283
1284 if (targets[i]) {
1285 struct pipe_resource *buffer = targets[i]->buffer;
1286 uint64_t va = r600_resource(buffer)->gpu_address;
1287
1288 /* Set the descriptor.
1289 *
1290 * On VI, the format must be non-INVALID, otherwise
1291 * the buffer will be considered not bound and store
1292 * instructions will be no-ops.
1293 */
1294 uint32_t *desc = descs->list + bufidx*4;
1295 desc[0] = va;
1296 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
1297 desc[2] = 0xffffffff;
1298 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1299 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1300 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1301 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1302 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1303
1304 /* Set the resource. */
1305 pipe_resource_reference(&buffers->buffers[bufidx],
1306 buffer);
1307 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1308 (struct r600_resource*)buffer,
1309 buffers->shader_usage,
1310 RADEON_PRIO_SHADER_RW_BUFFER,
1311 true);
1312 buffers->enabled_mask |= 1u << bufidx;
1313 } else {
1314 /* Clear the descriptor and unset the resource. */
1315 memset(descs->list + bufidx*4, 0,
1316 sizeof(uint32_t) * 4);
1317 pipe_resource_reference(&buffers->buffers[bufidx],
1318 NULL);
1319 buffers->enabled_mask &= ~(1u << bufidx);
1320 }
1321 descs->dirty_mask |= 1u << bufidx;
1322 }
1323 for (; i < old_num_targets; i++) {
1324 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1325 /* Clear the descriptor and unset the resource. */
1326 memset(descs->list + bufidx*4, 0, sizeof(uint32_t) * 4);
1327 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
1328 buffers->enabled_mask &= ~(1u << bufidx);
1329 descs->dirty_mask |= 1u << bufidx;
1330 }
1331
1332 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
1333 }
1334
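/* Rewrite the base address in a buffer descriptor after the underlying buffer
 * has been reallocated, preserving the offset within the buffer. */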
1335 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
1336 uint32_t *desc, uint64_t old_buf_va,
1337 struct pipe_resource *new_buf)
1338 {
1339 /* Retrieve the buffer offset from the descriptor. */
1340 uint64_t old_desc_va =
1341 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
1342
1343 assert(old_buf_va <= old_desc_va);
1344 uint64_t offset_within_buffer = old_desc_va - old_buf_va;
1345
1346 /* Update the descriptor. */
1347 uint64_t va = r600_resource(new_buf)->gpu_address + offset_within_buffer;
1348
1349 desc[0] = va;
1350 desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
1351 S_008F04_BASE_ADDRESS_HI(va >> 32);
1352 }
1353
1354 /* INTERNAL CONST BUFFERS */
1355
1356 static void si_set_polygon_stipple(struct pipe_context *ctx,
1357 const struct pipe_poly_stipple *state)
1358 {
1359 struct si_context *sctx = (struct si_context *)ctx;
1360 struct pipe_constant_buffer cb = {};
1361 unsigned stipple[32];
1362 int i;
1363
1364 for (i = 0; i < 32; i++)
1365 stipple[i] = util_bitreverse(state->stipple[i]);
1366
1367 cb.user_buffer = stipple;
1368 cb.buffer_size = sizeof(stipple);
1369
1370 si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE, &cb);
1371 }
1372
1373 /* TEXTURE METADATA ENABLE/DISABLE */
1374
1375 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
1376 * while the texture is bound, possibly by a different context. In that case,
1377 * call this function to update compressed_colortex_masks.
1378 */
1379 void si_update_compressed_colortex_masks(struct si_context *sctx)
1380 {
1381 for (int i = 0; i < SI_NUM_SHADERS; ++i) {
1382 si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
1383 si_images_update_compressed_colortex_mask(&sctx->images[i]);
1384 }
1385 }
1386
1387 /* BUFFER DISCARD/INVALIDATION */
1388
1389 /** Reset descriptors of buffer resources after \p buf has been invalidated. */
1390 static void si_reset_buffer_resources(struct si_context *sctx,
1391 struct si_buffer_resources *buffers,
1392 unsigned descriptors_idx,
1393 struct pipe_resource *buf,
1394 uint64_t old_va)
1395 {
1396 struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
1397 unsigned mask = buffers->enabled_mask;
1398
1399 while (mask) {
1400 unsigned i = u_bit_scan(&mask);
1401 if (buffers->buffers[i] == buf) {
1402 si_desc_reset_buffer_offset(&sctx->b.b,
1403 descs->list + i*4,
1404 old_va, buf);
1405 descs->dirty_mask |= 1u << i;
1406 sctx->descriptors_dirty |= 1u << descriptors_idx;
1407
1408 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1409 (struct r600_resource *)buf,
1410 buffers->shader_usage,
1411 buffers->priority, true);
1412 }
1413 }
1414 }
1415
1416 /* Reallocate a buffer and update all resource bindings where the buffer is
1417 * bound.
1418 *
1419 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
1420 * idle by discarding its contents. Apps usually tell us when to do this using
1421 * map_buffer flags, for example.
1422 */
1423 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
1424 {
1425 struct si_context *sctx = (struct si_context*)ctx;
1426 struct r600_resource *rbuffer = r600_resource(buf);
1427 unsigned i, shader, alignment = rbuffer->buf->alignment;
1428 uint64_t old_va = rbuffer->gpu_address;
1429 unsigned num_elems = sctx->vertex_elements ?
1430 sctx->vertex_elements->count : 0;
1431 struct si_sampler_view *view;
1432
1433 /* Reallocate the buffer in the same pipe_resource. */
1434 r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
1435 alignment);
1436
1437 /* We changed the buffer, now we need to bind it where the old one
1438 * was bound. This consists of 2 things:
1439 * 1) Updating the resource descriptor and dirtying it.
1440 * 2) Adding a relocation to the CS, so that it's usable.
1441 */
1442
1443 /* Vertex buffers. */
1444 for (i = 0; i < num_elems; i++) {
1445 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
1446
1447 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
1448 continue;
1449 if (!sctx->vertex_buffer[vb].buffer)
1450 continue;
1451
1452 if (sctx->vertex_buffer[vb].buffer == buf) {
1453 sctx->vertex_buffers_dirty = true;
1454 break;
1455 }
1456 }
1457
1458 /* Streamout buffers. (other internal buffers can't be invalidated) */
1459 for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
1460 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1461 struct si_descriptors *descs =
1462 &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1463
1464 if (buffers->buffers[i] != buf)
1465 continue;
1466
1467 si_desc_reset_buffer_offset(ctx, descs->list + i*4,
1468 old_va, buf);
1469 descs->dirty_mask |= 1u << i;
1470 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
1471
1472 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1473 rbuffer, buffers->shader_usage,
1474 RADEON_PRIO_SHADER_RW_BUFFER,
1475 true);
1476
1477 /* Update the streamout state. */
1478 if (sctx->b.streamout.begin_emitted)
1479 r600_emit_streamout_end(&sctx->b);
1480 sctx->b.streamout.append_bitmask =
1481 sctx->b.streamout.enabled_mask;
1482 r600_streamout_buffers_dirty(&sctx->b);
1483 }
1484
1485 /* Constant and shader buffers. */
1486 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1487 si_reset_buffer_resources(sctx, &sctx->const_buffers[shader],
1488 si_const_buffer_descriptors_idx(shader),
1489 buf, old_va);
1490 si_reset_buffer_resources(sctx, &sctx->shader_buffers[shader],
1491 si_shader_buffer_descriptors_idx(shader),
1492 buf, old_va);
1493 }
1494
1495 /* Texture buffers - update virtual addresses in sampler view descriptors. */
1496 LIST_FOR_EACH_ENTRY(view, &sctx->b.texture_buffers, list) {
1497 if (view->base.texture == buf) {
1498 si_desc_reset_buffer_offset(ctx, &view->state[4], old_va, buf);
1499 }
1500 }
1501 /* Texture buffers - update bindings. */
1502 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1503 struct si_sampler_views *views = &sctx->samplers[shader].views;
1504 struct si_descriptors *descs =
1505 si_sampler_descriptors(sctx, shader);
1506 unsigned mask = views->enabled_mask;
1507
1508 while (mask) {
1509 unsigned i = u_bit_scan(&mask);
1510 if (views->views[i]->texture == buf) {
1511 si_desc_reset_buffer_offset(ctx,
1512 descs->list +
1513 i * 16 + 4,
1514 old_va, buf);
1515 descs->dirty_mask |= 1u << i;
1516 sctx->descriptors_dirty |=
1517 1u << si_sampler_descriptors_idx(shader);
1518
1519 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1520 rbuffer, RADEON_USAGE_READ,
1521 RADEON_PRIO_SAMPLER_BUFFER,
1522 true);
1523 }
1524 }
1525 }
1526
1527 /* Shader images */
1528 for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
1529 struct si_images_info *images = &sctx->images[shader];
1530 struct si_descriptors *descs =
1531 si_image_descriptors(sctx, shader);
1532 unsigned mask = images->enabled_mask;
1533
1534 while (mask) {
1535 unsigned i = u_bit_scan(&mask);
1536
1537 if (images->views[i].resource == buf) {
1538 if (images->views[i].access & PIPE_IMAGE_ACCESS_WRITE)
1539 si_mark_image_range_valid(&images->views[i]);
1540
1541 si_desc_reset_buffer_offset(
1542 ctx, descs->list + i * 8 + 4,
1543 old_va, buf);
1544 descs->dirty_mask |= 1u << i;
1545 sctx->descriptors_dirty |=
1546 1u << si_image_descriptors_idx(shader);
1547
1548 radeon_add_to_buffer_list_check_mem(
1549 &sctx->b, &sctx->b.gfx, rbuffer,
1550 RADEON_USAGE_READWRITE,
1551 RADEON_PRIO_SAMPLER_BUFFER, true);
1552 }
1553 }
1554 }
1555 }
1556
1557 /* Update mutable image descriptor fields of all bound textures. */
1558 void si_update_all_texture_descriptors(struct si_context *sctx)
1559 {
1560 unsigned shader;
1561
1562 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1563 struct si_sampler_views *samplers = &sctx->samplers[shader].views;
1564 struct si_images_info *images = &sctx->images[shader];
1565 unsigned mask;
1566
1567 /* Images. */
1568 mask = images->enabled_mask;
1569 while (mask) {
1570 unsigned i = u_bit_scan(&mask);
1571 struct pipe_image_view *view = &images->views[i];
1572
1573 if (!view->resource ||
1574 view->resource->target == PIPE_BUFFER)
1575 continue;
1576
1577 si_set_shader_image(sctx, shader, i, view);
1578 }
1579
1580 /* Sampler views. */
1581 mask = samplers->enabled_mask;
1582 while (mask) {
1583 unsigned i = u_bit_scan(&mask);
1584 struct pipe_sampler_view *view = samplers->views[i];
1585
1586 if (!view ||
1587 !view->texture ||
1588 view->texture->target == PIPE_BUFFER)
1589 continue;
1590
1591 si_set_sampler_view(sctx, shader, i,
1592 samplers->views[i], true);
1593 }
1594 }
1595 }
1596
1597 /* SHADER USER DATA */
1598
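/* Mark all descriptor list pointers of one shader stage as dirty, so they are
 * re-emitted as user data SGPRs on the next draw. */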
1599 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
1600 unsigned shader)
1601 {
1602 struct si_descriptors *descs =
1603 &sctx->descriptors[SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS];
1604
1605 for (unsigned i = 0; i < SI_NUM_SHADER_DESCS; ++i, ++descs)
1606 descs->pointer_dirty = true;
1607
1608 if (shader == PIPE_SHADER_VERTEX)
1609 sctx->vertex_buffers.pointer_dirty = true;
1610
1611 si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
1612 }
1613
1614 static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
1615 {
1616 int i;
1617
1618 for (i = 0; i < SI_NUM_SHADERS; i++) {
1619 si_mark_shader_pointers_dirty(sctx, i);
1620 }
1621 sctx->descriptors[SI_DESCS_RW_BUFFERS].pointer_dirty = true;
1622 }
1623
1624 /* Set a base register address for user data constants in the given shader.
1625 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
1626 */
1627 static void si_set_user_data_base(struct si_context *sctx,
1628 unsigned shader, uint32_t new_base)
1629 {
1630 uint32_t *base = &sctx->shader_userdata.sh_base[shader];
1631
1632 if (*base != new_base) {
1633 *base = new_base;
1634
1635 if (new_base)
1636 si_mark_shader_pointers_dirty(sctx, shader);
1637 }
1638 }
1639
1640 /* This must be called when these shaders are changed from non-NULL to NULL
1641 * and vice versa:
1642 * - geometry shader
1643 * - tessellation control shader
1644 * - tessellation evaluation shader
1645 */
1646 void si_shader_change_notify(struct si_context *sctx)
1647 {
1648 /* VS can be bound as VS, ES, or LS. */
1649 if (sctx->tes_shader.cso)
1650 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1651 R_00B530_SPI_SHADER_USER_DATA_LS_0);
1652 else if (sctx->gs_shader.cso)
1653 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1654 R_00B330_SPI_SHADER_USER_DATA_ES_0);
1655 else
1656 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1657 R_00B130_SPI_SHADER_USER_DATA_VS_0);
1658
1659 /* TES can be bound as ES, VS, or not bound. */
1660 if (sctx->tes_shader.cso) {
1661 if (sctx->gs_shader.cso)
1662 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1663 R_00B330_SPI_SHADER_USER_DATA_ES_0);
1664 else
1665 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1666 R_00B130_SPI_SHADER_USER_DATA_VS_0);
1667 } else {
1668 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
1669 }
1670 }
1671
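/* Emit one 64-bit descriptor list address as a pair of user data SGPRs
 * (SET_SH_REG) at the given shader-specific base register. */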
1672 static void si_emit_shader_pointer(struct si_context *sctx,
1673 struct si_descriptors *desc,
1674 unsigned sh_base, bool keep_dirty)
1675 {
1676 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
1677 uint64_t va;
1678
1679 if (!desc->pointer_dirty || !desc->buffer)
1680 return;
1681
1682 va = desc->buffer->gpu_address +
1683 desc->buffer_offset;
1684
1685 radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
1686 radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
1687 radeon_emit(cs, va);
1688 radeon_emit(cs, va >> 32);
1689
1690 desc->pointer_dirty = keep_dirty;
1691 }
1692
1693 void si_emit_graphics_shader_userdata(struct si_context *sctx,
1694 struct r600_atom *atom)
1695 {
1696 unsigned shader;
1697 uint32_t *sh_base = sctx->shader_userdata.sh_base;
1698 struct si_descriptors *descs;
1699
1700 descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1701
1702 if (descs->pointer_dirty) {
1703 si_emit_shader_pointer(sctx, descs,
1704 R_00B030_SPI_SHADER_USER_DATA_PS_0, true);
1705 si_emit_shader_pointer(sctx, descs,
1706 R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
1707 si_emit_shader_pointer(sctx, descs,
1708 R_00B230_SPI_SHADER_USER_DATA_GS_0, true);
1709 si_emit_shader_pointer(sctx, descs,
1710 R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
1711 si_emit_shader_pointer(sctx, descs,
1712 R_00B430_SPI_SHADER_USER_DATA_HS_0, true);
1713 descs->pointer_dirty = false;
1714 }
1715
1716 descs = &sctx->descriptors[SI_DESCS_FIRST_SHADER];
1717
1718 for (shader = 0; shader < SI_NUM_GRAPHICS_SHADERS; shader++) {
1719 unsigned base = sh_base[shader];
1720 unsigned i;
1721
1722 if (!base)
1723 continue;
1724
1725 for (i = 0; i < SI_NUM_SHADER_DESCS; i++, descs++)
1726 si_emit_shader_pointer(sctx, descs, base, false);
1727 }
1728 si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
1729 }
1730
1731 void si_emit_compute_shader_userdata(struct si_context *sctx)
1732 {
1733 unsigned base = R_00B900_COMPUTE_USER_DATA_0;
1734 struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_FIRST_COMPUTE];
1735
1736 for (unsigned i = 0; i < SI_NUM_SHADER_DESCS; ++i, ++descs)
1737 si_emit_shader_pointer(sctx, descs, base, false);
1738 }
1739
1740 /* INIT/DEINIT/UPLOAD */
1741
1742 void si_init_all_descriptors(struct si_context *sctx)
1743 {
1744 int i;
1745 unsigned ce_offset = 0;
1746
1747 for (i = 0; i < SI_NUM_SHADERS; i++) {
1748 si_init_buffer_resources(&sctx->const_buffers[i],
1749 si_const_buffer_descriptors(sctx, i),
1750 SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
1751 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER,
1752 &ce_offset);
1753 si_init_buffer_resources(&sctx->shader_buffers[i],
1754 si_shader_buffer_descriptors(sctx, i),
1755 SI_NUM_SHADER_BUFFERS, SI_SGPR_SHADER_BUFFERS,
1756 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RW_BUFFER,
1757 &ce_offset);
1758
1759 si_init_descriptors(si_sampler_descriptors(sctx, i),
1760 SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS,
1761 null_texture_descriptor, &ce_offset);
1762
1763 si_init_descriptors(si_image_descriptors(sctx, i),
1764 SI_SGPR_IMAGES, 8, SI_NUM_IMAGES,
1765 null_image_descriptor, &ce_offset);
1766 }
1767
1768 si_init_buffer_resources(&sctx->rw_buffers,
1769 &sctx->descriptors[SI_DESCS_RW_BUFFERS],
1770 SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
1771 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS,
1772 &ce_offset);
1773 si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
1774 4, SI_NUM_VERTEX_BUFFERS, NULL, NULL);
1775
1776 sctx->descriptors_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
1777
1778 assert(ce_offset <= 32768);
1779
1780 /* Set pipe_context functions. */
1781 sctx->b.b.bind_sampler_states = si_bind_sampler_states;
1782 sctx->b.b.set_shader_images = si_set_shader_images;
1783 sctx->b.b.set_constant_buffer = si_pipe_set_constant_buffer;
1784 sctx->b.b.set_polygon_stipple = si_set_polygon_stipple;
1785 sctx->b.b.set_shader_buffers = si_set_shader_buffers;
1786 sctx->b.b.set_sampler_views = si_set_sampler_views;
1787 sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
1788 sctx->b.invalidate_buffer = si_invalidate_buffer;
1789
1790 /* Shader user data. */
1791 si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
1792 si_emit_graphics_shader_userdata);
1793
1794 /* Set default and immutable mappings. */
1795 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
1796 si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
1797 si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
1798 si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
1799 }
1800
1801 bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
1802 {
1803 const unsigned mask = u_bit_consecutive(0, SI_DESCS_FIRST_COMPUTE);
1804 unsigned dirty = sctx->descriptors_dirty & mask;
1805
1806 while (dirty) {
1807 unsigned i = u_bit_scan(&dirty);
1808
1809 if (!si_upload_descriptors(sctx, &sctx->descriptors[i],
1810 &sctx->shader_userdata.atom))
1811 return false;
1812 }
1813
1814 sctx->descriptors_dirty &= ~mask;
1815 return true;
1816 }
1817
1818 bool si_upload_compute_shader_descriptors(struct si_context *sctx)
1819 {
1820 /* Does not update rw_buffers as that is not needed for compute shaders
1821 * and the input buffer is using the same SGPRs anyway.
1822 */
1823 const unsigned mask = u_bit_consecutive(SI_DESCS_FIRST_COMPUTE,
1824 SI_NUM_DESCS - SI_DESCS_FIRST_COMPUTE);
1825 unsigned dirty = sctx->descriptors_dirty & mask;
1826
1827 while (dirty) {
1828 unsigned i = u_bit_scan(&dirty);
1829
1830 if (!si_upload_descriptors(sctx, &sctx->descriptors[i], NULL))
1831 return false;
1832 }
1833
1834 sctx->descriptors_dirty &= ~mask;
1835
1836 return true;
1837 }
1838
1839 void si_release_all_descriptors(struct si_context *sctx)
1840 {
1841 int i;
1842
1843 for (i = 0; i < SI_NUM_SHADERS; i++) {
1844 si_release_buffer_resources(&sctx->const_buffers[i],
1845 si_const_buffer_descriptors(sctx, i));
1846 si_release_buffer_resources(&sctx->shader_buffers[i],
1847 si_shader_buffer_descriptors(sctx, i));
1848 si_release_sampler_views(&sctx->samplers[i].views);
1849 si_release_image_views(&sctx->images[i]);
1850 }
1851 si_release_buffer_resources(&sctx->rw_buffers,
1852 &sctx->descriptors[SI_DESCS_RW_BUFFERS]);
1853
1854 for (i = 0; i < SI_NUM_DESCS; ++i)
1855 si_release_descriptors(&sctx->descriptors[i]);
1856 si_release_descriptors(&sctx->vertex_buffers);
1857 }
1858
1859 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
1860 {
1861 int i;
1862
1863 for (i = 0; i < SI_NUM_SHADERS; i++) {
1864 si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
1865 si_buffer_resources_begin_new_cs(sctx, &sctx->shader_buffers[i]);
1866 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
1867 si_image_views_begin_new_cs(sctx, &sctx->images[i]);
1868 }
1869 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
1870 si_vertex_buffers_begin_new_cs(sctx);
1871
1872 for (i = 0; i < SI_NUM_DESCS; ++i)
1873 si_descriptors_begin_new_cs(sctx, &sctx->descriptors[i]);
1874
1875 si_shader_userdata_begin_new_cs(sctx);
1876 }