radeonsi: fix a subtle bounds checking corner case with 3-component attributes
[mesa.git] / src / gallium / drivers / radeonsi / si_descriptors.c
1 /*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Marek Olšák <marek.olsak@amd.com>
25 */
26
27 /* Resource binding slots and sampler states (each described with 8 or
28 * 4 dwords) are stored in lists in memory which is accessed by shaders
29 * using scalar load instructions.
30 *
31 * This file is responsible for managing such lists. It keeps a copy of all
32 * descriptors in CPU memory and re-uploads a whole list if some slots have
33 * been changed.
34 *
35 * This code is also responsible for updating shader pointers to those lists.
36 *
37 * Note that CP DMA can't be used for updating the lists, because a GPU hang
38 * could leave the list in a mid-IB state and the next IB would get wrong
39 * descriptors and the whole context would be unusable at that point.
40 * (Note: register shadowing can't be used for the same reason.)
41 *
42 * Also, uploading descriptors to newly allocated memory doesn't require
43 * a KCACHE flush.
44 *
45 *
46 * Possible scenarios for one 16 dword image+sampler slot:
47 *
48 * | Image | w/ FMASK | Buffer | NULL
49 * [ 0: 3] Image[0:3] | Image[0:3] | Null[0:3] | Null[0:3]
50 * [ 4: 7] Image[4:7] | Image[4:7] | Buffer[0:3] | 0
51 * [ 8:11] Null[0:3] | Fmask[0:3] | Null[0:3] | Null[0:3]
52 * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
53 *
54 * FMASK implies MSAA, therefore no sampler state.
55 * Sampler states are never unbound except when FMASK is bound.
56 */
57
58 #include "radeon/r600_cs.h"
59 #include "si_pipe.h"
60 #include "sid.h"
61
62 #include "util/u_format.h"
63 #include "util/u_memory.h"
64 #include "util/u_upload_mgr.h"
65
66
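/* Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): how the 16-dword image+sampler slot layout documented at
 * the top of this file maps to dword offsets within a descriptor list.
 * si_set_sampler_view() below uses the same offsets directly
 * (list + slot * 16, +8 for the FMASK words, +12 for the sampler state).
 */
static inline uint32_t *si_example_slot_words(uint32_t *list, unsigned slot,
                                              unsigned part)
{
	/* part: 0 = image/buffer words [0:7], 1 = FMASK words [8:11],
	 *       2 = sampler state words [12:15] */
	static const unsigned part_offset[] = { 0, 8, 12 };

	return list + slot * 16 + part_offset[part];
}
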
67 /* NULL image and buffer descriptor for textures (alpha = 1) and images
68 * (alpha = 0).
69 *
70 * For images, all fields must be zero except for the swizzle, which
71 * supports arbitrary combinations of 0s and 1s. The texture type must be
72 * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
73 *
74 * For buffers, all fields must be zero. If they are not, the hw hangs.
75 *
76 * This is the only reason why the buffer descriptor must be in words [4:7].
77 */
78 static uint32_t null_texture_descriptor[8] = {
79 0,
80 0,
81 0,
82 S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
83 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
84 /* the rest must contain zeros, which is also used by the buffer
85 * descriptor */
86 };
87
88 static uint32_t null_image_descriptor[8] = {
89 0,
90 0,
91 0,
92 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
93 /* the rest must contain zeros, which is also used by the buffer
94 * descriptor */
95 };
96
97 static void si_init_descriptors(struct si_descriptors *desc,
98 unsigned shader_userdata_index,
99 unsigned element_dw_size,
100 unsigned num_elements,
101 const uint32_t *null_descriptor,
102 unsigned *ce_offset)
103 {
104 int i;
105
106 assert(num_elements <= sizeof(desc->dirty_mask)*8);
107
108 desc->list = CALLOC(num_elements, element_dw_size * 4);
109 desc->element_dw_size = element_dw_size;
110 desc->num_elements = num_elements;
111 desc->dirty_mask = num_elements == 32 ? ~0u : (1u << num_elements) - 1;
112 desc->shader_userdata_offset = shader_userdata_index * 4;
113
114 if (ce_offset) {
115 desc->ce_offset = *ce_offset;
116
117 /* make sure that ce_offset stays 32 byte aligned */
118 *ce_offset += align(element_dw_size * num_elements * 4, 32);
119 }
120
121 /* Initialize the array to NULL descriptors (the element size must be a multiple of 8 dwords). */
122 if (null_descriptor) {
123 assert(element_dw_size % 8 == 0);
124 for (i = 0; i < num_elements * element_dw_size / 8; i++)
125 memcpy(desc->list + i * 8, null_descriptor,
126 8 * 4);
127 }
128 }
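
/* A minimal standalone sketch of the dirty_mask initialization above (the
 * helper is hypothetical and unused by the driver). The 32-element case is
 * special-cased because shifting a 32-bit value by 32 bits is undefined
 * behaviour in C, so (1u << 32) - 1 can't be used to build the all-ones
 * mask.
 */
static inline unsigned si_example_full_dirty_mask(unsigned num_elements)
{
	assert(num_elements >= 1 && num_elements <= 32);
	return num_elements == 32 ? ~0u : (1u << num_elements) - 1;
}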
129
130 static void si_release_descriptors(struct si_descriptors *desc)
131 {
132 r600_resource_reference(&desc->buffer, NULL);
133 FREE(desc->list);
134 }
135
136 static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
137 unsigned *out_offset, struct r600_resource **out_buf) {
138 uint64_t va;
139
140 u_suballocator_alloc(sctx->ce_suballocator, size, 64, out_offset,
141 (struct pipe_resource**)out_buf);
142 	if (!*out_buf)
143 return false;
144
145 va = (*out_buf)->gpu_address + *out_offset;
146
147 radeon_emit(sctx->ce_ib, PKT3(PKT3_DUMP_CONST_RAM, 3, 0));
148 radeon_emit(sctx->ce_ib, ce_offset);
149 radeon_emit(sctx->ce_ib, size / 4);
150 radeon_emit(sctx->ce_ib, va);
151 radeon_emit(sctx->ce_ib, va >> 32);
152
153 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, *out_buf,
154 RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
155
156 sctx->ce_need_synchronization = true;
157 return true;
158 }
159
160 static void si_ce_reinitialize_descriptors(struct si_context *sctx,
161 struct si_descriptors *desc)
162 {
163 if (desc->buffer) {
164 struct r600_resource *buffer = (struct r600_resource*)desc->buffer;
165 unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
166 uint64_t va = buffer->gpu_address + desc->buffer_offset;
167 struct radeon_winsys_cs *ib = sctx->ce_preamble_ib;
168
169 if (!ib)
170 ib = sctx->ce_ib;
171
172 list_size = align(list_size, 32);
173
174 radeon_emit(ib, PKT3(PKT3_LOAD_CONST_RAM, 3, 0));
175 radeon_emit(ib, va);
176 radeon_emit(ib, va >> 32);
177 radeon_emit(ib, list_size / 4);
178 radeon_emit(ib, desc->ce_offset);
179
180 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
181 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
182 }
183 desc->ce_ram_dirty = false;
184 }
185
186 void si_ce_reinitialize_all_descriptors(struct si_context *sctx)
187 {
188 int i;
189
190 for (i = 0; i < SI_NUM_DESCS; ++i)
191 si_ce_reinitialize_descriptors(sctx, &sctx->descriptors[i]);
192 }
193
194 void si_ce_enable_loads(struct radeon_winsys_cs *ib)
195 {
196 radeon_emit(ib, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
197 radeon_emit(ib, CONTEXT_CONTROL_LOAD_ENABLE(1) |
198 CONTEXT_CONTROL_LOAD_CE_RAM(1));
199 radeon_emit(ib, CONTEXT_CONTROL_SHADOW_ENABLE(1));
200 }
201
202 static bool si_upload_descriptors(struct si_context *sctx,
203 struct si_descriptors *desc,
204 struct r600_atom * atom)
205 {
206 unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
207
208 if (!desc->dirty_mask)
209 return true;
210
211 if (sctx->ce_ib) {
212 uint32_t const* list = (uint32_t const*)desc->list;
213
214 if (desc->ce_ram_dirty)
215 si_ce_reinitialize_descriptors(sctx, desc);
216
217 while(desc->dirty_mask) {
218 int begin, count;
219 u_bit_scan_consecutive_range(&desc->dirty_mask, &begin,
220 &count);
221
222 begin *= desc->element_dw_size;
223 count *= desc->element_dw_size;
224
225 radeon_emit(sctx->ce_ib,
226 PKT3(PKT3_WRITE_CONST_RAM, count, 0));
227 radeon_emit(sctx->ce_ib, desc->ce_offset + begin * 4);
228 radeon_emit_array(sctx->ce_ib, list + begin, count);
229 }
230
231 if (!si_ce_upload(sctx, desc->ce_offset, list_size,
232 &desc->buffer_offset, &desc->buffer))
233 return false;
234 } else {
235 void *ptr;
236
237 u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
238 &desc->buffer_offset,
239 (struct pipe_resource**)&desc->buffer, &ptr);
240 if (!desc->buffer)
241 return false; /* skip the draw call */
242
243 util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
244
245 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
246 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
247 }
248 desc->pointer_dirty = true;
249 desc->dirty_mask = 0;
250
251 if (atom)
252 si_mark_atom_dirty(sctx, atom);
253
254 return true;
255 }
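
/* Worked example (illustrative numbers) for the partial CE update above:
 * with desc->dirty_mask == 0x000000c6 (elements 1, 2, 6 and 7 dirty) and
 * element_dw_size == 8, u_bit_scan_consecutive_range() yields the ranges
 * (begin 1, count 2) and (begin 6, count 2), so two WRITE_CONST_RAM packets
 * of 16 dwords each are emitted at byte offsets ce_offset + 32 and
 * ce_offset + 192 in CE RAM, instead of rewriting the whole list.
 */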
256
257 static void
258 si_descriptors_begin_new_cs(struct si_context *sctx, struct si_descriptors *desc)
259 {
260 desc->ce_ram_dirty = true;
261
262 if (!desc->buffer)
263 return;
264
265 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
266 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
267 }
268
269 /* SAMPLER VIEWS */
270
271 static unsigned
272 si_sampler_descriptors_idx(unsigned shader)
273 {
274 return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
275 SI_SHADER_DESCS_SAMPLERS;
276 }
277
278 static struct si_descriptors *
279 si_sampler_descriptors(struct si_context *sctx, unsigned shader)
280 {
281 return &sctx->descriptors[si_sampler_descriptors_idx(shader)];
282 }
283
284 static void si_release_sampler_views(struct si_sampler_views *views)
285 {
286 int i;
287
288 for (i = 0; i < ARRAY_SIZE(views->views); i++) {
289 pipe_sampler_view_reference(&views->views[i], NULL);
290 }
291 }
292
293 static void si_sampler_view_add_buffer(struct si_context *sctx,
294 struct pipe_resource *resource,
295 enum radeon_bo_usage usage,
296 bool is_stencil_sampler,
297 bool check_mem)
298 {
299 struct r600_resource *rres;
300 struct r600_texture *rtex;
301 enum radeon_bo_priority priority;
302
303 if (!resource)
304 return;
305
306 if (resource->target != PIPE_BUFFER) {
307 struct r600_texture *tex = (struct r600_texture*)resource;
308
309 if (tex->is_depth && !r600_can_sample_zs(tex, is_stencil_sampler))
310 resource = &tex->flushed_depth_texture->resource.b.b;
311 }
312
313 rres = (struct r600_resource*)resource;
314 priority = r600_get_sampler_view_priority(rres);
315
316 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
317 rres, usage, priority,
318 check_mem);
319
320 if (resource->target == PIPE_BUFFER)
321 return;
322
323 /* Now add separate DCC if it's present. */
324 rtex = (struct r600_texture*)resource;
325 if (!rtex->dcc_separate_buffer)
326 return;
327
328 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
329 rtex->dcc_separate_buffer, usage,
330 RADEON_PRIO_DCC, check_mem);
331 }
332
333 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
334 struct si_sampler_views *views)
335 {
336 unsigned mask = views->enabled_mask;
337
338 /* Add buffers to the CS. */
339 while (mask) {
340 int i = u_bit_scan(&mask);
341 struct si_sampler_view *sview = (struct si_sampler_view *)views->views[i];
342
343 si_sampler_view_add_buffer(sctx, sview->base.texture,
344 RADEON_USAGE_READ,
345 sview->is_stencil_sampler, false);
346 }
347 }
348
349 /* Set buffer descriptor fields that can be changed by reallocations. */
350 static void si_set_buf_desc_address(struct r600_resource *buf,
351 uint64_t offset, uint32_t *state)
352 {
353 uint64_t va = buf->gpu_address + offset;
354
355 state[0] = va;
356 state[1] &= C_008F04_BASE_ADDRESS_HI;
357 state[1] |= S_008F04_BASE_ADDRESS_HI(va >> 32);
358 }
359
360 /* Set texture descriptor fields that can be changed by reallocations.
361 *
362 * \param tex texture
363 * \param base_level_info information of the level of BASE_ADDRESS
364 * \param base_level the level of BASE_ADDRESS
365 * \param first_level pipe_sampler_view.u.tex.first_level
366 * \param block_width util_format_get_blockwidth()
367 * \param is_stencil select between separate Z & Stencil
368 * \param state descriptor to update
369 */
370 void si_set_mutable_tex_desc_fields(struct r600_texture *tex,
371 const struct radeon_surf_level *base_level_info,
372 unsigned base_level, unsigned first_level,
373 unsigned block_width, bool is_stencil,
374 uint32_t *state)
375 {
376 uint64_t va;
377 unsigned pitch = base_level_info->nblk_x * block_width;
378
379 if (tex->is_depth && !r600_can_sample_zs(tex, is_stencil)) {
380 tex = tex->flushed_depth_texture;
381 is_stencil = false;
382 }
383
384 va = tex->resource.gpu_address + base_level_info->offset;
385
386 state[1] &= C_008F14_BASE_ADDRESS_HI;
387 state[3] &= C_008F1C_TILING_INDEX;
388 state[4] &= C_008F20_PITCH;
389 state[6] &= C_008F28_COMPRESSION_EN;
390
391 state[0] = va >> 8;
392 state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);
393 state[3] |= S_008F1C_TILING_INDEX(si_tile_mode_index(tex, base_level,
394 is_stencil));
395 state[4] |= S_008F20_PITCH(pitch - 1);
396
397 if (tex->dcc_offset && first_level < tex->surface.num_dcc_levels) {
398 state[6] |= S_008F28_COMPRESSION_EN(1);
399 state[7] = ((!tex->dcc_separate_buffer ? tex->resource.gpu_address : 0) +
400 tex->dcc_offset +
401 base_level_info->dcc_offset) >> 8;
402 } else if (tex->tc_compatible_htile) {
403 state[6] |= S_008F28_COMPRESSION_EN(1);
404 state[7] = tex->htile_buffer->gpu_address >> 8;
405 }
406 }
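
/* Worked example (illustrative numbers) for the pitch computation above:
 * if base_level_info->nblk_x is 128 blocks and the format is BC1
 * (util_format_get_blockwidth() == 4), then pitch = 128 * 4 = 512 texels
 * and the PITCH field stores 511, since the field holds pitch - 1.
 */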
407
408 static void si_set_sampler_view(struct si_context *sctx,
409 unsigned shader,
410 unsigned slot, struct pipe_sampler_view *view,
411 bool disallow_early_out)
412 {
413 struct si_sampler_views *views = &sctx->samplers[shader].views;
414 struct si_sampler_view *rview = (struct si_sampler_view*)view;
415 struct si_descriptors *descs = si_sampler_descriptors(sctx, shader);
416
417 if (views->views[slot] == view && !disallow_early_out)
418 return;
419
420 if (view) {
421 struct r600_texture *rtex = (struct r600_texture *)view->texture;
422 uint32_t *desc = descs->list + slot * 16;
423
424 assert(rtex); /* views with texture == NULL aren't supported */
425 pipe_sampler_view_reference(&views->views[slot], view);
426 memcpy(desc, rview->state, 8*4);
427
428 if (rtex->resource.b.b.target == PIPE_BUFFER) {
429 rtex->resource.bind_history |= PIPE_BIND_SAMPLER_VIEW;
430
431 si_set_buf_desc_address(&rtex->resource,
432 view->u.buf.offset,
433 desc + 4);
434 } else {
435 bool is_separate_stencil =
436 rtex->db_compatible &&
437 rview->is_stencil_sampler;
438
439 si_set_mutable_tex_desc_fields(rtex,
440 rview->base_level_info,
441 rview->base_level,
442 rview->base.u.tex.first_level,
443 rview->block_width,
444 is_separate_stencil,
445 desc);
446 }
447
448 if (rtex->resource.b.b.target != PIPE_BUFFER &&
449 rtex->fmask.size) {
450 memcpy(desc + 8,
451 rview->fmask_state, 8*4);
452 } else {
453 /* Disable FMASK and bind sampler state in [12:15]. */
454 memcpy(desc + 8,
455 null_texture_descriptor, 4*4);
456
457 if (views->sampler_states[slot])
458 memcpy(desc + 12,
459 views->sampler_states[slot], 4*4);
460 }
461
462 views->enabled_mask |= 1u << slot;
463
464 /* Since this can flush, it must be done after enabled_mask is
465 * updated. */
466 si_sampler_view_add_buffer(sctx, view->texture,
467 RADEON_USAGE_READ,
468 rview->is_stencil_sampler, true);
469 } else {
470 pipe_sampler_view_reference(&views->views[slot], NULL);
471 memcpy(descs->list + slot*16, null_texture_descriptor, 8*4);
472 /* Only clear the lower dwords of FMASK. */
473 memcpy(descs->list + slot*16 + 8, null_texture_descriptor, 4*4);
474 views->enabled_mask &= ~(1u << slot);
475 }
476
477 descs->dirty_mask |= 1u << slot;
478 sctx->descriptors_dirty |= 1u << si_sampler_descriptors_idx(shader);
479 }
480
481 static bool is_compressed_colortex(struct r600_texture *rtex)
482 {
483 return rtex->cmask.size || rtex->fmask.size ||
484 (rtex->dcc_offset && rtex->dirty_level_mask);
485 }
486
487 static void si_set_sampler_views(struct pipe_context *ctx,
488 enum pipe_shader_type shader, unsigned start,
489 unsigned count,
490 struct pipe_sampler_view **views)
491 {
492 struct si_context *sctx = (struct si_context *)ctx;
493 struct si_textures_info *samplers = &sctx->samplers[shader];
494 int i;
495
496 if (!count || shader >= SI_NUM_SHADERS)
497 return;
498
499 for (i = 0; i < count; i++) {
500 unsigned slot = start + i;
501
502 if (!views || !views[i]) {
503 samplers->depth_texture_mask &= ~(1u << slot);
504 samplers->compressed_colortex_mask &= ~(1u << slot);
505 si_set_sampler_view(sctx, shader, slot, NULL, false);
506 continue;
507 }
508
509 si_set_sampler_view(sctx, shader, slot, views[i], false);
510
511 if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
512 struct r600_texture *rtex =
513 (struct r600_texture*)views[i]->texture;
514 struct si_sampler_view *rview = (struct si_sampler_view *)views[i];
515
516 if (rtex->db_compatible &&
517 (!rtex->tc_compatible_htile || rview->is_stencil_sampler)) {
518 samplers->depth_texture_mask |= 1u << slot;
519 } else {
520 samplers->depth_texture_mask &= ~(1u << slot);
521 }
522 if (is_compressed_colortex(rtex)) {
523 samplers->compressed_colortex_mask |= 1u << slot;
524 } else {
525 samplers->compressed_colortex_mask &= ~(1u << slot);
526 }
527
528 if (rtex->dcc_offset &&
529 p_atomic_read(&rtex->framebuffers_bound))
530 sctx->need_check_render_feedback = true;
531 } else {
532 samplers->depth_texture_mask &= ~(1u << slot);
533 samplers->compressed_colortex_mask &= ~(1u << slot);
534 }
535 }
536 }
537
538 static void
539 si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
540 {
541 unsigned mask = samplers->views.enabled_mask;
542
543 while (mask) {
544 int i = u_bit_scan(&mask);
545 struct pipe_resource *res = samplers->views.views[i]->texture;
546
547 if (res && res->target != PIPE_BUFFER) {
548 struct r600_texture *rtex = (struct r600_texture *)res;
549
550 if (is_compressed_colortex(rtex)) {
551 samplers->compressed_colortex_mask |= 1u << i;
552 } else {
553 samplers->compressed_colortex_mask &= ~(1u << i);
554 }
555 }
556 }
557 }
558
559 /* IMAGE VIEWS */
560
561 static unsigned
562 si_image_descriptors_idx(unsigned shader)
563 {
564 return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
565 SI_SHADER_DESCS_IMAGES;
566 }
567
568 static struct si_descriptors*
569 si_image_descriptors(struct si_context *sctx, unsigned shader)
570 {
571 return &sctx->descriptors[si_image_descriptors_idx(shader)];
572 }
573
574 static void
575 si_release_image_views(struct si_images_info *images)
576 {
577 unsigned i;
578
579 for (i = 0; i < SI_NUM_IMAGES; ++i) {
580 struct pipe_image_view *view = &images->views[i];
581
582 pipe_resource_reference(&view->resource, NULL);
583 }
584 }
585
586 static void
587 si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
588 {
589 uint mask = images->enabled_mask;
590
591 /* Add buffers to the CS. */
592 while (mask) {
593 int i = u_bit_scan(&mask);
594 struct pipe_image_view *view = &images->views[i];
595
596 assert(view->resource);
597
598 si_sampler_view_add_buffer(sctx, view->resource,
599 RADEON_USAGE_READWRITE, false, false);
600 }
601 }
602
603 static void
604 si_disable_shader_image(struct si_context *ctx, unsigned shader, unsigned slot)
605 {
606 struct si_images_info *images = &ctx->images[shader];
607
608 if (images->enabled_mask & (1u << slot)) {
609 struct si_descriptors *descs = si_image_descriptors(ctx, shader);
610
611 pipe_resource_reference(&images->views[slot].resource, NULL);
612 images->compressed_colortex_mask &= ~(1 << slot);
613
614 memcpy(descs->list + slot*8, null_image_descriptor, 8*4);
615 images->enabled_mask &= ~(1u << slot);
616 descs->dirty_mask |= 1u << slot;
617 ctx->descriptors_dirty |= 1u << si_image_descriptors_idx(shader);
618 }
619 }
620
621 static void
622 si_mark_image_range_valid(const struct pipe_image_view *view)
623 {
624 struct r600_resource *res = (struct r600_resource *)view->resource;
625
626 assert(res && res->b.b.target == PIPE_BUFFER);
627
628 util_range_add(&res->valid_buffer_range,
629 view->u.buf.offset,
630 view->u.buf.offset + view->u.buf.size);
631 }
632
633 static void si_set_shader_image(struct si_context *ctx,
634 unsigned shader,
635 unsigned slot, const struct pipe_image_view *view)
636 {
637 struct si_screen *screen = ctx->screen;
638 struct si_images_info *images = &ctx->images[shader];
639 struct si_descriptors *descs = si_image_descriptors(ctx, shader);
640 struct r600_resource *res;
641 uint32_t *desc = descs->list + slot * 8;
642
643 if (!view || !view->resource) {
644 si_disable_shader_image(ctx, shader, slot);
645 return;
646 }
647
648 res = (struct r600_resource *)view->resource;
649
650 if (&images->views[slot] != view)
651 util_copy_image_view(&images->views[slot], view);
652
653 if (res->b.b.target == PIPE_BUFFER) {
654 if (view->access & PIPE_IMAGE_ACCESS_WRITE)
655 si_mark_image_range_valid(view);
656
657 si_make_buffer_descriptor(screen, res,
658 view->format,
659 view->u.buf.offset,
660 view->u.buf.size,
661 descs->list + slot * 8);
662 si_set_buf_desc_address(res, view->u.buf.offset, desc + 4);
663
664 images->compressed_colortex_mask &= ~(1 << slot);
665 res->bind_history |= PIPE_BIND_SHADER_IMAGE;
666 } else {
667 static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
668 struct r600_texture *tex = (struct r600_texture *)res;
669 unsigned level = view->u.tex.level;
670 unsigned width, height, depth;
671 bool uses_dcc = tex->dcc_offset &&
672 level < tex->surface.num_dcc_levels;
673
674 assert(!tex->is_depth);
675 assert(tex->fmask.size == 0);
676
677 if (uses_dcc &&
678 (view->access & PIPE_IMAGE_ACCESS_WRITE ||
679 !vi_dcc_formats_compatible(res->b.b.format, view->format))) {
680 /* If DCC can't be disabled, at least decompress it.
681 * The decompression is relatively cheap if the surface
682 * has been decompressed already.
683 */
684 if (r600_texture_disable_dcc(&ctx->b, tex))
685 uses_dcc = false;
686 else
687 ctx->b.decompress_dcc(&ctx->b.b, tex);
688 }
689
690 if (is_compressed_colortex(tex)) {
691 images->compressed_colortex_mask |= 1 << slot;
692 } else {
693 images->compressed_colortex_mask &= ~(1 << slot);
694 }
695
696 if (uses_dcc &&
697 p_atomic_read(&tex->framebuffers_bound))
698 ctx->need_check_render_feedback = true;
699
700 /* Always force the base level to the selected level.
701 *
702 * This is required for 3D textures, where otherwise
703 * selecting a single slice for non-layered bindings
704 * fails. It doesn't hurt the other targets.
705 */
706 width = u_minify(res->b.b.width0, level);
707 height = u_minify(res->b.b.height0, level);
708 depth = u_minify(res->b.b.depth0, level);
709
710 si_make_texture_descriptor(screen, tex,
711 false, res->b.b.target,
712 view->format, swizzle,
713 0, 0,
714 view->u.tex.first_layer,
715 view->u.tex.last_layer,
716 width, height, depth,
717 desc, NULL);
718 si_set_mutable_tex_desc_fields(tex, &tex->surface.level[level],
719 level, level,
720 util_format_get_blockwidth(view->format),
721 false, desc);
722 }
723
724 images->enabled_mask |= 1u << slot;
725 descs->dirty_mask |= 1u << slot;
726 ctx->descriptors_dirty |= 1u << si_image_descriptors_idx(shader);
727
728 /* Since this can flush, it must be done after enabled_mask is updated. */
729 si_sampler_view_add_buffer(ctx, &res->b.b,
730 RADEON_USAGE_READWRITE, false, true);
731 }
732
733 static void
734 si_set_shader_images(struct pipe_context *pipe,
735 enum pipe_shader_type shader,
736 unsigned start_slot, unsigned count,
737 const struct pipe_image_view *views)
738 {
739 struct si_context *ctx = (struct si_context *)pipe;
740 unsigned i, slot;
741
742 assert(shader < SI_NUM_SHADERS);
743
744 if (!count)
745 return;
746
747 assert(start_slot + count <= SI_NUM_IMAGES);
748
749 if (views) {
750 for (i = 0, slot = start_slot; i < count; ++i, ++slot)
751 si_set_shader_image(ctx, shader, slot, &views[i]);
752 } else {
753 for (i = 0, slot = start_slot; i < count; ++i, ++slot)
754 si_set_shader_image(ctx, shader, slot, NULL);
755 }
756 }
757
758 static void
759 si_images_update_compressed_colortex_mask(struct si_images_info *images)
760 {
761 unsigned mask = images->enabled_mask;
762
763 while (mask) {
764 int i = u_bit_scan(&mask);
765 struct pipe_resource *res = images->views[i].resource;
766
767 if (res && res->target != PIPE_BUFFER) {
768 struct r600_texture *rtex = (struct r600_texture *)res;
769
770 if (is_compressed_colortex(rtex)) {
771 images->compressed_colortex_mask |= 1 << i;
772 } else {
773 images->compressed_colortex_mask &= ~(1 << i);
774 }
775 }
776 }
777 }
778
779 /* SAMPLER STATES */
780
781 static void si_bind_sampler_states(struct pipe_context *ctx,
782 enum pipe_shader_type shader,
783 unsigned start, unsigned count, void **states)
784 {
785 struct si_context *sctx = (struct si_context *)ctx;
786 struct si_textures_info *samplers = &sctx->samplers[shader];
787 struct si_descriptors *desc = si_sampler_descriptors(sctx, shader);
788 struct si_sampler_state **sstates = (struct si_sampler_state**)states;
789 int i;
790
791 if (!count || shader >= SI_NUM_SHADERS)
792 return;
793
794 for (i = 0; i < count; i++) {
795 unsigned slot = start + i;
796
797 if (!sstates[i] ||
798 sstates[i] == samplers->views.sampler_states[slot])
799 continue;
800
801 samplers->views.sampler_states[slot] = sstates[i];
802
803 /* If FMASK is bound, don't overwrite it.
804 * The sampler state will be set after FMASK is unbound.
805 */
806 		if (samplers->views.views[slot] &&
807 		    samplers->views.views[slot]->texture &&
808 		    samplers->views.views[slot]->texture->target != PIPE_BUFFER &&
809 		    ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
810 continue;
811
812 memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
813 desc->dirty_mask |= 1u << slot;
814 sctx->descriptors_dirty |= 1u << si_sampler_descriptors_idx(shader);
815 }
816 }
817
818 /* BUFFER RESOURCES */
819
820 static void si_init_buffer_resources(struct si_buffer_resources *buffers,
821 struct si_descriptors *descs,
822 unsigned num_buffers,
823 unsigned shader_userdata_index,
824 enum radeon_bo_usage shader_usage,
825 enum radeon_bo_priority priority,
826 unsigned *ce_offset)
827 {
828 buffers->shader_usage = shader_usage;
829 buffers->priority = priority;
830 buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
831
832 si_init_descriptors(descs, shader_userdata_index, 4,
833 num_buffers, NULL, ce_offset);
834 }
835
836 static void si_release_buffer_resources(struct si_buffer_resources *buffers,
837 struct si_descriptors *descs)
838 {
839 int i;
840
841 for (i = 0; i < descs->num_elements; i++) {
842 pipe_resource_reference(&buffers->buffers[i], NULL);
843 }
844
845 FREE(buffers->buffers);
846 }
847
848 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
849 struct si_buffer_resources *buffers)
850 {
851 unsigned mask = buffers->enabled_mask;
852
853 /* Add buffers to the CS. */
854 while (mask) {
855 int i = u_bit_scan(&mask);
856
857 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
858 (struct r600_resource*)buffers->buffers[i],
859 buffers->shader_usage, buffers->priority);
860 }
861 }
862
863 static void si_get_buffer_from_descriptors(struct si_buffer_resources *buffers,
864 struct si_descriptors *descs,
865 unsigned idx, struct pipe_resource **buf,
866 unsigned *offset, unsigned *size)
867 {
868 pipe_resource_reference(buf, buffers->buffers[idx]);
869 if (*buf) {
870 struct r600_resource *res = r600_resource(*buf);
871 const uint32_t *desc = descs->list + idx * 4;
872 uint64_t va;
873
874 *size = desc[2];
875
876 assert(G_008F04_STRIDE(desc[1]) == 0);
877 va = ((uint64_t)desc[1] << 32) | desc[0];
878
879 assert(va >= res->gpu_address && va + *size <= res->gpu_address + res->bo_size);
880 *offset = va - res->gpu_address;
881 }
882 }
883
884 /* VERTEX BUFFERS */
885
886 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
887 {
888 struct si_descriptors *desc = &sctx->vertex_buffers;
889 int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
890 int i;
891
892 for (i = 0; i < count; i++) {
893 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
894
895 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
896 continue;
897 if (!sctx->vertex_buffer[vb].buffer)
898 continue;
899
900 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
901 (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
902 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
903 }
904
905 if (!desc->buffer)
906 return;
907 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
908 desc->buffer, RADEON_USAGE_READ,
909 RADEON_PRIO_DESCRIPTORS);
910 }
911
912 bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
913 {
914 struct si_descriptors *desc = &sctx->vertex_buffers;
915 bool bound[SI_NUM_VERTEX_BUFFERS] = {};
916 	unsigned i, count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
917 uint64_t va;
918 uint32_t *ptr;
919
920 if (!sctx->vertex_buffers_dirty)
921 return true;
922 	if (!count)
923 return true;
924
925 /* Vertex buffer descriptors are the only ones which are uploaded
926 * directly through a staging buffer and don't go through
927 * the fine-grained upload path.
928 */
929 u_upload_alloc(sctx->b.uploader, 0, count * 16, 256, &desc->buffer_offset,
930 (struct pipe_resource**)&desc->buffer, (void**)&ptr);
931 if (!desc->buffer)
932 return false;
933
934 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
935 desc->buffer, RADEON_USAGE_READ,
936 RADEON_PRIO_DESCRIPTORS);
937
938 assert(count <= SI_NUM_VERTEX_BUFFERS);
939
940 for (i = 0; i < count; i++) {
941 struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
942 struct pipe_vertex_buffer *vb;
943 struct r600_resource *rbuffer;
944 unsigned offset;
945 uint32_t *desc = &ptr[i*4];
946
947 if (ve->vertex_buffer_index >= ARRAY_SIZE(sctx->vertex_buffer)) {
948 memset(desc, 0, 16);
949 continue;
950 }
951
952 vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
953 rbuffer = (struct r600_resource*)vb->buffer;
954 if (!rbuffer) {
955 memset(desc, 0, 16);
956 continue;
957 }
958
959 offset = vb->buffer_offset + ve->src_offset;
960 va = rbuffer->gpu_address + offset;
961
962 /* Fill in T# buffer resource description */
963 desc[0] = va;
964 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
965 S_008F04_STRIDE(vb->stride);
966
967 if (sctx->b.chip_class <= CIK && vb->stride) {
968 /* Round up by rounding down and adding 1 */
969 desc[2] = (vb->buffer->width0 - offset -
970 sctx->vertex_elements->format_size[i]) /
971 vb->stride + 1;
972 } else {
973 uint32_t size3;
974
975 desc[2] = vb->buffer->width0 - offset;
976
977 /* For attributes of size 3 with byte or short
978 * components, we use a 4-component data format.
979 *
980 * As a consequence, we have to round the buffer size
981 * up so that the hardware sees four components as
982 * being inside the buffer if and only if the first
983 * three components are in the buffer.
984 *
985 * Since the offset and stride are guaranteed to be
986 * 4-byte aligned, this alignment will never cross the
987 * winsys buffer boundary.
988 */
989 size3 = (sctx->vertex_elements->fix_size3 >> (2 * i)) & 3;
990 if (vb->stride && size3) {
991 assert(offset % 4 == 0 && vb->stride % 4 == 0);
992 assert(size3 <= 2);
993 desc[2] = align(desc[2], size3 * 2);
994 }
995 }
996
997 desc[3] = sctx->vertex_elements->rsrc_word3[i];
998
999 if (!bound[ve->vertex_buffer_index]) {
1000 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1001 (struct r600_resource*)vb->buffer,
1002 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
1003 bound[ve->vertex_buffer_index] = true;
1004 }
1005 }
1006
1007 /* Don't flush the const cache. It would have a very negative effect
1008 * on performance (confirmed by testing). New descriptors are always
1009 * uploaded to a fresh new buffer, so I don't think flushing the const
1010 * cache is needed. */
1011 desc->pointer_dirty = true;
1012 si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
1013 sctx->vertex_buffers_dirty = false;
1014 return true;
1015 }
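
/* Worked example (illustrative numbers) for the 3-component rounding above:
 * an R8G8B8 attribute with stride 4 and offset 0 in a 19-byte vertex buffer
 * has its last vertex at bytes [16:18]. Since the hardware fetches it
 * through a 4-component byte format, it checks bytes [16:19] against
 * desc[2]; with desc[2] = 19 the last vertex would be rejected as out of
 * bounds. align(19, size3 * 2) = 20 (size3 presumably being 1 for byte
 * components) makes the padding byte count as in bounds, while the 4-byte
 * alignment of offset and stride guarantees the rounding never admits a
 * vertex whose first three components aren't actually in the buffer, and
 * the winsys allocation is large enough for the extra byte to exist.
 */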
1016
1017
1018 /* CONSTANT BUFFERS */
1019
1020 static unsigned
1021 si_const_buffer_descriptors_idx(unsigned shader)
1022 {
1023 return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
1024 SI_SHADER_DESCS_CONST_BUFFERS;
1025 }
1026
1027 static struct si_descriptors *
1028 si_const_buffer_descriptors(struct si_context *sctx, unsigned shader)
1029 {
1030 return &sctx->descriptors[si_const_buffer_descriptors_idx(shader)];
1031 }
1032
1033 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
1034 const uint8_t *ptr, unsigned size, uint32_t *const_offset)
1035 {
1036 void *tmp;
1037
1038 u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
1039 (struct pipe_resource**)rbuffer, &tmp);
1040 if (*rbuffer)
1041 util_memcpy_cpu_to_le32(tmp, ptr, size);
1042 }
1043
1044 static void si_set_constant_buffer(struct si_context *sctx,
1045 struct si_buffer_resources *buffers,
1046 unsigned descriptors_idx,
1047 uint slot, const struct pipe_constant_buffer *input)
1048 {
1049 struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
1050 assert(slot < descs->num_elements);
1051 pipe_resource_reference(&buffers->buffers[slot], NULL);
1052
1053 /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
1054 * with a NULL buffer). We need to use a dummy buffer instead. */
1055 if (sctx->b.chip_class == CIK &&
1056 (!input || (!input->buffer && !input->user_buffer)))
1057 input = &sctx->null_const_buf;
1058
1059 if (input && (input->buffer || input->user_buffer)) {
1060 struct pipe_resource *buffer = NULL;
1061 uint64_t va;
1062
1063 /* Upload the user buffer if needed. */
1064 if (input->user_buffer) {
1065 unsigned buffer_offset;
1066
1067 si_upload_const_buffer(sctx,
1068 (struct r600_resource**)&buffer, input->user_buffer,
1069 input->buffer_size, &buffer_offset);
1070 if (!buffer) {
1071 /* Just unbind on failure. */
1072 si_set_constant_buffer(sctx, buffers, descriptors_idx, slot, NULL);
1073 return;
1074 }
1075 va = r600_resource(buffer)->gpu_address + buffer_offset;
1076 } else {
1077 pipe_resource_reference(&buffer, input->buffer);
1078 va = r600_resource(buffer)->gpu_address + input->buffer_offset;
1079 /* Only track usage for non-user buffers. */
1080 r600_resource(buffer)->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
1081 }
1082
1083 /* Set the descriptor. */
1084 uint32_t *desc = descs->list + slot*4;
1085 desc[0] = va;
1086 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1087 S_008F04_STRIDE(0);
1088 desc[2] = input->buffer_size;
1089 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1090 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1091 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1092 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1093 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1094 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1095
1096 buffers->buffers[slot] = buffer;
1097 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1098 (struct r600_resource*)buffer,
1099 buffers->shader_usage,
1100 buffers->priority, true);
1101 buffers->enabled_mask |= 1u << slot;
1102 } else {
1103 /* Clear the descriptor. */
1104 memset(descs->list + slot*4, 0, sizeof(uint32_t) * 4);
1105 buffers->enabled_mask &= ~(1u << slot);
1106 }
1107
1108 descs->dirty_mask |= 1u << slot;
1109 sctx->descriptors_dirty |= 1u << descriptors_idx;
1110 }
1111
1112 void si_set_rw_buffer(struct si_context *sctx,
1113 uint slot, const struct pipe_constant_buffer *input)
1114 {
1115 si_set_constant_buffer(sctx, &sctx->rw_buffers,
1116 SI_DESCS_RW_BUFFERS, slot, input);
1117 }
1118
1119 static void si_pipe_set_constant_buffer(struct pipe_context *ctx,
1120 uint shader, uint slot,
1121 const struct pipe_constant_buffer *input)
1122 {
1123 struct si_context *sctx = (struct si_context *)ctx;
1124
1125 if (shader >= SI_NUM_SHADERS)
1126 return;
1127
1128 si_set_constant_buffer(sctx, &sctx->const_buffers[shader],
1129 si_const_buffer_descriptors_idx(shader),
1130 slot, input);
1131 }
1132
1133 void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader,
1134 uint slot, struct pipe_constant_buffer *cbuf)
1135 {
1136 cbuf->user_buffer = NULL;
1137 si_get_buffer_from_descriptors(
1138 &sctx->const_buffers[shader],
1139 si_const_buffer_descriptors(sctx, shader),
1140 slot, &cbuf->buffer, &cbuf->buffer_offset, &cbuf->buffer_size);
1141 }
1142
1143 /* SHADER BUFFERS */
1144
1145 static unsigned
1146 si_shader_buffer_descriptors_idx(enum pipe_shader_type shader)
1147 {
1148 return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
1149 SI_SHADER_DESCS_SHADER_BUFFERS;
1150 }
1151
1152 static struct si_descriptors *
1153 si_shader_buffer_descriptors(struct si_context *sctx,
1154 enum pipe_shader_type shader)
1155 {
1156 return &sctx->descriptors[si_shader_buffer_descriptors_idx(shader)];
1157 }
1158
1159 static void si_set_shader_buffers(struct pipe_context *ctx,
1160 enum pipe_shader_type shader,
1161 unsigned start_slot, unsigned count,
1162 const struct pipe_shader_buffer *sbuffers)
1163 {
1164 struct si_context *sctx = (struct si_context *)ctx;
1165 struct si_buffer_resources *buffers = &sctx->shader_buffers[shader];
1166 struct si_descriptors *descs = si_shader_buffer_descriptors(sctx, shader);
1167 unsigned i;
1168
1169 assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
1170
1171 for (i = 0; i < count; ++i) {
1172 const struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
1173 struct r600_resource *buf;
1174 unsigned slot = start_slot + i;
1175 uint32_t *desc = descs->list + slot * 4;
1176 uint64_t va;
1177
1178 if (!sbuffer || !sbuffer->buffer) {
1179 pipe_resource_reference(&buffers->buffers[slot], NULL);
1180 memset(desc, 0, sizeof(uint32_t) * 4);
1181 buffers->enabled_mask &= ~(1u << slot);
1182 descs->dirty_mask |= 1u << slot;
1183 sctx->descriptors_dirty |=
1184 1u << si_shader_buffer_descriptors_idx(shader);
1185 continue;
1186 }
1187
1188 buf = (struct r600_resource *)sbuffer->buffer;
1189 va = buf->gpu_address + sbuffer->buffer_offset;
1190
1191 desc[0] = va;
1192 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1193 S_008F04_STRIDE(0);
1194 desc[2] = sbuffer->buffer_size;
1195 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1196 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1197 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1198 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1199 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1200 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1201
1202 pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
1203 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx, buf,
1204 buffers->shader_usage,
1205 buffers->priority, true);
1206 buf->bind_history |= PIPE_BIND_SHADER_BUFFER;
1207
1208 buffers->enabled_mask |= 1u << slot;
1209 descs->dirty_mask |= 1u << slot;
1210 sctx->descriptors_dirty |=
1211 1u << si_shader_buffer_descriptors_idx(shader);
1212 }
1213 }
1214
1215 void si_get_shader_buffers(struct si_context *sctx, uint shader,
1216 uint start_slot, uint count,
1217 struct pipe_shader_buffer *sbuf)
1218 {
1219 struct si_buffer_resources *buffers = &sctx->shader_buffers[shader];
1220 struct si_descriptors *descs = si_shader_buffer_descriptors(sctx, shader);
1221
1222 for (unsigned i = 0; i < count; ++i) {
1223 si_get_buffer_from_descriptors(
1224 buffers, descs, start_slot + i,
1225 &sbuf[i].buffer, &sbuf[i].buffer_offset,
1226 &sbuf[i].buffer_size);
1227 }
1228 }
1229
1230 /* RING BUFFERS */
1231
1232 void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
1233 struct pipe_resource *buffer,
1234 unsigned stride, unsigned num_records,
1235 bool add_tid, bool swizzle,
1236 unsigned element_size, unsigned index_stride, uint64_t offset)
1237 {
1238 struct si_context *sctx = (struct si_context *)ctx;
1239 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1240 struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1241
1242 /* The stride field in the resource descriptor has 14 bits */
1243 assert(stride < (1 << 14));
1244
1245 assert(slot < descs->num_elements);
1246 pipe_resource_reference(&buffers->buffers[slot], NULL);
1247
1248 if (buffer) {
1249 uint64_t va;
1250
1251 va = r600_resource(buffer)->gpu_address + offset;
1252
1253 switch (element_size) {
1254 default:
1255 assert(!"Unsupported ring buffer element size");
1256 case 0:
1257 case 2:
1258 element_size = 0;
1259 break;
1260 case 4:
1261 element_size = 1;
1262 break;
1263 case 8:
1264 element_size = 2;
1265 break;
1266 case 16:
1267 element_size = 3;
1268 break;
1269 }
1270
1271 switch (index_stride) {
1272 default:
1273 assert(!"Unsupported ring buffer index stride");
1274 case 0:
1275 case 8:
1276 index_stride = 0;
1277 break;
1278 case 16:
1279 index_stride = 1;
1280 break;
1281 case 32:
1282 index_stride = 2;
1283 break;
1284 case 64:
1285 index_stride = 3;
1286 break;
1287 }
1288
1289 if (sctx->b.chip_class >= VI && stride)
1290 num_records *= stride;
1291
1292 /* Set the descriptor. */
1293 uint32_t *desc = descs->list + slot*4;
1294 desc[0] = va;
1295 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1296 S_008F04_STRIDE(stride) |
1297 S_008F04_SWIZZLE_ENABLE(swizzle);
1298 desc[2] = num_records;
1299 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1300 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1301 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1302 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1303 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1304 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1305 S_008F0C_ELEMENT_SIZE(element_size) |
1306 S_008F0C_INDEX_STRIDE(index_stride) |
1307 S_008F0C_ADD_TID_ENABLE(add_tid);
1308
1309 pipe_resource_reference(&buffers->buffers[slot], buffer);
1310 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1311 (struct r600_resource*)buffer,
1312 buffers->shader_usage, buffers->priority);
1313 buffers->enabled_mask |= 1u << slot;
1314 } else {
1315 /* Clear the descriptor. */
1316 memset(descs->list + slot*4, 0, sizeof(uint32_t) * 4);
1317 buffers->enabled_mask &= ~(1u << slot);
1318 }
1319
1320 descs->dirty_mask |= 1u << slot;
1321 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
1322 }
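
/* Illustrative sketch (hypothetical helpers, unused by the driver; assumes
 * util_logbase2() from util/u_math.h is reachable here): the two switch
 * statements above encode the element size and index stride as log2 values
 * relative to the smallest supported setting, i.e. element sizes 2/4/8/16
 * bytes map to 0/1/2/3 and index strides 8/16/32/64 bytes map to 0/1/2/3.
 */
static inline unsigned si_example_encode_element_size(unsigned bytes)
{
	return bytes ? util_logbase2(bytes) - 1 : 0;
}

static inline unsigned si_example_encode_index_stride(unsigned bytes)
{
	return bytes ? util_logbase2(bytes) - 3 : 0;
}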
1323
1324 /* STREAMOUT BUFFERS */
1325
1326 static void si_set_streamout_targets(struct pipe_context *ctx,
1327 unsigned num_targets,
1328 struct pipe_stream_output_target **targets,
1329 const unsigned *offsets)
1330 {
1331 struct si_context *sctx = (struct si_context *)ctx;
1332 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1333 struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1334 unsigned old_num_targets = sctx->b.streamout.num_targets;
1335 unsigned i, bufidx;
1336
1337 /* We are going to unbind the buffers. Mark which caches need to be flushed. */
1338 if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
1339 /* Since streamout uses vector writes which go through TC L2
1340 * and most other clients can use TC L2 as well, we don't need
1341 * to flush it.
1342 *
1343 * The only cases which require flushing it are VGT DMA index
1344 * fetching (on <= CIK) and indirect draw data, which are rare
1345 * cases. Thus, flag the TC L2 dirtiness in the resource and
1346 * handle it at draw call time.
1347 */
1348 for (i = 0; i < sctx->b.streamout.num_targets; i++)
1349 if (sctx->b.streamout.targets[i])
1350 r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
1351
1352 /* Invalidate the scalar cache in case a streamout buffer is
1353 * going to be used as a constant buffer.
1354 *
1355 * Invalidate TC L1, because streamout bypasses it (done by
1356 * setting GLC=1 in the store instruction), but it can contain
1357 * outdated data of streamout buffers.
1358 *
1359 * VS_PARTIAL_FLUSH is required if the buffers are going to be
1360 * used as an input immediately.
1361 */
1362 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
1363 SI_CONTEXT_INV_VMEM_L1 |
1364 SI_CONTEXT_VS_PARTIAL_FLUSH;
1365 }
1366
1367 /* All readers of the streamout targets need to be finished before we can
1368 * start writing to the targets.
1369 */
1370 if (num_targets)
1371 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
1372 SI_CONTEXT_CS_PARTIAL_FLUSH;
1373
1374 /* Streamout buffers must be bound in 2 places:
1375 * 1) in VGT by setting the VGT_STRMOUT registers
1376 * 2) as shader resources
1377 */
1378
1379 /* Set the VGT regs. */
1380 r600_set_streamout_targets(ctx, num_targets, targets, offsets);
1381
1382 	/* Set the shader resources. */
1383 for (i = 0; i < num_targets; i++) {
1384 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1385
1386 if (targets[i]) {
1387 struct pipe_resource *buffer = targets[i]->buffer;
1388 uint64_t va = r600_resource(buffer)->gpu_address;
1389
1390 /* Set the descriptor.
1391 *
1392 * On VI, the format must be non-INVALID, otherwise
1393 * the buffer will be considered not bound and store
1394 * instructions will be no-ops.
1395 */
1396 uint32_t *desc = descs->list + bufidx*4;
1397 desc[0] = va;
1398 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
1399 desc[2] = 0xffffffff;
1400 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1401 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1402 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1403 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1404 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1405
1406 /* Set the resource. */
1407 pipe_resource_reference(&buffers->buffers[bufidx],
1408 buffer);
1409 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1410 (struct r600_resource*)buffer,
1411 buffers->shader_usage,
1412 RADEON_PRIO_SHADER_RW_BUFFER,
1413 true);
1414 r600_resource(buffer)->bind_history |= PIPE_BIND_STREAM_OUTPUT;
1415
1416 buffers->enabled_mask |= 1u << bufidx;
1417 } else {
1418 /* Clear the descriptor and unset the resource. */
1419 memset(descs->list + bufidx*4, 0,
1420 sizeof(uint32_t) * 4);
1421 pipe_resource_reference(&buffers->buffers[bufidx],
1422 NULL);
1423 buffers->enabled_mask &= ~(1u << bufidx);
1424 }
1425 descs->dirty_mask |= 1u << bufidx;
1426 }
1427 for (; i < old_num_targets; i++) {
1428 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1429 /* Clear the descriptor and unset the resource. */
1430 memset(descs->list + bufidx*4, 0, sizeof(uint32_t) * 4);
1431 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
1432 buffers->enabled_mask &= ~(1u << bufidx);
1433 descs->dirty_mask |= 1u << bufidx;
1434 }
1435
1436 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
1437 }
1438
1439 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
1440 uint32_t *desc, uint64_t old_buf_va,
1441 struct pipe_resource *new_buf)
1442 {
1443 /* Retrieve the buffer offset from the descriptor. */
1444 uint64_t old_desc_va =
1445 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
1446
1447 assert(old_buf_va <= old_desc_va);
1448 uint64_t offset_within_buffer = old_desc_va - old_buf_va;
1449
1450 /* Update the descriptor. */
1451 si_set_buf_desc_address(r600_resource(new_buf), offset_within_buffer,
1452 desc);
1453 }
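
/* Worked example (illustrative addresses) for the offset reconstruction
 * above: if the old buffer's GPU address was 0x100000000 and the descriptor
 * pointed at 0x100000100, offset_within_buffer is 0x100, so the rebound
 * descriptor points 0x100 bytes into the reallocated buffer regardless of
 * its new GPU address.
 */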
1454
1455 /* INTERNAL CONST BUFFERS */
1456
1457 static void si_set_polygon_stipple(struct pipe_context *ctx,
1458 const struct pipe_poly_stipple *state)
1459 {
1460 struct si_context *sctx = (struct si_context *)ctx;
1461 struct pipe_constant_buffer cb = {};
1462 unsigned stipple[32];
1463 int i;
1464
1465 for (i = 0; i < 32; i++)
1466 stipple[i] = util_bitreverse(state->stipple[i]);
1467
1468 cb.user_buffer = stipple;
1469 cb.buffer_size = sizeof(stipple);
1470
1471 si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE, &cb);
1472 }
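
/* Worked example (illustrative values): util_bitreverse() mirrors each
 * 32-bit stipple row before upload, e.g. 0x00000001 becomes 0x80000000 and
 * 0x0000ffff becomes 0xffff0000, so the constant buffer holds each row
 * bit-reversed relative to pipe_poly_stipple's convention.
 */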
1473
1474 /* TEXTURE METADATA ENABLE/DISABLE */
1475
1476 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
1477 * while the texture is bound, possibly by a different context. In that case,
1478 * call this function to update compressed_colortex_masks.
1479 */
1480 void si_update_compressed_colortex_masks(struct si_context *sctx)
1481 {
1482 for (int i = 0; i < SI_NUM_SHADERS; ++i) {
1483 si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
1484 si_images_update_compressed_colortex_mask(&sctx->images[i]);
1485 }
1486 }
1487
1488 /* BUFFER DISCARD/INVALIDATION */
1489
1490 /** Reset descriptors of buffer resources after \p buf has been invalidated. */
1491 static void si_reset_buffer_resources(struct si_context *sctx,
1492 struct si_buffer_resources *buffers,
1493 unsigned descriptors_idx,
1494 struct pipe_resource *buf,
1495 uint64_t old_va)
1496 {
1497 struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
1498 unsigned mask = buffers->enabled_mask;
1499
1500 while (mask) {
1501 unsigned i = u_bit_scan(&mask);
1502 if (buffers->buffers[i] == buf) {
1503 si_desc_reset_buffer_offset(&sctx->b.b,
1504 descs->list + i*4,
1505 old_va, buf);
1506 descs->dirty_mask |= 1u << i;
1507 sctx->descriptors_dirty |= 1u << descriptors_idx;
1508
1509 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1510 (struct r600_resource *)buf,
1511 buffers->shader_usage,
1512 buffers->priority, true);
1513 }
1514 }
1515 }
1516
1517 /* Reallocate a buffer and update all resource bindings where the buffer is
1518 * bound.
1519 *
1520 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
1521 * idle by discarding its contents. Apps usually tell us when to do this using
1522 * map_buffer flags, for example.
1523 */
1524 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
1525 {
1526 struct si_context *sctx = (struct si_context*)ctx;
1527 struct r600_resource *rbuffer = r600_resource(buf);
1528 unsigned i, shader;
1529 uint64_t old_va = rbuffer->gpu_address;
1530 unsigned num_elems = sctx->vertex_elements ?
1531 sctx->vertex_elements->count : 0;
1532
1533 /* Reallocate the buffer in the same pipe_resource. */
1534 r600_alloc_resource(&sctx->screen->b, rbuffer);
1535
1536 /* We changed the buffer, now we need to bind it where the old one
1537 * was bound. This consists of 2 things:
1538 * 1) Updating the resource descriptor and dirtying it.
1539 * 2) Adding a relocation to the CS, so that it's usable.
1540 */
1541
1542 /* Vertex buffers. */
1543 if (rbuffer->bind_history & PIPE_BIND_VERTEX_BUFFER) {
1544 for (i = 0; i < num_elems; i++) {
1545 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
1546
1547 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
1548 continue;
1549 if (!sctx->vertex_buffer[vb].buffer)
1550 continue;
1551
1552 if (sctx->vertex_buffer[vb].buffer == buf) {
1553 sctx->vertex_buffers_dirty = true;
1554 break;
1555 }
1556 }
1557 }
1558
1559 /* Streamout buffers. (other internal buffers can't be invalidated) */
1560 if (rbuffer->bind_history & PIPE_BIND_STREAM_OUTPUT) {
1561 for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
1562 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1563 struct si_descriptors *descs =
1564 &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1565
1566 if (buffers->buffers[i] != buf)
1567 continue;
1568
1569 si_desc_reset_buffer_offset(ctx, descs->list + i*4,
1570 old_va, buf);
1571 descs->dirty_mask |= 1u << i;
1572 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
1573
1574 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1575 rbuffer, buffers->shader_usage,
1576 RADEON_PRIO_SHADER_RW_BUFFER,
1577 true);
1578
1579 /* Update the streamout state. */
1580 if (sctx->b.streamout.begin_emitted)
1581 r600_emit_streamout_end(&sctx->b);
1582 sctx->b.streamout.append_bitmask =
1583 sctx->b.streamout.enabled_mask;
1584 r600_streamout_buffers_dirty(&sctx->b);
1585 }
1586 }
1587
1588 /* Constant and shader buffers. */
1589 if (rbuffer->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
1590 for (shader = 0; shader < SI_NUM_SHADERS; shader++)
1591 si_reset_buffer_resources(sctx, &sctx->const_buffers[shader],
1592 si_const_buffer_descriptors_idx(shader),
1593 buf, old_va);
1594 }
1595
1596 if (rbuffer->bind_history & PIPE_BIND_SHADER_BUFFER) {
1597 for (shader = 0; shader < SI_NUM_SHADERS; shader++)
1598 si_reset_buffer_resources(sctx, &sctx->shader_buffers[shader],
1599 si_shader_buffer_descriptors_idx(shader),
1600 buf, old_va);
1601 }
1602
1603 if (rbuffer->bind_history & PIPE_BIND_SAMPLER_VIEW) {
1604 /* Texture buffers - update bindings. */
1605 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1606 struct si_sampler_views *views = &sctx->samplers[shader].views;
1607 struct si_descriptors *descs =
1608 si_sampler_descriptors(sctx, shader);
1609 unsigned mask = views->enabled_mask;
1610
1611 while (mask) {
1612 unsigned i = u_bit_scan(&mask);
1613 if (views->views[i]->texture == buf) {
1614 si_desc_reset_buffer_offset(ctx,
1615 descs->list +
1616 i * 16 + 4,
1617 old_va, buf);
1618 descs->dirty_mask |= 1u << i;
1619 sctx->descriptors_dirty |=
1620 1u << si_sampler_descriptors_idx(shader);
1621
1622 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1623 rbuffer, RADEON_USAGE_READ,
1624 RADEON_PRIO_SAMPLER_BUFFER,
1625 true);
1626 }
1627 }
1628 }
1629 }
1630
1631 /* Shader images */
1632 if (rbuffer->bind_history & PIPE_BIND_SHADER_IMAGE) {
1633 for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
1634 struct si_images_info *images = &sctx->images[shader];
1635 struct si_descriptors *descs =
1636 si_image_descriptors(sctx, shader);
1637 unsigned mask = images->enabled_mask;
1638
1639 while (mask) {
1640 unsigned i = u_bit_scan(&mask);
1641
1642 if (images->views[i].resource == buf) {
1643 if (images->views[i].access & PIPE_IMAGE_ACCESS_WRITE)
1644 si_mark_image_range_valid(&images->views[i]);
1645
1646 si_desc_reset_buffer_offset(
1647 ctx, descs->list + i * 8 + 4,
1648 old_va, buf);
1649 descs->dirty_mask |= 1u << i;
1650 sctx->descriptors_dirty |=
1651 1u << si_image_descriptors_idx(shader);
1652
1653 radeon_add_to_buffer_list_check_mem(
1654 &sctx->b, &sctx->b.gfx, rbuffer,
1655 RADEON_USAGE_READWRITE,
1656 RADEON_PRIO_SAMPLER_BUFFER, true);
1657 }
1658 }
1659 }
1660 }
1661 }
1662
1663 /* Update mutable image descriptor fields of all bound textures. */
1664 void si_update_all_texture_descriptors(struct si_context *sctx)
1665 {
1666 unsigned shader;
1667
1668 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1669 struct si_sampler_views *samplers = &sctx->samplers[shader].views;
1670 struct si_images_info *images = &sctx->images[shader];
1671 unsigned mask;
1672
1673 /* Images. */
1674 mask = images->enabled_mask;
1675 while (mask) {
1676 unsigned i = u_bit_scan(&mask);
1677 struct pipe_image_view *view = &images->views[i];
1678
1679 if (!view->resource ||
1680 view->resource->target == PIPE_BUFFER)
1681 continue;
1682
1683 si_set_shader_image(sctx, shader, i, view);
1684 }
1685
1686 /* Sampler views. */
1687 mask = samplers->enabled_mask;
1688 while (mask) {
1689 unsigned i = u_bit_scan(&mask);
1690 struct pipe_sampler_view *view = samplers->views[i];
1691
1692 if (!view ||
1693 !view->texture ||
1694 view->texture->target == PIPE_BUFFER)
1695 continue;
1696
1697 si_set_sampler_view(sctx, shader, i,
1698 samplers->views[i], true);
1699 }
1700 }
1701 }
1702
1703 /* SHADER USER DATA */
1704
1705 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
1706 unsigned shader)
1707 {
1708 struct si_descriptors *descs =
1709 &sctx->descriptors[SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS];
1710
1711 for (unsigned i = 0; i < SI_NUM_SHADER_DESCS; ++i, ++descs)
1712 descs->pointer_dirty = true;
1713
1714 if (shader == PIPE_SHADER_VERTEX)
1715 sctx->vertex_buffers.pointer_dirty = true;
1716
1717 si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
1718 }
1719
1720 static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
1721 {
1722 int i;
1723
1724 for (i = 0; i < SI_NUM_SHADERS; i++) {
1725 si_mark_shader_pointers_dirty(sctx, i);
1726 }
1727 sctx->descriptors[SI_DESCS_RW_BUFFERS].pointer_dirty = true;
1728 }
1729
1730 /* Set a base register address for user data constants in the given shader.
1731 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
1732 */
1733 static void si_set_user_data_base(struct si_context *sctx,
1734 unsigned shader, uint32_t new_base)
1735 {
1736 uint32_t *base = &sctx->shader_userdata.sh_base[shader];
1737
1738 if (*base != new_base) {
1739 *base = new_base;
1740
1741 if (new_base)
1742 si_mark_shader_pointers_dirty(sctx, shader);
1743 }
1744 }
1745
1746 /* This must be called when these shaders are changed from non-NULL to NULL
1747 * and vice versa:
1748 * - geometry shader
1749 * - tessellation control shader
1750 * - tessellation evaluation shader
1751 */
1752 void si_shader_change_notify(struct si_context *sctx)
1753 {
1754 /* VS can be bound as VS, ES, or LS. */
1755 if (sctx->tes_shader.cso)
1756 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1757 R_00B530_SPI_SHADER_USER_DATA_LS_0);
1758 else if (sctx->gs_shader.cso)
1759 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1760 R_00B330_SPI_SHADER_USER_DATA_ES_0);
1761 else
1762 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1763 R_00B130_SPI_SHADER_USER_DATA_VS_0);
1764
1765 /* TES can be bound as ES, VS, or not bound. */
1766 if (sctx->tes_shader.cso) {
1767 if (sctx->gs_shader.cso)
1768 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1769 R_00B330_SPI_SHADER_USER_DATA_ES_0);
1770 else
1771 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1772 R_00B130_SPI_SHADER_USER_DATA_VS_0);
1773 } else {
1774 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
1775 }
1776 }
1777
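/* Emit a SET_SH_REG packet that writes the 64-bit GPU address of a descriptor
 * list into two consecutive user data SGPRs at sh_base +
 * shader_userdata_offset. keep_dirty leaves pointer_dirty set so that the
 * same list can be emitted again for another register base.
 */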
1778 static void si_emit_shader_pointer(struct si_context *sctx,
1779 struct si_descriptors *desc,
1780 unsigned sh_base, bool keep_dirty)
1781 {
1782 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
1783 uint64_t va;
1784
1785 if (!desc->pointer_dirty || !desc->buffer)
1786 return;
1787
1788 va = desc->buffer->gpu_address +
1789 desc->buffer_offset;
1790
1791 radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
1792 radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
1793 radeon_emit(cs, va);
1794 radeon_emit(cs, va >> 32);
1795
1796 desc->pointer_dirty = keep_dirty;
1797 }
1798
1799 void si_emit_graphics_shader_userdata(struct si_context *sctx,
1800 struct r600_atom *atom)
1801 {
1802 unsigned shader;
1803 uint32_t *sh_base = sctx->shader_userdata.sh_base;
1804 struct si_descriptors *descs;
1805
1806 descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1807
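	/* The RW buffer descriptors are shared by all graphics stages, so the
	 * same pointer is broadcast to every stage's user data base. keep_dirty
	 * is true so that pointer_dirty survives until all copies are emitted.
	 */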
1808 if (descs->pointer_dirty) {
1809 si_emit_shader_pointer(sctx, descs,
1810 R_00B030_SPI_SHADER_USER_DATA_PS_0, true);
1811 si_emit_shader_pointer(sctx, descs,
1812 R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
1813 si_emit_shader_pointer(sctx, descs,
1814 R_00B230_SPI_SHADER_USER_DATA_GS_0, true);
1815 si_emit_shader_pointer(sctx, descs,
1816 R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
1817 si_emit_shader_pointer(sctx, descs,
1818 R_00B430_SPI_SHADER_USER_DATA_HS_0, true);
1819 descs->pointer_dirty = false;
1820 }
1821
1822 descs = &sctx->descriptors[SI_DESCS_FIRST_SHADER];
1823
1824 for (shader = 0; shader < SI_NUM_GRAPHICS_SHADERS; shader++) {
1825 unsigned base = sh_base[shader];
1826 unsigned i;
1827
1828 if (!base)
1829 continue;
1830
1831 for (i = 0; i < SI_NUM_SHADER_DESCS; i++, descs++)
1832 si_emit_shader_pointer(sctx, descs, base, false);
1833 }
1834 si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
1835 }
1836
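/* Compute has a single user data base (COMPUTE_USER_DATA_0); only the compute
 * descriptor pointers relative to it are emitted here.
 */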
1837 void si_emit_compute_shader_userdata(struct si_context *sctx)
1838 {
1839 unsigned base = R_00B900_COMPUTE_USER_DATA_0;
1840 struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_FIRST_COMPUTE];
1841
1842 for (unsigned i = 0; i < SI_NUM_SHADER_DESCS; ++i, ++descs)
1843 si_emit_shader_pointer(sctx, descs, base, false);
1844 }
1845
1846 /* INIT/DEINIT/UPLOAD */
1847
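/* Create all descriptor lists (per-shader constant buffers, shader buffers,
 * samplers and images, plus the global RW buffers and vertex buffers),
 * register the pipe_context callbacks, and set the default user data register
 * bases.
 */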
1848 void si_init_all_descriptors(struct si_context *sctx)
1849 {
1850 int i;
1851 unsigned ce_offset = 0;
1852
1853 for (i = 0; i < SI_NUM_SHADERS; i++) {
1854 si_init_buffer_resources(&sctx->const_buffers[i],
1855 si_const_buffer_descriptors(sctx, i),
1856 SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
1857 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER,
1858 &ce_offset);
1859 si_init_buffer_resources(&sctx->shader_buffers[i],
1860 si_shader_buffer_descriptors(sctx, i),
1861 SI_NUM_SHADER_BUFFERS, SI_SGPR_SHADER_BUFFERS,
1862 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RW_BUFFER,
1863 &ce_offset);
1864
1865 si_init_descriptors(si_sampler_descriptors(sctx, i),
1866 SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS,
1867 null_texture_descriptor, &ce_offset);
1868
1869 si_init_descriptors(si_image_descriptors(sctx, i),
1870 SI_SGPR_IMAGES, 8, SI_NUM_IMAGES,
1871 null_image_descriptor, &ce_offset);
1872 }
1873
1874 si_init_buffer_resources(&sctx->rw_buffers,
1875 &sctx->descriptors[SI_DESCS_RW_BUFFERS],
1876 SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
1877 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS,
1878 &ce_offset);
1879 si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
1880 4, SI_NUM_VERTEX_BUFFERS, NULL, NULL);
1881
1882 sctx->descriptors_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
1883
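	/* ce_offset is the total constant engine (CE) RAM space handed out to
	 * the lists above; it must stay within 32768 bytes.
	 */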
1884 assert(ce_offset <= 32768);
1885
1886 /* Set pipe_context functions. */
1887 sctx->b.b.bind_sampler_states = si_bind_sampler_states;
1888 sctx->b.b.set_shader_images = si_set_shader_images;
1889 sctx->b.b.set_constant_buffer = si_pipe_set_constant_buffer;
1890 sctx->b.b.set_polygon_stipple = si_set_polygon_stipple;
1891 sctx->b.b.set_shader_buffers = si_set_shader_buffers;
1892 sctx->b.b.set_sampler_views = si_set_sampler_views;
1893 sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
1894 sctx->b.invalidate_buffer = si_invalidate_buffer;
1895
1896 /* Shader user data. */
1897 si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
1898 si_emit_graphics_shader_userdata);
1899
1900 /* Set default and immutable mappings. */
1901 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
1902 si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
1903 si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
1904 si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
1905 }
1906
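/* Upload every dirty graphics descriptor list; the mask covers the list
 * indices below SI_DESCS_FIRST_COMPUTE.
 */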
1907 bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
1908 {
1909 const unsigned mask = u_bit_consecutive(0, SI_DESCS_FIRST_COMPUTE);
1910 unsigned dirty = sctx->descriptors_dirty & mask;
1911
1912 while (dirty) {
1913 unsigned i = u_bit_scan(&dirty);
1914
1915 if (!si_upload_descriptors(sctx, &sctx->descriptors[i],
1916 &sctx->shader_userdata.atom))
1917 return false;
1918 }
1919
1920 sctx->descriptors_dirty &= ~mask;
1921 return true;
1922 }
1923
1924 bool si_upload_compute_shader_descriptors(struct si_context *sctx)
1925 {
1926 	/* rw_buffers is not updated here: compute shaders don't need it, and
1927 	 * the compute input buffer uses the same SGPRs anyway.
1928 	 */
1929 const unsigned mask = u_bit_consecutive(SI_DESCS_FIRST_COMPUTE,
1930 SI_NUM_DESCS - SI_DESCS_FIRST_COMPUTE);
1931 unsigned dirty = sctx->descriptors_dirty & mask;
1932
1933 while (dirty) {
1934 unsigned i = u_bit_scan(&dirty);
1935
1936 if (!si_upload_descriptors(sctx, &sctx->descriptors[i], NULL))
1937 return false;
1938 }
1939
1940 sctx->descriptors_dirty &= ~mask;
1941
1942 return true;
1943 }
1944
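/* Free all descriptor lists and release the buffer and view references they
 * hold.
 */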
1945 void si_release_all_descriptors(struct si_context *sctx)
1946 {
1947 int i;
1948
1949 for (i = 0; i < SI_NUM_SHADERS; i++) {
1950 si_release_buffer_resources(&sctx->const_buffers[i],
1951 si_const_buffer_descriptors(sctx, i));
1952 si_release_buffer_resources(&sctx->shader_buffers[i],
1953 si_shader_buffer_descriptors(sctx, i));
1954 si_release_sampler_views(&sctx->samplers[i].views);
1955 si_release_image_views(&sctx->images[i]);
1956 }
1957 si_release_buffer_resources(&sctx->rw_buffers,
1958 &sctx->descriptors[SI_DESCS_RW_BUFFERS]);
1959
1960 for (i = 0; i < SI_NUM_DESCS; ++i)
1961 si_release_descriptors(&sctx->descriptors[i]);
1962 si_release_descriptors(&sctx->vertex_buffers);
1963 }
1964
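/* Re-add all buffers referenced by bound resources and by the descriptor
 * lists to the new command stream, then mark all shader pointers for
 * re-emission.
 */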
1965 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
1966 {
1967 int i;
1968
1969 for (i = 0; i < SI_NUM_SHADERS; i++) {
1970 si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
1971 si_buffer_resources_begin_new_cs(sctx, &sctx->shader_buffers[i]);
1972 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
1973 si_image_views_begin_new_cs(sctx, &sctx->images[i]);
1974 }
1975 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
1976 si_vertex_buffers_begin_new_cs(sctx);
1977
1978 for (i = 0; i < SI_NUM_DESCS; ++i)
1979 si_descriptors_begin_new_cs(sctx, &sctx->descriptors[i]);
1980
1981 si_shader_userdata_begin_new_cs(sctx);
1982 }