gallium/radeon: replace radeon_surf_info::dcc_enabled with num_dcc_levels
[mesa.git] src/gallium/drivers/radeonsi/si_descriptors.c
1 /*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Marek Olšák <marek.olsak@amd.com>
25 */
26
27 /* Resource binding slots and sampler states (each described with 8 or
28 * 4 dwords) are stored in lists in memory which is accessed by shaders
29 * using scalar load instructions.
30 *
31 * This file is responsible for managing such lists. It keeps a copy of all
32 * descriptors in CPU memory and re-uploads a whole list if some slots have
33 * been changed.
34 *
35 * This code is also responsible for updating shader pointers to those lists.
36 *
37 * Note that CP DMA can't be used for updating the lists, because a GPU hang
38 * could leave the list in a mid-IB state and the next IB would get wrong
39 * descriptors and the whole context would be unusable at that point.
40 * (Note: register shadowing can't be used for the same reason.)
41 *
42 * Also, uploading descriptors to newly allocated memory doesn't require
43 * a KCACHE flush.
44 *
45 *
46 * Possible scenarios for one 16 dword image+sampler slot:
47 *
48 * | Image | w/ FMASK | Buffer | NULL
49 * [ 0: 3] Image[0:3] | Image[0:3] | Null[0:3] | Null[0:3]
50 * [ 4: 7] Image[4:7] | Image[4:7] | Buffer[0:3] | 0
51 * [ 8:11] Null[0:3] | Fmask[0:3] | Null[0:3] | Null[0:3]
52 * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
53 *
54 * FMASK implies MSAA, therefore no sampler state.
55 * Sampler states are never unbound except when FMASK is bound.
56 */
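/* Illustrative sketch (not part of the original file): with the slot layout
 * above, the dword offsets inside a combined image+sampler list are plain
 * arithmetic. The macro names below are hypothetical, only restate the
 * table, and are not used anywhere in the driver.
 */
#if 0
#define EXAMPLE_IMAGE_DW(slot)   ((slot) * 16 + 0)  /* image (or null) words      */
#define EXAMPLE_BUFFER_DW(slot)  ((slot) * 16 + 4)  /* buffer words, per the table */
#define EXAMPLE_FMASK_DW(slot)   ((slot) * 16 + 8)  /* FMASK (or null) words       */
#define EXAMPLE_SAMPLER_DW(slot) ((slot) * 16 + 12) /* sampler state words         */
#endif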
57
58 #include "radeon/r600_cs.h"
59 #include "si_pipe.h"
60 #include "sid.h"
61
62 #include "util/u_format.h"
63 #include "util/u_memory.h"
64 #include "util/u_upload_mgr.h"
65
66
67 /* NULL image and buffer descriptor for textures (alpha = 1) and images
68 * (alpha = 0).
69 *
70 * For images, all fields must be zero except for the swizzle, which
71 * supports arbitrary combinations of 0s and 1s. The texture type must be
72 * set to any valid type (e.g. 1D); if it isn't set, the hw hangs.
73 *
74 * For buffers, all fields must be zero. If they are not, the hw hangs.
75 *
76 * This is the only reason why the buffer descriptor must be in words [4:7].
77 */
78 static uint32_t null_texture_descriptor[8] = {
79 0,
80 0,
81 0,
82 S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
83 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
84 /* the rest must contain zeros; the zeroed words also serve as the
85 * null buffer descriptor */
86 };
87
88 static uint32_t null_image_descriptor[8] = {
89 0,
90 0,
91 0,
92 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
93 /* the rest must contain zeros; the zeroed words also serve as the
94 * null buffer descriptor */
95 };
96
97 static void si_init_descriptors(struct si_descriptors *desc,
98 unsigned shader_userdata_index,
99 unsigned element_dw_size,
100 unsigned num_elements,
101 const uint32_t *null_descriptor,
102 unsigned *ce_offset)
103 {
104 int i;
105
106 assert(num_elements <= sizeof(desc->dirty_mask)*8);
107
108 desc->list = CALLOC(num_elements, element_dw_size * 4);
109 desc->element_dw_size = element_dw_size;
110 desc->num_elements = num_elements;
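/* A 32-bit shift by 32 is undefined in C, so the full 32-element mask is special-cased below. */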
111 desc->dirty_mask = num_elements == 32 ? ~0u : (1u << num_elements) - 1;
112 desc->shader_userdata_offset = shader_userdata_index * 4;
113
114 if (ce_offset) {
115 desc->ce_offset = *ce_offset;
116
117 /* make sure that ce_offset stays 32-byte aligned */
118 *ce_offset += align(element_dw_size * num_elements * 4, 32);
119 }
120
121 /* Initialize the array to NULL descriptors if the element size is a multiple of 8 dwords. */
122 if (null_descriptor) {
123 assert(element_dw_size % 8 == 0);
124 for (i = 0; i < num_elements * element_dw_size / 8; i++)
125 memcpy(desc->list + i * 8, null_descriptor,
126 8 * 4);
127 }
128 }
129
130 static void si_release_descriptors(struct si_descriptors *desc)
131 {
132 r600_resource_reference(&desc->buffer, NULL);
133 FREE(desc->list);
134 }
135
136 static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
137 unsigned *out_offset, struct r600_resource **out_buf) {
138 uint64_t va;
139
140 u_suballocator_alloc(sctx->ce_suballocator, size, 64, out_offset,
141 (struct pipe_resource**)out_buf);
142 if (!*out_buf)
143 return false;
144
145 va = (*out_buf)->gpu_address + *out_offset;
146
147 radeon_emit(sctx->ce_ib, PKT3(PKT3_DUMP_CONST_RAM, 3, 0));
148 radeon_emit(sctx->ce_ib, ce_offset);
149 radeon_emit(sctx->ce_ib, size / 4);
150 radeon_emit(sctx->ce_ib, va);
151 radeon_emit(sctx->ce_ib, va >> 32);
152
153 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, *out_buf,
154 RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
155
156 sctx->ce_need_synchronization = true;
157 return true;
158 }
159
160 static void si_ce_reinitialize_descriptors(struct si_context *sctx,
161 struct si_descriptors *desc)
162 {
163 if (desc->buffer) {
164 struct r600_resource *buffer = (struct r600_resource*)desc->buffer;
165 unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
166 uint64_t va = buffer->gpu_address + desc->buffer_offset;
167 struct radeon_winsys_cs *ib = sctx->ce_preamble_ib;
168
169 if (!ib)
170 ib = sctx->ce_ib;
171
172 list_size = align(list_size, 32);
173
174 radeon_emit(ib, PKT3(PKT3_LOAD_CONST_RAM, 3, 0));
175 radeon_emit(ib, va);
176 radeon_emit(ib, va >> 32);
177 radeon_emit(ib, list_size / 4);
178 radeon_emit(ib, desc->ce_offset);
179
180 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
181 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
182 }
183 desc->ce_ram_dirty = false;
184 }
185
186 void si_ce_reinitialize_all_descriptors(struct si_context *sctx)
187 {
188 int i;
189
190 for (i = 0; i < SI_NUM_DESCS; ++i)
191 si_ce_reinitialize_descriptors(sctx, &sctx->descriptors[i]);
192 }
193
194 void si_ce_enable_loads(struct radeon_winsys_cs *ib)
195 {
196 radeon_emit(ib, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
197 radeon_emit(ib, CONTEXT_CONTROL_LOAD_ENABLE(1) |
198 CONTEXT_CONTROL_LOAD_CE_RAM(1));
199 radeon_emit(ib, CONTEXT_CONTROL_SHADOW_ENABLE(1));
200 }
201
202 static bool si_upload_descriptors(struct si_context *sctx,
203 struct si_descriptors *desc,
204 struct r600_atom * atom)
205 {
206 unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
207
208 if (!desc->dirty_mask)
209 return true;
210
211 if (sctx->ce_ib) {
212 uint32_t const* list = (uint32_t const*)desc->list;
213
214 if (desc->ce_ram_dirty)
215 si_ce_reinitialize_descriptors(sctx, desc);
216
217 while(desc->dirty_mask) {
218 int begin, count;
219 u_bit_scan_consecutive_range(&desc->dirty_mask, &begin,
220 &count);
221
222 begin *= desc->element_dw_size;
223 count *= desc->element_dw_size;
224
225 radeon_emit(sctx->ce_ib,
226 PKT3(PKT3_WRITE_CONST_RAM, count, 0));
227 radeon_emit(sctx->ce_ib, desc->ce_offset + begin * 4);
228 radeon_emit_array(sctx->ce_ib, list + begin, count);
229 }
230
231 if (!si_ce_upload(sctx, desc->ce_offset, list_size,
232 &desc->buffer_offset, &desc->buffer))
233 return false;
234 } else {
235 void *ptr;
236
237 u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
238 &desc->buffer_offset,
239 (struct pipe_resource**)&desc->buffer, &ptr);
240 if (!desc->buffer)
241 return false; /* skip the draw call */
242
243 util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
244
245 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
246 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
247 }
248 desc->pointer_dirty = true;
249 desc->dirty_mask = 0;
250
251 if (atom)
252 si_mark_atom_dirty(sctx, atom);
253
254 return true;
255 }
256
257 static void
258 si_descriptors_begin_new_cs(struct si_context *sctx, struct si_descriptors *desc)
259 {
260 desc->ce_ram_dirty = true;
261
262 if (!desc->buffer)
263 return;
264
265 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
266 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
267 }
268
269 /* SAMPLER VIEWS */
270
271 static unsigned
272 si_sampler_descriptors_idx(unsigned shader)
273 {
274 return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
275 SI_SHADER_DESCS_SAMPLERS;
276 }
277
278 static struct si_descriptors *
279 si_sampler_descriptors(struct si_context *sctx, unsigned shader)
280 {
281 return &sctx->descriptors[si_sampler_descriptors_idx(shader)];
282 }
283
284 static void si_release_sampler_views(struct si_sampler_views *views)
285 {
286 int i;
287
288 for (i = 0; i < ARRAY_SIZE(views->views); i++) {
289 pipe_sampler_view_reference(&views->views[i], NULL);
290 }
291 }
292
293 static void si_sampler_view_add_buffer(struct si_context *sctx,
294 struct pipe_resource *resource,
295 enum radeon_bo_usage usage,
296 bool is_stencil_sampler,
297 bool check_mem)
298 {
299 struct r600_resource *rres;
300 struct r600_texture *rtex;
301 enum radeon_bo_priority priority;
302
303 if (!resource)
304 return;
305
306 if (resource->target != PIPE_BUFFER) {
307 struct r600_texture *tex = (struct r600_texture*)resource;
308
309 if (tex->is_depth && !r600_can_sample_zs(tex, is_stencil_sampler))
310 resource = &tex->flushed_depth_texture->resource.b.b;
311 }
312
313 rres = (struct r600_resource*)resource;
314 priority = r600_get_sampler_view_priority(rres);
315
316 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
317 rres, usage, priority,
318 check_mem);
319
320 if (resource->target == PIPE_BUFFER)
321 return;
322
323 /* Now add separate DCC if it's present. */
324 rtex = (struct r600_texture*)resource;
325 if (!rtex->dcc_separate_buffer)
326 return;
327
328 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
329 rtex->dcc_separate_buffer, usage,
330 RADEON_PRIO_DCC, check_mem);
331 }
332
333 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
334 struct si_sampler_views *views)
335 {
336 unsigned mask = views->enabled_mask;
337
338 /* Add buffers to the CS. */
339 while (mask) {
340 int i = u_bit_scan(&mask);
341 struct si_sampler_view *sview = (struct si_sampler_view *)views->views[i];
342
343 si_sampler_view_add_buffer(sctx, sview->base.texture,
344 RADEON_USAGE_READ,
345 sview->is_stencil_sampler, false);
346 }
347 }
348
349 /* Set buffer descriptor fields that can be changed by reallocations. */
350 static void si_set_buf_desc_address(struct r600_resource *buf,
351 uint64_t offset, uint32_t *state)
352 {
353 uint64_t va = buf->gpu_address + offset;
354
355 state[0] = va;
356 state[1] &= C_008F04_BASE_ADDRESS_HI;
357 state[1] |= S_008F04_BASE_ADDRESS_HI(va >> 32);
358 }
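/* Worked example (illustrative numbers only): for va = 0x123456780ull,
 * state[0] receives the low 32 bits (0x23456780) and BASE_ADDRESS_HI
 * receives va >> 32 = 0x1.
 */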
359
360 /* Set texture descriptor fields that can be changed by reallocations.
361 *
362 * \param tex texture
363 * \param base_level_info information of the level of BASE_ADDRESS
364 * \param base_level the level of BASE_ADDRESS
365 * \param first_level pipe_sampler_view.u.tex.first_level
366 * \param block_width util_format_get_blockwidth()
367 * \param is_stencil select between separate Z & Stencil
368 * \param state descriptor to update
369 */
370 void si_set_mutable_tex_desc_fields(struct r600_texture *tex,
371 const struct radeon_surf_level *base_level_info,
372 unsigned base_level, unsigned first_level,
373 unsigned block_width, bool is_stencil,
374 uint32_t *state)
375 {
376 uint64_t va;
377 unsigned pitch = base_level_info->nblk_x * block_width;
378
379 if (tex->is_depth && !r600_can_sample_zs(tex, is_stencil)) {
380 tex = tex->flushed_depth_texture;
381 is_stencil = false;
382 }
383
384 va = tex->resource.gpu_address + base_level_info->offset;
385
386 state[1] &= C_008F14_BASE_ADDRESS_HI;
387 state[3] &= C_008F1C_TILING_INDEX;
388 state[4] &= C_008F20_PITCH;
389 state[6] &= C_008F28_COMPRESSION_EN;
390
391 state[0] = va >> 8;
392 state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);
393 state[3] |= S_008F1C_TILING_INDEX(si_tile_mode_index(tex, base_level,
394 is_stencil));
395 state[4] |= S_008F20_PITCH(pitch - 1);
396
397 if (tex->dcc_offset && first_level < tex->surface.num_dcc_levels) {
398 state[6] |= S_008F28_COMPRESSION_EN(1);
399 state[7] = ((!tex->dcc_separate_buffer ? tex->resource.gpu_address : 0) +
400 tex->dcc_offset +
401 base_level_info->dcc_offset) >> 8;
402 } else if (tex->tc_compatible_htile) {
403 state[6] |= S_008F28_COMPRESSION_EN(1);
404 state[7] = tex->htile_buffer->gpu_address >> 8;
405 }
406 }
407
408 static void si_set_sampler_view(struct si_context *sctx,
409 unsigned shader,
410 unsigned slot, struct pipe_sampler_view *view,
411 bool disallow_early_out)
412 {
413 struct si_sampler_views *views = &sctx->samplers[shader].views;
414 struct si_sampler_view *rview = (struct si_sampler_view*)view;
415 struct si_descriptors *descs = si_sampler_descriptors(sctx, shader);
416
417 if (views->views[slot] == view && !disallow_early_out)
418 return;
419
420 if (view) {
421 struct r600_texture *rtex = (struct r600_texture *)view->texture;
422 uint32_t *desc = descs->list + slot * 16;
423
424 assert(rtex); /* views with texture == NULL aren't supported */
425 pipe_sampler_view_reference(&views->views[slot], view);
426 memcpy(desc, rview->state, 8*4);
427
428 if (rtex->resource.b.b.target == PIPE_BUFFER) {
429 rtex->resource.bind_history |= PIPE_BIND_SAMPLER_VIEW;
430
431 si_set_buf_desc_address(&rtex->resource,
432 view->u.buf.offset,
433 desc + 4);
434 } else {
435 bool is_separate_stencil =
436 rtex->db_compatible &&
437 rview->is_stencil_sampler;
438
439 si_set_mutable_tex_desc_fields(rtex,
440 rview->base_level_info,
441 rview->base_level,
442 rview->base.u.tex.first_level,
443 rview->block_width,
444 is_separate_stencil,
445 desc);
446 }
447
448 if (rtex->resource.b.b.target != PIPE_BUFFER &&
449 rtex->fmask.size) {
450 memcpy(desc + 8,
451 rview->fmask_state, 8*4);
452 } else {
453 /* Disable FMASK and bind sampler state in [12:15]. */
454 memcpy(desc + 8,
455 null_texture_descriptor, 4*4);
456
457 if (views->sampler_states[slot])
458 memcpy(desc + 12,
459 views->sampler_states[slot], 4*4);
460 }
461
462 views->enabled_mask |= 1u << slot;
463
464 /* Since this can flush, it must be done after enabled_mask is
465 * updated. */
466 si_sampler_view_add_buffer(sctx, view->texture,
467 RADEON_USAGE_READ,
468 rview->is_stencil_sampler, true);
469 } else {
470 pipe_sampler_view_reference(&views->views[slot], NULL);
471 memcpy(descs->list + slot*16, null_texture_descriptor, 8*4);
472 /* Only clear the lower dwords of FMASK. */
473 memcpy(descs->list + slot*16 + 8, null_texture_descriptor, 4*4);
474 views->enabled_mask &= ~(1u << slot);
475 }
476
477 descs->dirty_mask |= 1u << slot;
478 sctx->descriptors_dirty |= 1u << si_sampler_descriptors_idx(shader);
479 }
480
481 static bool is_compressed_colortex(struct r600_texture *rtex)
482 {
483 return rtex->cmask.size || rtex->fmask.size ||
484 (rtex->dcc_offset && rtex->dirty_level_mask);
485 }
486
487 static void si_set_sampler_views(struct pipe_context *ctx,
488 enum pipe_shader_type shader, unsigned start,
489 unsigned count,
490 struct pipe_sampler_view **views)
491 {
492 struct si_context *sctx = (struct si_context *)ctx;
493 struct si_textures_info *samplers = &sctx->samplers[shader];
494 int i;
495
496 if (!count || shader >= SI_NUM_SHADERS)
497 return;
498
499 for (i = 0; i < count; i++) {
500 unsigned slot = start + i;
501
502 if (!views || !views[i]) {
503 samplers->depth_texture_mask &= ~(1u << slot);
504 samplers->compressed_colortex_mask &= ~(1u << slot);
505 si_set_sampler_view(sctx, shader, slot, NULL, false);
506 continue;
507 }
508
509 si_set_sampler_view(sctx, shader, slot, views[i], false);
510
511 if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
512 struct r600_texture *rtex =
513 (struct r600_texture*)views[i]->texture;
514 struct si_sampler_view *rview = (struct si_sampler_view *)views[i];
515
516 if (rtex->db_compatible &&
517 (!rtex->tc_compatible_htile || rview->is_stencil_sampler)) {
518 samplers->depth_texture_mask |= 1u << slot;
519 } else {
520 samplers->depth_texture_mask &= ~(1u << slot);
521 }
522 if (is_compressed_colortex(rtex)) {
523 samplers->compressed_colortex_mask |= 1u << slot;
524 } else {
525 samplers->compressed_colortex_mask &= ~(1u << slot);
526 }
527
528 if (rtex->dcc_offset &&
529 p_atomic_read(&rtex->framebuffers_bound))
530 sctx->need_check_render_feedback = true;
531 } else {
532 samplers->depth_texture_mask &= ~(1u << slot);
533 samplers->compressed_colortex_mask &= ~(1u << slot);
534 }
535 }
536 }
537
538 static void
539 si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
540 {
541 unsigned mask = samplers->views.enabled_mask;
542
543 while (mask) {
544 int i = u_bit_scan(&mask);
545 struct pipe_resource *res = samplers->views.views[i]->texture;
546
547 if (res && res->target != PIPE_BUFFER) {
548 struct r600_texture *rtex = (struct r600_texture *)res;
549
550 if (is_compressed_colortex(rtex)) {
551 samplers->compressed_colortex_mask |= 1u << i;
552 } else {
553 samplers->compressed_colortex_mask &= ~(1u << i);
554 }
555 }
556 }
557 }
558
559 /* IMAGE VIEWS */
560
561 static unsigned
562 si_image_descriptors_idx(unsigned shader)
563 {
564 return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
565 SI_SHADER_DESCS_IMAGES;
566 }
567
568 static struct si_descriptors*
569 si_image_descriptors(struct si_context *sctx, unsigned shader)
570 {
571 return &sctx->descriptors[si_image_descriptors_idx(shader)];
572 }
573
574 static void
575 si_release_image_views(struct si_images_info *images)
576 {
577 unsigned i;
578
579 for (i = 0; i < SI_NUM_IMAGES; ++i) {
580 struct pipe_image_view *view = &images->views[i];
581
582 pipe_resource_reference(&view->resource, NULL);
583 }
584 }
585
586 static void
587 si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
588 {
589 uint mask = images->enabled_mask;
590
591 /* Add buffers to the CS. */
592 while (mask) {
593 int i = u_bit_scan(&mask);
594 struct pipe_image_view *view = &images->views[i];
595
596 assert(view->resource);
597
598 si_sampler_view_add_buffer(sctx, view->resource,
599 RADEON_USAGE_READWRITE, false, false);
600 }
601 }
602
603 static void
604 si_disable_shader_image(struct si_context *ctx, unsigned shader, unsigned slot)
605 {
606 struct si_images_info *images = &ctx->images[shader];
607
608 if (images->enabled_mask & (1u << slot)) {
609 struct si_descriptors *descs = si_image_descriptors(ctx, shader);
610
611 pipe_resource_reference(&images->views[slot].resource, NULL);
612 images->compressed_colortex_mask &= ~(1 << slot);
613
614 memcpy(descs->list + slot*8, null_image_descriptor, 8*4);
615 images->enabled_mask &= ~(1u << slot);
616 descs->dirty_mask |= 1u << slot;
617 ctx->descriptors_dirty |= 1u << si_image_descriptors_idx(shader);
618 }
619 }
620
621 static void
622 si_mark_image_range_valid(const struct pipe_image_view *view)
623 {
624 struct r600_resource *res = (struct r600_resource *)view->resource;
625
626 assert(res && res->b.b.target == PIPE_BUFFER);
627
628 util_range_add(&res->valid_buffer_range,
629 view->u.buf.offset,
630 view->u.buf.offset + view->u.buf.size);
631 }
632
633 static void si_set_shader_image(struct si_context *ctx,
634 unsigned shader,
635 unsigned slot, const struct pipe_image_view *view)
636 {
637 struct si_screen *screen = ctx->screen;
638 struct si_images_info *images = &ctx->images[shader];
639 struct si_descriptors *descs = si_image_descriptors(ctx, shader);
640 struct r600_resource *res;
641 uint32_t *desc = descs->list + slot * 8;
642
643 if (!view || !view->resource) {
644 si_disable_shader_image(ctx, shader, slot);
645 return;
646 }
647
648 res = (struct r600_resource *)view->resource;
649
650 if (&images->views[slot] != view)
651 util_copy_image_view(&images->views[slot], view);
652
653 if (res->b.b.target == PIPE_BUFFER) {
654 if (view->access & PIPE_IMAGE_ACCESS_WRITE)
655 si_mark_image_range_valid(view);
656
657 si_make_buffer_descriptor(screen, res,
658 view->format,
659 view->u.buf.offset,
660 view->u.buf.size,
661 descs->list + slot * 8);
662 si_set_buf_desc_address(res, view->u.buf.offset, desc + 4);
663
664 images->compressed_colortex_mask &= ~(1 << slot);
665 res->bind_history |= PIPE_BIND_SHADER_IMAGE;
666 } else {
667 static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
668 struct r600_texture *tex = (struct r600_texture *)res;
669 unsigned level = view->u.tex.level;
670 unsigned width, height, depth;
671 bool uses_dcc = tex->dcc_offset &&
672 level < tex->surface.num_dcc_levels;
673
674 assert(!tex->is_depth);
675 assert(tex->fmask.size == 0);
676
677 if (uses_dcc &&
678 (view->access & PIPE_IMAGE_ACCESS_WRITE ||
679 !vi_dcc_formats_compatible(res->b.b.format, view->format))) {
680 /* If DCC can't be disabled, at least decompress it.
681 * The decompression is relatively cheap if the surface
682 * has been decompressed already.
683 */
684 if (r600_texture_disable_dcc(&ctx->b, tex))
685 uses_dcc = false;
686 else
687 ctx->b.decompress_dcc(&ctx->b.b, tex);
688 }
689
690 if (is_compressed_colortex(tex)) {
691 images->compressed_colortex_mask |= 1 << slot;
692 } else {
693 images->compressed_colortex_mask &= ~(1 << slot);
694 }
695
696 if (uses_dcc &&
697 p_atomic_read(&tex->framebuffers_bound))
698 ctx->need_check_render_feedback = true;
699
700 /* Always force the base level to the selected level.
701 *
702 * This is required for 3D textures, where otherwise
703 * selecting a single slice for non-layered bindings
704 * fails. It doesn't hurt the other targets.
705 */
706 width = u_minify(res->b.b.width0, level);
707 height = u_minify(res->b.b.height0, level);
708 depth = u_minify(res->b.b.depth0, level);
709
710 si_make_texture_descriptor(screen, tex,
711 false, res->b.b.target,
712 view->format, swizzle,
713 0, 0,
714 view->u.tex.first_layer,
715 view->u.tex.last_layer,
716 width, height, depth,
717 desc, NULL);
718 si_set_mutable_tex_desc_fields(tex, &tex->surface.level[level],
719 level, level,
720 util_format_get_blockwidth(view->format),
721 false, desc);
722 }
723
724 images->enabled_mask |= 1u << slot;
725 descs->dirty_mask |= 1u << slot;
726 ctx->descriptors_dirty |= 1u << si_image_descriptors_idx(shader);
727
728 /* Since this can flush, it must be done after enabled_mask is updated. */
729 si_sampler_view_add_buffer(ctx, &res->b.b,
730 RADEON_USAGE_READWRITE, false, true);
731 }
732
733 static void
734 si_set_shader_images(struct pipe_context *pipe,
735 enum pipe_shader_type shader,
736 unsigned start_slot, unsigned count,
737 const struct pipe_image_view *views)
738 {
739 struct si_context *ctx = (struct si_context *)pipe;
740 unsigned i, slot;
741
742 assert(shader < SI_NUM_SHADERS);
743
744 if (!count)
745 return;
746
747 assert(start_slot + count <= SI_NUM_IMAGES);
748
749 if (views) {
750 for (i = 0, slot = start_slot; i < count; ++i, ++slot)
751 si_set_shader_image(ctx, shader, slot, &views[i]);
752 } else {
753 for (i = 0, slot = start_slot; i < count; ++i, ++slot)
754 si_set_shader_image(ctx, shader, slot, NULL);
755 }
756 }
757
758 static void
759 si_images_update_compressed_colortex_mask(struct si_images_info *images)
760 {
761 unsigned mask = images->enabled_mask;
762
763 while (mask) {
764 int i = u_bit_scan(&mask);
765 struct pipe_resource *res = images->views[i].resource;
766
767 if (res && res->target != PIPE_BUFFER) {
768 struct r600_texture *rtex = (struct r600_texture *)res;
769
770 if (is_compressed_colortex(rtex)) {
771 images->compressed_colortex_mask |= 1 << i;
772 } else {
773 images->compressed_colortex_mask &= ~(1 << i);
774 }
775 }
776 }
777 }
778
779 /* SAMPLER STATES */
780
781 static void si_bind_sampler_states(struct pipe_context *ctx,
782 enum pipe_shader_type shader,
783 unsigned start, unsigned count, void **states)
784 {
785 struct si_context *sctx = (struct si_context *)ctx;
786 struct si_textures_info *samplers = &sctx->samplers[shader];
787 struct si_descriptors *desc = si_sampler_descriptors(sctx, shader);
788 struct si_sampler_state **sstates = (struct si_sampler_state**)states;
789 int i;
790
791 if (!count || shader >= SI_NUM_SHADERS)
792 return;
793
794 for (i = 0; i < count; i++) {
795 unsigned slot = start + i;
796
797 if (!sstates[i] ||
798 sstates[i] == samplers->views.sampler_states[slot])
799 continue;
800
801 samplers->views.sampler_states[slot] = sstates[i];
802
803 /* If FMASK is bound, don't overwrite it.
804 * The sampler state will be set after FMASK is unbound.
805 */
806 if (samplers->views.views[slot] &&
807 samplers->views.views[slot]->texture &&
808 samplers->views.views[slot]->texture->target != PIPE_BUFFER &&
809 ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
810 continue;
811
812 memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
813 desc->dirty_mask |= 1u << slot;
814 sctx->descriptors_dirty |= 1u << si_sampler_descriptors_idx(shader);
815 }
816 }
817
818 /* BUFFER RESOURCES */
819
820 static void si_init_buffer_resources(struct si_buffer_resources *buffers,
821 struct si_descriptors *descs,
822 unsigned num_buffers,
823 unsigned shader_userdata_index,
824 enum radeon_bo_usage shader_usage,
825 enum radeon_bo_priority priority,
826 unsigned *ce_offset)
827 {
828 buffers->shader_usage = shader_usage;
829 buffers->priority = priority;
830 buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
831
832 si_init_descriptors(descs, shader_userdata_index, 4,
833 num_buffers, NULL, ce_offset);
834 }
835
836 static void si_release_buffer_resources(struct si_buffer_resources *buffers,
837 struct si_descriptors *descs)
838 {
839 int i;
840
841 for (i = 0; i < descs->num_elements; i++) {
842 pipe_resource_reference(&buffers->buffers[i], NULL);
843 }
844
845 FREE(buffers->buffers);
846 }
847
848 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
849 struct si_buffer_resources *buffers)
850 {
851 unsigned mask = buffers->enabled_mask;
852
853 /* Add buffers to the CS. */
854 while (mask) {
855 int i = u_bit_scan(&mask);
856
857 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
858 (struct r600_resource*)buffers->buffers[i],
859 buffers->shader_usage, buffers->priority);
860 }
861 }
862
863 static void si_get_buffer_from_descriptors(struct si_buffer_resources *buffers,
864 struct si_descriptors *descs,
865 unsigned idx, struct pipe_resource **buf,
866 unsigned *offset, unsigned *size)
867 {
868 pipe_resource_reference(buf, buffers->buffers[idx]);
869 if (*buf) {
870 struct r600_resource *res = r600_resource(*buf);
871 const uint32_t *desc = descs->list + idx * 4;
872 uint64_t va;
873
874 *size = desc[2];
875
876 assert(G_008F04_STRIDE(desc[1]) == 0);
877 va = ((uint64_t)desc[1] << 32) | desc[0];
878
879 assert(va >= res->gpu_address && va + *size <= res->gpu_address + res->bo_size);
880 *offset = va - res->gpu_address;
881 }
882 }
883
884 /* VERTEX BUFFERS */
885
886 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
887 {
888 struct si_descriptors *desc = &sctx->vertex_buffers;
889 int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
890 int i;
891
892 for (i = 0; i < count; i++) {
893 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
894
895 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
896 continue;
897 if (!sctx->vertex_buffer[vb].buffer)
898 continue;
899
900 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
901 (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
902 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
903 }
904
905 if (!desc->buffer)
906 return;
907 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
908 desc->buffer, RADEON_USAGE_READ,
909 RADEON_PRIO_DESCRIPTORS);
910 }
911
912 bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
913 {
914 struct si_descriptors *desc = &sctx->vertex_buffers;
915 bool bound[SI_NUM_VERTEX_BUFFERS] = {};
916 unsigned i, count;
917 uint64_t va;
918 uint32_t *ptr;
919
920 if (!sctx->vertex_buffers_dirty || !sctx->vertex_elements)
921 return true;
922 count = sctx->vertex_elements->count;
923 if (!count) return true;
924
925 /* Vertex buffer descriptors are the only ones which are uploaded
926 * directly through a staging buffer and don't go through
927 * the fine-grained upload path.
928 */
929 u_upload_alloc(sctx->b.uploader, 0, count * 16, 256, &desc->buffer_offset,
930 (struct pipe_resource**)&desc->buffer, (void**)&ptr);
931 if (!desc->buffer)
932 return false;
933
934 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
935 desc->buffer, RADEON_USAGE_READ,
936 RADEON_PRIO_DESCRIPTORS);
937
938 assert(count <= SI_NUM_VERTEX_BUFFERS);
939
940 for (i = 0; i < count; i++) {
941 struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
942 struct pipe_vertex_buffer *vb;
943 struct r600_resource *rbuffer;
944 unsigned offset;
945 uint32_t *desc = &ptr[i*4];
946
947 if (ve->vertex_buffer_index >= ARRAY_SIZE(sctx->vertex_buffer)) {
948 memset(desc, 0, 16);
949 continue;
950 }
951
952 vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
953 rbuffer = (struct r600_resource*)vb->buffer;
954 if (!rbuffer) {
955 memset(desc, 0, 16);
956 continue;
957 }
958
959 offset = vb->buffer_offset + ve->src_offset;
960 va = rbuffer->gpu_address + offset;
961
962 /* Fill in T# buffer resource description */
963 desc[0] = va;
964 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
965 S_008F04_STRIDE(vb->stride);
966
967 if (sctx->b.chip_class <= CIK && vb->stride)
968 /* Round up by rounding down and adding 1 */
969 desc[2] = (vb->buffer->width0 - offset -
970 sctx->vertex_elements->format_size[i]) /
971 vb->stride + 1;
972 else
973 desc[2] = vb->buffer->width0 - offset;
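/* Worked example for the CIK path above (illustrative numbers only):
 * with width0 = 1000, offset = 4, format_size = 16 and stride = 12,
 * (1000 - 4 - 16) / 12 + 1 = 82 records, i.e. vertices 0..81 can be
 * fetched without reading past the end of the buffer.
 */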
974
975 desc[3] = sctx->vertex_elements->rsrc_word3[i];
976
977 if (!bound[ve->vertex_buffer_index]) {
978 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
979 (struct r600_resource*)vb->buffer,
980 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
981 bound[ve->vertex_buffer_index] = true;
982 }
983 }
984
985 /* Don't flush the const cache. It would have a very negative effect
986 * on performance (confirmed by testing). New descriptors are always
987 * uploaded to a fresh buffer, so I don't think flushing the const
988 * cache is needed. */
989 desc->pointer_dirty = true;
990 si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
991 sctx->vertex_buffers_dirty = false;
992 return true;
993 }
994
995
996 /* CONSTANT BUFFERS */
997
998 static unsigned
999 si_const_buffer_descriptors_idx(unsigned shader)
1000 {
1001 return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
1002 SI_SHADER_DESCS_CONST_BUFFERS;
1003 }
1004
1005 static struct si_descriptors *
1006 si_const_buffer_descriptors(struct si_context *sctx, unsigned shader)
1007 {
1008 return &sctx->descriptors[si_const_buffer_descriptors_idx(shader)];
1009 }
1010
1011 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
1012 const uint8_t *ptr, unsigned size, uint32_t *const_offset)
1013 {
1014 void *tmp;
1015
1016 u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
1017 (struct pipe_resource**)rbuffer, &tmp);
1018 if (*rbuffer)
1019 util_memcpy_cpu_to_le32(tmp, ptr, size);
1020 }
1021
1022 static void si_set_constant_buffer(struct si_context *sctx,
1023 struct si_buffer_resources *buffers,
1024 unsigned descriptors_idx,
1025 uint slot, const struct pipe_constant_buffer *input)
1026 {
1027 struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
1028 assert(slot < descs->num_elements);
1029 pipe_resource_reference(&buffers->buffers[slot], NULL);
1030
1031 /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
1032 * with a NULL buffer). We need to use a dummy buffer instead. */
1033 if (sctx->b.chip_class == CIK &&
1034 (!input || (!input->buffer && !input->user_buffer)))
1035 input = &sctx->null_const_buf;
1036
1037 if (input && (input->buffer || input->user_buffer)) {
1038 struct pipe_resource *buffer = NULL;
1039 uint64_t va;
1040
1041 /* Upload the user buffer if needed. */
1042 if (input->user_buffer) {
1043 unsigned buffer_offset;
1044
1045 si_upload_const_buffer(sctx,
1046 (struct r600_resource**)&buffer, input->user_buffer,
1047 input->buffer_size, &buffer_offset);
1048 if (!buffer) {
1049 /* Just unbind on failure. */
1050 si_set_constant_buffer(sctx, buffers, descriptors_idx, slot, NULL);
1051 return;
1052 }
1053 va = r600_resource(buffer)->gpu_address + buffer_offset;
1054 } else {
1055 pipe_resource_reference(&buffer, input->buffer);
1056 va = r600_resource(buffer)->gpu_address + input->buffer_offset;
1057 /* Only track usage for non-user buffers. */
1058 r600_resource(buffer)->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
1059 }
1060
1061 /* Set the descriptor. */
1062 uint32_t *desc = descs->list + slot*4;
1063 desc[0] = va;
1064 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1065 S_008F04_STRIDE(0);
1066 desc[2] = input->buffer_size;
1067 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1068 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1069 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1070 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1071 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1072 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1073
1074 buffers->buffers[slot] = buffer;
1075 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1076 (struct r600_resource*)buffer,
1077 buffers->shader_usage,
1078 buffers->priority, true);
1079 buffers->enabled_mask |= 1u << slot;
1080 } else {
1081 /* Clear the descriptor. */
1082 memset(descs->list + slot*4, 0, sizeof(uint32_t) * 4);
1083 buffers->enabled_mask &= ~(1u << slot);
1084 }
1085
1086 descs->dirty_mask |= 1u << slot;
1087 sctx->descriptors_dirty |= 1u << descriptors_idx;
1088 }
1089
1090 void si_set_rw_buffer(struct si_context *sctx,
1091 uint slot, const struct pipe_constant_buffer *input)
1092 {
1093 si_set_constant_buffer(sctx, &sctx->rw_buffers,
1094 SI_DESCS_RW_BUFFERS, slot, input);
1095 }
1096
1097 static void si_pipe_set_constant_buffer(struct pipe_context *ctx,
1098 uint shader, uint slot,
1099 const struct pipe_constant_buffer *input)
1100 {
1101 struct si_context *sctx = (struct si_context *)ctx;
1102
1103 if (shader >= SI_NUM_SHADERS)
1104 return;
1105
1106 si_set_constant_buffer(sctx, &sctx->const_buffers[shader],
1107 si_const_buffer_descriptors_idx(shader),
1108 slot, input);
1109 }
1110
1111 void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader,
1112 uint slot, struct pipe_constant_buffer *cbuf)
1113 {
1114 cbuf->user_buffer = NULL;
1115 si_get_buffer_from_descriptors(
1116 &sctx->const_buffers[shader],
1117 si_const_buffer_descriptors(sctx, shader),
1118 slot, &cbuf->buffer, &cbuf->buffer_offset, &cbuf->buffer_size);
1119 }
1120
1121 /* SHADER BUFFERS */
1122
1123 static unsigned
1124 si_shader_buffer_descriptors_idx(enum pipe_shader_type shader)
1125 {
1126 return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
1127 SI_SHADER_DESCS_SHADER_BUFFERS;
1128 }
1129
1130 static struct si_descriptors *
1131 si_shader_buffer_descriptors(struct si_context *sctx,
1132 enum pipe_shader_type shader)
1133 {
1134 return &sctx->descriptors[si_shader_buffer_descriptors_idx(shader)];
1135 }
1136
1137 static void si_set_shader_buffers(struct pipe_context *ctx,
1138 enum pipe_shader_type shader,
1139 unsigned start_slot, unsigned count,
1140 const struct pipe_shader_buffer *sbuffers)
1141 {
1142 struct si_context *sctx = (struct si_context *)ctx;
1143 struct si_buffer_resources *buffers = &sctx->shader_buffers[shader];
1144 struct si_descriptors *descs = si_shader_buffer_descriptors(sctx, shader);
1145 unsigned i;
1146
1147 assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
1148
1149 for (i = 0; i < count; ++i) {
1150 const struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
1151 struct r600_resource *buf;
1152 unsigned slot = start_slot + i;
1153 uint32_t *desc = descs->list + slot * 4;
1154 uint64_t va;
1155
1156 if (!sbuffer || !sbuffer->buffer) {
1157 pipe_resource_reference(&buffers->buffers[slot], NULL);
1158 memset(desc, 0, sizeof(uint32_t) * 4);
1159 buffers->enabled_mask &= ~(1u << slot);
1160 descs->dirty_mask |= 1u << slot;
1161 sctx->descriptors_dirty |=
1162 1u << si_shader_buffer_descriptors_idx(shader);
1163 continue;
1164 }
1165
1166 buf = (struct r600_resource *)sbuffer->buffer;
1167 va = buf->gpu_address + sbuffer->buffer_offset;
1168
1169 desc[0] = va;
1170 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1171 S_008F04_STRIDE(0);
1172 desc[2] = sbuffer->buffer_size;
1173 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1174 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1175 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1176 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1177 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1178 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1179
1180 pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
1181 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx, buf,
1182 buffers->shader_usage,
1183 buffers->priority, true);
1184 buf->bind_history |= PIPE_BIND_SHADER_BUFFER;
1185
1186 buffers->enabled_mask |= 1u << slot;
1187 descs->dirty_mask |= 1u << slot;
1188 sctx->descriptors_dirty |=
1189 1u << si_shader_buffer_descriptors_idx(shader);
1190 }
1191 }
1192
1193 void si_get_shader_buffers(struct si_context *sctx, uint shader,
1194 uint start_slot, uint count,
1195 struct pipe_shader_buffer *sbuf)
1196 {
1197 struct si_buffer_resources *buffers = &sctx->shader_buffers[shader];
1198 struct si_descriptors *descs = si_shader_buffer_descriptors(sctx, shader);
1199
1200 for (unsigned i = 0; i < count; ++i) {
1201 si_get_buffer_from_descriptors(
1202 buffers, descs, start_slot + i,
1203 &sbuf[i].buffer, &sbuf[i].buffer_offset,
1204 &sbuf[i].buffer_size);
1205 }
1206 }
1207
1208 /* RING BUFFERS */
1209
1210 void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
1211 struct pipe_resource *buffer,
1212 unsigned stride, unsigned num_records,
1213 bool add_tid, bool swizzle,
1214 unsigned element_size, unsigned index_stride, uint64_t offset)
1215 {
1216 struct si_context *sctx = (struct si_context *)ctx;
1217 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1218 struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1219
1220 /* The stride field in the resource descriptor has 14 bits */
1221 assert(stride < (1 << 14));
1222
1223 assert(slot < descs->num_elements);
1224 pipe_resource_reference(&buffers->buffers[slot], NULL);
1225
1226 if (buffer) {
1227 uint64_t va;
1228
1229 va = r600_resource(buffer)->gpu_address + offset;
1230
1231 switch (element_size) {
1232 default:
1233 assert(!"Unsupported ring buffer element size");
1234 case 0:
1235 case 2:
1236 element_size = 0;
1237 break;
1238 case 4:
1239 element_size = 1;
1240 break;
1241 case 8:
1242 element_size = 2;
1243 break;
1244 case 16:
1245 element_size = 3;
1246 break;
1247 }
1248
1249 switch (index_stride) {
1250 default:
1251 assert(!"Unsupported ring buffer index stride");
1252 case 0:
1253 case 8:
1254 index_stride = 0;
1255 break;
1256 case 16:
1257 index_stride = 1;
1258 break;
1259 case 32:
1260 index_stride = 2;
1261 break;
1262 case 64:
1263 index_stride = 3;
1264 break;
1265 }
1266
1267 if (sctx->b.chip_class >= VI && stride)
1268 num_records *= stride;
1269
1270 /* Set the descriptor. */
1271 uint32_t *desc = descs->list + slot*4;
1272 desc[0] = va;
1273 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1274 S_008F04_STRIDE(stride) |
1275 S_008F04_SWIZZLE_ENABLE(swizzle);
1276 desc[2] = num_records;
1277 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1278 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1279 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1280 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1281 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1282 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1283 S_008F0C_ELEMENT_SIZE(element_size) |
1284 S_008F0C_INDEX_STRIDE(index_stride) |
1285 S_008F0C_ADD_TID_ENABLE(add_tid);
1286
1287 pipe_resource_reference(&buffers->buffers[slot], buffer);
1288 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1289 (struct r600_resource*)buffer,
1290 buffers->shader_usage, buffers->priority);
1291 buffers->enabled_mask |= 1u << slot;
1292 } else {
1293 /* Clear the descriptor. */
1294 memset(descs->list + slot*4, 0, sizeof(uint32_t) * 4);
1295 buffers->enabled_mask &= ~(1u << slot);
1296 }
1297
1298 descs->dirty_mask |= 1u << slot;
1299 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
1300 }
1301
1302 /* STREAMOUT BUFFERS */
1303
1304 static void si_set_streamout_targets(struct pipe_context *ctx,
1305 unsigned num_targets,
1306 struct pipe_stream_output_target **targets,
1307 const unsigned *offsets)
1308 {
1309 struct si_context *sctx = (struct si_context *)ctx;
1310 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1311 struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1312 unsigned old_num_targets = sctx->b.streamout.num_targets;
1313 unsigned i, bufidx;
1314
1315 /* We are going to unbind the buffers. Mark which caches need to be flushed. */
1316 if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
1317 /* Since streamout uses vector writes which go through TC L2
1318 * and most other clients can use TC L2 as well, we don't need
1319 * to flush it.
1320 *
1321 * The only cases that require flushing it are VGT DMA index
1322 * fetching (on <= CIK) and indirect draw data, which are rare.
1323 * Thus, flag the TC L2 dirtiness in the resource and
1324 * handle it at draw call time.
1325 */
1326 for (i = 0; i < sctx->b.streamout.num_targets; i++)
1327 if (sctx->b.streamout.targets[i])
1328 r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
1329
1330 /* Invalidate the scalar cache in case a streamout buffer is
1331 * going to be used as a constant buffer.
1332 *
1333 * Invalidate TC L1, because streamout bypasses it (done by
1334 * setting GLC=1 in the store instruction), but it can contain
1335 * outdated data of streamout buffers.
1336 *
1337 * VS_PARTIAL_FLUSH is required if the buffers are going to be
1338 * used as an input immediately.
1339 */
1340 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
1341 SI_CONTEXT_INV_VMEM_L1 |
1342 SI_CONTEXT_VS_PARTIAL_FLUSH;
1343 }
1344
1345 /* All readers of the streamout targets need to be finished before we can
1346 * start writing to the targets.
1347 */
1348 if (num_targets)
1349 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
1350 SI_CONTEXT_CS_PARTIAL_FLUSH;
1351
1352 /* Streamout buffers must be bound in 2 places:
1353 * 1) in VGT by setting the VGT_STRMOUT registers
1354 * 2) as shader resources
1355 */
1356
1357 /* Set the VGT regs. */
1358 r600_set_streamout_targets(ctx, num_targets, targets, offsets);
1359
1360 /* Set the shader resources. */
1361 for (i = 0; i < num_targets; i++) {
1362 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1363
1364 if (targets[i]) {
1365 struct pipe_resource *buffer = targets[i]->buffer;
1366 uint64_t va = r600_resource(buffer)->gpu_address;
1367
1368 /* Set the descriptor.
1369 *
1370 * On VI, the format must be non-INVALID, otherwise
1371 * the buffer will be considered not bound and store
1372 * instructions will be no-ops.
1373 */
1374 uint32_t *desc = descs->list + bufidx*4;
1375 desc[0] = va;
1376 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
1377 desc[2] = 0xffffffff;
1378 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1379 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1380 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1381 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1382 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1383
1384 /* Set the resource. */
1385 pipe_resource_reference(&buffers->buffers[bufidx],
1386 buffer);
1387 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1388 (struct r600_resource*)buffer,
1389 buffers->shader_usage,
1390 RADEON_PRIO_SHADER_RW_BUFFER,
1391 true);
1392 r600_resource(buffer)->bind_history |= PIPE_BIND_STREAM_OUTPUT;
1393
1394 buffers->enabled_mask |= 1u << bufidx;
1395 } else {
1396 /* Clear the descriptor and unset the resource. */
1397 memset(descs->list + bufidx*4, 0,
1398 sizeof(uint32_t) * 4);
1399 pipe_resource_reference(&buffers->buffers[bufidx],
1400 NULL);
1401 buffers->enabled_mask &= ~(1u << bufidx);
1402 }
1403 descs->dirty_mask |= 1u << bufidx;
1404 }
1405 for (; i < old_num_targets; i++) {
1406 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1407 /* Clear the descriptor and unset the resource. */
1408 memset(descs->list + bufidx*4, 0, sizeof(uint32_t) * 4);
1409 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
1410 buffers->enabled_mask &= ~(1u << bufidx);
1411 descs->dirty_mask |= 1u << bufidx;
1412 }
1413
1414 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
1415 }
1416
1417 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
1418 uint32_t *desc, uint64_t old_buf_va,
1419 struct pipe_resource *new_buf)
1420 {
1421 /* Retrieve the buffer offset from the descriptor. */
1422 uint64_t old_desc_va =
1423 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
1424
1425 assert(old_buf_va <= old_desc_va);
1426 uint64_t offset_within_buffer = old_desc_va - old_buf_va;
1427
1428 /* Update the descriptor. */
1429 si_set_buf_desc_address(r600_resource(new_buf), offset_within_buffer,
1430 desc);
1431 }
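/* Worked example (illustrative numbers only): if the old buffer lived at
 * old_buf_va = 0x10000 and the descriptor pointed at 0x10040, the binding
 * starts 0x40 bytes into the buffer, so after reallocation the descriptor
 * is rewritten to new_buf->gpu_address + 0x40.
 */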
1432
1433 /* INTERNAL CONST BUFFERS */
1434
1435 static void si_set_polygon_stipple(struct pipe_context *ctx,
1436 const struct pipe_poly_stipple *state)
1437 {
1438 struct si_context *sctx = (struct si_context *)ctx;
1439 struct pipe_constant_buffer cb = {};
1440 unsigned stipple[32];
1441 int i;
1442
1443 for (i = 0; i < 32; i++)
1444 stipple[i] = util_bitreverse(state->stipple[i]);
1445
1446 cb.user_buffer = stipple;
1447 cb.buffer_size = sizeof(stipple);
1448
1449 si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE, &cb);
1450 }
1451
1452 /* TEXTURE METADATA ENABLE/DISABLE */
1453
1454 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
1455 * while the texture is bound, possibly by a different context. In that case,
1456 * call this function to update compressed_colortex_masks.
1457 */
1458 void si_update_compressed_colortex_masks(struct si_context *sctx)
1459 {
1460 for (int i = 0; i < SI_NUM_SHADERS; ++i) {
1461 si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
1462 si_images_update_compressed_colortex_mask(&sctx->images[i]);
1463 }
1464 }
1465
1466 /* BUFFER DISCARD/INVALIDATION */
1467
1468 /** Reset descriptors of buffer resources after \p buf has been invalidated. */
1469 static void si_reset_buffer_resources(struct si_context *sctx,
1470 struct si_buffer_resources *buffers,
1471 unsigned descriptors_idx,
1472 struct pipe_resource *buf,
1473 uint64_t old_va)
1474 {
1475 struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
1476 unsigned mask = buffers->enabled_mask;
1477
1478 while (mask) {
1479 unsigned i = u_bit_scan(&mask);
1480 if (buffers->buffers[i] == buf) {
1481 si_desc_reset_buffer_offset(&sctx->b.b,
1482 descs->list + i*4,
1483 old_va, buf);
1484 descs->dirty_mask |= 1u << i;
1485 sctx->descriptors_dirty |= 1u << descriptors_idx;
1486
1487 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1488 (struct r600_resource *)buf,
1489 buffers->shader_usage,
1490 buffers->priority, true);
1491 }
1492 }
1493 }
1494
1495 /* Reallocate a buffer and update all resource bindings where the buffer is
1496 * bound.
1497 *
1498 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
1499 * idle by discarding its contents. Apps usually tell us when to do this using
1500 * map_buffer flags, for example.
1501 */
1502 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
1503 {
1504 struct si_context *sctx = (struct si_context*)ctx;
1505 struct r600_resource *rbuffer = r600_resource(buf);
1506 unsigned i, shader;
1507 uint64_t old_va = rbuffer->gpu_address;
1508 unsigned num_elems = sctx->vertex_elements ?
1509 sctx->vertex_elements->count : 0;
1510
1511 /* Reallocate the buffer in the same pipe_resource. */
1512 r600_alloc_resource(&sctx->screen->b, rbuffer);
1513
1514 /* We changed the buffer, now we need to bind it where the old one
1515 * was bound. This consists of 2 things:
1516 * 1) Updating the resource descriptor and dirtying it.
1517 * 2) Adding a relocation to the CS, so that it's usable.
1518 */
1519
1520 /* Vertex buffers. */
1521 if (rbuffer->bind_history & PIPE_BIND_VERTEX_BUFFER) {
1522 for (i = 0; i < num_elems; i++) {
1523 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
1524
1525 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
1526 continue;
1527 if (!sctx->vertex_buffer[vb].buffer)
1528 continue;
1529
1530 if (sctx->vertex_buffer[vb].buffer == buf) {
1531 sctx->vertex_buffers_dirty = true;
1532 break;
1533 }
1534 }
1535 }
1536
1537 /* Streamout buffers. (other internal buffers can't be invalidated) */
1538 if (rbuffer->bind_history & PIPE_BIND_STREAM_OUTPUT) {
1539 for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
1540 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1541 struct si_descriptors *descs =
1542 &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1543
1544 if (buffers->buffers[i] != buf)
1545 continue;
1546
1547 si_desc_reset_buffer_offset(ctx, descs->list + i*4,
1548 old_va, buf);
1549 descs->dirty_mask |= 1u << i;
1550 sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
1551
1552 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1553 rbuffer, buffers->shader_usage,
1554 RADEON_PRIO_SHADER_RW_BUFFER,
1555 true);
1556
1557 /* Update the streamout state. */
1558 if (sctx->b.streamout.begin_emitted)
1559 r600_emit_streamout_end(&sctx->b);
1560 sctx->b.streamout.append_bitmask =
1561 sctx->b.streamout.enabled_mask;
1562 r600_streamout_buffers_dirty(&sctx->b);
1563 }
1564 }
1565
1566 /* Constant and shader buffers. */
1567 if (rbuffer->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
1568 for (shader = 0; shader < SI_NUM_SHADERS; shader++)
1569 si_reset_buffer_resources(sctx, &sctx->const_buffers[shader],
1570 si_const_buffer_descriptors_idx(shader),
1571 buf, old_va);
1572 }
1573
1574 if (rbuffer->bind_history & PIPE_BIND_SHADER_BUFFER) {
1575 for (shader = 0; shader < SI_NUM_SHADERS; shader++)
1576 si_reset_buffer_resources(sctx, &sctx->shader_buffers[shader],
1577 si_shader_buffer_descriptors_idx(shader),
1578 buf, old_va);
1579 }
1580
1581 if (rbuffer->bind_history & PIPE_BIND_SAMPLER_VIEW) {
1582 /* Texture buffers - update bindings. */
1583 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1584 struct si_sampler_views *views = &sctx->samplers[shader].views;
1585 struct si_descriptors *descs =
1586 si_sampler_descriptors(sctx, shader);
1587 unsigned mask = views->enabled_mask;
1588
1589 while (mask) {
1590 unsigned i = u_bit_scan(&mask);
1591 if (views->views[i]->texture == buf) {
1592 si_desc_reset_buffer_offset(ctx,
1593 descs->list +
1594 i * 16 + 4,
1595 old_va, buf);
1596 descs->dirty_mask |= 1u << i;
1597 sctx->descriptors_dirty |=
1598 1u << si_sampler_descriptors_idx(shader);
1599
1600 radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
1601 rbuffer, RADEON_USAGE_READ,
1602 RADEON_PRIO_SAMPLER_BUFFER,
1603 true);
1604 }
1605 }
1606 }
1607 }
1608
1609 /* Shader images */
1610 if (rbuffer->bind_history & PIPE_BIND_SHADER_IMAGE) {
1611 for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
1612 struct si_images_info *images = &sctx->images[shader];
1613 struct si_descriptors *descs =
1614 si_image_descriptors(sctx, shader);
1615 unsigned mask = images->enabled_mask;
1616
1617 while (mask) {
1618 unsigned i = u_bit_scan(&mask);
1619
1620 if (images->views[i].resource == buf) {
1621 if (images->views[i].access & PIPE_IMAGE_ACCESS_WRITE)
1622 si_mark_image_range_valid(&images->views[i]);
1623
1624 si_desc_reset_buffer_offset(
1625 ctx, descs->list + i * 8 + 4,
1626 old_va, buf);
1627 descs->dirty_mask |= 1u << i;
1628 sctx->descriptors_dirty |=
1629 1u << si_image_descriptors_idx(shader);
1630
1631 radeon_add_to_buffer_list_check_mem(
1632 &sctx->b, &sctx->b.gfx, rbuffer,
1633 RADEON_USAGE_READWRITE,
1634 RADEON_PRIO_SAMPLER_BUFFER, true);
1635 }
1636 }
1637 }
1638 }
1639 }
1640
1641 /* Update mutable image descriptor fields of all bound textures. */
1642 void si_update_all_texture_descriptors(struct si_context *sctx)
1643 {
1644 unsigned shader;
1645
1646 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1647 struct si_sampler_views *samplers = &sctx->samplers[shader].views;
1648 struct si_images_info *images = &sctx->images[shader];
1649 unsigned mask;
1650
1651 /* Images. */
1652 mask = images->enabled_mask;
1653 while (mask) {
1654 unsigned i = u_bit_scan(&mask);
1655 struct pipe_image_view *view = &images->views[i];
1656
1657 if (!view->resource ||
1658 view->resource->target == PIPE_BUFFER)
1659 continue;
1660
1661 si_set_shader_image(sctx, shader, i, view);
1662 }
1663
1664 /* Sampler views. */
1665 mask = samplers->enabled_mask;
1666 while (mask) {
1667 unsigned i = u_bit_scan(&mask);
1668 struct pipe_sampler_view *view = samplers->views[i];
1669
1670 if (!view ||
1671 !view->texture ||
1672 view->texture->target == PIPE_BUFFER)
1673 continue;
1674
1675 si_set_sampler_view(sctx, shader, i,
1676 samplers->views[i], true);
1677 }
1678 }
1679 }
1680
1681 /* SHADER USER DATA */
1682
1683 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
1684 unsigned shader)
1685 {
1686 struct si_descriptors *descs =
1687 &sctx->descriptors[SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS];
1688
1689 for (unsigned i = 0; i < SI_NUM_SHADER_DESCS; ++i, ++descs)
1690 descs->pointer_dirty = true;
1691
1692 if (shader == PIPE_SHADER_VERTEX)
1693 sctx->vertex_buffers.pointer_dirty = true;
1694
1695 si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
1696 }
1697
1698 static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
1699 {
1700 int i;
1701
1702 for (i = 0; i < SI_NUM_SHADERS; i++) {
1703 si_mark_shader_pointers_dirty(sctx, i);
1704 }
1705 sctx->descriptors[SI_DESCS_RW_BUFFERS].pointer_dirty = true;
1706 }
1707
1708 /* Set a base register address for user data constants in the given shader.
1709 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
1710 */
1711 static void si_set_user_data_base(struct si_context *sctx,
1712 unsigned shader, uint32_t new_base)
1713 {
1714 uint32_t *base = &sctx->shader_userdata.sh_base[shader];
1715
1716 if (*base != new_base) {
1717 *base = new_base;
1718
1719 if (new_base)
1720 si_mark_shader_pointers_dirty(sctx, shader);
1721 }
1722 }
1723
1724 /* This must be called when these shaders are changed from non-NULL to NULL
1725 * and vice versa:
1726 * - geometry shader
1727 * - tessellation control shader
1728 * - tessellation evaluation shader
1729 */
1730 void si_shader_change_notify(struct si_context *sctx)
1731 {
1732 /* VS can be bound as VS, ES, or LS. */
1733 if (sctx->tes_shader.cso)
1734 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1735 R_00B530_SPI_SHADER_USER_DATA_LS_0);
1736 else if (sctx->gs_shader.cso)
1737 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1738 R_00B330_SPI_SHADER_USER_DATA_ES_0);
1739 else
1740 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1741 R_00B130_SPI_SHADER_USER_DATA_VS_0);
1742
1743 /* TES can be bound as ES, VS, or not bound. */
1744 if (sctx->tes_shader.cso) {
1745 if (sctx->gs_shader.cso)
1746 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1747 R_00B330_SPI_SHADER_USER_DATA_ES_0);
1748 else
1749 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1750 R_00B130_SPI_SHADER_USER_DATA_VS_0);
1751 } else {
1752 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
1753 }
1754 }
1755
1756 static void si_emit_shader_pointer(struct si_context *sctx,
1757 struct si_descriptors *desc,
1758 unsigned sh_base, bool keep_dirty)
1759 {
1760 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
1761 uint64_t va;
1762
1763 if (!desc->pointer_dirty || !desc->buffer)
1764 return;
1765
1766 va = desc->buffer->gpu_address +
1767 desc->buffer_offset;
1768
1769 radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
1770 radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
1771 radeon_emit(cs, va);
1772 radeon_emit(cs, va >> 32);
1773
1774 desc->pointer_dirty = keep_dirty;
1775 }
1776
1777 void si_emit_graphics_shader_userdata(struct si_context *sctx,
1778 struct r600_atom *atom)
1779 {
1780 unsigned shader;
1781 uint32_t *sh_base = sctx->shader_userdata.sh_base;
1782 struct si_descriptors *descs;
1783
1784 descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
1785
1786 if (descs->pointer_dirty) {
1787 si_emit_shader_pointer(sctx, descs,
1788 R_00B030_SPI_SHADER_USER_DATA_PS_0, true);
1789 si_emit_shader_pointer(sctx, descs,
1790 R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
1791 si_emit_shader_pointer(sctx, descs,
1792 R_00B230_SPI_SHADER_USER_DATA_GS_0, true);
1793 si_emit_shader_pointer(sctx, descs,
1794 R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
1795 si_emit_shader_pointer(sctx, descs,
1796 R_00B430_SPI_SHADER_USER_DATA_HS_0, true);
1797 descs->pointer_dirty = false;
1798 }
1799
1800 descs = &sctx->descriptors[SI_DESCS_FIRST_SHADER];
1801
1802 for (shader = 0; shader < SI_NUM_GRAPHICS_SHADERS; shader++) {
1803 unsigned base = sh_base[shader];
1804 unsigned i;
1805
1806 if (!base)
1807 continue;
1808
1809 for (i = 0; i < SI_NUM_SHADER_DESCS; i++, descs++)
1810 si_emit_shader_pointer(sctx, descs, base, false);
1811 }
1812 si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
1813 }
1814
1815 void si_emit_compute_shader_userdata(struct si_context *sctx)
1816 {
1817 unsigned base = R_00B900_COMPUTE_USER_DATA_0;
1818 struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_FIRST_COMPUTE];
1819
1820 for (unsigned i = 0; i < SI_NUM_SHADER_DESCS; ++i, ++descs)
1821 si_emit_shader_pointer(sctx, descs, base, false);
1822 }
1823
1824 /* INIT/DEINIT/UPLOAD */
1825
1826 void si_init_all_descriptors(struct si_context *sctx)
1827 {
1828 int i;
1829 unsigned ce_offset = 0;
1830
1831 for (i = 0; i < SI_NUM_SHADERS; i++) {
1832 si_init_buffer_resources(&sctx->const_buffers[i],
1833 si_const_buffer_descriptors(sctx, i),
1834 SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
1835 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER,
1836 &ce_offset);
1837 si_init_buffer_resources(&sctx->shader_buffers[i],
1838 si_shader_buffer_descriptors(sctx, i),
1839 SI_NUM_SHADER_BUFFERS, SI_SGPR_SHADER_BUFFERS,
1840 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RW_BUFFER,
1841 &ce_offset);
1842
1843 si_init_descriptors(si_sampler_descriptors(sctx, i),
1844 SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS,
1845 null_texture_descriptor, &ce_offset);
1846
1847 si_init_descriptors(si_image_descriptors(sctx, i),
1848 SI_SGPR_IMAGES, 8, SI_NUM_IMAGES,
1849 null_image_descriptor, &ce_offset);
1850 }
1851
1852 si_init_buffer_resources(&sctx->rw_buffers,
1853 &sctx->descriptors[SI_DESCS_RW_BUFFERS],
1854 SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
1855 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS,
1856 &ce_offset);
1857 si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
1858 4, SI_NUM_VERTEX_BUFFERS, NULL, NULL);
1859
1860 sctx->descriptors_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
1861
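/* All CE-backed descriptor lists together must fit in the CE RAM budget
 * assumed here (32768 bytes). */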
1862 assert(ce_offset <= 32768);
1863
1864 /* Set pipe_context functions. */
1865 sctx->b.b.bind_sampler_states = si_bind_sampler_states;
1866 sctx->b.b.set_shader_images = si_set_shader_images;
1867 sctx->b.b.set_constant_buffer = si_pipe_set_constant_buffer;
1868 sctx->b.b.set_polygon_stipple = si_set_polygon_stipple;
1869 sctx->b.b.set_shader_buffers = si_set_shader_buffers;
1870 sctx->b.b.set_sampler_views = si_set_sampler_views;
1871 sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
1872 sctx->b.invalidate_buffer = si_invalidate_buffer;
1873
1874 /* Shader user data. */
1875 si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
1876 si_emit_graphics_shader_userdata);
1877
1878 /* Set default and immutable mappings. */
1879 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
1880 si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
1881 si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
1882 si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
1883 }
1884
1885 bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
1886 {
1887 const unsigned mask = u_bit_consecutive(0, SI_DESCS_FIRST_COMPUTE);
1888 unsigned dirty = sctx->descriptors_dirty & mask;
1889
1890 while (dirty) {
1891 unsigned i = u_bit_scan(&dirty);
1892
1893 if (!si_upload_descriptors(sctx, &sctx->descriptors[i],
1894 &sctx->shader_userdata.atom))
1895 return false;
1896 }
1897
1898 sctx->descriptors_dirty &= ~mask;
1899 return true;
1900 }
1901
1902 bool si_upload_compute_shader_descriptors(struct si_context *sctx)
1903 {
1904 /* Does not update rw_buffers, as that is not needed for compute shaders
1905 * and the input buffer uses the same SGPRs anyway.
1906 */
1907 const unsigned mask = u_bit_consecutive(SI_DESCS_FIRST_COMPUTE,
1908 SI_NUM_DESCS - SI_DESCS_FIRST_COMPUTE);
1909 unsigned dirty = sctx->descriptors_dirty & mask;
1910
1911 while (dirty) {
1912 unsigned i = u_bit_scan(&dirty);
1913
1914 if (!si_upload_descriptors(sctx, &sctx->descriptors[i], NULL))
1915 return false;
1916 }
1917
1918 sctx->descriptors_dirty &= ~mask;
1919
1920 return true;
1921 }
1922
1923 void si_release_all_descriptors(struct si_context *sctx)
1924 {
1925 int i;
1926
1927 for (i = 0; i < SI_NUM_SHADERS; i++) {
1928 si_release_buffer_resources(&sctx->const_buffers[i],
1929 si_const_buffer_descriptors(sctx, i));
1930 si_release_buffer_resources(&sctx->shader_buffers[i],
1931 si_shader_buffer_descriptors(sctx, i));
1932 si_release_sampler_views(&sctx->samplers[i].views);
1933 si_release_image_views(&sctx->images[i]);
1934 }
1935 si_release_buffer_resources(&sctx->rw_buffers,
1936 &sctx->descriptors[SI_DESCS_RW_BUFFERS]);
1937
1938 for (i = 0; i < SI_NUM_DESCS; ++i)
1939 si_release_descriptors(&sctx->descriptors[i]);
1940 si_release_descriptors(&sctx->vertex_buffers);
1941 }
1942
1943 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
1944 {
1945 int i;
1946
1947 for (i = 0; i < SI_NUM_SHADERS; i++) {
1948 si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
1949 si_buffer_resources_begin_new_cs(sctx, &sctx->shader_buffers[i]);
1950 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
1951 si_image_views_begin_new_cs(sctx, &sctx->images[i]);
1952 }
1953 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
1954 si_vertex_buffers_begin_new_cs(sctx);
1955
1956 for (i = 0; i < SI_NUM_DESCS; ++i)
1957 si_descriptors_begin_new_cs(sctx, &sctx->descriptors[i]);
1958
1959 si_shader_userdata_begin_new_cs(sctx);
1960 }