radeonsi: add new SDMA texture copy code
[mesa.git] / src/gallium/drivers/radeonsi/si_descriptors.c
1 /*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Marek Olšák <marek.olsak@amd.com>
25 */
26
27 /* Resource binding slots and sampler states (each described with 8 or
28 * 4 dwords) are stored in lists in memory which is accessed by shaders
29 * using scalar load instructions.
30 *
31 * This file is responsible for managing such lists. It keeps a copy of all
32 * descriptors in CPU memory and re-uploads a whole list if some slots have
33 * been changed.
34 *
35 * This code is also responsible for updating shader pointers to those lists.
36 *
37 * Note that CP DMA can't be used for updating the lists, because a GPU hang
38 * could leave the list in a mid-IB state, the next IB would then get wrong
39 * descriptors, and the whole context would be unusable at that point.
40 * (Note: register shadowing can't be used for the same reason.)
41 *
42 * Also, uploading descriptors to newly allocated memory doesn't require
43 * a KCACHE flush.
44 *
45 *
46 * Possible scenarios for one 16 dword image+sampler slot:
47 *
48 *         | Image        | w/ FMASK   | Buffer       | NULL
49 * [ 0: 3]   Image[0:3]   | Image[0:3] | Null[0:3]    | Null[0:3]
50 * [ 4: 7]   Image[4:7]   | Image[4:7] | Buffer[0:3]  | 0
51 * [ 8:11]   Null[0:3]    | Fmask[0:3] | Null[0:3]    | Null[0:3]
52 * [12:15]   Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
53 *
54 * FMASK implies MSAA, therefore no sampler state.
55 * Sampler states are never unbound except when FMASK is bound.
56 */
57
58 #include "radeon/r600_cs.h"
59 #include "si_pipe.h"
60 #include "si_shader.h"
61 #include "sid.h"
62
63 #include "util/u_math.h"
64 #include "util/u_memory.h"
65 #include "util/u_suballoc.h"
66 #include "util/u_upload_mgr.h"
67
68
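/* Illustration only (hypothetical type, not used by the driver): the
 * 16-dword image+sampler slot described in the header comment, written
 * out as a struct. The code below works on raw dwords instead, using
 * memcpy at offsets slot*16, slot*16 + 8 and slot*16 + 12.
 */
struct si_image_sampler_slot_example {
	uint32_t image[8];   /* words [ 0: 7]: image, or null[0:3] + buffer[0:3] */
	uint32_t fmask[4];   /* words [ 8:11]: FMASK, or null */
	uint32_t sampler[4]; /* words [12:15]: sampler state */
};
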
69 /* NULL image and buffer descriptor for textures (alpha = 1) and images
70 * (alpha = 0).
71 *
72 * For images, all fields must be zero except for the swizzle, which
73 * supports arbitrary combinations of 0s and 1s. The texture type must be
74 * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
75 *
76 * For buffers, all fields must be zero. If they are not, the hw hangs.
77 *
78 * This is the only reason why the buffer descriptor must be in words [4:7].
79 */
80 static uint32_t null_texture_descriptor[8] = {
81 0,
82 0,
83 0,
84 S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
85 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
86 /* the rest must contain zeros; these dwords also serve as the (null)
87 * buffer descriptor */
88 };
89
90 static uint32_t null_image_descriptor[8] = {
91 0,
92 0,
93 0,
94 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
95 /* the rest must contain zeros; these dwords also serve as the (null)
96 * buffer descriptor */
97 };
98
99 static void si_init_descriptors(struct si_descriptors *desc,
100 unsigned shader_userdata_index,
101 unsigned element_dw_size,
102 unsigned num_elements,
103 const uint32_t *null_descriptor,
104 unsigned *ce_offset)
105 {
106 int i;
107
108 assert(num_elements <= sizeof(desc->enabled_mask)*8);
109
110 desc->list = CALLOC(num_elements, element_dw_size * 4);
111 desc->element_dw_size = element_dw_size;
112 desc->num_elements = num_elements;
113 desc->dirty_mask = num_elements == 32 ? ~0u : (1u << num_elements) - 1;
114 desc->shader_userdata_offset = shader_userdata_index * 4;
115
116 if (ce_offset) {
117 desc->ce_offset = *ce_offset;
118
119 /* make sure that ce_offset stays 32 byte aligned */
120 *ce_offset += align(element_dw_size * num_elements * 4, 32);
121 }
122
123 /* Initialize the array to NULL descriptors (the element size must be a multiple of 8 dwords). */
124 if (null_descriptor) {
125 assert(element_dw_size % 8 == 0);
126 for (i = 0; i < num_elements * element_dw_size / 8; i++)
127 memcpy(desc->list + i * 8, null_descriptor,
128 8 * 4);
129 }
130 }
131
132 static void si_release_descriptors(struct si_descriptors *desc)
133 {
134 pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
135 FREE(desc->list);
136 }
137
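/* Dump "size" bytes of CE RAM, starting at ce_offset, into a freshly
 * suballocated buffer so that shaders can read the descriptors from memory.
 * Returns the suballocation in *out_offset/*out_buf, or false on failure.
 */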
138 static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
139 unsigned *out_offset, struct r600_resource **out_buf) {
140 uint64_t va;
141
142 u_suballocator_alloc(sctx->ce_suballocator, size, out_offset,
143 (struct pipe_resource**)out_buf);
144 if (!*out_buf)
145 return false;
146
147 va = (*out_buf)->gpu_address + *out_offset;
148
149 radeon_emit(sctx->ce_ib, PKT3(PKT3_DUMP_CONST_RAM, 3, 0));
150 radeon_emit(sctx->ce_ib, ce_offset);
151 radeon_emit(sctx->ce_ib, size / 4);
152 radeon_emit(sctx->ce_ib, va);
153 radeon_emit(sctx->ce_ib, va >> 32);
154
155 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, *out_buf,
156 RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
157
158 sctx->ce_need_synchronization = true;
159 return true;
160 }
161
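/* Reload a descriptor list from its last uploaded buffer back into CE RAM
 * (LOAD_CONST_RAM). This is needed when ce_ram_dirty is set, i.e. at the
 * beginning of a new CS, where the CE RAM contents from the previous
 * submission can no longer be relied upon.
 */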
162 static void si_reinitialize_ce_ram(struct si_context *sctx,
163 struct si_descriptors *desc)
164 {
165 if (desc->buffer) {
166 struct r600_resource *buffer = (struct r600_resource*)desc->buffer;
167 unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
168 uint64_t va = buffer->gpu_address + desc->buffer_offset;
169 struct radeon_winsys_cs *ib = sctx->ce_preamble_ib;
170
171 if (!ib)
172 ib = sctx->ce_ib;
173
174 list_size = align(list_size, 32);
175
176 radeon_emit(ib, PKT3(PKT3_LOAD_CONST_RAM, 3, 0));
177 radeon_emit(ib, va);
178 radeon_emit(ib, va >> 32);
179 radeon_emit(ib, list_size / 4);
180 radeon_emit(ib, desc->ce_offset);
181
182 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
183 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
184 }
185 desc->ce_ram_dirty = false;
186 }
187
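/* Emit CONTEXT_CONTROL so that the CP loads CE RAM contents and keeps
 * register shadowing enabled while processing the IB.
 */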
188 void si_ce_enable_loads(struct radeon_winsys_cs *ib)
189 {
190 radeon_emit(ib, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
191 radeon_emit(ib, CONTEXT_CONTROL_LOAD_ENABLE(1) |
192 CONTEXT_CONTROL_LOAD_CE_RAM(1));
193 radeon_emit(ib, CONTEXT_CONTROL_SHADOW_ENABLE(1));
194 }
195
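/* Upload a descriptor list so that shaders can access it. With the CE,
 * only the dirty slot ranges are written into CE RAM and the whole list is
 * then dumped into a fresh suballocation; without the CE, the whole list
 * is copied through the upload manager. In both cases the shader pointer
 * is marked dirty afterwards.
 */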
196 static bool si_upload_descriptors(struct si_context *sctx,
197 struct si_descriptors *desc,
198 struct r600_atom * atom)
199 {
200 unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
201
202 if (!desc->dirty_mask)
203 return true;
204
205 if (sctx->ce_ib) {
206 const uint32_t *list = desc->list;
207
208 if (desc->ce_ram_dirty)
209 si_reinitialize_ce_ram(sctx, desc);
210
211 while(desc->dirty_mask) {
212 int begin, count;
213 u_bit_scan_consecutive_range(&desc->dirty_mask, &begin,
214 &count);
215
216 begin *= desc->element_dw_size;
217 count *= desc->element_dw_size;
218
219 radeon_emit(sctx->ce_ib,
220 PKT3(PKT3_WRITE_CONST_RAM, count, 0));
221 radeon_emit(sctx->ce_ib, desc->ce_offset + begin * 4);
222 radeon_emit_array(sctx->ce_ib, list + begin, count);
223 }
224
225 if (!si_ce_upload(sctx, desc->ce_offset, list_size,
226 &desc->buffer_offset, &desc->buffer))
227 return false;
228 } else {
229 void *ptr;
230
231 u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
232 &desc->buffer_offset,
233 (struct pipe_resource**)&desc->buffer, &ptr);
234 if (!desc->buffer)
235 return false; /* skip the draw call */
236
237 util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
238
239 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
240 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
241 }
242 desc->pointer_dirty = true;
243 desc->dirty_mask = 0;
244
245 if (atom)
246 si_mark_atom_dirty(sctx, atom);
247
248 return true;
249 }
250
251 /* SAMPLER VIEWS */
252
253 static void si_release_sampler_views(struct si_sampler_views *views)
254 {
255 int i;
256
257 for (i = 0; i < Elements(views->views); i++) {
258 pipe_sampler_view_reference(&views->views[i], NULL);
259 }
260 si_release_descriptors(&views->desc);
261 }
262
263 static void si_sampler_view_add_buffer(struct si_context *sctx,
264 struct pipe_resource *resource,
265 enum radeon_bo_usage usage)
266 {
267 struct r600_resource *rres = (struct r600_resource*)resource;
268
269 if (!resource)
270 return;
271
272 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rres, usage,
273 r600_get_sampler_view_priority(rres));
274 }
275
276 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
277 struct si_sampler_views *views)
278 {
279 unsigned mask = views->desc.enabled_mask;
280
281 /* Add buffers to the CS. */
282 while (mask) {
283 int i = u_bit_scan(&mask);
284
285 si_sampler_view_add_buffer(sctx, views->views[i]->texture,
286 RADEON_USAGE_READ);
287 }
288
289 views->desc.ce_ram_dirty = true;
290
291 if (!views->desc.buffer)
292 return;
293 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, views->desc.buffer,
294 RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
295 }
296
297 static void si_set_sampler_view(struct si_context *sctx,
298 struct si_sampler_views *views,
299 unsigned slot, struct pipe_sampler_view *view)
300 {
301 struct si_sampler_view *rview = (struct si_sampler_view*)view;
302
303 if (view && view->texture && view->texture->target != PIPE_BUFFER &&
304 G_008F28_COMPRESSION_EN(rview->state[6]) &&
305 ((struct r600_texture*)view->texture)->dcc_offset == 0) {
306 rview->state[6] &= C_008F28_COMPRESSION_EN &
307 C_008F28_ALPHA_IS_ON_MSB;
308 } else if (views->views[slot] == view)
309 return;
310
311 if (view) {
312 struct r600_texture *rtex = (struct r600_texture *)view->texture;
313
314 si_sampler_view_add_buffer(sctx, view->texture,
315 RADEON_USAGE_READ);
316
317 pipe_sampler_view_reference(&views->views[slot], view);
318 memcpy(views->desc.list + slot * 16, rview->state, 8*4);
319
320 if (view->texture && view->texture->target != PIPE_BUFFER &&
321 rtex->fmask.size) {
322 memcpy(views->desc.list + slot*16 + 8,
323 rview->fmask_state, 8*4);
324 } else {
325 /* Disable FMASK and bind sampler state in [12:15]. */
326 memcpy(views->desc.list + slot*16 + 8,
327 null_texture_descriptor, 4*4);
328
329 if (views->sampler_states[slot])
330 memcpy(views->desc.list + slot*16 + 12,
331 views->sampler_states[slot], 4*4);
332 }
333
334 views->desc.enabled_mask |= 1u << slot;
335 } else {
336 pipe_sampler_view_reference(&views->views[slot], NULL);
337 memcpy(views->desc.list + slot*16, null_texture_descriptor, 8*4);
338 /* Only clear the lower dwords of FMASK. */
339 memcpy(views->desc.list + slot*16 + 8, null_texture_descriptor, 4*4);
340 views->desc.enabled_mask &= ~(1u << slot);
341 }
342
343 views->desc.dirty_mask |= 1u << slot;
344 }
345
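/* Whether a bound color texture has metadata (CMASK, FMASK, or DCC with
 * levels still marked dirty) that may have to be flushed or decompressed
 * before the texture can be sampled; such slots are tracked in
 * compressed_colortex_mask.
 */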
346 static bool is_compressed_colortex(struct r600_texture *rtex)
347 {
348 return rtex->cmask.size || rtex->fmask.size ||
349 (rtex->dcc_offset && rtex->dirty_level_mask);
350 }
351
352 static void si_set_sampler_views(struct pipe_context *ctx,
353 unsigned shader, unsigned start,
354 unsigned count,
355 struct pipe_sampler_view **views)
356 {
357 struct si_context *sctx = (struct si_context *)ctx;
358 struct si_textures_info *samplers = &sctx->samplers[shader];
359 int i;
360
361 if (!count || shader >= SI_NUM_SHADERS)
362 return;
363
364 for (i = 0; i < count; i++) {
365 unsigned slot = start + i;
366
367 if (!views || !views[i]) {
368 samplers->depth_texture_mask &= ~(1u << slot);
369 samplers->compressed_colortex_mask &= ~(1u << slot);
370 si_set_sampler_view(sctx, &samplers->views, slot, NULL);
371 continue;
372 }
373
374 si_set_sampler_view(sctx, &samplers->views, slot, views[i]);
375
376 if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
377 struct r600_texture *rtex =
378 (struct r600_texture*)views[i]->texture;
379
380 if (rtex->is_depth && !rtex->is_flushing_texture) {
381 samplers->depth_texture_mask |= 1u << slot;
382 } else {
383 samplers->depth_texture_mask &= ~(1u << slot);
384 }
385 if (is_compressed_colortex(rtex)) {
386 samplers->compressed_colortex_mask |= 1u << slot;
387 } else {
388 samplers->compressed_colortex_mask &= ~(1u << slot);
389 }
390 } else {
391 samplers->depth_texture_mask &= ~(1u << slot);
392 samplers->compressed_colortex_mask &= ~(1u << slot);
393 }
394 }
395 }
396
397 static void
398 si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
399 {
400 unsigned mask = samplers->views.desc.enabled_mask;
401
402 while (mask) {
403 int i = u_bit_scan(&mask);
404 struct pipe_resource *res = samplers->views.views[i]->texture;
405
406 if (res && res->target != PIPE_BUFFER) {
407 struct r600_texture *rtex = (struct r600_texture *)res;
408
409 if (is_compressed_colortex(rtex)) {
410 samplers->compressed_colortex_mask |= 1u << i;
411 } else {
412 samplers->compressed_colortex_mask &= ~(1u << i);
413 }
414 }
415 }
416 }
417
418 /* IMAGE VIEWS */
419
420 static void
421 si_release_image_views(struct si_images_info *images)
422 {
423 unsigned i;
424
425 for (i = 0; i < SI_NUM_IMAGES; ++i) {
426 struct pipe_image_view *view = &images->views[i];
427
428 pipe_resource_reference(&view->resource, NULL);
429 }
430
431 si_release_descriptors(&images->desc);
432 }
433
434 static void
435 si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
436 {
437 uint mask = images->desc.enabled_mask;
438
439 /* Add buffers to the CS. */
440 while (mask) {
441 int i = u_bit_scan(&mask);
442 struct pipe_image_view *view = &images->views[i];
443
444 assert(view->resource);
445
446 si_sampler_view_add_buffer(sctx, view->resource,
447 RADEON_USAGE_READWRITE);
448 }
449
450 images->desc.ce_ram_dirty = true;
451
452 if (images->desc.buffer) {
453 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
454 images->desc.buffer,
455 RADEON_USAGE_READ,
456 RADEON_PRIO_DESCRIPTORS);
457 }
458 }
459
460 static void
461 si_disable_shader_image(struct si_images_info *images, unsigned slot)
462 {
463 if (images->desc.enabled_mask & (1u << slot)) {
464 pipe_resource_reference(&images->views[slot].resource, NULL);
465 images->compressed_colortex_mask &= ~(1 << slot);
466
467 memcpy(images->desc.list + slot*8, null_image_descriptor, 8*4);
468 images->desc.enabled_mask &= ~(1u << slot);
469 images->desc.dirty_mask |= 1u << slot;
470 }
471 }
472
473 static void
474 si_set_shader_images(struct pipe_context *pipe, unsigned shader,
475 unsigned start_slot, unsigned count,
476 struct pipe_image_view *views)
477 {
478 struct si_context *ctx = (struct si_context *)pipe;
479 struct si_screen *screen = ctx->screen;
480 struct si_images_info *images = &ctx->images[shader];
481 unsigned i, slot;
482
483 assert(shader < SI_NUM_SHADERS);
484
485 if (!count)
486 return;
487
488 assert(start_slot + count <= SI_NUM_IMAGES);
489
490 for (i = 0, slot = start_slot; i < count; ++i, ++slot) {
491 struct r600_resource *res;
492
493 if (!views || !views[i].resource) {
494 si_disable_shader_image(images, slot);
495 continue;
496 }
497
498 res = (struct r600_resource *)views[i].resource;
499 util_copy_image_view(&images->views[slot], &views[i]);
500
501 si_sampler_view_add_buffer(ctx, &res->b.b,
502 RADEON_USAGE_READWRITE);
503
504 if (res->b.b.target == PIPE_BUFFER) {
505 si_make_buffer_descriptor(screen, res,
506 views[i].format,
507 views[i].u.buf.first_element,
508 views[i].u.buf.last_element,
509 images->desc.list + slot * 8);
510 images->compressed_colortex_mask &= ~(1 << slot);
511 } else {
512 static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
513 struct r600_texture *tex = (struct r600_texture *)res;
514 unsigned level;
515 unsigned width, height, depth;
516
517 assert(!tex->is_depth);
518 assert(tex->fmask.size == 0);
519
520 if (tex->dcc_offset &&
521 views[i].access & PIPE_IMAGE_ACCESS_WRITE)
522 r600_texture_disable_dcc(&screen->b, tex);
523
524 if (is_compressed_colortex(tex)) {
525 images->compressed_colortex_mask |= 1 << slot;
526 } else {
527 images->compressed_colortex_mask &= ~(1 << slot);
528 }
529
530 /* Always force the base level to the selected level.
531 *
532 * This is required for 3D textures, where otherwise
533 * selecting a single slice for non-layered bindings
534 * fails. It doesn't hurt the other targets.
535 */
536 level = views[i].u.tex.level;
537 width = u_minify(res->b.b.width0, level);
538 height = u_minify(res->b.b.height0, level);
539 depth = u_minify(res->b.b.depth0, level);
540
541 si_make_texture_descriptor(screen, tex, false, res->b.b.target,
542 views[i].format, swizzle,
543 level, 0, 0,
544 views[i].u.tex.first_layer, views[i].u.tex.last_layer,
545 width, height, depth,
546 images->desc.list + slot * 8,
547 NULL);
548 }
549
550 images->desc.enabled_mask |= 1u << slot;
551 images->desc.dirty_mask |= 1u << slot;
552 }
553 }
554
555 static void
556 si_images_update_compressed_colortex_mask(struct si_images_info *images)
557 {
558 unsigned mask = images->desc.enabled_mask;
559
560 while (mask) {
561 int i = u_bit_scan(&mask);
562 struct pipe_resource *res = images->views[i].resource;
563
564 if (res && res->target != PIPE_BUFFER) {
565 struct r600_texture *rtex = (struct r600_texture *)res;
566
567 if (is_compressed_colortex(rtex)) {
568 images->compressed_colortex_mask |= 1 << i;
569 } else {
570 images->compressed_colortex_mask &= ~(1 << i);
571 }
572 }
573 }
574 }
575
576 /* SAMPLER STATES */
577
578 static void si_bind_sampler_states(struct pipe_context *ctx, unsigned shader,
579 unsigned start, unsigned count, void **states)
580 {
581 struct si_context *sctx = (struct si_context *)ctx;
582 struct si_textures_info *samplers = &sctx->samplers[shader];
583 struct si_descriptors *desc = &samplers->views.desc;
584 struct si_sampler_state **sstates = (struct si_sampler_state**)states;
585 int i;
586
587 if (!count || shader >= SI_NUM_SHADERS)
588 return;
589
590 for (i = 0; i < count; i++) {
591 unsigned slot = start + i;
592
593 if (!sstates[i] ||
594 sstates[i] == samplers->views.sampler_states[slot])
595 continue;
596
597 samplers->views.sampler_states[slot] = sstates[i];
598
599 /* If FMASK is bound, don't overwrite it.
600 * The sampler state will be set after FMASK is unbound.
601 */
602 if (samplers->views.views[slot] &&
603 samplers->views.views[slot]->texture &&
604 samplers->views.views[slot]->texture->target != PIPE_BUFFER &&
605 ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
606 continue;
607
608 memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
609 desc->dirty_mask |= 1u << slot;
610 }
611 }
612
613 /* BUFFER RESOURCES */
614
615 static void si_init_buffer_resources(struct si_buffer_resources *buffers,
616 unsigned num_buffers,
617 unsigned shader_userdata_index,
618 enum radeon_bo_usage shader_usage,
619 enum radeon_bo_priority priority,
620 unsigned *ce_offset)
621 {
622 buffers->shader_usage = shader_usage;
623 buffers->priority = priority;
624 buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
625
626 si_init_descriptors(&buffers->desc, shader_userdata_index, 4,
627 num_buffers, NULL, ce_offset);
628 }
629
630 static void si_release_buffer_resources(struct si_buffer_resources *buffers)
631 {
632 int i;
633
634 for (i = 0; i < buffers->desc.num_elements; i++) {
635 pipe_resource_reference(&buffers->buffers[i], NULL);
636 }
637
638 FREE(buffers->buffers);
639 si_release_descriptors(&buffers->desc);
640 }
641
642 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
643 struct si_buffer_resources *buffers)
644 {
645 unsigned mask = buffers->desc.enabled_mask;
646
647 /* Add buffers to the CS. */
648 while (mask) {
649 int i = u_bit_scan(&mask);
650
651 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
652 (struct r600_resource*)buffers->buffers[i],
653 buffers->shader_usage, buffers->priority);
654 }
655
656 buffers->desc.ce_ram_dirty = true;
657
658 if (!buffers->desc.buffer)
659 return;
660 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
661 buffers->desc.buffer, RADEON_USAGE_READWRITE,
662 RADEON_PRIO_DESCRIPTORS);
663 }
664
665 /* VERTEX BUFFERS */
666
667 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
668 {
669 struct si_descriptors *desc = &sctx->vertex_buffers;
670 int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
671 int i;
672
673 for (i = 0; i < count; i++) {
674 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
675
676 if (vb >= Elements(sctx->vertex_buffer))
677 continue;
678 if (!sctx->vertex_buffer[vb].buffer)
679 continue;
680
681 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
682 (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
683 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
684 }
685
686 if (!desc->buffer)
687 return;
688 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
689 desc->buffer, RADEON_USAGE_READ,
690 RADEON_PRIO_DESCRIPTORS);
691 }
692
693 static bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
694 {
695 struct si_descriptors *desc = &sctx->vertex_buffers;
696 bool bound[SI_NUM_VERTEX_BUFFERS] = {};
697 unsigned i, count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
698 uint64_t va;
699 uint32_t *ptr;
700 
701 if (!sctx->vertex_buffers_dirty)
702 return true;
703 if (!count)
704 return true;
705
706 /* Vertex buffer descriptors are the only ones which are uploaded
707 * directly through a staging buffer and don't go through
708 * the fine-grained upload path.
709 */
710 u_upload_alloc(sctx->b.uploader, 0, count * 16, 256, &desc->buffer_offset,
711 (struct pipe_resource**)&desc->buffer, (void**)&ptr);
712 if (!desc->buffer)
713 return false;
714
715 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
716 desc->buffer, RADEON_USAGE_READ,
717 RADEON_PRIO_DESCRIPTORS);
718
719 assert(count <= SI_NUM_VERTEX_BUFFERS);
720
721 for (i = 0; i < count; i++) {
722 struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
723 struct pipe_vertex_buffer *vb;
724 struct r600_resource *rbuffer;
725 unsigned offset;
726 uint32_t *desc = &ptr[i*4];
727
728 if (ve->vertex_buffer_index >= Elements(sctx->vertex_buffer)) {
729 memset(desc, 0, 16);
730 continue;
731 }
732
733 vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
734 rbuffer = (struct r600_resource*)vb->buffer;
735 if (!rbuffer) {
736 memset(desc, 0, 16);
737 continue;
738 }
739
740 offset = vb->buffer_offset + ve->src_offset;
741 va = rbuffer->gpu_address + offset;
742
743 /* Fill in T# buffer resource description */
744 desc[0] = va;
745 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
746 S_008F04_STRIDE(vb->stride);
747
748 if (sctx->b.chip_class <= CIK && vb->stride)
749 /* Round up by rounding down and adding 1 */
750 desc[2] = (vb->buffer->width0 - offset -
751 sctx->vertex_elements->format_size[i]) /
752 vb->stride + 1;
753 else
754 desc[2] = vb->buffer->width0 - offset;
755
756 desc[3] = sctx->vertex_elements->rsrc_word3[i];
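		/* Worked example with hypothetical numbers: width0 = 100,
		 * offset = 4, stride = 12 and format_size = 8 give
		 * (100 - 4 - 8) / 12 + 1 = 8 records on SI/CI, i.e. the last
		 * vertex only has to fit format_size bytes rather than a whole
		 * stride. Otherwise (VI, or stride 0), the remaining byte size
		 * is used directly.
		 */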
757
758 if (!bound[ve->vertex_buffer_index]) {
759 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
760 (struct r600_resource*)vb->buffer,
761 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
762 bound[ve->vertex_buffer_index] = true;
763 }
764 }
765
766 /* Don't flush the const cache. It would have a very negative effect
767 * on performance (confirmed by testing). New descriptors are always
768 * uploaded to a fresh buffer, so flushing the const cache shouldn't
769 * be needed. */
770 desc->pointer_dirty = true;
771 si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
772 sctx->vertex_buffers_dirty = false;
773 return true;
774 }
775
776
777 /* CONSTANT BUFFERS */
778
779 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
780 const uint8_t *ptr, unsigned size, uint32_t *const_offset)
781 {
782 void *tmp;
783
784 u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
785 (struct pipe_resource**)rbuffer, &tmp);
786 if (*rbuffer)
787 util_memcpy_cpu_to_le32(tmp, ptr, size);
788 }
789
790 void si_set_constant_buffer(struct si_context *sctx,
791 struct si_buffer_resources *buffers,
792 uint slot, struct pipe_constant_buffer *input)
793 {
794 assert(slot < buffers->desc.num_elements);
795 pipe_resource_reference(&buffers->buffers[slot], NULL);
796
797 /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
798 * with a NULL buffer). We need to use a dummy buffer instead. */
799 if (sctx->b.chip_class == CIK &&
800 (!input || (!input->buffer && !input->user_buffer)))
801 input = &sctx->null_const_buf;
802
803 if (input && (input->buffer || input->user_buffer)) {
804 struct pipe_resource *buffer = NULL;
805 uint64_t va;
806
807 /* Upload the user buffer if needed. */
808 if (input->user_buffer) {
809 unsigned buffer_offset;
810
811 si_upload_const_buffer(sctx,
812 (struct r600_resource**)&buffer, input->user_buffer,
813 input->buffer_size, &buffer_offset);
814 if (!buffer) {
815 /* Just unbind on failure. */
816 si_set_constant_buffer(sctx, buffers, slot, NULL);
817 return;
818 }
819 va = r600_resource(buffer)->gpu_address + buffer_offset;
820 } else {
821 pipe_resource_reference(&buffer, input->buffer);
822 va = r600_resource(buffer)->gpu_address + input->buffer_offset;
823 }
824
825 /* Set the descriptor. */
826 uint32_t *desc = buffers->desc.list + slot*4;
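		/* Buffer descriptor layout written below:
		 * dword 0: base address bits [31:0]
		 * dword 1: base address bits [47:32], stride = 0 (raw buffer)
		 * dword 2: number of records; with stride 0 this is the size
		 *          in bytes
		 * dword 3: dst_sel swizzle and num/data format
		 */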
827 desc[0] = va;
828 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
829 S_008F04_STRIDE(0);
830 desc[2] = input->buffer_size;
831 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
832 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
833 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
834 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
835 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
836 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
837
838 buffers->buffers[slot] = buffer;
839 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
840 (struct r600_resource*)buffer,
841 buffers->shader_usage, buffers->priority);
842 buffers->desc.enabled_mask |= 1u << slot;
843 } else {
844 /* Clear the descriptor. */
845 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
846 buffers->desc.enabled_mask &= ~(1u << slot);
847 }
848
849 buffers->desc.dirty_mask |= 1u << slot;
850 }
851
852 static void si_pipe_set_constant_buffer(struct pipe_context *ctx,
853 uint shader, uint slot,
854 struct pipe_constant_buffer *input)
855 {
856 struct si_context *sctx = (struct si_context *)ctx;
857
858 if (shader >= SI_NUM_SHADERS)
859 return;
860
861 si_set_constant_buffer(sctx, &sctx->const_buffers[shader], slot, input);
862 }
863
864 /* SHADER BUFFERS */
865
866 static void si_set_shader_buffers(struct pipe_context *ctx, unsigned shader,
867 unsigned start_slot, unsigned count,
868 struct pipe_shader_buffer *sbuffers)
869 {
870 struct si_context *sctx = (struct si_context *)ctx;
871 struct si_buffer_resources *buffers = &sctx->shader_buffers[shader];
872 unsigned i;
873
874 assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
875
876 for (i = 0; i < count; ++i) {
877 struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
878 struct r600_resource *buf;
879 unsigned slot = start_slot + i;
880 uint32_t *desc = buffers->desc.list + slot * 4;
881 uint64_t va;
882
883 if (!sbuffer || !sbuffer->buffer) {
884 pipe_resource_reference(&buffers->buffers[slot], NULL);
885 memset(desc, 0, sizeof(uint32_t) * 4);
886 buffers->desc.enabled_mask &= ~(1u << slot);
887 continue;
888 }
889
890 buf = (struct r600_resource *)sbuffer->buffer;
891 va = buf->gpu_address + sbuffer->buffer_offset;
892
893 desc[0] = va;
894 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
895 S_008F04_STRIDE(0);
896 desc[2] = sbuffer->buffer_size;
897 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
898 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
899 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
900 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
901 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
902 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
903
904 pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
905 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buf,
906 buffers->shader_usage, buffers->priority);
907 buffers->desc.enabled_mask |= 1u << slot;
908 buffers->desc.dirty_mask |= 1u << slot;
909 }
910 }
911
912 /* RING BUFFERS */
913
914 void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
915 struct pipe_resource *buffer,
916 unsigned stride, unsigned num_records,
917 bool add_tid, bool swizzle,
918 unsigned element_size, unsigned index_stride, uint64_t offset)
919 {
920 struct si_context *sctx = (struct si_context *)ctx;
921 struct si_buffer_resources *buffers = &sctx->rw_buffers;
922
923 /* The stride field in the resource descriptor has 14 bits */
924 assert(stride < (1 << 14));
925
926 assert(slot < buffers->desc.num_elements);
927 pipe_resource_reference(&buffers->buffers[slot], NULL);
928
929 if (buffer) {
930 uint64_t va;
931
932 va = r600_resource(buffer)->gpu_address + offset;
933
934 switch (element_size) {
935 default:
936 assert(!"Unsupported ring buffer element size");
937 case 0:
938 case 2:
939 element_size = 0;
940 break;
941 case 4:
942 element_size = 1;
943 break;
944 case 8:
945 element_size = 2;
946 break;
947 case 16:
948 element_size = 3;
949 break;
950 }
951
952 switch (index_stride) {
953 default:
954 assert(!"Unsupported ring buffer index stride");
955 case 0:
956 case 8:
957 index_stride = 0;
958 break;
959 case 16:
960 index_stride = 1;
961 break;
962 case 32:
963 index_stride = 2;
964 break;
965 case 64:
966 index_stride = 3;
967 break;
968 }
969
970 if (sctx->b.chip_class >= VI && stride)
971 num_records *= stride;
972
973 /* Set the descriptor. */
974 uint32_t *desc = buffers->desc.list + slot*4;
975 desc[0] = va;
976 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
977 S_008F04_STRIDE(stride) |
978 S_008F04_SWIZZLE_ENABLE(swizzle);
979 desc[2] = num_records;
980 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
981 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
982 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
983 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
984 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
985 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
986 S_008F0C_ELEMENT_SIZE(element_size) |
987 S_008F0C_INDEX_STRIDE(index_stride) |
988 S_008F0C_ADD_TID_ENABLE(add_tid);
989
990 pipe_resource_reference(&buffers->buffers[slot], buffer);
991 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
992 (struct r600_resource*)buffer,
993 buffers->shader_usage, buffers->priority);
994 buffers->desc.enabled_mask |= 1u << slot;
995 } else {
996 /* Clear the descriptor. */
997 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
998 buffers->desc.enabled_mask &= ~(1u << slot);
999 }
1000
1001 buffers->desc.dirty_mask |= 1u << slot;
1002 }
1003
1004 /* STREAMOUT BUFFERS */
1005
1006 static void si_set_streamout_targets(struct pipe_context *ctx,
1007 unsigned num_targets,
1008 struct pipe_stream_output_target **targets,
1009 const unsigned *offsets)
1010 {
1011 struct si_context *sctx = (struct si_context *)ctx;
1012 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1013 unsigned old_num_targets = sctx->b.streamout.num_targets;
1014 unsigned i, bufidx;
1015
1016 /* We are going to unbind the buffers. Mark which caches need to be flushed. */
1017 if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
1018 /* Since streamout uses vector writes which go through TC L2
1019 * and most other clients can use TC L2 as well, we don't need
1020 * to flush it.
1021 *
1022 * The only case which requires flushing it is VGT DMA index
1023 * fetching, which is a rare case. Thus, flag the TC L2
1024 * dirtiness in the resource and handle it when index fetching
1025 * is used.
1026 */
1027 for (i = 0; i < sctx->b.streamout.num_targets; i++)
1028 if (sctx->b.streamout.targets[i])
1029 r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
1030
1031 /* Invalidate the scalar cache in case a streamout buffer is
1032 * going to be used as a constant buffer.
1033 *
1034 * Invalidate TC L1, because streamout bypasses it (done by
1035 * setting GLC=1 in the store instruction), but it can contain
1036 * outdated data of streamout buffers.
1037 *
1038 * VS_PARTIAL_FLUSH is required if the buffers are going to be
1039 * used as an input immediately.
1040 */
1041 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
1042 SI_CONTEXT_INV_VMEM_L1 |
1043 SI_CONTEXT_VS_PARTIAL_FLUSH;
1044 }
1045
1046 /* All readers of the streamout targets need to be finished before we can
1047 * start writing to the targets.
1048 */
1049 if (num_targets)
1050 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
1051 SI_CONTEXT_CS_PARTIAL_FLUSH;
1052
1053 /* Streamout buffers must be bound in 2 places:
1054 * 1) in VGT by setting the VGT_STRMOUT registers
1055 * 2) as shader resources
1056 */
1057
1058 /* Set the VGT regs. */
1059 r600_set_streamout_targets(ctx, num_targets, targets, offsets);
1060
1061 /* Set the shader resources. */
1062 for (i = 0; i < num_targets; i++) {
1063 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1064
1065 if (targets[i]) {
1066 struct pipe_resource *buffer = targets[i]->buffer;
1067 uint64_t va = r600_resource(buffer)->gpu_address;
1068
1069 /* Set the descriptor.
1070 *
1071 * On VI, the format must be non-INVALID, otherwise
1072 * the buffer will be considered not bound and store
1073 * instructions will be no-ops.
1074 */
1075 uint32_t *desc = buffers->desc.list + bufidx*4;
1076 desc[0] = va;
1077 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
1078 desc[2] = 0xffffffff;
1079 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1080 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1081 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1082 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1083 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1084
1085 /* Set the resource. */
1086 pipe_resource_reference(&buffers->buffers[bufidx],
1087 buffer);
1088 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1089 (struct r600_resource*)buffer,
1090 buffers->shader_usage, buffers->priority);
1091 buffers->desc.enabled_mask |= 1u << bufidx;
1092 } else {
1093 /* Clear the descriptor and unset the resource. */
1094 memset(buffers->desc.list + bufidx*4, 0,
1095 sizeof(uint32_t) * 4);
1096 pipe_resource_reference(&buffers->buffers[bufidx],
1097 NULL);
1098 buffers->desc.enabled_mask &= ~(1u << bufidx);
1099 }
1100 buffers->desc.dirty_mask |= 1u << bufidx;
1101 }
1102 for (; i < old_num_targets; i++) {
1103 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1104 /* Clear the descriptor and unset the resource. */
1105 memset(buffers->desc.list + bufidx*4, 0, sizeof(uint32_t) * 4);
1106 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
1107 buffers->desc.enabled_mask &= ~(1u << bufidx);
1108 buffers->desc.dirty_mask |= 1u << bufidx;
1109 }
1110 }
1111
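/* Example with hypothetical addresses: if the old buffer was at VA 0x1000
 * and the descriptor pointed to 0x1040, the offset within the buffer is
 * 0x40; after the buffer moves to VA 0x8000, the descriptor base is
 * rewritten to 0x8040 while the remaining descriptor words are untouched.
 */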
1112 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
1113 uint32_t *desc, uint64_t old_buf_va,
1114 struct pipe_resource *new_buf)
1115 {
1116 /* Retrieve the buffer offset from the descriptor. */
1117 uint64_t old_desc_va =
1118 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
1119
1120 assert(old_buf_va <= old_desc_va);
1121 uint64_t offset_within_buffer = old_desc_va - old_buf_va;
1122
1123 /* Update the descriptor. */
1124 uint64_t va = r600_resource(new_buf)->gpu_address + offset_within_buffer;
1125
1126 desc[0] = va;
1127 desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
1128 S_008F04_BASE_ADDRESS_HI(va >> 32);
1129 }
1130
1131 /* INTERNAL CONST BUFFERS */
1132
1133 static void si_set_polygon_stipple(struct pipe_context *ctx,
1134 const struct pipe_poly_stipple *state)
1135 {
1136 struct si_context *sctx = (struct si_context *)ctx;
1137 struct pipe_constant_buffer cb = {};
1138 unsigned stipple[32];
1139 int i;
1140
1141 for (i = 0; i < 32; i++)
1142 stipple[i] = util_bitreverse(state->stipple[i]);
1143
1144 cb.user_buffer = stipple;
1145 cb.buffer_size = sizeof(stipple);
1146
1147 si_set_constant_buffer(sctx, &sctx->rw_buffers,
1148 SI_PS_CONST_POLY_STIPPLE, &cb);
1149 }
1150
1151 /* TEXTURE METADATA ENABLE/DISABLE */
1152
1153 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
1154 * while the texture is bound, possibly by a different context. In that case,
1155 * call this function to update compressed_colortex_masks.
1156 */
1157 void si_update_compressed_colortex_masks(struct si_context *sctx)
1158 {
1159 for (int i = 0; i < SI_NUM_SHADERS; ++i) {
1160 si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
1161 si_images_update_compressed_colortex_mask(&sctx->images[i]);
1162 }
1163 }
1164
1165 /* BUFFER DISCARD/INVALIDATION */
1166
1167 /** Reset descriptors of buffer resources after \p buf has been invalidated. */
1168 static void si_reset_buffer_resources(struct si_context *sctx,
1169 struct si_buffer_resources *buffers,
1170 struct pipe_resource *buf,
1171 uint64_t old_va)
1172 {
1173 unsigned mask = buffers->desc.enabled_mask;
1174
1175 while (mask) {
1176 unsigned i = u_bit_scan(&mask);
1177 if (buffers->buffers[i] == buf) {
1178 si_desc_reset_buffer_offset(&sctx->b.b,
1179 buffers->desc.list + i*4,
1180 old_va, buf);
1181 buffers->desc.dirty_mask |= 1u << i;
1182
1183 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1184 (struct r600_resource *)buf,
1185 buffers->shader_usage,
1186 buffers->priority);
1187 }
1188 }
1189 }
1190
1191 /* Reallocate a buffer and update all resource bindings where the buffer is
1192 * bound.
1193 *
1194 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
1195 * idle by discarding its contents. Apps usually tell us when to do this using
1196 * map_buffer flags, for example.
1197 */
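/* A minimal sketch of how this path is typically reached (assuming the
 * gallium transfer API of this era): mapping a busy buffer with
 * PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE lets the common r600 buffer code
 * reallocate the storage through the invalidate_buffer hook that
 * si_init_all_descriptors() installs below, e.g.:
 *
 *   void *map = pipe->transfer_map(pipe, buf, 0,
 *                                  PIPE_TRANSFER_WRITE |
 *                                  PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
 *                                  &box, &transfer);
 */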
1198 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
1199 {
1200 struct si_context *sctx = (struct si_context*)ctx;
1201 struct r600_resource *rbuffer = r600_resource(buf);
1202 unsigned i, shader, alignment = rbuffer->buf->alignment;
1203 uint64_t old_va = rbuffer->gpu_address;
1204 unsigned num_elems = sctx->vertex_elements ?
1205 sctx->vertex_elements->count : 0;
1206 struct si_sampler_view *view;
1207
1208 /* Reallocate the buffer in the same pipe_resource. */
1209 r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
1210 alignment);
1211
1212 /* We changed the buffer, now we need to bind it where the old one
1213 * was bound. This consists of 2 things:
1214 * 1) Updating the resource descriptor and dirtying it.
1215 * 2) Adding a relocation to the CS, so that it's usable.
1216 */
1217
1218 /* Vertex buffers. */
1219 for (i = 0; i < num_elems; i++) {
1220 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
1221
1222 if (vb >= Elements(sctx->vertex_buffer))
1223 continue;
1224 if (!sctx->vertex_buffer[vb].buffer)
1225 continue;
1226
1227 if (sctx->vertex_buffer[vb].buffer == buf) {
1228 sctx->vertex_buffers_dirty = true;
1229 break;
1230 }
1231 }
1232
1233 /* Streamout buffers. (other internal buffers can't be invalidated) */
1234 for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
1235 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1236
1237 if (buffers->buffers[i] != buf)
1238 continue;
1239
1240 si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
1241 old_va, buf);
1242 buffers->desc.dirty_mask |= 1u << i;
1243
1244 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1245 rbuffer, buffers->shader_usage,
1246 buffers->priority);
1247
1248 /* Update the streamout state. */
1249 if (sctx->b.streamout.begin_emitted)
1250 r600_emit_streamout_end(&sctx->b);
1251 sctx->b.streamout.append_bitmask =
1252 sctx->b.streamout.enabled_mask;
1253 r600_streamout_buffers_dirty(&sctx->b);
1254 }
1255
1256 /* Constant and shader buffers. */
1257 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1258 si_reset_buffer_resources(sctx, &sctx->const_buffers[shader],
1259 buf, old_va);
1260 si_reset_buffer_resources(sctx, &sctx->shader_buffers[shader],
1261 buf, old_va);
1262 }
1263
1264 /* Texture buffers - update virtual addresses in sampler view descriptors. */
1265 LIST_FOR_EACH_ENTRY(view, &sctx->b.texture_buffers, list) {
1266 if (view->base.texture == buf) {
1267 si_desc_reset_buffer_offset(ctx, &view->state[4], old_va, buf);
1268 }
1269 }
1270 /* Texture buffers - update bindings. */
1271 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1272 struct si_sampler_views *views = &sctx->samplers[shader].views;
1273 unsigned mask = views->desc.enabled_mask;
1274
1275 while (mask) {
1276 unsigned i = u_bit_scan(&mask);
1277 if (views->views[i]->texture == buf) {
1278 si_desc_reset_buffer_offset(ctx,
1279 views->desc.list +
1280 i * 16 + 4,
1281 old_va, buf);
1282 views->desc.dirty_mask |= 1u << i;
1283
1284 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1285 rbuffer, RADEON_USAGE_READ,
1286 RADEON_PRIO_SAMPLER_BUFFER);
1287 }
1288 }
1289 }
1290
1291 /* Shader images */
1292 for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
1293 struct si_images_info *images = &sctx->images[shader];
1294 unsigned mask = images->desc.enabled_mask;
1295
1296 while (mask) {
1297 unsigned i = u_bit_scan(&mask);
1298
1299 if (images->views[i].resource == buf) {
1300 si_desc_reset_buffer_offset(
1301 ctx, images->desc.list + i * 8 + 4,
1302 old_va, buf);
1303 images->desc.dirty_mask |= 1u << i;
1304
1305 radeon_add_to_buffer_list(
1306 &sctx->b, &sctx->b.gfx, rbuffer,
1307 RADEON_USAGE_READWRITE,
1308 RADEON_PRIO_SAMPLER_BUFFER);
1309 }
1310 }
1311 }
1312 }
1313
1314 /* SHADER USER DATA */
1315
1316 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
1317 unsigned shader)
1318 {
1319 sctx->const_buffers[shader].desc.pointer_dirty = true;
1320 sctx->shader_buffers[shader].desc.pointer_dirty = true;
1321 sctx->samplers[shader].views.desc.pointer_dirty = true;
1322 sctx->images[shader].desc.pointer_dirty = true;
1323
1324 if (shader == PIPE_SHADER_VERTEX)
1325 sctx->vertex_buffers.pointer_dirty = true;
1326
1327 si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
1328 }
1329
1330 static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
1331 {
1332 int i;
1333
1334 for (i = 0; i < SI_NUM_SHADERS; i++) {
1335 si_mark_shader_pointers_dirty(sctx, i);
1336 }
1337 sctx->rw_buffers.desc.pointer_dirty = true;
1338 }
1339
1340 /* Set a base register address for user data constants in the given shader.
1341 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
1342 */
1343 static void si_set_user_data_base(struct si_context *sctx,
1344 unsigned shader, uint32_t new_base)
1345 {
1346 uint32_t *base = &sctx->shader_userdata.sh_base[shader];
1347
1348 if (*base != new_base) {
1349 *base = new_base;
1350
1351 if (new_base)
1352 si_mark_shader_pointers_dirty(sctx, shader);
1353 }
1354 }
1355
1356 /* This must be called when these shaders are changed from non-NULL to NULL
1357 * and vice versa:
1358 * - geometry shader
1359 * - tessellation control shader
1360 * - tessellation evaluation shader
1361 */
1362 void si_shader_change_notify(struct si_context *sctx)
1363 {
1364 /* VS can be bound as VS, ES, or LS. */
1365 if (sctx->tes_shader.cso)
1366 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1367 R_00B530_SPI_SHADER_USER_DATA_LS_0);
1368 else if (sctx->gs_shader.cso)
1369 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1370 R_00B330_SPI_SHADER_USER_DATA_ES_0);
1371 else
1372 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1373 R_00B130_SPI_SHADER_USER_DATA_VS_0);
1374
1375 /* TES can be bound as ES, VS, or not bound. */
1376 if (sctx->tes_shader.cso) {
1377 if (sctx->gs_shader.cso)
1378 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1379 R_00B330_SPI_SHADER_USER_DATA_ES_0);
1380 else
1381 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1382 R_00B130_SPI_SHADER_USER_DATA_VS_0);
1383 } else {
1384 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
1385 }
1386 }
1387
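/* Emit a SET_SH_REG packet that writes the 64-bit GPU address of a
 * descriptor list into the pair of user data SGPRs at
 * sh_base + shader_userdata_offset.
 */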
1388 static void si_emit_shader_pointer(struct si_context *sctx,
1389 struct si_descriptors *desc,
1390 unsigned sh_base, bool keep_dirty)
1391 {
1392 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
1393 uint64_t va;
1394
1395 if (!desc->pointer_dirty || !desc->buffer)
1396 return;
1397
1398 va = desc->buffer->gpu_address +
1399 desc->buffer_offset;
1400
1401 radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
1402 radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
1403 radeon_emit(cs, va);
1404 radeon_emit(cs, va >> 32);
1405
1406 desc->pointer_dirty = keep_dirty;
1407 }
1408
1409 void si_emit_graphics_shader_userdata(struct si_context *sctx,
1410 struct r600_atom *atom)
1411 {
1412 unsigned i;
1413 uint32_t *sh_base = sctx->shader_userdata.sh_base;
1414
1415 if (sctx->rw_buffers.desc.pointer_dirty) {
1416 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1417 R_00B030_SPI_SHADER_USER_DATA_PS_0, true);
1418 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1419 R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
1420 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1421 R_00B230_SPI_SHADER_USER_DATA_GS_0, true);
1422 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1423 R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
1424 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1425 R_00B430_SPI_SHADER_USER_DATA_HS_0, true);
1426 sctx->rw_buffers.desc.pointer_dirty = false;
1427 }
1428
1429 for (i = 0; i < SI_NUM_GRAPHICS_SHADERS; i++) {
1430 unsigned base = sh_base[i];
1431
1432 if (!base)
1433 continue;
1434
1435 si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, base, false);
1436 si_emit_shader_pointer(sctx, &sctx->shader_buffers[i].desc, base, false);
1437 si_emit_shader_pointer(sctx, &sctx->samplers[i].views.desc, base, false);
1438 si_emit_shader_pointer(sctx, &sctx->images[i].desc, base, false);
1439 }
1440 si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
1441 }
1442
1443 void si_emit_compute_shader_userdata(struct si_context *sctx)
1444 {
1445 unsigned base = R_00B900_COMPUTE_USER_DATA_0;
1446
1447 si_emit_shader_pointer(sctx, &sctx->const_buffers[PIPE_SHADER_COMPUTE].desc,
1448 base, false);
1449 si_emit_shader_pointer(sctx, &sctx->shader_buffers[PIPE_SHADER_COMPUTE].desc,
1450 base, false);
1451 si_emit_shader_pointer(sctx, &sctx->samplers[PIPE_SHADER_COMPUTE].views.desc,
1452 base, false);
1453 si_emit_shader_pointer(sctx, &sctx->images[PIPE_SHADER_COMPUTE].desc,
1454 base, false);
1455 }
1456
1457 /* INIT/DEINIT/UPLOAD */
1458
1459 void si_init_all_descriptors(struct si_context *sctx)
1460 {
1461 int i;
1462 unsigned ce_offset = 0;
1463
1464 for (i = 0; i < SI_NUM_SHADERS; i++) {
1465 si_init_buffer_resources(&sctx->const_buffers[i],
1466 SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
1467 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER,
1468 &ce_offset);
1469 si_init_buffer_resources(&sctx->shader_buffers[i],
1470 SI_NUM_SHADER_BUFFERS, SI_SGPR_SHADER_BUFFERS,
1471 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RW_BUFFER,
1472 &ce_offset);
1473
1474 si_init_descriptors(&sctx->samplers[i].views.desc,
1475 SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS,
1476 null_texture_descriptor, &ce_offset);
1477
1478 si_init_descriptors(&sctx->images[i].desc,
1479 SI_SGPR_IMAGES, 8, SI_NUM_IMAGES,
1480 null_image_descriptor, &ce_offset);
1481 }
1482
1483 si_init_buffer_resources(&sctx->rw_buffers,
1484 SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
1485 RADEON_USAGE_READWRITE, RADEON_PRIO_RINGS_STREAMOUT,
1486 &ce_offset);
1487 si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
1488 4, SI_NUM_VERTEX_BUFFERS, NULL, NULL);
1489
1490 assert(ce_offset <= 32768);
1491
1492 /* Set pipe_context functions. */
1493 sctx->b.b.bind_sampler_states = si_bind_sampler_states;
1494 sctx->b.b.set_shader_images = si_set_shader_images;
1495 sctx->b.b.set_constant_buffer = si_pipe_set_constant_buffer;
1496 sctx->b.b.set_polygon_stipple = si_set_polygon_stipple;
1497 sctx->b.b.set_shader_buffers = si_set_shader_buffers;
1498 sctx->b.b.set_sampler_views = si_set_sampler_views;
1499 sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
1500 sctx->b.invalidate_buffer = si_invalidate_buffer;
1501
1502 /* Shader user data. */
1503 si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
1504 si_emit_graphics_shader_userdata);
1505
1506 /* Set default and immutable mappings. */
1507 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
1508 si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
1509 si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
1510 si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
1511 }
1512
1513 bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
1514 {
1515 int i;
1516
1517 for (i = 0; i < SI_NUM_SHADERS; i++) {
1518 if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc,
1519 &sctx->shader_userdata.atom) ||
1520 !si_upload_descriptors(sctx, &sctx->shader_buffers[i].desc,
1521 &sctx->shader_userdata.atom) ||
1522 !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc,
1523 &sctx->shader_userdata.atom) ||
1524 !si_upload_descriptors(sctx, &sctx->images[i].desc,
1525 &sctx->shader_userdata.atom))
1526 return false;
1527 }
1528 return si_upload_descriptors(sctx, &sctx->rw_buffers.desc,
1529 &sctx->shader_userdata.atom) &&
1530 si_upload_vertex_buffer_descriptors(sctx);
1531 }
1532
1533 bool si_upload_compute_shader_descriptors(struct si_context *sctx)
1534 {
1535 /* rw_buffers is not updated because that isn't needed for compute
1536 * shaders and the input buffer uses the same SGPRs anyway.
1537 */
1538 return si_upload_descriptors(sctx,
1539 &sctx->const_buffers[PIPE_SHADER_COMPUTE].desc, NULL) &&
1540 si_upload_descriptors(sctx,
1541 &sctx->shader_buffers[PIPE_SHADER_COMPUTE].desc, NULL) &&
1542 si_upload_descriptors(sctx,
1543 &sctx->samplers[PIPE_SHADER_COMPUTE].views.desc, NULL) &&
1544 si_upload_descriptors(sctx,
1545 &sctx->images[PIPE_SHADER_COMPUTE].desc, NULL);
1546 }
1547
1548 void si_release_all_descriptors(struct si_context *sctx)
1549 {
1550 int i;
1551
1552 for (i = 0; i < SI_NUM_SHADERS; i++) {
1553 si_release_buffer_resources(&sctx->const_buffers[i]);
1554 si_release_buffer_resources(&sctx->shader_buffers[i]);
1555 si_release_sampler_views(&sctx->samplers[i].views);
1556 si_release_image_views(&sctx->images[i]);
1557 }
1558 si_release_buffer_resources(&sctx->rw_buffers);
1559 si_release_descriptors(&sctx->vertex_buffers);
1560 }
1561
1562 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
1563 {
1564 int i;
1565
1566 for (i = 0; i < SI_NUM_SHADERS; i++) {
1567 si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
1568 si_buffer_resources_begin_new_cs(sctx, &sctx->shader_buffers[i]);
1569 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
1570 si_image_views_begin_new_cs(sctx, &sctx->images[i]);
1571 }
1572 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
1573 si_vertex_buffers_begin_new_cs(sctx);
1574 si_shader_userdata_begin_new_cs(sctx);
1575 }