src/gallium/drivers/radeonsi/si_compute_blit.c
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "si_pipe.h"
#include "util/format/u_format.h"
#include "util/format_srgb.h"

/* Note: Compute shaders always use SI_COMPUTE_DST_CACHE_POLICY for dst
 * and L2_STREAM for src.
 */
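/* Pick how writes should interact with L2: small transfers are kept resident
 * in L2 (LRU), larger ones are streamed through it, and chip/coherency
 * combinations not handled below bypass L2 entirely.
 */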
static enum si_cache_policy get_cache_policy(struct si_context *sctx,
                                             enum si_coherency coher,
                                             uint64_t size)
{
   if ((sctx->chip_class >= GFX9 && (coher == SI_COHERENCY_CB_META ||
                                     coher == SI_COHERENCY_CP)) ||
       (sctx->chip_class >= GFX7 && coher == SI_COHERENCY_SHADER))
      return size <= 256 * 1024 ? L2_LRU : L2_STREAM;

   return L2_BYPASS;
}

unsigned si_get_flush_flags(struct si_context *sctx, enum si_coherency coher,
                            enum si_cache_policy cache_policy)
{
   switch (coher) {
   default:
   case SI_COHERENCY_NONE:
   case SI_COHERENCY_CP:
      return 0;
   case SI_COHERENCY_SHADER:
      return SI_CONTEXT_INV_SCACHE |
             SI_CONTEXT_INV_VCACHE |
             (cache_policy == L2_BYPASS ? SI_CONTEXT_INV_L2 : 0);
   case SI_COHERENCY_CB_META:
      return SI_CONTEXT_FLUSH_AND_INV_CB;
   }
}

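/* Internal compute dispatches shouldn't be counted by pipeline statistics
 * queries and must ignore conditional rendering, so pipeline stats are
 * suspended and the render condition is forced off for their duration.
 */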
static void si_compute_internal_begin(struct si_context *sctx)
{
   sctx->flags &= ~SI_CONTEXT_START_PIPELINE_STATS;
   sctx->flags |= SI_CONTEXT_STOP_PIPELINE_STATS;
   sctx->render_cond_force_off = true;
}

static void si_compute_internal_end(struct si_context *sctx)
{
   sctx->flags &= ~SI_CONTEXT_STOP_PIPELINE_STATS;
   sctx->flags |= SI_CONTEXT_START_PIPELINE_STATS;
   sctx->render_cond_force_off = false;
}

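/* Clear a buffer with a repeating 12-byte value (e.g. an R32G32B32 texel),
 * which can't be expressed as a power-of-two dword fill. The value is passed
 * through a constant buffer and the grid is sized so that each thread covers
 * one 12-byte element.
 */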
static void si_compute_clear_12bytes_buffer(struct si_context *sctx,
                                            struct pipe_resource *dst,
                                            unsigned dst_offset,
                                            unsigned size,
                                            const uint32_t *clear_value,
                                            enum si_coherency coher)
{
   struct pipe_context *ctx = &sctx->b;

   assert(dst_offset % 4 == 0);
   assert(size % 4 == 0);
   unsigned size_12 = DIV_ROUND_UP(size, 12);

   unsigned data[4] = {0};
   memcpy(data, clear_value, 12);

   si_compute_internal_begin(sctx);

   sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                  SI_CONTEXT_CS_PARTIAL_FLUSH |
                  si_get_flush_flags(sctx, coher, SI_COMPUTE_DST_CACHE_POLICY);

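   /* Save states. */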
   struct pipe_shader_buffer saved_sb = {0};
   si_get_shader_buffers(sctx, PIPE_SHADER_COMPUTE, 0, 1, &saved_sb);

   unsigned saved_writable_mask = 0;
   if (sctx->const_and_shader_buffers[PIPE_SHADER_COMPUTE].writable_mask &
       (1u << si_get_shaderbuf_slot(0)))
      saved_writable_mask = 1;

   struct pipe_constant_buffer saved_cb = {};
   si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);

   void *saved_cs = sctx->cs_shader_state.program;

   struct pipe_constant_buffer cb = {};
   cb.buffer_size = sizeof(data);
   cb.user_buffer = data;
   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &cb);

   struct pipe_shader_buffer sb = {0};
   sb.buffer = dst;
   sb.buffer_offset = dst_offset;
   sb.buffer_size = size;

   ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 1, &sb, 0x1);

   struct pipe_grid_info info = {0};

   if (!sctx->cs_clear_12bytes_buffer)
      sctx->cs_clear_12bytes_buffer =
         si_clear_12bytes_buffer_shader(ctx);
   ctx->bind_compute_state(ctx, sctx->cs_clear_12bytes_buffer);
   info.block[0] = 64;
   info.last_block[0] = size_12 % 64;
   info.block[1] = 1;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(size_12, 64);
   info.grid[1] = 1;
   info.grid[2] = 1;

   ctx->launch_grid(ctx, &info);

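   /* Restore states. */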
   ctx->bind_compute_state(ctx, saved_cs);
   ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 1, &saved_sb, saved_writable_mask);
   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);

   si_compute_internal_end(sctx);
   pipe_resource_reference(&saved_sb.buffer, NULL);
   pipe_resource_reference(&saved_cb.buffer, NULL);
}

static void si_compute_do_clear_or_copy(struct si_context *sctx,
                                        struct pipe_resource *dst,
                                        unsigned dst_offset,
                                        struct pipe_resource *src,
                                        unsigned src_offset,
                                        unsigned size,
                                        const uint32_t *clear_value,
                                        unsigned clear_value_size,
                                        enum si_coherency coher)
{
   struct pipe_context *ctx = &sctx->b;

   assert(src_offset % 4 == 0);
   assert(dst_offset % 4 == 0);
   assert(size % 4 == 0);

   assert(dst->target != PIPE_BUFFER || dst_offset + size <= dst->width0);
   assert(!src || src_offset + size <= src->width0);

   si_compute_internal_begin(sctx);
   sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                  SI_CONTEXT_CS_PARTIAL_FLUSH |
                  si_get_flush_flags(sctx, coher, SI_COMPUTE_DST_CACHE_POLICY);

   /* Save states. */
   void *saved_cs = sctx->cs_shader_state.program;
   struct pipe_shader_buffer saved_sb[2] = {};
   si_get_shader_buffers(sctx, PIPE_SHADER_COMPUTE, 0, src ? 2 : 1, saved_sb);

   unsigned saved_writable_mask = 0;
   for (unsigned i = 0; i < (src ? 2 : 1); i++) {
      if (sctx->const_and_shader_buffers[PIPE_SHADER_COMPUTE].writable_mask &
          (1u << si_get_shaderbuf_slot(i)))
         saved_writable_mask |= 1 << i;
   }

   /* The memory accesses are coalesced, meaning that the 1st instruction writes
    * the 1st contiguous block of data for the whole wave, the 2nd instruction
    * writes the 2nd contiguous block of data, etc.
    */
   unsigned dwords_per_thread = src ? SI_COMPUTE_COPY_DW_PER_THREAD :
                                      SI_COMPUTE_CLEAR_DW_PER_THREAD;
   unsigned instructions_per_thread = MAX2(1, dwords_per_thread / 4);
   unsigned dwords_per_instruction = dwords_per_thread / instructions_per_thread;
   unsigned wave_size = sctx->screen->compute_wave_size;
   unsigned dwords_per_wave = dwords_per_thread * wave_size;

   unsigned num_dwords = size / 4;
   unsigned num_instructions = DIV_ROUND_UP(num_dwords, dwords_per_instruction);

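   /* Illustrative example (assuming a wave size of 64 and 4 dwords per thread):
    * each thread handles 4 dwords with a single instruction, each wave covers
    * 256 dwords (1 KB), and a 1 MB clear launches 1024 workgroups of 64 threads.
    */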
   struct pipe_grid_info info = {};
   info.block[0] = MIN2(wave_size, num_instructions);
   info.block[1] = 1;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(num_dwords, dwords_per_wave);
   info.grid[1] = 1;
   info.grid[2] = 1;

   struct pipe_shader_buffer sb[2] = {};
   sb[0].buffer = dst;
   sb[0].buffer_offset = dst_offset;
   sb[0].buffer_size = size;

   bool shader_dst_stream_policy = SI_COMPUTE_DST_CACHE_POLICY != L2_LRU;

   if (src) {
      sb[1].buffer = src;
      sb[1].buffer_offset = src_offset;
      sb[1].buffer_size = size;

      ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 2, sb, 0x1);

      if (!sctx->cs_copy_buffer) {
         sctx->cs_copy_buffer = si_create_dma_compute_shader(&sctx->b,
                                                             SI_COMPUTE_COPY_DW_PER_THREAD,
                                                             shader_dst_stream_policy, true);
      }
      ctx->bind_compute_state(ctx, sctx->cs_copy_buffer);
   } else {
      assert(clear_value_size >= 4 &&
             clear_value_size <= 16 &&
             util_is_power_of_two_or_zero(clear_value_size));

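      /* Pass the clear value to the shader through sctx->cs_user_data (loaded
       * into user SGPRs), repeating it as needed to fill all four dwords.
       */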
      for (unsigned i = 0; i < 4; i++)
         sctx->cs_user_data[i] = clear_value[i % (clear_value_size / 4)];

      ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 1, sb, 0x1);

      if (!sctx->cs_clear_buffer) {
         sctx->cs_clear_buffer = si_create_dma_compute_shader(&sctx->b,
                                                              SI_COMPUTE_CLEAR_DW_PER_THREAD,
                                                              shader_dst_stream_policy, false);
      }
      ctx->bind_compute_state(ctx, sctx->cs_clear_buffer);
   }

   ctx->launch_grid(ctx, &info);

   enum si_cache_policy cache_policy = get_cache_policy(sctx, coher, size);
   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                  (cache_policy == L2_BYPASS ? SI_CONTEXT_WB_L2 : 0);

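   /* If the result may stay in L2, remember that this buffer has dirty data
    * in L2, so that users which don't go through L2 can flush it first.
    */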
   if (cache_policy != L2_BYPASS)
      si_resource(dst)->TC_L2_dirty = true;

   /* Restore states. */
   ctx->bind_compute_state(ctx, saved_cs);
   ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, src ? 2 : 1, saved_sb,
                           saved_writable_mask);
   si_compute_internal_end(sctx);
   for (int i = 0; i < 2; i++)
      pipe_resource_reference(&saved_sb[i].buffer, NULL);
}

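/* Fill a buffer range with a repeating clear value. clear_value_size may be
 * 1-16 bytes (12 is handled by a dedicated shader); values smaller than a
 * dword are replicated into one, e.g. 0xAB becomes 0xABABABAB, and trailing
 * bytes that aren't dword-aligned are written with pipe_buffer_write.
 */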
void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
                     uint64_t offset, uint64_t size, uint32_t *clear_value,
                     uint32_t clear_value_size, enum si_coherency coher,
                     bool force_cpdma)
{
   if (!size)
      return;

   ASSERTED unsigned clear_alignment = MIN2(clear_value_size, 4);

   assert(clear_value_size != 3 && clear_value_size != 6); /* 12 is allowed. */
   assert(offset % clear_alignment == 0);
   assert(size % clear_alignment == 0);
   assert(size < (UINT_MAX & ~0xf)); /* TODO: test 64-bit sizes in all codepaths */

   /* Reduce a large clear value size if possible. */
   if (clear_value_size > 4) {
      bool clear_dword_duplicated = true;

      /* See if we can lower large fills to dword fills. */
      for (unsigned i = 1; i < clear_value_size / 4; i++) {
         if (clear_value[0] != clear_value[i]) {
            clear_dword_duplicated = false;
            break;
         }
      }
      if (clear_dword_duplicated)
         clear_value_size = 4;
   }

   /* Expand a small clear value size. */
   uint32_t tmp_clear_value;
   if (clear_value_size <= 2) {
      if (clear_value_size == 1) {
         tmp_clear_value = *(uint8_t*)clear_value;
         tmp_clear_value |= (tmp_clear_value << 8) |
                            (tmp_clear_value << 16) |
                            (tmp_clear_value << 24);
      } else {
         tmp_clear_value = *(uint16_t*)clear_value;
         tmp_clear_value |= tmp_clear_value << 16;
      }
      clear_value = &tmp_clear_value;
      clear_value_size = 4;
   }

   if (clear_value_size == 12) {
      si_compute_clear_12bytes_buffer(sctx, dst, offset, size, clear_value, coher);
      return;
   }

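   /* Clear the dword-aligned prefix with either the compute clear or CP DMA
    * (CP DMA can only replicate a single dword); any 1-3 trailing bytes are
    * written through pipe_buffer_write at the end.
    */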
   uint64_t aligned_size = size & ~3ull;
   if (aligned_size >= 4) {
      /* Before GFX9, CP DMA was very slow when clearing GTT, so never
       * use CP DMA clears on those chips, because we can't be certain
       * about buffer placements.
       */
      if (clear_value_size > 4 ||
          (!force_cpdma &&
           clear_value_size == 4 &&
           offset % 4 == 0 &&
           (size > 32*1024 || sctx->chip_class <= GFX8))) {
         si_compute_do_clear_or_copy(sctx, dst, offset, NULL, 0,
                                     aligned_size, clear_value,
                                     clear_value_size, coher);
      } else {
         assert(clear_value_size == 4);
         si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, dst, offset,
                                aligned_size, *clear_value, 0, coher,
                                get_cache_policy(sctx, coher, size));
      }

      offset += aligned_size;
      size -= aligned_size;
   }

   /* Handle non-dword alignment. */
   if (size) {
      assert(dst);
      assert(dst->target == PIPE_BUFFER);
      assert(size < 4);

      pipe_buffer_write(&sctx->b, dst, offset, size, clear_value);
   }
}

static void si_pipe_clear_buffer(struct pipe_context *ctx,
                                 struct pipe_resource *dst,
                                 unsigned offset, unsigned size,
                                 const void *clear_value,
                                 int clear_value_size)
{
   si_clear_buffer((struct si_context*)ctx, dst, offset, size, (uint32_t*)clear_value,
                   clear_value_size, SI_COHERENCY_SHADER, false);
}

void si_copy_buffer(struct si_context *sctx,
                    struct pipe_resource *dst, struct pipe_resource *src,
                    uint64_t dst_offset, uint64_t src_offset, unsigned size)
{
   if (!size)
      return;

   enum si_coherency coher = SI_COHERENCY_SHADER;
   enum si_cache_policy cache_policy = get_cache_policy(sctx, coher, size);

   /* Only use compute for VRAM copies on dGPUs. */
   if (sctx->screen->info.has_dedicated_vram &&
       si_resource(dst)->domains & RADEON_DOMAIN_VRAM &&
       si_resource(src)->domains & RADEON_DOMAIN_VRAM &&
       size > 32 * 1024 &&
       dst_offset % 4 == 0 && src_offset % 4 == 0 && size % 4 == 0) {
      si_compute_do_clear_or_copy(sctx, dst, dst_offset, src, src_offset,
                                  size, NULL, 0, coher);
   } else {
      si_cp_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset, size,
                            0, coher, cache_policy);
   }
}

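/* Copy a box between two images with a compute shader. src_box gives the
 * source origin and size, (dstx, dsty, dstz) the destination origin; the
 * coordinates are passed to the shader through a user constant buffer.
 */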
void si_compute_copy_image(struct si_context *sctx,
                           struct pipe_resource *dst,
                           unsigned dst_level,
                           struct pipe_resource *src,
                           unsigned src_level,
                           unsigned dstx, unsigned dsty, unsigned dstz,
                           const struct pipe_box *src_box)
{
   struct pipe_context *ctx = &sctx->b;
   unsigned width = src_box->width;
   unsigned height = src_box->height;
   unsigned depth = src_box->depth;

   unsigned data[] = {src_box->x, src_box->y, src_box->z, 0, dstx, dsty, dstz, 0};

   if (width == 0 || height == 0)
      return;

   si_compute_internal_begin(sctx);
   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                  si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);

   /* src and dst have the same number of samples. */
   si_make_CB_shader_coherent(sctx, src->nr_samples, true,
                              /* Only src can have DCC. */
                              ((struct si_texture*)src)->surface.u.gfx9.dcc.pipe_aligned);

   struct pipe_constant_buffer saved_cb = {};
   si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);

   struct si_images *images = &sctx->images[PIPE_SHADER_COMPUTE];
   struct pipe_image_view saved_image[2] = {0};
   util_copy_image_view(&saved_image[0], &images->views[0]);
   util_copy_image_view(&saved_image[1], &images->views[1]);

   void *saved_cs = sctx->cs_shader_state.program;

   struct pipe_constant_buffer cb = {};
   cb.buffer_size = sizeof(data);
   cb.user_buffer = data;
   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &cb);

   struct pipe_image_view image[2] = {0};
   image[0].resource = src;
   image[0].shader_access = image[0].access = PIPE_IMAGE_ACCESS_READ;
   image[0].format = util_format_linear(src->format);
   image[0].u.tex.level = src_level;
   image[0].u.tex.first_layer = 0;
   image[0].u.tex.last_layer =
      src->target == PIPE_TEXTURE_3D ? u_minify(src->depth0, src_level) - 1
                                     : (unsigned)(src->array_size - 1);
   image[1].resource = dst;
   image[1].shader_access = image[1].access = PIPE_IMAGE_ACCESS_WRITE;
   image[1].format = util_format_linear(dst->format);
   image[1].u.tex.level = dst_level;
   image[1].u.tex.first_layer = 0;
   image[1].u.tex.last_layer =
      dst->target == PIPE_TEXTURE_3D ? u_minify(dst->depth0, dst_level) - 1
                                     : (unsigned)(dst->array_size - 1);

   if (src->format == PIPE_FORMAT_R9G9B9E5_FLOAT)
      image[0].format = image[1].format = PIPE_FORMAT_R32_UINT;

   /* SNORM8 blitting has precision issues on some chips. Use the SINT
    * equivalent instead, which doesn't force DCC decompression.
    * Note that some chips avoid this issue by using SDMA.
    */
   if (util_format_is_snorm8(dst->format)) {
      image[0].format = image[1].format =
         util_format_snorm8_to_sint8(dst->format);
   }

   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 2, image);

   struct pipe_grid_info info = {0};

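   /* 1D arrays are dispatched with 64x1 workgroups over (width, layers);
    * everything else uses 8x8 workgroups over (width, height) with one grid
    * slice per depth layer. last_block covers the partial workgroups at the
    * edges.
    */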
   if (dst->target == PIPE_TEXTURE_1D_ARRAY && src->target == PIPE_TEXTURE_1D_ARRAY) {
      if (!sctx->cs_copy_image_1d_array)
         sctx->cs_copy_image_1d_array =
            si_create_copy_image_compute_shader_1d_array(ctx);
      ctx->bind_compute_state(ctx, sctx->cs_copy_image_1d_array);
      info.block[0] = 64;
      info.last_block[0] = width % 64;
      info.block[1] = 1;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 64);
      info.grid[1] = depth;
      info.grid[2] = 1;
   } else {
      if (!sctx->cs_copy_image)
         sctx->cs_copy_image = si_create_copy_image_compute_shader(ctx);
      ctx->bind_compute_state(ctx, sctx->cs_copy_image);
      info.block[0] = 8;
      info.last_block[0] = width % 8;
      info.block[1] = 8;
      info.last_block[1] = height % 8;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 8);
      info.grid[1] = DIV_ROUND_UP(height, 8);
      info.grid[2] = depth;
   }

   ctx->launch_grid(ctx, &info);

   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                  (sctx->chip_class <= GFX8 ? SI_CONTEXT_WB_L2 : 0) |
                  si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);
   ctx->bind_compute_state(ctx, saved_cs);
   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 2, saved_image);
   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);
   si_compute_internal_end(sctx);
   for (int i = 0; i < 2; i++)
      pipe_resource_reference(&saved_image[i].resource, NULL);
   pipe_resource_reference(&saved_cb.buffer, NULL);
}

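/* Rewrite the displayable DCC buffer from the main DCC buffer using the DCC
 * retile map: img[0] is the retile map (pairs of source/destination offsets
 * into the DCC buffers), img[1] the source DCC, img[2] the displayable DCC
 * being written.
 */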
void si_retile_dcc(struct si_context *sctx, struct si_texture *tex)
{
   struct pipe_context *ctx = &sctx->b;

   sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                  SI_CONTEXT_CS_PARTIAL_FLUSH |
                  si_get_flush_flags(sctx, SI_COHERENCY_CB_META, L2_LRU) |
                  si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_LRU);
   sctx->emit_cache_flush(sctx);

   /* Save states. */
   void *saved_cs = sctx->cs_shader_state.program;
   struct pipe_image_view saved_img[3] = {};

   for (unsigned i = 0; i < 3; i++) {
      util_copy_image_view(&saved_img[i],
                           &sctx->images[PIPE_SHADER_COMPUTE].views[i]);
   }

   /* Set images. */
   bool use_uint16 = tex->surface.u.gfx9.dcc_retile_use_uint16;
   unsigned num_elements = tex->surface.u.gfx9.dcc_retile_num_elements;
   struct pipe_image_view img[3];

   assert(tex->surface.dcc_retile_map_offset && tex->surface.dcc_retile_map_offset <= UINT_MAX);
   assert(tex->surface.dcc_offset && tex->surface.dcc_offset <= UINT_MAX);
   assert(tex->surface.display_dcc_offset && tex->surface.display_dcc_offset <= UINT_MAX);

   for (unsigned i = 0; i < 3; i++) {
      img[i].resource = &tex->buffer.b.b;
      img[i].access = i == 2 ? PIPE_IMAGE_ACCESS_WRITE : PIPE_IMAGE_ACCESS_READ;
      img[i].shader_access = SI_IMAGE_ACCESS_AS_BUFFER;
   }

   img[0].format = use_uint16 ? PIPE_FORMAT_R16G16B16A16_UINT :
                                PIPE_FORMAT_R32G32B32A32_UINT;
   img[0].u.buf.offset = tex->surface.dcc_retile_map_offset;
   img[0].u.buf.size = num_elements * (use_uint16 ? 2 : 4);

   img[1].format = PIPE_FORMAT_R8_UINT;
   img[1].u.buf.offset = tex->surface.dcc_offset;
   img[1].u.buf.size = tex->surface.dcc_size;

   img[2].format = PIPE_FORMAT_R8_UINT;
   img[2].u.buf.offset = tex->surface.display_dcc_offset;
   img[2].u.buf.size = tex->surface.u.gfx9.display_dcc_size;

   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 3, img);

   /* Bind the compute shader. */
   if (!sctx->cs_dcc_retile)
      sctx->cs_dcc_retile = si_create_dcc_retile_cs(ctx);
   ctx->bind_compute_state(ctx, sctx->cs_dcc_retile);

   /* Dispatch compute. */
   /* img[0] has 4 channels per element containing 2 pairs of DCC offsets. */
   unsigned num_threads = num_elements / 4;

   struct pipe_grid_info info = {};
   info.block[0] = 64;
   info.block[1] = 1;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(num_threads, 64); /* includes the partial block */
   info.grid[1] = 1;
   info.grid[2] = 1;
   info.last_block[0] = num_threads % 64;

   ctx->launch_grid(ctx, &info);

   /* Don't flush caches or wait. The driver will wait at the end of this IB,
    * and L2 will be flushed by the kernel fence.
    */

   /* Restore states. */
   ctx->bind_compute_state(ctx, saved_cs);
   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 3, saved_img);

   for (unsigned i = 0; i < 3; i++) {
      pipe_resource_reference(&saved_img[i].resource, NULL);
   }
}

/* Expand FMASK to make it identity, so that image stores can ignore it. */
void si_compute_expand_fmask(struct pipe_context *ctx, struct pipe_resource *tex)
{
   struct si_context *sctx = (struct si_context *)ctx;
   bool is_array = tex->target == PIPE_TEXTURE_2D_ARRAY;
   unsigned log_fragments = util_logbase2(tex->nr_storage_samples);
   unsigned log_samples = util_logbase2(tex->nr_samples);
   assert(tex->nr_samples >= 2);

   /* EQAA FMASK expansion is unimplemented. */
   if (tex->nr_samples != tex->nr_storage_samples)
      return;

   si_compute_internal_begin(sctx);

   /* Flush caches and sync engines. */
   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                  si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);
   si_make_CB_shader_coherent(sctx, tex->nr_samples, true,
                              true /* DCC is not possible with image stores */);

   /* Save states. */
   void *saved_cs = sctx->cs_shader_state.program;
   struct pipe_image_view saved_image = {0};
   util_copy_image_view(&saved_image, &sctx->images[PIPE_SHADER_COMPUTE].views[0]);

   /* Bind the image. */
   struct pipe_image_view image = {0};
   image.resource = tex;
   /* Don't set WRITE so as not to trigger FMASK expansion, causing
    * an infinite loop. */
   image.shader_access = image.access = PIPE_IMAGE_ACCESS_READ;
   image.format = util_format_linear(tex->format);
   if (is_array)
      image.u.tex.last_layer = tex->array_size - 1;

   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, &image);

   /* Bind the shader. */
   void **shader = &sctx->cs_fmask_expand[log_samples - 1][is_array];
   if (!*shader)
      *shader = si_create_fmask_expand_cs(ctx, tex->nr_samples, is_array);
   ctx->bind_compute_state(ctx, *shader);

   /* Dispatch compute. */
   struct pipe_grid_info info = {0};
   info.block[0] = 8;
   info.last_block[0] = tex->width0 % 8;
   info.block[1] = 8;
   info.last_block[1] = tex->height0 % 8;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(tex->width0, 8);
   info.grid[1] = DIV_ROUND_UP(tex->height0, 8);
   info.grid[2] = is_array ? tex->array_size : 1;

   ctx->launch_grid(ctx, &info);

   /* Flush caches and sync engines. */
   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                  (sctx->chip_class <= GFX8 ? SI_CONTEXT_WB_L2 : 0) |
                  si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);

   /* Restore previous states. */
   ctx->bind_compute_state(ctx, saved_cs);
   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, &saved_image);
   si_compute_internal_end(sctx);
   pipe_resource_reference(&saved_image.resource, NULL);

   /* Array of fully expanded FMASK values, arranged by [log2(fragments)][log2(samples)-1]. */
#define INVALID 0 /* never used */
   static const uint64_t fmask_expand_values[][4] = {
      /* samples */
      /* 2 (8 bpp) 4 (8 bpp)   8 (8-32bpp) 16 (16-64bpp)         fragments */
      {0x02020202, 0x0E0E0E0E, 0xFEFEFEFE, 0xFFFEFFFE},          /* 1 */
      {0x02020202, 0xA4A4A4A4, 0xAAA4AAA4, 0xAAAAAAA4},          /* 2 */
      {INVALID,    0xE4E4E4E4, 0x44443210, 0x4444444444443210},  /* 4 */
      {INVALID,    INVALID,    0x76543210, 0x8888888876543210},  /* 8 */
   };

   /* Clear FMASK to identity. */
   struct si_texture *stex = (struct si_texture*)tex;
   si_clear_buffer(sctx, tex, stex->surface.fmask_offset, stex->surface.fmask_size,
                   (uint32_t*)&fmask_expand_values[log_fragments][log_samples - 1],
                   4, SI_COHERENCY_SHADER, false);
}

void si_init_compute_blit_functions(struct si_context *sctx)
{
   sctx->b.clear_buffer = si_pipe_clear_buffer;
}

/* Clear a region of a color surface to a constant value. */
void si_compute_clear_render_target(struct pipe_context *ctx,
                                    struct pipe_surface *dstsurf,
                                    const union pipe_color_union *color,
                                    unsigned dstx, unsigned dsty,
                                    unsigned width, unsigned height,
                                    bool render_condition_enabled)
{
   struct si_context *sctx = (struct si_context *)ctx;
   unsigned num_layers = dstsurf->u.tex.last_layer - dstsurf->u.tex.first_layer + 1;
   unsigned data[4 + sizeof(color->ui)] = {dstx, dsty, dstsurf->u.tex.first_layer, 0};

   if (width == 0 || height == 0)
      return;

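   /* The image is bound with the linear variant of the format below, so if the
    * surface is sRGB, encode the clear color to sRGB on the CPU first.
    */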
   if (util_format_is_srgb(dstsurf->format)) {
      union pipe_color_union color_srgb;
      for (int i = 0; i < 3; i++)
         color_srgb.f[i] = util_format_linear_to_srgb_float(color->f[i]);
      color_srgb.f[3] = color->f[3];
      memcpy(data + 4, color_srgb.ui, sizeof(color->ui));
   } else {
      memcpy(data + 4, color->ui, sizeof(color->ui));
   }

   si_compute_internal_begin(sctx);
   sctx->render_cond_force_off = !render_condition_enabled;

   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                  si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);
   si_make_CB_shader_coherent(sctx, dstsurf->texture->nr_samples, true,
                              true /* DCC is not possible with image stores */);

   struct pipe_constant_buffer saved_cb = {};
   si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);

   struct si_images *images = &sctx->images[PIPE_SHADER_COMPUTE];
   struct pipe_image_view saved_image = {0};
   util_copy_image_view(&saved_image, &images->views[0]);

   void *saved_cs = sctx->cs_shader_state.program;

   struct pipe_constant_buffer cb = {};
   cb.buffer_size = sizeof(data);
   cb.user_buffer = data;
   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &cb);

   struct pipe_image_view image = {0};
   image.resource = dstsurf->texture;
   image.shader_access = image.access = PIPE_IMAGE_ACCESS_WRITE;
   image.format = util_format_linear(dstsurf->format);
   image.u.tex.level = dstsurf->u.tex.level;
   image.u.tex.first_layer = 0; /* 3D images ignore first_layer (BASE_ARRAY) */
   image.u.tex.last_layer = dstsurf->u.tex.last_layer;

   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, &image);

   struct pipe_grid_info info = {0};

   if (dstsurf->texture->target != PIPE_TEXTURE_1D_ARRAY) {
      if (!sctx->cs_clear_render_target)
         sctx->cs_clear_render_target = si_clear_render_target_shader(ctx);
      ctx->bind_compute_state(ctx, sctx->cs_clear_render_target);
      info.block[0] = 8;
      info.last_block[0] = width % 8;
      info.block[1] = 8;
      info.last_block[1] = height % 8;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 8);
      info.grid[1] = DIV_ROUND_UP(height, 8);
      info.grid[2] = num_layers;
   } else {
      if (!sctx->cs_clear_render_target_1d_array)
         sctx->cs_clear_render_target_1d_array =
            si_clear_render_target_shader_1d_array(ctx);
      ctx->bind_compute_state(ctx, sctx->cs_clear_render_target_1d_array);
      info.block[0] = 64;
      info.last_block[0] = width % 64;
      info.block[1] = 1;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 64);
      info.grid[1] = num_layers;
      info.grid[2] = 1;
   }

   ctx->launch_grid(ctx, &info);

   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                  (sctx->chip_class <= GFX8 ? SI_CONTEXT_WB_L2 : 0) |
                  si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);
   ctx->bind_compute_state(ctx, saved_cs);
   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, &saved_image);
   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);
   si_compute_internal_end(sctx);
   pipe_resource_reference(&saved_image.resource, NULL);
   pipe_resource_reference(&saved_cb.buffer, NULL);
}