src/gallium/drivers/radeonsi/si_compute_blit.c
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "si_pipe.h"
#include "util/u_format.h"
#include "util/format_srgb.h"

/* Note: Compute shaders always use SI_COMPUTE_DST_CACHE_POLICY for dst
 * and L2_STREAM for src.
 */
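/* Small transfers are kept resident in L2 (LRU) so that follow-up reads hit
 * the cache, while larger ones are streamed through L2 to avoid evicting
 * useful data. All other coherency/chip combinations bypass L2, since the
 * consumer is not expected to read through it there.
 */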
static enum si_cache_policy get_cache_policy(struct si_context *sctx,
                                             enum si_coherency coher,
                                             uint64_t size)
{
        if ((sctx->chip_class >= GFX9 && (coher == SI_COHERENCY_CB_META ||
                                          coher == SI_COHERENCY_CP)) ||
            (sctx->chip_class >= GFX7 && coher == SI_COHERENCY_SHADER))
                return size <= 256 * 1024 ? L2_LRU : L2_STREAM;

        return L2_BYPASS;
}

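/* Return the cache flush/invalidate flags needed so that a consumer with the
 * given coherency usage sees data written by compute or CP DMA.
 */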
unsigned si_get_flush_flags(struct si_context *sctx, enum si_coherency coher,
                            enum si_cache_policy cache_policy)
{
        switch (coher) {
        default:
        case SI_COHERENCY_NONE:
        case SI_COHERENCY_CP:
                return 0;
        case SI_COHERENCY_SHADER:
                return SI_CONTEXT_INV_SCACHE |
                       SI_CONTEXT_INV_VCACHE |
                       (cache_policy == L2_BYPASS ? SI_CONTEXT_INV_L2 : 0);
        case SI_COHERENCY_CB_META:
                return SI_CONTEXT_FLUSH_AND_INV_CB;
        }
}

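/* Internal dispatches (clears and copies done on behalf of the driver) must
 * not be counted in the application's pipeline-statistics queries and must
 * ignore conditional rendering, so pause the stats and force the render
 * condition off while they run.
 */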
static void si_compute_internal_begin(struct si_context *sctx)
{
        sctx->flags &= ~SI_CONTEXT_START_PIPELINE_STATS;
        sctx->flags |= SI_CONTEXT_STOP_PIPELINE_STATS;
        sctx->render_cond_force_off = true;
}

static void si_compute_internal_end(struct si_context *sctx)
{
        sctx->flags &= ~SI_CONTEXT_STOP_PIPELINE_STATS;
        sctx->flags |= SI_CONTEXT_START_PIPELINE_STATS;
        sctx->render_cond_force_off = false;
}

static void si_compute_do_clear_or_copy(struct si_context *sctx,
                                        struct pipe_resource *dst,
                                        unsigned dst_offset,
                                        struct pipe_resource *src,
                                        unsigned src_offset,
                                        unsigned size,
                                        const uint32_t *clear_value,
                                        unsigned clear_value_size,
                                        enum si_coherency coher)
{
        struct pipe_context *ctx = &sctx->b;

        assert(src_offset % 4 == 0);
        assert(dst_offset % 4 == 0);
        assert(size % 4 == 0);

        assert(dst->target != PIPE_BUFFER || dst_offset + size <= dst->width0);
        assert(!src || src_offset + size <= src->width0);

        si_compute_internal_begin(sctx);
        sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                       SI_CONTEXT_CS_PARTIAL_FLUSH |
                       si_get_flush_flags(sctx, coher, SI_COMPUTE_DST_CACHE_POLICY);

        /* Save states. */
        void *saved_cs = sctx->cs_shader_state.program;
        struct pipe_shader_buffer saved_sb[2] = {};
        si_get_shader_buffers(sctx, PIPE_SHADER_COMPUTE, 0, src ? 2 : 1, saved_sb);

        unsigned saved_writable_mask = 0;
        for (unsigned i = 0; i < (src ? 2 : 1); i++) {
                if (sctx->const_and_shader_buffers[PIPE_SHADER_COMPUTE].writable_mask &
                    (1u << si_get_shaderbuf_slot(i)))
                        saved_writable_mask |= 1 << i;
        }

        /* The memory accesses are coalesced, meaning that the 1st instruction writes
         * the 1st contiguous block of data for the whole wave, the 2nd instruction
         * writes the 2nd contiguous block of data, etc.
         */
        unsigned dwords_per_thread = src ? SI_COMPUTE_COPY_DW_PER_THREAD :
                                           SI_COMPUTE_CLEAR_DW_PER_THREAD;
        unsigned instructions_per_thread = MAX2(1, dwords_per_thread / 4);
        unsigned dwords_per_instruction = dwords_per_thread / instructions_per_thread;
        unsigned wave_size = sctx->screen->compute_wave_size;
        unsigned dwords_per_wave = dwords_per_thread * wave_size;

        unsigned num_dwords = size / 4;
        unsigned num_instructions = DIV_ROUND_UP(num_dwords, dwords_per_instruction);

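        /* Each workgroup is at most one wave; a full wave handles dwords_per_wave
         * dwords, and the grid is rounded up so the whole range is covered. The dst
         * shader buffer is bound with the exact size, so stores past the end from
         * the last, partially used wave should be discarded by the buffer
         * descriptor's range check.
         */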
        struct pipe_grid_info info = {};
        info.block[0] = MIN2(wave_size, num_instructions);
        info.block[1] = 1;
        info.block[2] = 1;
        info.grid[0] = DIV_ROUND_UP(num_dwords, dwords_per_wave);
        info.grid[1] = 1;
        info.grid[2] = 1;

        struct pipe_shader_buffer sb[2] = {};
        sb[0].buffer = dst;
        sb[0].buffer_offset = dst_offset;
        sb[0].buffer_size = size;

        bool shader_dst_stream_policy = SI_COMPUTE_DST_CACHE_POLICY != L2_LRU;

        if (src) {
                sb[1].buffer = src;
                sb[1].buffer_offset = src_offset;
                sb[1].buffer_size = size;

                ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 2, sb, 0x1);

                if (!sctx->cs_copy_buffer) {
                        sctx->cs_copy_buffer = si_create_dma_compute_shader(&sctx->b,
                                                        SI_COMPUTE_COPY_DW_PER_THREAD,
                                                        shader_dst_stream_policy, true);
                }
                ctx->bind_compute_state(ctx, sctx->cs_copy_buffer);
        } else {
                assert(clear_value_size >= 4 &&
                       clear_value_size <= 16 &&
                       util_is_power_of_two_or_zero(clear_value_size));

                for (unsigned i = 0; i < 4; i++)
                        sctx->cs_user_data[i] = clear_value[i % (clear_value_size / 4)];

                ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 1, sb, 0x1);

                if (!sctx->cs_clear_buffer) {
                        sctx->cs_clear_buffer = si_create_dma_compute_shader(&sctx->b,
                                                        SI_COMPUTE_CLEAR_DW_PER_THREAD,
                                                        shader_dst_stream_policy, false);
                }
                ctx->bind_compute_state(ctx, sctx->cs_clear_buffer);
        }

        ctx->launch_grid(ctx, &info);

        enum si_cache_policy cache_policy = get_cache_policy(sctx, coher, size);
        sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                       (cache_policy == L2_BYPASS ? SI_CONTEXT_WB_L2 : 0);

        if (cache_policy != L2_BYPASS)
                si_resource(dst)->TC_L2_dirty = true;

        /* Restore states. */
        ctx->bind_compute_state(ctx, saved_cs);
        ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, src ? 2 : 1, saved_sb,
                                saved_writable_mask);
        si_compute_internal_end(sctx);
}

void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
                     uint64_t offset, uint64_t size, uint32_t *clear_value,
                     uint32_t clear_value_size, enum si_coherency coher,
                     bool force_cpdma)
{
        if (!size)
                return;

        MAYBE_UNUSED unsigned clear_alignment = MIN2(clear_value_size, 4);

        assert(clear_value_size != 3 && clear_value_size != 6); /* 12 is allowed. */
        assert(offset % clear_alignment == 0);
        assert(size % clear_alignment == 0);
        assert(size < (UINT_MAX & ~0xf)); /* TODO: test 64-bit sizes in all codepaths */

        /* Reduce a large clear value size if possible. */
        if (clear_value_size > 4) {
                bool clear_dword_duplicated = true;

                /* See if we can lower large fills to dword fills. */
                for (unsigned i = 1; i < clear_value_size / 4; i++) {
                        if (clear_value[0] != clear_value[i]) {
                                clear_dword_duplicated = false;
                                break;
                        }
                }
                if (clear_dword_duplicated)
                        clear_value_size = 4;
        }

        /* Expand a small clear value size. */
        uint32_t tmp_clear_value;
        if (clear_value_size <= 2) {
                if (clear_value_size == 1) {
                        tmp_clear_value = *(uint8_t*)clear_value;
                        tmp_clear_value |= (tmp_clear_value << 8) |
                                           (tmp_clear_value << 16) |
                                           (tmp_clear_value << 24);
                } else {
                        tmp_clear_value = *(uint16_t*)clear_value;
                        tmp_clear_value |= tmp_clear_value << 16;
                }
                clear_value = &tmp_clear_value;
                clear_value_size = 4;
        }

        /* Use transform feedback for 12-byte clears. */
        /* TODO: Use compute. */
        if (clear_value_size == 12) {
                union pipe_color_union streamout_clear_value;

                memcpy(&streamout_clear_value, clear_value, clear_value_size);
                si_blitter_begin(sctx, SI_DISABLE_RENDER_COND);
                util_blitter_clear_buffer(sctx->blitter, dst, offset,
                                          size, clear_value_size / 4,
                                          &streamout_clear_value);
                si_blitter_end(sctx);
                return;
        }

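        /* Clear the dword-aligned part of the range with compute or CP DMA, then
         * write the remaining 1-3 bytes (if any) with a plain buffer upload below.
         */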
        uint64_t aligned_size = size & ~3ull;
        if (aligned_size >= 4) {
                /* Before GFX9, CP DMA was very slow when clearing GTT, so never
                 * use CP DMA clears on those chips, because we can't be certain
                 * about buffer placements.
                 */
                if (clear_value_size > 4 ||
                    (!force_cpdma &&
                     clear_value_size == 4 &&
                     offset % 4 == 0 &&
                     (size > 32*1024 || sctx->chip_class <= GFX8))) {
                        si_compute_do_clear_or_copy(sctx, dst, offset, NULL, 0,
                                                    aligned_size, clear_value,
                                                    clear_value_size, coher);
                } else {
                        assert(clear_value_size == 4);
                        si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, dst, offset,
                                               aligned_size, *clear_value, 0, coher,
                                               get_cache_policy(sctx, coher, size));
                }

                offset += aligned_size;
                size -= aligned_size;
        }

        /* Handle non-dword alignment. */
        if (size) {
                assert(dst);
                assert(dst->target == PIPE_BUFFER);
                assert(size < 4);

                pipe_buffer_write(&sctx->b, dst, offset, size, clear_value);
        }
}

static void si_pipe_clear_buffer(struct pipe_context *ctx,
                                 struct pipe_resource *dst,
                                 unsigned offset, unsigned size,
                                 const void *clear_value,
                                 int clear_value_size)
{
        si_clear_buffer((struct si_context*)ctx, dst, offset, size, (uint32_t*)clear_value,
                        clear_value_size, SI_COHERENCY_SHADER, false);
}

void si_copy_buffer(struct si_context *sctx,
                    struct pipe_resource *dst, struct pipe_resource *src,
                    uint64_t dst_offset, uint64_t src_offset, unsigned size)
{
        if (!size)
                return;

        enum si_coherency coher = SI_COHERENCY_SHADER;
        enum si_cache_policy cache_policy = get_cache_policy(sctx, coher, size);

        /* Only use compute for VRAM copies on dGPUs. */
        if (sctx->screen->info.has_dedicated_vram &&
            si_resource(dst)->domains & RADEON_DOMAIN_VRAM &&
            si_resource(src)->domains & RADEON_DOMAIN_VRAM &&
            size > 32 * 1024 &&
            dst_offset % 4 == 0 && src_offset % 4 == 0 && size % 4 == 0) {
                si_compute_do_clear_or_copy(sctx, dst, dst_offset, src, src_offset,
                                            size, NULL, 0, coher);
        } else {
                si_cp_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset, size,
                                      0, coher, cache_policy);
        }
}

void si_compute_copy_image(struct si_context *sctx,
                           struct pipe_resource *dst,
                           unsigned dst_level,
                           struct pipe_resource *src,
                           unsigned src_level,
                           unsigned dstx, unsigned dsty, unsigned dstz,
                           const struct pipe_box *src_box)
{
        struct pipe_context *ctx = &sctx->b;
        unsigned width = src_box->width;
        unsigned height = src_box->height;
        unsigned depth = src_box->depth;

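        /* Pass the source and destination origins to the shader as two uvec4s
         * in a user constant buffer (the 4th component of each is unused).
         */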
        unsigned data[] = {src_box->x, src_box->y, src_box->z, 0, dstx, dsty, dstz, 0};

        if (width == 0 || height == 0)
                return;

        si_compute_internal_begin(sctx);
        sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                       si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);

        /* src and dst have the same number of samples. */
        si_make_CB_shader_coherent(sctx, src->nr_samples, true,
                                   /* Only src can have DCC. */
                                   ((struct si_texture*)src)->surface.u.gfx9.dcc.pipe_aligned);

        struct pipe_constant_buffer saved_cb = {};
        si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);

        struct si_images *images = &sctx->images[PIPE_SHADER_COMPUTE];
        struct pipe_image_view saved_image[2] = {0};
        util_copy_image_view(&saved_image[0], &images->views[0]);
        util_copy_image_view(&saved_image[1], &images->views[1]);

        void *saved_cs = sctx->cs_shader_state.program;

        struct pipe_constant_buffer cb = {};
        cb.buffer_size = sizeof(data);
        cb.user_buffer = data;
        ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &cb);

        struct pipe_image_view image[2] = {0};
        image[0].resource = src;
        image[0].shader_access = image[0].access = PIPE_IMAGE_ACCESS_READ;
        image[0].format = util_format_linear(src->format);
        image[0].u.tex.level = src_level;
        image[0].u.tex.first_layer = 0;
        image[0].u.tex.last_layer =
                src->target == PIPE_TEXTURE_3D ? u_minify(src->depth0, src_level) - 1
                                               : (unsigned)(src->array_size - 1);
        image[1].resource = dst;
        image[1].shader_access = image[1].access = PIPE_IMAGE_ACCESS_WRITE;
        image[1].format = util_format_linear(dst->format);
        image[1].u.tex.level = dst_level;
        image[1].u.tex.first_layer = 0;
        image[1].u.tex.last_layer =
                dst->target == PIPE_TEXTURE_3D ? u_minify(dst->depth0, dst_level) - 1
                                               : (unsigned)(dst->array_size - 1);

        if (src->format == PIPE_FORMAT_R9G9B9E5_FLOAT)
                image[0].format = image[1].format = PIPE_FORMAT_R32_UINT;

        /* SNORM8 blitting has precision issues on some chips. Use the SINT
         * equivalent instead, which doesn't force DCC decompression.
         * Note that some chips avoid this issue by using SDMA.
         */
        if (util_format_is_snorm8(dst->format)) {
                image[0].format = image[1].format =
                        util_format_snorm8_to_sint8(dst->format);
        }

        ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 2, image);

        struct pipe_grid_info info = {0};

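        /* 1D array textures are copied with 64x1 workgroups; everything else
         * uses 8x8 tiles, with last_block covering the partial tile at the
         * right/bottom edge of the copy region.
         */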
        if (dst->target == PIPE_TEXTURE_1D_ARRAY && src->target == PIPE_TEXTURE_1D_ARRAY) {
                if (!sctx->cs_copy_image_1d_array)
                        sctx->cs_copy_image_1d_array =
                                si_create_copy_image_compute_shader_1d_array(ctx);
                ctx->bind_compute_state(ctx, sctx->cs_copy_image_1d_array);
                info.block[0] = 64;
                info.last_block[0] = width % 64;
                info.block[1] = 1;
                info.block[2] = 1;
                info.grid[0] = DIV_ROUND_UP(width, 64);
                info.grid[1] = depth;
                info.grid[2] = 1;
        } else {
                if (!sctx->cs_copy_image)
                        sctx->cs_copy_image = si_create_copy_image_compute_shader(ctx);
                ctx->bind_compute_state(ctx, sctx->cs_copy_image);
                info.block[0] = 8;
                info.last_block[0] = width % 8;
                info.block[1] = 8;
                info.last_block[1] = height % 8;
                info.block[2] = 1;
                info.grid[0] = DIV_ROUND_UP(width, 8);
                info.grid[1] = DIV_ROUND_UP(height, 8);
                info.grid[2] = depth;
        }

        ctx->launch_grid(ctx, &info);

        sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                       (sctx->chip_class <= GFX8 ? SI_CONTEXT_WB_L2 : 0) |
                       si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);
        ctx->bind_compute_state(ctx, saved_cs);
        ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 2, saved_image);
        ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);
        si_compute_internal_end(sctx);
}

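/* Copy DCC metadata from the pipe-aligned (internal) DCC buffer into the
 * displayable DCC buffer using the precomputed retile map, which stores pairs
 * of (source, destination) offsets into the two DCC layouts.
 */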
void si_retile_dcc(struct si_context *sctx, struct si_texture *tex)
{
        struct pipe_context *ctx = &sctx->b;

        sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                       SI_CONTEXT_CS_PARTIAL_FLUSH |
                       si_get_flush_flags(sctx, SI_COHERENCY_CB_META, L2_LRU) |
                       si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_LRU);
        sctx->emit_cache_flush(sctx);

        /* Save states. */
        void *saved_cs = sctx->cs_shader_state.program;
        struct pipe_image_view saved_img[3] = {};

        for (unsigned i = 0; i < 3; i++) {
                util_copy_image_view(&saved_img[i],
                                     &sctx->images[PIPE_SHADER_COMPUTE].views[i]);
        }

        /* Set images. */
        bool use_uint16 = tex->surface.u.gfx9.dcc_retile_use_uint16;
        unsigned num_elements = tex->surface.u.gfx9.dcc_retile_num_elements;
        struct pipe_image_view img[3];

        assert(tex->dcc_retile_map_offset && tex->dcc_retile_map_offset <= UINT_MAX);
        assert(tex->dcc_offset && tex->dcc_offset <= UINT_MAX);
        assert(tex->display_dcc_offset && tex->display_dcc_offset <= UINT_MAX);

        for (unsigned i = 0; i < 3; i++) {
                img[i].resource = &tex->buffer.b.b;
                img[i].access = i == 2 ? PIPE_IMAGE_ACCESS_WRITE : PIPE_IMAGE_ACCESS_READ;
                img[i].shader_access = SI_IMAGE_ACCESS_AS_BUFFER;
        }

        img[0].format = use_uint16 ? PIPE_FORMAT_R16G16B16A16_UINT :
                                     PIPE_FORMAT_R32G32B32A32_UINT;
        img[0].u.buf.offset = tex->dcc_retile_map_offset;
        img[0].u.buf.size = num_elements * (use_uint16 ? 2 : 4);

        img[1].format = PIPE_FORMAT_R8_UINT;
        img[1].u.buf.offset = tex->dcc_offset;
        img[1].u.buf.size = tex->surface.dcc_size;

        img[2].format = PIPE_FORMAT_R8_UINT;
        img[2].u.buf.offset = tex->display_dcc_offset;
        img[2].u.buf.size = tex->surface.u.gfx9.display_dcc_size;

        ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 3, img);

        /* Bind the compute shader. */
        if (!sctx->cs_dcc_retile)
                sctx->cs_dcc_retile = si_create_dcc_retile_cs(ctx);
        ctx->bind_compute_state(ctx, sctx->cs_dcc_retile);

        /* Dispatch compute. */
        /* img[0] has 4 channels per element containing 2 pairs of DCC offsets. */
        unsigned num_threads = num_elements / 4;

        struct pipe_grid_info info = {};
        info.block[0] = 64;
        info.block[1] = 1;
        info.block[2] = 1;
        info.grid[0] = DIV_ROUND_UP(num_threads, 64); /* includes the partial block */
        info.grid[1] = 1;
        info.grid[2] = 1;
        info.last_block[0] = num_threads % 64;

        ctx->launch_grid(ctx, &info);

        /* Don't flush caches or wait. The driver will wait at the end of this IB,
         * and L2 will be flushed by the kernel fence.
         */

        /* Restore states. */
        ctx->bind_compute_state(ctx, saved_cs);
        ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 3, saved_img);
}

void si_init_compute_blit_functions(struct si_context *sctx)
{
        sctx->b.clear_buffer = si_pipe_clear_buffer;
}

/* Clear a region of a color surface to a constant value. */
void si_compute_clear_render_target(struct pipe_context *ctx,
                                    struct pipe_surface *dstsurf,
                                    const union pipe_color_union *color,
                                    unsigned dstx, unsigned dsty,
                                    unsigned width, unsigned height,
                                    bool render_condition_enabled)
{
        struct si_context *sctx = (struct si_context *)ctx;
        unsigned num_layers = dstsurf->u.tex.last_layer - dstsurf->u.tex.first_layer + 1;
        unsigned data[4 + sizeof(color->ui)] = {dstx, dsty, dstsurf->u.tex.first_layer, 0};

        if (width == 0 || height == 0)
                return;

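        /* The clear is done with plain image stores, which don't apply sRGB
         * encoding, so convert the clear color from linear to sRGB on the CPU
         * (alpha stays linear).
         */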
        if (util_format_is_srgb(dstsurf->format)) {
                union pipe_color_union color_srgb;
                for (int i = 0; i < 3; i++)
                        color_srgb.f[i] = util_format_linear_to_srgb_float(color->f[i]);
                color_srgb.f[3] = color->f[3];
                memcpy(data + 4, color_srgb.ui, sizeof(color->ui));
        } else {
                memcpy(data + 4, color->ui, sizeof(color->ui));
        }

        si_compute_internal_begin(sctx);
        sctx->render_cond_force_off = !render_condition_enabled;

        sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                       si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);
        si_make_CB_shader_coherent(sctx, dstsurf->texture->nr_samples, true,
                                   true /* DCC is not possible with image stores */);

        struct pipe_constant_buffer saved_cb = {};
        si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);

        struct si_images *images = &sctx->images[PIPE_SHADER_COMPUTE];
        struct pipe_image_view saved_image = {0};
        util_copy_image_view(&saved_image, &images->views[0]);

        void *saved_cs = sctx->cs_shader_state.program;

        struct pipe_constant_buffer cb = {};
        cb.buffer_size = sizeof(data);
        cb.user_buffer = data;
        ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &cb);

        struct pipe_image_view image = {0};
        image.resource = dstsurf->texture;
        image.shader_access = image.access = PIPE_IMAGE_ACCESS_WRITE;
        image.format = util_format_linear(dstsurf->format);
        image.u.tex.level = dstsurf->u.tex.level;
        image.u.tex.first_layer = 0; /* 3D images ignore first_layer (BASE_ARRAY) */
        image.u.tex.last_layer = dstsurf->u.tex.last_layer;

        ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, &image);

        struct pipe_grid_info info = {0};

        if (dstsurf->texture->target != PIPE_TEXTURE_1D_ARRAY) {
                if (!sctx->cs_clear_render_target)
                        sctx->cs_clear_render_target = si_clear_render_target_shader(ctx);
                ctx->bind_compute_state(ctx, sctx->cs_clear_render_target);
                info.block[0] = 8;
                info.last_block[0] = width % 8;
                info.block[1] = 8;
                info.last_block[1] = height % 8;
                info.block[2] = 1;
                info.grid[0] = DIV_ROUND_UP(width, 8);
                info.grid[1] = DIV_ROUND_UP(height, 8);
                info.grid[2] = num_layers;
        } else {
                if (!sctx->cs_clear_render_target_1d_array)
                        sctx->cs_clear_render_target_1d_array =
                                si_clear_render_target_shader_1d_array(ctx);
                ctx->bind_compute_state(ctx, sctx->cs_clear_render_target_1d_array);
                info.block[0] = 64;
                info.last_block[0] = width % 64;
                info.block[1] = 1;
                info.block[2] = 1;
                info.grid[0] = DIV_ROUND_UP(width, 64);
                info.grid[1] = num_layers;
                info.grid[2] = 1;
        }

        ctx->launch_grid(ctx, &info);

        sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                       (sctx->chip_class <= GFX8 ? SI_CONTEXT_WB_L2 : 0) |
                       si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);
        ctx->bind_compute_state(ctx, saved_cs);
        ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, &saved_image);
        ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);
        si_compute_internal_end(sctx);
}