/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "util/u_format.h"
#include "util/format_srgb.h"

/* Note: Compute shaders always use SI_COMPUTE_DST_CACHE_POLICY for dst
 * and L2_STREAM for src.
 */
static enum si_cache_policy get_cache_policy(struct si_context *sctx,
                                             enum si_coherency coher,
                                             uint64_t size)
{
   if ((sctx->chip_class >= GFX9 && (coher == SI_COHERENCY_CB_META ||
                                     coher == SI_COHERENCY_CP)) ||
       (sctx->chip_class >= CIK && coher == SI_COHERENCY_SHADER))
      return size <= 256 * 1024 ? L2_LRU : L2_STREAM;

   return L2_BYPASS;
}
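
/* Map a coherency requirement onto the context flush flags that must be set
 * before the affected range is used: SI_COHERENCY_NONE and SI_COHERENCY_CP
 * need no shader-cache invalidation, shader coherency invalidates the L1
 * caches (and L2 only when it was bypassed), and CB metadata coherency
 * requires flushing and invalidating the CB caches.
 */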
unsigned si_get_flush_flags(struct si_context *sctx, enum si_coherency coher,
                            enum si_cache_policy cache_policy)
{
   switch (coher) {
   case SI_COHERENCY_NONE:
   case SI_COHERENCY_CP:
      return 0;
   case SI_COHERENCY_SHADER:
      return SI_CONTEXT_INV_SMEM_L1 |
             SI_CONTEXT_INV_VMEM_L1 |
             (cache_policy == L2_BYPASS ? SI_CONTEXT_INV_GLOBAL_L2 : 0);
   case SI_COHERENCY_CB_META:
      return SI_CONTEXT_FLUSH_AND_INV_CB;
   }
}
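
/* Internal dispatches shouldn't be counted by pipeline statistics queries or
 * be affected by the render condition, so suspend both around the internal
 * compute shader; si_compute_internal_end() restores them.
 */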
static void si_compute_internal_begin(struct si_context *sctx)
{
   sctx->flags &= ~SI_CONTEXT_START_PIPELINE_STATS;
   sctx->flags |= SI_CONTEXT_STOP_PIPELINE_STATS;
   sctx->render_cond_force_off = true;
}

static void si_compute_internal_end(struct si_context *sctx)
{
   sctx->flags &= ~SI_CONTEXT_STOP_PIPELINE_STATS;
   sctx->flags |= SI_CONTEXT_START_PIPELINE_STATS;
   sctx->render_cond_force_off = false;
}
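
/* Clear a buffer range with a repeating dword pattern (src == NULL) or copy
 * a buffer range using an internal DMA compute shader. Offsets and size must
 * be dword-aligned; callers handle any unaligned remainder.
 */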
static void si_compute_do_clear_or_copy(struct si_context *sctx,
                                        struct pipe_resource *dst,
                                        unsigned dst_offset,
                                        struct pipe_resource *src,
                                        unsigned src_offset,
                                        unsigned size,
                                        const uint32_t *clear_value,
                                        unsigned clear_value_size,
                                        enum si_coherency coher)
{
   struct pipe_context *ctx = &sctx->b;

   assert(src_offset % 4 == 0);
   assert(dst_offset % 4 == 0);
   assert(size % 4 == 0);

   assert(dst->target != PIPE_BUFFER || dst_offset + size <= dst->width0);
   assert(!src || src_offset + size <= src->width0);

   si_compute_internal_begin(sctx);
   sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                  SI_CONTEXT_CS_PARTIAL_FLUSH |
                  si_get_flush_flags(sctx, coher, SI_COMPUTE_DST_CACHE_POLICY);

   /* Save states. */
   void *saved_cs = sctx->cs_shader_state.program;
   struct pipe_shader_buffer saved_sb[2] = {};
   si_get_shader_buffers(sctx, PIPE_SHADER_COMPUTE, 0, src ? 2 : 1, saved_sb);

   /* The memory accesses are coalesced, meaning that the 1st instruction writes
    * the 1st contiguous block of data for the whole wave, the 2nd instruction
    * writes the 2nd contiguous block of data, etc.
    */
   unsigned dwords_per_thread = src ? SI_COMPUTE_COPY_DW_PER_THREAD :
                                      SI_COMPUTE_CLEAR_DW_PER_THREAD;
   unsigned instructions_per_thread = MAX2(1, dwords_per_thread / 4);
   unsigned dwords_per_instruction = dwords_per_thread / instructions_per_thread;
   unsigned dwords_per_wave = dwords_per_thread * 64;

   unsigned num_dwords = size / 4;
   unsigned num_instructions = DIV_ROUND_UP(num_dwords, dwords_per_instruction);
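
   /* Worked example (assuming dwords_per_thread == 4): each thread issues one
    * 4-dword (16-byte) vector instruction, so a 64-thread wave covers
    * 256 dwords (1 KiB) of the range.
    */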

   struct pipe_grid_info info = {};
   info.block[0] = MIN2(64, num_instructions);
   info.block[1] = 1;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(num_dwords, dwords_per_wave);
   info.grid[1] = 1;
   info.grid[2] = 1;

   struct pipe_shader_buffer sb[2] = {};
   sb[0].buffer = dst;
   sb[0].buffer_offset = dst_offset;
   sb[0].buffer_size = size;

   bool shader_dst_stream_policy = SI_COMPUTE_DST_CACHE_POLICY != L2_LRU;

   if (src) {
      sb[1].buffer = src;
      sb[1].buffer_offset = src_offset;
      sb[1].buffer_size = size;

      ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 2, sb);

      if (!sctx->cs_copy_buffer) {
         sctx->cs_copy_buffer =
            si_create_dma_compute_shader(&sctx->b,
                                         SI_COMPUTE_COPY_DW_PER_THREAD,
                                         shader_dst_stream_policy, true);
      }
      ctx->bind_compute_state(ctx, sctx->cs_copy_buffer);
   } else {
      assert(clear_value_size >= 4 &&
             clear_value_size <= 16 &&
             util_is_power_of_two_or_zero(clear_value_size));
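
      /* Replicate the clear value into the 4 user-data dwords: a 4-byte value
       * fills all four, an 8-byte value alternates its two dwords, and a
       * 16-byte value occupies them as-is.
       */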
      for (unsigned i = 0; i < 4; i++)
         sctx->cs_user_data[i] = clear_value[i % (clear_value_size / 4)];

      ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 1, sb);

      if (!sctx->cs_clear_buffer) {
         sctx->cs_clear_buffer =
            si_create_dma_compute_shader(&sctx->b,
                                         SI_COMPUTE_CLEAR_DW_PER_THREAD,
                                         shader_dst_stream_policy, false);
      }
      ctx->bind_compute_state(ctx, sctx->cs_clear_buffer);
   }

   ctx->launch_grid(ctx, &info);

   enum si_cache_policy cache_policy = get_cache_policy(sctx, coher, size);
   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                  (cache_policy == L2_BYPASS ? SI_CONTEXT_WRITEBACK_GLOBAL_L2 : 0);

   if (cache_policy != L2_BYPASS)
      si_resource(dst)->TC_L2_dirty = true;

   /* Restore states. */
   ctx->bind_compute_state(ctx, saved_cs);
   ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, src ? 2 : 1, saved_sb);
   si_compute_internal_end(sctx);
}
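
/* Clear a buffer range. The clear value may be 1, 2, 4, 8, 12, or 16 bytes:
 * small values are expanded to a full dword, and larger values are lowered
 * to a dword fill when all of their dwords are identical.
 */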
void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
                     uint64_t offset, uint64_t size, uint32_t *clear_value,
                     uint32_t clear_value_size, enum si_coherency coher)
{
   if (!size)
      return;

   unsigned clear_alignment = MIN2(clear_value_size, 4);

   assert(clear_value_size != 3 && clear_value_size != 6); /* 12 is allowed. */
   assert(offset % clear_alignment == 0);
   assert(size % clear_alignment == 0);
   assert(size < (UINT_MAX & ~0xf)); /* TODO: test 64-bit sizes in all codepaths */

   /* Reduce a large clear value size if possible. */
   if (clear_value_size > 4) {
      bool clear_dword_duplicated = true;

      /* See if we can lower large fills to dword fills. */
      for (unsigned i = 1; i < clear_value_size / 4; i++) {
         if (clear_value[0] != clear_value[i]) {
            clear_dword_duplicated = false;
            break;
         }
      }
      if (clear_dword_duplicated)
         clear_value_size = 4;
   }
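
   /* E.g. a 16-byte value whose four dwords are identical has now been
    * lowered to a plain dword fill, while {1, 2, 1, 2} stays a 16-byte fill
    * because its dwords differ.
    */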

   /* Expand a small clear value size. */
   uint32_t tmp_clear_value;
   if (clear_value_size <= 2) {
      if (clear_value_size == 1) {
         tmp_clear_value = *(uint8_t*)clear_value;
         tmp_clear_value |= (tmp_clear_value << 8) |
                            (tmp_clear_value << 16) |
                            (tmp_clear_value << 24);
      } else {
         tmp_clear_value = *(uint16_t*)clear_value;
         tmp_clear_value |= tmp_clear_value << 16;
      }
      clear_value = &tmp_clear_value;
      clear_value_size = 4;
   }
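
   /* From here on, the clear value is at least dword-sized: e.g. the byte
    * 0xAB became 0xABABABAB and the 16-bit value 0x1234 became 0x12341234.
    */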

   /* Use transform feedback for 12-byte clears. */
   /* TODO: Use compute. */
   if (clear_value_size == 12) {
      union pipe_color_union streamout_clear_value;

      memcpy(&streamout_clear_value, clear_value, clear_value_size);
      si_blitter_begin(sctx, SI_DISABLE_RENDER_COND);
      util_blitter_clear_buffer(sctx->blitter, dst, offset,
                                size, clear_value_size / 4,
                                &streamout_clear_value);
      si_blitter_end(sctx);
      return;
   }

   uint64_t aligned_size = size & ~3ull;
   if (aligned_size >= 4) {
      /* Before GFX9, CP DMA was very slow when clearing GTT, so never
       * use CP DMA clears on those chips, because we can't be certain
       * about buffer placements.
       */
      if (clear_value_size > 4 ||
          (clear_value_size == 4 &&
           offset % 4 == 0 &&
           (size > 32*1024 || sctx->chip_class <= VI))) {
         si_compute_do_clear_or_copy(sctx, dst, offset, NULL, 0,
                                     aligned_size, clear_value,
                                     clear_value_size, coher);
      } else {
         assert(clear_value_size == 4);
         si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, dst, offset,
                                aligned_size, *clear_value, 0, coher,
                                get_cache_policy(sctx, coher, size));
      }

      offset += aligned_size;
      size -= aligned_size;
   }

   /* Handle non-dword alignment. */
   if (size) {
      assert(dst);
      assert(dst->target == PIPE_BUFFER);
      assert(size < 4);

      pipe_buffer_write(&sctx->b, dst, offset, size, clear_value);
   }
}
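
/* Entry point for pipe_context::clear_buffer. Buffers holding streamout
 * "filled size" counters are updated by the command processor, hence
 * SI_COHERENCY_CP; everything else assumes shader access.
 */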
static void si_pipe_clear_buffer(struct pipe_context *ctx,
                                 struct pipe_resource *dst,
                                 unsigned offset, unsigned size,
                                 const void *clear_value,
                                 int clear_value_size)
{
   enum si_coherency coher;

   if (dst->flags & SI_RESOURCE_FLAG_SO_FILLED_SIZE)
      coher = SI_COHERENCY_CP;
   else
      coher = SI_COHERENCY_SHADER;

   si_clear_buffer((struct si_context*)ctx, dst, offset, size,
                   (uint32_t*)clear_value, clear_value_size, coher);
}
void si_copy_buffer(struct si_context *sctx,
                    struct pipe_resource *dst, struct pipe_resource *src,
                    uint64_t dst_offset, uint64_t src_offset, unsigned size)
{
   if (!size)
      return;

   enum si_coherency coher = SI_COHERENCY_SHADER;
   enum si_cache_policy cache_policy = get_cache_policy(sctx, coher, size);

   /* Only use compute for VRAM copies on dGPUs. */
   if (sctx->screen->info.has_dedicated_vram &&
       si_resource(dst)->domains & RADEON_DOMAIN_VRAM &&
       si_resource(src)->domains & RADEON_DOMAIN_VRAM &&
       size > 32*1024 &&
       dst_offset % 4 == 0 && src_offset % 4 == 0 && size % 4 == 0) {
      si_compute_do_clear_or_copy(sctx, dst, dst_offset, src, src_offset,
                                  size, NULL, 0, coher);
   } else {
      si_cp_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset, size,
                            0, coher, cache_policy);
   }
}
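
/* Copy a region between two images with a compute shader instead of the
 * graphics blitter. Both images are bound with linear (non-sRGB) views, so
 * the copy moves raw texel bits without format conversion.
 */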
void si_compute_copy_image(struct si_context *sctx,
                           struct pipe_resource *dst,
                           unsigned dst_level,
                           struct pipe_resource *src,
                           unsigned src_level,
                           unsigned dstx, unsigned dsty, unsigned dstz,
                           const struct pipe_box *src_box)
{
   struct pipe_context *ctx = &sctx->b;
   unsigned width = src_box->width;
   unsigned height = src_box->height;
   unsigned depth = src_box->depth;

   unsigned data[] = {src_box->x, src_box->y, src_box->z, 0,
                      dstx, dsty, dstz, 0};

   if (width == 0 || height == 0)
      return;

   si_compute_internal_begin(sctx);
   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                  si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);
   si_make_CB_shader_coherent(sctx, dst->nr_samples, true);

   struct pipe_constant_buffer saved_cb = {};
   si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);

   struct si_images *images = &sctx->images[PIPE_SHADER_COMPUTE];
   struct pipe_image_view saved_image[2] = {0};
   util_copy_image_view(&saved_image[0], &images->views[0]);
   util_copy_image_view(&saved_image[1], &images->views[1]);

   void *saved_cs = sctx->cs_shader_state.program;

   struct pipe_constant_buffer cb = {};
   cb.buffer_size = sizeof(data);
   cb.user_buffer = data;
   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &cb);

   struct pipe_image_view image[2] = {0};
   image[0].resource = src;
   image[0].shader_access = image[0].access = PIPE_IMAGE_ACCESS_READ;
   image[0].format = util_format_linear(src->format);
   image[0].u.tex.level = src_level;
   image[0].u.tex.first_layer = 0;
   image[0].u.tex.last_layer =
      src->target == PIPE_TEXTURE_3D ? u_minify(src->depth0, src_level) - 1
                                     : (unsigned)(src->array_size - 1);
   image[1].resource = dst;
   image[1].shader_access = image[1].access = PIPE_IMAGE_ACCESS_WRITE;
   image[1].format = util_format_linear(dst->format);
   image[1].u.tex.level = dst_level;
   image[1].u.tex.first_layer = 0;
   image[1].u.tex.last_layer =
      dst->target == PIPE_TEXTURE_3D ? u_minify(dst->depth0, dst_level) - 1
                                     : (unsigned)(dst->array_size - 1);

   if (src->format == PIPE_FORMAT_R9G9B9E5_FLOAT)
      image[0].format = image[1].format = PIPE_FORMAT_R32_UINT;

   /* SNORM8 blitting has precision issues on some chips. Use the SINT
    * equivalent instead, which doesn't force DCC decompression.
    * Note that some chips avoid this issue by using SDMA.
    */
   if (util_format_is_snorm8(dst->format)) {
      image[0].format = image[1].format =
         util_format_snorm8_to_sint8(dst->format);
   }

   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 2, image);

   struct pipe_grid_info info = {0};
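
   /* 1D array textures use a 64-wide 1D thread block; everything else uses
    * an 8x8 tile per block, with src_box->depth layers in the Z dimension.
    */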
   if (dst->target == PIPE_TEXTURE_1D_ARRAY &&
       src->target == PIPE_TEXTURE_1D_ARRAY) {
      if (!sctx->cs_copy_image_1d_array)
         sctx->cs_copy_image_1d_array =
            si_create_copy_image_compute_shader_1d_array(ctx);
      ctx->bind_compute_state(ctx, sctx->cs_copy_image_1d_array);
      info.block[0] = 64;
      sctx->compute_last_block[0] = width % 64;
      info.block[1] = 1;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 64);
      info.grid[1] = depth;
      info.grid[2] = 1;
   } else {
      if (!sctx->cs_copy_image)
         sctx->cs_copy_image = si_create_copy_image_compute_shader(ctx);
      ctx->bind_compute_state(ctx, sctx->cs_copy_image);
      info.block[0] = 8;
      sctx->compute_last_block[0] = width % 8;
      info.block[1] = 8;
      sctx->compute_last_block[1] = height % 8;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 8);
      info.grid[1] = DIV_ROUND_UP(height, 8);
      info.grid[2] = depth;
   }
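
   /* compute_last_block holds the size of the final, partial thread block in
    * each dimension, so the rounded-up grid doesn't write past the copy
    * region; it's reset right after the dispatch.
    */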
   ctx->launch_grid(ctx, &info);

   sctx->compute_last_block[0] = 0;
   sctx->compute_last_block[1] = 0;

   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                  (sctx->chip_class <= VI ? SI_CONTEXT_WRITEBACK_GLOBAL_L2 : 0) |
                  si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);
   ctx->bind_compute_state(ctx, saved_cs);
   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 2, saved_image);
   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);
   si_compute_internal_end(sctx);
}

void si_init_compute_blit_functions(struct si_context *sctx)
{
   sctx->b.clear_buffer = si_pipe_clear_buffer;
}

/* Clear a region of a color surface to a constant value. */
void si_compute_clear_render_target(struct pipe_context *ctx,
                                    struct pipe_surface *dstsurf,
                                    const union pipe_color_union *color,
                                    unsigned dstx, unsigned dsty,
                                    unsigned width, unsigned height,
                                    bool render_condition_enabled)
{
   struct si_context *sctx = (struct si_context *)ctx;
   unsigned num_layers = dstsurf->u.tex.last_layer - dstsurf->u.tex.first_layer + 1;
   unsigned data[4 + sizeof(color->ui)] = {dstx, dsty,
                                           dstsurf->u.tex.first_layer, 0};

   if (width == 0 || height == 0)
      return;
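
   /* The image is bound with a linear (non-sRGB) view, so the sRGB encoding
    * has to be applied to the clear color on the CPU before it's stored.
    */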
   if (util_format_is_srgb(dstsurf->format)) {
      union pipe_color_union color_srgb;
      for (int i = 0; i < 3; i++)
         color_srgb.f[i] = util_format_linear_to_srgb_float(color->f[i]);
      color_srgb.f[3] = color->f[3];
      memcpy(data + 4, color_srgb.ui, sizeof(color->ui));
   } else {
      memcpy(data + 4, color->ui, sizeof(color->ui));
   }

   si_compute_internal_begin(sctx);
   sctx->render_cond_force_off = !render_condition_enabled;

   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                  si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);
   si_make_CB_shader_coherent(sctx, dstsurf->texture->nr_samples, true);
= {};
463 si_get_pipe_constant_buffer(sctx
, PIPE_SHADER_COMPUTE
, 0, &saved_cb
);
465 struct si_images
*images
= &sctx
->images
[PIPE_SHADER_COMPUTE
];
466 struct pipe_image_view saved_image
= {0};
467 util_copy_image_view(&saved_image
, &images
->views
[0]);
469 void *saved_cs
= sctx
->cs_shader_state
.program
;
471 struct pipe_constant_buffer cb
= {};
472 cb
.buffer_size
= sizeof(data
);
473 cb
.user_buffer
= data
;
474 ctx
->set_constant_buffer(ctx
, PIPE_SHADER_COMPUTE
, 0, &cb
);
476 struct pipe_image_view image
= {0};
477 image
.resource
= dstsurf
->texture
;
478 image
.shader_access
= image
.access
= PIPE_IMAGE_ACCESS_WRITE
;
479 image
.format
= util_format_linear(dstsurf
->format
);
480 image
.u
.tex
.level
= dstsurf
->u
.tex
.level
;
481 image
.u
.tex
.first_layer
= 0; /* 3D images ignore first_layer (BASE_ARRAY) */
482 image
.u
.tex
.last_layer
= dstsurf
->u
.tex
.last_layer
;
484 ctx
->set_shader_images(ctx
, PIPE_SHADER_COMPUTE
, 0, 1, &image
);
486 struct pipe_grid_info info
= {0};

   if (dstsurf->texture->target != PIPE_TEXTURE_1D_ARRAY) {
      if (!sctx->cs_clear_render_target)
         sctx->cs_clear_render_target = si_clear_render_target_shader(ctx);
      ctx->bind_compute_state(ctx, sctx->cs_clear_render_target);
      info.block[0] = 8;
      sctx->compute_last_block[0] = width % 8;
      info.block[1] = 8;
      sctx->compute_last_block[1] = height % 8;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 8);
      info.grid[1] = DIV_ROUND_UP(height, 8);
      info.grid[2] = num_layers;
   } else {
      if (!sctx->cs_clear_render_target_1d_array)
         sctx->cs_clear_render_target_1d_array =
            si_clear_render_target_shader_1d_array(ctx);
      ctx->bind_compute_state(ctx, sctx->cs_clear_render_target_1d_array);
      info.block[0] = 64;
      sctx->compute_last_block[0] = width % 64;
      info.block[1] = 1;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 64);
      info.grid[1] = num_layers;
      info.grid[2] = 1;
   }

   ctx->launch_grid(ctx, &info);

   sctx->compute_last_block[0] = 0;
   sctx->compute_last_block[1] = 0;

   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                  (sctx->chip_class <= VI ? SI_CONTEXT_WRITEBACK_GLOBAL_L2 : 0) |
                  si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM);
   ctx->bind_compute_state(ctx, saved_cs);
   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, &saved_image);
   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);
   si_compute_internal_end(sctx);
}