/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "sid.h"
/* Set this if you want the ME to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC        (1 << 0)

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define CP_DMA_RAW_WAIT    (1 << 1)
#define CP_DMA_DST_IS_GDS  (1 << 2)
#define CP_DMA_CLEAR       (1 << 3)
#define CP_DMA_PFP_SYNC_ME (1 << 4)
#define CP_DMA_SRC_IS_GDS  (1 << 5)
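
/* Note: these packet flags are combined into the "flags" argument of si_emit_cp_dma() below.
 * CP_DMA_RAW_WAIT, CP_DMA_SYNC and CP_DMA_PFP_SYNC_ME are also set automatically by
 * si_cp_dma_prepare() for the first and last packet of a split copy or clear. */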
/* The max number of bytes that can be copied per packet. */
static inline unsigned cp_dma_max_byte_count(struct si_context *sctx)
{
   unsigned max =
      sctx->chip_class >= GFX9 ? S_414_BYTE_COUNT_GFX9(~0u) : S_414_BYTE_COUNT_GFX6(~0u);

   /* make it aligned for optimal performance */
   return max & ~(SI_CPDMA_ALIGNMENT - 1);
}
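
/* Sketch of the resulting limit (informational, not from the original file): on GFX6 the
 * byte-count field is the [20:0] range mentioned on si_emit_cp_dma() below, so the per-packet
 * maximum is roughly 2 MiB rounded down to SI_CPDMA_ALIGNMENT; GFX9 widens the field. Larger
 * copies and clears are split into multiple packets by the loops further down. */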
/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set, src_va is a 32-bit
 * clear value.
 */
static void si_emit_cp_dma(struct si_context *sctx, struct radeon_cmdbuf *cs, uint64_t dst_va,
                           uint64_t src_va, unsigned size, unsigned flags,
                           enum si_cache_policy cache_policy)
{
   uint32_t header = 0, command = 0;

   assert(size <= cp_dma_max_byte_count(sctx));
   assert(sctx->chip_class != GFX6 || cache_policy == L2_BYPASS);

   if (sctx->chip_class >= GFX9)
      command |= S_414_BYTE_COUNT_GFX9(size);
   else
      command |= S_414_BYTE_COUNT_GFX6(size);

   /* Sync flags. */
   if (flags & CP_DMA_SYNC) {
      header |= S_411_CP_SYNC(1);
   } else {
      if (sctx->chip_class >= GFX9)
         command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
      else
         command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
   }

   if (flags & CP_DMA_RAW_WAIT)
      command |= S_414_RAW_WAIT(1);

   /* Src and dst flags. */
   if (sctx->chip_class >= GFX9 && !(flags & CP_DMA_CLEAR) && src_va == dst_va) {
      header |= S_411_DST_SEL(V_411_NOWHERE); /* prefetch only */
   } else if (flags & CP_DMA_DST_IS_GDS) {
      header |= S_411_DST_SEL(V_411_GDS);
      /* GDS increments the address, not CP. */
      command |= S_414_DAS(V_414_REGISTER) | S_414_DAIC(V_414_NO_INCREMENT);
   } else if (sctx->chip_class >= GFX7 && cache_policy != L2_BYPASS) {
      header |=
         S_411_DST_SEL(V_411_DST_ADDR_TC_L2) | S_500_DST_CACHE_POLICY(cache_policy == L2_STREAM);
   }

   if (flags & CP_DMA_CLEAR) {
      header |= S_411_SRC_SEL(V_411_DATA);
   } else if (flags & CP_DMA_SRC_IS_GDS) {
      header |= S_411_SRC_SEL(V_411_GDS);
      /* Both of these are required for GDS. It does increment the address. */
      command |= S_414_SAS(V_414_REGISTER) | S_414_SAIC(V_414_NO_INCREMENT);
   } else if (sctx->chip_class >= GFX7 && cache_policy != L2_BYPASS) {
      header |=
         S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2) | S_500_SRC_CACHE_POLICY(cache_policy == L2_STREAM);
   }

   if (sctx->chip_class >= GFX7) {
      radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
      radeon_emit(cs, header);
      radeon_emit(cs, src_va);       /* SRC_ADDR_LO [31:0] */
      radeon_emit(cs, src_va >> 32); /* SRC_ADDR_HI [31:0] */
      radeon_emit(cs, dst_va);       /* DST_ADDR_LO [31:0] */
      radeon_emit(cs, dst_va >> 32); /* DST_ADDR_HI [31:0] */
      radeon_emit(cs, command);
   } else {
      header |= S_411_SRC_ADDR_HI(src_va >> 32);

      radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
      radeon_emit(cs, src_va);                  /* SRC_ADDR_LO [31:0] */
      radeon_emit(cs, header);                  /* SRC_ADDR_HI [15:0] + flags. */
      radeon_emit(cs, dst_va);                  /* DST_ADDR_LO [31:0] */
      radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
      radeon_emit(cs, command);
   }

   /* CP DMA is executed in ME, but index buffers are read by PFP.
    * This ensures that ME (CP DMA) is idle before PFP starts fetching
    * indices. If we wanted to execute CP DMA in PFP, this packet
    * should precede it.
    */
   if (sctx->has_graphics && flags & CP_DMA_PFP_SYNC_ME) {
      radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
      radeon_emit(cs, 0);
   }
}
void si_cp_dma_wait_for_idle(struct si_context *sctx)
{
   /* Issue a dummy DMA that copies zero bytes.
    *
    * The DMA engine will see that there's no work to do and skip this
    * DMA request, however, the CP will see the sync flag and still wait
    * for all DMAs to complete.
    */
   si_emit_cp_dma(sctx, sctx->gfx_cs, 0, 0, 0, CP_DMA_SYNC, L2_BYPASS);
}
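
/* Per-packet bookkeeping shared by the copy and clear paths below: it adds the buffers to the
 * BO list, reserves CS space, flushes caches before the first packet, and sets the sync flags
 * (CP_DMA_RAW_WAIT on the first packet, CP_DMA_SYNC/CP_DMA_PFP_SYNC_ME on the last one),
 * unless the caller opts out with SI_CPDMA_SKIP_* flags. */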
static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst,
                              struct pipe_resource *src, unsigned byte_count,
                              uint64_t remaining_size, unsigned user_flags, enum si_coherency coher,
                              bool *is_first, unsigned *packet_flags)
{
   /* Fast exit for a CPDMA prefetch. */
   if ((user_flags & SI_CPDMA_SKIP_ALL) == SI_CPDMA_SKIP_ALL) {
      *is_first = false;
      return;
   }

   if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
      /* Count memory usage in so that need_cs_space can take it into account. */
      if (dst)
         si_context_add_resource_size(sctx, dst);
      if (src)
         si_context_add_resource_size(sctx, src);
   }

   if (!(user_flags & SI_CPDMA_SKIP_CHECK_CS_SPACE))
      si_need_gfx_cs_space(sctx);

   /* This must be done after need_cs_space. */
   if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
      if (dst)
         radeon_add_to_buffer_list(sctx, sctx->gfx_cs, si_resource(dst), RADEON_USAGE_WRITE,
                                   RADEON_PRIO_CP_DMA);
      if (src)
         radeon_add_to_buffer_list(sctx, sctx->gfx_cs, si_resource(src), RADEON_USAGE_READ,
                                   RADEON_PRIO_CP_DMA);
   }

   /* Flush the caches for the first copy only.
    * Also wait for the previous CP DMA operations.
    */
   if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC) && sctx->flags)
      sctx->emit_cache_flush(sctx);

   if (!(user_flags & SI_CPDMA_SKIP_SYNC_BEFORE) && *is_first && !(*packet_flags & CP_DMA_CLEAR))
      *packet_flags |= CP_DMA_RAW_WAIT;

   *is_first = false;

   /* Do the synchronization after the last dma, so that all data
    * is written to memory.
    */
   if (!(user_flags & SI_CPDMA_SKIP_SYNC_AFTER) && byte_count == remaining_size) {
      *packet_flags |= CP_DMA_SYNC;

      if (coher == SI_COHERENCY_SHADER)
         *packet_flags |= CP_DMA_PFP_SYNC_ME;
   }
}
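
/* Clear "size" bytes at "offset" of "dst" with a repeating 32-bit "value" using CP DMA.
 * If dst is NULL, the clear is written to GDS instead. The size must be a multiple of 4;
 * clears larger than cp_dma_max_byte_count() are split into multiple packets. */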
void si_cp_dma_clear_buffer(struct si_context *sctx, struct radeon_cmdbuf *cs,
                            struct pipe_resource *dst, uint64_t offset, uint64_t size,
                            unsigned value, unsigned user_flags, enum si_coherency coher,
                            enum si_cache_policy cache_policy)
{
   struct si_resource *sdst = si_resource(dst);
   uint64_t va = (sdst ? sdst->gpu_address : 0) + offset;
   bool is_first = true;

   assert(size && size % 4 == 0);

   /* Mark the buffer range of destination as valid (initialized),
    * so that transfer_map knows it should wait for the GPU when mapping
    * that range. */
   if (sdst)
      util_range_add(dst, &sdst->valid_buffer_range, offset, offset + size);

   /* Flush the caches. */
   if (sdst && !(user_flags & SI_CPDMA_SKIP_GFX_SYNC)) {
      sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH |
                     si_get_flush_flags(sctx, coher, cache_policy);
   }

   while (size) {
      unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
      unsigned dma_flags = CP_DMA_CLEAR | (sdst ? 0 : CP_DMA_DST_IS_GDS);

      si_cp_dma_prepare(sctx, dst, NULL, byte_count, size, user_flags, coher, &is_first,
                        &dma_flags);

      /* Emit the clear packet. */
      si_emit_cp_dma(sctx, cs, va, value, byte_count, dma_flags, cache_policy);

      size -= byte_count;
      va += byte_count;
   }

   if (sdst && cache_policy != L2_BYPASS)
      sdst->TC_L2_dirty = true;

   /* If it's not a framebuffer fast clear... */
   if (coher == SI_COHERENCY_SHADER) {
      sctx->num_cp_dma_calls++;
      si_prim_discard_signal_next_compute_ib_start(sctx);
   }
}
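
/* Usage sketch (illustrative only, not a call site from this file): clearing the first 256
 * bytes of a buffer to zero through L2 would look roughly like
 *    si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, buf, 0, 256, 0, 0, SI_COHERENCY_SHADER, L2_LRU);
 * where "buf" is a caller-owned pipe_resource. */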
/**
 * Realign the CP DMA engine. This must be done after a copy with an unaligned
 * size.
 *
 * \param size  Remaining size to the CP DMA alignment.
 */
static void si_cp_dma_realign_engine(struct si_context *sctx, unsigned size, unsigned user_flags,
                                     enum si_coherency coher, enum si_cache_policy cache_policy,
                                     bool *is_first)
{
   uint64_t va;
   unsigned dma_flags = 0;
   unsigned scratch_size = SI_CPDMA_ALIGNMENT * 2;

   assert(size < SI_CPDMA_ALIGNMENT);

   /* Use the scratch buffer as the dummy buffer. The 3D engine should be
    * idle at this point.
    */
   if (!sctx->scratch_buffer || sctx->scratch_buffer->b.b.width0 < scratch_size) {
      si_resource_reference(&sctx->scratch_buffer, NULL);
      sctx->scratch_buffer = si_aligned_buffer_create(&sctx->screen->b, SI_RESOURCE_FLAG_UNMAPPABLE,
                                                      PIPE_USAGE_DEFAULT, scratch_size, 256);
      if (!sctx->scratch_buffer)
         return;

      si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
   }

   si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b, &sctx->scratch_buffer->b.b, size, size,
                     user_flags, coher, is_first, &dma_flags);

   va = sctx->scratch_buffer->gpu_address;
   si_emit_cp_dma(sctx, sctx->gfx_cs, va, va + SI_CPDMA_ALIGNMENT, size, dma_flags, cache_policy);
}
/**
 * Do memcpy between buffers using CP DMA.
 * If src or dst is NULL, it means read or write GDS, respectively.
 *
 * \param user_flags  bitmask of SI_CPDMA_*
 */
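/* Worked example of the pre-Fiji alignment handling below (illustrative numbers, assuming
 * SI_CPDMA_ALIGNMENT is 32): copying 100 bytes starting at src_offset = 10 gives
 * realign_size = 32 - (100 % 32) = 28 and skipped_size = 32 - (10 % 32) = 22. The main loop
 * then copies the 78 aligned bytes at offsets [32, 110), the "skipped" pass copies the
 * 22 bytes at [10, 32), and a final 28-byte dummy copy in the scratch buffer realigns the
 * CP DMA engine. */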
void si_cp_dma_copy_buffer(struct si_context *sctx, struct pipe_resource *dst,
                           struct pipe_resource *src, uint64_t dst_offset, uint64_t src_offset,
                           unsigned size, unsigned user_flags, enum si_coherency coher,
                           enum si_cache_policy cache_policy)
{
   uint64_t main_dst_offset, main_src_offset;
   unsigned skipped_size = 0;
   unsigned realign_size = 0;
   unsigned gds_flags = (dst ? 0 : CP_DMA_DST_IS_GDS) | (src ? 0 : CP_DMA_SRC_IS_GDS);
   bool is_first = true;

   assert(size);

   if (dst) {
      /* Skip this for the L2 prefetch. */
      if (dst != src || dst_offset != src_offset) {
         /* Mark the buffer range of destination as valid (initialized),
          * so that transfer_map knows it should wait for the GPU when mapping
          * that range. */
         util_range_add(dst, &si_resource(dst)->valid_buffer_range, dst_offset, dst_offset + size);
      }

      dst_offset += si_resource(dst)->gpu_address;
   }
   if (src)
      src_offset += si_resource(src)->gpu_address;

   /* The workarounds aren't needed on Fiji and beyond. */
   if (sctx->family <= CHIP_CARRIZO || sctx->family == CHIP_STONEY) {
      /* If the size is not aligned, we must add a dummy copy at the end
       * just to align the internal counter. Otherwise, the DMA engine
       * would slow down by an order of magnitude for following copies.
       */
      if (size % SI_CPDMA_ALIGNMENT)
         realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);

      /* If the copy begins unaligned, we must start copying from the next
       * aligned block and the skipped part should be copied after everything
       * else has been copied. Only the src alignment matters, not dst.
       *
       * GDS doesn't need the source address to be aligned.
       */
      if (src && src_offset % SI_CPDMA_ALIGNMENT) {
         skipped_size = SI_CPDMA_ALIGNMENT - (src_offset % SI_CPDMA_ALIGNMENT);
         /* The main part will be skipped if the size is too small. */
         skipped_size = MIN2(skipped_size, size);
         size -= skipped_size;
      }
   }

   if (unlikely(sctx->ws->ws_is_secure(sctx->ws) &&
                !(user_flags & SI_CPDMA_SKIP_TMZ))) {
      bool secure = src && (si_resource(src)->flags & RADEON_FLAG_ENCRYPTED);
      assert(!secure || (!dst || (si_resource(dst)->flags & RADEON_FLAG_ENCRYPTED)));
      if (secure != sctx->ws->cs_is_secure(sctx->gfx_cs)) {
         si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
         sctx->ws->cs_set_secure(sctx->gfx_cs, secure);
      }
   }

   /* Flush the caches. */
   if ((dst || src) && !(user_flags & SI_CPDMA_SKIP_GFX_SYNC)) {
      sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH |
                     si_get_flush_flags(sctx, coher, cache_policy);
   }

   /* This is the main part doing the copying. Src is always aligned. */
   main_dst_offset = dst_offset + skipped_size;
   main_src_offset = src_offset + skipped_size;

   while (size) {
      unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
      unsigned dma_flags = gds_flags;

      si_cp_dma_prepare(sctx, dst, src, byte_count, size + skipped_size + realign_size, user_flags,
                        coher, &is_first, &dma_flags);

      si_emit_cp_dma(sctx, sctx->gfx_cs, main_dst_offset, main_src_offset, byte_count, dma_flags,
                     cache_policy);

      size -= byte_count;
      main_src_offset += byte_count;
      main_dst_offset += byte_count;
   }

   /* Copy the part we skipped because src wasn't aligned. */
   if (skipped_size) {
      unsigned dma_flags = gds_flags;

      si_cp_dma_prepare(sctx, dst, src, skipped_size, skipped_size + realign_size, user_flags,
                        coher, &is_first, &dma_flags);

      si_emit_cp_dma(sctx, sctx->gfx_cs, dst_offset, src_offset, skipped_size, dma_flags,
                     cache_policy);
   }

   /* Finally, realign the engine if the size wasn't aligned. */
   if (realign_size)
      si_cp_dma_realign_engine(sctx, realign_size, user_flags, coher, cache_policy, &is_first);

   if (dst && cache_policy != L2_BYPASS)
      si_resource(dst)->TC_L2_dirty = true;

   /* If it's not a prefetch or GDS copy... */
   if (dst && src && (dst != src || dst_offset != src_offset)) {
      sctx->num_cp_dma_calls++;
      si_prim_discard_signal_next_compute_ib_start(sctx);
   }
}
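
/* Prefetch a buffer range into the TC L2 cache. This is implemented as a CP DMA "copy" of the
 * buffer onto itself with SI_CPDMA_SKIP_ALL; on GFX9, si_emit_cp_dma() turns such a copy into a
 * DST_SEL=NOWHERE packet (prefetch only), so no data is actually written back. */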
void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf, uint64_t offset,
                              unsigned size)
{
   assert(sctx->chip_class >= GFX7);

   si_cp_dma_copy_buffer(sctx, buf, buf, offset, offset, size, SI_CPDMA_SKIP_ALL,
                         SI_COHERENCY_SHADER, L2_LRU);
}
static void cik_prefetch_shader_async(struct si_context *sctx, struct si_pm4_state *state)
{
   struct pipe_resource *bo = &state->shader->bo->b.b;

   cik_prefetch_TC_L2_async(sctx, bo, 0, bo->width0);
}
static void cik_prefetch_VBO_descriptors(struct si_context *sctx)
{
   if (!sctx->vertex_elements || !sctx->vertex_elements->vb_desc_list_alloc_size)
      return;

   cik_prefetch_TC_L2_async(sctx, &sctx->vb_descriptors_buffer->b.b, sctx->vb_descriptors_offset,
                            sctx->vertex_elements->vb_desc_list_alloc_size);
}
/**
 * Prefetch shaders and VBO descriptors.
 *
 * \param vertex_stage_only  Whether only the API VS and VBO descriptors
 *                           should be prefetched.
 */
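/* Note: when vertex_stage_only is true, only the bits belonging to the first vertex stage and
 * the VBO descriptors are cleared from prefetch_L2_mask and the function returns early, so the
 * remaining shaders can be prefetched by a later call with vertex_stage_only = false. */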
void cik_emit_prefetch_L2(struct si_context *sctx, bool vertex_stage_only)
{
   unsigned mask = sctx->prefetch_L2_mask;
   assert(mask);

   /* Prefetch shaders and VBO descriptors to TC L2. */
   if (sctx->chip_class >= GFX9) {
      /* Choose the right spot for the VBO prefetch. */
      if (sctx->queued.named.hs) {
         if (mask & SI_PREFETCH_HS)
            cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
         if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
            cik_prefetch_VBO_descriptors(sctx);
         if (vertex_stage_only) {
            sctx->prefetch_L2_mask &= ~(SI_PREFETCH_HS | SI_PREFETCH_VBO_DESCRIPTORS);
            return;
         }

         if (mask & SI_PREFETCH_GS)
            cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
         if (mask & SI_PREFETCH_VS)
            cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
      } else if (sctx->queued.named.gs) {
         if (mask & SI_PREFETCH_GS)
            cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
         if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
            cik_prefetch_VBO_descriptors(sctx);
         if (vertex_stage_only) {
            sctx->prefetch_L2_mask &= ~(SI_PREFETCH_GS | SI_PREFETCH_VBO_DESCRIPTORS);
            return;
         }

         if (mask & SI_PREFETCH_VS)
            cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
      } else {
         if (mask & SI_PREFETCH_VS)
            cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
         if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
            cik_prefetch_VBO_descriptors(sctx);
         if (vertex_stage_only) {
            sctx->prefetch_L2_mask &= ~(SI_PREFETCH_VS | SI_PREFETCH_VBO_DESCRIPTORS);
            return;
         }
      }
   } else {
      /* Choose the right spot for the VBO prefetch. */
      if (sctx->tes_shader.cso) {
         if (mask & SI_PREFETCH_LS)
            cik_prefetch_shader_async(sctx, sctx->queued.named.ls);
         if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
            cik_prefetch_VBO_descriptors(sctx);
         if (vertex_stage_only) {
            sctx->prefetch_L2_mask &= ~(SI_PREFETCH_LS | SI_PREFETCH_VBO_DESCRIPTORS);
            return;
         }

         if (mask & SI_PREFETCH_HS)
            cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
         if (mask & SI_PREFETCH_ES)
            cik_prefetch_shader_async(sctx, sctx->queued.named.es);
         if (mask & SI_PREFETCH_GS)
            cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
         if (mask & SI_PREFETCH_VS)
            cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
      } else if (sctx->gs_shader.cso) {
         if (mask & SI_PREFETCH_ES)
            cik_prefetch_shader_async(sctx, sctx->queued.named.es);
         if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
            cik_prefetch_VBO_descriptors(sctx);
         if (vertex_stage_only) {
            sctx->prefetch_L2_mask &= ~(SI_PREFETCH_ES | SI_PREFETCH_VBO_DESCRIPTORS);
            return;
         }

         if (mask & SI_PREFETCH_GS)
            cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
         if (mask & SI_PREFETCH_VS)
            cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
      } else {
         if (mask & SI_PREFETCH_VS)
            cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
         if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
            cik_prefetch_VBO_descriptors(sctx);
         if (vertex_stage_only) {
            sctx->prefetch_L2_mask &= ~(SI_PREFETCH_VS | SI_PREFETCH_VBO_DESCRIPTORS);
            return;
         }
      }
   }

   if (mask & SI_PREFETCH_PS)
      cik_prefetch_shader_async(sctx, sctx->queued.named.ps);

   sctx->prefetch_L2_mask = 0;
}
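
/* Standalone debug helper: writes known 32-bit patterns into a small buffer, bounces them
 * through GDS at the offset given by the OFFSET debug option (default 16), reads the result
 * back, and prints pass/fail for both the GDS copy and the GDS clear paths. */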
void si_test_gds(struct si_context *sctx)
{
   struct pipe_context *ctx = &sctx->b;
   struct pipe_resource *src, *dst;
   unsigned r[4] = {0};
   unsigned offset = debug_get_num_option("OFFSET", 16);

   src = pipe_buffer_create(ctx->screen, 0, PIPE_USAGE_DEFAULT, 16);
   dst = pipe_buffer_create(ctx->screen, 0, PIPE_USAGE_DEFAULT, 16);
   si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, src, 0, 4, 0xabcdef01, 0, SI_COHERENCY_SHADER,
                          L2_BYPASS);
   si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, src, 4, 4, 0x23456789, 0, SI_COHERENCY_SHADER,
                          L2_BYPASS);
   si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, src, 8, 4, 0x87654321, 0, SI_COHERENCY_SHADER,
                          L2_BYPASS);
   si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, src, 12, 4, 0xfedcba98, 0, SI_COHERENCY_SHADER,
                          L2_BYPASS);
   si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, dst, 0, 16, 0xdeadbeef, 0, SI_COHERENCY_SHADER,
                          L2_BYPASS);

   si_cp_dma_copy_buffer(sctx, NULL, src, offset, 0, 16, 0, SI_COHERENCY_NONE, L2_BYPASS);
   si_cp_dma_copy_buffer(sctx, dst, NULL, 0, offset, 16, 0, SI_COHERENCY_NONE, L2_BYPASS);

   pipe_buffer_read(ctx, dst, 0, sizeof(r), r);
   printf("GDS copy  = %08x %08x %08x %08x -> %s\n", r[0], r[1], r[2], r[3],
          r[0] == 0xabcdef01 && r[1] == 0x23456789 && r[2] == 0x87654321 && r[3] == 0xfedcba98
             ? "pass"
             : "fail");

   si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, NULL, offset, 16, 0xc1ea4146, 0, SI_COHERENCY_NONE,
                          L2_BYPASS);
   si_cp_dma_copy_buffer(sctx, dst, NULL, 0, offset, 16, 0, SI_COHERENCY_NONE, L2_BYPASS);

   pipe_buffer_read(ctx, dst, 0, sizeof(r), r);
   printf("GDS clear = %08x %08x %08x %08x -> %s\n", r[0], r[1], r[2], r[3],
          r[0] == 0xc1ea4146 && r[1] == 0xc1ea4146 && r[2] == 0xc1ea4146 && r[3] == 0xc1ea4146
             ? "pass"
             : "fail");

   pipe_resource_reference(&src, NULL);
   pipe_resource_reference(&dst, NULL);
   exit(0);
}
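
/* Write "size" bytes of "data" to a buffer (or another WRITE_DATA destination selected by
 * dst_sel) through the CP. Both offset and size must be multiples of 4, since the payload is
 * emitted as dwords. */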
void si_cp_write_data(struct si_context *sctx, struct si_resource *buf, unsigned offset,
                      unsigned size, unsigned dst_sel, unsigned engine, const void *data)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   assert(offset % 4 == 0);
   assert(size % 4 == 0);

   if (sctx->chip_class == GFX6 && dst_sel == V_370_MEM)
      dst_sel = V_370_MEM_GRBM;

   radeon_add_to_buffer_list(sctx, cs, buf, RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
   uint64_t va = buf->gpu_address + offset;

   radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + size / 4, 0));
   radeon_emit(cs, S_370_DST_SEL(dst_sel) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(engine));
   radeon_emit(cs, va);
   radeon_emit(cs, va >> 32);
   radeon_emit_array(cs, (const uint32_t *)data, size / 4);
}
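
/* Emit a COPY_DATA packet that copies a single value between the given source and destination
 * selectors (e.g. memory to memory); dst/src may be NULL when the corresponding selector
 * doesn't address a buffer. COPY_DATA_WR_CONFIRM requests write confirmation from the
 * destination. */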
void si_cp_copy_data(struct si_context *sctx, struct radeon_cmdbuf *cs, unsigned dst_sel,
                     struct si_resource *dst, unsigned dst_offset, unsigned src_sel,
                     struct si_resource *src, unsigned src_offset)
{
   /* cs can point to the compute IB, which has the buffer list in gfx_cs. */
   if (dst) {
      radeon_add_to_buffer_list(sctx, sctx->gfx_cs, dst, RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
   }
   if (src) {
      radeon_add_to_buffer_list(sctx, sctx->gfx_cs, src, RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
   }

   uint64_t dst_va = (dst ? dst->gpu_address : 0ull) + dst_offset;
   uint64_t src_va = (src ? src->gpu_address : 0ull) + src_offset;

   radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
   radeon_emit(cs, COPY_DATA_SRC_SEL(src_sel) | COPY_DATA_DST_SEL(dst_sel) | COPY_DATA_WR_CONFIRM);
   radeon_emit(cs, src_va);
   radeon_emit(cs, src_va >> 32);
   radeon_emit(cs, dst_va);
   radeon_emit(cs, dst_va >> 32);
}