/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "sid.h"

/* Recommended maximum sizes for optimal performance.
 * Fall back to compute or SDMA if the size is greater.
 */
#define CP_DMA_COPY_PERF_THRESHOLD	(64 * 1024) /* copied from Vulkan */
#define CP_DMA_CLEAR_PERF_THRESHOLD	(32 * 1024) /* guess (clear is much slower) */

/* Set this if you want the ME to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC		(1 << 0)

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define CP_DMA_RAW_WAIT		(1 << 1)
#define CP_DMA_DST_IS_GDS	(1 << 2)
#define CP_DMA_CLEAR		(1 << 3)
#define CP_DMA_PFP_SYNC_ME	(1 << 4)
#define CP_DMA_SRC_IS_GDS	(1 << 5)
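
/* Illustrative note, not from the original file: the last packet of a
 * shader-coherent clear typically ends up carrying
 * CP_DMA_CLEAR | CP_DMA_SYNC | CP_DMA_PFP_SYNC_ME (see si_cp_dma_prepare),
 * while intermediate packets of a split transfer carry no sync flags. */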

/* The max number of bytes that can be copied per packet. */
static inline unsigned cp_dma_max_byte_count(struct si_context *sctx)
{
	unsigned max = sctx->chip_class >= GFX9 ?
			       S_414_BYTE_COUNT_GFX9(~0u) :
			       S_414_BYTE_COUNT_GFX6(~0u);

	/* make it aligned for optimal performance */
	return max & ~(SI_CPDMA_ALIGNMENT - 1);
}
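
/* Worked example, assuming the GFX6 BYTE_COUNT field spans bits [20:0] and
 * SI_CPDMA_ALIGNMENT is 32: S_414_BYTE_COUNT_GFX6(~0u) = 0x1fffff, and
 * clearing the low alignment bits gives 0x1fffe0, so a single packet moves
 * just under 2 MiB; larger requests are split into a loop of packets. */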

/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set,
 * src_va is a 32-bit clear value.
 */
static void si_emit_cp_dma(struct si_context *sctx, uint64_t dst_va,
			   uint64_t src_va, unsigned size, unsigned flags,
			   enum si_cache_policy cache_policy)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	uint32_t header = 0, command = 0;

	assert(size <= cp_dma_max_byte_count(sctx));
	assert(sctx->chip_class != SI || cache_policy == L2_BYPASS);

	if (sctx->chip_class >= GFX9)
		command |= S_414_BYTE_COUNT_GFX9(size);
	else
		command |= S_414_BYTE_COUNT_GFX6(size);

	/* Sync flags. */
	if (flags & CP_DMA_SYNC)
		header |= S_411_CP_SYNC(1);
	else {
		if (sctx->chip_class >= GFX9)
			command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
		else
			command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
	}

	if (flags & CP_DMA_RAW_WAIT)
		command |= S_414_RAW_WAIT(1);

	/* Src and dst flags. */
	if (sctx->chip_class >= GFX9 && !(flags & CP_DMA_CLEAR) &&
	    src_va == dst_va) {
		header |= S_411_DST_SEL(V_411_NOWHERE); /* prefetch only */
	} else if (flags & CP_DMA_DST_IS_GDS) {
		header |= S_411_DST_SEL(V_411_GDS);
		/* GDS increments the address, not CP. */
		command |= S_414_DAS(V_414_REGISTER) |
			   S_414_DAIC(V_414_NO_INCREMENT);
	} else if (sctx->chip_class >= CIK && cache_policy != L2_BYPASS) {
		header |= S_411_DST_SEL(V_411_DST_ADDR_TC_L2) |
			  S_500_DST_CACHE_POLICY(cache_policy == L2_STREAM);
	}

	if (flags & CP_DMA_CLEAR) {
		header |= S_411_SRC_SEL(V_411_DATA);
	} else if (flags & CP_DMA_SRC_IS_GDS) {
		header |= S_411_SRC_SEL(V_411_GDS);
		/* Both of these are required for GDS. It does increment the address. */
		command |= S_414_SAS(V_414_REGISTER) |
			   S_414_SAIC(V_414_NO_INCREMENT);
	} else if (sctx->chip_class >= CIK && cache_policy != L2_BYPASS) {
		header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2) |
			  S_500_SRC_CACHE_POLICY(cache_policy == L2_STREAM);
	}

	if (sctx->chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, header);
		radeon_emit(cs, src_va);		/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, src_va >> 32);		/* SRC_ADDR_HI [31:0] */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);		/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, command);
	} else {
		header |= S_411_SRC_ADDR_HI(src_va >> 32);

		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_va);			/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, header);			/* SRC_ADDR_HI [15:0] + flags. */
		radeon_emit(cs, dst_va);			/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff);	/* DST_ADDR_HI [15:0] */
		radeon_emit(cs, command);
	}

	/* CP DMA is executed in ME, but index buffers are read by PFP.
	 * This ensures that ME (CP DMA) is idle before PFP starts fetching
	 * indices. If we wanted to execute CP DMA in PFP, this packet
	 * should precede it.
	 */
	if (flags & CP_DMA_PFP_SYNC_ME) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}
}

void si_cp_dma_wait_for_idle(struct si_context *sctx)
{
	/* Issue a dummy DMA that copies zero bytes.
	 *
	 * The DMA engine will see that there's no work to do and skip this
	 * DMA request, however, the CP will see the sync flag and still wait
	 * for all DMAs to complete.
	 */
	si_emit_cp_dma(sctx, 0, 0, 0, CP_DMA_SYNC, L2_BYPASS);
}

static unsigned get_flush_flags(struct si_context *sctx, enum si_coherency coher,
				enum si_cache_policy cache_policy)
{
	switch (coher) {
	default:
	case SI_COHERENCY_NONE:
		return 0;
	case SI_COHERENCY_SHADER:
		assert(sctx->chip_class != SI || cache_policy == L2_BYPASS);
		return SI_CONTEXT_INV_SMEM_L1 |
		       SI_CONTEXT_INV_VMEM_L1 |
		       (cache_policy == L2_BYPASS ? SI_CONTEXT_INV_GLOBAL_L2 : 0);
	case SI_COHERENCY_CB_META:
		assert(sctx->chip_class >= GFX9 ? cache_policy != L2_BYPASS :
						  cache_policy == L2_BYPASS);
		return SI_CONTEXT_FLUSH_AND_INV_CB;
	}
}

static enum si_cache_policy get_cache_policy(struct si_context *sctx,
					     enum si_coherency coher)
{
	if ((sctx->chip_class >= GFX9 && coher == SI_COHERENCY_CB_META) ||
	    (sctx->chip_class >= CIK && coher == SI_COHERENCY_SHADER))
		return L2_LRU;

	return L2_BYPASS;
}
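
/* Illustrative summary of the selection above: SI always gets L2_BYPASS;
 * on CIK+ shader-coherent traffic goes through L2 as L2_LRU; CB metadata
 * additionally uses L2 only on GFX9, matching the asserts in
 * get_flush_flags. */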

static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst,
			      struct pipe_resource *src, unsigned byte_count,
			      uint64_t remaining_size, unsigned user_flags,
			      enum si_coherency coher, bool *is_first,
			      unsigned *packet_flags)
{
	/* Fast exit for a CPDMA prefetch. */
	if ((user_flags & SI_CPDMA_SKIP_ALL) == SI_CPDMA_SKIP_ALL) {
		*is_first = false;
		return;
	}

	if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
		/* Count memory usage so that need_cs_space can take it into account. */
		if (dst)
			si_context_add_resource_size(sctx, dst);
		if (src)
			si_context_add_resource_size(sctx, src);
	}

	if (!(user_flags & SI_CPDMA_SKIP_CHECK_CS_SPACE))
		si_need_gfx_cs_space(sctx);

	/* This must be done after need_cs_space. */
	if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
		if (dst)
			radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
						  r600_resource(dst),
						  RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
		if (src)
			radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
						  r600_resource(src),
						  RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
	}

	/* Flush the caches for the first copy only.
	 * Also wait for the previous CP DMA operations.
	 */
	if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC) && sctx->flags)
		si_emit_cache_flush(sctx);

	if (!(user_flags & SI_CPDMA_SKIP_SYNC_BEFORE) && *is_first)
		*packet_flags |= CP_DMA_RAW_WAIT;

	*is_first = false;

	/* Do the synchronization after the last dma, so that all data
	 * is written to memory.
	 */
	if (!(user_flags & SI_CPDMA_SKIP_SYNC_AFTER) &&
	    byte_count == remaining_size) {
		*packet_flags |= CP_DMA_SYNC;

		if (coher == SI_COHERENCY_SHADER)
			*packet_flags |= CP_DMA_PFP_SYNC_ME;
	}
}
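
/* Illustrative sequence with assumed numbers: a 5 MiB copy split into three
 * packets calls si_cp_dma_prepare() three times. Only the first call sets
 * CP_DMA_RAW_WAIT (*is_first), and only the last one (byte_count ==
 * remaining_size) sets CP_DMA_SYNC, plus CP_DMA_PFP_SYNC_ME for
 * SI_COHERENCY_SHADER. */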

void si_cp_dma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
			    uint64_t offset, uint64_t size, unsigned value,
			    enum si_coherency coher,
			    enum si_cache_policy cache_policy)
{
	struct r600_resource *rdst = r600_resource(dst);
	uint64_t va = (rdst ? rdst->gpu_address : 0) + offset;
	bool is_first = true;

	assert(size && size % 4 == 0);

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	if (rdst)
		util_range_add(&rdst->valid_buffer_range, offset, offset + size);

	/* Flush the caches. */
	sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
		       SI_CONTEXT_CS_PARTIAL_FLUSH |
		       get_flush_flags(sctx, coher, cache_policy);

	while (size) {
		unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
		unsigned dma_flags = CP_DMA_CLEAR | (rdst ? 0 : CP_DMA_DST_IS_GDS);

		si_cp_dma_prepare(sctx, dst, NULL, byte_count, size, 0, coher,
				  &is_first, &dma_flags);

		/* Emit the clear packet. */
		si_emit_cp_dma(sctx, va, value, byte_count, dma_flags, cache_policy);

		size -= byte_count;
		va += byte_count;
	}

	if (rdst && cache_policy != L2_BYPASS)
		rdst->TC_L2_dirty = true;

	/* If it's not a framebuffer fast clear... */
	if (coher == SI_COHERENCY_SHADER)
		sctx->num_cp_dma_calls++;
}
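
/* Usage note, illustrative: passing dst == NULL clears GDS instead of a
 * buffer; rdst is then NULL, va is just the GDS offset, and every packet
 * carries CP_DMA_DST_IS_GDS (see si_test_gds at the end of this file). */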

/* dst == NULL means GDS. */
void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
		     uint64_t offset, uint64_t size, unsigned value,
		     enum si_coherency coher)
{
	struct radeon_winsys *ws = sctx->ws;
	struct r600_resource *rdst = r600_resource(dst);
	enum si_cache_policy cache_policy = get_cache_policy(sctx, coher);
	uint64_t dma_clear_size;

	if (!size)
		return;

	dma_clear_size = size & ~3ull;

	/* dma_clear_buffer can use clear_buffer on failure. Make sure that
	 * doesn't happen. We don't want an infinite recursion: */
	if (sctx->dma_cs && dst &&
	    !(dst->flags & PIPE_RESOURCE_FLAG_SPARSE) &&
	    (offset % 4 == 0) &&
	    /* CP DMA is very slow. Always use SDMA for big clears. This
	     * alone improves DeusEx:MD performance by 70%. */
	    (size > CP_DMA_CLEAR_PERF_THRESHOLD ||
	     /* Buffers not used by the GFX IB yet will be cleared by SDMA.
	      * This happens to move most buffer clears to SDMA, including
	      * DCC and CMASK clears, because pipe->clear clears them before
	      * si_emit_framebuffer_state (in a draw call) adds them.
	      * For example, DeusEx:MD has 21 buffer clears per frame and all
	      * of them are moved to SDMA thanks to this. */
	     !ws->cs_is_buffer_referenced(sctx->gfx_cs, rdst->buf,
					  RADEON_USAGE_READWRITE))) {
		si_sdma_clear_buffer(sctx, dst, offset, dma_clear_size, value);

		offset += dma_clear_size;
		size -= dma_clear_size;
	} else if (dma_clear_size >= 4) {
		si_cp_dma_clear_buffer(sctx, dst, offset, dma_clear_size, value,
				       coher, cache_policy);

		offset += dma_clear_size;
		size -= dma_clear_size;
	}

	if (size) {
		/* Handle non-dword alignment.
		 *
		 * This function is called for embedded texture metadata clears,
		 * but those should always be properly aligned. */
		assert(dst);
		assert(dst->target == PIPE_BUFFER);
		assert(size < 4);

		pipe_buffer_write(&sctx->b, dst, offset, size, &value);
	}
}

static void si_pipe_clear_buffer(struct pipe_context *ctx,
				 struct pipe_resource *dst,
				 unsigned offset, unsigned size,
				 const void *clear_value_ptr,
				 int clear_value_size)
{
	struct si_context *sctx = (struct si_context*)ctx;
	uint32_t dword_value;
	unsigned i;

	assert(offset % clear_value_size == 0);
	assert(size % clear_value_size == 0);

	if (clear_value_size > 4) {
		const uint32_t *u32 = clear_value_ptr;
		bool clear_dword_duplicated = true;

		/* See if we can lower large fills to dword fills. */
		for (i = 1; i < clear_value_size / 4; i++)
			if (u32[0] != u32[i]) {
				clear_dword_duplicated = false;
				break;
			}

		if (!clear_dword_duplicated) {
			/* Use transform feedback for 64-bit, 96-bit, and
			 * 128-bit fills.
			 */
			union pipe_color_union clear_value;

			memcpy(&clear_value, clear_value_ptr, clear_value_size);
			si_blitter_begin(sctx, SI_DISABLE_RENDER_COND);
			util_blitter_clear_buffer(sctx->blitter, dst, offset,
						  size, clear_value_size / 4,
						  &clear_value);
			si_blitter_end(sctx);
			return;
		}
	}

	/* Expand the clear value to a dword. */
	switch (clear_value_size) {
	case 1:
		dword_value = *(uint8_t*)clear_value_ptr;
		dword_value |= (dword_value << 8) |
			       (dword_value << 16) |
			       (dword_value << 24);
		break;
	case 2:
		dword_value = *(uint16_t*)clear_value_ptr;
		dword_value |= dword_value << 16;
		break;
	default:
		dword_value = *(uint32_t*)clear_value_ptr;
	}
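
	/* Worked example (illustrative): clear_value_size == 1 with byte 0x5c
	 * expands to 0x5c5c5c5c; clear_value_size == 2 with 0xabcd expands to
	 * 0xabcdabcd. */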

	si_clear_buffer(sctx, dst, offset, size, dword_value,
			SI_COHERENCY_SHADER);
}

/**
 * Realign the CP DMA engine. This must be done after a copy with an unaligned
 * size.
 *
 * \param size  Remaining size to the CP DMA alignment.
 */
static void si_cp_dma_realign_engine(struct si_context *sctx, unsigned size,
				     unsigned user_flags, enum si_coherency coher,
				     enum si_cache_policy cache_policy,
				     bool *is_first)
{
	uint64_t va;
	unsigned dma_flags = 0;
	unsigned scratch_size = SI_CPDMA_ALIGNMENT * 2;

	assert(size < SI_CPDMA_ALIGNMENT);

	/* Use the scratch buffer as the dummy buffer. The 3D engine should be
	 * idle at this point.
	 */
	if (!sctx->scratch_buffer ||
	    sctx->scratch_buffer->b.b.width0 < scratch_size) {
		r600_resource_reference(&sctx->scratch_buffer, NULL);
		sctx->scratch_buffer =
			si_aligned_buffer_create(&sctx->screen->b,
						 SI_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 scratch_size, 256);
		if (!sctx->scratch_buffer)
			return;

		si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
	}

	si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b,
			  &sctx->scratch_buffer->b.b, size, size, user_flags,
			  coher, is_first, &dma_flags);

	va = sctx->scratch_buffer->gpu_address;
	si_emit_cp_dma(sctx, va, va + SI_CPDMA_ALIGNMENT, size, dma_flags,
		       cache_policy);
}

/**
 * Do memcpy between buffers using CP DMA.
 * If src or dst is NULL, it means read or write GDS, respectively.
 *
 * \param user_flags  bitmask of SI_CPDMA_*
 */
void si_cp_dma_copy_buffer(struct si_context *sctx,
			   struct pipe_resource *dst, struct pipe_resource *src,
			   uint64_t dst_offset, uint64_t src_offset, unsigned size,
			   unsigned user_flags, enum si_coherency coher,
			   enum si_cache_policy cache_policy)
{
	uint64_t main_dst_offset, main_src_offset;
	unsigned skipped_size = 0;
	unsigned realign_size = 0;
	unsigned gds_flags = (dst ? 0 : CP_DMA_DST_IS_GDS) |
			     (src ? 0 : CP_DMA_SRC_IS_GDS);
	bool is_first = true;

	assert(size);

	if (dst) {
		/* Skip this for the L2 prefetch. */
		if (dst != src || dst_offset != src_offset) {
			/* Mark the buffer range of destination as valid (initialized),
			 * so that transfer_map knows it should wait for the GPU when mapping
			 * that range. */
			util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
				       dst_offset + size);
		}

		dst_offset += r600_resource(dst)->gpu_address;
	}
	if (src)
		src_offset += r600_resource(src)->gpu_address;

	/* The workarounds aren't needed on Fiji and beyond. */
	if (sctx->family <= CHIP_CARRIZO ||
	    sctx->family == CHIP_STONEY) {
		/* If the size is not aligned, we must add a dummy copy at the end
		 * just to align the internal counter. Otherwise, the DMA engine
		 * would slow down by an order of magnitude for following copies.
		 */
		if (size % SI_CPDMA_ALIGNMENT)
			realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);

		/* If the copy begins unaligned, we must start copying from the next
		 * aligned block and the skipped part should be copied after everything
		 * else has been copied. Only the src alignment matters, not dst.
		 *
		 * GDS doesn't need the source address to be aligned.
		 */
		if (src && src_offset % SI_CPDMA_ALIGNMENT) {
			skipped_size = SI_CPDMA_ALIGNMENT - (src_offset % SI_CPDMA_ALIGNMENT);
			/* The main part will be skipped if the size is too small. */
			skipped_size = MIN2(skipped_size, size);
			size -= skipped_size;
		}
	}
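
	/* Worked example with assumed numbers (SI_CPDMA_ALIGNMENT == 32):
	 * src_offset == 5 and size == 100 give realign_size == 28 (since
	 * 100 % 32 == 4) and skipped_size == 27, so the main loop copies 73
	 * bytes starting at the aligned source, the 27-byte head is copied
	 * afterwards, and a dummy copy realigns the engine at the end. */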

	/* Flush the caches. */
	if ((dst || src) && !(user_flags & SI_CPDMA_SKIP_GFX_SYNC)) {
		sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
			       SI_CONTEXT_CS_PARTIAL_FLUSH |
			       get_flush_flags(sctx, coher, cache_policy);
	}

	/* This is the main part doing the copying. Src is always aligned. */
	main_dst_offset = dst_offset + skipped_size;
	main_src_offset = src_offset + skipped_size;

	while (size) {
		unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
		unsigned dma_flags = gds_flags;

		si_cp_dma_prepare(sctx, dst, src, byte_count,
				  size + skipped_size + realign_size,
				  user_flags, coher, &is_first, &dma_flags);

		si_emit_cp_dma(sctx, main_dst_offset, main_src_offset,
			       byte_count, dma_flags, cache_policy);

		size -= byte_count;
		main_src_offset += byte_count;
		main_dst_offset += byte_count;
	}

	/* Copy the part we skipped because src wasn't aligned. */
	if (skipped_size) {
		unsigned dma_flags = gds_flags;

		si_cp_dma_prepare(sctx, dst, src, skipped_size,
				  skipped_size + realign_size, user_flags,
				  coher, &is_first, &dma_flags);

		si_emit_cp_dma(sctx, dst_offset, src_offset, skipped_size,
			       dma_flags, cache_policy);
	}

	/* Finally, realign the engine if the size wasn't aligned. */
	if (realign_size) {
		si_cp_dma_realign_engine(sctx, realign_size, user_flags, coher,
					 cache_policy, &is_first);
	}
}

void si_copy_buffer(struct si_context *sctx,
		    struct pipe_resource *dst, struct pipe_resource *src,
		    uint64_t dst_offset, uint64_t src_offset, unsigned size)
{
	enum si_coherency coher = SI_COHERENCY_SHADER;
	enum si_cache_policy cache_policy = get_cache_policy(sctx, coher);

	if (!size)
		return;

	si_cp_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset, size,
			      0, coher, cache_policy);

	if (cache_policy != L2_BYPASS)
		r600_resource(dst)->TC_L2_dirty = true;

	/* If it's not a prefetch... */
	if (dst_offset != src_offset)
		sctx->num_cp_dma_calls++;
}

void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
			      uint64_t offset, unsigned size)
{
	assert(sctx->chip_class >= CIK);

	si_cp_dma_copy_buffer(sctx, buf, buf, offset, offset, size,
			      SI_CPDMA_SKIP_ALL, SI_COHERENCY_SHADER, L2_LRU);
}
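
/* Illustrative example, not from the original file: prefetching a whole
 * buffer is cik_prefetch_TC_L2_async(sctx, buf, 0, buf->width0). Because
 * dst == src and the offsets match, SI_CPDMA_SKIP_ALL takes the fast path
 * in si_cp_dma_prepare, and on GFX9 si_emit_cp_dma selects V_411_NOWHERE,
 * so the packet only warms the L2 cache. */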

static void cik_prefetch_shader_async(struct si_context *sctx,
				      struct si_pm4_state *state)
{
	struct pipe_resource *bo = &state->bo[0]->b.b;
	assert(state->nbo == 1);

	cik_prefetch_TC_L2_async(sctx, bo, 0, bo->width0);
}

static void cik_prefetch_VBO_descriptors(struct si_context *sctx)
{
	if (!sctx->vertex_elements || !sctx->vertex_elements->desc_list_byte_size)
		return;

	cik_prefetch_TC_L2_async(sctx, &sctx->vb_descriptors_buffer->b.b,
				 sctx->vb_descriptors_offset,
				 sctx->vertex_elements->desc_list_byte_size);
}

/**
 * Prefetch shaders and VBO descriptors.
 *
 * \param vertex_stage_only  Whether only the API VS and VBO descriptors
 *                           should be prefetched.
 */
void cik_emit_prefetch_L2(struct si_context *sctx, bool vertex_stage_only)
{
	unsigned mask = sctx->prefetch_L2_mask;
	assert(mask);

	/* Prefetch shaders and VBO descriptors to TC L2. */
	if (sctx->chip_class >= GFX9) {
		/* Choose the right spot for the VBO prefetch. */
		if (sctx->tes_shader.cso) {
			if (mask & SI_PREFETCH_HS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
			if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (vertex_stage_only) {
				sctx->prefetch_L2_mask &= ~(SI_PREFETCH_HS |
							    SI_PREFETCH_VBO_DESCRIPTORS);
				return;
			}

			if (mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else if (sctx->gs_shader.cso) {
			if (mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (vertex_stage_only) {
				sctx->prefetch_L2_mask &= ~(SI_PREFETCH_GS |
							    SI_PREFETCH_VBO_DESCRIPTORS);
				return;
			}

			if (mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else {
			if (mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
			if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (vertex_stage_only) {
				sctx->prefetch_L2_mask &= ~(SI_PREFETCH_VS |
							    SI_PREFETCH_VBO_DESCRIPTORS);
				return;
			}
		}
	} else {
		/* Choose the right spot for the VBO prefetch. */
		if (sctx->tes_shader.cso) {
			if (mask & SI_PREFETCH_LS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.ls);
			if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (vertex_stage_only) {
				sctx->prefetch_L2_mask &= ~(SI_PREFETCH_LS |
							    SI_PREFETCH_VBO_DESCRIPTORS);
				return;
			}

			if (mask & SI_PREFETCH_HS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
			if (mask & SI_PREFETCH_ES)
				cik_prefetch_shader_async(sctx, sctx->queued.named.es);
			if (mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else if (sctx->gs_shader.cso) {
			if (mask & SI_PREFETCH_ES)
				cik_prefetch_shader_async(sctx, sctx->queued.named.es);
			if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (vertex_stage_only) {
				sctx->prefetch_L2_mask &= ~(SI_PREFETCH_ES |
							    SI_PREFETCH_VBO_DESCRIPTORS);
				return;
			}

			if (mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else {
			if (mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
			if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (vertex_stage_only) {
				sctx->prefetch_L2_mask &= ~(SI_PREFETCH_VS |
							    SI_PREFETCH_VBO_DESCRIPTORS);
				return;
			}
		}
	}

	if (mask & SI_PREFETCH_PS)
		cik_prefetch_shader_async(sctx, sctx->queued.named.ps);

	sctx->prefetch_L2_mask = 0;
}

void si_test_gds(struct si_context *sctx)
{
	struct pipe_context *ctx = &sctx->b;
	struct pipe_resource *src, *dst;
	unsigned r[4] = {};
	unsigned offset = debug_get_num_option("OFFSET", 16);

	src = pipe_buffer_create(ctx->screen, 0, PIPE_USAGE_DEFAULT, 16);
	dst = pipe_buffer_create(ctx->screen, 0, PIPE_USAGE_DEFAULT, 16);
	si_cp_dma_clear_buffer(sctx, src, 0, 4, 0xabcdef01, SI_COHERENCY_SHADER, L2_BYPASS);
	si_cp_dma_clear_buffer(sctx, src, 4, 4, 0x23456789, SI_COHERENCY_SHADER, L2_BYPASS);
	si_cp_dma_clear_buffer(sctx, src, 8, 4, 0x87654321, SI_COHERENCY_SHADER, L2_BYPASS);
	si_cp_dma_clear_buffer(sctx, src, 12, 4, 0xfedcba98, SI_COHERENCY_SHADER, L2_BYPASS);
	si_cp_dma_clear_buffer(sctx, dst, 0, 16, 0xdeadbeef, SI_COHERENCY_SHADER, L2_BYPASS);

	si_cp_dma_copy_buffer(sctx, NULL, src, offset, 0, 16, 0, SI_COHERENCY_NONE, L2_BYPASS);
	si_cp_dma_copy_buffer(sctx, dst, NULL, 0, offset, 16, 0, SI_COHERENCY_NONE, L2_BYPASS);

	pipe_buffer_read(ctx, dst, 0, sizeof(r), r);
	printf("GDS copy  = %08x %08x %08x %08x -> %s\n", r[0], r[1], r[2], r[3],
	       r[0] == 0xabcdef01 && r[1] == 0x23456789 &&
	       r[2] == 0x87654321 && r[3] == 0xfedcba98 ? "pass" : "fail");

	si_cp_dma_clear_buffer(sctx, NULL, offset, 16, 0xc1ea4146, SI_COHERENCY_NONE, L2_BYPASS);
	si_cp_dma_copy_buffer(sctx, dst, NULL, 0, offset, 16, 0, SI_COHERENCY_NONE, L2_BYPASS);

	pipe_buffer_read(ctx, dst, 0, sizeof(r), r);
	printf("GDS clear = %08x %08x %08x %08x -> %s\n", r[0], r[1], r[2], r[3],
	       r[0] == 0xc1ea4146 && r[1] == 0xc1ea4146 &&
	       r[2] == 0xc1ea4146 && r[3] == 0xc1ea4146 ? "pass" : "fail");

	pipe_resource_reference(&src, NULL);
	pipe_resource_reference(&dst, NULL);
	exit(0);
}

void si_init_cp_dma_functions(struct si_context *sctx)
{
	sctx->b.clear_buffer = si_pipe_clear_buffer;
}