/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "sid.h"

/* Recommended maximum sizes for optimal performance.
 * Fall back to compute or SDMA if the size is greater.
 */
#define CP_DMA_COPY_PERF_THRESHOLD	(64 * 1024) /* copied from Vulkan */
#define CP_DMA_CLEAR_PERF_THRESHOLD	(32 * 1024) /* guess (clear is much slower) */

/* Set this if you want the ME to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC		(1 << 0)

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define CP_DMA_RAW_WAIT		(1 << 1)
#define CP_DMA_USE_L2		(1 << 2) /* CIK+ */
#define CP_DMA_CLEAR		(1 << 3)
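
/* Typical flag usage, as set up by si_cp_dma_prepare() below: the first
 * packet of a series carries CP_DMA_RAW_WAIT (unless the caller skips the
 * pre-sync), the last one carries CP_DMA_SYNC, and CP_DMA_USE_L2 is added
 * whenever get_tc_l2_flag() decides the transfer should go through TC L2. */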

/* The max number of bytes that can be copied per packet. */
static inline unsigned cp_dma_max_byte_count(struct si_context *sctx)
{
	unsigned max = sctx->b.chip_class >= GFX9 ?
			       S_414_BYTE_COUNT_GFX9(~0u) :
			       S_414_BYTE_COUNT_GFX6(~0u);

	/* make it aligned for optimal performance */
	return max & ~(SI_CPDMA_ALIGNMENT - 1);
}
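
/* Worked example: the size must fit in bits [20:0] (see si_emit_cp_dma below),
 * so the unaligned maximum is 0x1FFFFF; masking off the low bits of
 * SI_CPDMA_ALIGNMENT (a power of two) rounds it down to an aligned maximum,
 * e.g. 0x1FFFE0 for a 32-byte alignment. The exact field widths come from
 * the S_414_BYTE_COUNT_* definitions in sid.h. */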

/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set, src_va
 * is a 32-bit clear value.
 */
static void si_emit_cp_dma(struct si_context *sctx, uint64_t dst_va,
			   uint64_t src_va, unsigned size, unsigned flags,
			   enum si_coherency coher)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
	uint32_t header = 0, command = 0;

	assert(size <= cp_dma_max_byte_count(sctx));

	if (sctx->b.chip_class >= GFX9)
		command |= S_414_BYTE_COUNT_GFX9(size);
	else
		command |= S_414_BYTE_COUNT_GFX6(size);

	if (flags & CP_DMA_SYNC)
		header |= S_411_CP_SYNC(1);
	else {
		if (sctx->b.chip_class >= GFX9)
			command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
		else
			command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
	}

	if (flags & CP_DMA_RAW_WAIT)
		command |= S_414_RAW_WAIT(1);

	/* Src and dst flags. */
	if (sctx->b.chip_class >= GFX9 && !(flags & CP_DMA_CLEAR) &&
	    src_va == dst_va)
		header |= S_411_DSL_SEL(V_411_NOWHERE); /* prefetch only */
	else if (flags & CP_DMA_USE_L2)
		header |= S_411_DSL_SEL(V_411_DST_ADDR_TC_L2);

	if (flags & CP_DMA_CLEAR)
		header |= S_411_SRC_SEL(V_411_DATA);
	else if (flags & CP_DMA_USE_L2)
		header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);

	if (sctx->b.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, header);
		radeon_emit(cs, src_va);		/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, src_va >> 32);		/* SRC_ADDR_HI [31:0] */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);		/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, command);
	} else {
		header |= S_411_SRC_ADDR_HI(src_va >> 32);

		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_va);		/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, header);		/* SRC_ADDR_HI [15:0] + flags. */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
		radeon_emit(cs, command);
	}

	/* CP DMA is executed in ME, but index buffers are read by PFP.
	 * This ensures that ME (CP DMA) is idle before PFP starts fetching
	 * indices. If we wanted to execute CP DMA in PFP, this packet
	 * should precede it.
	 */
	if (coher == SI_COHERENCY_SHADER && flags & CP_DMA_SYNC) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}
}

static unsigned get_flush_flags(struct si_context *sctx, enum si_coherency coher)
{
	switch (coher) {
	default:
	case SI_COHERENCY_NONE:
		return 0;
	case SI_COHERENCY_SHADER:
		return SI_CONTEXT_INV_SMEM_L1 |
		       SI_CONTEXT_INV_VMEM_L1 |
		       (sctx->b.chip_class == SI ? SI_CONTEXT_INV_GLOBAL_L2 : 0);
	case SI_COHERENCY_CB_META:
		return SI_CONTEXT_FLUSH_AND_INV_CB;
	}
}

static unsigned get_tc_l2_flag(struct si_context *sctx, enum si_coherency coher)
{
	if ((sctx->b.chip_class >= GFX9 && coher == SI_COHERENCY_CB_META) ||
	    (sctx->b.chip_class >= CIK && coher == SI_COHERENCY_SHADER))
		return CP_DMA_USE_L2;

	return 0;
}

static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst,
			      struct pipe_resource *src, unsigned byte_count,
			      uint64_t remaining_size, unsigned user_flags,
			      bool *is_first, unsigned *packet_flags)
{
	/* Fast exit for a CPDMA prefetch. */
	if ((user_flags & SI_CPDMA_SKIP_ALL) == SI_CPDMA_SKIP_ALL) {
		*is_first = false;
		*packet_flags = CP_DMA_USE_L2;
		return;
	}

	if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
		/* Count memory usage in so that need_cs_space can take it into account. */
		si_context_add_resource_size(sctx, dst);
		if (src)
			si_context_add_resource_size(sctx, src);
	}

	if (!(user_flags & SI_CPDMA_SKIP_CHECK_CS_SPACE))
		si_need_gfx_cs_space(sctx);

	/* This must be done after need_cs_space. */
	if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
		radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
					  (struct r600_resource*)dst,
					  RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
		if (src)
			radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
						  (struct r600_resource*)src,
						  RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
	}

	/* Flush the caches for the first copy only.
	 * Also wait for the previous CP DMA operations.
	 */
	if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC) && sctx->b.flags)
		si_emit_cache_flush(sctx);

	if (!(user_flags & SI_CPDMA_SKIP_SYNC_BEFORE) && *is_first)
		*packet_flags |= CP_DMA_RAW_WAIT;

	*is_first = false;

	/* Do the synchronization after the last dma, so that all data
	 * is written to memory.
	 */
	if (!(user_flags & SI_CPDMA_SKIP_SYNC_AFTER) &&
	    byte_count == remaining_size)
		*packet_flags |= CP_DMA_SYNC;
}
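
/* Callers drive the two helpers above in a chunking loop; a minimal sketch
 * (mirroring si_clear_buffer and si_copy_buffer below):
 *
 *	bool is_first = true;
 *	while (remaining) {
 *		unsigned count = MIN2(remaining, cp_dma_max_byte_count(sctx));
 *		unsigned flags = 0;
 *
 *		si_cp_dma_prepare(sctx, dst, src, count, remaining, 0,
 *				  &is_first, &flags);
 *		si_emit_cp_dma(sctx, dst_va, src_va, count, flags, coher);
 *
 *		dst_va += count; src_va += count; remaining -= count;
 *	}
 */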

void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
		     uint64_t offset, uint64_t size, unsigned value,
		     enum si_coherency coher)
{
	struct radeon_winsys *ws = sctx->b.ws;
	struct r600_resource *rdst = r600_resource(dst);
	unsigned tc_l2_flag = get_tc_l2_flag(sctx, coher);
	unsigned flush_flags = get_flush_flags(sctx, coher);
	uint64_t dma_clear_size;
	bool is_first = true;

	if (!size)
		return;

	dma_clear_size = size & ~3ull;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&rdst->valid_buffer_range, offset,
		       offset + dma_clear_size);

	/* dma_clear_buffer can use clear_buffer on failure. Make sure that
	 * doesn't happen. We don't want an infinite recursion: */
	if (sctx->b.dma_cs &&
	    !(dst->flags & PIPE_RESOURCE_FLAG_SPARSE) &&
	    /* CP DMA is very slow. Always use SDMA for big clears. This
	     * alone improves DeusEx:MD performance by 70%. */
	    (size > CP_DMA_CLEAR_PERF_THRESHOLD ||
	     /* Buffers not used by the GFX IB yet will be cleared by SDMA.
	      * This happens to move most buffer clears to SDMA, including
	      * DCC and CMASK clears, because pipe->clear clears them before
	      * si_emit_framebuffer_state (in a draw call) adds them.
	      * For example, DeusEx:MD has 21 buffer clears per frame and all
	      * of them are moved to SDMA thanks to this. */
	     !ws->cs_is_buffer_referenced(sctx->b.gfx_cs, rdst->buf,
					  RADEON_USAGE_READWRITE))) {
		sctx->b.dma_clear_buffer(sctx, dst, offset, dma_clear_size, value);

		offset += dma_clear_size;
		size -= dma_clear_size;
	} else if (dma_clear_size >= 4) {
		uint64_t va = rdst->gpu_address + offset;

		offset += dma_clear_size;
		size -= dma_clear_size;

		/* Flush the caches. */
		sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
				 SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;

		while (dma_clear_size) {
			unsigned byte_count = MIN2(dma_clear_size, cp_dma_max_byte_count(sctx));
			unsigned dma_flags = tc_l2_flag | CP_DMA_CLEAR;

			si_cp_dma_prepare(sctx, dst, NULL, byte_count, dma_clear_size, 0,
					  &is_first, &dma_flags);

			/* Emit the clear packet. */
			si_emit_cp_dma(sctx, va, value, byte_count, dma_flags, coher);

			dma_clear_size -= byte_count;
			va += byte_count;
		}

		if (tc_l2_flag)
			rdst->TC_L2_dirty = true;

		/* If it's not a framebuffer fast clear... */
		if (coher == SI_COHERENCY_SHADER)
			sctx->b.num_cp_dma_calls++;
	}

	if (size) {
		/* Handle non-dword alignment.
		 *
		 * This function is called for embedded texture metadata clears,
		 * but those should always be properly aligned. */
		assert(dst->target == PIPE_BUFFER);
		assert(size < 4);

		pipe_buffer_write(&sctx->b.b, dst, offset, size, &value);
	}
}
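
/* Illustrative (hypothetical) caller: zero-fill a whole buffer that shaders
 * will read afterwards:
 *
 *	si_clear_buffer(sctx, buf, 0, buf->width0, 0, SI_COHERENCY_SHADER);
 *
 * Any tail smaller than 4 bytes is written with pipe_buffer_write() above. */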

static void si_pipe_clear_buffer(struct pipe_context *ctx,
				 struct pipe_resource *dst,
				 unsigned offset, unsigned size,
				 const void *clear_value_ptr,
				 int clear_value_size)
{
	struct si_context *sctx = (struct si_context*)ctx;
	uint32_t dword_value;
	unsigned i;

	assert(offset % clear_value_size == 0);
	assert(size % clear_value_size == 0);

	if (clear_value_size > 4) {
		const uint32_t *u32 = clear_value_ptr;
		bool clear_dword_duplicated = true;

		/* See if we can lower large fills to dword fills. */
		for (i = 1; i < clear_value_size / 4; i++)
			if (u32[0] != u32[i]) {
				clear_dword_duplicated = false;
				break;
			}

		if (!clear_dword_duplicated) {
			/* Use transform feedback for 64-bit, 96-bit, and
			 * 128-bit fills.
			 */
			union pipe_color_union clear_value;

			memcpy(&clear_value, clear_value_ptr, clear_value_size);
			si_blitter_begin(sctx, SI_DISABLE_RENDER_COND);
			util_blitter_clear_buffer(sctx->blitter, dst, offset,
						  size, clear_value_size / 4,
						  &clear_value);
			si_blitter_end(sctx);
			return;
		}
	}

	/* Expand the clear value to a dword. */
	switch (clear_value_size) {
	case 1:
		dword_value = *(uint8_t*)clear_value_ptr;
		dword_value |= (dword_value << 8) |
			       (dword_value << 16) |
			       (dword_value << 24);
		break;
	case 2:
		dword_value = *(uint16_t*)clear_value_ptr;
		dword_value |= dword_value << 16;
		break;
	default:
		dword_value = *(uint32_t*)clear_value_ptr;
	}

	si_clear_buffer(sctx, dst, offset, size, dword_value,
			SI_COHERENCY_SHADER);
}

/**
 * Realign the CP DMA engine. This must be done after a copy with an unaligned
 * size.
 *
 * \param size  Remaining size to the CP DMA alignment.
 */
static void si_cp_dma_realign_engine(struct si_context *sctx, unsigned size,
				     unsigned user_flags, bool *is_first)
{
	uint64_t va;
	unsigned dma_flags = 0;
	unsigned scratch_size = SI_CPDMA_ALIGNMENT * 2;

	assert(size < SI_CPDMA_ALIGNMENT);

	/* Use the scratch buffer as the dummy buffer. The 3D engine should be
	 * idle at this point.
	 */
	if (!sctx->scratch_buffer ||
	    sctx->scratch_buffer->b.b.width0 < scratch_size) {
		r600_resource_reference(&sctx->scratch_buffer, NULL);
		sctx->scratch_buffer = (struct r600_resource*)
			si_aligned_buffer_create(&sctx->screen->b,
						 SI_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 scratch_size, 256);
		if (!sctx->scratch_buffer)
			return;

		si_mark_atom_dirty(sctx, &sctx->scratch_state);
	}

	si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b,
			  &sctx->scratch_buffer->b.b, size, size, user_flags,
			  is_first, &dma_flags);

	va = sctx->scratch_buffer->gpu_address;
	si_emit_cp_dma(sctx, va, va + SI_CPDMA_ALIGNMENT, size, dma_flags,
		       SI_COHERENCY_SHADER);
}

/**
 * Do memcpy between buffers using CP DMA.
 *
 * \param user_flags	bitmask of SI_CPDMA_*
 */
void si_copy_buffer(struct si_context *sctx,
		    struct pipe_resource *dst, struct pipe_resource *src,
		    uint64_t dst_offset, uint64_t src_offset, unsigned size,
		    unsigned user_flags)
{
	uint64_t main_dst_offset, main_src_offset;
	unsigned skipped_size = 0;
	unsigned realign_size = 0;
	unsigned tc_l2_flag = get_tc_l2_flag(sctx, SI_COHERENCY_SHADER);
	unsigned flush_flags = get_flush_flags(sctx, SI_COHERENCY_SHADER);
	bool is_first = true;

	if (!size)
		return;

	if (dst != src || dst_offset != src_offset) {
		/* Mark the buffer range of destination as valid (initialized),
		 * so that transfer_map knows it should wait for the GPU when mapping
		 * that range. */
		util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
			       dst_offset + size);
	}

	dst_offset += r600_resource(dst)->gpu_address;
	src_offset += r600_resource(src)->gpu_address;

	/* The workarounds aren't needed on Fiji and beyond. */
	if (sctx->b.family <= CHIP_CARRIZO ||
	    sctx->b.family == CHIP_STONEY) {
		/* If the size is not aligned, we must add a dummy copy at the end
		 * just to align the internal counter. Otherwise, the DMA engine
		 * would slow down by an order of magnitude for following copies.
		 */
		if (size % SI_CPDMA_ALIGNMENT)
			realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);

		/* If the copy begins unaligned, we must start copying from the next
		 * aligned block and the skipped part should be copied after everything
		 * else has been copied. Only the src alignment matters, not dst.
		 */
		if (src_offset % SI_CPDMA_ALIGNMENT) {
			skipped_size = SI_CPDMA_ALIGNMENT - (src_offset % SI_CPDMA_ALIGNMENT);
			/* The main part will be skipped if the size is too small. */
			skipped_size = MIN2(skipped_size, size);
			size -= skipped_size;
		}
	}
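
	/* Worked example (assuming SI_CPDMA_ALIGNMENT == 32): copying 100 bytes
	 * from src_offset % 32 == 8 gives realign_size = 32 - (100 % 32) = 28
	 * and skipped_size = 32 - 8 = 24, so the main loop copies 76 bytes from
	 * the aligned source, the skipped 24 bytes are copied afterwards, and a
	 * final 28-byte dummy copy realigns the engine's internal counter. */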

	/* Flush the caches. */
	if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC))
		sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
				 SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;

	/* This is the main part doing the copying. Src is always aligned. */
	main_dst_offset = dst_offset + skipped_size;
	main_src_offset = src_offset + skipped_size;

	while (size) {
		unsigned dma_flags = tc_l2_flag;
		unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));

		si_cp_dma_prepare(sctx, dst, src, byte_count,
				  size + skipped_size + realign_size,
				  user_flags, &is_first, &dma_flags);

		si_emit_cp_dma(sctx, main_dst_offset, main_src_offset,
			       byte_count, dma_flags, SI_COHERENCY_SHADER);

		size -= byte_count;
		main_src_offset += byte_count;
		main_dst_offset += byte_count;
	}

	/* Copy the part we skipped because src wasn't aligned. */
	if (skipped_size) {
		unsigned dma_flags = tc_l2_flag;

		si_cp_dma_prepare(sctx, dst, src, skipped_size,
				  skipped_size + realign_size, user_flags,
				  &is_first, &dma_flags);

		si_emit_cp_dma(sctx, dst_offset, src_offset, skipped_size,
			       dma_flags, SI_COHERENCY_SHADER);
	}

	/* Finally, realign the engine if the size wasn't aligned. */
	if (realign_size)
		si_cp_dma_realign_engine(sctx, realign_size, user_flags,
					 &is_first);

	if (tc_l2_flag)
		r600_resource(dst)->TC_L2_dirty = true;

	/* If it's not a prefetch... */
	if (dst_offset != src_offset)
		sctx->b.num_cp_dma_calls++;
}

void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
			      uint64_t offset, unsigned size)
{
	assert(sctx->b.chip_class >= CIK);

	si_copy_buffer(sctx, buf, buf, offset, offset, size, SI_CPDMA_SKIP_ALL);
}
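
/* A prefetch is expressed as a copy of the buffer onto itself at the same
 * offset with all synchronization skipped; on GFX9, si_emit_cp_dma() then
 * selects V_411_NOWHERE as the destination, so the data is only pulled into
 * the TC L2 cache. */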

static void cik_prefetch_shader_async(struct si_context *sctx,
				      struct si_pm4_state *state)
{
	struct pipe_resource *bo = &state->bo[0]->b.b;
	assert(state->nbo == 1);

	cik_prefetch_TC_L2_async(sctx, bo, 0, bo->width0);
}

static void cik_prefetch_VBO_descriptors(struct si_context *sctx)
{
	if (!sctx->vertex_elements)
		return;

	cik_prefetch_TC_L2_async(sctx, &sctx->vb_descriptors_buffer->b.b,
				 sctx->vb_descriptors_offset,
				 sctx->vertex_elements->desc_list_byte_size);
}

void cik_emit_prefetch_L2(struct si_context *sctx)
{
	/* Prefetch shaders and VBO descriptors to TC L2. */
	if (sctx->b.chip_class >= GFX9) {
		/* Choose the right spot for the VBO prefetch. */
		if (sctx->tes_shader.cso) {
			if (sctx->prefetch_L2_mask & SI_PREFETCH_HS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else if (sctx->gs_shader.cso) {
			if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else {
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
		}
	} else {
		/* Choose the right spot for the VBO prefetch. */
		if (sctx->tes_shader.cso) {
			if (sctx->prefetch_L2_mask & SI_PREFETCH_LS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.ls);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_HS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_ES)
				cik_prefetch_shader_async(sctx, sctx->queued.named.es);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else if (sctx->gs_shader.cso) {
			if (sctx->prefetch_L2_mask & SI_PREFETCH_ES)
				cik_prefetch_shader_async(sctx, sctx->queued.named.es);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else {
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
		}
	}

	if (sctx->prefetch_L2_mask & SI_PREFETCH_PS)
		cik_prefetch_shader_async(sctx, sctx->queued.named.ps);

	sctx->prefetch_L2_mask = 0;
}

void si_init_cp_dma_functions(struct si_context *sctx)
{
	sctx->b.b.clear_buffer = si_pipe_clear_buffer;
}