/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe.h"
#include "r600d.h"
#include "util/u_memory.h"
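
/* Make sure the GFX CS has space for at least num_dw more dwords, flushing
 * asynchronously beforehand if the IB would overflow or if the referenced
 * buffers exceed the memory limit. When count_draw_in is set, also reserve
 * room for re-emitting all dirty state atoms plus one worst-case draw. */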
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
			boolean count_draw_in)
{
	struct radeon_winsys_cs *dma = ctx->b.dma.cs;

	/* Flush the DMA IB if it's not empty. */
	if (dma && dma->cdw)
		ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);

	if (!ctx->b.ws->cs_memory_below_limit(ctx->b.gfx.cs, ctx->b.vram, ctx->b.gtt)) {
		ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
		return;
	}

	/* All will be accounted for once the relocations are emitted. */
	ctx->b.gtt = 0;
	ctx->b.vram = 0;
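
	/* From here on, accumulate a worst-case estimate of how many dwords
	 * this CS may still need, and flush early if it would not fit. */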
	/* The number of dwords we already used in the CS so far. */
	num_dw += ctx->b.gfx.cs->cdw;

	if (count_draw_in) {
		uint64_t mask;

		/* The number of dwords all the dirty states would take. */
		mask = ctx->dirty_atoms;
		while (mask != 0) {
			num_dw += ctx->atoms[u_bit_scan64(&mask)]->num_dw;
		}

		/* The upper-bound of how much space a draw command would take. */
		num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
	}

	/* Count in queries_suspend. */
	num_dw += ctx->b.num_cs_dw_queries_suspend;

	/* Count in streamout_end at the end of CS. */
	if (ctx->b.streamout.begin_emitted) {
		num_dw += ctx->b.streamout.num_dw_for_end;
	}
	/* SX_MISC is re-emitted at the end of the CS on R600
	 * (see r600_context_gfx_flush): one SET_CONTEXT_REG packet, 3 dwords. */
	if (ctx->b.chip_class == R600) {
		num_dw += 3;
	}

	/* Count in framebuffer cache flushes at the end of CS. */
	num_dw += R600_MAX_FLUSH_CS_DWORDS;

	/* The fence at the end of CS. */
	num_dw += 10;

	/* Flush if there's not enough space. */
	if (num_dw > ctx->b.gfx.cs->max_dw) {
		ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
	}
}
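
/* Turn the flush flags accumulated in rctx->b.flags into actual packets:
 * EVENT_WRITE flushes, a SURFACE_SYNC carrying the collected CP_COHER_CNTL
 * bits, and a WAIT_UNTIL (or a PS partial flush on Cayman+) when the caller
 * asked for the GPU to go idle. */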
void r600_flush_emit(struct r600_context *rctx)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	unsigned cp_coher_cntl = 0;
	unsigned wait_until = 0;

	if (rctx->b.flags & R600_CONTEXT_WAIT_3D_IDLE) {
		wait_until |= S_008040_WAIT_3D_IDLE(1);
	}
	if (rctx->b.flags & R600_CONTEXT_WAIT_CP_DMA_IDLE) {
		wait_until |= S_008040_WAIT_CP_DMA_IDLE(1);
	}

	if (wait_until) {
		/* Use of WAIT_UNTIL is deprecated on Cayman+ */
		if (rctx->b.family >= CHIP_CAYMAN) {
			/* emit a PS partial flush on Cayman/TN */
			rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
		}
	}
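
	/* The requested flushes are emitted as EVENT_WRITE packets below;
	 * cache-invalidation bits are collected in cp_coher_cntl and emitted
	 * as a single SURFACE_SYNC packet afterwards. */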
	if (rctx->b.flags & R600_CONTEXT_PS_PARTIAL_FLUSH) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
	}

	if (rctx->b.chip_class >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_CB_META)) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0);
	}

	if (rctx->b.chip_class >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_DB_META)) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0);

		/* Set FULL_CACHE_ENA for DB META flushes on r7xx and later.
		 *
		 * This hack predates use of FLUSH_AND_INV_DB_META, so it's
		 * unclear whether it's still needed or even whether it has
		 * any effect.
		 */
		cp_coher_cntl |= S_0085F0_FULL_CACHE_ENA(1);
	}

	if (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV ||
	    (rctx->b.chip_class == R600 && rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH)) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
	}
	if (rctx->b.flags & R600_CONTEXT_INV_CONST_CACHE) {
		/* Direct constant addressing uses the shader cache.
		 * Indirect constant addressing uses the vertex cache. */
		cp_coher_cntl |= S_0085F0_SH_ACTION_ENA(1) |
				 (rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1)
							 : S_0085F0_TC_ACTION_ENA(1));
	}
	if (rctx->b.flags & R600_CONTEXT_INV_VERTEX_CACHE) {
		cp_coher_cntl |= rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1)
							: S_0085F0_TC_ACTION_ENA(1);
	}
	if (rctx->b.flags & R600_CONTEXT_INV_TEX_CACHE) {
		/* Textures use the texture cache.
		 * Texture buffer objects use the vertex cache. */
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
				 (rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1) : 0);
	}
	/* Don't use the DB CP COHER logic on r6xx. */
	if (rctx->b.chip_class >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_DB)) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				S_0085F0_DB_DEST_BASE_ENA(1) |
				S_0085F0_SMX_ACTION_ENA(1);
	}

	/* Don't use the CB CP COHER logic on r6xx. */
	if (rctx->b.chip_class >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_CB)) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				S_0085F0_CB0_DEST_BASE_ENA(1) |
				S_0085F0_CB1_DEST_BASE_ENA(1) |
				S_0085F0_CB2_DEST_BASE_ENA(1) |
				S_0085F0_CB3_DEST_BASE_ENA(1) |
				S_0085F0_CB4_DEST_BASE_ENA(1) |
				S_0085F0_CB5_DEST_BASE_ENA(1) |
				S_0085F0_CB6_DEST_BASE_ENA(1) |
				S_0085F0_CB7_DEST_BASE_ENA(1) |
				S_0085F0_SMX_ACTION_ENA(1);
		if (rctx->b.chip_class >= EVERGREEN)
			cp_coher_cntl |= S_0085F0_CB8_DEST_BASE_ENA(1) |
					S_0085F0_CB9_DEST_BASE_ENA(1) |
					S_0085F0_CB10_DEST_BASE_ENA(1) |
					S_0085F0_CB11_DEST_BASE_ENA(1);
	}

	if (rctx->b.chip_class >= R700 &&
	    rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH) {
		cp_coher_cntl |= S_0085F0_SO0_DEST_BASE_ENA(1) |
				S_0085F0_SO1_DEST_BASE_ENA(1) |
				S_0085F0_SO2_DEST_BASE_ENA(1) |
				S_0085F0_SO3_DEST_BASE_ENA(1) |
				S_0085F0_SMX_ACTION_ENA(1);
	}
	/* Workaround for buggy flushing on some R6xx chipsets. */
	if ((rctx->b.flags & (R600_CONTEXT_FLUSH_AND_INV |
			      R600_CONTEXT_STREAMOUT_FLUSH)) &&
	    (rctx->b.family == CHIP_RV670 ||
	     rctx->b.family == CHIP_RS780 ||
	     rctx->b.family == CHIP_RS880)) {
		cp_coher_cntl |= S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_DEST_BASE_0_ENA(1);
	}

	if (cp_coher_cntl) {
		cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
		cs->buf[cs->cdw++] = cp_coher_cntl;	/* CP_COHER_CNTL */
		cs->buf[cs->cdw++] = 0xffffffff;	/* CP_COHER_SIZE */
		cs->buf[cs->cdw++] = 0;			/* CP_COHER_BASE */
		cs->buf[cs->cdw++] = 0x0000000A;	/* POLL_INTERVAL */
	}
	if (rctx->b.flags & R600_CONTEXT_START_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) |
				EVENT_INDEX(0));
	} else if (rctx->b.flags & R600_CONTEXT_STOP_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_STOP) |
				EVENT_INDEX(0));
	}

	if (wait_until) {
		/* Use of WAIT_UNTIL is deprecated on Cayman+ */
		if (rctx->b.family < CHIP_CAYMAN) {
			/* wait for things to settle */
			radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
		}
	}

	/* everything is properly flushed */
	rctx->b.flags = 0;
}
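
/* Flush the current GFX command stream: emit all pending cache flushes,
 * submit the CS to the winsys, and begin a new one. */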
void r600_context_gfx_flush(void *context, unsigned flags,
			    struct pipe_fence_handle **fence)
{
	struct r600_context *ctx = context;
	struct radeon_winsys_cs *cs = ctx->b.gfx.cs;

	if (cs->cdw == ctx->b.initial_gfx_cs_size && !fence)
		return;

	r600_preflush_suspend_features(&ctx->b);

	/* flush the framebuffer cache */
	ctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV |
			R600_CONTEXT_FLUSH_AND_INV_CB |
			R600_CONTEXT_FLUSH_AND_INV_DB |
			R600_CONTEXT_FLUSH_AND_INV_CB_META |
			R600_CONTEXT_FLUSH_AND_INV_DB_META |
			R600_CONTEXT_WAIT_3D_IDLE |
			R600_CONTEXT_WAIT_CP_DMA_IDLE;

	r600_flush_emit(ctx);

	/* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
	if (ctx->b.chip_class == R600) {
		radeon_set_context_reg(cs, R_028350_SX_MISC, 0);
	}

	/* force to keep tiling flags */
	flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;

	ctx->b.ws->cs_flush(cs, flags, fence);

	r600_begin_new_cs(ctx);
}
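
/* Start a fresh CS: emit the start-of-CS command buffer and mark all
 * persistent state dirty so that it is re-emitted into the new CS. */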
void r600_begin_new_cs(struct r600_context *ctx)
{
	unsigned shader;

	/* Begin a new CS. */
	r600_emit_command_buffer(ctx->b.gfx.cs, &ctx->start_cs_cmd);
	/* Re-emit states. */
	r600_mark_atom_dirty(ctx, &ctx->alphatest_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->blend_color.atom);
	r600_mark_atom_dirty(ctx, &ctx->cb_misc_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->clip_misc_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->clip_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->db_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->framebuffer.atom);
	r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_PS].atom);
	r600_mark_atom_dirty(ctx, &ctx->poly_offset_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->vgt_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
	ctx->scissor.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
	ctx->scissor.atom.num_dw = R600_MAX_VIEWPORTS * 4;
	r600_mark_atom_dirty(ctx, &ctx->scissor.atom);
	ctx->viewport.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
	ctx->viewport.atom.num_dw = R600_MAX_VIEWPORTS * 8;
	r600_mark_atom_dirty(ctx, &ctx->viewport.atom);
	if (ctx->b.chip_class <= EVERGREEN) {
		r600_mark_atom_dirty(ctx, &ctx->config_state.atom);
	}
	r600_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
	r600_mark_atom_dirty(ctx, &ctx->vertex_fetch_shader.atom);
	r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_ES].atom);
	r600_mark_atom_dirty(ctx, &ctx->shader_stages.atom);
	if (ctx->gs_shader) {
		r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_GS].atom);
		r600_mark_atom_dirty(ctx, &ctx->gs_rings.atom);
	}
	if (ctx->tes_shader) {
		r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[EG_HW_STAGE_HS].atom);
		r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[EG_HW_STAGE_LS].atom);
	}
	r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_VS].atom);
	r600_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
	r600_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);

	if (ctx->blend_state.cso)
		r600_mark_atom_dirty(ctx, &ctx->blend_state.atom);
	if (ctx->dsa_state.cso)
		r600_mark_atom_dirty(ctx, &ctx->dsa_state.atom);
	if (ctx->rasterizer_state.cso)
		r600_mark_atom_dirty(ctx, &ctx->rasterizer_state.atom);

	if (ctx->b.chip_class <= R700) {
		r600_mark_atom_dirty(ctx, &ctx->seamless_cube_map.atom);
	}

	ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
	r600_vertex_buffers_dirty(ctx);
	/* Re-emit shader resources. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *constbuf = &ctx->constbuf_state[shader];
		struct r600_textures_info *samplers = &ctx->samplers[shader];

		constbuf->dirty_mask = constbuf->enabled_mask;
		samplers->views.dirty_mask = samplers->views.enabled_mask;
		samplers->states.dirty_mask = samplers->states.enabled_mask;

		r600_constant_buffers_dirty(ctx, constbuf);
		r600_sampler_views_dirty(ctx, &samplers->views);
		r600_sampler_states_dirty(ctx, &samplers->states);
	}

	r600_postflush_resume_features(&ctx->b);

	/* Re-emit the draw state. */
	ctx->last_primitive_type = -1;
	ctx->last_start_instance = -1;

	ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->cdw;
}
/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
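
/* Copy between buffers using the CP DMA engine on the GFX ring. The copy is
 * split into chunks of at most CP_DMA_MAX_BYTE_COUNT bytes; the caches are
 * flushed before the first chunk, and CP_SYNC is set only on the last chunk
 * so that later packets wait for the whole copy to complete. */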
void r600_cp_dma_copy_buffer(struct r600_context *rctx,
			     struct pipe_resource *dst, uint64_t dst_offset,
			     struct pipe_resource *src, uint64_t src_offset,
			     unsigned size)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;

	assert(rctx->screen->b.has_cp_dma);

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
		       dst_offset + size);

	dst_offset += r600_resource(dst)->gpu_address;
	src_offset += r600_resource(src)->gpu_address;

	/* Flush the caches where the resources are bound. */
	rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
			 R600_CONTEXT_INV_VERTEX_CACHE |
			 R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV_DB |
			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
			 R600_CONTEXT_FLUSH_AND_INV_DB_META |
			 R600_CONTEXT_STREAMOUT_FLUSH |
			 R600_CONTEXT_WAIT_3D_IDLE;
	/* There are differences between R700 and EG in CP DMA,
	 * but we only use the common bits here. */
	while (size) {
		unsigned sync = 0;
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
		unsigned src_reloc, dst_reloc;

		r600_need_cs_space(rctx, 10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE);

		/* Flush the caches for the first copy only. */
		if (rctx->b.flags) {
			r600_flush_emit(rctx);
		}

		/* Do the synchronization after the last copy, so that all data is written to memory. */
		if (size == byte_count) {
			sync = PKT3_CP_DMA_CP_SYNC;
		}

		/* This must be done after r600_need_cs_space. */
		src_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, (struct r600_resource*)src,
						      RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
		dst_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, (struct r600_resource*)dst,
						      RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);

		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_offset);			/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, sync | ((src_offset >> 32) & 0xff));	/* CP_SYNC [31] | SRC_ADDR_HI [7:0] */
		radeon_emit(cs, dst_offset);			/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_offset >> 32) & 0xff);	/* DST_ADDR_HI [7:0] */
		radeon_emit(cs, byte_count);			/* COMMAND [29:22] | BYTE_COUNT [20:0] */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, src_reloc);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, dst_reloc);

		size -= byte_count;
		src_offset += byte_count;
		dst_offset += byte_count;
	}

	/* Invalidate the read caches. */
	rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
			 R600_CONTEXT_INV_VERTEX_CACHE |
			 R600_CONTEXT_INV_TEX_CACHE;
}
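
/* Copy between buffers using the asynchronous DMA (SDMA) ring. The size is
 * converted to dwords and the low two address bits are masked off, so offsets
 * and size are expected to be at least dword-aligned. */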
void r600_dma_copy_buffer(struct r600_context *rctx,
			  struct pipe_resource *dst,
			  struct pipe_resource *src,
			  uint64_t dst_offset,
			  uint64_t src_offset,
			  uint64_t size)
{
	struct radeon_winsys_cs *cs = rctx->b.dma.cs;
	unsigned i, ncopy, csize;
	struct r600_resource *rdst = (struct r600_resource*)dst;
	struct r600_resource *rsrc = (struct r600_resource*)src;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&rdst->valid_buffer_range, dst_offset,
		       dst_offset + size);
	size >>= 2; /* convert to dwords */
	ncopy = (size / R600_DMA_COPY_MAX_SIZE_DW) + !!(size % R600_DMA_COPY_MAX_SIZE_DW);

	r600_need_dma_space(&rctx->b, ncopy * 5);
	for (i = 0; i < ncopy; i++) {
		csize = size < R600_DMA_COPY_MAX_SIZE_DW ? size : R600_DMA_COPY_MAX_SIZE_DW;
		/* emit reloc before writing cs so that cs is always in consistent state */
		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rsrc, RADEON_USAGE_READ,
					  RADEON_PRIO_SDMA_BUFFER);
		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rdst, RADEON_USAGE_WRITE,
					  RADEON_PRIO_SDMA_BUFFER);
		cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize);
		cs->buf[cs->cdw++] = dst_offset & 0xfffffffc;
		cs->buf[cs->cdw++] = src_offset & 0xfffffffc;
		cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
		cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
		dst_offset += csize << 2;
		src_offset += csize << 2;
		size -= csize;
	}
}