radeonsi: disable SDMA clears and copies for sparse buffers
src/gallium/drivers/radeonsi/cik_sdma.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014,2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */

#include "sid.h"
#include "si_pipe.h"

static void cik_sdma_copy_buffer(struct si_context *ctx,
                                 struct pipe_resource *dst,
                                 struct pipe_resource *src,
                                 uint64_t dst_offset,
                                 uint64_t src_offset,
                                 uint64_t size)
{
        struct radeon_winsys_cs *cs = ctx->b.dma.cs;
        unsigned i, ncopy, csize;
        struct r600_resource *rdst = r600_resource(dst);
        struct r600_resource *rsrc = r600_resource(src);

        /* Mark the buffer range of destination as valid (initialized),
         * so that transfer_map knows it should wait for the GPU when mapping
         * that range. */
        util_range_add(&rdst->valid_buffer_range, dst_offset,
                       dst_offset + size);

        dst_offset += rdst->gpu_address;
        src_offset += rsrc->gpu_address;

        ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
        r600_need_dma_space(&ctx->b, ncopy * 7, rdst, rsrc);

        for (i = 0; i < ncopy; i++) {
                csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
                radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
                                                CIK_SDMA_COPY_SUB_OPCODE_LINEAR,
                                                0));
                radeon_emit(cs, ctx->b.chip_class >= GFX9 ? csize - 1 : csize);
                radeon_emit(cs, 0); /* src/dst endian swap */
                radeon_emit(cs, src_offset);
                radeon_emit(cs, src_offset >> 32);
                radeon_emit(cs, dst_offset);
                radeon_emit(cs, dst_offset >> 32);
                dst_offset += csize;
                src_offset += csize;
                size -= csize;
        }
}
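/* Worked example (a sketch; the exact value of CIK_SDMA_COPY_MAX_SIZE comes
 * from sid.h and is roughly 4 MB): copying a 10 MB buffer splits into
 * ncopy = DIV_ROUND_UP(10 MB, ~4 MB) = 3 linear COPY packets of 7 dwords
 * each, so 21 dwords are reserved up front. Each iteration copies
 * MIN2(remaining, max) bytes and advances both GPU addresses by that amount.
 */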

static void cik_sdma_clear_buffer(struct pipe_context *ctx,
                                  struct pipe_resource *dst,
                                  uint64_t offset,
                                  uint64_t size,
                                  unsigned clear_value)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct radeon_winsys_cs *cs = sctx->b.dma.cs;
        unsigned i, ncopy, csize;
        struct r600_resource *rdst = r600_resource(dst);

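        /* Fall back to the generic clear for anything SDMA can't handle:
         * no DMA CS, offsets or sizes that aren't dword-aligned, and
         * sparse (PRT) buffers, for which SDMA clears are disabled. */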
        if (!cs || offset % 4 != 0 || size % 4 != 0 ||
            dst->flags & PIPE_RESOURCE_FLAG_SPARSE) {
                ctx->clear_buffer(ctx, dst, offset, size, &clear_value, 4);
                return;
        }

        /* Mark the buffer range of destination as valid (initialized),
         * so that transfer_map knows it should wait for the GPU when mapping
         * that range. */
        util_range_add(&rdst->valid_buffer_range, offset, offset + size);

        offset += rdst->gpu_address;

        /* the same maximum size as for copying */
        ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
        r600_need_dma_space(&sctx->b, ncopy * 5, rdst, NULL);

        for (i = 0; i < ncopy; i++) {
                csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
                radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_PACKET_CONSTANT_FILL, 0,
                                                0x8000 /* dword copy */));
                radeon_emit(cs, offset);
                radeon_emit(cs, offset >> 32);
                radeon_emit(cs, clear_value);
                radeon_emit(cs, sctx->b.chip_class >= GFX9 ? csize - 1 : csize);
                offset += csize;
                size -= csize;
        }
}
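/* Usage sketch (values are illustrative): clearing 64 KB at a dword-aligned
 * offset takes a single 5-dword CONSTANT_FILL packet; the 0x8000 field
 * selects the dword-sized fill, which is why offset and size must both be
 * multiples of 4. */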

static unsigned minify_as_blocks(unsigned width, unsigned level, unsigned blk_w)
{
        width = u_minify(width, level);
        return DIV_ROUND_UP(width, blk_w);
}
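/* Example: for a block-compressed texture with width0 = 130 and blk_w = 4,
 * level 1 minifies to 65 texels, which rounds up to 17 blocks. */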

static unsigned encode_tile_info(struct si_context *sctx,
                                 struct r600_texture *tex, unsigned level,
                                 bool set_bpp)
{
        struct radeon_info *info = &sctx->screen->b.info;
        unsigned tile_index = tex->surface.u.legacy.tiling_index[level];
        unsigned macro_tile_index = tex->surface.u.legacy.macro_tile_index;
        unsigned tile_mode = info->si_tile_mode_array[tile_index];
        unsigned macro_tile_mode = info->cik_macrotile_mode_array[macro_tile_index];

        return (set_bpp ? util_logbase2(tex->surface.bpe) : 0) |
               (G_009910_ARRAY_MODE(tile_mode) << 3) |
               (G_009910_MICRO_TILE_MODE_NEW(tile_mode) << 8) |
               /* Non-depth modes don't have TILE_SPLIT set. */
               ((util_logbase2(tex->surface.u.legacy.tile_split >> 6)) << 11) |
               (G_009990_BANK_WIDTH(macro_tile_mode) << 15) |
               (G_009990_BANK_HEIGHT(macro_tile_mode) << 18) |
               (G_009990_NUM_BANKS(macro_tile_mode) << 21) |
               (G_009990_MACRO_TILE_ASPECT(macro_tile_mode) << 24) |
               (G_009910_PIPE_CONFIG(tile_mode) << 26);
}
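/* Resulting bitfield layout, read off the shifts above (field widths are
 * inferred from where the next field starts):
 *   [2:0]   log2(bytes per element)  (only when set_bpp)
 *   [7:3]   ARRAY_MODE
 *   [10:8]  MICRO_TILE_MODE_NEW
 *   [14:11] log2(TILE_SPLIT / 64)
 *   [17:15] BANK_WIDTH
 *   [20:18] BANK_HEIGHT
 *   [23:21] NUM_BANKS
 *   [25:24] MACRO_TILE_ASPECT
 *   [30:26] PIPE_CONFIG
 */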

static bool cik_sdma_copy_texture(struct si_context *sctx,
                                  struct pipe_resource *dst,
                                  unsigned dst_level,
                                  unsigned dstx, unsigned dsty, unsigned dstz,
                                  struct pipe_resource *src,
                                  unsigned src_level,
                                  const struct pipe_box *src_box)
{
        struct radeon_info *info = &sctx->screen->b.info;
        struct r600_texture *rsrc = (struct r600_texture*)src;
        struct r600_texture *rdst = (struct r600_texture*)dst;
        unsigned bpp = rdst->surface.bpe;
        uint64_t dst_address = rdst->resource.gpu_address +
                               rdst->surface.u.legacy.level[dst_level].offset;
        uint64_t src_address = rsrc->resource.gpu_address +
                               rsrc->surface.u.legacy.level[src_level].offset;
        unsigned dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
        unsigned src_mode = rsrc->surface.u.legacy.level[src_level].mode;
        unsigned dst_tile_index = rdst->surface.u.legacy.tiling_index[dst_level];
        unsigned src_tile_index = rsrc->surface.u.legacy.tiling_index[src_level];
        unsigned dst_tile_mode = info->si_tile_mode_array[dst_tile_index];
        unsigned src_tile_mode = info->si_tile_mode_array[src_tile_index];
        unsigned dst_micro_mode = G_009910_MICRO_TILE_MODE_NEW(dst_tile_mode);
        unsigned src_micro_mode = G_009910_MICRO_TILE_MODE_NEW(src_tile_mode);
        unsigned dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x;
        unsigned src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x;
        uint64_t dst_slice_pitch = rdst->surface.u.legacy.level[dst_level].slice_size / bpp;
        uint64_t src_slice_pitch = rsrc->surface.u.legacy.level[src_level].slice_size / bpp;
        unsigned dst_width = minify_as_blocks(rdst->resource.b.b.width0,
                                              dst_level, rdst->surface.blk_w);
        unsigned src_width = minify_as_blocks(rsrc->resource.b.b.width0,
                                              src_level, rsrc->surface.blk_w);
        unsigned dst_height = minify_as_blocks(rdst->resource.b.b.height0,
                                               dst_level, rdst->surface.blk_h);
        unsigned src_height = minify_as_blocks(rsrc->resource.b.b.height0,
                                               src_level, rsrc->surface.blk_h);
        unsigned srcx = src_box->x / rsrc->surface.blk_w;
        unsigned srcy = src_box->y / rsrc->surface.blk_h;
        unsigned srcz = src_box->z;
        unsigned copy_width = DIV_ROUND_UP(src_box->width, rsrc->surface.blk_w);
        unsigned copy_height = DIV_ROUND_UP(src_box->height, rsrc->surface.blk_h);
        unsigned copy_depth = src_box->depth;

        assert(src_level <= src->last_level);
        assert(dst_level <= dst->last_level);
        assert(rdst->surface.u.legacy.level[dst_level].offset +
               dst_slice_pitch * bpp * (dstz + src_box->depth) <=
               rdst->resource.buf->size);
        assert(rsrc->surface.u.legacy.level[src_level].offset +
               src_slice_pitch * bpp * (srcz + src_box->depth) <=
               rsrc->resource.buf->size);

        if (!r600_prepare_for_dma_blit(&sctx->b, rdst, dst_level, dstx, dsty,
                                       dstz, rsrc, src_level, src_box))
                return false;

        dstx /= rdst->surface.blk_w;
        dsty /= rdst->surface.blk_h;

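        /* SDMA sub-window packets encode x/y in 14-bit fields and z in an
         * 11-bit field, so coordinates beyond those ranges can't be
         * expressed at all. */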
        if (srcx >= (1 << 14) ||
            srcy >= (1 << 14) ||
            srcz >= (1 << 11) ||
            dstx >= (1 << 14) ||
            dsty >= (1 << 14) ||
            dstz >= (1 << 11))
                return false;

        /* Linear -> linear sub-window copy. */
        if (dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED &&
            src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED &&
            /* check if everything fits into the bitfields */
            src_pitch <= (1 << 14) &&
            dst_pitch <= (1 << 14) &&
            src_slice_pitch <= (1 << 28) &&
            dst_slice_pitch <= (1 << 28) &&
            copy_width <= (1 << 14) &&
            copy_height <= (1 << 14) &&
            copy_depth <= (1 << 11) &&
            /* HW limitation - CIK: */
            (sctx->b.chip_class != CIK ||
             (copy_width < (1 << 14) &&
              copy_height < (1 << 14) &&
              copy_depth < (1 << 11))) &&
            /* HW limitation - some CIK parts: */
            ((sctx->b.family != CHIP_BONAIRE &&
              sctx->b.family != CHIP_KAVERI) ||
             (srcx + copy_width != (1 << 14) &&
              srcy + copy_height != (1 << 14)))) {
                struct radeon_winsys_cs *cs = sctx->b.dma.cs;

                r600_need_dma_space(&sctx->b, 13, &rdst->resource, &rsrc->resource);

                radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
                                                CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
                            (util_logbase2(bpp) << 29));
                radeon_emit(cs, src_address);
                radeon_emit(cs, src_address >> 32);
                radeon_emit(cs, srcx | (srcy << 16));
                radeon_emit(cs, srcz | ((src_pitch - 1) << 16));
                radeon_emit(cs, src_slice_pitch - 1);
                radeon_emit(cs, dst_address);
                radeon_emit(cs, dst_address >> 32);
                radeon_emit(cs, dstx | (dsty << 16));
                radeon_emit(cs, dstz | ((dst_pitch - 1) << 16));
                radeon_emit(cs, dst_slice_pitch - 1);
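                /* CIK encodes the copy extent as raw counts; VI and later
                 * encode it as count minus one. */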
                if (sctx->b.chip_class == CIK) {
                        radeon_emit(cs, copy_width | (copy_height << 16));
                        radeon_emit(cs, copy_depth);
                } else {
                        radeon_emit(cs, (copy_width - 1) | ((copy_height - 1) << 16));
                        radeon_emit(cs, (copy_depth - 1));
                }
                return true;
        }

        /* Tiled <-> linear sub-window copy. */
        if ((src_mode >= RADEON_SURF_MODE_1D) != (dst_mode >= RADEON_SURF_MODE_1D)) {
                struct r600_texture *tiled = src_mode >= RADEON_SURF_MODE_1D ? rsrc : rdst;
                struct r600_texture *linear = tiled == rsrc ? rdst : rsrc;
                unsigned tiled_level = tiled == rsrc ? src_level : dst_level;
                unsigned linear_level = linear == rsrc ? src_level : dst_level;
                unsigned tiled_x = tiled == rsrc ? srcx : dstx;
                unsigned linear_x = linear == rsrc ? srcx : dstx;
                unsigned tiled_y = tiled == rsrc ? srcy : dsty;
                unsigned linear_y = linear == rsrc ? srcy : dsty;
                unsigned tiled_z = tiled == rsrc ? srcz : dstz;
                unsigned linear_z = linear == rsrc ? srcz : dstz;
                unsigned tiled_width = tiled == rsrc ? src_width : dst_width;
                unsigned linear_width = linear == rsrc ? src_width : dst_width;
                unsigned tiled_pitch = tiled == rsrc ? src_pitch : dst_pitch;
                unsigned linear_pitch = linear == rsrc ? src_pitch : dst_pitch;
                unsigned tiled_slice_pitch = tiled == rsrc ? src_slice_pitch : dst_slice_pitch;
                unsigned linear_slice_pitch = linear == rsrc ? src_slice_pitch : dst_slice_pitch;
                uint64_t tiled_address = tiled == rsrc ? src_address : dst_address;
                uint64_t linear_address = linear == rsrc ? src_address : dst_address;
                unsigned tiled_micro_mode = tiled == rsrc ? src_micro_mode : dst_micro_mode;

                assert(tiled_pitch % 8 == 0);
                assert(tiled_slice_pitch % 64 == 0);
                unsigned pitch_tile_max = tiled_pitch / 8 - 1;
                unsigned slice_tile_max = tiled_slice_pitch / 64 - 1;
                unsigned xalign = MAX2(1, 4 / bpp);
                unsigned copy_width_aligned = copy_width;

                /* If the region ends at the last pixel and is unaligned, we
                 * can copy the remainder of the line that is not visible to
                 * make it aligned.
                 */
                if (copy_width % xalign != 0 &&
                    linear_x + copy_width == linear_width &&
                    tiled_x + copy_width == tiled_width &&
                    linear_x + align(copy_width, xalign) <= linear_pitch &&
                    tiled_x + align(copy_width, xalign) <= tiled_pitch)
                        copy_width_aligned = align(copy_width, xalign);

                /* HW limitations. */
                if ((sctx->b.family == CHIP_BONAIRE ||
                     sctx->b.family == CHIP_KAVERI) &&
                    linear_pitch - 1 == 0x3fff &&
                    bpp == 16)
                        return false;

                if (sctx->b.chip_class == CIK &&
                    (copy_width_aligned == (1 << 14) ||
                     copy_height == (1 << 14) ||
                     copy_depth == (1 << 11)))
                        return false;

                if ((sctx->b.family == CHIP_BONAIRE ||
                     sctx->b.family == CHIP_KAVERI ||
                     sctx->b.family == CHIP_KABINI ||
                     sctx->b.family == CHIP_MULLINS) &&
                    (tiled_x + copy_width == (1 << 14) ||
                     tiled_y + copy_height == (1 << 14)))
                        return false;

                /* The hw can read outside of the given linear buffer bounds,
                 * or, in the case of writes, access those pages without
                 * touching the memory (which still triggers a VM fault).
                 *
                 * Out-of-bounds memory accesses and page directory accesses
                 * must both be prevented.
                 */
                int64_t start_linear_address, end_linear_address;
                unsigned granularity;

                /* Deduce the size of reads from the linear surface. */
                switch (tiled_micro_mode) {
                case V_009910_ADDR_SURF_DISPLAY_MICRO_TILING:
                        granularity = bpp == 1 ? 64 / (8*bpp) :
                                                 128 / (8*bpp);
                        break;
                case V_009910_ADDR_SURF_THIN_MICRO_TILING:
                case V_009910_ADDR_SURF_DEPTH_MICRO_TILING:
                        if (0 /* TODO: THICK microtiling */)
                                granularity = bpp == 1 ? 32 / (8*bpp) :
                                              bpp == 2 ? 64 / (8*bpp) :
                                              bpp <= 8 ? 128 / (8*bpp) :
                                                         256 / (8*bpp);
                        else
                                granularity = bpp <= 2 ? 64 / (8*bpp) :
                                              bpp <= 8 ? 128 / (8*bpp) :
                                                         256 / (8*bpp);
                        break;
                default:
                        return false;
                }
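                /* Example (values follow directly from the table above):
                 * display micro tiling with bpp = 4 gives
                 * granularity = 128 / 32 = 4 elements; thin/depth tiling
                 * with bpp = 8 gives 128 / 64 = 2 elements. */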

                /* The linear reads start at tiled_x & ~(granularity - 1).
                 * If linear_x == 0 && tiled_x % granularity != 0, the hw
                 * starts reading from an address preceding linear_address!!!
                 */
                start_linear_address =
                        linear->surface.u.legacy.level[linear_level].offset +
                        bpp * (linear_z * linear_slice_pitch +
                               linear_y * linear_pitch +
                               linear_x);
                start_linear_address -= (int)(bpp * (tiled_x % granularity));

                end_linear_address =
                        linear->surface.u.legacy.level[linear_level].offset +
                        bpp * ((linear_z + copy_depth - 1) * linear_slice_pitch +
                               (linear_y + copy_height - 1) * linear_pitch +
                               (linear_x + copy_width));

                if ((tiled_x + copy_width) % granularity)
                        end_linear_address += granularity -
                                              (tiled_x + copy_width) % granularity;

                if (start_linear_address < 0 ||
                    end_linear_address > linear->surface.surf_size)
                        return false;
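                /* Example: with granularity = 4 and tiled_x = 6, the hw
                 * begins reading at element 4, so the read window starts
                 * 2 * bpp bytes before the first copied element; the signed
                 * check above rejects cases where that would precede the
                 * start of the buffer. */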

                /* Check requirements. */
                if (tiled_address % 256 == 0 &&
                    linear_address % 4 == 0 &&
                    linear_pitch % xalign == 0 &&
                    linear_x % xalign == 0 &&
                    tiled_x % xalign == 0 &&
                    copy_width_aligned % xalign == 0 &&
                    tiled_micro_mode != V_009910_ADDR_SURF_ROTATED_MICRO_TILING &&
                    /* check if everything fits into the bitfields */
                    tiled->surface.u.legacy.tile_split <= 4096 &&
                    pitch_tile_max < (1 << 11) &&
                    slice_tile_max < (1 << 22) &&
                    linear_pitch <= (1 << 14) &&
                    linear_slice_pitch <= (1 << 28) &&
                    copy_width_aligned <= (1 << 14) &&
                    copy_height <= (1 << 14) &&
                    copy_depth <= (1 << 11)) {
                        struct radeon_winsys_cs *cs = sctx->b.dma.cs;
                        uint32_t direction = linear == rdst ? 1u << 31 : 0;

                        r600_need_dma_space(&sctx->b, 14, &rdst->resource, &rsrc->resource);

                        radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
                                                        CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
                                    direction);
                        radeon_emit(cs, tiled_address);
                        radeon_emit(cs, tiled_address >> 32);
                        radeon_emit(cs, tiled_x | (tiled_y << 16));
                        radeon_emit(cs, tiled_z | (pitch_tile_max << 16));
                        radeon_emit(cs, slice_tile_max);
                        radeon_emit(cs, encode_tile_info(sctx, tiled, tiled_level, true));
                        radeon_emit(cs, linear_address);
                        radeon_emit(cs, linear_address >> 32);
                        radeon_emit(cs, linear_x | (linear_y << 16));
                        radeon_emit(cs, linear_z | ((linear_pitch - 1) << 16));
                        radeon_emit(cs, linear_slice_pitch - 1);
                        if (sctx->b.chip_class == CIK) {
                                radeon_emit(cs, copy_width_aligned | (copy_height << 16));
                                radeon_emit(cs, copy_depth);
                        } else {
                                radeon_emit(cs, (copy_width_aligned - 1) | ((copy_height - 1) << 16));
                                radeon_emit(cs, (copy_depth - 1));
                        }
                        return true;
                }
        }

        /* Tiled -> Tiled sub-window copy. */
        if (dst_mode >= RADEON_SURF_MODE_1D &&
            src_mode >= RADEON_SURF_MODE_1D &&
            /* check if these fit into the bitfields */
            src_address % 256 == 0 &&
            dst_address % 256 == 0 &&
            rsrc->surface.u.legacy.tile_split <= 4096 &&
            rdst->surface.u.legacy.tile_split <= 4096 &&
            dstx % 8 == 0 &&
            dsty % 8 == 0 &&
            srcx % 8 == 0 &&
            srcy % 8 == 0 &&
            /* this can either be equal, or display->rotated (VI+ only) */
            (src_micro_mode == dst_micro_mode ||
             (sctx->b.chip_class >= VI &&
              src_micro_mode == V_009910_ADDR_SURF_DISPLAY_MICRO_TILING &&
              dst_micro_mode == V_009910_ADDR_SURF_ROTATED_MICRO_TILING))) {
                assert(src_pitch % 8 == 0);
                assert(dst_pitch % 8 == 0);
                assert(src_slice_pitch % 64 == 0);
                assert(dst_slice_pitch % 64 == 0);
                unsigned src_pitch_tile_max = src_pitch / 8 - 1;
                unsigned dst_pitch_tile_max = dst_pitch / 8 - 1;
                unsigned src_slice_tile_max = src_slice_pitch / 64 - 1;
                unsigned dst_slice_tile_max = dst_slice_pitch / 64 - 1;
                unsigned copy_width_aligned = copy_width;
                unsigned copy_height_aligned = copy_height;

                /* If the region ends at the last pixel and is unaligned, we
                 * can copy the remainder of the tile that is not visible to
                 * make it aligned.
                 */
                if (copy_width % 8 != 0 &&
                    srcx + copy_width == src_width &&
                    dstx + copy_width == dst_width)
                        copy_width_aligned = align(copy_width, 8);

                if (copy_height % 8 != 0 &&
                    srcy + copy_height == src_height &&
                    dsty + copy_height == dst_height)
                        copy_height_aligned = align(copy_height, 8);
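                /* Example: a 30-block-wide copy that reaches the right edge
                 * of both surfaces gets widened to align(30, 8) = 32 blocks,
                 * since the extra invisible blocks are safe to copy. */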

                /* check if these fit into the bitfields */
                if (src_pitch_tile_max < (1 << 11) &&
                    dst_pitch_tile_max < (1 << 11) &&
                    src_slice_tile_max < (1 << 22) &&
                    dst_slice_tile_max < (1 << 22) &&
                    copy_width_aligned <= (1 << 14) &&
                    copy_height_aligned <= (1 << 14) &&
                    copy_depth <= (1 << 11) &&
                    copy_width_aligned % 8 == 0 &&
                    copy_height_aligned % 8 == 0 &&
                    /* HW limitation - CIK: */
                    (sctx->b.chip_class != CIK ||
                     (copy_width_aligned < (1 << 14) &&
                      copy_height_aligned < (1 << 14) &&
                      copy_depth < (1 << 11))) &&
                    /* HW limitation - some CIK parts: */
                    ((sctx->b.family != CHIP_BONAIRE &&
                      sctx->b.family != CHIP_KAVERI &&
                      sctx->b.family != CHIP_KABINI &&
                      sctx->b.family != CHIP_MULLINS) ||
                     (srcx + copy_width_aligned != (1 << 14) &&
                      srcy + copy_height_aligned != (1 << 14) &&
                      dstx + copy_width != (1 << 14)))) {
                        struct radeon_winsys_cs *cs = sctx->b.dma.cs;

                        r600_need_dma_space(&sctx->b, 15, &rdst->resource, &rsrc->resource);

                        radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
                                                        CIK_SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0));
                        radeon_emit(cs, src_address);
                        radeon_emit(cs, src_address >> 32);
                        radeon_emit(cs, srcx | (srcy << 16));
                        radeon_emit(cs, srcz | (src_pitch_tile_max << 16));
                        radeon_emit(cs, src_slice_tile_max);
                        radeon_emit(cs, encode_tile_info(sctx, rsrc, src_level, true));
                        radeon_emit(cs, dst_address);
                        radeon_emit(cs, dst_address >> 32);
                        radeon_emit(cs, dstx | (dsty << 16));
                        radeon_emit(cs, dstz | (dst_pitch_tile_max << 16));
                        radeon_emit(cs, dst_slice_tile_max);
                        radeon_emit(cs, encode_tile_info(sctx, rdst, dst_level, false));
                        if (sctx->b.chip_class == CIK) {
                                radeon_emit(cs, copy_width_aligned |
                                            (copy_height_aligned << 16));
                                radeon_emit(cs, copy_depth);
                        } else {
                                radeon_emit(cs, (copy_width_aligned - 8) |
                                            ((copy_height_aligned - 8) << 16));
                                radeon_emit(cs, (copy_depth - 1));
                        }
                        return true;
                }
        }

        return false;
}

static void cik_sdma_copy(struct pipe_context *ctx,
                          struct pipe_resource *dst,
                          unsigned dst_level,
                          unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *src,
                          unsigned src_level,
                          const struct pipe_box *src_box)
{
        struct si_context *sctx = (struct si_context *)ctx;

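        /* SDMA copies are disabled for sparse (PRT) resources (see the
         * commit title); those, and contexts without a DMA CS, go through
         * the generic copy path. */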
        if (!sctx->b.dma.cs ||
            src->flags & PIPE_RESOURCE_FLAG_SPARSE ||
            dst->flags & PIPE_RESOURCE_FLAG_SPARSE)
                goto fallback;

        if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
                cik_sdma_copy_buffer(sctx, dst, src, dstx, src_box->x, src_box->width);
                return;
        }

        if ((sctx->b.chip_class == CIK || sctx->b.chip_class == VI) &&
            cik_sdma_copy_texture(sctx, dst, dst_level, dstx, dsty, dstz,
                                  src, src_level, src_box))
                return;

fallback:
        si_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
                                src, src_level, src_box);
}

void cik_init_sdma_functions(struct si_context *sctx)
{
        sctx->b.dma_copy = cik_sdma_copy;
        sctx->b.dma_clear_buffer = cik_sdma_clear_buffer;
}