Revert "radeonsi: enable SDMA on CIK"
[mesa.git] / src / gallium / drivers / radeonsi / cik_sdma.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014,2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */

#include "sid.h"
#include "si_pipe.h"
#include "radeon/r600_cs.h"

#include "util/u_format.h"

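/* Emit raw SDMA COPY (LINEAR) packets for a buffer-to-buffer copy, splitting
 * the range into chunks of at most CIK_SDMA_COPY_MAX_SIZE bytes. The offsets
 * are relative to the start of each resource.
 */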
static void cik_sdma_do_copy_buffer(struct si_context *ctx,
				    struct pipe_resource *dst,
				    struct pipe_resource *src,
				    uint64_t dst_offset,
				    uint64_t src_offset,
				    uint64_t size)
{
	struct radeon_winsys_cs *cs = ctx->b.dma.cs;
	unsigned i, ncopy, csize;
	struct r600_resource *rdst = (struct r600_resource*)dst;
	struct r600_resource *rsrc = (struct r600_resource*)src;

	dst_offset += r600_resource(dst)->gpu_address;
	src_offset += r600_resource(src)->gpu_address;

	ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
	r600_need_dma_space(&ctx->b, ncopy * 7, rdst, rsrc);

	for (i = 0; i < ncopy; i++) {
		csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
		radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
						CIK_SDMA_COPY_SUB_OPCODE_LINEAR,
						0));
		radeon_emit(cs, csize);
		radeon_emit(cs, 0); /* src/dst endian swap */
		radeon_emit(cs, src_offset);
		radeon_emit(cs, src_offset >> 32);
		radeon_emit(cs, dst_offset);
		radeon_emit(cs, dst_offset >> 32);
		dst_offset += csize;
		src_offset += csize;
		size -= csize;
	}
}

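/* Buffer-to-buffer copy entry point: record the written destination range,
 * emit the copy packets and make the DMA ring wait for idle.
 */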
static void cik_sdma_copy_buffer(struct si_context *ctx,
				 struct pipe_resource *dst,
				 struct pipe_resource *src,
				 uint64_t dst_offset,
				 uint64_t src_offset,
				 uint64_t size)
{
	struct r600_resource *rdst = (struct r600_resource*)dst;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&rdst->valid_buffer_range, dst_offset,
		       dst_offset + size);

	cik_sdma_do_copy_buffer(ctx, dst, src, dst_offset, src_offset, size);
	r600_dma_emit_wait_idle(&ctx->b);
}

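/* Return the width of a mip level in blocks, where blk_w is the format's
 * block width in pixels.
 */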
static unsigned minify_as_blocks(unsigned width, unsigned level, unsigned blk_w)
{
	width = u_minify(width, level);
	return DIV_ROUND_UP(width, blk_w);
}

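/* Pack the tiling parameters of one mip level into the tiling-info dword of
 * the tiled sub-window copy packets: element size (optional), array mode,
 * micro tile mode, tile split, bank width/height, number of banks, macro
 * tile aspect and pipe config.
 */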
static unsigned encode_tile_info(struct si_context *sctx,
				 struct r600_texture *tex, unsigned level,
				 bool set_bpp)
{
	struct radeon_info *info = &sctx->screen->b.info;
	unsigned tile_index = tex->surface.tiling_index[level];
	unsigned macro_tile_index = tex->surface.macro_tile_index;
	unsigned tile_mode = info->si_tile_mode_array[tile_index];
	unsigned macro_tile_mode = info->cik_macrotile_mode_array[macro_tile_index];

	return (set_bpp ? util_logbase2(tex->surface.bpe) : 0) |
		(G_009910_ARRAY_MODE(tile_mode) << 3) |
		(G_009910_MICRO_TILE_MODE_NEW(tile_mode) << 8) |
		/* Non-depth modes don't have TILE_SPLIT set. */
		((util_logbase2(tex->surface.tile_split >> 6)) << 11) |
		(G_009990_BANK_WIDTH(macro_tile_mode) << 15) |
		(G_009990_BANK_HEIGHT(macro_tile_mode) << 18) |
		(G_009990_NUM_BANKS(macro_tile_mode) << 21) |
		(G_009990_MACRO_TILE_ASPECT(macro_tile_mode) << 24) |
		(G_009910_PIPE_CONFIG(tile_mode) << 26);
}

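/* Try to copy a texture sub-window on the SDMA ring. Depending on the
 * surface modes, this uses the linear->linear, tiled<->linear or
 * tiled->tiled (T2T) sub-window packet. Returns false if the copy doesn't
 * fit the packet restrictions; the caller then has to use another path.
 */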
static bool cik_sdma_copy_texture(struct si_context *sctx,
				  struct pipe_resource *dst,
				  unsigned dst_level,
				  unsigned dstx, unsigned dsty, unsigned dstz,
				  struct pipe_resource *src,
				  unsigned src_level,
				  const struct pipe_box *src_box)
{
	struct radeon_info *info = &sctx->screen->b.info;
	struct r600_texture *rsrc = (struct r600_texture*)src;
	struct r600_texture *rdst = (struct r600_texture*)dst;
	unsigned bpp = rdst->surface.bpe;
	uint64_t dst_address = rdst->resource.gpu_address +
			       rdst->surface.level[dst_level].offset;
	uint64_t src_address = rsrc->resource.gpu_address +
			       rsrc->surface.level[src_level].offset;
	unsigned dst_mode = rdst->surface.level[dst_level].mode;
	unsigned src_mode = rsrc->surface.level[src_level].mode;
	unsigned dst_tile_index = rdst->surface.tiling_index[dst_level];
	unsigned src_tile_index = rsrc->surface.tiling_index[src_level];
	unsigned dst_tile_mode = info->si_tile_mode_array[dst_tile_index];
	unsigned src_tile_mode = info->si_tile_mode_array[src_tile_index];
	unsigned dst_micro_mode = G_009910_MICRO_TILE_MODE_NEW(dst_tile_mode);
	unsigned src_micro_mode = G_009910_MICRO_TILE_MODE_NEW(src_tile_mode);
	unsigned dst_pitch = rdst->surface.level[dst_level].pitch_bytes / bpp;
	unsigned src_pitch = rsrc->surface.level[src_level].pitch_bytes / bpp;
	uint64_t dst_slice_pitch = rdst->surface.level[dst_level].slice_size / bpp;
	uint64_t src_slice_pitch = rsrc->surface.level[src_level].slice_size / bpp;
	unsigned dst_width = minify_as_blocks(rdst->resource.b.b.width0,
					      dst_level, rdst->surface.blk_w);
	unsigned src_width = minify_as_blocks(rsrc->resource.b.b.width0,
					      src_level, rsrc->surface.blk_w);
	unsigned dst_height = minify_as_blocks(rdst->resource.b.b.height0,
					       dst_level, rdst->surface.blk_h);
	unsigned src_height = minify_as_blocks(rsrc->resource.b.b.height0,
					       src_level, rsrc->surface.blk_h);
	unsigned srcx = src_box->x / rsrc->surface.blk_w;
	unsigned srcy = src_box->y / rsrc->surface.blk_h;
	unsigned srcz = src_box->z;
	unsigned copy_width = DIV_ROUND_UP(src_box->width, rsrc->surface.blk_w);
	unsigned copy_height = DIV_ROUND_UP(src_box->height, rsrc->surface.blk_h);
	unsigned copy_depth = src_box->depth;

	assert(src_level <= src->last_level);
	assert(dst_level <= dst->last_level);
	assert(rdst->surface.level[dst_level].offset +
	       dst_slice_pitch * bpp * (dstz + src_box->depth) <=
	       rdst->resource.buf->size);
	assert(rsrc->surface.level[src_level].offset +
	       src_slice_pitch * bpp * (srcz + src_box->depth) <=
	       rsrc->resource.buf->size);

	/* Test CIK with radeon and amdgpu before enabling this. */
	if (sctx->b.chip_class == CIK)
		return false;

	if (!r600_prepare_for_dma_blit(&sctx->b, rdst, dst_level, dstx, dsty,
				       dstz, rsrc, src_level, src_box))
		return false;

	dstx /= rdst->surface.blk_w;
	dsty /= rdst->surface.blk_h;

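	/* The sub-window packets store x/y in 14-bit fields and z in an
	 * 11-bit field, so reject coordinates that don't fit.
	 */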
	if (srcx >= (1 << 14) ||
	    srcy >= (1 << 14) ||
	    srcz >= (1 << 11) ||
	    dstx >= (1 << 14) ||
	    dsty >= (1 << 14) ||
	    dstz >= (1 << 11))
		return false;

	/* Linear -> linear sub-window copy. */
	if (dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED &&
	    src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED &&
	    /* check if everything fits into the bitfields */
	    src_pitch <= (1 << 14) &&
	    dst_pitch <= (1 << 14) &&
	    src_slice_pitch <= (1 << 28) &&
	    dst_slice_pitch <= (1 << 28) &&
	    copy_width <= (1 << 14) &&
	    copy_height <= (1 << 14) &&
	    copy_depth <= (1 << 11) &&
	    /* HW limitation - CIK: */
	    (sctx->b.chip_class != CIK ||
	     (copy_width < (1 << 14) &&
	      copy_height < (1 << 14) &&
	      copy_depth < (1 << 11))) &&
	    /* HW limitation - some CIK parts: */
	    ((sctx->b.family != CHIP_BONAIRE &&
	      sctx->b.family != CHIP_KAVERI) ||
	     (srcx + copy_width != (1 << 14) &&
	      srcy + copy_height != (1 << 14)))) {
		struct radeon_winsys_cs *cs = sctx->b.dma.cs;

		r600_need_dma_space(&sctx->b, 13, &rdst->resource, &rsrc->resource);

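		/* LINEAR_SUB_WINDOW packet: header (element size in the high
		 * bits), src address, src x/y, src z/pitch, src slice pitch,
		 * dst address, dst x/y, dst z/pitch, dst slice pitch, copy
		 * width/height, copy depth. CIK takes the sizes as-is, newer
		 * chips take size - 1.
		 */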
		radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
						CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
				(util_logbase2(bpp) << 29));
		radeon_emit(cs, src_address);
		radeon_emit(cs, src_address >> 32);
		radeon_emit(cs, srcx | (srcy << 16));
		radeon_emit(cs, srcz | ((src_pitch - 1) << 16));
		radeon_emit(cs, src_slice_pitch - 1);
		radeon_emit(cs, dst_address);
		radeon_emit(cs, dst_address >> 32);
		radeon_emit(cs, dstx | (dsty << 16));
		radeon_emit(cs, dstz | ((dst_pitch - 1) << 16));
		radeon_emit(cs, dst_slice_pitch - 1);
		if (sctx->b.chip_class == CIK) {
			radeon_emit(cs, copy_width | (copy_height << 16));
			radeon_emit(cs, copy_depth);
		} else {
			radeon_emit(cs, (copy_width - 1) | ((copy_height - 1) << 16));
			radeon_emit(cs, (copy_depth - 1));
		}

		r600_dma_emit_wait_idle(&sctx->b);
		return true;
	}

	/* Tiled <-> linear sub-window copy. */
	if ((src_mode >= RADEON_SURF_MODE_1D) != (dst_mode >= RADEON_SURF_MODE_1D)) {
		struct r600_texture *tiled = src_mode >= RADEON_SURF_MODE_1D ? rsrc : rdst;
		struct r600_texture *linear = tiled == rsrc ? rdst : rsrc;
		unsigned tiled_level = tiled == rsrc ? src_level : dst_level;
		unsigned linear_level = linear == rsrc ? src_level : dst_level;
		unsigned tiled_x = tiled == rsrc ? srcx : dstx;
		unsigned linear_x = linear == rsrc ? srcx : dstx;
		unsigned tiled_y = tiled == rsrc ? srcy : dsty;
		unsigned linear_y = linear == rsrc ? srcy : dsty;
		unsigned tiled_z = tiled == rsrc ? srcz : dstz;
		unsigned linear_z = linear == rsrc ? srcz : dstz;
		unsigned tiled_width = tiled == rsrc ? src_width : dst_width;
		unsigned linear_width = linear == rsrc ? src_width : dst_width;
		unsigned tiled_pitch = tiled == rsrc ? src_pitch : dst_pitch;
		unsigned linear_pitch = linear == rsrc ? src_pitch : dst_pitch;
		unsigned tiled_slice_pitch = tiled == rsrc ? src_slice_pitch : dst_slice_pitch;
		unsigned linear_slice_pitch = linear == rsrc ? src_slice_pitch : dst_slice_pitch;
		uint64_t tiled_address = tiled == rsrc ? src_address : dst_address;
		uint64_t linear_address = linear == rsrc ? src_address : dst_address;
		unsigned tiled_micro_mode = tiled == rsrc ? src_micro_mode : dst_micro_mode;

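		/* The tiled pitch is programmed in units of 8 pixels and the
		 * tiled slice pitch in units of 8x8 tiles, both minus one.
		 */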
		assert(tiled_pitch % 8 == 0);
		assert(tiled_slice_pitch % 64 == 0);
		unsigned pitch_tile_max = tiled_pitch / 8 - 1;
		unsigned slice_tile_max = tiled_slice_pitch / 64 - 1;
		unsigned xalign = MAX2(1, 4 / bpp);
		unsigned copy_width_aligned = copy_width;

		/* If the region ends at the last pixel and is unaligned, we
		 * can copy the remainder of the line that is not visible to
		 * make it aligned.
		 */
		if (copy_width % xalign != 0 &&
		    linear_x + copy_width == linear_width &&
		    tiled_x + copy_width == tiled_width &&
		    linear_x + align(copy_width, xalign) <= linear_pitch &&
		    tiled_x + align(copy_width, xalign) <= tiled_pitch)
			copy_width_aligned = align(copy_width, xalign);

		/* HW limitations. */
		if ((sctx->b.family == CHIP_BONAIRE ||
		     sctx->b.family == CHIP_KAVERI) &&
		    linear_pitch - 1 == 0x3fff &&
		    bpp == 16)
			return false;

		if (sctx->b.chip_class == CIK &&
		    (copy_width_aligned == (1 << 14) ||
		     copy_height == (1 << 14) ||
		     copy_depth == (1 << 11)))
			return false;

		if ((sctx->b.family == CHIP_BONAIRE ||
		     sctx->b.family == CHIP_KAVERI ||
		     sctx->b.family == CHIP_KABINI ||
		     sctx->b.family == CHIP_MULLINS) &&
		    (tiled_x + copy_width == (1 << 14) ||
		     tiled_y + copy_height == (1 << 14)))
			return false;

		/* The hardware can read outside of the given linear buffer
		 * bounds, or it can access those pages without touching the
		 * memory in the case of writes (which still causes a VM
		 * fault).
		 *
		 * Out-of-bounds memory access or page directory access must
		 * be prevented.
		 */
		int64_t start_linear_address, end_linear_address;
		unsigned granularity;

		/* Deduce the size of reads from the linear surface. */
		switch (tiled_micro_mode) {
		case V_009910_ADDR_SURF_DISPLAY_MICRO_TILING:
			granularity = bpp == 1 ? 64 / (8*bpp) :
						 128 / (8*bpp);
			break;
		case V_009910_ADDR_SURF_THIN_MICRO_TILING:
		case V_009910_ADDR_SURF_DEPTH_MICRO_TILING:
			if (0 /* TODO: THICK microtiling */)
				granularity = bpp == 1 ? 32 / (8*bpp) :
					      bpp == 2 ? 64 / (8*bpp) :
					      bpp <= 8 ? 128 / (8*bpp) :
							 256 / (8*bpp);
			else
				granularity = bpp <= 2 ? 64 / (8*bpp) :
					      bpp <= 8 ? 128 / (8*bpp) :
							 256 / (8*bpp);
			break;
		default:
			return false;
		}

		/* The linear reads start at tiled_x & ~(granularity - 1).
		 * If linear_x == 0 && tiled_x % granularity != 0, the hw
		 * starts reading from an address preceding linear_address!!!
		 */
		start_linear_address =
			linear->surface.level[linear_level].offset +
			bpp * (linear_z * linear_slice_pitch +
			       linear_y * linear_pitch +
			       linear_x);
		start_linear_address -= (int)(bpp * (tiled_x % granularity));

		end_linear_address =
			linear->surface.level[linear_level].offset +
			bpp * ((linear_z + copy_depth - 1) * linear_slice_pitch +
			       (linear_y + copy_height - 1) * linear_pitch +
			       (linear_x + copy_width));

		if ((tiled_x + copy_width) % granularity)
			end_linear_address += granularity -
					      (tiled_x + copy_width) % granularity;

		if (start_linear_address < 0 ||
		    end_linear_address > linear->surface.bo_size)
			return false;

		/* Check requirements. */
		if (tiled_address % 256 == 0 &&
		    linear_address % 4 == 0 &&
		    linear_pitch % xalign == 0 &&
		    linear_x % xalign == 0 &&
		    tiled_x % xalign == 0 &&
		    copy_width_aligned % xalign == 0 &&
		    tiled_micro_mode != V_009910_ADDR_SURF_ROTATED_MICRO_TILING &&
		    /* check if everything fits into the bitfields */
		    tiled->surface.tile_split <= 4096 &&
		    pitch_tile_max < (1 << 11) &&
		    slice_tile_max < (1 << 22) &&
		    linear_pitch <= (1 << 14) &&
		    linear_slice_pitch <= (1 << 28) &&
		    copy_width_aligned <= (1 << 14) &&
		    copy_height <= (1 << 14) &&
		    copy_depth <= (1 << 11)) {
			struct radeon_winsys_cs *cs = sctx->b.dma.cs;
			uint32_t direction = linear == rdst ? 1u << 31 : 0;

			r600_need_dma_space(&sctx->b, 14, &rdst->resource, &rsrc->resource);

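			/* TILED_SUB_WINDOW packet: header (bit 31 set when the
			 * destination is the linear surface), tiled address,
			 * x/y, z/pitch_tile_max, slice_tile_max and tiling
			 * info, then the linear address, x/y, z/pitch and
			 * slice pitch, and finally the copy
			 * width/height/depth.
			 */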
			radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
							CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
					direction);
			radeon_emit(cs, tiled_address);
			radeon_emit(cs, tiled_address >> 32);
			radeon_emit(cs, tiled_x | (tiled_y << 16));
			radeon_emit(cs, tiled_z | (pitch_tile_max << 16));
			radeon_emit(cs, slice_tile_max);
			radeon_emit(cs, encode_tile_info(sctx, tiled, tiled_level, true));
			radeon_emit(cs, linear_address);
			radeon_emit(cs, linear_address >> 32);
			radeon_emit(cs, linear_x | (linear_y << 16));
			radeon_emit(cs, linear_z | ((linear_pitch - 1) << 16));
			radeon_emit(cs, linear_slice_pitch - 1);
			if (sctx->b.chip_class == CIK) {
				radeon_emit(cs, copy_width_aligned | (copy_height << 16));
				radeon_emit(cs, copy_depth);
			} else {
				radeon_emit(cs, (copy_width_aligned - 1) | ((copy_height - 1) << 16));
				radeon_emit(cs, (copy_depth - 1));
			}

			r600_dma_emit_wait_idle(&sctx->b);
			return true;
		}
	}

	/* Tiled -> Tiled sub-window copy. */
	if (dst_mode >= RADEON_SURF_MODE_1D &&
	    src_mode >= RADEON_SURF_MODE_1D &&
	    /* check if these fit into the bitfields */
	    src_address % 256 == 0 &&
	    dst_address % 256 == 0 &&
	    rsrc->surface.tile_split <= 4096 &&
	    rdst->surface.tile_split <= 4096 &&
	    dstx % 8 == 0 &&
	    dsty % 8 == 0 &&
	    srcx % 8 == 0 &&
	    srcy % 8 == 0 &&
	    /* this can either be equal, or display->rotated (VI only) */
	    (src_micro_mode == dst_micro_mode ||
	     (sctx->b.chip_class == VI &&
	      src_micro_mode == V_009910_ADDR_SURF_DISPLAY_MICRO_TILING &&
	      dst_micro_mode == V_009910_ADDR_SURF_ROTATED_MICRO_TILING))) {
		assert(src_pitch % 8 == 0);
		assert(dst_pitch % 8 == 0);
		assert(src_slice_pitch % 64 == 0);
		assert(dst_slice_pitch % 64 == 0);
		unsigned src_pitch_tile_max = src_pitch / 8 - 1;
		unsigned dst_pitch_tile_max = dst_pitch / 8 - 1;
		unsigned src_slice_tile_max = src_slice_pitch / 64 - 1;
		unsigned dst_slice_tile_max = dst_slice_pitch / 64 - 1;
		unsigned copy_width_aligned = copy_width;
		unsigned copy_height_aligned = copy_height;

		/* If the region ends at the last pixel and is unaligned, we
		 * can copy the remainder of the tile that is not visible to
		 * make it aligned.
		 */
		if (copy_width % 8 != 0 &&
		    srcx + copy_width == src_width &&
		    dstx + copy_width == dst_width)
			copy_width_aligned = align(copy_width, 8);

		if (copy_height % 8 != 0 &&
		    srcy + copy_height == src_height &&
		    dsty + copy_height == dst_height)
			copy_height_aligned = align(copy_height, 8);

		/* check if these fit into the bitfields */
		if (src_pitch_tile_max < (1 << 11) &&
		    dst_pitch_tile_max < (1 << 11) &&
		    src_slice_tile_max < (1 << 22) &&
		    dst_slice_tile_max < (1 << 22) &&
		    copy_width_aligned <= (1 << 14) &&
		    copy_height_aligned <= (1 << 14) &&
		    copy_depth <= (1 << 11) &&
		    copy_width_aligned % 8 == 0 &&
		    copy_height_aligned % 8 == 0 &&
		    /* HW limitation - CIK: */
		    (sctx->b.chip_class != CIK ||
		     (copy_width_aligned < (1 << 14) &&
		      copy_height_aligned < (1 << 14) &&
		      copy_depth < (1 << 11))) &&
		    /* HW limitation - some CIK parts: */
		    ((sctx->b.family != CHIP_BONAIRE &&
		      sctx->b.family != CHIP_KAVERI &&
		      sctx->b.family != CHIP_KABINI &&
		      sctx->b.family != CHIP_MULLINS) ||
		     (srcx + copy_width_aligned != (1 << 14) &&
		      srcy + copy_height_aligned != (1 << 14) &&
		      dstx + copy_width != (1 << 14)))) {
			struct radeon_winsys_cs *cs = sctx->b.dma.cs;

			r600_need_dma_space(&sctx->b, 15, &rdst->resource, &rsrc->resource);

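			/* T2T_SUB_WINDOW packet: source address, x/y,
			 * z/pitch_tile_max, slice_tile_max and tiling info
			 * (with the element size), the same fields for the
			 * destination (without the element size), then the
			 * copy width/height/depth.
			 */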
			radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
							CIK_SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0));
			radeon_emit(cs, src_address);
			radeon_emit(cs, src_address >> 32);
			radeon_emit(cs, srcx | (srcy << 16));
			radeon_emit(cs, srcz | (src_pitch_tile_max << 16));
			radeon_emit(cs, src_slice_tile_max);
			radeon_emit(cs, encode_tile_info(sctx, rsrc, src_level, true));
			radeon_emit(cs, dst_address);
			radeon_emit(cs, dst_address >> 32);
			radeon_emit(cs, dstx | (dsty << 16));
			radeon_emit(cs, dstz | (dst_pitch_tile_max << 16));
			radeon_emit(cs, dst_slice_tile_max);
			radeon_emit(cs, encode_tile_info(sctx, rdst, dst_level, false));
			if (sctx->b.chip_class == CIK) {
				radeon_emit(cs, copy_width_aligned |
						(copy_height_aligned << 16));
				radeon_emit(cs, copy_depth);
			} else {
				radeon_emit(cs, (copy_width_aligned - 8) |
						((copy_height_aligned - 8) << 16));
				radeon_emit(cs, (copy_depth - 1));
			}

			r600_dma_emit_wait_idle(&sctx->b);
			return true;
		}
	}

	return false;
}

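/* pipe_context::resource_copy_region entry point for the SDMA ring. Buffers
 * go through the linear copy path, textures are attempted with
 * cik_sdma_copy_texture, and everything else falls back to
 * si_resource_copy_region.
 */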
static void cik_sdma_copy(struct pipe_context *ctx,
			  struct pipe_resource *dst,
			  unsigned dst_level,
			  unsigned dstx, unsigned dsty, unsigned dstz,
			  struct pipe_resource *src,
			  unsigned src_level,
			  const struct pipe_box *src_box)
{
	struct si_context *sctx = (struct si_context *)ctx;

	if (!sctx->b.dma.cs)
		goto fallback;

	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
		cik_sdma_copy_buffer(sctx, dst, src, dstx, src_box->x, src_box->width);
		return;
	}

	/* Carrizo SDMA texture copying is very broken for some users.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=97029
	 */
	if (sctx->b.family == CHIP_CARRIZO)
		goto fallback;

	if (cik_sdma_copy_texture(sctx, dst, dst_level, dstx, dsty, dstz,
				  src, src_level, src_box))
		return;

fallback:
	si_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
				src, src_level, src_box);
}

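/* Register cik_sdma_copy as the DMA copy callback for this context. */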
void cik_init_sdma_functions(struct si_context *sctx)
{
	sctx->b.dma_copy = cik_sdma_copy;
}