/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014,2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */

#include "sid.h"
#include "si_pipe.h"
#include "radeon/r600_cs.h"

#include "util/u_format.h"

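/* Emit a GPU->GPU buffer copy as a series of LINEAR SDMA copy packets.
 * The copy is split into "ncopy" chunks of at most CIK_SDMA_COPY_MAX_SIZE
 * bytes each; every chunk takes 7 dwords in the DMA IB.
 */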
static void cik_sdma_do_copy_buffer(struct si_context *ctx,
				    struct pipe_resource *dst,
				    struct pipe_resource *src,
				    uint64_t dst_offset,
				    uint64_t src_offset,
				    uint64_t size)
{
	struct radeon_winsys_cs *cs = ctx->b.dma.cs;
	unsigned i, ncopy, csize;
	struct r600_resource *rdst = (struct r600_resource*)dst;
	struct r600_resource *rsrc = (struct r600_resource*)src;

	dst_offset += rdst->gpu_address;
	src_offset += rsrc->gpu_address;

	ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
	r600_need_dma_space(&ctx->b, ncopy * 7, rdst, rsrc);

	radeon_add_to_buffer_list(&ctx->b, &ctx->b.dma, rsrc, RADEON_USAGE_READ,
				  RADEON_PRIO_SDMA_BUFFER);
	radeon_add_to_buffer_list(&ctx->b, &ctx->b.dma, rdst, RADEON_USAGE_WRITE,
				  RADEON_PRIO_SDMA_BUFFER);

	for (i = 0; i < ncopy; i++) {
		csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
		radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
						CIK_SDMA_COPY_SUB_OPCODE_LINEAR,
						0));
		radeon_emit(cs, csize);
		radeon_emit(cs, 0); /* src/dst endian swap */
		radeon_emit(cs, src_offset);
		radeon_emit(cs, src_offset >> 32);
		radeon_emit(cs, dst_offset);
		radeon_emit(cs, dst_offset >> 32);
		dst_offset += csize;
		src_offset += csize;
		size -= csize;
	}
}

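/* Buffer->buffer copy entry point: records the written destination range
 * (so transfer_map knows to wait for the GPU when mapping it), then emits
 * the copy followed by a wait-idle on the DMA engine.
 */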
static void cik_sdma_copy_buffer(struct si_context *ctx,
				 struct pipe_resource *dst,
				 struct pipe_resource *src,
				 uint64_t dst_offset,
				 uint64_t src_offset,
				 uint64_t size)
{
	struct r600_resource *rdst = (struct r600_resource*)dst;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&rdst->valid_buffer_range, dst_offset,
		       dst_offset + size);

	cik_sdma_do_copy_buffer(ctx, dst, src, dst_offset, src_offset, size);
	r600_dma_emit_wait_idle(&ctx->b);
}

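/* Return the width of mip level "level" measured in blocks of "blk_w"
 * texels, e.g. a 13-texel-wide level of a compressed format with
 * blk_w == 4 occupies DIV_ROUND_UP(13, 4) == 4 blocks.
 */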
static unsigned minify_as_blocks(unsigned width, unsigned level, unsigned blk_w)
{
	width = u_minify(width, level);
	return DIV_ROUND_UP(width, blk_w);
}

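/* Pack the tiling parameters of one mip level into the tile-info dword
 * used by the tiled SDMA copy packets: bpp log2 at bit 0 (only if
 * set_bpp), array mode at bit 3, new micro tile mode at bit 8, tile
 * split at bit 11, bank width/height at bits 15/18, num banks at bit 21,
 * macro tile aspect at bit 24 and pipe config at bit 26.
 */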
static unsigned encode_tile_info(struct si_context *sctx,
				 struct r600_texture *tex, unsigned level,
				 bool set_bpp)
{
	struct radeon_info *info = &sctx->screen->b.info;
	unsigned tile_index = tex->surface.tiling_index[level];
	unsigned macro_tile_index = tex->surface.macro_tile_index;
	unsigned tile_mode = info->si_tile_mode_array[tile_index];
	unsigned macro_tile_mode = info->cik_macrotile_mode_array[macro_tile_index];

	return (set_bpp ? util_logbase2(tex->surface.bpe) : 0) |
	       (G_009910_ARRAY_MODE(tile_mode) << 3) |
	       (G_009910_MICRO_TILE_MODE_NEW(tile_mode) << 8) |
	       /* Non-depth modes don't have TILE_SPLIT set. */
	       ((util_logbase2(tex->surface.tile_split >> 6)) << 11) |
	       (G_009990_BANK_WIDTH(macro_tile_mode) << 15) |
	       (G_009990_BANK_HEIGHT(macro_tile_mode) << 18) |
	       (G_009990_NUM_BANKS(macro_tile_mode) << 21) |
	       (G_009990_MACRO_TILE_ASPECT(macro_tile_mode) << 24) |
	       (G_009910_PIPE_CONFIG(tile_mode) << 26);
}

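/* Try to copy a (sub-)texture with the SDMA engine. Three fast paths are
 * implemented below: linear->linear, tiled<->linear and tiled->tiled
 * sub-window copies. Returns false if none of them applies or a hardware
 * limitation is hit, in which case the caller must fall back to another
 * copy method.
 */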
static bool cik_sdma_copy_texture(struct si_context *sctx,
				  struct pipe_resource *dst,
				  unsigned dst_level,
				  unsigned dstx, unsigned dsty, unsigned dstz,
				  struct pipe_resource *src,
				  unsigned src_level,
				  const struct pipe_box *src_box)
{
	struct radeon_info *info = &sctx->screen->b.info;
	struct r600_texture *rsrc = (struct r600_texture*)src;
	struct r600_texture *rdst = (struct r600_texture*)dst;
	unsigned bpp = rdst->surface.bpe;
	uint64_t dst_address = rdst->resource.gpu_address +
			       rdst->surface.level[dst_level].offset;
	uint64_t src_address = rsrc->resource.gpu_address +
			       rsrc->surface.level[src_level].offset;
	unsigned dst_mode = rdst->surface.level[dst_level].mode;
	unsigned src_mode = rsrc->surface.level[src_level].mode;
	unsigned dst_tile_index = rdst->surface.tiling_index[dst_level];
	unsigned src_tile_index = rsrc->surface.tiling_index[src_level];
	unsigned dst_tile_mode = info->si_tile_mode_array[dst_tile_index];
	unsigned src_tile_mode = info->si_tile_mode_array[src_tile_index];
	unsigned dst_micro_mode = G_009910_MICRO_TILE_MODE_NEW(dst_tile_mode);
	unsigned src_micro_mode = G_009910_MICRO_TILE_MODE_NEW(src_tile_mode);
	unsigned dst_pitch = rdst->surface.level[dst_level].pitch_bytes / bpp;
	unsigned src_pitch = rsrc->surface.level[src_level].pitch_bytes / bpp;
	uint64_t dst_slice_pitch = rdst->surface.level[dst_level].slice_size / bpp;
	uint64_t src_slice_pitch = rsrc->surface.level[src_level].slice_size / bpp;
	unsigned dst_width = minify_as_blocks(rdst->resource.b.b.width0,
					      dst_level, rdst->surface.blk_w);
	unsigned src_width = minify_as_blocks(rsrc->resource.b.b.width0,
					      src_level, rsrc->surface.blk_w);
	unsigned dst_height = minify_as_blocks(rdst->resource.b.b.height0,
					       dst_level, rdst->surface.blk_h);
	unsigned src_height = minify_as_blocks(rsrc->resource.b.b.height0,
					       src_level, rsrc->surface.blk_h);
	unsigned srcx = src_box->x / rsrc->surface.blk_w;
	unsigned srcy = src_box->y / rsrc->surface.blk_h;
	unsigned srcz = src_box->z;
	unsigned copy_width = DIV_ROUND_UP(src_box->width, rsrc->surface.blk_w);
	unsigned copy_height = DIV_ROUND_UP(src_box->height, rsrc->surface.blk_h);
	unsigned copy_depth = src_box->depth;

	assert(src_level <= src->last_level);
	assert(dst_level <= dst->last_level);
	assert(rdst->surface.level[dst_level].offset +
	       dst_slice_pitch * bpp * (dstz + src_box->depth) <=
	       rdst->resource.buf->size);
	assert(rsrc->surface.level[src_level].offset +
	       src_slice_pitch * bpp * (srcz + src_box->depth) <=
	       rsrc->resource.buf->size);

	/* Test CIK with radeon and amdgpu before enabling this. */
	if (sctx->b.chip_class == CIK)
		return false;

	if (!r600_prepare_for_dma_blit(&sctx->b, rdst, dst_level, dstx, dsty,
				       dstz, rsrc, src_level, src_box))
		return false;

	dstx /= rdst->surface.blk_w;
	dsty /= rdst->surface.blk_h;

	if (srcx >= (1 << 14) ||
	    srcy >= (1 << 14) ||
	    srcz >= (1 << 11) ||
	    dstx >= (1 << 14) ||
	    dsty >= (1 << 14) ||
	    dstz >= (1 << 11))
		return false;

	/* Linear -> linear sub-window copy. */
	if (dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED &&
	    src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED &&
	    /* check if everything fits into the bitfields */
	    src_pitch <= (1 << 14) &&
	    dst_pitch <= (1 << 14) &&
	    src_slice_pitch <= (1 << 28) &&
	    dst_slice_pitch <= (1 << 28) &&
	    copy_width <= (1 << 14) &&
	    copy_height <= (1 << 14) &&
	    copy_depth <= (1 << 11) &&
	    /* HW limitation - CIK: */
	    (sctx->b.chip_class != CIK ||
	     (copy_width < (1 << 14) &&
	      copy_height < (1 << 14) &&
	      copy_depth < (1 << 11))) &&
	    /* HW limitation - some CIK parts: */
	    ((sctx->b.family != CHIP_BONAIRE &&
	      sctx->b.family != CHIP_KAVERI) ||
	     (srcx + copy_width != (1 << 14) &&
	      srcy + copy_height != (1 << 14)))) {
		struct radeon_winsys_cs *cs = sctx->b.dma.cs;

		r600_need_dma_space(&sctx->b, 13, &rdst->resource, &rsrc->resource);
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.dma, &rsrc->resource,
					  RADEON_USAGE_READ,
					  RADEON_PRIO_SDMA_TEXTURE);
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.dma, &rdst->resource,
					  RADEON_USAGE_WRITE,
					  RADEON_PRIO_SDMA_TEXTURE);

		radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
						CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
			    (util_logbase2(bpp) << 29));
		radeon_emit(cs, src_address);
		radeon_emit(cs, src_address >> 32);
		radeon_emit(cs, srcx | (srcy << 16));
		radeon_emit(cs, srcz | ((src_pitch - 1) << 16));
		radeon_emit(cs, src_slice_pitch - 1);
		radeon_emit(cs, dst_address);
		radeon_emit(cs, dst_address >> 32);
		radeon_emit(cs, dstx | (dsty << 16));
		radeon_emit(cs, dstz | ((dst_pitch - 1) << 16));
		radeon_emit(cs, dst_slice_pitch - 1);
		if (sctx->b.chip_class == CIK) {
			radeon_emit(cs, copy_width | (copy_height << 16));
			radeon_emit(cs, copy_depth);
		} else {
			radeon_emit(cs, (copy_width - 1) | ((copy_height - 1) << 16));
			radeon_emit(cs, (copy_depth - 1));
		}

		r600_dma_emit_wait_idle(&sctx->b);
		return true;
	}

	/* Tiled <-> linear sub-window copy. */
	if ((src_mode >= RADEON_SURF_MODE_1D) != (dst_mode >= RADEON_SURF_MODE_1D)) {
		struct r600_texture *tiled = src_mode >= RADEON_SURF_MODE_1D ? rsrc : rdst;
		struct r600_texture *linear = tiled == rsrc ? rdst : rsrc;
		unsigned tiled_level = tiled == rsrc ? src_level : dst_level;
		unsigned linear_level = linear == rsrc ? src_level : dst_level;
		unsigned tiled_x = tiled == rsrc ? srcx : dstx;
		unsigned linear_x = linear == rsrc ? srcx : dstx;
		unsigned tiled_y = tiled == rsrc ? srcy : dsty;
		unsigned linear_y = linear == rsrc ? srcy : dsty;
		unsigned tiled_z = tiled == rsrc ? srcz : dstz;
		unsigned linear_z = linear == rsrc ? srcz : dstz;
		unsigned tiled_width = tiled == rsrc ? src_width : dst_width;
		unsigned linear_width = linear == rsrc ? src_width : dst_width;
		unsigned tiled_pitch = tiled == rsrc ? src_pitch : dst_pitch;
		unsigned linear_pitch = linear == rsrc ? src_pitch : dst_pitch;
		unsigned tiled_slice_pitch = tiled == rsrc ? src_slice_pitch : dst_slice_pitch;
		unsigned linear_slice_pitch = linear == rsrc ? src_slice_pitch : dst_slice_pitch;
		uint64_t tiled_address = tiled == rsrc ? src_address : dst_address;
		uint64_t linear_address = linear == rsrc ? src_address : dst_address;
		unsigned tiled_micro_mode = tiled == rsrc ? src_micro_mode : dst_micro_mode;

		assert(tiled_pitch % 8 == 0);
		assert(tiled_slice_pitch % 64 == 0);
		unsigned pitch_tile_max = tiled_pitch / 8 - 1;
		unsigned slice_tile_max = tiled_slice_pitch / 64 - 1;
		unsigned xalign = MAX2(1, 4 / bpp);
		unsigned copy_width_aligned = copy_width;

		/* If the region ends at the last pixel and is unaligned, we
		 * can copy the remainder of the line that is not visible to
		 * make it aligned.
		 */
		if (copy_width % xalign != 0 &&
		    linear_x + copy_width == linear_width &&
		    tiled_x + copy_width == tiled_width &&
		    linear_x + align(copy_width, xalign) <= linear_pitch &&
		    tiled_x + align(copy_width, xalign) <= tiled_pitch)
			copy_width_aligned = align(copy_width, xalign);

		/* HW limitations. */
		if ((sctx->b.family == CHIP_BONAIRE ||
		     sctx->b.family == CHIP_KAVERI) &&
		    linear_pitch - 1 == 0x3fff &&
		    bpp == 16)
			return false;

		if (sctx->b.chip_class == CIK &&
		    (copy_width_aligned == (1 << 14) ||
		     copy_height == (1 << 14) ||
		     copy_depth == (1 << 11)))
			return false;

		if ((sctx->b.family == CHIP_BONAIRE ||
		     sctx->b.family == CHIP_KAVERI ||
		     sctx->b.family == CHIP_KABINI ||
		     sctx->b.family == CHIP_MULLINS) &&
		    (tiled_x + copy_width == (1 << 14) ||
		     tiled_y + copy_height == (1 << 14)))
			return false;

		/* The hw can read outside the given linear buffer bounds,
		 * and in the case of writes it can access (but not modify)
		 * pages beyond them; either way this causes a VM fault.
		 *
		 * Out-of-bounds memory accesses and page directory accesses
		 * must therefore be prevented.
		 */
		int64_t start_linear_address, end_linear_address;
		unsigned granularity;

		/* Deduce the size of reads from the linear surface
		 * (e.g. DISPLAY micro tiling with bpp == 4 reads 128 bits
		 * at a time, i.e. 128 / 32 == 4 elements). */
		switch (tiled_micro_mode) {
		case V_009910_ADDR_SURF_DISPLAY_MICRO_TILING:
			granularity = bpp == 1 ? 64 / (8*bpp) :
					         128 / (8*bpp);
			break;
		case V_009910_ADDR_SURF_THIN_MICRO_TILING:
		case V_009910_ADDR_SURF_DEPTH_MICRO_TILING:
			if (0 /* TODO: THICK microtiling */)
				granularity = bpp == 1 ? 32 / (8*bpp) :
					      bpp == 2 ? 64 / (8*bpp) :
					      bpp <= 8 ? 128 / (8*bpp) :
						         256 / (8*bpp);
			else
				granularity = bpp <= 2 ? 64 / (8*bpp) :
					      bpp <= 8 ? 128 / (8*bpp) :
						         256 / (8*bpp);
			break;
		default:
			return false;
		}

		/* The linear reads start at tiled_x & ~(granularity - 1).
		 * If linear_x == 0 && tiled_x % granularity != 0, the hw
		 * starts reading from an address preceding linear_address!
		 */
		start_linear_address =
			linear->surface.level[linear_level].offset +
			bpp * (linear_z * linear_slice_pitch +
			       linear_y * linear_pitch +
			       linear_x);
		start_linear_address -= (int)(bpp * (tiled_x % granularity));

		end_linear_address =
			linear->surface.level[linear_level].offset +
			bpp * ((linear_z + copy_depth - 1) * linear_slice_pitch +
			       (linear_y + copy_height - 1) * linear_pitch +
			       (linear_x + copy_width));

		if ((tiled_x + copy_width) % granularity)
			end_linear_address += granularity -
					      (tiled_x + copy_width) % granularity;

		if (start_linear_address < 0 ||
		    end_linear_address > linear->surface.bo_size)
			return false;

		/* Check requirements. */
		if (tiled_address % 256 == 0 &&
		    linear_address % 4 == 0 &&
		    linear_pitch % xalign == 0 &&
		    linear_x % xalign == 0 &&
		    tiled_x % xalign == 0 &&
		    copy_width_aligned % xalign == 0 &&
		    tiled_micro_mode != V_009910_ADDR_SURF_ROTATED_MICRO_TILING &&
		    /* check if everything fits into the bitfields */
		    tiled->surface.tile_split <= 4096 &&
		    pitch_tile_max < (1 << 11) &&
		    slice_tile_max < (1 << 22) &&
		    linear_pitch <= (1 << 14) &&
		    linear_slice_pitch <= (1 << 28) &&
		    copy_width_aligned <= (1 << 14) &&
		    copy_height <= (1 << 14) &&
		    copy_depth <= (1 << 11)) {
			struct radeon_winsys_cs *cs = sctx->b.dma.cs;

			r600_need_dma_space(&sctx->b, 14, &rdst->resource, &rsrc->resource);
			radeon_add_to_buffer_list(&sctx->b, &sctx->b.dma, &rsrc->resource,
						  RADEON_USAGE_READ,
						  RADEON_PRIO_SDMA_TEXTURE);
			radeon_add_to_buffer_list(&sctx->b, &sctx->b.dma, &rdst->resource,
						  RADEON_USAGE_WRITE,
						  RADEON_PRIO_SDMA_TEXTURE);

			radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
							CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
				    ((linear == rdst) << 31));
			radeon_emit(cs, tiled_address);
			radeon_emit(cs, tiled_address >> 32);
			radeon_emit(cs, tiled_x | (tiled_y << 16));
			radeon_emit(cs, tiled_z | (pitch_tile_max << 16));
			radeon_emit(cs, slice_tile_max);
			radeon_emit(cs, encode_tile_info(sctx, tiled, tiled_level, true));
			radeon_emit(cs, linear_address);
			radeon_emit(cs, linear_address >> 32);
			radeon_emit(cs, linear_x | (linear_y << 16));
			radeon_emit(cs, linear_z | ((linear_pitch - 1) << 16));
			radeon_emit(cs, linear_slice_pitch - 1);
			if (sctx->b.chip_class == CIK) {
				radeon_emit(cs, copy_width_aligned | (copy_height << 16));
				radeon_emit(cs, copy_depth);
			} else {
				radeon_emit(cs, (copy_width_aligned - 1) | ((copy_height - 1) << 16));
				radeon_emit(cs, (copy_depth - 1));
			}

			r600_dma_emit_wait_idle(&sctx->b);
			return true;
		}
	}

	/* Tiled -> Tiled sub-window copy. */
	if (dst_mode >= RADEON_SURF_MODE_1D &&
	    src_mode >= RADEON_SURF_MODE_1D &&
	    /* check if these fit into the bitfields */
	    src_address % 256 == 0 &&
	    dst_address % 256 == 0 &&
	    rsrc->surface.tile_split <= 4096 &&
	    rdst->surface.tile_split <= 4096 &&
	    dstx % 8 == 0 &&
	    dsty % 8 == 0 &&
	    srcx % 8 == 0 &&
	    srcy % 8 == 0 &&
	    /* this can either be equal, or display->rotated (VI only) */
	    (src_micro_mode == dst_micro_mode ||
	     (sctx->b.chip_class == VI &&
	      src_micro_mode == V_009910_ADDR_SURF_DISPLAY_MICRO_TILING &&
	      dst_micro_mode == V_009910_ADDR_SURF_ROTATED_MICRO_TILING))) {
		assert(src_pitch % 8 == 0);
		assert(dst_pitch % 8 == 0);
		assert(src_slice_pitch % 64 == 0);
		assert(dst_slice_pitch % 64 == 0);
		unsigned src_pitch_tile_max = src_pitch / 8 - 1;
		unsigned dst_pitch_tile_max = dst_pitch / 8 - 1;
		unsigned src_slice_tile_max = src_slice_pitch / 64 - 1;
		unsigned dst_slice_tile_max = dst_slice_pitch / 64 - 1;
		unsigned copy_width_aligned = copy_width;
		unsigned copy_height_aligned = copy_height;

		/* If the region ends at the last pixel and is unaligned, we
		 * can copy the remainder of the tile that is not visible to
		 * make it aligned.
		 */
		if (copy_width % 8 != 0 &&
		    srcx + copy_width == src_width &&
		    dstx + copy_width == dst_width)
			copy_width_aligned = align(copy_width, 8);

		if (copy_height % 8 != 0 &&
		    srcy + copy_height == src_height &&
		    dsty + copy_height == dst_height)
			copy_height_aligned = align(copy_height, 8);

		/* check if these fit into the bitfields */
		if (src_pitch_tile_max < (1 << 11) &&
		    dst_pitch_tile_max < (1 << 11) &&
		    src_slice_tile_max < (1 << 22) &&
		    dst_slice_tile_max < (1 << 22) &&
		    copy_width_aligned <= (1 << 14) &&
		    copy_height_aligned <= (1 << 14) &&
		    copy_depth <= (1 << 11) &&
		    copy_width_aligned % 8 == 0 &&
		    copy_height_aligned % 8 == 0 &&
		    /* HW limitation - CIK: */
		    (sctx->b.chip_class != CIK ||
		     (copy_width_aligned < (1 << 14) &&
		      copy_height_aligned < (1 << 14) &&
		      copy_depth < (1 << 11))) &&
		    /* HW limitation - some CIK parts: */
		    ((sctx->b.family != CHIP_BONAIRE &&
		      sctx->b.family != CHIP_KAVERI &&
		      sctx->b.family != CHIP_KABINI &&
		      sctx->b.family != CHIP_MULLINS) ||
		     (srcx + copy_width_aligned != (1 << 14) &&
		      srcy + copy_height_aligned != (1 << 14) &&
		      dstx + copy_width != (1 << 14)))) {
			struct radeon_winsys_cs *cs = sctx->b.dma.cs;

			r600_need_dma_space(&sctx->b, 15, &rdst->resource, &rsrc->resource);
			radeon_add_to_buffer_list(&sctx->b, &sctx->b.dma, &rsrc->resource,
						  RADEON_USAGE_READ,
						  RADEON_PRIO_SDMA_TEXTURE);
			radeon_add_to_buffer_list(&sctx->b, &sctx->b.dma, &rdst->resource,
						  RADEON_USAGE_WRITE,
						  RADEON_PRIO_SDMA_TEXTURE);

			radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
							CIK_SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0));
			radeon_emit(cs, src_address);
			radeon_emit(cs, src_address >> 32);
			radeon_emit(cs, srcx | (srcy << 16));
			radeon_emit(cs, srcz | (src_pitch_tile_max << 16));
			radeon_emit(cs, src_slice_tile_max);
			radeon_emit(cs, encode_tile_info(sctx, rsrc, src_level, true));
			radeon_emit(cs, dst_address);
			radeon_emit(cs, dst_address >> 32);
			radeon_emit(cs, dstx | (dsty << 16));
			radeon_emit(cs, dstz | (dst_pitch_tile_max << 16));
			radeon_emit(cs, dst_slice_tile_max);
			radeon_emit(cs, encode_tile_info(sctx, rdst, dst_level, false));
			if (sctx->b.chip_class == CIK) {
				radeon_emit(cs, copy_width_aligned |
					    (copy_height_aligned << 16));
				radeon_emit(cs, copy_depth);
			} else {
				radeon_emit(cs, (copy_width_aligned - 8) |
					    ((copy_height_aligned - 8) << 16));
				radeon_emit(cs, (copy_depth - 1));
			}

			r600_dma_emit_wait_idle(&sctx->b);
			return true;
		}
	}

	return false;
}

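/* Top-level SDMA copy callback. Buffer copies and the texture copies
 * supported by cik_sdma_copy_texture go through the DMA queue; everything
 * else falls back to the generic si_resource_copy_region path.
 */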
static void cik_sdma_copy(struct pipe_context *ctx,
			  struct pipe_resource *dst,
			  unsigned dst_level,
			  unsigned dstx, unsigned dsty, unsigned dstz,
			  struct pipe_resource *src,
			  unsigned src_level,
			  const struct pipe_box *src_box)
{
	struct si_context *sctx = (struct si_context *)ctx;

	if (!sctx->b.dma.cs)
		goto fallback;

	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
		cik_sdma_copy_buffer(sctx, dst, src, dstx, src_box->x, src_box->width);
		return;
	}

	if (cik_sdma_copy_texture(sctx, dst, dst_level, dstx, dsty, dstz,
				  src, src_level, src_box))
		return;

fallback:
	si_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
				src, src_level, src_box);
}

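/* Hook the SDMA copy implementation into the context (CIK and later). */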
void cik_init_sdma_functions(struct si_context *sctx)
{
	sctx->b.dma_copy = cik_sdma_copy;
}