radeonsi: only expose *_init_*dma_functions from (S)DMA files
[mesa.git] / src / gallium / drivers / radeonsi / si_dma.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */

#include "sid.h"
#include "si_pipe.h"
#include "radeon/r600_cs.h"

#include "util/u_format.h"

static void si_dma_copy_buffer(struct si_context *ctx,
			       struct pipe_resource *dst,
			       struct pipe_resource *src,
			       uint64_t dst_offset,
			       uint64_t src_offset,
			       uint64_t size)
{
	struct radeon_winsys_cs *cs = ctx->b.dma.cs;
	unsigned i, ncopy, csize, max_csize, sub_cmd, shift;
	struct r600_resource *rdst = (struct r600_resource*)dst;
	struct r600_resource *rsrc = (struct r600_resource*)src;

	/* Mark the buffer range of the destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&rdst->valid_buffer_range, dst_offset,
		       dst_offset + size);

	dst_offset += rdst->gpu_address;
	src_offset += rsrc->gpu_address;

	/* see if we use dword or byte copy */
	if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4)) {
		size >>= 2;
		sub_cmd = SI_DMA_COPY_DWORD_ALIGNED;
		shift = 2;
		max_csize = SI_DMA_COPY_MAX_SIZE_DW;
	} else {
		sub_cmd = SI_DMA_COPY_BYTE_ALIGNED;
		shift = 0;
		max_csize = SI_DMA_COPY_MAX_SIZE;
	}
	ncopy = (size / max_csize) + !!(size % max_csize);

	r600_need_dma_space(&ctx->b, ncopy * 5);

	radeon_add_to_buffer_list(&ctx->b, &ctx->b.dma, rsrc, RADEON_USAGE_READ,
				  RADEON_PRIO_SDMA_BUFFER);
	radeon_add_to_buffer_list(&ctx->b, &ctx->b.dma, rdst, RADEON_USAGE_WRITE,
				  RADEON_PRIO_SDMA_BUFFER);

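	/* Emit one COPY packet per chunk; each packet is 5 dwords:
	 * header, low dst/src addresses, then the high address bits. */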
	for (i = 0; i < ncopy; i++) {
		csize = size < max_csize ? size : max_csize;
		cs->buf[cs->cdw++] = SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd, csize);
		cs->buf[cs->cdw++] = dst_offset;
		cs->buf[cs->cdw++] = src_offset;
		cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
		cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
		dst_offset += csize << shift;
		src_offset += csize << shift;
		size -= csize;
	}
}

static void si_dma_copy_tile(struct si_context *ctx,
			     struct pipe_resource *dst,
			     unsigned dst_level,
			     unsigned dst_x,
			     unsigned dst_y,
			     unsigned dst_z,
			     struct pipe_resource *src,
			     unsigned src_level,
			     unsigned src_x,
			     unsigned src_y,
			     unsigned src_z,
			     unsigned copy_height,
			     unsigned pitch,
			     unsigned bpp)
{
	struct radeon_winsys_cs *cs = ctx->b.dma.cs;
	struct r600_texture *rsrc = (struct r600_texture*)src;
	struct r600_texture *rdst = (struct r600_texture*)dst;
	unsigned dst_mode = rdst->surface.level[dst_level].mode;
	unsigned src_mode = rsrc->surface.level[src_level].mode;
	bool detile = dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED;
	struct r600_texture *rlinear = detile ? rdst : rsrc;
	struct r600_texture *rtiled = detile ? rsrc : rdst;
	unsigned linear_lvl = detile ? dst_level : src_level;
	unsigned tiled_lvl = detile ? src_level : dst_level;
	struct radeon_info *info = &ctx->screen->b.info;
	unsigned index = rtiled->surface.tiling_index[tiled_lvl];
	unsigned tile_mode = info->si_tile_mode_array[index];
	unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
	unsigned ncopy, height, cheight, i;
	unsigned linear_x, linear_y, linear_z, tiled_x, tiled_y, tiled_z;
	unsigned sub_cmd, bank_h, bank_w, mt_aspect, nbanks, tile_split, mt;
	uint64_t base, addr;
	unsigned pipe_config;

	assert(dst_mode != src_mode);

	sub_cmd = SI_DMA_COPY_TILED;
	lbpp = util_logbase2(bpp);
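	/* pitch is in bytes; the packet wants it in units of 8 texels,
	 * minus one. */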
	pitch_tile_max = ((pitch / bpp) / 8) - 1;

	linear_x = detile ? dst_x : src_x;
	linear_y = detile ? dst_y : src_y;
	linear_z = detile ? dst_z : src_z;
	tiled_x = detile ? src_x : dst_x;
	tiled_y = detile ? src_y : dst_y;
	tiled_z = detile ? src_z : dst_z;

	assert(!util_format_is_depth_and_stencil(rtiled->resource.b.b.format));

	array_mode = G_009910_ARRAY_MODE(tile_mode);
	slice_tile_max = (rtiled->surface.level[tiled_lvl].nblk_x *
			  rtiled->surface.level[tiled_lvl].nblk_y) / (8*8) - 1;
	/* The linear height must be the same as the slice tile max height.
	 * This is fine even if the linear destination/source has a smaller
	 * height, because the DMA packet size uses copy_height, which is
	 * always smaller than or equal to the linear height.
	 */
	height = rtiled->surface.level[tiled_lvl].nblk_y;
	base = rtiled->surface.level[tiled_lvl].offset;
	addr = rlinear->surface.level[linear_lvl].offset;
	addr += rlinear->surface.level[linear_lvl].slice_size * linear_z;
	addr += linear_y * pitch + linear_x * bpp;
	bank_h = G_009910_BANK_HEIGHT(tile_mode);
	bank_w = G_009910_BANK_WIDTH(tile_mode);
	mt_aspect = G_009910_MACRO_TILE_ASPECT(tile_mode);
	/* Non-depth modes don't have TILE_SPLIT set. */
	tile_split = util_logbase2(rtiled->surface.tile_split >> 6);
	nbanks = G_009910_NUM_BANKS(tile_mode);
	base += rtiled->resource.gpu_address;
	addr += rlinear->resource.gpu_address;

	pipe_config = G_009910_PIPE_CONFIG(tile_mode);
	mt = G_009910_MICRO_TILE_MODE(tile_mode);
	size = (copy_height * pitch) / 4;
	ncopy = (size / SI_DMA_COPY_MAX_SIZE_DW) + !!(size % SI_DMA_COPY_MAX_SIZE_DW);
	r600_need_dma_space(&ctx->b, ncopy * 9);

	radeon_add_to_buffer_list(&ctx->b, &ctx->b.dma, &rsrc->resource,
				  RADEON_USAGE_READ, RADEON_PRIO_SDMA_TEXTURE);
	radeon_add_to_buffer_list(&ctx->b, &ctx->b.dma, &rdst->resource,
				  RADEON_USAGE_WRITE, RADEON_PRIO_SDMA_TEXTURE);

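	/* Emit one 9-dword tiled COPY packet per chunk. The per-packet height
	 * is clamped so the dword count stays within SI_DMA_COPY_MAX_SIZE_DW. */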
	for (i = 0; i < ncopy; i++) {
		cheight = copy_height;
		if (((cheight * pitch) / 4) > SI_DMA_COPY_MAX_SIZE_DW) {
			cheight = (SI_DMA_COPY_MAX_SIZE_DW * 4) / pitch;
		}
		size = (cheight * pitch) / 4;
		cs->buf[cs->cdw++] = SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd, size);
		cs->buf[cs->cdw++] = base >> 8;
		cs->buf[cs->cdw++] = (detile << 31) | (array_mode << 27) |
				     (lbpp << 24) | (bank_h << 21) |
				     (bank_w << 18) | (mt_aspect << 16);
		cs->buf[cs->cdw++] = (pitch_tile_max << 0) | ((height - 1) << 16);
		cs->buf[cs->cdw++] = (slice_tile_max << 0) | (pipe_config << 26);
		cs->buf[cs->cdw++] = (tiled_x << 0) | (tiled_z << 18);
		cs->buf[cs->cdw++] = (tiled_y << 0) | (tile_split << 21) | (nbanks << 25) | (mt << 27);
		cs->buf[cs->cdw++] = addr & 0xfffffffc;
		cs->buf[cs->cdw++] = (addr >> 32UL) & 0xff;
		copy_height -= cheight;
		addr += cheight * pitch;
		tiled_y += cheight;
	}
}

static void si_dma_copy(struct pipe_context *ctx,
			struct pipe_resource *dst,
			unsigned dst_level,
			unsigned dstx, unsigned dsty, unsigned dstz,
			struct pipe_resource *src,
			unsigned src_level,
			const struct pipe_box *src_box)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct r600_texture *rsrc = (struct r600_texture*)src;
	struct r600_texture *rdst = (struct r600_texture*)dst;
	unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode;
	unsigned src_w, dst_w;
	unsigned src_x, src_y;
	unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;

	if (sctx->b.dma.cs == NULL) {
		goto fallback;
	}

	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
		si_dma_copy_buffer(sctx, dst, src, dst_x, src_box->x, src_box->width);
		return;
	}

	/* XXX: Using the asynchronous DMA engine for multi-dimensional
	 * operations seems to cause random GPU lockups for various people.
	 * While the root cause for this might need to be fixed in the kernel,
	 * let's disable it for now.
	 *
	 * Before re-enabling this, please make sure you can hit all newly
	 * enabled paths in your testing, preferably with both piglit and real
	 * world apps, and get in touch with people on the bug reports below
	 * for stability testing.
	 *
	 * https://bugs.freedesktop.org/show_bug.cgi?id=85647
	 * https://bugs.freedesktop.org/show_bug.cgi?id=83500
	 */
	goto fallback;
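	/* Note: because of the unconditional fallback above, the texture DMA
	 * path below is currently unreachable. */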

	if (src_box->depth > 1 ||
	    !r600_prepare_for_dma_blit(&sctx->b, rdst, dst_level, dstx, dsty,
				       dstz, rsrc, src_level, src_box))
		goto fallback;

	src_x = util_format_get_nblocksx(src->format, src_box->x);
	dst_x = util_format_get_nblocksx(src->format, dst_x);
	src_y = util_format_get_nblocksy(src->format, src_box->y);
	dst_y = util_format_get_nblocksy(src->format, dst_y);

	bpp = rdst->surface.bpe;
	dst_pitch = rdst->surface.level[dst_level].pitch_bytes;
	src_pitch = rsrc->surface.level[src_level].pitch_bytes;
	src_w = rsrc->surface.level[src_level].npix_x;
	dst_w = rdst->surface.level[dst_level].npix_x;

	dst_mode = rdst->surface.level[dst_level].mode;
	src_mode = rsrc->surface.level[src_level].mode;

	if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w ||
	    src_box->width != src_w ||
	    src_box->height != rsrc->surface.level[src_level].npix_y ||
	    src_box->height != rdst->surface.level[dst_level].npix_y ||
	    rsrc->surface.level[src_level].nblk_y !=
	    rdst->surface.level[dst_level].nblk_y) {
		/* FIXME si can do partial blit */
		goto fallback;
	}
	/* The x tests here are currently useless (because we don't support
	 * partial blits), but keep them around so we don't forget about them.
	 */
	if ((src_pitch % 8) || (src_box->x % 8) || (dst_x % 8) ||
	    (src_box->y % 8) || (dst_y % 8) || (src_box->height % 8)) {
		goto fallback;
	}

	if (src_mode == dst_mode) {
		uint64_t dst_offset, src_offset;
		/* A simple DMA blit will do. NOTE: the code here assumes:
		 *   src_box.x/y == 0
		 *   dst_x/y == 0
		 *   dst_pitch == src_pitch
		 */
		src_offset = rsrc->surface.level[src_level].offset;
		src_offset += rsrc->surface.level[src_level].slice_size * src_box->z;
		src_offset += src_y * src_pitch + src_x * bpp;
		dst_offset = rdst->surface.level[dst_level].offset;
		dst_offset += rdst->surface.level[dst_level].slice_size * dst_z;
		dst_offset += dst_y * dst_pitch + dst_x * bpp;
		si_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset,
				   rsrc->surface.level[src_level].slice_size);
	} else {
		si_dma_copy_tile(sctx, dst, dst_level, dst_x, dst_y, dst_z,
				 src, src_level, src_x, src_y, src_box->z,
				 src_box->height / rsrc->surface.blk_h,
				 dst_pitch, bpp);
	}
	return;

fallback:
	si_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
				src, src_level, src_box);
}

void si_init_dma_functions(struct si_context *sctx)
{
	sctx->b.dma_copy = si_dma_copy;
}