radeonsi: initialize SX_PS_DOWNCONVERT to 0 on Stoney
[mesa.git] / src / gallium / drivers / radeonsi / si_dma.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */

#include "sid.h"
#include "si_pipe.h"
#include "radeon/r600_cs.h"

#include "util/u_format.h"

static uint32_t si_micro_tile_mode(struct si_screen *sscreen, unsigned tile_mode)
{
	if (sscreen->b.info.si_tile_mode_array_valid) {
		uint32_t gb_tile_mode = sscreen->b.info.si_tile_mode_array[tile_mode];

		return G_009910_MICRO_TILE_MODE(gb_tile_mode);
	}

	/* The kernel cannot return the tile mode array. Make a reasonable guess. */
	return V_009910_ADDR_SURF_THIN_MICRO_TILING;
}

static void si_dma_copy_buffer(struct si_context *ctx,
			       struct pipe_resource *dst,
			       struct pipe_resource *src,
			       uint64_t dst_offset,
			       uint64_t src_offset,
			       uint64_t size)
{
	struct radeon_winsys_cs *cs = ctx->b.rings.dma.cs;
	unsigned i, ncopy, csize, max_csize, sub_cmd, shift;
	struct r600_resource *rdst = (struct r600_resource*)dst;
	struct r600_resource *rsrc = (struct r600_resource*)src;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&rdst->valid_buffer_range, dst_offset,
		       dst_offset + size);

	dst_offset += rdst->gpu_address;
	src_offset += rsrc->gpu_address;

	/* See whether we can use the dword-aligned copy or must fall back
	 * to the byte-aligned one. */
	if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4)) {
		size >>= 2;
		sub_cmd = SI_DMA_COPY_DWORD_ALIGNED;
		shift = 2;
		max_csize = SI_DMA_COPY_MAX_SIZE_DW;
	} else {
		sub_cmd = SI_DMA_COPY_BYTE_ALIGNED;
		shift = 0;
		max_csize = SI_DMA_COPY_MAX_SIZE;
	}
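	/* ncopy = ceil(size / max_csize): the integer quotient, plus one
	 * more packet if there is a remainder. */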
	ncopy = (size / max_csize) + !!(size % max_csize);

	r600_need_dma_space(&ctx->b, ncopy * 5);

	radeon_add_to_buffer_list(&ctx->b, &ctx->b.rings.dma, rsrc, RADEON_USAGE_READ,
				  RADEON_PRIO_SDMA_BUFFER);
	radeon_add_to_buffer_list(&ctx->b, &ctx->b.rings.dma, rdst, RADEON_USAGE_WRITE,
				  RADEON_PRIO_SDMA_BUFFER);

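	/* Each copy packet is 5 dwords, matching the ncopy * 5 reservation
	 * above: header (with the chunk size), the low 32 bits of the dst
	 * and src addresses, then the high 8 bits of each. */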
	for (i = 0; i < ncopy; i++) {
		csize = size < max_csize ? size : max_csize;
		cs->buf[cs->cdw++] = SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd, csize);
		cs->buf[cs->cdw++] = dst_offset;
		cs->buf[cs->cdw++] = src_offset;
		cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
		cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
		dst_offset += csize << shift;
		src_offset += csize << shift;
		size -= csize;
	}
}

static void si_dma_copy_tile(struct si_context *ctx,
			     struct pipe_resource *dst,
			     unsigned dst_level,
			     unsigned dst_x,
			     unsigned dst_y,
			     unsigned dst_z,
			     struct pipe_resource *src,
			     unsigned src_level,
			     unsigned src_x,
			     unsigned src_y,
			     unsigned src_z,
			     unsigned copy_height,
			     unsigned pitch,
			     unsigned bpp)
{
	struct radeon_winsys_cs *cs = ctx->b.rings.dma.cs;
	struct si_screen *sscreen = ctx->screen;
	struct r600_texture *rsrc = (struct r600_texture*)src;
	struct r600_texture *rdst = (struct r600_texture*)dst;
	struct r600_texture *rlinear, *rtiled;
	unsigned linear_lvl, tiled_lvl;
	unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
	unsigned ncopy, height, cheight, detile, i, src_mode, dst_mode;
	unsigned linear_x, linear_y, linear_z, tiled_x, tiled_y, tiled_z;
	unsigned sub_cmd, bank_h, bank_w, mt_aspect, nbanks, tile_split, mt;
	uint64_t base, addr;
	unsigned pipe_config, tile_mode_index;

	dst_mode = rdst->surface.level[dst_level].mode;
	src_mode = rsrc->surface.level[src_level].mode;
	/* Treat LINEAR_ALIGNED as LINEAR to simplify the checks below. */
	src_mode = src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : src_mode;
	dst_mode = dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : dst_mode;
	assert(dst_mode != src_mode);

	sub_cmd = SI_DMA_COPY_TILED;
	lbpp = util_logbase2(bpp);
	pitch_tile_max = ((pitch / bpp) / 8) - 1;

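	/* Exactly one side is tiled (asserted above). detile is set when the
	 * destination is linear, i.e. the engine reads from the tiled surface
	 * and writes out linearly. */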
	detile = dst_mode == RADEON_SURF_MODE_LINEAR;
	rlinear = detile ? rdst : rsrc;
	rtiled = detile ? rsrc : rdst;
	linear_lvl = detile ? dst_level : src_level;
	tiled_lvl = detile ? src_level : dst_level;
	linear_x = detile ? dst_x : src_x;
	linear_y = detile ? dst_y : src_y;
	linear_z = detile ? dst_z : src_z;
	tiled_x = detile ? src_x : dst_x;
	tiled_y = detile ? src_y : dst_y;
	tiled_z = detile ? src_z : dst_z;

	assert(!util_format_is_depth_and_stencil(rtiled->resource.b.b.format));

	array_mode = si_array_mode(rtiled->surface.level[tiled_lvl].mode);
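	/* nblk_x/nblk_y count the level's blocks; a tile is 8x8 blocks,
	 * hence the division by 8*8. */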
	slice_tile_max = (rtiled->surface.level[tiled_lvl].nblk_x *
			  rtiled->surface.level[tiled_lvl].nblk_y) / (8*8) - 1;
	/* The linear height must be the same as the slice tile max height.
	 * This is fine even if the linear destination/source has a smaller
	 * height, because the DMA packet size uses copy_height, which is
	 * always less than or equal to the linear height.
	 */
	height = rtiled->surface.level[tiled_lvl].nblk_y;
	base = rtiled->surface.level[tiled_lvl].offset;
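	/* Byte address of the first element to copy within the linear level:
	 * level offset + slice offset + row/column offset. */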
	addr = rlinear->surface.level[linear_lvl].offset;
	addr += rlinear->surface.level[linear_lvl].slice_size * linear_z;
	addr += linear_y * pitch + linear_x * bpp;
	bank_h = cik_bank_wh(rtiled->surface.bankh);
	bank_w = cik_bank_wh(rtiled->surface.bankw);
	mt_aspect = cik_macro_tile_aspect(rtiled->surface.mtilea);
	tile_split = cik_tile_split(rtiled->surface.tile_split);
	tile_mode_index = si_tile_mode_index(rtiled, tiled_lvl, false);
	nbanks = si_num_banks(sscreen, rtiled);
	base += rtiled->resource.gpu_address;
	addr += rlinear->resource.gpu_address;

	pipe_config = cik_db_pipe_config(sscreen, tile_mode_index);
	mt = si_micro_tile_mode(sscreen, tile_mode_index);
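	/* The DMA copy size is expressed in dwords while pitch is in bytes,
	 * hence the division by 4. */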
	size = (copy_height * pitch) / 4;
	ncopy = (size / SI_DMA_COPY_MAX_SIZE_DW) + !!(size % SI_DMA_COPY_MAX_SIZE_DW);
	r600_need_dma_space(&ctx->b, ncopy * 9);

	radeon_add_to_buffer_list(&ctx->b, &ctx->b.rings.dma, &rsrc->resource,
				  RADEON_USAGE_READ, RADEON_PRIO_SDMA_TEXTURE);
	radeon_add_to_buffer_list(&ctx->b, &ctx->b.rings.dma, &rdst->resource,
				  RADEON_USAGE_WRITE, RADEON_PRIO_SDMA_TEXTURE);

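	/* Each tiled copy packet is 9 dwords, matching the ncopy * 9
	 * reservation above. A copy larger than SI_DMA_COPY_MAX_SIZE_DW is
	 * split into horizontal bands of cheight rows each. */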
	for (i = 0; i < ncopy; i++) {
		cheight = copy_height;
		if (((cheight * pitch) / 4) > SI_DMA_COPY_MAX_SIZE_DW) {
			cheight = (SI_DMA_COPY_MAX_SIZE_DW * 4) / pitch;
		}
		size = (cheight * pitch) / 4;
		cs->buf[cs->cdw++] = SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd, size);
		cs->buf[cs->cdw++] = base >> 8;
		cs->buf[cs->cdw++] = (detile << 31) | (array_mode << 27) |
				     (lbpp << 24) | (bank_h << 21) |
				     (bank_w << 18) | (mt_aspect << 16);
		cs->buf[cs->cdw++] = (pitch_tile_max << 0) | ((height - 1) << 16);
		cs->buf[cs->cdw++] = (slice_tile_max << 0) | (pipe_config << 26);
		cs->buf[cs->cdw++] = (tiled_x << 0) | (tiled_z << 18);
		cs->buf[cs->cdw++] = (tiled_y << 0) | (tile_split << 21) | (nbanks << 25) | (mt << 27);
		cs->buf[cs->cdw++] = addr & 0xfffffffc;
		cs->buf[cs->cdw++] = (addr >> 32UL) & 0xff;
		copy_height -= cheight;
		addr += cheight * pitch;
		tiled_y += cheight;
	}
}

void si_dma_copy(struct pipe_context *ctx,
		 struct pipe_resource *dst,
		 unsigned dst_level,
		 unsigned dstx, unsigned dsty, unsigned dstz,
		 struct pipe_resource *src,
		 unsigned src_level,
		 const struct pipe_box *src_box)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct r600_texture *rsrc = (struct r600_texture*)src;
	struct r600_texture *rdst = (struct r600_texture*)dst;
	unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode;
	unsigned src_w, dst_w;
	unsigned src_x, src_y;
	unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;

	if (sctx->b.rings.dma.cs == NULL) {
		goto fallback;
	}

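	/* Buffer-to-buffer copies always take the SDMA path. */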
	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
		si_dma_copy_buffer(sctx, dst, src, dst_x, src_box->x, src_box->width);
		return;
	}

	/* XXX: Using the asynchronous DMA engine for multi-dimensional
	 * operations seems to cause random GPU lockups for various people.
	 * While the root cause for this might need to be fixed in the kernel,
	 * let's disable it for now.
	 *
	 * Before re-enabling this, please make sure you can hit all newly
	 * enabled paths in your testing, preferably with both piglit and real
	 * world apps, and get in touch with people on the bug reports below
	 * for stability testing.
	 *
	 * https://bugs.freedesktop.org/show_bug.cgi?id=85647
	 * https://bugs.freedesktop.org/show_bug.cgi?id=83500
	 */
	goto fallback;

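	/* NOTE: everything below is unreachable because of the unconditional
	 * goto above. It is kept intact for when this path is re-enabled. */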
	if (src->format != dst->format || src_box->depth > 1 ||
	    (rdst->dirty_level_mask | rdst->stencil_dirty_level_mask) & (1 << dst_level) ||
	    rdst->cmask.size || rdst->fmask.size ||
	    rsrc->cmask.size || rsrc->fmask.size ||
	    rdst->dcc_buffer || rsrc->dcc_buffer) {
		goto fallback;
	}

	if (rsrc->dirty_level_mask & (1 << src_level)) {
		ctx->flush_resource(ctx, src);
	}

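	/* Convert pixel coordinates to block coordinates; for compressed
	 * formats, one block covers several pixels. */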
	src_x = util_format_get_nblocksx(src->format, src_box->x);
	dst_x = util_format_get_nblocksx(src->format, dst_x);
	src_y = util_format_get_nblocksy(src->format, src_box->y);
	dst_y = util_format_get_nblocksy(src->format, dst_y);

	bpp = rdst->surface.bpe;
	dst_pitch = rdst->surface.level[dst_level].pitch_bytes;
	src_pitch = rsrc->surface.level[src_level].pitch_bytes;
	src_w = rsrc->surface.level[src_level].npix_x;
	dst_w = rdst->surface.level[dst_level].npix_x;

	dst_mode = rdst->surface.level[dst_level].mode;
	src_mode = rsrc->surface.level[src_level].mode;
	/* Treat LINEAR_ALIGNED as LINEAR to simplify the checks below. */
	src_mode = src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : src_mode;
	dst_mode = dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : dst_mode;

	if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w ||
	    src_box->width != src_w ||
	    src_box->height != rsrc->surface.level[src_level].npix_y ||
	    src_box->height != rdst->surface.level[dst_level].npix_y ||
	    rsrc->surface.level[src_level].nblk_y !=
	    rdst->surface.level[dst_level].nblk_y) {
		/* FIXME: SI can do partial blits. */
		goto fallback;
	}
	/* The x tests here are currently useless (because we don't support
	 * partial blits), but keep them around so we don't forget about them.
	 */
	if ((src_pitch % 8) || (src_box->x % 8) || (dst_x % 8) ||
	    (src_box->y % 8) || (dst_y % 8) || (src_box->height % 8)) {
		goto fallback;
	}

	if (src_mode == dst_mode) {
		uint64_t dst_offset, src_offset;
		/* A simple DMA blit will do. NOTE: the code here assumes
		 * src_box->x/y == 0, dst_x/y == 0, and dst_pitch == src_pitch.
		 */
		src_offset = rsrc->surface.level[src_level].offset;
		src_offset += rsrc->surface.level[src_level].slice_size * src_box->z;
		src_offset += src_y * src_pitch + src_x * bpp;
		dst_offset = rdst->surface.level[dst_level].offset;
		dst_offset += rdst->surface.level[dst_level].slice_size * dst_z;
		dst_offset += dst_y * dst_pitch + dst_x * bpp;
		si_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset,
				   rsrc->surface.level[src_level].slice_size);
	} else {
		si_dma_copy_tile(sctx, dst, dst_level, dst_x, dst_y, dst_z,
				 src, src_level, src_x, src_y, src_box->z,
				 src_box->height / rsrc->surface.blk_h,
				 dst_pitch, bpp);
	}
	return;

fallback:
	si_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
				src, src_level, src_box);
}