/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "sid.h"

#include "util/u_format.h"
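
/* Linear buffer-to-buffer copy on the async DMA (SDMA) ring: splits the
 * range into packets of at most max_size bytes and uses the dword-aligned
 * sub-command when the offsets and size allow it.
 */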
static void si_dma_copy_buffer(struct si_context *ctx,
			       struct pipe_resource *dst,
			       struct pipe_resource *src,
			       uint64_t dst_offset,
			       uint64_t src_offset,
			       uint64_t size)
{
	struct radeon_cmdbuf *cs = ctx->dma_cs;
	unsigned i, ncopy, count, max_size, sub_cmd, shift;
	struct r600_resource *rdst = r600_resource(dst);
	struct r600_resource *rsrc = r600_resource(src);

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range.
	 */
	util_range_add(&rdst->valid_buffer_range, dst_offset,
		       dst_offset + size);

	dst_offset += rdst->gpu_address;
	src_offset += rsrc->gpu_address;

	/* see whether we should use the dword-aligned or byte-aligned copy */
	if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4)) {
		sub_cmd = SI_DMA_COPY_DWORD_ALIGNED;
		shift = 2;
		max_size = SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE;
	} else {
		sub_cmd = SI_DMA_COPY_BYTE_ALIGNED;
		shift = 0;
		max_size = SI_DMA_COPY_MAX_BYTE_ALIGNED_SIZE;
	}

	ncopy = DIV_ROUND_UP(size, max_size);
	si_need_dma_space(ctx, ncopy * 5, rdst, rsrc);

	for (i = 0; i < ncopy; i++) {
		count = MIN2(size, max_size);
		/* the count is in dwords for the dword-aligned sub-command */
		radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd,
					      count >> shift));
		radeon_emit(cs, dst_offset);
		radeon_emit(cs, src_offset);
		radeon_emit(cs, (dst_offset >> 32UL) & 0xff);
		radeon_emit(cs, (src_offset >> 32UL) & 0xff);
		dst_offset += count;
		src_offset += count;
		size -= count;
	}
}
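
/* Fill a dword-aligned, dword-sized buffer range with a 32-bit value on the
 * DMA ring; unaligned or sparse destinations take the generic clear_buffer
 * fallback instead.
 */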
static void si_dma_clear_buffer(struct si_context *sctx,
				struct pipe_resource *dst,
				uint64_t offset,
				uint64_t size,
				unsigned clear_value)
{
	struct radeon_cmdbuf *cs = sctx->dma_cs;
	unsigned i, ncopy, csize;
	struct r600_resource *rdst = r600_resource(dst);

	if (!cs || offset % 4 != 0 || size % 4 != 0 ||
	    dst->flags & PIPE_RESOURCE_FLAG_SPARSE) {
		sctx->b.clear_buffer(&sctx->b, dst, offset, size, &clear_value, 4);
		return;
	}

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range.
	 */
	util_range_add(&rdst->valid_buffer_range, offset, offset + size);

	offset += rdst->gpu_address;

	/* the same maximum size as for copying */
	ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
	si_need_dma_space(sctx, ncopy * 4, rdst, NULL);

	for (i = 0; i < ncopy; i++) {
		csize = MIN2(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
		radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_CONSTANT_FILL, 0,
					      csize / 4));
		radeon_emit(cs, offset);
		radeon_emit(cs, clear_value);
		radeon_emit(cs, (offset >> 32) << 16);
		offset += csize;
		size -= csize;
	}
}
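
/* Copy between a linear and a tiled texture level with the tiled-copy DMA
 * packet. Exactly one of the two surfaces is linear; "detile" is set when
 * the destination is the linear one.
 */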
static void si_dma_copy_tile(struct si_context *ctx,
			     struct pipe_resource *dst,
			     unsigned dst_level,
			     unsigned dst_x,
			     unsigned dst_y,
			     unsigned dst_z,
			     struct pipe_resource *src,
			     unsigned src_level,
			     unsigned src_x,
			     unsigned src_y,
			     unsigned src_z,
			     unsigned copy_height,
			     unsigned pitch,
			     unsigned bpp)
{
	struct radeon_cmdbuf *cs = ctx->dma_cs;
	struct si_texture *ssrc = (struct si_texture*)src;
	struct si_texture *sdst = (struct si_texture*)dst;
	unsigned dst_mode = sdst->surface.u.legacy.level[dst_level].mode;
	bool detile = dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED;
	struct si_texture *linear = detile ? sdst : ssrc;
	struct si_texture *tiled = detile ? ssrc : sdst;
	unsigned linear_lvl = detile ? dst_level : src_level;
	unsigned tiled_lvl = detile ? src_level : dst_level;
	struct radeon_info *info = &ctx->screen->info;
	unsigned index = tiled->surface.u.legacy.tiling_index[tiled_lvl];
	unsigned tile_mode = info->si_tile_mode_array[index];
	unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
	unsigned ncopy, height, cheight, i;
	unsigned linear_x, linear_y, linear_z, tiled_x, tiled_y, tiled_z;
	unsigned sub_cmd, bank_h, bank_w, mt_aspect, nbanks, tile_split, mt;
	uint64_t base, addr;
	unsigned pipe_config;

	assert(dst_mode != ssrc->surface.u.legacy.level[src_level].mode);

	sub_cmd = SI_DMA_COPY_TILED;
	lbpp = util_logbase2(bpp);
	pitch_tile_max = ((pitch / bpp) / 8) - 1;

	linear_x = detile ? dst_x : src_x;
	linear_y = detile ? dst_y : src_y;
	linear_z = detile ? dst_z : src_z;
	tiled_x = detile ? src_x : dst_x;
	tiled_y = detile ? src_y : dst_y;
	tiled_z = detile ? src_z : dst_z;

	assert(!util_format_is_depth_and_stencil(tiled->buffer.b.b.format));

	array_mode = G_009910_ARRAY_MODE(tile_mode);
	slice_tile_max = (tiled->surface.u.legacy.level[tiled_lvl].nblk_x *
			  tiled->surface.u.legacy.level[tiled_lvl].nblk_y) / (8*8) - 1;
	/* The linear height must be the same as the slice tile max height. It's
	 * ok even if the linear destination/source have a smaller height, as the
	 * size of the dma packet will be using copy_height, which is always
	 * smaller than or equal to the linear height.
	 */
	height = tiled->surface.u.legacy.level[tiled_lvl].nblk_y;
	base = tiled->surface.u.legacy.level[tiled_lvl].offset;
	addr = linear->surface.u.legacy.level[linear_lvl].offset;
	addr += (uint64_t)linear->surface.u.legacy.level[linear_lvl].slice_size_dw * 4 * linear_z;
	addr += linear_y * pitch + linear_x * bpp;
	bank_h = G_009910_BANK_HEIGHT(tile_mode);
	bank_w = G_009910_BANK_WIDTH(tile_mode);
	mt_aspect = G_009910_MACRO_TILE_ASPECT(tile_mode);
	/* Non-depth modes don't have TILE_SPLIT set. */
	tile_split = util_logbase2(tiled->surface.u.legacy.tile_split >> 6);
	nbanks = G_009910_NUM_BANKS(tile_mode);
	base += tiled->buffer.gpu_address;
	addr += linear->buffer.gpu_address;

	pipe_config = G_009910_PIPE_CONFIG(tile_mode);
	mt = G_009910_MICRO_TILE_MODE(tile_mode);
	size = copy_height * pitch;
	ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
	si_need_dma_space(ctx, ncopy * 9, &sdst->buffer, &ssrc->buffer);

	for (i = 0; i < ncopy; i++) {
		cheight = copy_height;
		if (cheight * pitch > SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE) {
			cheight = SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE / pitch;
		}
		size = cheight * pitch;
		radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd, size / 4));
		radeon_emit(cs, base >> 8);
		radeon_emit(cs, (detile << 31) | (array_mode << 27) |
				(lbpp << 24) | (bank_h << 21) |
				(bank_w << 18) | (mt_aspect << 16));
		radeon_emit(cs, (pitch_tile_max << 0) | ((height - 1) << 16));
		radeon_emit(cs, (slice_tile_max << 0) | (pipe_config << 26));
		radeon_emit(cs, (tiled_x << 0) | (tiled_z << 18));
		radeon_emit(cs, (tiled_y << 0) | (tile_split << 21) | (nbanks << 25) | (mt << 27));
		radeon_emit(cs, addr & 0xfffffffc);
		radeon_emit(cs, (addr >> 32UL) & 0xff);
		copy_height -= cheight;
		addr += cheight * pitch;
	}
}
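
/* Entry point installed as sctx->dma_copy: buffer copies go through
 * si_dma_copy_buffer, and anything the DMA engine cannot handle falls back
 * to si_resource_copy_region.
 */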
static void si_dma_copy(struct pipe_context *ctx,
			struct pipe_resource *dst,
			unsigned dst_level,
			unsigned dstx, unsigned dsty, unsigned dstz,
			struct pipe_resource *src,
			unsigned src_level,
			const struct pipe_box *src_box)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_texture *ssrc = (struct si_texture*)src;
	struct si_texture *sdst = (struct si_texture*)dst;
	unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode;
	unsigned src_w, dst_w;
	unsigned src_x, src_y;
	unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;

	if (sctx->dma_cs == NULL ||
	    src->flags & PIPE_RESOURCE_FLAG_SPARSE ||
	    dst->flags & PIPE_RESOURCE_FLAG_SPARSE) {
		goto fallback;
	}

	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
		si_dma_copy_buffer(sctx, dst, src, dst_x, src_box->x, src_box->width);
		return;
	}

	/* XXX: Using the asynchronous DMA engine for multi-dimensional
	 * operations seems to cause random GPU lockups for various people.
	 * While the root cause for this might need to be fixed in the kernel,
	 * let's disable it for now.
	 *
	 * Before re-enabling this, please make sure you can hit all newly
	 * enabled paths in your testing, preferably with both piglit and real
	 * world apps, and get in touch with people on the bug reports below
	 * for stability testing.
	 *
	 * https://bugs.freedesktop.org/show_bug.cgi?id=85647
	 * https://bugs.freedesktop.org/show_bug.cgi?id=83500
	 */
	goto fallback;

	if (src_box->depth > 1 ||
	    !si_prepare_for_dma_blit(sctx, sdst, dst_level, dstx, dsty,
				     dstz, ssrc, src_level, src_box))
		goto fallback;

	src_x = util_format_get_nblocksx(src->format, src_box->x);
	dst_x = util_format_get_nblocksx(src->format, dst_x);
	src_y = util_format_get_nblocksy(src->format, src_box->y);
	dst_y = util_format_get_nblocksy(src->format, dst_y);

	bpp = sdst->surface.bpe;
	dst_pitch = sdst->surface.u.legacy.level[dst_level].nblk_x * sdst->surface.bpe;
	src_pitch = ssrc->surface.u.legacy.level[src_level].nblk_x * ssrc->surface.bpe;
	src_w = u_minify(ssrc->buffer.b.b.width0, src_level);
	dst_w = u_minify(sdst->buffer.b.b.width0, dst_level);

	dst_mode = sdst->surface.u.legacy.level[dst_level].mode;
	src_mode = ssrc->surface.u.legacy.level[src_level].mode;

	if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w ||
	    src_box->width != src_w ||
	    src_box->height != u_minify(ssrc->buffer.b.b.height0, src_level) ||
	    src_box->height != u_minify(sdst->buffer.b.b.height0, dst_level) ||
	    ssrc->surface.u.legacy.level[src_level].nblk_y !=
	    sdst->surface.u.legacy.level[dst_level].nblk_y) {
		/* FIXME si can do partial blit */
		goto fallback;
	}
	/* the x tests here are currently useless (because we don't support
	 * partial blits), but keep them around so we don't forget about them
	 */
	if ((src_pitch % 8) || (src_box->x % 8) || (dst_x % 8) ||
	    (src_box->y % 8) || (dst_y % 8) || (src_box->height % 8)) {
		goto fallback;
	}

	if (src_mode == dst_mode) {
		uint64_t dst_offset, src_offset;
		/* A simple dma blit will do. NOTE: the code here assumes:
		 *   src_box->x/y == 0
		 *   dst_x/y == 0
		 *   dst_pitch == src_pitch
		 *   src_box->width == src_w
		 */
		src_offset = ssrc->surface.u.legacy.level[src_level].offset;
		src_offset += (uint64_t)ssrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_box->z;
		src_offset += src_y * src_pitch + src_x * bpp;
		dst_offset = sdst->surface.u.legacy.level[dst_level].offset;
		dst_offset += (uint64_t)sdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z;
		dst_offset += dst_y * dst_pitch + dst_x * bpp;
		si_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset,
				   (uint64_t)ssrc->surface.u.legacy.level[src_level].slice_size_dw * 4);
	} else {
		si_dma_copy_tile(sctx, dst, dst_level, dst_x, dst_y, dst_z,
				 src, src_level, src_x, src_y, src_box->z,
				 src_box->height / ssrc->surface.blk_h,
				 dst_pitch, bpp);
	}
	return;

fallback:
	si_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
				src, src_level, src_box);
}
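
/* Register the DMA-based callbacks on the context. */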
void si_init_dma_functions(struct si_context *sctx)
{
	sctx->dma_copy = si_dma_copy;
	sctx->dma_clear_buffer = si_dma_clear_buffer;
}