gallium: Redefine the max texture 2d cap from _LEVELS to _SIZE.
[mesa.git] src/gallium/drivers/radeonsi/si_test_dma.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

/* This file implements randomized SDMA texture blit tests. */

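/* The test is not a standalone program: si_test_dma() is presumably invoked
 * by the driver at screen creation when its test-DMA debug option is set
 * (typically R600_DEBUG=testdma), and it ends by calling exit(0) rather than
 * returning.
 */
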
#include "si_pipe.h"
#include "util/u_surface.h"
#include "util/rand_xor.h"

static uint64_t seed_xorshift128plus[2];

#define RAND_NUM_SIZE 8

/* The GPU blits are emulated on the CPU using these CPU textures. */

struct cpu_texture {
	uint8_t *ptr;
	uint64_t size;
	uint64_t layer_stride;
	unsigned stride;
};

static void alloc_cpu_texture(struct cpu_texture *tex,
	struct pipe_resource *templ, int bpp)
{
	tex->stride = align(templ->width0 * bpp, RAND_NUM_SIZE);
	tex->layer_stride = (uint64_t)tex->stride * templ->height0;
	tex->size = tex->layer_stride * templ->array_size;
	tex->ptr = malloc(tex->size);
	assert(tex->ptr);
}

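/* Fill the GPU texture and its CPU shadow with the same xorshift128+ stream.
 * A texel at (x, y, layer) lives at ptr + layer*layer_stride + y*stride +
 * x*bpp in both copies; the CPU stride is padded to RAND_NUM_SIZE bytes so
 * each row can be written as whole 64-bit random words.
 */
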
static void set_random_pixels(struct pipe_context *ctx,
	struct pipe_resource *tex,
	struct cpu_texture *cpu)
{
	struct pipe_transfer *t;
	uint8_t *map;
	int x, y, z;

	map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_TRANSFER_WRITE,
		0, 0, 0, tex->width0, tex->height0,
		tex->array_size, &t);
	assert(map);

	for (z = 0; z < tex->array_size; z++) {
		for (y = 0; y < tex->height0; y++) {
			uint64_t *ptr = (uint64_t*)
				(map + t->layer_stride*z + t->stride*y);
			uint64_t *ptr_cpu = (uint64_t*)
				(cpu->ptr + cpu->layer_stride*z + cpu->stride*y);
			unsigned size = cpu->stride / RAND_NUM_SIZE;

			assert(t->stride % RAND_NUM_SIZE == 0);
			assert(cpu->stride % RAND_NUM_SIZE == 0);

			for (x = 0; x < size; x++) {
				*ptr++ = *ptr_cpu++ =
					rand_xorshift128plus(seed_xorshift128plus);
			}
		}
	}

	pipe_transfer_unmap(ctx, t);
}

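/* Compare the GPU texture with its CPU shadow. Only width0 * bpp bytes per
 * row are checked, so stride padding in either copy is ignored, and the
 * compare stops at the first mismatching row.
 */
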
static bool compare_textures(struct pipe_context *ctx,
	struct pipe_resource *tex,
	struct cpu_texture *cpu, int bpp)
{
	struct pipe_transfer *t;
	uint8_t *map;
	int y, z;
	bool pass = true;

	map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_TRANSFER_READ,
		0, 0, 0, tex->width0, tex->height0,
		tex->array_size, &t);
	assert(map);

	for (z = 0; z < tex->array_size; z++) {
		for (y = 0; y < tex->height0; y++) {
			uint8_t *ptr = map + t->layer_stride*z + t->stride*y;
			uint8_t *cpu_ptr = cpu->ptr +
				cpu->layer_stride*z + cpu->stride*y;

			if (memcmp(ptr, cpu_ptr, tex->width0 * bpp)) {
				pass = false;
				goto done;
			}
		}
	}
done:
	pipe_transfer_unmap(ctx, t);
	return pass;
}

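/* bpp is picked as 1 << (rand() % 5), i.e. 1, 2, 4, 8 or 16 bytes per pixel;
 * mapping it to an R*_UINT format keeps the copied data bit-exact for the
 * CPU comparison.
 */
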
static enum pipe_format get_format_from_bpp(int bpp)
{
	switch (bpp) {
	case 1:
		return PIPE_FORMAT_R8_UINT;
	case 2:
		return PIPE_FORMAT_R16_UINT;
	case 4:
		return PIPE_FORMAT_R32_UINT;
	case 8:
		return PIPE_FORMAT_R32G32_UINT;
	case 16:
		return PIPE_FORMAT_R32G32B32A32_UINT;
	default:
		assert(0);
		return PIPE_FORMAT_NONE;
	}
}

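/* Return a human-readable name of the surface's tiling mode for the per-test
 * log line: gfx9+ swizzle modes on newer chips, legacy array modes otherwise.
 */
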
static const char *array_mode_to_string(struct si_screen *sscreen,
	struct radeon_surf *surf)
{
	if (sscreen->info.chip_class >= GFX9) {
		switch (surf->u.gfx9.surf.swizzle_mode) {
		case 0:
			return "  LINEAR";
		case 21:
			return " 4KB_S_X";
		case 22:
			return " 4KB_D_X";
		case 25:
			return "64KB_S_X";
		case 26:
			return "64KB_D_X";
		default:
			printf("Unhandled swizzle mode = %u\n",
				surf->u.gfx9.surf.swizzle_mode);
			return " UNKNOWN";
		}
	} else {
		switch (surf->u.legacy.level[0].mode) {
		case RADEON_SURF_MODE_LINEAR_ALIGNED:
			return "LINEAR_ALIGNED";
		case RADEON_SURF_MODE_1D:
			return "1D_TILED_THIN1";
		case RADEON_SURF_MODE_2D:
			return "2D_TILED_THIN1";
		default:
			assert(0);
			return " UNKNOWN";
		}
	}
}

static unsigned generate_max_tex_side(unsigned max_tex_side)
{
	switch (rand() % 4) {
	case 0:
		/* Try to hit large sizes in 1/4 of the cases. */
		return max_tex_side;
	case 1:
		/* Try to hit 1D tiling in 1/4 of the cases. */
		return 128;
	default:
		/* Try to hit common sizes in 2/4 of the cases. */
		return 2048;
	}
}

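/* Main entry point: creates its own context and loops practically forever
 * (see "iterations" below), printing one line per test; it exits the process
 * via exit(0) instead of returning.
 */
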
void si_test_dma(struct si_screen *sscreen)
{
	struct pipe_screen *screen = &sscreen->b;
	struct pipe_context *ctx = screen->context_create(screen, NULL, 0);
	struct si_context *sctx = (struct si_context*)ctx;
	uint64_t max_alloc_size;
	unsigned i, iterations, num_partial_copies, max_tex_side;
	unsigned num_pass = 0, num_fail = 0;

	max_tex_side = screen->get_param(screen, PIPE_CAP_MAX_TEXTURE_2D_SIZE);

	/* Max 128 MB allowed for both textures. */
	max_alloc_size = 128 * 1024 * 1024;

	/* the seed for random test parameters */
	srand(0x9b47d95b);
	/* the seed for random pixel data */
	s_rand_xorshift128plus(seed_xorshift128plus, false);

	iterations = 1000000000; /* just kill it when you are bored */
	num_partial_copies = 30;

	/* These parameters are randomly generated per test:
	 * - whether to do one whole-surface copy or N partial copies per test
	 * - which tiling modes to use (LINEAR_ALIGNED, 1D, 2D)
	 * - which texture dimensions to use
	 * - whether to use VRAM (all tiling modes) and GTT (staging, linear
	 *   only) allocations
	 * - random initial pixels in src
	 * - generate random subrectangle copies for partial blits
	 */
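	/* Each test prints the dst/src geometry and tiling modes, which engine
	 * ended up doing the blits (GFX draws vs. the DMA engine), and a
	 * running pass/total count; "fail" means the GPU result diverged from
	 * the CPU reference.
	 */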
	for (i = 0; i < iterations; i++) {
		struct pipe_resource tsrc = {}, tdst = {}, *src, *dst;
		struct si_texture *sdst;
		struct si_texture *ssrc;
		struct cpu_texture src_cpu, dst_cpu;
		unsigned bpp, max_width, max_height, max_depth, j, num;
		unsigned gfx_blits = 0, dma_blits = 0, max_tex_side_gen;
		unsigned max_tex_layers;
		bool pass;
		bool do_partial_copies = rand() & 1;

		/* generate a random test case */
		tsrc.target = tdst.target = PIPE_TEXTURE_2D_ARRAY;
		tsrc.depth0 = tdst.depth0 = 1;

		bpp = 1 << (rand() % 5);
		tsrc.format = tdst.format = get_format_from_bpp(bpp);

		max_tex_side_gen = generate_max_tex_side(max_tex_side);
		max_tex_layers = rand() % 4 ? 1 : 5;

		tsrc.width0 = (rand() % max_tex_side_gen) + 1;
		tsrc.height0 = (rand() % max_tex_side_gen) + 1;
		tsrc.array_size = (rand() % max_tex_layers) + 1;

		/* Have a 1/4 chance of getting power-of-two dimensions. */
		if (rand() % 4 == 0) {
			tsrc.width0 = util_next_power_of_two(tsrc.width0);
			tsrc.height0 = util_next_power_of_two(tsrc.height0);
		}

		if (!do_partial_copies) {
			/* whole-surface copies only, same dimensions */
			tdst = tsrc;
		} else {
			max_tex_side_gen = generate_max_tex_side(max_tex_side);
			max_tex_layers = rand() % 4 ? 1 : 5;

			/* many partial copies, dimensions can be different */
			tdst.width0 = (rand() % max_tex_side_gen) + 1;
			tdst.height0 = (rand() % max_tex_side_gen) + 1;
			tdst.array_size = (rand() % max_tex_layers) + 1;

			/* Have a 1/4 chance of getting power-of-two dimensions. */
			if (rand() % 4 == 0) {
				tdst.width0 = util_next_power_of_two(tdst.width0);
				tdst.height0 = util_next_power_of_two(tdst.height0);
			}
		}

		/* check texture sizes */
		if ((uint64_t)tsrc.width0 * tsrc.height0 * tsrc.array_size * bpp +
		    (uint64_t)tdst.width0 * tdst.height0 * tdst.array_size * bpp >
		    max_alloc_size) {
			/* too large, try again */
			i--;
			continue;
		}

		/* VRAM + the tiling mode depends on dimensions (3/4 of cases),
		 * or GTT + linear only (1/4 of cases)
		 */
		tsrc.usage = rand() % 4 ? PIPE_USAGE_DEFAULT : PIPE_USAGE_STAGING;
		tdst.usage = rand() % 4 ? PIPE_USAGE_DEFAULT : PIPE_USAGE_STAGING;

		/* Allocate textures (both the GPU and CPU copies).
		 * The CPU will emulate what the GPU should be doing.
		 */
		src = screen->resource_create(screen, &tsrc);
		dst = screen->resource_create(screen, &tdst);
		assert(src);
		assert(dst);
		sdst = (struct si_texture*)dst;
		ssrc = (struct si_texture*)src;
		alloc_cpu_texture(&src_cpu, &tsrc, bpp);
		alloc_cpu_texture(&dst_cpu, &tdst, bpp);

		printf("%4u: dst = (%5u x %5u x %u, %s), "
			" src = (%5u x %5u x %u, %s), bpp = %2u, ",
			i, tdst.width0, tdst.height0, tdst.array_size,
			array_mode_to_string(sscreen, &sdst->surface),
			tsrc.width0, tsrc.height0, tsrc.array_size,
			array_mode_to_string(sscreen, &ssrc->surface), bpp);
		fflush(stdout);

		/* set src pixels */
		set_random_pixels(ctx, src, &src_cpu);

		/* clear dst pixels */
		uint32_t zero = 0;
		si_clear_buffer(sctx, dst, 0, sdst->surface.surf_size, &zero, 4,
			SI_COHERENCY_SHADER, false);
		memset(dst_cpu.ptr, 0, dst_cpu.layer_stride * tdst.array_size);

		/* preparation */
		max_width = MIN2(tsrc.width0, tdst.width0);
		max_height = MIN2(tsrc.height0, tdst.height0);
		max_depth = MIN2(tsrc.array_size, tdst.array_size);

		num = do_partial_copies ? num_partial_copies : 1;
		for (j = 0; j < num; j++) {
			int width, height, depth;
			int srcx, srcy, srcz, dstx, dsty, dstz;
			struct pipe_box box;
			unsigned old_num_draw_calls = sctx->num_draw_calls;
			unsigned old_num_dma_calls = sctx->num_dma_calls;

			if (!do_partial_copies) {
				/* copy whole src to dst */
				width = max_width;
				height = max_height;
				depth = max_depth;

				srcx = srcy = srcz = dstx = dsty = dstz = 0;
			} else {
				/* random sub-rectangle copies from src to dst */
				depth = (rand() % max_depth) + 1;
				srcz = rand() % (tsrc.array_size - depth + 1);
				dstz = rand() % (tdst.array_size - depth + 1);

				/* special code path to hit the tiled partial copies */
				if (!ssrc->surface.is_linear &&
				    !sdst->surface.is_linear &&
				    rand() & 1) {
					if (max_width < 8 || max_height < 8)
						continue;
					width = ((rand() % (max_width / 8)) + 1) * 8;
					height = ((rand() % (max_height / 8)) + 1) * 8;

					srcx = rand() % (tsrc.width0 - width + 1) & ~0x7;
					srcy = rand() % (tsrc.height0 - height + 1) & ~0x7;

					dstx = rand() % (tdst.width0 - width + 1) & ~0x7;
					dsty = rand() % (tdst.height0 - height + 1) & ~0x7;
				} else {
					/* just make sure that it doesn't divide by zero */
					assert(max_width > 0 && max_height > 0);

					width = (rand() % max_width) + 1;
					height = (rand() % max_height) + 1;

					srcx = rand() % (tsrc.width0 - width + 1);
					srcy = rand() % (tsrc.height0 - height + 1);

					dstx = rand() % (tdst.width0 - width + 1);
					dsty = rand() % (tdst.height0 - height + 1);
				}

				/* special code path to hit out-of-bounds reads in L2T (linear-to-tiled) copies */
				if (ssrc->surface.is_linear &&
				    !sdst->surface.is_linear &&
				    rand() % 4 == 0) {
					srcx = 0;
					srcy = 0;
					srcz = 0;
				}
			}

			/* GPU copy */
			u_box_3d(srcx, srcy, srcz, width, height, depth, &box);
			sctx->dma_copy(ctx, dst, 0, dstx, dsty, dstz, src, 0, &box);

			/* See which engine was used. */
			gfx_blits += sctx->num_draw_calls > old_num_draw_calls;
			dma_blits += sctx->num_dma_calls > old_num_dma_calls;

			/* CPU copy */
			util_copy_box(dst_cpu.ptr, tdst.format, dst_cpu.stride,
				dst_cpu.layer_stride,
				dstx, dsty, dstz, width, height, depth,
				src_cpu.ptr, src_cpu.stride,
				src_cpu.layer_stride,
				srcx, srcy, srcz);
		}

		pass = compare_textures(ctx, dst, &dst_cpu, bpp);
		if (pass)
			num_pass++;
		else
			num_fail++;

		printf("BLITs: GFX = %2u, DMA = %2u, %s [%u/%u]\n",
			gfx_blits, dma_blits, pass ? "pass" : "fail",
			num_pass, num_pass+num_fail);

		/* cleanup */
		pipe_resource_reference(&src, NULL);
		pipe_resource_reference(&dst, NULL);
		free(src_cpu.ptr);
		free(dst_cpu.ptr);
	}

	ctx->destroy(ctx);
	exit(0);
}