mesa.git: src/gallium/drivers/vc4/vc4_resource.c
/*
 * Copyright © 2014 Broadcom
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "pipe/p_defines.h"
#include "util/u_blit.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_surface.h"
#include "util/u_transfer_helper.h"
#include "util/u_upload_mgr.h"

#include "drm_fourcc.h"
#include "vc4_drm.h"
#include "vc4_screen.h"
#include "vc4_context.h"
#include "vc4_resource.h"
#include "vc4_tiling.h"

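/**
 * Allocates (or reallocates) the BO backing a resource.
 *
 * The size covers the whole miptree plus, for cube maps, one
 * cube_map_stride-sized miptree per additional array layer.  On success the
 * old BO reference is dropped and replaced; on failure the resource keeps
 * its previous storage.
 */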
static bool
vc4_resource_bo_alloc(struct vc4_resource *rsc)
{
        struct pipe_resource *prsc = &rsc->base;
        struct pipe_screen *pscreen = prsc->screen;
        struct vc4_bo *bo;

        if (vc4_debug & VC4_DEBUG_SURFACE) {
                fprintf(stderr, "alloc %p: size %d + offset %d -> %d\n",
                        rsc,
                        rsc->slices[0].size,
                        rsc->slices[0].offset,
                        rsc->slices[0].offset +
                        rsc->slices[0].size +
                        rsc->cube_map_stride * (prsc->array_size - 1));
        }

        bo = vc4_bo_alloc(vc4_screen(pscreen),
                          rsc->slices[0].offset +
                          rsc->slices[0].size +
                          rsc->cube_map_stride * (prsc->array_size - 1),
                          "resource");
        if (bo) {
                vc4_bo_unreference(&rsc->bo);
                rsc->bo = bo;
                return true;
        } else {
                return false;
        }
}

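/**
 * Unmaps a transfer.  For tiled resources this is where the shadow copy
 * written by the caller gets tiled back into the BO; linear resources were
 * mapped directly and need no copy.
 */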
static void
vc4_resource_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *ptrans)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_transfer *trans = vc4_transfer(ptrans);

        if (trans->map) {
                struct vc4_resource *rsc = vc4_resource(ptrans->resource);
                struct vc4_resource_slice *slice = &rsc->slices[ptrans->level];

                if (ptrans->usage & PIPE_TRANSFER_WRITE) {
                        vc4_store_tiled_image(rsc->bo->map + slice->offset +
                                              ptrans->box.z * rsc->cube_map_stride,
                                              slice->stride,
                                              trans->map, ptrans->stride,
                                              slice->tiling, rsc->cpp,
                                              &ptrans->box);
                }
                free(trans->map);
        }

        pipe_resource_reference(&ptrans->resource, NULL);
        slab_free(&vc4->transfer_pool, ptrans);
}

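/**
 * Maps a resource for CPU access.
 *
 * Linear resources return a pointer directly into the (possibly
 * synchronized) BO mapping.  Tiled resources are detiled into a temporary
 * malloc'd shadow buffer, which gets tiled back into the BO at unmap time.
 */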
static void *
vc4_resource_transfer_map(struct pipe_context *pctx,
                          struct pipe_resource *prsc,
                          unsigned level, unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **pptrans)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_resource *rsc = vc4_resource(prsc);
        struct vc4_transfer *trans;
        struct pipe_transfer *ptrans;
        enum pipe_format format = prsc->format;
        char *buf;

        /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
         * being mapped.
         */
        if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
            !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
            !(prsc->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) &&
            prsc->last_level == 0 &&
            prsc->width0 == box->width &&
            prsc->height0 == box->height &&
            prsc->depth0 == box->depth &&
            prsc->array_size == 1 &&
            rsc->bo->private) {
                usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
        }

        if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
                if (vc4_resource_bo_alloc(rsc)) {
                        /* If it might be bound as one of our vertex buffers,
                         * make sure we re-emit vertex buffer state.
                         */
                        if (prsc->bind & PIPE_BIND_VERTEX_BUFFER)
                                vc4->dirty |= VC4_DIRTY_VTXBUF;
                } else {
                        /* If we failed to reallocate, flush users so that we
                         * don't violate any syncing requirements.
                         */
                        vc4_flush_jobs_reading_resource(vc4, prsc);
                }
        } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
                /* If we're writing and the buffer is being used by the CL, we
                 * have to flush the CL first.  If we're only reading, we need
                 * to flush if the CL has written our buffer.
                 */
                if (usage & PIPE_TRANSFER_WRITE)
                        vc4_flush_jobs_reading_resource(vc4, prsc);
                else
                        vc4_flush_jobs_writing_resource(vc4, prsc);
        }

        if (usage & PIPE_TRANSFER_WRITE) {
                rsc->writes++;
                rsc->initialized_buffers = ~0;
        }

        trans = slab_alloc(&vc4->transfer_pool);
        if (!trans)
                return NULL;

        /* XXX: Handle DONTBLOCK, DISCARD_RANGE, PERSISTENT, COHERENT. */

        /* slab_alloc() doesn't zero: */
        memset(trans, 0, sizeof(*trans));
        ptrans = &trans->base;

        pipe_resource_reference(&ptrans->resource, prsc);
        ptrans->level = level;
        ptrans->usage = usage;
        ptrans->box = *box;

        if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
                buf = vc4_bo_map_unsynchronized(rsc->bo);
        else
                buf = vc4_bo_map(rsc->bo);
        if (!buf) {
                fprintf(stderr, "Failed to map bo\n");
                goto fail;
        }

        *pptrans = ptrans;

        struct vc4_resource_slice *slice = &rsc->slices[level];
        if (rsc->tiled) {
                uint32_t utile_w = vc4_utile_width(rsc->cpp);
                uint32_t utile_h = vc4_utile_height(rsc->cpp);

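                /* Note: utile dimensions come from vc4_tiling.h and depend on
                 * cpp.  As a rough guide (assuming the usual 64-byte utiles):
                 * 8x8 pixels at 1 cpp, 8x4 at 2 cpp, 4x4 at 4 cpp, and 4x2 at
                 * 8 cpp.
                 */
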
                /* No direct mappings of tiled, since we need to manually
                 * tile/untile.
                 */
                if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
                        return NULL;

                if (format == PIPE_FORMAT_ETC1_RGB8) {
                        /* ETC1 is arranged as 64-bit blocks, where each block
                         * is 4x4 pixels.  Texture tiling operates on the
                         * 64-bit blocks the way it would on uncompressed
                         * pixels.
                         */
                        assert(!(ptrans->box.x & 3));
                        assert(!(ptrans->box.y & 3));
                        ptrans->box.x >>= 2;
                        ptrans->box.y >>= 2;
                        ptrans->box.width = (ptrans->box.width + 3) >> 2;
                        ptrans->box.height = (ptrans->box.height + 3) >> 2;
                }

                /* We need to align the box to utile boundaries, since that's
                 * what load/store operates on.  This may cause us to need to
                 * read out the original contents in that border area.  Right
                 * now we just read out the entire contents, including the
                 * middle area that will just get overwritten.
                 */
                uint32_t box_start_x = ptrans->box.x & (utile_w - 1);
                uint32_t box_start_y = ptrans->box.y & (utile_h - 1);
                bool needs_load = (usage & PIPE_TRANSFER_READ) != 0;

                if (box_start_x) {
                        ptrans->box.width += box_start_x;
                        ptrans->box.x -= box_start_x;
                        needs_load = true;
                }
                if (box_start_y) {
                        ptrans->box.height += box_start_y;
                        ptrans->box.y -= box_start_y;
                        needs_load = true;
                }
                if (ptrans->box.width & (utile_w - 1)) {
                        /* We only need to force a load if the border region
                         * we're extending into is actually part of the
                         * texture.
                         */
                        uint32_t slice_width = u_minify(prsc->width0, level);
                        if (ptrans->box.x + ptrans->box.width != slice_width)
                                needs_load = true;
                        ptrans->box.width = align(ptrans->box.width, utile_w);
                }
                if (ptrans->box.height & (utile_h - 1)) {
                        uint32_t slice_height = u_minify(prsc->height0, level);
                        if (ptrans->box.y + ptrans->box.height != slice_height)
                                needs_load = true;
                        ptrans->box.height = align(ptrans->box.height, utile_h);
                }

                ptrans->stride = ptrans->box.width * rsc->cpp;
                ptrans->layer_stride = ptrans->stride * ptrans->box.height;

                trans->map = malloc(ptrans->layer_stride * ptrans->box.depth);

                if (needs_load) {
                        vc4_load_tiled_image(trans->map, ptrans->stride,
                                             buf + slice->offset +
                                             ptrans->box.z * rsc->cube_map_stride,
                                             slice->stride,
                                             slice->tiling, rsc->cpp,
                                             &ptrans->box);
                }
                return (trans->map +
                        box_start_x * rsc->cpp +
                        box_start_y * ptrans->stride);
        } else {
                ptrans->stride = slice->stride;
                ptrans->layer_stride = ptrans->stride;

                return buf + slice->offset +
                        ptrans->box.y / util_format_get_blockheight(format) * ptrans->stride +
                        ptrans->box.x / util_format_get_blockwidth(format) * rsc->cpp +
                        ptrans->box.z * rsc->cube_map_stride;
        }


fail:
        vc4_resource_transfer_unmap(pctx, ptrans);
        return NULL;
}

static void
vc4_resource_destroy(struct pipe_screen *pscreen,
                     struct pipe_resource *prsc)
{
        struct vc4_screen *screen = vc4_screen(pscreen);
        struct vc4_resource *rsc = vc4_resource(prsc);
        vc4_bo_unreference(&rsc->bo);

        if (rsc->scanout)
                renderonly_scanout_destroy(rsc->scanout, screen->ro);

        free(rsc);
}

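/**
 * Exports a winsys handle (flink name, KMS handle, or dmabuf fd) for the
 * resource, along with the stride and DRM format modifier that describe its
 * layout.
 */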
static boolean
vc4_resource_get_handle(struct pipe_screen *pscreen,
                        struct pipe_context *pctx,
                        struct pipe_resource *prsc,
                        struct winsys_handle *whandle,
                        unsigned usage)
{
        struct vc4_screen *screen = vc4_screen(pscreen);
        struct vc4_resource *rsc = vc4_resource(prsc);

        whandle->stride = rsc->slices[0].stride;
        whandle->offset = 0;

        /* If we're passing some reference to our BO out to some other part of
         * the system, then we can't do any optimizations about only us being
         * the ones seeing it (like BO caching or shadow update avoidance).
         */
        rsc->bo->private = false;

        if (rsc->tiled)
                whandle->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
        else
                whandle->modifier = DRM_FORMAT_MOD_LINEAR;

        switch (whandle->type) {
        case WINSYS_HANDLE_TYPE_SHARED:
                if (screen->ro) {
                        /* This could probably be supported, assuming that a
                         * control node was used for pl111.
                         */
                        fprintf(stderr, "flink unsupported with pl111\n");
                        return FALSE;
                }

                return vc4_bo_flink(rsc->bo, &whandle->handle);
        case WINSYS_HANDLE_TYPE_KMS:
                if (screen->ro && renderonly_get_handle(rsc->scanout, whandle))
                        return TRUE;
                whandle->handle = rsc->bo->handle;
                return TRUE;
        case WINSYS_HANDLE_TYPE_FD:
                /* FDs are cross-device, so we can export directly from vc4.
                 */
                whandle->handle = vc4_bo_get_dmabuf(rsc->bo);
                return whandle->handle != -1;
        }

        return FALSE;
}

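/**
 * Lays out the miplevels of a resource: offset, stride, size, and tiling
 * format per level.
 *
 * Levels are placed starting from the smallest mip, so level 0 ends up at
 * the highest offset, with the smaller levels stored below the level 0 data
 * that the texture base pointer points at.  Level 0's offset is then
 * page-aligned (carrying the smaller levels along with it), since the base
 * pointer has no intra-page bits.
 */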
static void
vc4_setup_slices(struct vc4_resource *rsc, const char *caller)
{
        struct pipe_resource *prsc = &rsc->base;
        uint32_t width = prsc->width0;
        uint32_t height = prsc->height0;
        if (prsc->format == PIPE_FORMAT_ETC1_RGB8) {
                width = (width + 3) >> 2;
                height = (height + 3) >> 2;
        }

        uint32_t pot_width = util_next_power_of_two(width);
        uint32_t pot_height = util_next_power_of_two(height);
        uint32_t offset = 0;
        uint32_t utile_w = vc4_utile_width(rsc->cpp);
        uint32_t utile_h = vc4_utile_height(rsc->cpp);

        for (int i = prsc->last_level; i >= 0; i--) {
                struct vc4_resource_slice *slice = &rsc->slices[i];

                uint32_t level_width, level_height;
                if (i == 0) {
                        level_width = width;
                        level_height = height;
                } else {
                        level_width = u_minify(pot_width, i);
                        level_height = u_minify(pot_height, i);
                }

                if (!rsc->tiled) {
                        slice->tiling = VC4_TILING_FORMAT_LINEAR;
                        if (prsc->nr_samples > 1) {
                                /* MSAA (4x) surfaces are stored as raw tile buffer contents. */
                                level_width = align(level_width, 32);
                                level_height = align(level_height, 32);
                        } else {
                                level_width = align(level_width, utile_w);
                        }
                } else {
                        if (vc4_size_is_lt(level_width, level_height,
                                           rsc->cpp)) {
                                slice->tiling = VC4_TILING_FORMAT_LT;
                                level_width = align(level_width, utile_w);
                                level_height = align(level_height, utile_h);
                        } else {
                                slice->tiling = VC4_TILING_FORMAT_T;
                                level_width = align(level_width,
                                                    4 * 2 * utile_w);
                                level_height = align(level_height,
                                                     4 * 2 * utile_h);
                        }
                }

                slice->offset = offset;
                slice->stride = (level_width * rsc->cpp *
                                 MAX2(prsc->nr_samples, 1));
                slice->size = level_height * slice->stride;

                offset += slice->size;

                if (vc4_debug & VC4_DEBUG_SURFACE) {
                        static const char tiling_chars[] = {
                                [VC4_TILING_FORMAT_LINEAR] = 'R',
                                [VC4_TILING_FORMAT_LT] = 'L',
                                [VC4_TILING_FORMAT_T] = 'T'
                        };
                        fprintf(stderr,
                                "rsc %s %p (format %s: vc4 %d), %dx%d: "
                                "level %d (%c) -> %dx%d, stride %d@0x%08x\n",
                                caller, rsc,
                                util_format_short_name(prsc->format),
                                rsc->vc4_format,
                                prsc->width0, prsc->height0,
                                i, tiling_chars[slice->tiling],
                                level_width, level_height,
                                slice->stride, slice->offset);
                }
        }

        /* The texture base pointer that has to point to level 0 doesn't have
         * intra-page bits, so we have to align it, and thus shift up all the
         * smaller slices.
         */
        uint32_t page_align_offset = (align(rsc->slices[0].offset, 4096) -
                                      rsc->slices[0].offset);
        if (page_align_offset) {
                for (int i = 0; i <= prsc->last_level; i++)
                        rsc->slices[i].offset += page_align_offset;
        }

        /* Cube map faces appear as whole miptrees at a page-aligned offset
         * from the first face's miptree.
         */
        if (prsc->target == PIPE_TEXTURE_CUBE) {
                rsc->cube_map_stride = align(rsc->slices[0].offset +
                                             rsc->slices[0].size, 4096);
        }
}

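/**
 * Common setup shared by resource creation and import: allocates the struct,
 * copies the template, and picks cpp (bytes per pixel or block).  MSAA
 * surfaces always use 32-bit raw tile buffer samples regardless of format.
 */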
static struct vc4_resource *
vc4_resource_setup(struct pipe_screen *pscreen,
                   const struct pipe_resource *tmpl)
{
        struct vc4_resource *rsc = CALLOC_STRUCT(vc4_resource);
        if (!rsc)
                return NULL;
        struct pipe_resource *prsc = &rsc->base;

        *prsc = *tmpl;

        pipe_reference_init(&prsc->reference, 1);
        prsc->screen = pscreen;

        if (prsc->nr_samples <= 1)
                rsc->cpp = util_format_get_blocksize(tmpl->format);
        else
                rsc->cpp = sizeof(uint32_t);

        assert(rsc->cpp);

        return rsc;
}

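/**
 * Returns the hardware texture data type for a resource, or ~0 if it can't
 * be textured from directly.  Raster (untiled) layouts generally can't be
 * sampled, except that raster RGBA8888 maps to the RGBA32R type.
 */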
static enum vc4_texture_data_type
get_resource_texture_format(struct pipe_resource *prsc)
{
        struct vc4_resource *rsc = vc4_resource(prsc);
        uint8_t format = vc4_get_tex_format(prsc->format);

        if (!rsc->tiled) {
                if (prsc->nr_samples > 1) {
                        return ~0;
                } else {
                        if (format == VC4_TEXTURE_TYPE_RGBA8888)
                                return VC4_TEXTURE_TYPE_RGBA32R;
                        else
                                return ~0;
                }
        }

        return format;
}

static bool
find_modifier(uint64_t needle, const uint64_t *haystack, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                if (haystack[i] == needle)
                        return true;
        }

        return false;
}

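/**
 * Creates a resource, choosing between the T-tiled and linear layouts based
 * on the modifiers offered by the caller and on how the resource will be
 * used (buffers, MSAA surfaces, cursors, and various scanout/sharing cases
 * all force linear).
 */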
static struct pipe_resource *
vc4_resource_create_with_modifiers(struct pipe_screen *pscreen,
                                   const struct pipe_resource *tmpl,
                                   const uint64_t *modifiers,
                                   int count)
{
        struct vc4_screen *screen = vc4_screen(pscreen);
        struct vc4_resource *rsc = vc4_resource_setup(pscreen, tmpl);
        struct pipe_resource *prsc = &rsc->base;
        bool linear_ok = find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
        /* Use a tiled layout if we can, for better 3D performance. */
        bool should_tile = true;

        /* VBOs/PBOs are untiled (and 1 height). */
        if (tmpl->target == PIPE_BUFFER)
                should_tile = false;

        /* MSAA buffers are linear. */
        if (tmpl->nr_samples > 1)
                should_tile = false;

        /* No tiling when we're sharing with another device (pl111). */
        if (screen->ro && (tmpl->bind & PIPE_BIND_SCANOUT))
                should_tile = false;

        /* Cursors are always linear, and the user can request linear as well.
         */
        if (tmpl->bind & (PIPE_BIND_LINEAR | PIPE_BIND_CURSOR))
                should_tile = false;

        /* No shared objects with LT format -- the kernel only has T-format
         * metadata.  LT objects are small enough it's not worth the trouble to
         * give them metadata to tile.
         */
        if ((tmpl->bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT)) &&
            vc4_size_is_lt(prsc->width0, prsc->height0, rsc->cpp))
                should_tile = false;

        /* If we're sharing or scanning out, we need the ioctl present to
         * inform the kernel or the other side.
         */
        if ((tmpl->bind & (PIPE_BIND_SHARED |
                           PIPE_BIND_SCANOUT)) && !screen->has_tiling_ioctl)
                should_tile = false;

        /* No user-specified modifier; determine our own. */
        if (count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID) {
                linear_ok = true;
                rsc->tiled = should_tile;
        } else if (should_tile &&
                   find_modifier(DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
                                 modifiers, count)) {
                rsc->tiled = true;
        } else if (linear_ok) {
                rsc->tiled = false;
        } else {
                fprintf(stderr, "Unsupported modifier requested\n");
                return NULL;
        }

        if (tmpl->target != PIPE_BUFFER)
                rsc->vc4_format = get_resource_texture_format(prsc);

        vc4_setup_slices(rsc, "create");
        if (!vc4_resource_bo_alloc(rsc))
                goto fail;

        if (screen->has_tiling_ioctl) {
                uint64_t modifier;
                if (rsc->tiled)
                        modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
                else
                        modifier = DRM_FORMAT_MOD_LINEAR;
                struct drm_vc4_set_tiling set_tiling = {
                        .handle = rsc->bo->handle,
                        .modifier = modifier,
                };
                int ret = vc4_ioctl(screen->fd, DRM_IOCTL_VC4_SET_TILING,
                                    &set_tiling);
                if (ret != 0)
                        goto fail;
        }

        if (screen->ro && tmpl->bind & PIPE_BIND_SCANOUT) {
                rsc->scanout =
                        renderonly_scanout_for_resource(prsc, screen->ro, NULL);
                if (!rsc->scanout)
                        goto fail;
        }

        vc4_bo_label(screen, rsc->bo, "%sresource %dx%d@%d/%d",
                     (tmpl->bind & PIPE_BIND_SCANOUT) ? "scanout " : "",
                     tmpl->width0, tmpl->height0,
                     rsc->cpp * 8, prsc->last_level);

        return prsc;
fail:
        vc4_resource_destroy(pscreen, prsc);
        return NULL;
}

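/** Default resource creation path: no caller-supplied modifier list. */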
struct pipe_resource *
vc4_resource_create(struct pipe_screen *pscreen,
                    const struct pipe_resource *tmpl)
{
        const uint64_t mod = DRM_FORMAT_MOD_INVALID;
        return vc4_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
}

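/**
 * Imports a resource from a winsys handle (flink name or dmabuf fd),
 * deriving the tiled/linear layout from the handle's modifier or, failing
 * that, from the kernel's GET_TILING state.
 */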
static struct pipe_resource *
vc4_resource_from_handle(struct pipe_screen *pscreen,
                         const struct pipe_resource *tmpl,
                         struct winsys_handle *whandle,
                         unsigned usage)
{
        struct vc4_screen *screen = vc4_screen(pscreen);
        struct vc4_resource *rsc = vc4_resource_setup(pscreen, tmpl);
        struct pipe_resource *prsc = &rsc->base;
        struct vc4_resource_slice *slice = &rsc->slices[0];

        if (!rsc)
                return NULL;

        switch (whandle->type) {
        case WINSYS_HANDLE_TYPE_SHARED:
                rsc->bo = vc4_bo_open_name(screen,
                                           whandle->handle, whandle->stride);
                break;
        case WINSYS_HANDLE_TYPE_FD:
                rsc->bo = vc4_bo_open_dmabuf(screen,
                                             whandle->handle, whandle->stride);
                break;
        default:
                fprintf(stderr,
                        "Attempt to import unsupported handle type %d\n",
                        whandle->type);
        }

        if (!rsc->bo)
                goto fail;

        struct drm_vc4_get_tiling get_tiling = {
                .handle = rsc->bo->handle,
        };
        int ret = vc4_ioctl(screen->fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);

        if (ret != 0) {
                whandle->modifier = DRM_FORMAT_MOD_LINEAR;
        } else if (whandle->modifier == DRM_FORMAT_MOD_INVALID) {
                whandle->modifier = get_tiling.modifier;
        } else if (whandle->modifier != get_tiling.modifier) {
                fprintf(stderr,
                        "Modifier 0x%llx vs. tiling (0x%llx) mismatch\n",
                        (long long)whandle->modifier, get_tiling.modifier);
                goto fail;
        }

        switch (whandle->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                rsc->tiled = false;
                break;
        case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
                rsc->tiled = true;
                break;
        default:
                fprintf(stderr,
                        "Attempt to import unsupported modifier 0x%llx\n",
                        (long long)whandle->modifier);
                goto fail;
        }

        rsc->vc4_format = get_resource_texture_format(prsc);
        vc4_setup_slices(rsc, "import");

        if (whandle->offset != 0) {
                if (rsc->tiled) {
                        fprintf(stderr,
                                "Attempt to import unsupported "
                                "winsys offset %u\n",
                                whandle->offset);
                        goto fail;
                }

                rsc->slices[0].offset += whandle->offset;

                if (rsc->slices[0].offset + rsc->slices[0].size >
                    rsc->bo->size) {
                        fprintf(stderr, "Attempt to import "
                                "with overflowing offset (%d + %d > %d)\n",
                                whandle->offset,
                                rsc->slices[0].size,
                                rsc->bo->size);
                        goto fail;
                }
        }

        if (screen->ro) {
                /* Make sure that renderonly has a handle to our buffer in the
                 * display's fd, so that a later renderonly_get_handle()
                 * returns correct handles or GEM names.
                 */
                rsc->scanout =
                        renderonly_create_gpu_import_for_resource(prsc,
                                                                  screen->ro,
                                                                  NULL);
                if (!rsc->scanout)
                        goto fail;
        }

        if (rsc->tiled && whandle->stride != slice->stride) {
                static bool warned = false;
                if (!warned) {
                        warned = true;
                        fprintf(stderr,
                                "Attempting to import %dx%d %s with "
                                "unsupported stride %d instead of %d\n",
                                prsc->width0, prsc->height0,
                                util_format_short_name(prsc->format),
                                whandle->stride,
                                slice->stride);
                }
                goto fail;
        } else if (!rsc->tiled) {
                slice->stride = whandle->stride;
        }

        return prsc;

fail:
        vc4_resource_destroy(pscreen, prsc);
        return NULL;
}

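/**
 * Creates a pipe_surface for rendering to a single level/layer of a
 * resource, recording the byte offset and tiling format that the rest of
 * the driver uses when binding it as a render target.
 */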
static struct pipe_surface *
vc4_create_surface(struct pipe_context *pctx,
                   struct pipe_resource *ptex,
                   const struct pipe_surface *surf_tmpl)
{
        struct vc4_surface *surface = CALLOC_STRUCT(vc4_surface);
        struct vc4_resource *rsc = vc4_resource(ptex);

        if (!surface)
                return NULL;

        assert(surf_tmpl->u.tex.first_layer == surf_tmpl->u.tex.last_layer);

        struct pipe_surface *psurf = &surface->base;
        unsigned level = surf_tmpl->u.tex.level;

        pipe_reference_init(&psurf->reference, 1);
        pipe_resource_reference(&psurf->texture, ptex);

        psurf->context = pctx;
        psurf->format = surf_tmpl->format;
        psurf->width = u_minify(ptex->width0, level);
        psurf->height = u_minify(ptex->height0, level);
        psurf->u.tex.level = level;
        psurf->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
        psurf->u.tex.last_layer = surf_tmpl->u.tex.last_layer;
        surface->offset = (rsc->slices[level].offset +
                           psurf->u.tex.first_layer * rsc->cube_map_stride);
        surface->tiling = rsc->slices[level].tiling;

        return &surface->base;
}

static void
vc4_surface_destroy(struct pipe_context *pctx, struct pipe_surface *psurf)
{
        pipe_resource_reference(&psurf->texture, NULL);
        FREE(psurf);
}

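/**
 * Debug helper that prints a raster RGBA8888 surface as ASCII art, one
 * character per chunk of pixels, followed by a legend of the colors found.
 */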
static void
vc4_dump_surface_non_msaa(struct pipe_surface *psurf)
{
        struct pipe_resource *prsc = psurf->texture;
        struct vc4_resource *rsc = vc4_resource(prsc);
        uint32_t *map = vc4_bo_map(rsc->bo);
        uint32_t stride = rsc->slices[0].stride / 4;
        uint32_t width = psurf->width;
        uint32_t height = psurf->height;
        uint32_t chunk_w = width / 79;
        uint32_t chunk_h = height / 40;
        uint32_t found_colors[10];
        uint32_t num_found_colors = 0;

        if (rsc->vc4_format != VC4_TEXTURE_TYPE_RGBA32R) {
                fprintf(stderr, "%s: Unsupported format %s\n",
                        __func__, util_format_short_name(psurf->format));
                return;
        }

        for (int by = 0; by < height; by += chunk_h) {
                for (int bx = 0; bx < width; bx += chunk_w) {
                        int all_found_color = -1; /* nothing found */

                        for (int y = by; y < MIN2(height, by + chunk_h); y++) {
                                for (int x = bx; x < MIN2(width, bx + chunk_w); x++) {
                                        uint32_t pix = map[y * stride + x];

                                        int i;
                                        for (i = 0; i < num_found_colors; i++) {
                                                if (pix == found_colors[i])
                                                        break;
                                        }
                                        if (i == num_found_colors &&
                                            num_found_colors <
                                            ARRAY_SIZE(found_colors)) {
                                                found_colors[num_found_colors++] = pix;
                                        }

                                        if (i < num_found_colors) {
                                                if (all_found_color == -1)
                                                        all_found_color = i;
                                                else if (i != all_found_color)
                                                        all_found_color = ARRAY_SIZE(found_colors);
                                        }
                                }
                        }
                        /* If all pixels for this chunk have a consistent
                         * value, then print a character for it.  Either a
                         * fixed name (particularly common for piglit tests),
                         * or a runtime-generated number.
                         */
                        if (all_found_color >= 0 &&
                            all_found_color < ARRAY_SIZE(found_colors)) {
                                static const struct {
                                        uint32_t val;
                                        const char *c;
                                } named_colors[] = {
                                        { 0xff000000, "█" },
                                        { 0x00000000, "█" },
                                        { 0xffff0000, "r" },
                                        { 0xff00ff00, "g" },
                                        { 0xff0000ff, "b" },
                                        { 0xffffffff, "w" },
                                };
                                int i;
                                for (i = 0; i < ARRAY_SIZE(named_colors); i++) {
                                        if (named_colors[i].val ==
                                            found_colors[all_found_color]) {
                                                fprintf(stderr, "%s",
                                                        named_colors[i].c);
                                                break;
                                        }
                                }
                                /* For unnamed colors, print a digit; the
                                 * value of each digit's color is listed at
                                 * the end.
                                 */
                                if (i == ARRAY_SIZE(named_colors)) {
                                        fprintf(stderr, "%c",
                                                '0' + all_found_color);
                                }
                        } else {
                                /* If there's no consistent color, print this.
                                 */
                                fprintf(stderr, ".");
                        }
                }
                fprintf(stderr, "\n");
        }

        for (int i = 0; i < num_found_colors; i++) {
                fprintf(stderr, "color %d: 0x%08x\n", i, found_colors[i]);
        }
}

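/**
 * Debug helper that fetches a single sample from an MSAA surface stored as
 * raw tile buffer contents: 32x32-pixel tiles, with the samples of each
 * 2x2 pixel quad packed together inside the tile.
 */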
static uint32_t
vc4_surface_msaa_get_sample(struct pipe_surface *psurf,
                            uint32_t x, uint32_t y, uint32_t sample)
{
        struct pipe_resource *prsc = psurf->texture;
        struct vc4_resource *rsc = vc4_resource(prsc);
        uint32_t tile_w = 32, tile_h = 32;
        uint32_t tiles_w = DIV_ROUND_UP(psurf->width, 32);

        uint32_t tile_x = x / tile_w;
        uint32_t tile_y = y / tile_h;
        uint32_t *tile = (vc4_bo_map(rsc->bo) +
                          VC4_TILE_BUFFER_SIZE * (tile_y * tiles_w + tile_x));
        uint32_t subtile_x = x % tile_w;
        uint32_t subtile_y = y % tile_h;

        uint32_t quad_samples = VC4_MAX_SAMPLES * 4;
        uint32_t tile_stride = quad_samples * tile_w / 2;

        return *((uint32_t *)tile +
                 (subtile_y >> 1) * tile_stride +
                 (subtile_x >> 1) * quad_samples +
                 ((subtile_y & 1) << 1) +
                 (subtile_x & 1) +
                 sample);
}

static void
vc4_dump_surface_msaa_char(struct pipe_surface *psurf,
                           uint32_t start_x, uint32_t start_y,
                           uint32_t w, uint32_t h)
{
        bool all_same_color = true;
        uint32_t all_pix = 0;

        for (int y = start_y; y < start_y + h; y++) {
                for (int x = start_x; x < start_x + w; x++) {
                        for (int s = 0; s < VC4_MAX_SAMPLES; s++) {
                                uint32_t pix = vc4_surface_msaa_get_sample(psurf,
                                                                           x, y,
                                                                           s);
                                if (x == start_x && y == start_y)
                                        all_pix = pix;
                                else if (all_pix != pix)
                                        all_same_color = false;
                        }
                }
        }
        if (all_same_color) {
                static const struct {
                        uint32_t val;
                        const char *c;
                } named_colors[] = {
                        { 0xff000000, "█" },
                        { 0x00000000, "█" },
                        { 0xffff0000, "r" },
                        { 0xff00ff00, "g" },
                        { 0xff0000ff, "b" },
                        { 0xffffffff, "w" },
                };
                int i;
                for (i = 0; i < ARRAY_SIZE(named_colors); i++) {
                        if (named_colors[i].val == all_pix) {
                                fprintf(stderr, "%s",
                                        named_colors[i].c);
                                return;
                        }
                }
                fprintf(stderr, "x");
        } else {
                fprintf(stderr, ".");
        }
}

static void
vc4_dump_surface_msaa(struct pipe_surface *psurf)
{
        uint32_t tile_w = 32, tile_h = 32;
        uint32_t tiles_w = DIV_ROUND_UP(psurf->width, tile_w);
        uint32_t tiles_h = DIV_ROUND_UP(psurf->height, tile_h);
        uint32_t char_w = 140, char_h = 60;
        uint32_t char_w_per_tile = char_w / tiles_w - 1;
        uint32_t char_h_per_tile = char_h / tiles_h - 1;

        fprintf(stderr, "Surface: %dx%d (%dx MSAA)\n",
                psurf->width, psurf->height, psurf->texture->nr_samples);

        for (int x = 0; x < (char_w_per_tile + 1) * tiles_w; x++)
                fprintf(stderr, "-");
        fprintf(stderr, "\n");

        for (int ty = 0; ty < psurf->height; ty += tile_h) {
                for (int y = 0; y < char_h_per_tile; y++) {

                        for (int tx = 0; tx < psurf->width; tx += tile_w) {
                                for (int x = 0; x < char_w_per_tile; x++) {
                                        uint32_t bx1 = (x * tile_w /
                                                        char_w_per_tile);
                                        uint32_t bx2 = ((x + 1) * tile_w /
                                                        char_w_per_tile);
                                        uint32_t by1 = (y * tile_h /
                                                        char_h_per_tile);
                                        uint32_t by2 = ((y + 1) * tile_h /
                                                        char_h_per_tile);

                                        vc4_dump_surface_msaa_char(psurf,
                                                                   tx + bx1,
                                                                   ty + by1,
                                                                   bx2 - bx1,
                                                                   by2 - by1);
                                }
                                fprintf(stderr, "|");
                        }
                        fprintf(stderr, "\n");
                }

                for (int x = 0; x < (char_w_per_tile + 1) * tiles_w; x++)
                        fprintf(stderr, "-");
                fprintf(stderr, "\n");
        }
}

/** Debug routine to dump the contents of an 8888 surface to the console */
void
vc4_dump_surface(struct pipe_surface *psurf)
{
        if (!psurf)
                return;

        if (psurf->texture->nr_samples > 1)
                vc4_dump_surface_msaa(psurf);
        else
                vc4_dump_surface_non_msaa(psurf);
}

static void
vc4_flush_resource(struct pipe_context *pctx, struct pipe_resource *resource)
{
        /* All calls to flush_resource are followed by a flush of the context,
         * so there's nothing to do.
         */
}

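/**
 * Refreshes the contents of a shadow texture from its parent resource.
 *
 * Sampler views that need a layout the hardware can't sample directly
 * (a nonzero base level, or a raster-layout texture) are backed by a
 * separate shadow resource; this blits each of the shadow's levels from the
 * corresponding level of the original whenever the original has seen new
 * writes.
 */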
void
vc4_update_shadow_baselevel_texture(struct pipe_context *pctx,
                                    struct pipe_sampler_view *pview)
{
        struct vc4_sampler_view *view = vc4_sampler_view(pview);
        struct vc4_resource *shadow = vc4_resource(view->texture);
        struct vc4_resource *orig = vc4_resource(pview->texture);

        assert(view->texture != pview->texture);

        if (shadow->writes == orig->writes && orig->bo->private)
                return;

        perf_debug("Updating %dx%d@%d shadow texture due to %s\n",
                   orig->base.width0, orig->base.height0,
                   pview->u.tex.first_level,
                   pview->u.tex.first_level ? "base level" : "raster layout");

        for (int i = 0; i <= shadow->base.last_level; i++) {
                unsigned width = u_minify(shadow->base.width0, i);
                unsigned height = u_minify(shadow->base.height0, i);
                struct pipe_blit_info info = {
                        .dst = {
                                .resource = &shadow->base,
                                .level = i,
                                .box = {
                                        .x = 0,
                                        .y = 0,
                                        .z = 0,
                                        .width = width,
                                        .height = height,
                                        .depth = 1,
                                },
                                .format = shadow->base.format,
                        },
                        .src = {
                                .resource = &orig->base,
                                .level = pview->u.tex.first_level + i,
                                .box = {
                                        .x = 0,
                                        .y = 0,
                                        .z = 0,
                                        .width = width,
                                        .height = height,
                                        .depth = 1,
                                },
                                .format = orig->base.format,
                        },
                        .mask = ~0,
                };
                pctx->blit(pctx, &info);
        }

        shadow->writes = orig->writes;
}

/**
 * Converts a 4-byte index buffer to 2 bytes.
 *
 * Since GLES2 only has support for 1 and 2-byte indices, the hardware doesn't
 * include 4-byte index support, and we have to shrink it down.
 *
 * There's no fallback support for when indices end up being larger than 2^16,
 * though it will at least fail an assertion.  Also, if the original index
 * data was in user memory, it would be nice to not have uploaded it to a VBO
 * before translating.
 */
struct pipe_resource *
vc4_get_shadow_index_buffer(struct pipe_context *pctx,
                            const struct pipe_draw_info *info,
                            uint32_t offset,
                            uint32_t count,
                            uint32_t *shadow_offset)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_resource *orig = vc4_resource(info->index.resource);
        perf_debug("Fallback conversion for %d uint indices\n", count);

        void *data;
        struct pipe_resource *shadow_rsc = NULL;
        u_upload_alloc(vc4->uploader, 0, count * 2, 4,
                       shadow_offset, &shadow_rsc, &data);
        uint16_t *dst = data;

        struct pipe_transfer *src_transfer = NULL;
        const uint32_t *src;
        if (info->has_user_indices) {
                src = info->index.user;
        } else {
                src = pipe_buffer_map_range(pctx, &orig->base,
                                            offset,
                                            count * 4,
                                            PIPE_TRANSFER_READ, &src_transfer);
        }

        for (int i = 0; i < count; i++) {
                uint32_t src_index = src[i];
                assert(src_index <= 0xffff);
                dst[i] = src_index;
        }

        if (src_transfer)
                pctx->transfer_unmap(pctx, src_transfer);

        return shadow_rsc;
}

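/* Transfer map/unmap are routed through u_transfer_helper, which layers
 * shared fallbacks (for vc4, mainly CPU mapping of MSAA surfaces) on top of
 * the driver hooks below.
 */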
static const struct u_transfer_vtbl transfer_vtbl = {
        .resource_create = vc4_resource_create,
        .resource_destroy = vc4_resource_destroy,
        .transfer_map = vc4_resource_transfer_map,
        .transfer_unmap = vc4_resource_transfer_unmap,
        .transfer_flush_region = u_default_transfer_flush_region,
};

void
vc4_resource_screen_init(struct pipe_screen *pscreen)
{
        struct vc4_screen *screen = vc4_screen(pscreen);

        pscreen->resource_create = vc4_resource_create;
        pscreen->resource_create_with_modifiers =
                vc4_resource_create_with_modifiers;
        pscreen->resource_from_handle = vc4_resource_from_handle;
        pscreen->resource_get_handle = vc4_resource_get_handle;
        pscreen->resource_destroy = vc4_resource_destroy;
        pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
                                                            false, false, true);

        /* Test if the kernel has GET_TILING; it will return -EINVAL if the
         * ioctl does not exist, but -ENOENT if we pass an impossible handle.
         * 0 cannot be a valid GEM object, so use that.
         */
        struct drm_vc4_get_tiling get_tiling = {
                .handle = 0x0,
        };
        int ret = vc4_ioctl(screen->fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
        if (ret == -1 && errno == ENOENT)
                screen->has_tiling_ioctl = true;
}

void
vc4_resource_context_init(struct pipe_context *pctx)
{
        pctx->transfer_map = u_transfer_helper_transfer_map;
        pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
        pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
        pctx->buffer_subdata = u_default_buffer_subdata;
        pctx->texture_subdata = u_default_texture_subdata;
        pctx->create_surface = vc4_create_surface;
        pctx->surface_destroy = vc4_surface_destroy;
        pctx->resource_copy_region = util_resource_copy_region;
        pctx->blit = vc4_blit;
        pctx->flush_resource = vc4_flush_resource;
}