gallium: Add renderonly-based support for pl111+vc4.
[mesa.git] / src / gallium / drivers / vc4 / vc4_resource.c
1 /*
2 * Copyright © 2014 Broadcom
3 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "util/u_blit.h"
26 #include "util/u_memory.h"
27 #include "util/u_format.h"
28 #include "util/u_inlines.h"
29 #include "util/u_surface.h"
30 #include "util/u_upload_mgr.h"
31
32 #include "vc4_screen.h"
33 #include "vc4_context.h"
34 #include "vc4_resource.h"
35 #include "vc4_tiling.h"
36
/* Set to true to dump miptree layout / allocation info to stderr. */
static bool miptree_debug = false;
38
39 static bool
40 vc4_resource_bo_alloc(struct vc4_resource *rsc)
41 {
42 struct pipe_resource *prsc = &rsc->base;
43 struct pipe_screen *pscreen = prsc->screen;
44 struct vc4_bo *bo;
45
46 if (miptree_debug) {
47 fprintf(stderr, "alloc %p: size %d + offset %d -> %d\n",
48 rsc,
49 rsc->slices[0].size,
50 rsc->slices[0].offset,
51 rsc->slices[0].offset +
52 rsc->slices[0].size +
53 rsc->cube_map_stride * (prsc->array_size - 1));
54 }
55
56 bo = vc4_bo_alloc(vc4_screen(pscreen),
57 rsc->slices[0].offset +
58 rsc->slices[0].size +
59 rsc->cube_map_stride * (prsc->array_size - 1),
60 "resource");
61 if (bo) {
62 vc4_bo_unreference(&rsc->bo);
63 rsc->bo = bo;
64 return true;
65 } else {
66 return false;
67 }
68 }
69
/**
 * Finishes a CPU mapping of a resource.
 *
 * For tiled resources (trans->map non-NULL), write mappings are stored back
 * from the temporary linear buffer into the tiled BO.  For MSAA resources
 * (trans->ss_resource non-NULL), write mappings are blitted from the
 * single-sampled temporary back into the real multisampled resource.
 */
static void
vc4_resource_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *ptrans)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_transfer *trans = vc4_transfer(ptrans);

        if (trans->map) {
                struct vc4_resource *rsc;
                struct vc4_resource_slice *slice;
                /* If we mapped through a single-sampled temporary, the CPU
                 * copy corresponds to its level 0, not the original level.
                 */
                if (trans->ss_resource) {
                        rsc = vc4_resource(trans->ss_resource);
                        slice = &rsc->slices[0];
                } else {
                        rsc = vc4_resource(ptrans->resource);
                        slice = &rsc->slices[ptrans->level];
                }

                /* Re-tile the linear staging data into the BO. */
                if (ptrans->usage & PIPE_TRANSFER_WRITE) {
                        vc4_store_tiled_image(rsc->bo->map + slice->offset +
                                              ptrans->box.z * rsc->cube_map_stride,
                                              slice->stride,
                                              trans->map, ptrans->stride,
                                              slice->tiling, rsc->cpp,
                                              &ptrans->box);
                }
                free(trans->map);
        }

        /* Resolve the single-sampled temporary back into the MSAA resource. */
        if (trans->ss_resource && (ptrans->usage & PIPE_TRANSFER_WRITE)) {
                struct pipe_blit_info blit;
                memset(&blit, 0, sizeof(blit));

                blit.src.resource = trans->ss_resource;
                blit.src.format = trans->ss_resource->format;
                blit.src.box.width = trans->ss_box.width;
                blit.src.box.height = trans->ss_box.height;
                blit.src.box.depth = 1;

                blit.dst.resource = ptrans->resource;
                blit.dst.format = ptrans->resource->format;
                blit.dst.level = ptrans->level;
                blit.dst.box = trans->ss_box;

                blit.mask = util_format_get_mask(ptrans->resource->format);
                blit.filter = PIPE_TEX_FILTER_NEAREST;

                pctx->blit(pctx, &blit);

                pipe_resource_reference(&trans->ss_resource, NULL);
        }

        pipe_resource_reference(&ptrans->resource, NULL);
        slab_free(&vc4->transfer_pool, ptrans);
}
125
126 static struct pipe_resource *
127 vc4_get_temp_resource(struct pipe_context *pctx,
128 struct pipe_resource *prsc,
129 const struct pipe_box *box)
130 {
131 struct pipe_resource temp_setup;
132
133 memset(&temp_setup, 0, sizeof(temp_setup));
134 temp_setup.target = prsc->target;
135 temp_setup.format = prsc->format;
136 temp_setup.width0 = box->width;
137 temp_setup.height0 = box->height;
138 temp_setup.depth0 = 1;
139 temp_setup.array_size = 1;
140
141 return pctx->screen->resource_create(pctx->screen, &temp_setup);
142 }
143
/**
 * Maps a resource for CPU access.
 *
 * MSAA resources are first resolved into a single-sampled temporary, and
 * tiled resources are untiled into a malloc'd linear staging buffer; either
 * way the write-back happens in vc4_resource_transfer_unmap().  Linear
 * resources are mapped directly.
 */
static void *
vc4_resource_transfer_map(struct pipe_context *pctx,
                          struct pipe_resource *prsc,
                          unsigned level, unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **pptrans)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_resource *rsc = vc4_resource(prsc);
        struct vc4_transfer *trans;
        struct pipe_transfer *ptrans;
        enum pipe_format format = prsc->format;
        char *buf;

        /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
         * being mapped.  Only done for private BOs, since a shared BO's
         * storage can't be swapped out from under other users.
         */
        if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
            !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
            !(prsc->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) &&
            prsc->last_level == 0 &&
            prsc->width0 == box->width &&
            prsc->height0 == box->height &&
            prsc->depth0 == box->depth &&
            prsc->array_size == 1 &&
            rsc->bo->private) {
                usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
        }

        if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
                /* Discarding the whole thing: swap in a fresh BO so we never
                 * stall on the GPU's use of the old contents.
                 */
                if (vc4_resource_bo_alloc(rsc)) {
                        /* If it might be bound as one of our vertex buffers,
                         * make sure we re-emit vertex buffer state.
                         */
                        if (prsc->bind & PIPE_BIND_VERTEX_BUFFER)
                                vc4->dirty |= VC4_DIRTY_VTXBUF;
                } else {
                        /* If we failed to reallocate, flush users so that we
                         * don't violate any syncing requirements.
                         */
                        vc4_flush_jobs_reading_resource(vc4, prsc);
                }
        } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
                /* If we're writing and the buffer is being used by the CL, we
                 * have to flush the CL first. If we're only reading, we need
                 * to flush if the CL has written our buffer.
                 */
                if (usage & PIPE_TRANSFER_WRITE)
                        vc4_flush_jobs_reading_resource(vc4, prsc);
                else
                        vc4_flush_jobs_writing_resource(vc4, prsc);
        }

        if (usage & PIPE_TRANSFER_WRITE) {
                /* Track CPU writes (used for shadow-texture invalidation) and
                 * mark every buffer of the surface as initialized.
                 */
                rsc->writes++;
                rsc->initialized_buffers = ~0;
        }

        trans = slab_alloc(&vc4->transfer_pool);
        if (!trans)
                return NULL;

        /* XXX: Handle DONTBLOCK, DISCARD_RANGE, PERSISTENT, COHERENT. */

        /* slab_alloc_st() doesn't zero: */
        memset(trans, 0, sizeof(*trans));
        ptrans = &trans->base;

        pipe_resource_reference(&ptrans->resource, prsc);
        ptrans->level = level;
        ptrans->usage = usage;
        ptrans->box = *box;

        /* If the resource is multisampled, we need to resolve to single
         * sample.  This seems like it should be handled at a higher layer.
         */
        if (prsc->nr_samples > 1) {
                trans->ss_resource = vc4_get_temp_resource(pctx, prsc, box);
                if (!trans->ss_resource)
                        goto fail;
                assert(!trans->ss_resource->nr_samples);

                /* The ptrans->box gets modified for tile alignment, so save
                 * the original box for unmap time.
                 */
                trans->ss_box = *box;

                /* For read mappings, resolve the current MSAA contents into
                 * the temporary first.
                 */
                if (usage & PIPE_TRANSFER_READ) {
                        struct pipe_blit_info blit;
                        memset(&blit, 0, sizeof(blit));

                        blit.src.resource = ptrans->resource;
                        blit.src.format = ptrans->resource->format;
                        blit.src.level = ptrans->level;
                        blit.src.box = trans->ss_box;

                        blit.dst.resource = trans->ss_resource;
                        blit.dst.format = trans->ss_resource->format;
                        blit.dst.box.width = trans->ss_box.width;
                        blit.dst.box.height = trans->ss_box.height;
                        blit.dst.box.depth = 1;

                        blit.mask = util_format_get_mask(prsc->format);
                        blit.filter = PIPE_TEX_FILTER_NEAREST;

                        pctx->blit(pctx, &blit);
                        vc4_flush_jobs_writing_resource(vc4, blit.dst.resource);
                }

                /* The rest of the mapping process should use our temporary. */
                prsc = trans->ss_resource;
                rsc = vc4_resource(prsc);
                ptrans->box.x = 0;
                ptrans->box.y = 0;
                ptrans->box.z = 0;
        }

        /* Note that the current kernel implementation is synchronous, so no
         * need to do syncing stuff here yet.
         */

        if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
                buf = vc4_bo_map_unsynchronized(rsc->bo);
        else
                buf = vc4_bo_map(rsc->bo);
        if (!buf) {
                fprintf(stderr, "Failed to map bo\n");
                goto fail;
        }

        *pptrans = ptrans;

        struct vc4_resource_slice *slice = &rsc->slices[level];
        if (rsc->tiled) {
                uint32_t utile_w = vc4_utile_width(rsc->cpp);
                uint32_t utile_h = vc4_utile_height(rsc->cpp);

                /* No direct mappings of tiled, since we need to manually
                 * tile/untile.
                 */
                /* NOTE(review): this early return leaks `trans` and the
                 * ptrans->resource reference taken above — it should
                 * arguably `goto fail`.  Confirm caller expectations before
                 * changing.
                 */
                if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
                        return NULL;

                if (format == PIPE_FORMAT_ETC1_RGB8) {
                        /* ETC1 is arranged as 64-bit blocks, where each block
                         * is 4x4 pixels.  Texture tiling operates on the
                         * 64-bit block the way it would an uncompressed
                         * pixels.
                         */
                        assert(!(ptrans->box.x & 3));
                        assert(!(ptrans->box.y & 3));
                        ptrans->box.x >>= 2;
                        ptrans->box.y >>= 2;
                        ptrans->box.width = (ptrans->box.width + 3) >> 2;
                        ptrans->box.height = (ptrans->box.height + 3) >> 2;
                }

                /* We need to align the box to utile boundaries, since that's
                 * what load/store operates on.  This may cause us to need to
                 * read out the original contents in that border area.  Right
                 * now we just read out the entire contents, including the
                 * middle area that will just get overwritten.
                 */
                uint32_t box_start_x = ptrans->box.x & (utile_w - 1);
                uint32_t box_start_y = ptrans->box.y & (utile_h - 1);
                bool needs_load = (usage & PIPE_TRANSFER_READ) != 0;

                if (box_start_x) {
                        ptrans->box.width += box_start_x;
                        ptrans->box.x -= box_start_x;
                        needs_load = true;
                }
                if (box_start_y) {
                        ptrans->box.height += box_start_y;
                        ptrans->box.y -= box_start_y;
                        needs_load = true;
                }
                if (ptrans->box.width & (utile_w - 1)) {
                        /* We only need to force a load if our border region
                         * we're extending into is actually part of the
                         * texture.
                         */
                        uint32_t slice_width = u_minify(prsc->width0, level);
                        if (ptrans->box.x + ptrans->box.width != slice_width)
                                needs_load = true;
                        ptrans->box.width = align(ptrans->box.width, utile_w);
                }
                if (ptrans->box.height & (utile_h - 1)) {
                        uint32_t slice_height = u_minify(prsc->height0, level);
                        if (ptrans->box.y + ptrans->box.height != slice_height)
                                needs_load = true;
                        ptrans->box.height = align(ptrans->box.height, utile_h);
                }

                ptrans->stride = ptrans->box.width * rsc->cpp;
                ptrans->layer_stride = ptrans->stride * ptrans->box.height;

                /* NOTE(review): malloc() result is not NULL-checked before
                 * use below — confirm and add handling.
                 */
                trans->map = malloc(ptrans->layer_stride * ptrans->box.depth);

                if (needs_load) {
                        vc4_load_tiled_image(trans->map, ptrans->stride,
                                             buf + slice->offset +
                                             ptrans->box.z * rsc->cube_map_stride,
                                             slice->stride,
                                             slice->tiling, rsc->cpp,
                                             &ptrans->box);
                }
                /* Return a pointer adjusted so that the caller's (x, y)
                 * origin lands at the start of their requested box within the
                 * utile-aligned staging buffer.
                 */
                return (trans->map +
                        box_start_x * rsc->cpp +
                        box_start_y * ptrans->stride);
        } else {
                ptrans->stride = slice->stride;
                ptrans->layer_stride = ptrans->stride;

                /* Linear layout: map the BO directly at the box's offset. */
                return buf + slice->offset +
                        ptrans->box.y / util_format_get_blockheight(format) * ptrans->stride +
                        ptrans->box.x / util_format_get_blockwidth(format) * rsc->cpp +
                        ptrans->box.z * rsc->cube_map_stride;
        }


fail:
        vc4_resource_transfer_unmap(pctx, ptrans);
        return NULL;
}
369
370 static void
371 vc4_resource_destroy(struct pipe_screen *pscreen,
372 struct pipe_resource *prsc)
373 {
374 struct vc4_screen *screen = vc4_screen(pscreen);
375 struct vc4_resource *rsc = vc4_resource(prsc);
376 pipe_resource_reference(&rsc->shadow_parent, NULL);
377 vc4_bo_unreference(&rsc->bo);
378
379 if (rsc->scanout)
380 renderonly_scanout_destroy(rsc->scanout, screen->ro);
381
382 free(rsc);
383 }
384
/**
 * Exports a winsys handle (flink name, KMS handle, or dma-buf fd) for the
 * resource's BO.
 */
static boolean
vc4_resource_get_handle(struct pipe_screen *pscreen,
                        struct pipe_context *pctx,
                        struct pipe_resource *prsc,
                        struct winsys_handle *whandle,
                        unsigned usage)
{
        struct vc4_screen *screen = vc4_screen(pscreen);
        struct vc4_resource *rsc = vc4_resource(prsc);

        whandle->stride = rsc->slices[0].stride;

        /* If we're passing some reference to our BO out to some other part of
         * the system, then we can't do any optimizations about only us being
         * the ones seeing it (like BO caching or shadow update avoidance).
         */
        rsc->bo->private = false;

        switch (whandle->type) {
        case DRM_API_HANDLE_TYPE_SHARED:
                /* Flink names are per-device, so they can't be used when the
                 * display device (pl111) differs from the render device.
                 */
                if (screen->ro) {
                        /* This could probably be supported, assuming that a
                         * control node was used for pl111.
                         */
                        fprintf(stderr, "flink unsupported with pl111\n");
                        return FALSE;
                }

                return vc4_bo_flink(rsc->bo, &whandle->handle);
        case DRM_API_HANDLE_TYPE_KMS:
                /* Prefer the display device's handle when renderonly is in
                 * use; fall back to our own GEM handle otherwise.
                 */
                if (screen->ro && renderonly_get_handle(rsc->scanout, whandle))
                        return TRUE;
                whandle->handle = rsc->bo->handle;
                return TRUE;
        case DRM_API_HANDLE_TYPE_FD:
                /* FDs are cross-device, so we can export directly from vc4.
                 */
                whandle->handle = vc4_bo_get_dmabuf(rsc->bo);
                return whandle->handle != -1;
        }

        return FALSE;
}
428
/**
 * Computes the in-BO layout (offset/stride/size/tiling) of every mip level.
 *
 * Slices are laid out smallest level first, so slices[0] ends up at the top
 * of the BO; the level-0 offset is then page-aligned because the hardware's
 * texture base pointer has no intra-page bits.
 */
static void
vc4_setup_slices(struct vc4_resource *rsc)
{
        struct pipe_resource *prsc = &rsc->base;
        uint32_t width = prsc->width0;
        uint32_t height = prsc->height0;
        /* ETC1 is tiled in units of its 4x4-pixel 64-bit blocks. */
        if (prsc->format == PIPE_FORMAT_ETC1_RGB8) {
                width = (width + 3) >> 2;
                height = (height + 3) >> 2;
        }

        /* Levels > 0 are sized by minifying the power-of-two-padded level-0
         * dimensions, per the hardware's mipmap addressing.
         */
        uint32_t pot_width = util_next_power_of_two(width);
        uint32_t pot_height = util_next_power_of_two(height);
        uint32_t offset = 0;
        uint32_t utile_w = vc4_utile_width(rsc->cpp);
        uint32_t utile_h = vc4_utile_height(rsc->cpp);

        /* Walk from the smallest level up so the accumulated offset places
         * level 0 last (highest address).
         */
        for (int i = prsc->last_level; i >= 0; i--) {
                struct vc4_resource_slice *slice = &rsc->slices[i];

                uint32_t level_width, level_height;
                if (i == 0) {
                        level_width = width;
                        level_height = height;
                } else {
                        level_width = u_minify(pot_width, i);
                        level_height = u_minify(pot_height, i);
                }

                if (!rsc->tiled) {
                        slice->tiling = VC4_TILING_FORMAT_LINEAR;
                        if (prsc->nr_samples > 1) {
                                /* MSAA (4x) surfaces are stored as raw tile buffer contents. */
                                level_width = align(level_width, 32);
                                level_height = align(level_height, 32);
                        } else {
                                level_width = align(level_width, utile_w);
                        }
                } else {
                        /* Small levels use LT (linear-tile) layout; larger
                         * ones use full T tiling, which operates on 4x4
                         * groups of 2x2-utile subtiles.
                         */
                        if (vc4_size_is_lt(level_width, level_height,
                                           rsc->cpp)) {
                                slice->tiling = VC4_TILING_FORMAT_LT;
                                level_width = align(level_width, utile_w);
                                level_height = align(level_height, utile_h);
                        } else {
                                slice->tiling = VC4_TILING_FORMAT_T;
                                level_width = align(level_width,
                                                    4 * 2 * utile_w);
                                level_height = align(level_height,
                                                     4 * 2 * utile_h);
                        }
                }

                slice->offset = offset;
                slice->stride = (level_width * rsc->cpp *
                                 MAX2(prsc->nr_samples, 1));
                slice->size = level_height * slice->stride;

                offset += slice->size;

                if (miptree_debug) {
                        static const char tiling_chars[] = {
                                [VC4_TILING_FORMAT_LINEAR] = 'R',
                                [VC4_TILING_FORMAT_LT] = 'L',
                                [VC4_TILING_FORMAT_T] = 'T'
                        };
                        fprintf(stderr,
                                "rsc setup %p (format %s: vc4 %d), %dx%d: "
                                "level %d (%c) -> %dx%d, stride %d@0x%08x\n",
                                rsc,
                                util_format_short_name(prsc->format),
                                rsc->vc4_format,
                                prsc->width0, prsc->height0,
                                i, tiling_chars[slice->tiling],
                                level_width, level_height,
                                slice->stride, slice->offset);
                }
        }

        /* The texture base pointer that has to point to level 0 doesn't have
         * intra-page bits, so we have to align it, and thus shift up all the
         * smaller slices.
         */
        uint32_t page_align_offset = (align(rsc->slices[0].offset, 4096) -
                                      rsc->slices[0].offset);
        if (page_align_offset) {
                for (int i = 0; i <= prsc->last_level; i++)
                        rsc->slices[i].offset += page_align_offset;
        }

        /* Cube map faces appear as whole miptrees at a page-aligned offset
         * from the first face's miptree.
         */
        if (prsc->target == PIPE_TEXTURE_CUBE) {
                rsc->cube_map_stride = align(rsc->slices[0].offset +
                                             rsc->slices[0].size, 4096);
        }
}
527
528 static struct vc4_resource *
529 vc4_resource_setup(struct pipe_screen *pscreen,
530 const struct pipe_resource *tmpl)
531 {
532 struct vc4_resource *rsc = CALLOC_STRUCT(vc4_resource);
533 if (!rsc)
534 return NULL;
535 struct pipe_resource *prsc = &rsc->base;
536
537 *prsc = *tmpl;
538
539 pipe_reference_init(&prsc->reference, 1);
540 prsc->screen = pscreen;
541
542 if (prsc->nr_samples <= 1)
543 rsc->cpp = util_format_get_blocksize(tmpl->format);
544 else
545 rsc->cpp = sizeof(uint32_t);
546
547 assert(rsc->cpp);
548
549 return rsc;
550 }
551
552 static enum vc4_texture_data_type
553 get_resource_texture_format(struct pipe_resource *prsc)
554 {
555 struct vc4_resource *rsc = vc4_resource(prsc);
556 uint8_t format = vc4_get_tex_format(prsc->format);
557
558 if (!rsc->tiled) {
559 if (prsc->nr_samples > 1) {
560 return ~0;
561 } else {
562 assert(format == VC4_TEXTURE_TYPE_RGBA8888);
563 return VC4_TEXTURE_TYPE_RGBA32R;
564 }
565 }
566
567 return format;
568 }
569
570 struct pipe_resource *
571 vc4_resource_create(struct pipe_screen *pscreen,
572 const struct pipe_resource *tmpl)
573 {
574 struct vc4_screen *screen = vc4_screen(pscreen);
575 struct vc4_resource *rsc = vc4_resource_setup(pscreen, tmpl);
576 struct pipe_resource *prsc = &rsc->base;
577
578 /* We have to make shared be untiled, since we don't have any way to
579 * communicate metadata about tiling currently.
580 */
581 if (tmpl->target == PIPE_BUFFER ||
582 tmpl->nr_samples > 1 ||
583 (tmpl->bind & (PIPE_BIND_SCANOUT |
584 PIPE_BIND_LINEAR |
585 PIPE_BIND_SHARED |
586 PIPE_BIND_CURSOR))) {
587 rsc->tiled = false;
588 } else {
589 rsc->tiled = true;
590 }
591
592 if (tmpl->target != PIPE_BUFFER)
593 rsc->vc4_format = get_resource_texture_format(prsc);
594
595 vc4_setup_slices(rsc);
596 if (!vc4_resource_bo_alloc(rsc))
597 goto fail;
598
599 if (screen->ro && tmpl->bind & PIPE_BIND_SCANOUT) {
600 rsc->scanout =
601 renderonly_scanout_for_resource(prsc, screen->ro);
602 if (!rsc->scanout)
603 goto fail;
604 }
605
606 return prsc;
607 fail:
608 vc4_resource_destroy(pscreen, prsc);
609 return NULL;
610 }
611
612 static struct pipe_resource *
613 vc4_resource_from_handle(struct pipe_screen *pscreen,
614 const struct pipe_resource *tmpl,
615 struct winsys_handle *whandle,
616 unsigned usage)
617 {
618 struct vc4_screen *screen = vc4_screen(pscreen);
619 struct vc4_resource *rsc = vc4_resource_setup(pscreen, tmpl);
620 struct pipe_resource *prsc = &rsc->base;
621 struct vc4_resource_slice *slice = &rsc->slices[0];
622 uint32_t expected_stride =
623 align(prsc->width0, vc4_utile_width(rsc->cpp)) * rsc->cpp;
624
625 if (!rsc)
626 return NULL;
627
628 if (whandle->stride != expected_stride) {
629 static bool warned = false;
630 if (!warned) {
631 warned = true;
632 fprintf(stderr,
633 "Attempting to import %dx%d %s with "
634 "unsupported stride %d instead of %d\n",
635 prsc->width0, prsc->height0,
636 util_format_short_name(prsc->format),
637 whandle->stride,
638 expected_stride);
639 }
640 goto fail;
641 }
642
643 rsc->tiled = false;
644
645 if (whandle->offset != 0) {
646 fprintf(stderr,
647 "Attempt to import unsupported winsys offset %u\n",
648 whandle->offset);
649 return NULL;
650 }
651
652 switch (whandle->type) {
653 case DRM_API_HANDLE_TYPE_SHARED:
654 rsc->bo = vc4_bo_open_name(screen,
655 whandle->handle, whandle->stride);
656 break;
657 case DRM_API_HANDLE_TYPE_FD:
658 rsc->bo = vc4_bo_open_dmabuf(screen,
659 whandle->handle, whandle->stride);
660 break;
661 default:
662 fprintf(stderr,
663 "Attempt to import unsupported handle type %d\n",
664 whandle->type);
665 }
666
667 if (!rsc->bo)
668 goto fail;
669
670 slice->stride = whandle->stride;
671 slice->tiling = VC4_TILING_FORMAT_LINEAR;
672
673 rsc->vc4_format = get_resource_texture_format(prsc);
674
675 if (screen->ro) {
676 /* Make sure that renderonly has a handle to our buffer in the
677 * display's fd, so that a later renderonly_get_handle()
678 * returns correct handles or GEM names.
679 */
680 rsc->scanout =
681 renderonly_create_gpu_import_for_resource(prsc,
682 screen->ro);
683 if (!rsc->scanout)
684 goto fail;
685 }
686
687 if (miptree_debug) {
688 fprintf(stderr,
689 "rsc import %p (format %d), %dx%d: "
690 "level 0 (R) -> stride %d@0x%08x\n",
691 rsc, rsc->vc4_format,
692 prsc->width0, prsc->height0,
693 slice->stride, slice->offset);
694 }
695
696 return prsc;
697
698 fail:
699 vc4_resource_destroy(pscreen, prsc);
700 return NULL;
701 }
702
703 static struct pipe_surface *
704 vc4_create_surface(struct pipe_context *pctx,
705 struct pipe_resource *ptex,
706 const struct pipe_surface *surf_tmpl)
707 {
708 struct vc4_surface *surface = CALLOC_STRUCT(vc4_surface);
709 struct vc4_resource *rsc = vc4_resource(ptex);
710
711 if (!surface)
712 return NULL;
713
714 assert(surf_tmpl->u.tex.first_layer == surf_tmpl->u.tex.last_layer);
715
716 struct pipe_surface *psurf = &surface->base;
717 unsigned level = surf_tmpl->u.tex.level;
718
719 pipe_reference_init(&psurf->reference, 1);
720 pipe_resource_reference(&psurf->texture, ptex);
721
722 psurf->context = pctx;
723 psurf->format = surf_tmpl->format;
724 psurf->width = u_minify(ptex->width0, level);
725 psurf->height = u_minify(ptex->height0, level);
726 psurf->u.tex.level = level;
727 psurf->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
728 psurf->u.tex.last_layer = surf_tmpl->u.tex.last_layer;
729 surface->offset = (rsc->slices[level].offset +
730 psurf->u.tex.first_layer * rsc->cube_map_stride);
731 surface->tiling = rsc->slices[level].tiling;
732
733 return &surface->base;
734 }
735
/* pipe_context::surface_destroy: drop the texture reference and free. */
static void
vc4_surface_destroy(struct pipe_context *pctx, struct pipe_surface *psurf)
{
        pipe_resource_reference(&psurf->texture, NULL);
        FREE(psurf);
}
742
/**
 * Debug helper: prints an ASCII-art rendering of a non-MSAA RGBA32R surface
 * to stderr, one character per chunk of pixels, with a legend of the colors
 * found.  Only supports raster (RGBA32R) surfaces.
 */
static void
vc4_dump_surface_non_msaa(struct pipe_surface *psurf)
{
        struct pipe_resource *prsc = psurf->texture;
        struct vc4_resource *rsc = vc4_resource(prsc);
        uint32_t *map = vc4_bo_map(rsc->bo);
        uint32_t stride = rsc->slices[0].stride / 4;
        uint32_t width = psurf->width;
        uint32_t height = psurf->height;
        /* Scale the surface to roughly a 79x40 character terminal. */
        uint32_t chunk_w = width / 79;
        uint32_t chunk_h = height / 40;
        uint32_t found_colors[10];
        uint32_t num_found_colors = 0;

        if (rsc->vc4_format != VC4_TEXTURE_TYPE_RGBA32R) {
                fprintf(stderr, "%s: Unsupported format %s\n",
                        __func__, util_format_short_name(psurf->format));
                return;
        }

        for (int by = 0; by < height; by += chunk_h) {
                for (int bx = 0; bx < width; bx += chunk_w) {
                        int all_found_color = -1; /* nothing found */

                        /* Scan the chunk: record new colors (up to 10) and
                         * track whether all pixels share one color.
                         */
                        for (int y = by; y < MIN2(height, by + chunk_h); y++) {
                                for (int x = bx; x < MIN2(width, bx + chunk_w); x++) {
                                        uint32_t pix = map[y * stride + x];

                                        int i;
                                        for (i = 0; i < num_found_colors; i++) {
                                                if (pix == found_colors[i])
                                                        break;
                                        }
                                        if (i == num_found_colors &&
                                            num_found_colors <
                                            ARRAY_SIZE(found_colors)) {
                                                found_colors[num_found_colors++] = pix;
                                        }

                                        if (i < num_found_colors) {
                                                if (all_found_color == -1)
                                                        all_found_color = i;
                                                else if (i != all_found_color)
                                                        all_found_color = ARRAY_SIZE(found_colors);
                                        }
                                }
                        }
                        /* If all pixels for this chunk have a consistent
                         * value, then print a character for it.  Either a
                         * fixed name (particularly common for piglit tests),
                         * or a runtime-generated number.
                         */
                        if (all_found_color >= 0 &&
                            all_found_color < ARRAY_SIZE(found_colors)) {
                                static const struct {
                                        uint32_t val;
                                        const char *c;
                                } named_colors[] = {
                                        { 0xff000000, "█" },
                                        { 0x00000000, "█" },
                                        { 0xffff0000, "r" },
                                        { 0xff00ff00, "g" },
                                        { 0xff0000ff, "b" },
                                        { 0xffffffff, "w" },
                                };
                                int i;
                                for (i = 0; i < ARRAY_SIZE(named_colors); i++) {
                                        if (named_colors[i].val ==
                                            found_colors[all_found_color]) {
                                                fprintf(stderr, "%s",
                                                        named_colors[i].c);
                                                break;
                                        }
                                }
                                /* For unnamed colors, print a number and the
                                 * numbers will have values printed at the
                                 * end.
                                 */
                                if (i == ARRAY_SIZE(named_colors)) {
                                        fprintf(stderr, "%c",
                                                '0' + all_found_color);
                                }
                        } else {
                                /* If there's no consistent color, print this.
                                 */
                                fprintf(stderr, ".");
                        }
                }
                fprintf(stderr, "\n");
        }

        /* Legend for the numbered (unnamed) colors printed above. */
        for (int i = 0; i < num_found_colors; i++) {
                fprintf(stderr, "color %d: 0x%08x\n", i, found_colors[i]);
        }
}
838
/**
 * Debug helper: fetches one sample of pixel (x, y) from a raw MSAA
 * (tile-buffer layout) surface.
 *
 * The surface is stored as 32x32-pixel tiles; within a tile, samples are
 * grouped per 2x2 pixel quad, VC4_MAX_SAMPLES samples per pixel.
 */
static uint32_t
vc4_surface_msaa_get_sample(struct pipe_surface *psurf,
                            uint32_t x, uint32_t y, uint32_t sample)
{
        struct pipe_resource *prsc = psurf->texture;
        struct vc4_resource *rsc = vc4_resource(prsc);
        uint32_t tile_w = 32, tile_h = 32;
        uint32_t tiles_w = DIV_ROUND_UP(psurf->width, 32);

        /* Locate the 32x32 tile containing the pixel. */
        uint32_t tile_x = x / tile_w;
        uint32_t tile_y = y / tile_h;
        uint32_t *tile = (vc4_bo_map(rsc->bo) +
                          VC4_TILE_BUFFER_SIZE * (tile_y * tiles_w + tile_x));
        uint32_t subtile_x = x % tile_w;
        uint32_t subtile_y = y % tile_h;

        /* Samples for one 2x2 quad of pixels are stored contiguously. */
        uint32_t quad_samples = VC4_MAX_SAMPLES * 4;
        uint32_t tile_stride = quad_samples * tile_w / 2;

        /* Index: quad row, quad column, then pixel position within the quad,
         * then the requested sample.
         */
        return *((uint32_t *)tile +
                 (subtile_y >> 1) * tile_stride +
                 (subtile_x >> 1) * quad_samples +
                 ((subtile_y & 1) << 1) +
                 (subtile_x & 1) +
                 sample);
}
865
/**
 * Debug helper: prints a single character summarizing a w*h block of an MSAA
 * surface — a color letter if every sample in the block matches, "x" for a
 * uniform but unnamed color, or "." if the block is mixed.
 */
static void
vc4_dump_surface_msaa_char(struct pipe_surface *psurf,
                           uint32_t start_x, uint32_t start_y,
                           uint32_t w, uint32_t h)
{
        bool all_same_color = true;
        uint32_t all_pix = 0;

        /* Compare every sample of every pixel in the block against the
         * first one.
         */
        for (int y = start_y; y < start_y + h; y++) {
                for (int x = start_x; x < start_x + w; x++) {
                        for (int s = 0; s < VC4_MAX_SAMPLES; s++) {
                                uint32_t pix = vc4_surface_msaa_get_sample(psurf,
                                                                           x, y,
                                                                           s);
                                if (x == start_x && y == start_y)
                                        all_pix = pix;
                                else if (all_pix != pix)
                                        all_same_color = false;
                        }
                }
        }
        if (all_same_color) {
                static const struct {
                        uint32_t val;
                        const char *c;
                } named_colors[] = {
                        { 0xff000000, "█" },
                        { 0x00000000, "█" },
                        { 0xffff0000, "r" },
                        { 0xff00ff00, "g" },
                        { 0xff0000ff, "b" },
                        { 0xffffffff, "w" },
                };
                int i;
                for (i = 0; i < ARRAY_SIZE(named_colors); i++) {
                        if (named_colors[i].val == all_pix) {
                                fprintf(stderr, "%s",
                                        named_colors[i].c);
                                return;
                        }
                }
                /* Uniform color with no assigned letter. */
                fprintf(stderr, "x");
        } else {
                fprintf(stderr, ".");
        }
}
912
/**
 * Debug helper: prints an ASCII-art rendering of an MSAA surface to stderr,
 * one character grid cell per region of each 32x32 tile, with tile borders
 * drawn between them.
 */
static void
vc4_dump_surface_msaa(struct pipe_surface *psurf)
{
        uint32_t tile_w = 32, tile_h = 32;
        uint32_t tiles_w = DIV_ROUND_UP(psurf->width, tile_w);
        uint32_t tiles_h = DIV_ROUND_UP(psurf->height, tile_h);
        /* Fit the whole surface in roughly a 140x60 character terminal,
         * reserving one column/row per tile for the separator.
         */
        uint32_t char_w = 140, char_h = 60;
        uint32_t char_w_per_tile = char_w / tiles_w - 1;
        uint32_t char_h_per_tile = char_h / tiles_h - 1;

        fprintf(stderr, "Surface: %dx%d (%dx MSAA)\n",
                psurf->width, psurf->height, psurf->texture->nr_samples);

        for (int x = 0; x < (char_w_per_tile + 1) * tiles_w; x++)
                fprintf(stderr, "-");
        fprintf(stderr, "\n");

        for (int ty = 0; ty < psurf->height; ty += tile_h) {
                for (int y = 0; y < char_h_per_tile; y++) {

                        for (int tx = 0; tx < psurf->width; tx += tile_w) {
                                for (int x = 0; x < char_w_per_tile; x++) {
                                        /* Pixel sub-rectangle of this tile
                                         * covered by character cell (x, y).
                                         */
                                        uint32_t bx1 = (x * tile_w /
                                                        char_w_per_tile);
                                        uint32_t bx2 = ((x + 1) * tile_w /
                                                        char_w_per_tile);
                                        uint32_t by1 = (y * tile_h /
                                                        char_h_per_tile);
                                        uint32_t by2 = ((y + 1) * tile_h /
                                                        char_h_per_tile);

                                        vc4_dump_surface_msaa_char(psurf,
                                                                   tx + bx1,
                                                                   ty + by1,
                                                                   bx2 - bx1,
                                                                   by2 - by1);
                                }
                                fprintf(stderr, "|");
                        }
                        fprintf(stderr, "\n");
                }

                for (int x = 0; x < (char_w_per_tile + 1) * tiles_w; x++)
                        fprintf(stderr, "-");
                fprintf(stderr, "\n");
        }
}
960
961 /** Debug routine to dump the contents of an 8888 surface to the console */
962 void
963 vc4_dump_surface(struct pipe_surface *psurf)
964 {
965 if (!psurf)
966 return;
967
968 if (psurf->texture->nr_samples > 1)
969 vc4_dump_surface_msaa(psurf);
970 else
971 vc4_dump_surface_non_msaa(psurf);
972 }
973
/* pipe_context::flush_resource implementation (intentionally a no-op). */
static void
vc4_flush_resource(struct pipe_context *pctx, struct pipe_resource *resource)
{
        /* All calls to flush_resource are followed by a flush of the context,
         * so there's nothing to do.
         */
}
981
/**
 * Refreshes a shadow texture from its parent.
 *
 * Shadow textures are copies of the parent starting at the sampler view's
 * first_level (or in a sampleable layout).  If the parent has been written
 * since the last sync — tracked via the writes counter, with BO sharing
 * forcing a conservative resync — blit every level across again.
 */
void
vc4_update_shadow_baselevel_texture(struct pipe_context *pctx,
                                    struct pipe_sampler_view *view)
{
        struct vc4_resource *shadow = vc4_resource(view->texture);
        struct vc4_resource *orig = vc4_resource(shadow->shadow_parent);
        assert(orig);

        /* Up to date, and nobody outside this process can have written the
         * BO behind our back.
         */
        if (shadow->writes == orig->writes && orig->bo->private)
                return;

        perf_debug("Updating %dx%d@%d shadow texture due to %s\n",
                   orig->base.width0, orig->base.height0,
                   view->u.tex.first_level,
                   view->u.tex.first_level ? "base level" : "raster layout");

        /* Copy each shadow level from the corresponding parent level, offset
         * by the view's first_level.
         */
        for (int i = 0; i <= shadow->base.last_level; i++) {
                unsigned width = u_minify(shadow->base.width0, i);
                unsigned height = u_minify(shadow->base.height0, i);
                struct pipe_blit_info info = {
                        .dst = {
                                .resource = &shadow->base,
                                .level = i,
                                .box = {
                                        .x = 0,
                                        .y = 0,
                                        .z = 0,
                                        .width = width,
                                        .height = height,
                                        .depth = 1,
                                },
                                .format = shadow->base.format,
                        },
                        .src = {
                                .resource = &orig->base,
                                .level = view->u.tex.first_level + i,
                                .box = {
                                        .x = 0,
                                        .y = 0,
                                        .z = 0,
                                        .width = width,
                                        .height = height,
                                        .depth = 1,
                                },
                                .format = orig->base.format,
                        },
                        .mask = ~0,
                };
                pctx->blit(pctx, &info);
        }

        shadow->writes = orig->writes;
}
1035
1036 /**
1037 * Converts a 4-byte index buffer to 2 bytes.
1038 *
1039 * Since GLES2 only has support for 1 and 2-byte indices, the hardware doesn't
1040 * include 4-byte index support, and we have to shrink it down.
1041 *
1042 * There's no fallback support for when indices end up being larger than 2^16,
1043 * though it will at least assertion fail. Also, if the original index data
1044 * was in user memory, it would be nice to not have uploaded it to a VBO
1045 * before translating.
1046 */
1047 struct pipe_resource *
1048 vc4_get_shadow_index_buffer(struct pipe_context *pctx,
1049 const struct pipe_draw_info *info,
1050 uint32_t offset,
1051 uint32_t count,
1052 uint32_t *shadow_offset)
1053 {
1054 struct vc4_context *vc4 = vc4_context(pctx);
1055 struct vc4_resource *orig = vc4_resource(info->index.resource);
1056 perf_debug("Fallback conversion for %d uint indices\n", count);
1057
1058 void *data;
1059 struct pipe_resource *shadow_rsc = NULL;
1060 u_upload_alloc(vc4->uploader, 0, count * 2, 4,
1061 shadow_offset, &shadow_rsc, &data);
1062 uint16_t *dst = data;
1063
1064 struct pipe_transfer *src_transfer = NULL;
1065 const uint32_t *src;
1066 if (info->has_user_indices) {
1067 src = info->index.user;
1068 } else {
1069 src = pipe_buffer_map_range(pctx, &orig->base,
1070 offset,
1071 count * 4,
1072 PIPE_TRANSFER_READ, &src_transfer);
1073 }
1074
1075 for (int i = 0; i < count; i++) {
1076 uint32_t src_index = src[i];
1077 assert(src_index <= 0xffff);
1078 dst[i] = src_index;
1079 }
1080
1081 if (src_transfer)
1082 pctx->transfer_unmap(pctx, src_transfer);
1083
1084 return shadow_rsc;
1085 }
1086
1087 void
1088 vc4_resource_screen_init(struct pipe_screen *pscreen)
1089 {
1090 pscreen->resource_create = vc4_resource_create;
1091 pscreen->resource_from_handle = vc4_resource_from_handle;
1092 pscreen->resource_destroy = u_resource_destroy_vtbl;
1093 pscreen->resource_get_handle = vc4_resource_get_handle;
1094 pscreen->resource_destroy = vc4_resource_destroy;
1095 }
1096
/* Installs the vc4 resource/transfer/surface hooks on the context. */
void
vc4_resource_context_init(struct pipe_context *pctx)
{
        /* Transfers (CPU mapping). */
        pctx->transfer_map = vc4_resource_transfer_map;
        pctx->transfer_flush_region = u_default_transfer_flush_region;
        pctx->transfer_unmap = vc4_resource_transfer_unmap;
        pctx->buffer_subdata = u_default_buffer_subdata;
        pctx->texture_subdata = u_default_texture_subdata;
        /* Surfaces and copies. */
        pctx->create_surface = vc4_create_surface;
        pctx->surface_destroy = vc4_surface_destroy;
        pctx->resource_copy_region = util_resource_copy_region;
        pctx->blit = vc4_blit;
        pctx->flush_resource = vc4_flush_resource;
}