broadcom/vc5: Enable UIF XOR on textures.
[mesa.git] / src / gallium / drivers / vc5 / vc5_resource.c
/*
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_blit.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_surface.h"
#include "util/u_transfer_helper.h"
#include "util/u_upload_mgr.h"
#include "util/u_format_zs.h"

#include "drm_fourcc.h"
#include "vc5_screen.h"
#include "vc5_context.h"
#include "vc5_resource.h"
#include "vc5_tiling.h"
#include "broadcom/cle/v3d_packet_v33_pack.h"

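/* Fallback for older drm_fourcc.h headers that don't define this yet. */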
#ifndef DRM_FORMAT_MOD_INVALID
#define DRM_FORMAT_MOD_INVALID ((1ULL << 56) - 1)
#endif

static void
vc5_debug_resource_layout(struct vc5_resource *rsc, const char *caller)
{
        if (!(V3D_DEBUG & V3D_DEBUG_SURFACE))
                return;

        struct pipe_resource *prsc = &rsc->base;

        if (prsc->target == PIPE_BUFFER) {
                fprintf(stderr,
                        "rsc %s %p (format %s), %dx%d buffer @0x%08x-0x%08x\n",
                        caller, rsc,
                        util_format_short_name(prsc->format),
                        prsc->width0, prsc->height0,
                        rsc->bo->offset,
                        rsc->bo->offset + rsc->bo->size - 1);
                return;
        }

        static const char *const tiling_descriptions[] = {
                [VC5_TILING_RASTER] = "R",
                [VC5_TILING_LINEARTILE] = "LT",
                [VC5_TILING_UBLINEAR_1_COLUMN] = "UB1",
                [VC5_TILING_UBLINEAR_2_COLUMN] = "UB2",
                [VC5_TILING_UIF_NO_XOR] = "UIF",
                [VC5_TILING_UIF_XOR] = "UIF^",
        };

        for (int i = 0; i <= prsc->last_level; i++) {
                struct vc5_resource_slice *slice = &rsc->slices[i];

                int level_width = slice->stride / rsc->cpp;
                int level_height = slice->padded_height;

                fprintf(stderr,
                        "rsc %s %p (format %s), %dx%d: "
                        "level %d (%s) %dx%d -> %dx%d, stride %d@0x%08x\n",
                        caller, rsc,
                        util_format_short_name(prsc->format),
                        prsc->width0, prsc->height0,
                        i, tiling_descriptions[slice->tiling],
                        u_minify(prsc->width0, i),
                        u_minify(prsc->height0, i),
                        level_width,
                        level_height,
                        slice->stride,
                        rsc->bo->offset + slice->offset);
        }
}

static bool
vc5_resource_bo_alloc(struct vc5_resource *rsc)
{
        struct pipe_resource *prsc = &rsc->base;
        struct pipe_screen *pscreen = prsc->screen;
        struct vc5_bo *bo;
        int layers = (prsc->target == PIPE_TEXTURE_3D ?
                      prsc->depth0 : prsc->array_size);

        bo = vc5_bo_alloc(vc5_screen(pscreen),
                          rsc->slices[0].offset +
                          rsc->slices[0].size +
                          rsc->cube_map_stride * layers - 1,
                          "resource");
        if (bo) {
                vc5_bo_unreference(&rsc->bo);
                rsc->bo = bo;
                vc5_debug_resource_layout(rsc, "alloc");
                return true;
        } else {
                return false;
        }
}

static void
vc5_resource_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *ptrans)
{
        struct vc5_context *vc5 = vc5_context(pctx);
        struct vc5_transfer *trans = vc5_transfer(ptrans);

        if (trans->map) {
                struct vc5_resource *rsc = vc5_resource(ptrans->resource);
                struct vc5_resource_slice *slice = &rsc->slices[ptrans->level];

                if (ptrans->usage & PIPE_TRANSFER_WRITE) {
                        vc5_store_tiled_image(rsc->bo->map + slice->offset +
                                              ptrans->box.z * rsc->cube_map_stride,
                                              slice->stride,
                                              trans->map, ptrans->stride,
                                              slice->tiling, rsc->cpp,
                                              slice->padded_height,
                                              &ptrans->box);
                }
                free(trans->map);
        }

        pipe_resource_reference(&ptrans->resource, NULL);
        slab_free(&vc5->transfer_pool, ptrans);
}

static void *
vc5_resource_transfer_map(struct pipe_context *pctx,
                          struct pipe_resource *prsc,
                          unsigned level, unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **pptrans)
{
        struct vc5_context *vc5 = vc5_context(pctx);
        struct vc5_resource *rsc = vc5_resource(prsc);
        struct vc5_transfer *trans;
        struct pipe_transfer *ptrans;
        enum pipe_format format = prsc->format;
        char *buf;

        /* MSAA maps should have been handled by u_transfer_helper. */
        assert(prsc->nr_samples <= 1);

        /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
         * being mapped.
         */
        if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
            !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
            !(prsc->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) &&
            prsc->last_level == 0 &&
            prsc->width0 == box->width &&
            prsc->height0 == box->height &&
            prsc->depth0 == box->depth &&
            prsc->array_size == 1 &&
            rsc->bo->private) {
                usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
        }

        if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
                if (vc5_resource_bo_alloc(rsc)) {
                        /* If it might be bound as one of our vertex buffers
                         * or UBOs, make sure we re-emit vertex buffer state
                         * or uniforms.
                         */
                        if (prsc->bind & PIPE_BIND_VERTEX_BUFFER)
                                vc5->dirty |= VC5_DIRTY_VTXBUF;
                        if (prsc->bind & PIPE_BIND_CONSTANT_BUFFER)
                                vc5->dirty |= VC5_DIRTY_CONSTBUF;
                } else {
                        /* If we failed to reallocate, flush users so that we
                         * don't violate any syncing requirements.
                         */
                        vc5_flush_jobs_reading_resource(vc5, prsc);
                }
        } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
                /* If we're writing and the buffer is being used by the CL, we
                 * have to flush the CL first. If we're only reading, we need
                 * to flush if the CL has written our buffer.
                 */
                if (usage & PIPE_TRANSFER_WRITE)
                        vc5_flush_jobs_reading_resource(vc5, prsc);
                else
                        vc5_flush_jobs_writing_resource(vc5, prsc);
        }

        if (usage & PIPE_TRANSFER_WRITE) {
                rsc->writes++;
                rsc->initialized_buffers = ~0;
        }

        trans = slab_alloc(&vc5->transfer_pool);
        if (!trans)
                return NULL;

        /* XXX: Handle DONTBLOCK, DISCARD_RANGE, PERSISTENT, COHERENT. */

        /* slab_alloc() doesn't zero: */
        memset(trans, 0, sizeof(*trans));
        ptrans = &trans->base;

        pipe_resource_reference(&ptrans->resource, prsc);
        ptrans->level = level;
        ptrans->usage = usage;
        ptrans->box = *box;

        /* Note that the current kernel implementation is synchronous, so no
         * need to do syncing stuff here yet.
         */

        if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
                buf = vc5_bo_map_unsynchronized(rsc->bo);
        else
                buf = vc5_bo_map(rsc->bo);
        if (!buf) {
                fprintf(stderr, "Failed to map bo\n");
                goto fail;
        }

        *pptrans = ptrans;

        /* Our load/store routines work on entire compressed blocks. */
        ptrans->box.x /= util_format_get_blockwidth(format);
        ptrans->box.y /= util_format_get_blockheight(format);
        ptrans->box.width = DIV_ROUND_UP(ptrans->box.width,
                                         util_format_get_blockwidth(format));
        ptrans->box.height = DIV_ROUND_UP(ptrans->box.height,
                                          util_format_get_blockheight(format));
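        /* As a sketch of the conversion above, assuming a 4x4-block
         * compressed format such as ETC2: a 13x13-texel box at texel (4, 4)
         * becomes a box at block (1, 1) that is DIV_ROUND_UP(13, 4) == 4
         * blocks on each side.
         */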

        struct vc5_resource_slice *slice = &rsc->slices[level];
        if (rsc->tiled) {
                /* No direct mappings of tiled resources, since we need to
                 * manually tile/untile.
                 */
                if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
                        return NULL;

                ptrans->stride = ptrans->box.width * rsc->cpp;
                ptrans->layer_stride = ptrans->stride * ptrans->box.height;

                trans->map = malloc(ptrans->layer_stride * ptrans->box.depth);

                if (usage & PIPE_TRANSFER_READ) {
                        vc5_load_tiled_image(trans->map, ptrans->stride,
                                             buf + slice->offset +
                                             ptrans->box.z * rsc->cube_map_stride,
                                             slice->stride,
                                             slice->tiling, rsc->cpp,
                                             slice->padded_height,
                                             &ptrans->box);
                }
                return trans->map;
        } else {
                ptrans->stride = slice->stride;
                ptrans->layer_stride = ptrans->stride;

                return buf + slice->offset +
                        ptrans->box.y * ptrans->stride +
                        ptrans->box.x * rsc->cpp +
                        ptrans->box.z * rsc->cube_map_stride;
        }

fail:
        vc5_resource_transfer_unmap(pctx, ptrans);
        return NULL;
}

static void
vc5_resource_destroy(struct pipe_screen *pscreen,
                     struct pipe_resource *prsc)
{
        struct vc5_resource *rsc = vc5_resource(prsc);

        vc5_bo_unreference(&rsc->bo);
        free(rsc);
}

static boolean
vc5_resource_get_handle(struct pipe_screen *pscreen,
                        struct pipe_context *pctx,
                        struct pipe_resource *prsc,
                        struct winsys_handle *whandle,
                        unsigned usage)
{
        struct vc5_resource *rsc = vc5_resource(prsc);
        struct vc5_bo *bo = rsc->bo;

        whandle->stride = rsc->slices[0].stride;

        /* If we're passing some reference to our BO out to some other part of
         * the system, then we can't do any optimizations about only us being
         * the ones seeing it (like BO caching).
         */
        bo->private = false;

        switch (whandle->type) {
        case DRM_API_HANDLE_TYPE_SHARED:
                return vc5_bo_flink(bo, &whandle->handle);
        case DRM_API_HANDLE_TYPE_KMS:
                whandle->handle = bo->handle;
                return TRUE;
        case DRM_API_HANDLE_TYPE_FD:
                whandle->handle = vc5_bo_get_dmabuf(bo);
                return whandle->handle != -1;
        }

        return FALSE;
}

#define PAGE_UB_ROWS (VC5_UIFCFG_PAGE_SIZE / VC5_UIFBLOCK_ROW_SIZE)
#define PAGE_UB_ROWS_TIMES_1_5 ((PAGE_UB_ROWS * 3) >> 1)
#define PAGE_CACHE_UB_ROWS (VC5_PAGE_CACHE_SIZE / VC5_UIFBLOCK_ROW_SIZE)
#define PAGE_CACHE_MINUS_1_5_UB_ROWS (PAGE_CACHE_UB_ROWS - PAGE_UB_ROWS_TIMES_1_5)
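
/* A worked example, assuming the usual VC5 geometry of 4096-byte pages,
 * 256-byte UIF blocks laid out in rows of 4 (VC5_UIFBLOCK_ROW_SIZE == 1024),
 * and an 8-page page cache: PAGE_UB_ROWS == 4, PAGE_UB_ROWS_TIMES_1_5 == 6,
 * PAGE_CACHE_UB_ROWS == 32, and PAGE_CACHE_MINUS_1_5_UB_ROWS == 26.
 */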

/**
 * Computes the HW's UIFblock padding for a given height/cpp.
 *
 * The goal of the padding is to keep pages of the same color (bank number) at
 * least half a page away from each other vertically when crossing between
 * columns of UIF blocks.
 */
static uint32_t
vc5_get_ub_pad(struct vc5_resource *rsc, uint32_t height)
{
        uint32_t utile_h = vc5_utile_height(rsc->cpp);
        uint32_t uif_block_h = utile_h * 2;
        uint32_t height_ub = height / uif_block_h;

        uint32_t height_offset_in_pc = height_ub % PAGE_CACHE_UB_ROWS;

        /* For the perfectly-aligned-for-UIF-XOR case, don't add any pad. */
        if (height_offset_in_pc == 0)
                return 0;

        /* Try padding up to where we're offset by at least half a page. */
        if (height_offset_in_pc < PAGE_UB_ROWS_TIMES_1_5) {
                /* If we fit entirely in the page cache, don't pad. */
                if (height_ub < PAGE_CACHE_UB_ROWS)
                        return 0;
                else
                        return PAGE_UB_ROWS_TIMES_1_5 - height_offset_in_pc;
        }

        /* If we're close to being aligned to page cache size, then round up
         * and rely on XOR.
         */
        if (height_offset_in_pc > PAGE_CACHE_MINUS_1_5_UB_ROWS)
                return PAGE_CACHE_UB_ROWS - height_offset_in_pc;

        /* Otherwise, we're far enough away (top and bottom) to not need any
         * padding.
         */
        return 0;
}
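
/* Example traces of vc5_get_ub_pad(), under the geometry assumed in the
 * comment on the PAGE_* macros above (4 / 6 / 32 / 26):
 *
 * - height_ub == 36: offset 4 in the page cache, and we don't fit within
 *   it, so pad by 6 - 4 == 2 UIF block rows.
 * - height_ub == 30: offset 30 > 26, so pad by 2 up to the page cache size
 *   and rely on the XOR bit.
 * - height_ub == 40: offset 8 is at least half a page away from either end,
 *   so no padding.
 */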

static void
vc5_setup_slices(struct vc5_resource *rsc)
{
        struct pipe_resource *prsc = &rsc->base;
        uint32_t width = prsc->width0;
        uint32_t height = prsc->height0;
        uint32_t pot_width = util_next_power_of_two(width);
        uint32_t pot_height = util_next_power_of_two(height);
        uint32_t offset = 0;
        uint32_t utile_w = vc5_utile_width(rsc->cpp);
        uint32_t utile_h = vc5_utile_height(rsc->cpp);
        uint32_t uif_block_w = utile_w * 2;
        uint32_t uif_block_h = utile_h * 2;
        uint32_t block_width = util_format_get_blockwidth(prsc->format);
        uint32_t block_height = util_format_get_blockheight(prsc->format);
        bool msaa = prsc->nr_samples > 1;
        /* MSAA textures/renderbuffers are always laid out as single-level
         * UIF.
         */
        bool uif_top = msaa;

        for (int i = prsc->last_level; i >= 0; i--) {
                struct vc5_resource_slice *slice = &rsc->slices[i];

                uint32_t level_width, level_height;
                if (i < 2) {
                        level_width = u_minify(width, i);
                        level_height = u_minify(height, i);
                } else {
                        level_width = u_minify(pot_width, i);
                        level_height = u_minify(pot_height, i);
                }
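
                /* Note that levels 0 and 1 above are sized from the actual
                 * dimensions, while levels 2+ are minified from the
                 * next-power-of-two size, matching how the HW expects the
                 * rest of the mipmap chain to be laid out.
                 */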

                if (msaa) {
                        level_width *= 2;
                        level_height *= 2;
                }
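
                /* The doubling above is because 4x MSAA samples are stored
                 * as a 2x2 grid per pixel within the slice.
                 */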

                level_width = DIV_ROUND_UP(level_width, block_width);
                level_height = DIV_ROUND_UP(level_height, block_height);

                if (!rsc->tiled) {
                        slice->tiling = VC5_TILING_RASTER;
                        if (prsc->target == PIPE_TEXTURE_1D)
                                level_width = align(level_width, 64 / rsc->cpp);
                } else {
                        if ((i != 0 || !uif_top) &&
                            (level_width <= utile_w ||
                             level_height <= utile_h)) {
                                slice->tiling = VC5_TILING_LINEARTILE;
                                level_width = align(level_width, utile_w);
                                level_height = align(level_height, utile_h);
                        } else if ((i != 0 || !uif_top) &&
                                   level_width <= uif_block_w) {
                                slice->tiling = VC5_TILING_UBLINEAR_1_COLUMN;
                                level_width = align(level_width, uif_block_w);
                                level_height = align(level_height, uif_block_h);
                        } else if ((i != 0 || !uif_top) &&
                                   level_width <= 2 * uif_block_w) {
                                slice->tiling = VC5_TILING_UBLINEAR_2_COLUMN;
                                level_width = align(level_width, 2 * uif_block_w);
                                level_height = align(level_height, uif_block_h);
                        } else {
                                /* We align the width to a 4-block column of
                                 * UIF blocks, but we only align height to UIF
                                 * blocks.
                                 */
                                level_width = align(level_width,
                                                    4 * uif_block_w);
                                level_height = align(level_height,
                                                     uif_block_h);

                                slice->ub_pad = vc5_get_ub_pad(rsc,
                                                               level_height);
                                level_height += slice->ub_pad * uif_block_h;

                                /* If the padding set us up to be aligned to
                                 * the page cache size, then the HW will use
                                 * the XOR bit on odd columns to get us
                                 * perfectly misaligned.
                                 */
                                if ((level_height / uif_block_h) %
                                    (VC5_PAGE_CACHE_SIZE /
                                     VC5_UIFBLOCK_ROW_SIZE) == 0) {
                                        slice->tiling = VC5_TILING_UIF_XOR;
                                } else {
                                        slice->tiling = VC5_TILING_UIF_NO_XOR;
                                }
                        }
                }

                slice->offset = offset;
                slice->stride = level_width * rsc->cpp;
                slice->padded_height = level_height;
                slice->size = level_height * slice->stride;

                /* The HW aligns level 1's base to a page if any of level 1 or
                 * below could be UIF XOR. The lower levels then inherit the
                 * alignment for as long as necessary, thanks to being power of
                 * two aligned.
                 */
                if (i == 1 &&
                    level_width > 4 * uif_block_w &&
                    level_height > PAGE_CACHE_MINUS_1_5_UB_ROWS * uif_block_h) {
                        slice->size = align(slice->size, VC5_UIFCFG_PAGE_SIZE);
                }

                offset += slice->size;
        }

        /* UIF/UBLINEAR levels need to be aligned to UIF-blocks, and LT only
         * needs to be aligned to utile boundaries. Since the levels are laid
         * out from smallest to largest in memory, we need to align the later
         * UIF slices to UIF blocks, if they were preceded by
         * non-UIF-block-aligned LT slices.
         *
         * We additionally align to 4k, which improves UIF XOR performance.
         */
        uint32_t page_align_offset = (align(rsc->slices[0].offset, 4096) -
                                      rsc->slices[0].offset);
        if (page_align_offset) {
                for (int i = 0; i <= prsc->last_level; i++)
                        rsc->slices[i].offset += page_align_offset;
        }

        /* Arrays, cubes, and 3D textures have a stride which is the distance
         * from one full mipmap tree to the next (64b aligned).
         */
        rsc->cube_map_stride = align(rsc->slices[0].offset +
                                     rsc->slices[0].size, 64);
}
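
/* A sketch of what the above produces, assuming 64-byte utiles (4x4 pixels
 * and thus 8x8-pixel UIF blocks at cpp == 4): a 1024x1024 RGBA8888 texture
 * with a full mipmap chain gets UIF slices for the 1024x1024 through 32x32
 * levels, UBLINEAR for 16x16 and 8x8, and LT for 4x4 down to 1x1, laid out
 * smallest level first in memory.
 */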

static struct vc5_resource *
vc5_resource_setup(struct pipe_screen *pscreen,
                   const struct pipe_resource *tmpl)
{
        struct vc5_screen *screen = vc5_screen(pscreen);
        struct vc5_resource *rsc = CALLOC_STRUCT(vc5_resource);
        if (!rsc)
                return NULL;
        struct pipe_resource *prsc = &rsc->base;

        *prsc = *tmpl;

        pipe_reference_init(&prsc->reference, 1);
        prsc->screen = pscreen;

        if (prsc->nr_samples <= 1) {
                rsc->cpp = util_format_get_blocksize(prsc->format);
        } else {
                assert(vc5_rt_format_supported(&screen->devinfo, prsc->format));
                uint32_t output_image_format =
                        vc5_get_rt_format(&screen->devinfo, prsc->format);
                uint32_t internal_type;
                uint32_t internal_bpp;
                vc5_get_internal_type_bpp_for_output_format(&screen->devinfo,
                                                            output_image_format,
                                                            &internal_type,
                                                            &internal_bpp);
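
                /* The internal bpp here is the per-sample size in the tile
                 * buffer; vc5_setup_slices() then doubles the width and
                 * height so that one slice holds the 2x2 sample grid of each
                 * pixel.
                 */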
                switch (internal_bpp) {
                case V3D_INTERNAL_BPP_32:
                        rsc->cpp = 4;
                        break;
                case V3D_INTERNAL_BPP_64:
                        rsc->cpp = 8;
                        break;
                case V3D_INTERNAL_BPP_128:
                        rsc->cpp = 16;
                        break;
                }
        }

        assert(rsc->cpp);

        return rsc;
}

static bool
find_modifier(uint64_t needle, const uint64_t *haystack, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                if (haystack[i] == needle)
                        return true;
        }

        return false;
}

static struct pipe_resource *
vc5_resource_create_with_modifiers(struct pipe_screen *pscreen,
                                   const struct pipe_resource *tmpl,
                                   const uint64_t *modifiers,
                                   int count)
{
        bool linear_ok = find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
        struct vc5_resource *rsc = vc5_resource_setup(pscreen, tmpl);
        if (!rsc)
                return NULL;
        struct pipe_resource *prsc = &rsc->base;
        /* Use a tiled layout if we can, for better 3D performance. */
        bool should_tile = true;

        /* VBOs/PBOs are untiled (and have height 1). */
        if (tmpl->target == PIPE_BUFFER)
                should_tile = false;

        /* Cursors are always linear, and the user can request linear as well.
         */
        if (tmpl->bind & (PIPE_BIND_LINEAR | PIPE_BIND_CURSOR))
                should_tile = false;

        /* 1D and 1D_ARRAY textures are always raster-order. */
        if (tmpl->target == PIPE_TEXTURE_1D ||
            tmpl->target == PIPE_TEXTURE_1D_ARRAY)
                should_tile = false;

        /* Scanout BOs for simulator need to be linear for interaction with
         * i965.
         */
        if (using_vc5_simulator &&
            tmpl->bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
                should_tile = false;

        /* No user-specified modifier; determine our own. */
        if (count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID) {
                linear_ok = true;
                rsc->tiled = should_tile;
        } else if (should_tile &&
                   find_modifier(DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
                                 modifiers, count)) {
                rsc->tiled = true;
        } else if (linear_ok) {
                rsc->tiled = false;
        } else {
                fprintf(stderr, "Unsupported modifier requested\n");
                goto fail;
        }

        rsc->internal_format = prsc->format;

        vc5_setup_slices(rsc);
        if (!vc5_resource_bo_alloc(rsc))
                goto fail;

        return prsc;
fail:
        vc5_resource_destroy(pscreen, prsc);
        return NULL;
}

struct pipe_resource *
vc5_resource_create(struct pipe_screen *pscreen,
                    const struct pipe_resource *tmpl)
{
        const uint64_t mod = DRM_FORMAT_MOD_INVALID;
        return vc5_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
}

static struct pipe_resource *
vc5_resource_from_handle(struct pipe_screen *pscreen,
                         const struct pipe_resource *tmpl,
                         struct winsys_handle *whandle,
                         unsigned usage)
{
        struct vc5_screen *screen = vc5_screen(pscreen);
        struct vc5_resource *rsc = vc5_resource_setup(pscreen, tmpl);
        if (!rsc)
                return NULL;

        struct pipe_resource *prsc = &rsc->base;
        struct vc5_resource_slice *slice = &rsc->slices[0];

        switch (whandle->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                rsc->tiled = false;
                break;
        /* XXX: UIF */
        default:
                fprintf(stderr,
                        "Attempt to import unsupported modifier 0x%llx\n",
                        (long long)whandle->modifier);
                goto fail;
        }

        if (whandle->offset != 0) {
                fprintf(stderr,
                        "Attempt to import unsupported winsys offset %u\n",
                        whandle->offset);
                goto fail;
        }

        switch (whandle->type) {
        case DRM_API_HANDLE_TYPE_SHARED:
                rsc->bo = vc5_bo_open_name(screen,
                                           whandle->handle, whandle->stride);
                break;
        case DRM_API_HANDLE_TYPE_FD:
                rsc->bo = vc5_bo_open_dmabuf(screen,
                                             whandle->handle, whandle->stride);
                break;
        default:
                fprintf(stderr,
                        "Attempt to import unsupported handle type %d\n",
                        whandle->type);
                goto fail;
        }

        if (!rsc->bo)
                goto fail;

        vc5_setup_slices(rsc);
        vc5_debug_resource_layout(rsc, "import");

        if (whandle->stride != slice->stride) {
                static bool warned = false;
                if (!warned) {
                        warned = true;
                        fprintf(stderr,
                                "Attempting to import %dx%d %s with "
                                "unsupported stride %d instead of %d\n",
                                prsc->width0, prsc->height0,
                                util_format_short_name(prsc->format),
                                whandle->stride,
                                slice->stride);
                }
                goto fail;
        }

        return prsc;

fail:
        vc5_resource_destroy(pscreen, prsc);
        return NULL;
}

static struct pipe_surface *
vc5_create_surface(struct pipe_context *pctx,
                   struct pipe_resource *ptex,
                   const struct pipe_surface *surf_tmpl)
{
        struct vc5_context *vc5 = vc5_context(pctx);
        struct vc5_screen *screen = vc5->screen;
        struct vc5_surface *surface = CALLOC_STRUCT(vc5_surface);
        struct vc5_resource *rsc = vc5_resource(ptex);

        if (!surface)
                return NULL;

        assert(surf_tmpl->u.tex.first_layer == surf_tmpl->u.tex.last_layer);

        struct pipe_surface *psurf = &surface->base;
        unsigned level = surf_tmpl->u.tex.level;
        struct vc5_resource_slice *slice = &rsc->slices[level];

        pipe_reference_init(&psurf->reference, 1);
        pipe_resource_reference(&psurf->texture, ptex);

        psurf->context = pctx;
        psurf->format = surf_tmpl->format;
        psurf->width = u_minify(ptex->width0, level);
        psurf->height = u_minify(ptex->height0, level);
        psurf->u.tex.level = level;
        psurf->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
        psurf->u.tex.last_layer = surf_tmpl->u.tex.last_layer;

        surface->offset = (slice->offset +
                           psurf->u.tex.first_layer * rsc->cube_map_stride);
        surface->tiling = slice->tiling;

        surface->format = vc5_get_rt_format(&screen->devinfo, psurf->format);

        if (util_format_is_depth_or_stencil(psurf->format)) {
                switch (psurf->format) {
                case PIPE_FORMAT_Z16_UNORM:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_16;
                        break;
                case PIPE_FORMAT_Z32_FLOAT:
                case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_32F;
                        break;
                default:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_24;
                }
        } else {
                uint32_t bpp, type;
                vc5_get_internal_type_bpp_for_output_format(&screen->devinfo,
                                                            surface->format,
                                                            &type, &bpp);
                surface->internal_type = type;
                surface->internal_bpp = bpp;
        }

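        /* A UIF block is two utiles tall, so the computation below converts
         * the slice's padded height in pixels into the UIF-block count the
         * HW's render target setup expects.
         */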
        if (surface->tiling == VC5_TILING_UIF_NO_XOR ||
            surface->tiling == VC5_TILING_UIF_XOR) {
                surface->padded_height_of_output_image_in_uif_blocks =
                        (slice->padded_height /
                         (2 * vc5_utile_height(rsc->cpp)));
        }

        if (rsc->separate_stencil) {
                surface->separate_stencil =
                        vc5_create_surface(pctx, &rsc->separate_stencil->base,
                                           surf_tmpl);
        }

        return &surface->base;
}

static void
vc5_surface_destroy(struct pipe_context *pctx, struct pipe_surface *psurf)
{
        struct vc5_surface *surf = vc5_surface(psurf);

        if (surf->separate_stencil)
                pipe_surface_reference(&surf->separate_stencil, NULL);

        pipe_resource_reference(&psurf->texture, NULL);
        FREE(psurf);
}

static void
vc5_flush_resource(struct pipe_context *pctx, struct pipe_resource *resource)
{
        /* All calls to flush_resource are followed by a flush of the context,
         * so there's nothing to do.
         */
}

static enum pipe_format
vc5_resource_get_internal_format(struct pipe_resource *prsc)
{
        return vc5_resource(prsc)->internal_format;
}

static void
vc5_resource_set_stencil(struct pipe_resource *prsc,
                         struct pipe_resource *stencil)
{
        vc5_resource(prsc)->separate_stencil = vc5_resource(stencil);
}

static struct pipe_resource *
vc5_resource_get_stencil(struct pipe_resource *prsc)
{
        struct vc5_resource *rsc = vc5_resource(prsc);

        return &rsc->separate_stencil->base;
}

static const struct u_transfer_vtbl transfer_vtbl = {
        .resource_create = vc5_resource_create,
        .resource_destroy = vc5_resource_destroy,
        .transfer_map = vc5_resource_transfer_map,
        .transfer_unmap = vc5_resource_transfer_unmap,
        .transfer_flush_region = u_default_transfer_flush_region,
        .get_internal_format = vc5_resource_get_internal_format,
        .set_stencil = vc5_resource_set_stencil,
        .get_stencil = vc5_resource_get_stencil,
};

void
vc5_resource_screen_init(struct pipe_screen *pscreen)
{
        pscreen->resource_create_with_modifiers =
                vc5_resource_create_with_modifiers;
        pscreen->resource_create = u_transfer_helper_resource_create;
        pscreen->resource_from_handle = vc5_resource_from_handle;
        pscreen->resource_get_handle = vc5_resource_get_handle;
        pscreen->resource_destroy = u_transfer_helper_resource_destroy;
        pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
                                                            true, true, true);
}

void
vc5_resource_context_init(struct pipe_context *pctx)
{
        pctx->transfer_map = u_transfer_helper_transfer_map;
        pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
        pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
        pctx->buffer_subdata = u_default_buffer_subdata;
        pctx->texture_subdata = u_default_texture_subdata;
        pctx->create_surface = vc5_create_surface;
        pctx->surface_destroy = vc5_surface_destroy;
        pctx->resource_copy_region = util_resource_copy_region;
        pctx->blit = vc5_blit;
        pctx->flush_resource = vc5_flush_resource;
}