v3d: Fix leak in resource setup error path
[mesa.git] / src / gallium / drivers / v3d / v3d_resource.c
/*
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "pipe/p_defines.h"
#include "util/u_blit.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_surface.h"
#include "util/u_transfer_helper.h"
#include "util/u_upload_mgr.h"
#include "util/u_format_zs.h"

#include "drm_fourcc.h"
#include "v3d_screen.h"
#include "v3d_context.h"
#include "v3d_resource.h"
#include "v3d_tiling.h"
#include "broadcom/cle/v3d_packet_v33_pack.h"

static void
v3d_debug_resource_layout(struct v3d_resource *rsc, const char *caller)
{
        if (!(V3D_DEBUG & V3D_DEBUG_SURFACE))
                return;

        struct pipe_resource *prsc = &rsc->base;

        if (prsc->target == PIPE_BUFFER) {
                fprintf(stderr,
                        "rsc %s %p (format %s), %dx%d buffer @0x%08x-0x%08x\n",
                        caller, rsc,
                        util_format_short_name(prsc->format),
                        prsc->width0, prsc->height0,
                        rsc->bo->offset,
                        rsc->bo->offset + rsc->bo->size - 1);
                return;
        }

        static const char *const tiling_descriptions[] = {
                [VC5_TILING_RASTER] = "R",
                [VC5_TILING_LINEARTILE] = "LT",
                [VC5_TILING_UBLINEAR_1_COLUMN] = "UB1",
                [VC5_TILING_UBLINEAR_2_COLUMN] = "UB2",
                [VC5_TILING_UIF_NO_XOR] = "UIF",
                [VC5_TILING_UIF_XOR] = "UIF^",
        };

        for (int i = 0; i <= prsc->last_level; i++) {
                struct v3d_resource_slice *slice = &rsc->slices[i];

                int level_width = slice->stride / rsc->cpp;
                int level_height = slice->padded_height;
                int level_depth =
                        u_minify(util_next_power_of_two(prsc->depth0), i);

                fprintf(stderr,
                        "rsc %s %p (format %s), %dx%d: "
                        "level %d (%s) %dx%dx%d -> %dx%dx%d, stride %d@0x%08x\n",
                        caller, rsc,
                        util_format_short_name(prsc->format),
                        prsc->width0, prsc->height0,
                        i, tiling_descriptions[slice->tiling],
                        u_minify(prsc->width0, i),
                        u_minify(prsc->height0, i),
                        u_minify(prsc->depth0, i),
                        level_width,
                        level_height,
                        level_depth,
                        slice->stride,
                        rsc->bo->offset + slice->offset);
        }
}

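/**
 * Allocates a fresh BO for the resource's computed layout (rsc->size),
 * replacing any BO already attached.  Returns false if the allocation
 * fails, leaving the old BO (if any) in place.
 */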
static bool
v3d_resource_bo_alloc(struct v3d_resource *rsc)
{
        struct pipe_resource *prsc = &rsc->base;
        struct pipe_screen *pscreen = prsc->screen;
        struct v3d_bo *bo;

        bo = v3d_bo_alloc(v3d_screen(pscreen), rsc->size, "resource");
        if (bo) {
                v3d_bo_unreference(&rsc->bo);
                rsc->bo = bo;
                v3d_debug_resource_layout(rsc, "alloc");
                return true;
        } else {
                return false;
        }
}

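/**
 * Tears down a transfer: for tiled resources with a write mapping, the
 * staging buffer is stored back into the tiled layout one layer at a
 * time before the staging memory and the transfer itself are freed.
 */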
static void
v3d_resource_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *ptrans)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_transfer *trans = v3d_transfer(ptrans);

        if (trans->map) {
                struct v3d_resource *rsc = v3d_resource(ptrans->resource);
                struct v3d_resource_slice *slice = &rsc->slices[ptrans->level];

                if (ptrans->usage & PIPE_TRANSFER_WRITE) {
                        for (int z = 0; z < ptrans->box.depth; z++) {
                                void *dst = rsc->bo->map +
                                        v3d_layer_offset(&rsc->base,
                                                         ptrans->level,
                                                         ptrans->box.z + z);
                                v3d_store_tiled_image(dst,
                                                      slice->stride,
                                                      (trans->map +
                                                       ptrans->stride *
                                                       ptrans->box.height * z),
                                                      ptrans->stride,
                                                      slice->tiling, rsc->cpp,
                                                      slice->padded_height,
                                                      &ptrans->box);
                        }
                }
                free(trans->map);
        }

        pipe_resource_reference(&ptrans->resource, NULL);
        slab_free(&v3d->transfer_pool, ptrans);
}

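/**
 * Shared map-time synchronization for transfer_map and texture_subdata:
 * whole-resource discards try to swap in a fresh BO so the CPU doesn't
 * stall on the GPU, while other synchronized maps flush the jobs reading
 * or writing the resource, depending on whether we're about to write.
 */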
static void
v3d_map_usage_prep(struct pipe_context *pctx,
                   struct pipe_resource *prsc,
                   unsigned usage)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_resource *rsc = v3d_resource(prsc);

        if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
                if (v3d_resource_bo_alloc(rsc)) {
                        /* If it might be bound as one of our vertex buffers
                         * or UBOs, make sure we re-emit vertex buffer state
                         * or uniforms.
                         */
                        if (prsc->bind & PIPE_BIND_VERTEX_BUFFER)
                                v3d->dirty |= VC5_DIRTY_VTXBUF;
                        if (prsc->bind & PIPE_BIND_CONSTANT_BUFFER)
                                v3d->dirty |= VC5_DIRTY_CONSTBUF;
                } else {
                        /* If we failed to reallocate, flush users so that we
                         * don't violate any syncing requirements.
                         */
                        v3d_flush_jobs_reading_resource(v3d, prsc);
                }
        } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
                /* If we're writing and the buffer is being used by the CL, we
                 * have to flush the CL first. If we're only reading, we need
                 * to flush if the CL has written our buffer.
                 */
                if (usage & PIPE_TRANSFER_WRITE)
                        v3d_flush_jobs_reading_resource(v3d, prsc);
                else
                        v3d_flush_jobs_writing_resource(v3d, prsc);
        }

        if (usage & PIPE_TRANSFER_WRITE) {
                rsc->writes++;
                rsc->initialized_buffers = ~0;
        }
}

static void *
v3d_resource_transfer_map(struct pipe_context *pctx,
                          struct pipe_resource *prsc,
                          unsigned level, unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **pptrans)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_resource *rsc = v3d_resource(prsc);
        struct v3d_transfer *trans;
        struct pipe_transfer *ptrans;
        enum pipe_format format = prsc->format;
        char *buf;

        /* MSAA maps should have been handled by u_transfer_helper. */
        assert(prsc->nr_samples <= 1);

        /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
         * being mapped.
         */
        if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
            !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
            !(prsc->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) &&
            prsc->last_level == 0 &&
            prsc->width0 == box->width &&
            prsc->height0 == box->height &&
            prsc->depth0 == box->depth &&
            prsc->array_size == 1 &&
            rsc->bo->private) {
                usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
        }

        v3d_map_usage_prep(pctx, prsc, usage);

        trans = slab_alloc(&v3d->transfer_pool);
        if (!trans)
                return NULL;

        /* XXX: Handle DONTBLOCK, DISCARD_RANGE, PERSISTENT, COHERENT. */

        /* slab_alloc_st() doesn't zero: */
        memset(trans, 0, sizeof(*trans));
        ptrans = &trans->base;

        pipe_resource_reference(&ptrans->resource, prsc);
        ptrans->level = level;
        ptrans->usage = usage;
        ptrans->box = *box;

        /* Note that the current kernel implementation is synchronous, so no
         * need to do syncing stuff here yet.
         */

        if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
                buf = v3d_bo_map_unsynchronized(rsc->bo);
        else
                buf = v3d_bo_map(rsc->bo);
        if (!buf) {
                fprintf(stderr, "Failed to map bo\n");
                goto fail;
        }

        *pptrans = ptrans;

        /* Our load/store routines work on entire compressed blocks. */
        ptrans->box.x /= util_format_get_blockwidth(format);
        ptrans->box.y /= util_format_get_blockheight(format);
        ptrans->box.width = DIV_ROUND_UP(ptrans->box.width,
                                         util_format_get_blockwidth(format));
        ptrans->box.height = DIV_ROUND_UP(ptrans->box.height,
                                          util_format_get_blockheight(format));

        struct v3d_resource_slice *slice = &rsc->slices[level];
        if (rsc->tiled) {
                /* No direct mappings of tiled, since we need to manually
                 * tile/untile.  Release the transfer rather than leaking it.
                 */
                if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
                        goto fail;

                ptrans->stride = ptrans->box.width * rsc->cpp;
                ptrans->layer_stride = ptrans->stride * ptrans->box.height;

                trans->map = malloc(ptrans->layer_stride * ptrans->box.depth);

                if (usage & PIPE_TRANSFER_READ) {
                        for (int z = 0; z < ptrans->box.depth; z++) {
                                void *src = rsc->bo->map +
                                        v3d_layer_offset(&rsc->base,
                                                         ptrans->level,
                                                         ptrans->box.z + z);
                                v3d_load_tiled_image((trans->map +
                                                      ptrans->stride *
                                                      ptrans->box.height * z),
                                                     ptrans->stride,
                                                     src,
                                                     slice->stride,
                                                     slice->tiling, rsc->cpp,
                                                     slice->padded_height,
                                                     &ptrans->box);
                        }
                }
                return trans->map;
        } else {
                ptrans->stride = slice->stride;
                ptrans->layer_stride = rsc->cube_map_stride;

                return buf + slice->offset +
                        ptrans->box.y * ptrans->stride +
                        ptrans->box.x * rsc->cpp +
                        ptrans->box.z * rsc->cube_map_stride;
        }

fail:
        v3d_resource_transfer_unmap(pctx, ptrans);
        return NULL;
}

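/**
 * Implements pctx->texture_subdata for tiled resources by tiling the
 * caller's data straight into the BO, skipping the transfer machinery
 * (and its extra staging copy); linear resources take the generic path.
 */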
static void
v3d_texture_subdata(struct pipe_context *pctx,
                    struct pipe_resource *prsc,
                    unsigned level,
                    unsigned usage,
                    const struct pipe_box *box,
                    const void *data,
                    unsigned stride,
                    unsigned layer_stride)
{
        struct v3d_resource *rsc = v3d_resource(prsc);
        struct v3d_resource_slice *slice = &rsc->slices[level];

        /* For a direct mapping, we can just take the u_transfer path. */
        if (!rsc->tiled) {
                return u_default_texture_subdata(pctx, prsc, level, usage, box,
                                                 data, stride, layer_stride);
        }

        /* Otherwise, map and store the texture data directly into the tiled
         * texture. Note that gallium's texture_subdata may be called with
         * obvious usage flags missing!
         */
        v3d_map_usage_prep(pctx, prsc, usage | (PIPE_TRANSFER_WRITE |
                                                PIPE_TRANSFER_DISCARD_RANGE));

        void *buf;
        if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
                buf = v3d_bo_map_unsynchronized(rsc->bo);
        else
                buf = v3d_bo_map(rsc->bo);

        for (int i = 0; i < box->depth; i++) {
                v3d_store_tiled_image(buf +
                                      v3d_layer_offset(&rsc->base,
                                                       level,
                                                       box->z + i),
                                      slice->stride,
                                      (void *)data + layer_stride * i,
                                      stride,
                                      slice->tiling, rsc->cpp,
                                      slice->padded_height,
                                      box);
        }
}

static void
v3d_resource_destroy(struct pipe_screen *pscreen,
                     struct pipe_resource *prsc)
{
        struct v3d_screen *screen = v3d_screen(pscreen);
        struct v3d_resource *rsc = v3d_resource(prsc);

        if (rsc->scanout)
                renderonly_scanout_destroy(rsc->scanout, screen->ro);

        v3d_bo_unreference(&rsc->bo);
        free(rsc);
}

static boolean
v3d_resource_get_handle(struct pipe_screen *pscreen,
                        struct pipe_context *pctx,
                        struct pipe_resource *prsc,
                        struct winsys_handle *whandle,
                        unsigned usage)
{
        struct v3d_screen *screen = v3d_screen(pscreen);
        struct v3d_resource *rsc = v3d_resource(prsc);
        struct v3d_bo *bo = rsc->bo;

        whandle->stride = rsc->slices[0].stride;
        whandle->offset = 0;

        /* If we're passing some reference to our BO out to some other part of
         * the system, then we can't do any optimizations about only us being
         * the ones seeing it (like BO caching).
         */
        bo->private = false;

        if (rsc->tiled) {
                /* A shared tiled buffer should always be allocated as UIF,
                 * not UBLINEAR or LT.
                 */
                assert(rsc->slices[0].tiling == VC5_TILING_UIF_XOR ||
                       rsc->slices[0].tiling == VC5_TILING_UIF_NO_XOR);
                whandle->modifier = DRM_FORMAT_MOD_BROADCOM_UIF;
        } else {
                whandle->modifier = DRM_FORMAT_MOD_LINEAR;
        }

        switch (whandle->type) {
        case WINSYS_HANDLE_TYPE_SHARED:
                return v3d_bo_flink(bo, &whandle->handle);
        case WINSYS_HANDLE_TYPE_KMS:
                if (screen->ro) {
                        assert(rsc->scanout);
                        return renderonly_get_handle(rsc->scanout, whandle);
                }
                whandle->handle = bo->handle;
                return TRUE;
        case WINSYS_HANDLE_TYPE_FD:
                whandle->handle = v3d_bo_get_dmabuf(bo);
                return whandle->handle != -1;
        }

        return FALSE;
}

#define PAGE_UB_ROWS (VC5_UIFCFG_PAGE_SIZE / VC5_UIFBLOCK_ROW_SIZE)
#define PAGE_UB_ROWS_TIMES_1_5 ((PAGE_UB_ROWS * 3) >> 1)
#define PAGE_CACHE_UB_ROWS (VC5_PAGE_CACHE_SIZE / VC5_UIFBLOCK_ROW_SIZE)
#define PAGE_CACHE_MINUS_1_5_UB_ROWS (PAGE_CACHE_UB_ROWS - PAGE_UB_ROWS_TIMES_1_5)

/**
 * Computes the HW's UIFblock padding for a given height/cpp.
 *
 * The goal of the padding is to keep pages of the same color (bank number) at
 * least half a page away from each other vertically when crossing between
 * columns of UIF blocks.
 */
static uint32_t
v3d_get_ub_pad(struct v3d_resource *rsc, uint32_t height)
{
        uint32_t utile_h = v3d_utile_height(rsc->cpp);
        uint32_t uif_block_h = utile_h * 2;
        uint32_t height_ub = height / uif_block_h;

        uint32_t height_offset_in_pc = height_ub % PAGE_CACHE_UB_ROWS;

        /* For the perfectly-aligned-for-UIF-XOR case, don't add any pad. */
        if (height_offset_in_pc == 0)
                return 0;

        /* Try padding up to where we're offset by at least half a page. */
        if (height_offset_in_pc < PAGE_UB_ROWS_TIMES_1_5) {
                /* If we fit entirely in the page cache, don't pad. */
                if (height_ub < PAGE_CACHE_UB_ROWS)
                        return 0;
                else
                        return PAGE_UB_ROWS_TIMES_1_5 - height_offset_in_pc;
        }

        /* If we're close to being aligned to page cache size, then round up
         * and rely on XOR.
         */
        if (height_offset_in_pc > PAGE_CACHE_MINUS_1_5_UB_ROWS)
                return PAGE_CACHE_UB_ROWS - height_offset_in_pc;

        /* Otherwise, we're far enough away (top and bottom) to not need any
         * padding.
         */
        return 0;
}
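
/* Worked example of the padding above, using illustrative numbers only
 * (say a 4096-byte page and a 1024-byte UIF-block row, i.e. PAGE_UB_ROWS
 * of 4 and PAGE_UB_ROWS_TIMES_1_5 of 6): a height 2 UIF-block rows past a
 * page-cache-aligned boundary would get 4 rows of pad to reach the
 * half-page offset, while a height within 6 rows of the next boundary
 * would get rounded up to it and rely on the XOR swizzle instead.
 */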
static void
v3d_setup_slices(struct v3d_resource *rsc, uint32_t winsys_stride)
{
        struct pipe_resource *prsc = &rsc->base;
        uint32_t width = prsc->width0;
        uint32_t height = prsc->height0;
        uint32_t depth = prsc->depth0;
        /* Note that power-of-two padding is based on level 1. These are not
         * equivalent to just util_next_power_of_two(dimension), because at a
         * level 0 dimension of 9, the level 1 power-of-two padded value is 4,
         * not 8.
         */
        uint32_t pot_width = 2 * util_next_power_of_two(u_minify(width, 1));
        uint32_t pot_height = 2 * util_next_power_of_two(u_minify(height, 1));
        uint32_t pot_depth = 2 * util_next_power_of_two(u_minify(depth, 1));
        uint32_t offset = 0;
        uint32_t utile_w = v3d_utile_width(rsc->cpp);
        uint32_t utile_h = v3d_utile_height(rsc->cpp);
        uint32_t uif_block_w = utile_w * 2;
        uint32_t uif_block_h = utile_h * 2;
        uint32_t block_width = util_format_get_blockwidth(prsc->format);
        uint32_t block_height = util_format_get_blockheight(prsc->format);
        bool msaa = prsc->nr_samples > 1;
        /* MSAA textures/renderbuffers are always laid out as single-level
         * UIF.
         */
        bool uif_top = msaa;

        /* Check some easy mistakes to make in a resource_create() call that
         * will break our setup.
         */
        assert(prsc->array_size != 0);
        assert(prsc->depth0 != 0);

        for (int i = prsc->last_level; i >= 0; i--) {
                struct v3d_resource_slice *slice = &rsc->slices[i];

                uint32_t level_width, level_height, level_depth;
                if (i < 2) {
                        level_width = u_minify(width, i);
                        level_height = u_minify(height, i);
                } else {
                        level_width = u_minify(pot_width, i);
                        level_height = u_minify(pot_height, i);
                }
                if (i < 1)
                        level_depth = u_minify(depth, i);
                else
                        level_depth = u_minify(pot_depth, i);

                if (msaa) {
                        level_width *= 2;
                        level_height *= 2;
                }

                level_width = DIV_ROUND_UP(level_width, block_width);
                level_height = DIV_ROUND_UP(level_height, block_height);

                if (!rsc->tiled) {
                        slice->tiling = VC5_TILING_RASTER;
                        if (prsc->target == PIPE_TEXTURE_1D)
                                level_width = align(level_width, 64 / rsc->cpp);
                } else {
                        if ((i != 0 || !uif_top) &&
                            (level_width <= utile_w ||
                             level_height <= utile_h)) {
                                slice->tiling = VC5_TILING_LINEARTILE;
                                level_width = align(level_width, utile_w);
                                level_height = align(level_height, utile_h);
                        } else if ((i != 0 || !uif_top) &&
                                   level_width <= uif_block_w) {
                                slice->tiling = VC5_TILING_UBLINEAR_1_COLUMN;
                                level_width = align(level_width, uif_block_w);
                                level_height = align(level_height, uif_block_h);
                        } else if ((i != 0 || !uif_top) &&
                                   level_width <= 2 * uif_block_w) {
                                slice->tiling = VC5_TILING_UBLINEAR_2_COLUMN;
                                level_width = align(level_width,
                                                    2 * uif_block_w);
                                level_height = align(level_height, uif_block_h);
                        } else {
                                /* We align the width to a 4-block column of
                                 * UIF blocks, but we only align height to UIF
                                 * blocks.
                                 */
                                level_width = align(level_width,
                                                    4 * uif_block_w);
                                level_height = align(level_height,
                                                     uif_block_h);

                                slice->ub_pad = v3d_get_ub_pad(rsc,
                                                               level_height);
                                level_height += slice->ub_pad * uif_block_h;

                                /* If the padding set us to be aligned to the
                                 * page cache size, then the HW will use the
                                 * XOR bit on odd columns to get us perfectly
                                 * misaligned.
                                 */
                                if ((level_height / uif_block_h) %
                                    (VC5_PAGE_CACHE_SIZE /
                                     VC5_UIFBLOCK_ROW_SIZE) == 0) {
                                        slice->tiling = VC5_TILING_UIF_XOR;
                                } else {
                                        slice->tiling = VC5_TILING_UIF_NO_XOR;
                                }
                        }
                }

                slice->offset = offset;
                if (winsys_stride)
                        slice->stride = winsys_stride;
                else
                        slice->stride = level_width * rsc->cpp;
                slice->padded_height = level_height;
                slice->size = level_height * slice->stride;

                uint32_t slice_total_size = slice->size * level_depth;

                /* The HW aligns level 1's base to a page if any of level 1 or
                 * below could be UIF XOR. The lower levels then inherit the
                 * alignment for as long as necessary, thanks to being power of
                 * two aligned.
                 */
                if (i == 1 &&
                    level_width > 4 * uif_block_w &&
                    level_height > PAGE_CACHE_MINUS_1_5_UB_ROWS * uif_block_h) {
                        slice_total_size = align(slice_total_size,
                                                 VC5_UIFCFG_PAGE_SIZE);
                }

                offset += slice_total_size;
        }
        rsc->size = offset;

        /* UIF/UBLINEAR levels need to be aligned to UIF-blocks, and LT only
         * needs to be aligned to utile boundaries. Since tiles are laid out
         * from small to big in memory, we need to align the later UIF slices
         * to UIF blocks, if they were preceded by non-UIF-block-aligned LT
         * slices.
         *
         * We additionally align to 4k, which improves UIF XOR performance.
         */
        uint32_t page_align_offset = (align(rsc->slices[0].offset, 4096) -
                                      rsc->slices[0].offset);
        if (page_align_offset) {
                rsc->size += page_align_offset;
                for (int i = 0; i <= prsc->last_level; i++)
                        rsc->slices[i].offset += page_align_offset;
        }

        /* Arrays and cube textures have a stride which is the distance from
         * one full mipmap tree to the next (64b aligned). For 3D textures,
         * we need to program the stride between slices of miplevel 0.
         */
        if (prsc->target != PIPE_TEXTURE_3D) {
                rsc->cube_map_stride = align(rsc->slices[0].offset +
                                             rsc->slices[0].size, 64);
                rsc->size += rsc->cube_map_stride * (prsc->array_size - 1);
        } else {
                rsc->cube_map_stride = rsc->slices[0].size;
        }
}

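/**
 * Returns the byte offset of a given layer of a given miplevel within the
 * BO: 3D textures step between z slices by the level's slice size, while
 * array/cube layers step by the whole-miptree cube_map_stride (so, for
 * example, face 3 of a cube map starts at slice->offset +
 * 3 * cube_map_stride).
 */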
uint32_t
v3d_layer_offset(struct pipe_resource *prsc, uint32_t level, uint32_t layer)
{
        struct v3d_resource *rsc = v3d_resource(prsc);
        struct v3d_resource_slice *slice = &rsc->slices[level];

        if (prsc->target == PIPE_TEXTURE_3D)
                return slice->offset + layer * slice->size;
        else
                return slice->offset + layer * rsc->cube_map_stride;
}

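/**
 * Allocation and template setup shared by resource_create and
 * resource_from_handle. cpp is normally the format's block size; the
 * exception is MSAA color on V3D 3.x, where the stored data uses the
 * TLB's internal format, so cpp comes from the internal bpp instead (and
 * 3.x depth/stencil MSAA scales cpp by the sample count).
 */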
static struct v3d_resource *
v3d_resource_setup(struct pipe_screen *pscreen,
                   const struct pipe_resource *tmpl)
{
        struct v3d_screen *screen = v3d_screen(pscreen);
        struct v3d_resource *rsc = CALLOC_STRUCT(v3d_resource);
        if (!rsc)
                return NULL;
        struct pipe_resource *prsc = &rsc->base;

        *prsc = *tmpl;

        pipe_reference_init(&prsc->reference, 1);
        prsc->screen = pscreen;

        if (prsc->nr_samples <= 1 ||
            screen->devinfo.ver >= 40 ||
            util_format_is_depth_or_stencil(prsc->format)) {
                rsc->cpp = util_format_get_blocksize(prsc->format);
                if (screen->devinfo.ver < 40 && prsc->nr_samples > 1)
                        rsc->cpp *= prsc->nr_samples;
        } else {
                assert(v3d_rt_format_supported(&screen->devinfo, prsc->format));
                uint32_t output_image_format =
                        v3d_get_rt_format(&screen->devinfo, prsc->format);
                uint32_t internal_type;
                uint32_t internal_bpp;
                v3d_get_internal_type_bpp_for_output_format(&screen->devinfo,
                                                            output_image_format,
                                                            &internal_type,
                                                            &internal_bpp);
                switch (internal_bpp) {
                case V3D_INTERNAL_BPP_32:
                        rsc->cpp = 4;
                        break;
                case V3D_INTERNAL_BPP_64:
                        rsc->cpp = 8;
                        break;
                case V3D_INTERNAL_BPP_128:
                        rsc->cpp = 16;
                        break;
                }
        }

        assert(rsc->cpp);

        return rsc;
}

static bool
find_modifier(uint64_t needle, const uint64_t *haystack, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                if (haystack[i] == needle)
                        return true;
        }

        return false;
}

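/**
 * Chooses between tiled (UIF) and linear layouts based on the template
 * and the caller's modifier list, then sets up slices and allocates the
 * BO. Scanout/shared resources on a renderonly setup are instead
 * allocated linearly by the display device and imported back into v3d.
 */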
static struct pipe_resource *
v3d_resource_create_with_modifiers(struct pipe_screen *pscreen,
                                   const struct pipe_resource *tmpl,
                                   const uint64_t *modifiers,
                                   int count)
{
        struct v3d_screen *screen = v3d_screen(pscreen);

        /* If we're in a renderonly setup, use the other device to perform our
         * (linear) allocation and just import it to v3d. The other device
         * may be using CMA, and V3D can import from CMA but doesn't do CMA
         * allocations on its own.
         *
         * We always allocate this way for SHARED, because get_handle will
         * need a resource on the display fd.
         */
        if (screen->ro && (tmpl->bind & (PIPE_BIND_SCANOUT |
                                         PIPE_BIND_SHARED))) {
                struct winsys_handle handle;
                struct pipe_resource scanout_tmpl = *tmpl;
                struct renderonly_scanout *scanout =
                        renderonly_scanout_for_resource(&scanout_tmpl,
                                                        screen->ro,
                                                        &handle);
                if (!scanout) {
                        fprintf(stderr, "Failed to create scanout resource\n");
                        return NULL;
                }
                assert(handle.type == WINSYS_HANDLE_TYPE_FD);
                /* The fd is all we need. Destroy the old scanout (and its
                 * GEM handle on kms_fd) before resource_from_handle()'s
                 * renderonly_create_gpu_import_for_resource() call which will
                 * also get a kms_fd GEM handle for the fd.
                 */
                renderonly_scanout_destroy(scanout, screen->ro);
                struct pipe_resource *prsc =
                        pscreen->resource_from_handle(pscreen, tmpl,
                                                      &handle,
                                                      PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE);
                close(handle.handle);
                return prsc;
        }

        bool linear_ok = find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
        struct v3d_resource *rsc = v3d_resource_setup(pscreen, tmpl);
        if (!rsc)
                return NULL;
        struct pipe_resource *prsc = &rsc->base;
        /* Use a tiled layout if we can, for better 3D performance. */
        bool should_tile = true;

        /* VBOs/PBOs are untiled (and 1 height). */
        if (tmpl->target == PIPE_BUFFER)
                should_tile = false;

        /* Cursors are always linear, and the user can request linear as well.
         */
        if (tmpl->bind & (PIPE_BIND_LINEAR | PIPE_BIND_CURSOR))
                should_tile = false;

        /* 1D and 1D_ARRAY textures are always raster-order. */
        if (tmpl->target == PIPE_TEXTURE_1D ||
            tmpl->target == PIPE_TEXTURE_1D_ARRAY)
                should_tile = false;

        /* Scanout BOs for simulator need to be linear for interaction with
         * i965.
         */
        if (using_v3d_simulator &&
            tmpl->bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
                should_tile = false;

        /* No user-specified modifier; determine our own. */
        if (count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID) {
                linear_ok = true;
                rsc->tiled = should_tile;
        } else if (should_tile &&
                   find_modifier(DRM_FORMAT_MOD_BROADCOM_UIF,
                                 modifiers, count)) {
                rsc->tiled = true;
        } else if (linear_ok) {
                rsc->tiled = false;
        } else {
                fprintf(stderr, "Unsupported modifier requested\n");
                goto fail;
        }

        rsc->internal_format = prsc->format;

        v3d_setup_slices(rsc, 0);

        if (!v3d_resource_bo_alloc(rsc))
                goto fail;

        return prsc;
fail:
        v3d_resource_destroy(pscreen, prsc);
        return NULL;
}

struct pipe_resource *
v3d_resource_create(struct pipe_screen *pscreen,
                    const struct pipe_resource *tmpl)
{
        const uint64_t mod = DRM_FORMAT_MOD_INVALID;
        return v3d_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
}

static struct pipe_resource *
v3d_resource_from_handle(struct pipe_screen *pscreen,
                         const struct pipe_resource *tmpl,
                         struct winsys_handle *whandle,
                         unsigned usage)
{
        struct v3d_screen *screen = v3d_screen(pscreen);
        struct v3d_resource *rsc = v3d_resource_setup(pscreen, tmpl);

        if (!rsc)
                return NULL;

        struct pipe_resource *prsc = &rsc->base;
        struct v3d_resource_slice *slice = &rsc->slices[0];

        switch (whandle->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                rsc->tiled = false;
                break;
        case DRM_FORMAT_MOD_BROADCOM_UIF:
                rsc->tiled = true;
                break;
        case DRM_FORMAT_MOD_INVALID:
                rsc->tiled = screen->ro == NULL;
                break;
        default:
                fprintf(stderr,
                        "Attempt to import unsupported modifier 0x%llx\n",
                        (long long)whandle->modifier);
                goto fail;
        }

        if (whandle->offset != 0) {
                fprintf(stderr,
                        "Attempt to import unsupported winsys offset %u\n",
                        whandle->offset);
                goto fail;
        }

        switch (whandle->type) {
        case WINSYS_HANDLE_TYPE_SHARED:
                rsc->bo = v3d_bo_open_name(screen, whandle->handle);
                break;
        case WINSYS_HANDLE_TYPE_FD:
                rsc->bo = v3d_bo_open_dmabuf(screen, whandle->handle);
                break;
        default:
                fprintf(stderr,
                        "Attempt to import unsupported handle type %d\n",
                        whandle->type);
                goto fail;
        }

        if (!rsc->bo)
                goto fail;

        rsc->internal_format = prsc->format;

        v3d_setup_slices(rsc, whandle->stride);
        v3d_debug_resource_layout(rsc, "import");

        if (screen->ro) {
                /* Make sure that renderonly has a handle to our buffer in the
                 * display's fd, so that a later renderonly_get_handle()
                 * returns correct handles or GEM names.
                 */
                rsc->scanout =
                        renderonly_create_gpu_import_for_resource(prsc,
                                                                  screen->ro,
                                                                  NULL);
                if (!rsc->scanout) {
                        fprintf(stderr, "Failed to create scanout resource.\n");
                        goto fail;
                }
        }

        if (whandle->stride != slice->stride) {
                static bool warned = false;
                if (!warned) {
                        warned = true;
                        fprintf(stderr,
                                "Attempting to import %dx%d %s with "
                                "unsupported stride %d instead of %d\n",
                                prsc->width0, prsc->height0,
                                util_format_short_name(prsc->format),
                                whandle->stride,
                                slice->stride);
                }
                goto fail;
        }

        return prsc;

fail:
        v3d_resource_destroy(pscreen, prsc);
        return NULL;
}

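/**
 * Blits every level of a sampler view's tiled shadow texture from its
 * linear original when the original may have been written since the last
 * update, using the writes counters to skip no-op updates.
 */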
void
v3d_update_shadow_texture(struct pipe_context *pctx,
                          struct pipe_sampler_view *pview)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_sampler_view *view = v3d_sampler_view(pview);
        struct v3d_resource *shadow = v3d_resource(view->texture);
        struct v3d_resource *orig = v3d_resource(pview->texture);

        assert(view->texture != pview->texture);

        if (shadow->writes == orig->writes && orig->bo->private)
                return;

        perf_debug("Updating %dx%d@%d shadow for linear texture\n",
                   orig->base.width0, orig->base.height0,
                   pview->u.tex.first_level);

        for (int i = 0; i <= shadow->base.last_level; i++) {
                unsigned width = u_minify(shadow->base.width0, i);
                unsigned height = u_minify(shadow->base.height0, i);
                struct pipe_blit_info info = {
                        .dst = {
                                .resource = &shadow->base,
                                .level = i,
                                .box = {
                                        .x = 0,
                                        .y = 0,
                                        .z = 0,
                                        .width = width,
                                        .height = height,
                                        .depth = 1,
                                },
                                .format = shadow->base.format,
                        },
                        .src = {
                                .resource = &orig->base,
                                .level = pview->u.tex.first_level + i,
                                .box = {
                                        .x = 0,
                                        .y = 0,
                                        .z = 0,
                                        .width = width,
                                        .height = height,
                                        .depth = 1,
                                },
                                .format = orig->base.format,
                        },
                        .mask = util_format_get_mask(orig->base.format),
                };
                pctx->blit(pctx, &info);
        }

        shadow->writes = orig->writes;
}

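/**
 * Creates a pipe_surface, precomputing the layout state (offset, tiling
 * mode, internal type/bpp, UIF padded height) that job setup will need,
 * and recursively wrapping any separate stencil resource.
 */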
static struct pipe_surface *
v3d_create_surface(struct pipe_context *pctx,
                   struct pipe_resource *ptex,
                   const struct pipe_surface *surf_tmpl)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_screen *screen = v3d->screen;
        struct v3d_surface *surface = CALLOC_STRUCT(v3d_surface);
        struct v3d_resource *rsc = v3d_resource(ptex);

        if (!surface)
                return NULL;

        assert(surf_tmpl->u.tex.first_layer == surf_tmpl->u.tex.last_layer);

        struct pipe_surface *psurf = &surface->base;
        unsigned level = surf_tmpl->u.tex.level;
        struct v3d_resource_slice *slice = &rsc->slices[level];

        pipe_reference_init(&psurf->reference, 1);
        pipe_resource_reference(&psurf->texture, ptex);

        psurf->context = pctx;
        psurf->format = surf_tmpl->format;
        psurf->width = u_minify(ptex->width0, level);
        psurf->height = u_minify(ptex->height0, level);
        psurf->u.tex.level = level;
        psurf->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
        psurf->u.tex.last_layer = surf_tmpl->u.tex.last_layer;

        surface->offset = v3d_layer_offset(ptex, level,
                                           psurf->u.tex.first_layer);
        surface->tiling = slice->tiling;

        surface->format = v3d_get_rt_format(&screen->devinfo, psurf->format);

        const struct util_format_description *desc =
                util_format_description(psurf->format);

        surface->swap_rb = (desc->swizzle[0] == PIPE_SWIZZLE_Z &&
                            psurf->format != PIPE_FORMAT_B5G6R5_UNORM);

        if (util_format_is_depth_or_stencil(psurf->format)) {
                switch (psurf->format) {
                case PIPE_FORMAT_Z16_UNORM:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_16;
                        break;
                case PIPE_FORMAT_Z32_FLOAT:
                case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_32F;
                        break;
                default:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_24;
                }
        } else {
                uint32_t bpp, type;
                v3d_get_internal_type_bpp_for_output_format(&screen->devinfo,
                                                            surface->format,
                                                            &type, &bpp);
                surface->internal_type = type;
                surface->internal_bpp = bpp;
        }

        if (surface->tiling == VC5_TILING_UIF_NO_XOR ||
            surface->tiling == VC5_TILING_UIF_XOR) {
                surface->padded_height_of_output_image_in_uif_blocks =
                        (slice->padded_height /
                         (2 * v3d_utile_height(rsc->cpp)));
        }

        if (rsc->separate_stencil) {
                surface->separate_stencil =
                        v3d_create_surface(pctx, &rsc->separate_stencil->base,
                                           surf_tmpl);
        }

        return &surface->base;
}

static void
v3d_surface_destroy(struct pipe_context *pctx, struct pipe_surface *psurf)
{
        struct v3d_surface *surf = v3d_surface(psurf);

        if (surf->separate_stencil)
                pipe_surface_reference(&surf->separate_stencil, NULL);

        pipe_resource_reference(&psurf->texture, NULL);
        FREE(psurf);
}

static void
v3d_flush_resource(struct pipe_context *pctx, struct pipe_resource *resource)
{
        /* All calls to flush_resource are followed by a flush of the context,
         * so there's nothing to do.
         */
}

static enum pipe_format
v3d_resource_get_internal_format(struct pipe_resource *prsc)
{
        return v3d_resource(prsc)->internal_format;
}

static void
v3d_resource_set_stencil(struct pipe_resource *prsc,
                         struct pipe_resource *stencil)
{
        v3d_resource(prsc)->separate_stencil = v3d_resource(stencil);
}

static struct pipe_resource *
v3d_resource_get_stencil(struct pipe_resource *prsc)
{
        struct v3d_resource *rsc = v3d_resource(prsc);

        return &rsc->separate_stencil->base;
}

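/* Resource/transfer callbacks handed to u_transfer_helper, which layers
 * Z32F_S8X24 splitting, separate stencil handling, and MSAA map support
 * on top of our implementations above.
 */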
static const struct u_transfer_vtbl transfer_vtbl = {
        .resource_create          = v3d_resource_create,
        .resource_destroy         = v3d_resource_destroy,
        .transfer_map             = v3d_resource_transfer_map,
        .transfer_unmap           = v3d_resource_transfer_unmap,
        .transfer_flush_region    = u_default_transfer_flush_region,
        .get_internal_format      = v3d_resource_get_internal_format,
        .set_stencil              = v3d_resource_set_stencil,
        .get_stencil              = v3d_resource_get_stencil,
};

void
v3d_resource_screen_init(struct pipe_screen *pscreen)
{
        pscreen->resource_create_with_modifiers =
                v3d_resource_create_with_modifiers;
        pscreen->resource_create = u_transfer_helper_resource_create;
        pscreen->resource_from_handle = v3d_resource_from_handle;
        pscreen->resource_get_handle = v3d_resource_get_handle;
        pscreen->resource_destroy = u_transfer_helper_resource_destroy;
        pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
                                                            true, false,
                                                            true, true);
}

void
v3d_resource_context_init(struct pipe_context *pctx)
{
        pctx->transfer_map = u_transfer_helper_transfer_map;
        pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
        pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
        pctx->buffer_subdata = u_default_buffer_subdata;
        pctx->texture_subdata = v3d_texture_subdata;
        pctx->create_surface = v3d_create_surface;
        pctx->surface_destroy = v3d_surface_destroy;
        pctx->resource_copy_region = util_resource_copy_region;
        pctx->blit = v3d_blit;
        pctx->generate_mipmap = v3d_generate_mipmap;
        pctx->flush_resource = v3d_flush_resource;
}