broadcom/vc5: Simplify separate stencil surface setup.
src/gallium/drivers/vc5/vc5_resource.c
/*
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_blit.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_surface.h"
#include "util/u_transfer_helper.h"
#include "util/u_upload_mgr.h"
#include "util/u_format_zs.h"

#include "drm_fourcc.h"
#include "vc5_screen.h"
#include "vc5_context.h"
#include "vc5_resource.h"
#include "vc5_tiling.h"
#include "broadcom/cle/v3d_packet_v33_pack.h"

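/* Local fallback for drm_fourcc.h headers that don't define the invalid
 * modifier yet; the value is intended to match the canonical definition.
 */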
#ifndef DRM_FORMAT_MOD_INVALID
#define DRM_FORMAT_MOD_INVALID ((1ULL << 56) - 1)
#endif

static void
vc5_debug_resource_layout(struct vc5_resource *rsc, const char *caller)
{
        if (!(V3D_DEBUG & V3D_DEBUG_SURFACE))
                return;

        struct pipe_resource *prsc = &rsc->base;

        if (prsc->target == PIPE_BUFFER) {
                fprintf(stderr,
                        "rsc %s %p (format %s), %dx%d buffer @0x%08x-0x%08x\n",
                        caller, rsc,
                        util_format_short_name(prsc->format),
                        prsc->width0, prsc->height0,
                        rsc->bo->offset,
                        rsc->bo->offset + rsc->bo->size - 1);
                return;
        }

        static const char *const tiling_descriptions[] = {
                [VC5_TILING_RASTER] = "R",
                [VC5_TILING_LINEARTILE] = "LT",
                [VC5_TILING_UBLINEAR_1_COLUMN] = "UB1",
                [VC5_TILING_UBLINEAR_2_COLUMN] = "UB2",
                [VC5_TILING_UIF_NO_XOR] = "UIF",
                [VC5_TILING_UIF_XOR] = "UIF^",
        };

        for (int i = 0; i <= prsc->last_level; i++) {
                struct vc5_resource_slice *slice = &rsc->slices[i];

                int level_width = slice->stride / rsc->cpp;
                int level_height = slice->size / slice->stride;

                fprintf(stderr,
                        "rsc %s %p (format %s), %dx%d: "
                        "level %d (%s) %dx%d -> %dx%d, stride %d@0x%08x\n",
                        caller, rsc,
                        util_format_short_name(prsc->format),
                        prsc->width0, prsc->height0,
                        i, tiling_descriptions[slice->tiling],
                        u_minify(prsc->width0, i),
                        u_minify(prsc->height0, i),
                        level_width,
                        level_height,
                        slice->stride,
                        rsc->bo->offset + slice->offset);
        }
}

static bool
vc5_resource_bo_alloc(struct vc5_resource *rsc)
{
        struct pipe_resource *prsc = &rsc->base;
        struct pipe_screen *pscreen = prsc->screen;
        struct vc5_bo *bo;
        int layers = (prsc->target == PIPE_TEXTURE_3D ?
                      prsc->depth0 : prsc->array_size);

        bo = vc5_bo_alloc(vc5_screen(pscreen),
                          rsc->slices[0].offset +
                          rsc->slices[0].size +
                          rsc->cube_map_stride * layers - 1,
                          "resource");
        if (bo) {
                vc5_bo_unreference(&rsc->bo);
                rsc->bo = bo;
                vc5_debug_resource_layout(rsc, "alloc");
                return true;
        } else {
                return false;
        }
}

static void
vc5_resource_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *ptrans)
{
        struct vc5_context *vc5 = vc5_context(pctx);
        struct vc5_transfer *trans = vc5_transfer(ptrans);

        if (trans->map) {
                struct vc5_resource *rsc = vc5_resource(ptrans->resource);
                struct vc5_resource_slice *slice = &rsc->slices[ptrans->level];

                if (ptrans->usage & PIPE_TRANSFER_WRITE) {
                        vc5_store_tiled_image(rsc->bo->map + slice->offset +
                                              ptrans->box.z * rsc->cube_map_stride,
                                              slice->stride,
                                              trans->map, ptrans->stride,
                                              slice->tiling, rsc->cpp,
                                              u_minify(rsc->base.height0,
                                                       ptrans->level),
                                              &ptrans->box);
                }
                free(trans->map);
        }

        pipe_resource_reference(&ptrans->resource, NULL);
        slab_free(&vc5->transfer_pool, ptrans);
}

static void *
vc5_resource_transfer_map(struct pipe_context *pctx,
                          struct pipe_resource *prsc,
                          unsigned level, unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **pptrans)
{
        struct vc5_context *vc5 = vc5_context(pctx);
        struct vc5_resource *rsc = vc5_resource(prsc);
        struct vc5_transfer *trans;
        struct pipe_transfer *ptrans;
        enum pipe_format format = prsc->format;
        char *buf;

        /* MSAA maps should have been handled by u_transfer_helper. */
        assert(prsc->nr_samples <= 1);

        /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
         * being mapped.
         */
        if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
            !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
            !(prsc->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) &&
            prsc->last_level == 0 &&
            prsc->width0 == box->width &&
            prsc->height0 == box->height &&
            prsc->depth0 == box->depth &&
            prsc->array_size == 1 &&
            rsc->bo->private) {
                usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
        }

        if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
                if (vc5_resource_bo_alloc(rsc)) {
                        /* If it might be bound as one of our vertex buffers
                         * or UBOs, make sure we re-emit vertex buffer state
                         * or uniforms.
                         */
                        if (prsc->bind & PIPE_BIND_VERTEX_BUFFER)
                                vc5->dirty |= VC5_DIRTY_VTXBUF;
                        if (prsc->bind & PIPE_BIND_CONSTANT_BUFFER)
                                vc5->dirty |= VC5_DIRTY_CONSTBUF;
                } else {
                        /* If we failed to reallocate, flush users so that we
                         * don't violate any syncing requirements.
                         */
                        vc5_flush_jobs_reading_resource(vc5, prsc);
                }
        } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
                /* If we're writing and the buffer is being used by the CL, we
                 * have to flush the CL first.  If we're only reading, we need
                 * to flush if the CL has written our buffer.
                 */
                if (usage & PIPE_TRANSFER_WRITE)
                        vc5_flush_jobs_reading_resource(vc5, prsc);
                else
                        vc5_flush_jobs_writing_resource(vc5, prsc);
        }

        if (usage & PIPE_TRANSFER_WRITE) {
                rsc->writes++;
                rsc->initialized_buffers = ~0;
        }

        trans = slab_alloc(&vc5->transfer_pool);
        if (!trans)
                return NULL;

        /* XXX: Handle DONTBLOCK, DISCARD_RANGE, PERSISTENT, COHERENT. */

        /* slab_alloc_st() doesn't zero: */
        memset(trans, 0, sizeof(*trans));
        ptrans = &trans->base;

        pipe_resource_reference(&ptrans->resource, prsc);
        ptrans->level = level;
        ptrans->usage = usage;
        ptrans->box = *box;

        /* Note that the current kernel implementation is synchronous, so no
         * need to do syncing stuff here yet.
         */

        if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
                buf = vc5_bo_map_unsynchronized(rsc->bo);
        else
                buf = vc5_bo_map(rsc->bo);
        if (!buf) {
                fprintf(stderr, "Failed to map bo\n");
                goto fail;
        }

        *pptrans = ptrans;

        /* Our load/store routines work on entire compressed blocks. */
        ptrans->box.x /= util_format_get_blockwidth(format);
        ptrans->box.y /= util_format_get_blockheight(format);
        ptrans->box.width = DIV_ROUND_UP(ptrans->box.width,
                                         util_format_get_blockwidth(format));
        ptrans->box.height = DIV_ROUND_UP(ptrans->box.height,
                                          util_format_get_blockheight(format));

        struct vc5_resource_slice *slice = &rsc->slices[level];
        if (rsc->tiled) {
                /* No direct mappings of tiled, since we need to manually
                 * tile/untile.
                 */
                if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
                        return NULL;

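                /* Tiled maps go through a malloc'd linear staging buffer:
                 * reads untile into it here, and writes are tiled back into
                 * the BO at unmap time.
                 */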
                ptrans->stride = ptrans->box.width * rsc->cpp;
                ptrans->layer_stride = ptrans->stride * ptrans->box.height;

                trans->map = malloc(ptrans->layer_stride * ptrans->box.depth);

                if (usage & PIPE_TRANSFER_READ) {
                        vc5_load_tiled_image(trans->map, ptrans->stride,
                                             buf + slice->offset +
                                             ptrans->box.z * rsc->cube_map_stride,
                                             slice->stride,
                                             slice->tiling, rsc->cpp,
                                             rsc->base.height0,
                                             &ptrans->box);
                }
                return trans->map;
        } else {
                ptrans->stride = slice->stride;
                ptrans->layer_stride = ptrans->stride;

                return buf + slice->offset +
                        ptrans->box.y * ptrans->stride +
                        ptrans->box.x * rsc->cpp +
                        ptrans->box.z * rsc->cube_map_stride;
        }


fail:
        vc5_resource_transfer_unmap(pctx, ptrans);
        return NULL;
}

static void
vc5_resource_destroy(struct pipe_screen *pscreen,
                     struct pipe_resource *prsc)
{
        struct vc5_resource *rsc = vc5_resource(prsc);

        vc5_bo_unreference(&rsc->bo);
        free(rsc);
}

static boolean
vc5_resource_get_handle(struct pipe_screen *pscreen,
                        struct pipe_context *pctx,
                        struct pipe_resource *prsc,
                        struct winsys_handle *whandle,
                        unsigned usage)
{
        struct vc5_resource *rsc = vc5_resource(prsc);
        struct vc5_bo *bo = rsc->bo;

        whandle->stride = rsc->slices[0].stride;

        /* If we're passing some reference to our BO out to some other part of
         * the system, then we can't do any optimizations about only us being
         * the ones seeing it (like BO caching).
         */
        bo->private = false;

        switch (whandle->type) {
        case DRM_API_HANDLE_TYPE_SHARED:
                return vc5_bo_flink(bo, &whandle->handle);
        case DRM_API_HANDLE_TYPE_KMS:
                whandle->handle = bo->handle;
                return TRUE;
        case DRM_API_HANDLE_TYPE_FD:
                whandle->handle = vc5_bo_get_dmabuf(bo);
                return whandle->handle != -1;
        }

        return FALSE;
}

static void
vc5_setup_slices(struct vc5_resource *rsc)
{
        struct pipe_resource *prsc = &rsc->base;
        uint32_t width = prsc->width0;
        uint32_t height = prsc->height0;
        uint32_t pot_width = util_next_power_of_two(width);
        uint32_t pot_height = util_next_power_of_two(height);
        uint32_t offset = 0;
        uint32_t utile_w = vc5_utile_width(rsc->cpp);
        uint32_t utile_h = vc5_utile_height(rsc->cpp);
        uint32_t uif_block_w = utile_w * 2;
        uint32_t uif_block_h = utile_h * 2;
        uint32_t block_width = util_format_get_blockwidth(prsc->format);
        uint32_t block_height = util_format_get_blockheight(prsc->format);
        bool msaa = prsc->nr_samples > 1;
        /* MSAA textures/renderbuffers are always laid out as single-level
         * UIF.
         */
        bool uif_top = msaa;

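        /* Lay the slices out from the smallest mip level (last_level) at
         * offset 0 up to level 0, so level 0 ends up last in the BO and gets
         * the extra page alignment applied below.
         */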
        for (int i = prsc->last_level; i >= 0; i--) {
                struct vc5_resource_slice *slice = &rsc->slices[i];

                uint32_t level_width, level_height;
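                /* Levels 0 and 1 use the resource's real dimensions; smaller
                 * levels are minified from power-of-two padded dimensions
                 * instead (presumably matching the hardware's mip layout).
                 */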
                if (i < 2) {
                        level_width = u_minify(width, i);
                        level_height = u_minify(height, i);
                } else {
                        level_width = u_minify(pot_width, i);
                        level_height = u_minify(pot_height, i);
                }

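                /* 4x MSAA surfaces store each sample explicitly as a 2x2
                 * grid per pixel, so both dimensions double.
                 */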
                if (msaa) {
                        level_width *= 2;
                        level_height *= 2;
                }

                level_width = DIV_ROUND_UP(level_width, block_width);
                level_height = DIV_ROUND_UP(level_height, block_height);

                if (!rsc->tiled) {
                        slice->tiling = VC5_TILING_RASTER;
                        if (prsc->target == PIPE_TEXTURE_1D)
                                level_width = align(level_width, 64 / rsc->cpp);
                } else {
                        if ((i != 0 || !uif_top) &&
                            (level_width <= utile_w ||
                             level_height <= utile_h)) {
                                slice->tiling = VC5_TILING_LINEARTILE;
                                level_width = align(level_width, utile_w);
                                level_height = align(level_height, utile_h);
                        } else if ((i != 0 || !uif_top) &&
                                   level_width <= uif_block_w) {
                                slice->tiling = VC5_TILING_UBLINEAR_1_COLUMN;
                                level_width = align(level_width, uif_block_w);
                                level_height = align(level_height, uif_block_h);
                        } else if ((i != 0 || !uif_top) &&
                                   level_width <= 2 * uif_block_w) {
                                slice->tiling = VC5_TILING_UBLINEAR_2_COLUMN;
                                level_width = align(level_width, 2 * uif_block_w);
                                level_height = align(level_height, uif_block_h);
                        } else {
                                slice->tiling = VC5_TILING_UIF_NO_XOR;

                                /* We align the width to a 4-block column of
                                 * UIF blocks, but we only align height to UIF
                                 * blocks.
                                 */
                                level_width = align(level_width,
                                                    4 * uif_block_w);
                                level_height = align(level_height,
                                                     uif_block_h);
                        }
                }

                slice->offset = offset;
                slice->stride = level_width * rsc->cpp;
                slice->size = level_height * slice->stride;

                offset += slice->size;
        }

        /* UIF/UBLINEAR levels need to be aligned to UIF-blocks, and LT only
         * needs to be aligned to utile boundaries.  Since tiles are laid out
         * from small to big in memory, we need to align the later UIF slices
         * to UIF blocks, if they were preceded by non-UIF-block-aligned LT
         * slices.
         *
         * We additionally align to 4k, which improves UIF XOR performance.
         */
        uint32_t page_align_offset = (align(rsc->slices[0].offset, 4096) -
                                      rsc->slices[0].offset);
        if (page_align_offset) {
                for (int i = 0; i <= prsc->last_level; i++)
                        rsc->slices[i].offset += page_align_offset;
        }

        /* Arrays, cubes, and 3D textures have a stride which is the distance
         * from one full mipmap tree to the next (64b aligned).
         */
        rsc->cube_map_stride = align(rsc->slices[0].offset +
                                     rsc->slices[0].size, 64);
}

static struct vc5_resource *
vc5_resource_setup(struct pipe_screen *pscreen,
                   const struct pipe_resource *tmpl)
{
        struct vc5_screen *screen = vc5_screen(pscreen);
        struct vc5_resource *rsc = CALLOC_STRUCT(vc5_resource);
        if (!rsc)
                return NULL;
        struct pipe_resource *prsc = &rsc->base;

        *prsc = *tmpl;

        pipe_reference_init(&prsc->reference, 1);
        prsc->screen = pscreen;

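        /* Single-sampled resources size cpp from the format's block size.
         * For MSAA, the samples are stored at the render target's internal
         * bpp, so derive cpp from that instead.
         */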
        if (prsc->nr_samples <= 1) {
                rsc->cpp = util_format_get_blocksize(prsc->format);
        } else {
                assert(vc5_rt_format_supported(&screen->devinfo, prsc->format));
                uint32_t output_image_format =
                        vc5_get_rt_format(&screen->devinfo, prsc->format);
                uint32_t internal_type;
                uint32_t internal_bpp;
                vc5_get_internal_type_bpp_for_output_format(&screen->devinfo,
                                                            output_image_format,
                                                            &internal_type,
                                                            &internal_bpp);
                switch (internal_bpp) {
                case V3D_INTERNAL_BPP_32:
                        rsc->cpp = 4;
                        break;
                case V3D_INTERNAL_BPP_64:
                        rsc->cpp = 8;
                        break;
                case V3D_INTERNAL_BPP_128:
                        rsc->cpp = 16;
                        break;
                }
        }

        assert(rsc->cpp);

        return rsc;
}

static bool
find_modifier(uint64_t needle, const uint64_t *haystack, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                if (haystack[i] == needle)
                        return true;
        }

        return false;
}

static struct pipe_resource *
vc5_resource_create_with_modifiers(struct pipe_screen *pscreen,
                                   const struct pipe_resource *tmpl,
                                   const uint64_t *modifiers,
                                   int count)
{
        bool linear_ok = find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
        struct vc5_resource *rsc = vc5_resource_setup(pscreen, tmpl);
        struct pipe_resource *prsc = &rsc->base;
        /* Use a tiled layout if we can, for better 3D performance. */
        bool should_tile = true;

        /* VBOs/PBOs are untiled (and 1 height). */
        if (tmpl->target == PIPE_BUFFER)
                should_tile = false;

        /* Cursors are always linear, and the user can request linear as well.
         */
        if (tmpl->bind & (PIPE_BIND_LINEAR | PIPE_BIND_CURSOR))
                should_tile = false;

        /* 1D and 1D_ARRAY textures are always raster-order. */
        if (tmpl->target == PIPE_TEXTURE_1D ||
            tmpl->target == PIPE_TEXTURE_1D_ARRAY)
                should_tile = false;

        /* Scanout BOs for simulator need to be linear for interaction with
         * i965.
         */
        if (using_vc5_simulator &&
            tmpl->bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
                should_tile = false;

        /* No user-specified modifier; determine our own. */
        if (count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID) {
                linear_ok = true;
                rsc->tiled = should_tile;
        } else if (should_tile &&
                   find_modifier(DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
                                 modifiers, count)) {
                rsc->tiled = true;
        } else if (linear_ok) {
                rsc->tiled = false;
        } else {
                fprintf(stderr, "Unsupported modifier requested\n");
                return NULL;
        }

        rsc->internal_format = prsc->format;

        vc5_setup_slices(rsc);
        if (!vc5_resource_bo_alloc(rsc))
                goto fail;

        return prsc;
fail:
        vc5_resource_destroy(pscreen, prsc);
        return NULL;
}

struct pipe_resource *
vc5_resource_create(struct pipe_screen *pscreen,
                    const struct pipe_resource *tmpl)
{
        const uint64_t mod = DRM_FORMAT_MOD_INVALID;
        return vc5_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
}

static struct pipe_resource *
vc5_resource_from_handle(struct pipe_screen *pscreen,
                         const struct pipe_resource *tmpl,
                         struct winsys_handle *whandle,
                         unsigned usage)
{
        struct vc5_screen *screen = vc5_screen(pscreen);
        struct vc5_resource *rsc = vc5_resource_setup(pscreen, tmpl);
        struct pipe_resource *prsc = &rsc->base;
        struct vc5_resource_slice *slice = &rsc->slices[0];

        if (!rsc)
                return NULL;

        switch (whandle->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                rsc->tiled = false;
                break;
        /* XXX: UIF */
        default:
                fprintf(stderr,
                        "Attempt to import unsupported modifier 0x%llx\n",
                        (long long)whandle->modifier);
                goto fail;
        }

        if (whandle->offset != 0) {
                fprintf(stderr,
                        "Attempt to import unsupported winsys offset %u\n",
                        whandle->offset);
                goto fail;
        }

        switch (whandle->type) {
        case DRM_API_HANDLE_TYPE_SHARED:
                rsc->bo = vc5_bo_open_name(screen,
                                           whandle->handle, whandle->stride);
                break;
        case DRM_API_HANDLE_TYPE_FD:
                rsc->bo = vc5_bo_open_dmabuf(screen,
                                             whandle->handle, whandle->stride);
                break;
        default:
                fprintf(stderr,
                        "Attempt to import unsupported handle type %d\n",
                        whandle->type);
                goto fail;
        }

        if (!rsc->bo)
                goto fail;

        vc5_setup_slices(rsc);
        vc5_debug_resource_layout(rsc, "import");

        if (whandle->stride != slice->stride) {
                static bool warned = false;
                if (!warned) {
                        warned = true;
                        fprintf(stderr,
                                "Attempting to import %dx%d %s with "
                                "unsupported stride %d instead of %d\n",
                                prsc->width0, prsc->height0,
                                util_format_short_name(prsc->format),
                                whandle->stride,
                                slice->stride);
                }
                goto fail;
        }

        return prsc;

fail:
        vc5_resource_destroy(pscreen, prsc);
        return NULL;
}

static struct pipe_surface *
vc5_create_surface(struct pipe_context *pctx,
                   struct pipe_resource *ptex,
                   const struct pipe_surface *surf_tmpl)
{
        struct vc5_context *vc5 = vc5_context(pctx);
        struct vc5_screen *screen = vc5->screen;
        struct vc5_surface *surface = CALLOC_STRUCT(vc5_surface);
        struct vc5_resource *rsc = vc5_resource(ptex);

        if (!surface)
                return NULL;

        assert(surf_tmpl->u.tex.first_layer == surf_tmpl->u.tex.last_layer);

        struct pipe_surface *psurf = &surface->base;
        unsigned level = surf_tmpl->u.tex.level;
        struct vc5_resource_slice *slice = &rsc->slices[level];

        pipe_reference_init(&psurf->reference, 1);
        pipe_resource_reference(&psurf->texture, ptex);

        psurf->context = pctx;
        psurf->format = surf_tmpl->format;
        psurf->width = u_minify(ptex->width0, level);
        psurf->height = u_minify(ptex->height0, level);
        psurf->u.tex.level = level;
        psurf->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
        psurf->u.tex.last_layer = surf_tmpl->u.tex.last_layer;

        surface->offset = (slice->offset +
                           psurf->u.tex.first_layer * rsc->cube_map_stride);
        surface->tiling = slice->tiling;

        surface->format = vc5_get_rt_format(&screen->devinfo, psurf->format);

        if (util_format_is_depth_or_stencil(psurf->format)) {
                switch (psurf->format) {
                case PIPE_FORMAT_Z16_UNORM:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_16;
                        break;
                case PIPE_FORMAT_Z32_FLOAT:
                case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_32F;
                        break;
                default:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_24;
                }
        } else {
                uint32_t bpp, type;
                vc5_get_internal_type_bpp_for_output_format(&screen->devinfo,
                                                            surface->format,
                                                            &type, &bpp);
                surface->internal_type = type;
                surface->internal_bpp = bpp;
        }

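        /* The slice's padded height in rows is size / stride; divide by the
         * UIF block height (two utiles) to express it in UIF blocks.
         */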
        if (surface->tiling == VC5_TILING_UIF_NO_XOR ||
            surface->tiling == VC5_TILING_UIF_XOR) {
                surface->padded_height_of_output_image_in_uif_blocks =
                        ((slice->size / slice->stride) /
                         (2 * vc5_utile_height(rsc->cpp)));
        }

        if (rsc->separate_stencil) {
                surface->separate_stencil =
                        vc5_create_surface(pctx, &rsc->separate_stencil->base,
                                           surf_tmpl);
        }

        return &surface->base;
}

static void
vc5_surface_destroy(struct pipe_context *pctx, struct pipe_surface *psurf)
{
        struct vc5_surface *surf = vc5_surface(psurf);

        if (surf->separate_stencil)
                pipe_surface_reference(&surf->separate_stencil, NULL);

        pipe_resource_reference(&psurf->texture, NULL);
        FREE(psurf);
}

static void
vc5_flush_resource(struct pipe_context *pctx, struct pipe_resource *resource)
{
        /* All calls to flush_resource are followed by a flush of the context,
         * so there's nothing to do.
         */
}

static enum pipe_format
vc5_resource_get_internal_format(struct pipe_resource *prsc)
{
        return vc5_resource(prsc)->internal_format;
}

static void
vc5_resource_set_stencil(struct pipe_resource *prsc,
                         struct pipe_resource *stencil)
{
        vc5_resource(prsc)->separate_stencil = vc5_resource(stencil);
}

static struct pipe_resource *
vc5_resource_get_stencil(struct pipe_resource *prsc)
{
        struct vc5_resource *rsc = vc5_resource(prsc);

        return &rsc->separate_stencil->base;
}

static const struct u_transfer_vtbl transfer_vtbl = {
        .resource_create = vc5_resource_create,
        .resource_destroy = vc5_resource_destroy,
        .transfer_map = vc5_resource_transfer_map,
        .transfer_unmap = vc5_resource_transfer_unmap,
        .transfer_flush_region = u_default_transfer_flush_region,
        .get_internal_format = vc5_resource_get_internal_format,
        .set_stencil = vc5_resource_set_stencil,
        .get_stencil = vc5_resource_get_stencil,
};

void
vc5_resource_screen_init(struct pipe_screen *pscreen)
{
        pscreen->resource_create_with_modifiers =
                vc5_resource_create_with_modifiers;
        pscreen->resource_create = u_transfer_helper_resource_create;
        pscreen->resource_from_handle = vc5_resource_from_handle;
        pscreen->resource_get_handle = vc5_resource_get_handle;
        pscreen->resource_destroy = u_transfer_helper_resource_destroy;
        pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
                                                            true, true, true);
}

void
vc5_resource_context_init(struct pipe_context *pctx)
{
        pctx->transfer_map = u_transfer_helper_transfer_map;
        pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
        pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
        pctx->buffer_subdata = u_default_buffer_subdata;
        pctx->texture_subdata = u_default_texture_subdata;
        pctx->create_surface = vc5_create_surface;
        pctx->surface_destroy = vc5_surface_destroy;
        pctx->resource_copy_region = util_resource_copy_region;
        pctx->blit = vc5_blit;
        pctx->flush_resource = vc5_flush_resource;
}