broadcom/vc5: Introduce enums for internal depth/type, with V3D prefixes.
[mesa.git] src/gallium/drivers/vc5/vc5_resource.c
/*
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_blit.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_surface.h"
#include "util/u_transfer_helper.h"
#include "util/u_upload_mgr.h"
#include "util/u_format_zs.h"

#include "drm_fourcc.h"
#include "vc5_screen.h"
#include "vc5_context.h"
#include "vc5_resource.h"
#include "vc5_tiling.h"
#include "broadcom/cle/v3d_packet_v33_pack.h"

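/* Fallback for drm_fourcc.h headers that don't define the invalid modifier
 * yet.
 */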
#ifndef DRM_FORMAT_MOD_INVALID
#define DRM_FORMAT_MOD_INVALID ((1ULL << 56) - 1)
#endif

static void
vc5_debug_resource_layout(struct vc5_resource *rsc, const char *caller)
{
        if (!(V3D_DEBUG & V3D_DEBUG_SURFACE))
                return;

        struct pipe_resource *prsc = &rsc->base;

        if (prsc->target == PIPE_BUFFER) {
                fprintf(stderr,
                        "rsc %s %p (format %s), %dx%d buffer @0x%08x-0x%08x\n",
                        caller, rsc,
                        util_format_short_name(prsc->format),
                        prsc->width0, prsc->height0,
                        rsc->bo->offset,
                        rsc->bo->offset + rsc->bo->size - 1);
                return;
        }

        static const char *const tiling_descriptions[] = {
                [VC5_TILING_RASTER] = "R",
                [VC5_TILING_LINEARTILE] = "LT",
                [VC5_TILING_UBLINEAR_1_COLUMN] = "UB1",
                [VC5_TILING_UBLINEAR_2_COLUMN] = "UB2",
                [VC5_TILING_UIF_NO_XOR] = "UIF",
                [VC5_TILING_UIF_XOR] = "UIF^",
        };

        for (int i = 0; i <= prsc->last_level; i++) {
                struct vc5_resource_slice *slice = &rsc->slices[i];

                int level_width = slice->stride / rsc->cpp;
                int level_height = slice->size / slice->stride;

                fprintf(stderr,
                        "rsc %s %p (format %s), %dx%d: "
                        "level %d (%s) %dx%d -> %dx%d, stride %d@0x%08x\n",
                        caller, rsc,
                        util_format_short_name(prsc->format),
                        prsc->width0, prsc->height0,
                        i, tiling_descriptions[slice->tiling],
                        u_minify(prsc->width0, i),
                        u_minify(prsc->height0, i),
                        level_width,
                        level_height,
                        slice->stride,
                        rsc->bo->offset + slice->offset);
        }
}

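/* (Re)allocates the BO backing the resource.  Because slices are laid out
 * from smallest to largest mip level, slice 0's offset + size covers one full
 * miptree, and additional array/cube/3D layers are spaced cube_map_stride
 * apart.
 */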
static bool
vc5_resource_bo_alloc(struct vc5_resource *rsc)
{
        struct pipe_resource *prsc = &rsc->base;
        struct pipe_screen *pscreen = prsc->screen;
        struct vc5_bo *bo;
        int layers = (prsc->target == PIPE_TEXTURE_3D ?
                      prsc->depth0 : prsc->array_size);

        bo = vc5_bo_alloc(vc5_screen(pscreen),
                          rsc->slices[0].offset +
                          rsc->slices[0].size +
                          rsc->cube_map_stride * layers - 1,
                          "resource");
        if (bo) {
                vc5_bo_unreference(&rsc->bo);
                rsc->bo = bo;
                vc5_debug_resource_layout(rsc, "alloc");
                return true;
        } else {
                return false;
        }
}

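/* Finishes a CPU mapping: for tiled resources, the temporary linear staging
 * buffer is tiled back into the BO on write and then freed.
 */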
static void
vc5_resource_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *ptrans)
{
        struct vc5_context *vc5 = vc5_context(pctx);
        struct vc5_transfer *trans = vc5_transfer(ptrans);

        if (trans->map) {
                struct vc5_resource *rsc = vc5_resource(ptrans->resource);
                struct vc5_resource_slice *slice = &rsc->slices[ptrans->level];

                if (ptrans->usage & PIPE_TRANSFER_WRITE) {
                        vc5_store_tiled_image(rsc->bo->map + slice->offset +
                                              ptrans->box.z * rsc->cube_map_stride,
                                              slice->stride,
                                              trans->map, ptrans->stride,
                                              slice->tiling, rsc->cpp,
                                              rsc->base.height0,
                                              &ptrans->box);
                }
                free(trans->map);
        }

        pipe_resource_reference(&ptrans->resource, NULL);
        slab_free(&vc5->transfer_pool, ptrans);
}

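/* Maps a resource for CPU access.  Raster (linear) resources return a direct
 * pointer into the BO; tiled resources get a malloc'd linear shadow copy that
 * is untiled here on read and re-tiled at unmap time on write.
 */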
static void *
vc5_resource_transfer_map(struct pipe_context *pctx,
                          struct pipe_resource *prsc,
                          unsigned level, unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **pptrans)
{
        struct vc5_context *vc5 = vc5_context(pctx);
        struct vc5_resource *rsc = vc5_resource(prsc);
        struct vc5_transfer *trans;
        struct pipe_transfer *ptrans;
        enum pipe_format format = prsc->format;
        char *buf;

        /* MSAA maps should have been handled by u_transfer_helper. */
        assert(prsc->nr_samples <= 1);

        /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
         * being mapped.
         */
        if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
            !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
            !(prsc->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) &&
            prsc->last_level == 0 &&
            prsc->width0 == box->width &&
            prsc->height0 == box->height &&
            prsc->depth0 == box->depth &&
            prsc->array_size == 1 &&
            rsc->bo->private) {
                usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
        }

        if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
                if (vc5_resource_bo_alloc(rsc)) {
                        /* If it might be bound as one of our vertex buffers
                         * or UBOs, make sure we re-emit vertex buffer state
                         * or uniforms.
                         */
                        if (prsc->bind & PIPE_BIND_VERTEX_BUFFER)
                                vc5->dirty |= VC5_DIRTY_VTXBUF;
                        if (prsc->bind & PIPE_BIND_CONSTANT_BUFFER)
                                vc5->dirty |= VC5_DIRTY_CONSTBUF;
                } else {
                        /* If we failed to reallocate, flush users so that we
                         * don't violate any syncing requirements.
                         */
                        vc5_flush_jobs_reading_resource(vc5, prsc);
                }
        } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
                /* If we're writing and the buffer is being used by the CL, we
                 * have to flush the CL first.  If we're only reading, we need
                 * to flush if the CL has written our buffer.
                 */
                if (usage & PIPE_TRANSFER_WRITE)
                        vc5_flush_jobs_reading_resource(vc5, prsc);
                else
                        vc5_flush_jobs_writing_resource(vc5, prsc);
        }

        if (usage & PIPE_TRANSFER_WRITE) {
                rsc->writes++;
                rsc->initialized_buffers = ~0;
        }

        trans = slab_alloc(&vc5->transfer_pool);
        if (!trans)
                return NULL;

        /* XXX: Handle DONTBLOCK, DISCARD_RANGE, PERSISTENT, COHERENT. */

        /* slab_alloc_st() doesn't zero: */
        memset(trans, 0, sizeof(*trans));
        ptrans = &trans->base;

        pipe_resource_reference(&ptrans->resource, prsc);
        ptrans->level = level;
        ptrans->usage = usage;
        ptrans->box = *box;

        /* Note that the current kernel implementation is synchronous, so no
         * need to do syncing stuff here yet.
         */

        if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
                buf = vc5_bo_map_unsynchronized(rsc->bo);
        else
                buf = vc5_bo_map(rsc->bo);
        if (!buf) {
                fprintf(stderr, "Failed to map bo\n");
                goto fail;
        }

        *pptrans = ptrans;

        struct vc5_resource_slice *slice = &rsc->slices[level];
        if (rsc->tiled) {
                /* No direct mappings of tiled, since we need to manually
                 * tile/untile.
                 */
                if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
                        return NULL;

                ptrans->stride = ptrans->box.width * rsc->cpp;
                ptrans->layer_stride = ptrans->stride * ptrans->box.height;

                trans->map = malloc(ptrans->layer_stride * ptrans->box.depth);
                if (!trans->map)
                        goto fail;

                if (usage & PIPE_TRANSFER_READ) {
                        vc5_load_tiled_image(trans->map, ptrans->stride,
                                             buf + slice->offset +
                                             ptrans->box.z * rsc->cube_map_stride,
                                             slice->stride,
                                             slice->tiling, rsc->cpp,
                                             rsc->base.height0,
                                             &ptrans->box);
                }
                return trans->map;
        } else {
                ptrans->stride = slice->stride;
                ptrans->layer_stride = ptrans->stride;

                return buf + slice->offset +
                        ptrans->box.y / util_format_get_blockheight(format) * ptrans->stride +
                        ptrans->box.x / util_format_get_blockwidth(format) * rsc->cpp +
                        ptrans->box.z * rsc->cube_map_stride;
        }

fail:
        vc5_resource_transfer_unmap(pctx, ptrans);
        return NULL;
}

static void
vc5_resource_destroy(struct pipe_screen *pscreen,
                     struct pipe_resource *prsc)
{
        struct vc5_resource *rsc = vc5_resource(prsc);

        vc5_bo_unreference(&rsc->bo);
        free(rsc);
}

static boolean
vc5_resource_get_handle(struct pipe_screen *pscreen,
                        struct pipe_context *pctx,
                        struct pipe_resource *prsc,
                        struct winsys_handle *whandle,
                        unsigned usage)
{
        struct vc5_resource *rsc = vc5_resource(prsc);
        struct vc5_bo *bo = rsc->bo;

        whandle->stride = rsc->slices[0].stride;

        /* If we're passing some reference to our BO out to some other part of
         * the system, then we can't do any optimizations about only us being
         * the ones seeing it (like BO caching).
         */
        bo->private = false;

        switch (whandle->type) {
        case DRM_API_HANDLE_TYPE_SHARED:
                return vc5_bo_flink(bo, &whandle->handle);
        case DRM_API_HANDLE_TYPE_KMS:
                whandle->handle = bo->handle;
                return TRUE;
        case DRM_API_HANDLE_TYPE_FD:
                whandle->handle = vc5_bo_get_dmabuf(bo);
                return whandle->handle != -1;
        }

        return FALSE;
}

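/* Computes the per-mipmap-level offsets, strides, and tiling modes.  Levels
 * are walked from the smallest (last_level) down to level 0, so the smallest
 * levels land at the lowest offsets and level 0's offset + size marks the end
 * of one full miptree.  Levels 2 and up are minified from power-of-two-padded
 * dimensions, and each level's tiling mode is chosen by comparing its size
 * against the utile and UIF block dimensions.
 */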
static void
vc5_setup_slices(struct vc5_resource *rsc)
{
        struct pipe_resource *prsc = &rsc->base;
        uint32_t width = prsc->width0;
        uint32_t height = prsc->height0;
        uint32_t pot_width = util_next_power_of_two(width);
        uint32_t pot_height = util_next_power_of_two(height);
        uint32_t offset = 0;
        uint32_t utile_w = vc5_utile_width(rsc->cpp);
        uint32_t utile_h = vc5_utile_height(rsc->cpp);
        uint32_t uif_block_w = utile_w * 2;
        uint32_t uif_block_h = utile_h * 2;
        bool msaa = prsc->nr_samples > 1;
        /* MSAA textures/renderbuffers are always laid out as single-level
         * UIF.
         */
        bool uif_top = msaa;

        for (int i = prsc->last_level; i >= 0; i--) {
                struct vc5_resource_slice *slice = &rsc->slices[i];

                uint32_t level_width, level_height;
                if (i < 2) {
                        level_width = u_minify(width, i);
                        level_height = u_minify(height, i);
                } else {
                        level_width = u_minify(pot_width, i);
                        level_height = u_minify(pot_height, i);
                }

                if (msaa) {
                        level_width *= 2;
                        level_height *= 2;
                }

                if (!rsc->tiled) {
                        slice->tiling = VC5_TILING_RASTER;
                        if (prsc->target == PIPE_TEXTURE_1D)
                                level_width = align(level_width, 64 / rsc->cpp);
                } else {
                        if ((i != 0 || !uif_top) &&
                            (level_width <= utile_w ||
                             level_height <= utile_h)) {
                                slice->tiling = VC5_TILING_LINEARTILE;
                                level_width = align(level_width, utile_w);
                                level_height = align(level_height, utile_h);
                        } else if ((i != 0 || !uif_top) &&
                                   level_width <= uif_block_w) {
                                slice->tiling = VC5_TILING_UBLINEAR_1_COLUMN;
                                level_width = align(level_width, uif_block_w);
                                level_height = align(level_height, uif_block_h);
                        } else if ((i != 0 || !uif_top) &&
                                   level_width <= 2 * uif_block_w) {
                                slice->tiling = VC5_TILING_UBLINEAR_2_COLUMN;
                                level_width = align(level_width, 2 * uif_block_w);
                                level_height = align(level_height, uif_block_h);
                        } else {
                                slice->tiling = VC5_TILING_UIF_NO_XOR;

                                /* We align the width to a 4-block column of
                                 * UIF blocks, but we only align height to UIF
                                 * blocks.
                                 */
                                level_width = align(level_width,
                                                    4 * uif_block_w);
                                level_height = align(level_height,
                                                     uif_block_h);
                        }
                }

                slice->offset = offset;
                slice->stride = level_width * rsc->cpp;
                slice->size = level_height * slice->stride;

                offset += slice->size;
        }

        /* UIF/UBLINEAR levels need to be aligned to UIF-blocks, and LT only
         * needs to be aligned to utile boundaries.  Since tiles are laid out
         * from small to big in memory, we need to align the later UIF slices
         * to UIF blocks, if they were preceded by non-UIF-block-aligned LT
         * slices.
         *
         * We additionally align to 4k, which improves UIF XOR performance.
         */
        uint32_t page_align_offset = (align(rsc->slices[0].offset, 4096) -
                                      rsc->slices[0].offset);
        if (page_align_offset) {
                for (int i = 0; i <= prsc->last_level; i++)
                        rsc->slices[i].offset += page_align_offset;
        }

        /* Arrays, cubes, and 3D textures have a stride which is the distance
         * from one full mipmap tree to the next (64b aligned).
         */
        rsc->cube_map_stride = align(rsc->slices[0].offset +
                                     rsc->slices[0].size, 64);
}

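/* Common setup shared by resource_create and resource_from_handle: copies the
 * template into our struct and derives cpp.  For MSAA surfaces the tile
 * buffer's internal format determines the per-sample size, so cpp comes from
 * the internal bpp of the render target format rather than from the pipe
 * format's block size.
 */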
static struct vc5_resource *
vc5_resource_setup(struct pipe_screen *pscreen,
                   const struct pipe_resource *tmpl)
{
        struct vc5_resource *rsc = CALLOC_STRUCT(vc5_resource);
        if (!rsc)
                return NULL;
        struct pipe_resource *prsc = &rsc->base;

        *prsc = *tmpl;

        pipe_reference_init(&prsc->reference, 1);
        prsc->screen = pscreen;

        if (prsc->nr_samples <= 1) {
                rsc->cpp = util_format_get_blocksize(prsc->format);
        } else {
                assert(vc5_rt_format_supported(prsc->format));
                uint32_t output_image_format = vc5_get_rt_format(prsc->format);
                uint32_t internal_type;
                uint32_t internal_bpp;
                vc5_get_internal_type_bpp_for_output_format(output_image_format,
                                                            &internal_type,
                                                            &internal_bpp);
                switch (internal_bpp) {
                case V3D_INTERNAL_BPP_32:
                        rsc->cpp = 4;
                        break;
                case V3D_INTERNAL_BPP_64:
                        rsc->cpp = 8;
                        break;
                case V3D_INTERNAL_BPP_128:
                        rsc->cpp = 16;
                        break;
                }
        }

        assert(rsc->cpp);

        return rsc;
}

static bool
find_modifier(uint64_t needle, const uint64_t *haystack, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                if (haystack[i] == needle)
                        return true;
        }

        return false;
}

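/* Creates a resource, choosing a layout based on the modifiers the caller
 * offers: a single DRM_FORMAT_MOD_INVALID entry means "driver's choice", the
 * DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED modifier selects the tiled layout when
 * tiling is otherwise allowed, DRM_FORMAT_MOD_LINEAR selects linear, and any
 * other set of modifiers is rejected.
 */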
static struct pipe_resource *
vc5_resource_create_with_modifiers(struct pipe_screen *pscreen,
                                   const struct pipe_resource *tmpl,
                                   const uint64_t *modifiers,
                                   int count)
{
        bool linear_ok = find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
        struct vc5_resource *rsc = vc5_resource_setup(pscreen, tmpl);
        if (!rsc)
                return NULL;
        struct pipe_resource *prsc = &rsc->base;
        /* Use a tiled layout if we can, for better 3D performance. */
        bool should_tile = true;

        /* VBOs/PBOs are untiled (and 1 height). */
        if (tmpl->target == PIPE_BUFFER)
                should_tile = false;

        /* Cursors are always linear, and the user can request linear as well.
         */
        if (tmpl->bind & (PIPE_BIND_LINEAR | PIPE_BIND_CURSOR))
                should_tile = false;

        /* 1D and 1D_ARRAY textures are always raster-order. */
        if (tmpl->target == PIPE_TEXTURE_1D ||
            tmpl->target == PIPE_TEXTURE_1D_ARRAY)
                should_tile = false;

        /* Scanout BOs for simulator need to be linear for interaction with
         * i965.
         */
        if (using_vc5_simulator &&
            tmpl->bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
                should_tile = false;

        /* No user-specified modifier; determine our own. */
        if (count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID) {
                linear_ok = true;
                rsc->tiled = should_tile;
        } else if (should_tile &&
                   find_modifier(DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
                                 modifiers, count)) {
                rsc->tiled = true;
        } else if (linear_ok) {
                rsc->tiled = false;
        } else {
                fprintf(stderr, "Unsupported modifier requested\n");
                goto fail;
        }

        rsc->internal_format = prsc->format;

        vc5_setup_slices(rsc);
        if (!vc5_resource_bo_alloc(rsc))
                goto fail;

        return prsc;
fail:
        vc5_resource_destroy(pscreen, prsc);
        return NULL;
}

struct pipe_resource *
vc5_resource_create(struct pipe_screen *pscreen,
                    const struct pipe_resource *tmpl)
{
        const uint64_t mod = DRM_FORMAT_MOD_INVALID;
        return vc5_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
}

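/* Imports a buffer shared by another process or driver.  Only linear imports
 * at offset 0 are supported so far, and the stride handed to us by the winsys
 * must match the stride we would have computed ourselves.
 */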
static struct pipe_resource *
vc5_resource_from_handle(struct pipe_screen *pscreen,
                         const struct pipe_resource *tmpl,
                         struct winsys_handle *whandle,
                         unsigned usage)
{
        struct vc5_screen *screen = vc5_screen(pscreen);
        struct vc5_resource *rsc = vc5_resource_setup(pscreen, tmpl);
        struct pipe_resource *prsc = &rsc->base;
        struct vc5_resource_slice *slice = &rsc->slices[0];

        if (!rsc)
                return NULL;

        switch (whandle->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                rsc->tiled = false;
                break;
        /* XXX: UIF */
        default:
                fprintf(stderr,
                        "Attempt to import unsupported modifier 0x%llx\n",
                        (unsigned long long)whandle->modifier);
                goto fail;
        }

        if (whandle->offset != 0) {
                fprintf(stderr,
                        "Attempt to import unsupported winsys offset %u\n",
                        whandle->offset);
                goto fail;
        }

        switch (whandle->type) {
        case DRM_API_HANDLE_TYPE_SHARED:
                rsc->bo = vc5_bo_open_name(screen,
                                           whandle->handle, whandle->stride);
                break;
        case DRM_API_HANDLE_TYPE_FD:
                rsc->bo = vc5_bo_open_dmabuf(screen,
                                             whandle->handle, whandle->stride);
                break;
        default:
                fprintf(stderr,
                        "Attempt to import unsupported handle type %d\n",
                        whandle->type);
                goto fail;
        }

        if (!rsc->bo)
                goto fail;

        vc5_setup_slices(rsc);
        vc5_debug_resource_layout(rsc, "import");

        if (whandle->stride != slice->stride) {
                static bool warned = false;
                if (!warned) {
                        warned = true;
                        fprintf(stderr,
                                "Attempting to import %dx%d %s with "
                                "unsupported stride %d instead of %d\n",
                                prsc->width0, prsc->height0,
                                util_format_short_name(prsc->format),
                                whandle->stride,
                                slice->stride);
                }
                goto fail;
        }

        return prsc;

fail:
        vc5_resource_destroy(pscreen, prsc);
        return NULL;
}

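/* Creates a pipe_surface view of one level/layer of a resource, precomputing
 * the fields (offset, tiling mode, internal type/bpp, and UIF padded height)
 * that will be needed when the surface is emitted as a render target or
 * depth/stencil buffer.
 */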
static struct pipe_surface *
vc5_create_surface(struct pipe_context *pctx,
                   struct pipe_resource *ptex,
                   const struct pipe_surface *surf_tmpl)
{
        struct vc5_surface *surface = CALLOC_STRUCT(vc5_surface);
        struct vc5_resource *rsc = vc5_resource(ptex);

        if (!surface)
                return NULL;

        assert(surf_tmpl->u.tex.first_layer == surf_tmpl->u.tex.last_layer);

        struct pipe_surface *psurf = &surface->base;
        unsigned level = surf_tmpl->u.tex.level;
        struct vc5_resource_slice *slice = &rsc->slices[level];

        struct vc5_resource_slice *separate_stencil_slice = NULL;
        if (rsc->separate_stencil)
                separate_stencil_slice = &rsc->separate_stencil->slices[level];

        pipe_reference_init(&psurf->reference, 1);
        pipe_resource_reference(&psurf->texture, ptex);

        psurf->context = pctx;
        psurf->format = surf_tmpl->format;
        psurf->width = u_minify(ptex->width0, level);
        psurf->height = u_minify(ptex->height0, level);
        psurf->u.tex.level = level;
        psurf->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
        psurf->u.tex.last_layer = surf_tmpl->u.tex.last_layer;

        surface->offset = (slice->offset +
                           psurf->u.tex.first_layer * rsc->cube_map_stride);
        surface->tiling = slice->tiling;
        if (separate_stencil_slice) {
                surface->separate_stencil_offset =
                        (separate_stencil_slice->offset +
                         psurf->u.tex.first_layer *
                         rsc->separate_stencil->cube_map_stride);
                surface->separate_stencil_tiling =
                        separate_stencil_slice->tiling;
        }

        surface->format = vc5_get_rt_format(psurf->format);

        if (util_format_is_depth_or_stencil(psurf->format)) {
                switch (psurf->format) {
                case PIPE_FORMAT_Z16_UNORM:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_16;
                        break;
                case PIPE_FORMAT_Z32_FLOAT:
                case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_32F;
                        break;
                default:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_24;
                }
        } else {
                uint32_t bpp, type;
                vc5_get_internal_type_bpp_for_output_format(surface->format,
                                                            &type, &bpp);
                surface->internal_type = type;
                surface->internal_bpp = bpp;
        }

        if (surface->tiling == VC5_TILING_UIF_NO_XOR ||
            surface->tiling == VC5_TILING_UIF_XOR) {
                surface->padded_height_of_output_image_in_uif_blocks =
                        ((slice->size / slice->stride) /
                         (2 * vc5_utile_height(rsc->cpp)));

                if (separate_stencil_slice) {
                        surface->separate_stencil_padded_height_of_output_image_in_uif_blocks =
                                ((separate_stencil_slice->size /
                                  separate_stencil_slice->stride) /
                                 (2 * vc5_utile_height(rsc->separate_stencil->cpp)));
                }
        }

        return &surface->base;
}

static void
vc5_surface_destroy(struct pipe_context *pctx, struct pipe_surface *psurf)
{
        pipe_resource_reference(&psurf->texture, NULL);
        FREE(psurf);
}

static void
vc5_flush_resource(struct pipe_context *pctx, struct pipe_resource *resource)
{
        /* All calls to flush_resource are followed by a flush of the context,
         * so there's nothing to do.
         */
}

static enum pipe_format
vc5_resource_get_internal_format(struct pipe_resource *prsc)
{
        return vc5_resource(prsc)->internal_format;
}

static void
vc5_resource_set_stencil(struct pipe_resource *prsc,
                         struct pipe_resource *stencil)
{
        vc5_resource(prsc)->separate_stencil = vc5_resource(stencil);
}

static struct pipe_resource *
vc5_resource_get_stencil(struct pipe_resource *prsc)
{
        struct vc5_resource *rsc = vc5_resource(prsc);

        return &rsc->separate_stencil->base;
}

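/* u_transfer_helper hooks: the helper handles the cases we punt on above
 * (MSAA maps and the separate-stencil bookkeeping for depth/stencil formats),
 * calling back into these functions for the underlying resources.
 */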
static const struct u_transfer_vtbl transfer_vtbl = {
        .resource_create = vc5_resource_create,
        .resource_destroy = vc5_resource_destroy,
        .transfer_map = vc5_resource_transfer_map,
        .transfer_unmap = vc5_resource_transfer_unmap,
        .transfer_flush_region = u_default_transfer_flush_region,
        .get_internal_format = vc5_resource_get_internal_format,
        .set_stencil = vc5_resource_set_stencil,
        .get_stencil = vc5_resource_get_stencil,
};

void
vc5_resource_screen_init(struct pipe_screen *pscreen)
{
        pscreen->resource_create_with_modifiers =
                vc5_resource_create_with_modifiers;
        pscreen->resource_create = u_transfer_helper_resource_create;
        pscreen->resource_from_handle = vc5_resource_from_handle;
        pscreen->resource_get_handle = vc5_resource_get_handle;
        pscreen->resource_destroy = u_transfer_helper_resource_destroy;
        pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
                                                            true, true, true);
}

void
vc5_resource_context_init(struct pipe_context *pctx)
{
        pctx->transfer_map = u_transfer_helper_transfer_map;
        pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
        pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
        pctx->buffer_subdata = u_default_buffer_subdata;
        pctx->texture_subdata = u_default_texture_subdata;
        pctx->create_surface = vc5_create_surface;
        pctx->surface_destroy = vc5_surface_destroy;
        pctx->resource_copy_region = util_resource_copy_region;
        pctx->blit = vc5_blit;
        pctx->flush_resource = vc5_flush_resource;
}