/*
 * Copyright 2018-2019 Alyssa Rosenzweig
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
26 #include "pan_context.h"
27 #include "pan_cmdstream.h"
29 #include "panfrost-quirks.h"
31 static struct mali_rt_format
32 panfrost_mfbd_format(struct pipe_surface
*surf
)
34 /* Explode details on the format */
36 const struct util_format_description
*desc
=
37 util_format_description(surf
->format
);
39 /* The swizzle for rendering is inverted from texturing */
41 unsigned char swizzle
[4];
42 panfrost_invert_swizzle(desc
->swizzle
, swizzle
);
44 /* Fill in accordingly, defaulting to 8-bit UNORM */
46 struct mali_rt_format fmt
= {
49 .nr_channels
= MALI_POSITIVE(desc
->nr_channels
),
52 .swizzle
= panfrost_translate_swizzle_4(swizzle
),
56 if (desc
->colorspace
== UTIL_FORMAT_COLORSPACE_SRGB
)
57 fmt
.flags
|= MALI_MFBD_FORMAT_SRGB
;
59 /* sRGB handled as a dedicated flag */
60 enum pipe_format linearized
= util_format_linear(surf
->format
);
62 /* If RGB, we're good to go */
63 if (util_format_is_unorm8(desc
))
66 /* Set flags for alternative formats */
69 case PIPE_FORMAT_B5G6R5_UNORM
:
70 fmt
.unk1
= 0x14000000;
71 fmt
.nr_channels
= MALI_POSITIVE(2);
75 case PIPE_FORMAT_A4B4G4R4_UNORM
:
76 case PIPE_FORMAT_B4G4R4A4_UNORM
:
77 case PIPE_FORMAT_R4G4B4A4_UNORM
:
78 fmt
.unk1
= 0x10000000;
80 fmt
.nr_channels
= MALI_POSITIVE(1);
83 case PIPE_FORMAT_R10G10B10A2_UNORM
:
84 case PIPE_FORMAT_B10G10R10A2_UNORM
:
85 case PIPE_FORMAT_R10G10B10X2_UNORM
:
86 case PIPE_FORMAT_B10G10R10X2_UNORM
:
87 fmt
.unk1
= 0x08000000;
89 fmt
.nr_channels
= MALI_POSITIVE(1);
92 case PIPE_FORMAT_B5G5R5A1_UNORM
:
93 case PIPE_FORMAT_R5G5B5A1_UNORM
:
94 case PIPE_FORMAT_B5G5R5X1_UNORM
:
95 fmt
.unk1
= 0x18000000;
97 fmt
.nr_channels
= MALI_POSITIVE(2);
101 case PIPE_FORMAT_R8_UINT
:
102 case PIPE_FORMAT_R8_SINT
:
103 fmt
.unk1
= 0x80000000;
105 fmt
.nr_channels
= MALI_POSITIVE(1);
109 case PIPE_FORMAT_R11G11B10_FLOAT
:
110 case PIPE_FORMAT_R8G8B8A8_UINT
:
111 case PIPE_FORMAT_R8G8B8A8_SINT
:
112 case PIPE_FORMAT_R16G16_FLOAT
:
113 case PIPE_FORMAT_R16G16_UINT
:
114 case PIPE_FORMAT_R16G16_SINT
:
115 case PIPE_FORMAT_R32_FLOAT
:
116 case PIPE_FORMAT_R32_UINT
:
117 case PIPE_FORMAT_R32_SINT
:
118 case PIPE_FORMAT_R10G10B10A2_UINT
:
119 fmt
.unk1
= 0x88000000;
121 fmt
.nr_channels
= MALI_POSITIVE(4);
125 case PIPE_FORMAT_R8G8_UINT
:
126 case PIPE_FORMAT_R8G8_SINT
:
127 case PIPE_FORMAT_R16_FLOAT
:
128 case PIPE_FORMAT_R16_UINT
:
129 case PIPE_FORMAT_R16_SINT
:
130 fmt
.unk1
= 0x84000000;
132 fmt
.nr_channels
= MALI_POSITIVE(2);
136 case PIPE_FORMAT_R32G32_FLOAT
:
137 case PIPE_FORMAT_R32G32_SINT
:
138 case PIPE_FORMAT_R32G32_UINT
:
139 case PIPE_FORMAT_R16G16B16A16_FLOAT
:
140 case PIPE_FORMAT_R16G16B16A16_SINT
:
141 case PIPE_FORMAT_R16G16B16A16_UINT
:
142 fmt
.unk1
= 0x8c000000;
144 fmt
.nr_channels
= MALI_POSITIVE(2);
147 /* Generic 128-bit */
148 case PIPE_FORMAT_R32G32B32A32_FLOAT
:
149 case PIPE_FORMAT_R32G32B32A32_SINT
:
150 case PIPE_FORMAT_R32G32B32A32_UINT
:
151 fmt
.unk1
= 0x90000000;
153 fmt
.nr_channels
= MALI_POSITIVE(4);
157 unreachable("Invalid format rendering");
166 struct panfrost_batch
*batch
,
167 struct mali_framebuffer
*fb
,
168 struct mali_framebuffer_extra
*fbx
,
169 struct mali_render_target
*rts
,
172 struct panfrost_context
*ctx
= batch
->ctx
;
173 struct pipe_context
*gallium
= (struct pipe_context
*) ctx
;
174 struct panfrost_device
*dev
= pan_device(gallium
->screen
);
176 for (unsigned i
= 0; i
< rt_count
; ++i
) {
177 if (!(batch
->clear
& (PIPE_CLEAR_COLOR0
<< i
)))
180 rts
[i
].clear_color_1
= batch
->clear_color
[i
][0];
181 rts
[i
].clear_color_2
= batch
->clear_color
[i
][1];
182 rts
[i
].clear_color_3
= batch
->clear_color
[i
][2];
183 rts
[i
].clear_color_4
= batch
->clear_color
[i
][3];
186 if (batch
->clear
& PIPE_CLEAR_DEPTH
) {
187 fb
->clear_depth
= batch
->clear_depth
;
190 if (batch
->clear
& PIPE_CLEAR_STENCIL
) {
191 fb
->clear_stencil
= batch
->clear_stencil
;
194 if (dev
->quirks
& IS_BIFROST
) {
195 fbx
->clear_color_1
= batch
->clear_color
[0][0];
196 fbx
->clear_color_2
= 0xc0000000 | (fbx
->clear_color_1
& 0xffff); /* WTF? */
201 panfrost_mfbd_set_cbuf(
202 struct mali_render_target
*rt
,
203 struct pipe_surface
*surf
)
205 struct panfrost_resource
*rsrc
= pan_resource(surf
->texture
);
206 struct panfrost_device
*dev
= pan_device(surf
->context
->screen
);
207 bool is_bifrost
= dev
->quirks
& IS_BIFROST
;
209 unsigned level
= surf
->u
.tex
.level
;
210 unsigned first_layer
= surf
->u
.tex
.first_layer
;
211 assert(surf
->u
.tex
.last_layer
== first_layer
);
212 int stride
= rsrc
->slices
[level
].stride
;
214 /* Only set layer_stride for layered MSAA rendering */
216 unsigned nr_samples
= surf
->texture
->nr_samples
;
217 unsigned layer_stride
= (nr_samples
> 1) ? rsrc
->slices
[level
].size0
: 0;
219 mali_ptr base
= panfrost_get_texture_address(rsrc
, level
, first_layer
, 0);
221 rt
->format
= panfrost_mfbd_format(surf
);
224 rt
->format
.msaa
= MALI_MSAA_LAYERED
;
225 else if (surf
->nr_samples
)
226 rt
->format
.msaa
= MALI_MSAA_AVERAGE
;
228 rt
->format
.msaa
= MALI_MSAA_SINGLE
;
230 /* Now, we set the modifier specific pieces */
232 if (rsrc
->modifier
== DRM_FORMAT_MOD_LINEAR
) {
234 rt
->format
.unk4
= 0x1;
236 rt
->format
.block
= MALI_BLOCK_LINEAR
;
239 rt
->framebuffer
= base
;
240 rt
->framebuffer_stride
= stride
/ 16;
241 rt
->layer_stride
= layer_stride
;
242 } else if (rsrc
->modifier
== DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED
) {
244 rt
->format
.unk3
|= 0x8;
246 rt
->format
.block
= MALI_BLOCK_TILED
;
249 rt
->framebuffer
= base
;
250 rt
->framebuffer_stride
= stride
;
251 rt
->layer_stride
= layer_stride
;
252 } else if (drm_is_afbc(rsrc
->modifier
)) {
253 rt
->format
.block
= MALI_BLOCK_AFBC
;
255 unsigned header_size
= rsrc
->slices
[level
].header_size
;
257 rt
->framebuffer
= base
+ header_size
;
258 rt
->layer_stride
= layer_stride
;
259 rt
->afbc
.metadata
= base
;
261 rt
->afbc
.flags
= MALI_AFBC_FLAGS
;
263 if (rsrc
->modifier
& AFBC_FORMAT_MOD_YTR
)
264 rt
->afbc
.flags
|= MALI_AFBC_YTR
;
266 /* TODO: The blob sets this to something nonzero, but it's not
267 * clear what/how to calculate/if it matters */
268 rt
->framebuffer_stride
= 0;
270 unreachable("Invalid mod");
275 panfrost_mfbd_set_zsbuf(
276 struct mali_framebuffer
*fb
,
277 struct mali_framebuffer_extra
*fbx
,
278 struct pipe_surface
*surf
)
280 struct panfrost_device
*dev
= pan_device(surf
->context
->screen
);
281 bool is_bifrost
= dev
->quirks
& IS_BIFROST
;
282 struct panfrost_resource
*rsrc
= pan_resource(surf
->texture
);
284 unsigned nr_samples
= surf
->texture
->nr_samples
;
285 nr_samples
= MAX2(nr_samples
, 1);
287 fbx
->zs_samples
= MALI_POSITIVE(nr_samples
);
289 unsigned level
= surf
->u
.tex
.level
;
290 unsigned first_layer
= surf
->u
.tex
.first_layer
;
291 assert(surf
->u
.tex
.last_layer
== first_layer
);
293 mali_ptr base
= panfrost_get_texture_address(rsrc
, level
, first_layer
, 0);
295 if (drm_is_afbc(rsrc
->modifier
)) {
296 /* The only Z/S format we can compress is Z24S8 or variants
297 * thereof (handled by the gallium frontend) */
298 assert(panfrost_is_z24s8_variant(surf
->format
));
300 unsigned header_size
= rsrc
->slices
[level
].header_size
;
302 fb
->mfbd_flags
|= MALI_MFBD_EXTRA
| MALI_MFBD_DEPTH_WRITE
;
304 fbx
->flags_hi
|= MALI_EXTRA_PRESENT
;
305 fbx
->flags_lo
|= MALI_EXTRA_ZS
| 0x1; /* unknown */
306 fbx
->zs_block
= MALI_BLOCK_AFBC
;
308 fbx
->ds_afbc
.depth_stencil
= base
+ header_size
;
309 fbx
->ds_afbc
.depth_stencil_afbc_metadata
= base
;
310 fbx
->ds_afbc
.depth_stencil_afbc_stride
= 0;
312 fbx
->ds_afbc
.flags
= MALI_AFBC_FLAGS
;
313 fbx
->ds_afbc
.padding
= 0x1000;
315 assert(rsrc
->modifier
== DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED
|| rsrc
->modifier
== DRM_FORMAT_MOD_LINEAR
);
317 /* TODO: Z32F(S8) support, which is always linear */
319 int stride
= rsrc
->slices
[level
].stride
;
321 unsigned layer_stride
= (nr_samples
> 1) ? rsrc
->slices
[level
].size0
: 0;
323 fb
->mfbd_flags
|= MALI_MFBD_EXTRA
| MALI_MFBD_DEPTH_WRITE
;
324 fbx
->flags_hi
|= MALI_EXTRA_PRESENT
;
325 fbx
->flags_lo
|= MALI_EXTRA_ZS
;
327 fbx
->ds_linear
.depth
= base
;
329 if (rsrc
->modifier
== DRM_FORMAT_MOD_LINEAR
) {
330 fbx
->zs_block
= MALI_BLOCK_LINEAR
;
331 fbx
->ds_linear
.depth_stride
= stride
/ 16;
332 fbx
->ds_linear
.depth_layer_stride
= layer_stride
;
335 fbx
->zs_block
= MALI_BLOCK_UNKNOWN
;
336 fbx
->flags_hi
|= 0x440;
337 fbx
->flags_lo
|= 0x1;
339 fbx
->zs_block
= MALI_BLOCK_TILED
;
342 fbx
->ds_linear
.depth_stride
= stride
;
343 fbx
->ds_linear
.depth_layer_stride
= layer_stride
;
346 if (panfrost_is_z24s8_variant(surf
->format
)) {
347 fbx
->flags_lo
|= 0x1;
348 } else if (surf
->format
== PIPE_FORMAT_Z32_FLOAT
) {
349 fbx
->flags_lo
|= 0xA;
350 fb
->mfbd_flags
^= 0x100;
351 fb
->mfbd_flags
|= 0x200;
352 } else if (surf
->format
== PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
) {
353 fbx
->flags_hi
|= 0x40;
354 fbx
->flags_lo
|= 0xA;
355 fb
->mfbd_flags
^= 0x100;
356 fb
->mfbd_flags
|= 0x201;
358 struct panfrost_resource
*stencil
= rsrc
->separate_stencil
;
359 struct panfrost_slice stencil_slice
= stencil
->slices
[level
];
360 unsigned stencil_layer_stride
= (nr_samples
> 1) ? stencil_slice
.size0
: 0;
362 fbx
->ds_linear
.stencil
= panfrost_get_texture_address(stencil
, level
, first_layer
, 0);
363 fbx
->ds_linear
.stencil_stride
= stencil_slice
.stride
;
364 fbx
->ds_linear
.stencil_layer_stride
= stencil_layer_stride
;
/* Helper for sequential uploads used for MFBD: copies *src into the
 * transfer's CPU mapping at the running byte offset, bounds-checks against
 * max, and advances the offset for the next upload. (The offset advance was
 * lost in this copy but is required by the sequential call sites; restored
 * here. Wrapped in do/while(0) and fully parenthesized per macro hygiene.) */

#define UPLOAD(dest, offset, src, max) do { \
        size_t sz = sizeof(*(src)); \
        memcpy((dest).cpu + (offset), (src), sz); \
        assert(((offset) + sz) <= (max)); \
        (offset) += sz; \
} while (0)
379 panfrost_mfbd_upload(struct panfrost_batch
*batch
,
380 struct mali_framebuffer
*fb
,
381 struct mali_framebuffer_extra
*fbx
,
382 struct mali_render_target
*rts
,
387 /* There may be extra data stuck in the middle */
388 bool has_extra
= fb
->mfbd_flags
& MALI_MFBD_EXTRA
;
390 /* Compute total size for transfer */
393 sizeof(struct mali_framebuffer
) +
394 (has_extra
? sizeof(struct mali_framebuffer_extra
) : 0) +
395 sizeof(struct mali_render_target
) * 8;
397 struct panfrost_transfer m_f_trans
=
398 panfrost_pool_alloc(&batch
->pool
, total_sz
);
400 /* Do the transfer */
402 UPLOAD(m_f_trans
, offset
, fb
, total_sz
);
405 UPLOAD(m_f_trans
, offset
, fbx
, total_sz
);
407 for (unsigned c
= 0; c
< 8; ++c
) {
408 UPLOAD(m_f_trans
, offset
, &rts
[c
], total_sz
);
411 /* Return pointer suitable for the fragment section */
414 (has_extra
? MALI_MFBD_TAG_EXTRA
: 0) |
415 (MALI_POSITIVE(rt_count
) << 2);
417 return m_f_trans
.gpu
| tag
;
422 /* Determines the # of bytes per pixel we need to reserve for a given format in
423 * the tilebuffer (compared to 128-bit budget, etc). Usually the same as the
424 * bytes per pixel of the format itself, but there are some special cases I
425 * don't understand. */
428 pan_bytes_per_pixel_tib(enum pipe_format format
)
430 const struct util_format_description
*desc
=
431 util_format_description(format
);
433 if (util_format_is_unorm8(desc
) || format
== PIPE_FORMAT_B5G6R5_UNORM
)
436 return desc
->block
.bits
/ 8;
439 /* Determines whether a framebuffer uses too much tilebuffer space (requiring
440 * us to scale up the tile at a performance penalty). This is conservative but
441 * afaict you get 128-bits per pixel normally */
444 pan_tib_size(struct panfrost_batch
*batch
)
448 for (int cb
= 0; cb
< batch
->key
.nr_cbufs
; ++cb
) {
449 struct pipe_surface
*surf
= batch
->key
.cbufs
[cb
];
451 size
+= pan_bytes_per_pixel_tib(surf
->format
);
/* Maps the framebuffer's tilebuffer requirement (bytes per pixel) to the
 * shift programmed into the MFBD (via unk1) and used to scale per-RT
 * tilebuffer offsets in panfrost_mfbd_fragment. */

static unsigned
pan_tib_shift(struct panfrost_batch *batch)
{
        unsigned size = pan_tib_size(batch);

        /* NOTE(review): the body mapping `size` to a shift was lost in this
         * copy. Reconstruction assumes a default 128-bit (16-byte) per-pixel
         * budget with the shift growing per doubling — restore from upstream
         * before relying on the exact thresholds. */
        if (size > 32)
                return 2;
        else if (size > 16)
                return 1;
        else
                return 0;
}
474 static struct mali_framebuffer
475 panfrost_emit_mfbd(struct panfrost_batch
*batch
, unsigned vertex_count
)
477 struct panfrost_context
*ctx
= batch
->ctx
;
478 struct pipe_context
*gallium
= (struct pipe_context
*) ctx
;
479 struct panfrost_device
*dev
= pan_device(gallium
->screen
);
481 unsigned width
= batch
->key
.width
;
482 unsigned height
= batch
->key
.height
;
484 struct mali_framebuffer mfbd
= {
485 .width1
= MALI_POSITIVE(width
),
486 .height1
= MALI_POSITIVE(height
),
487 .width2
= MALI_POSITIVE(width
),
488 .height2
= MALI_POSITIVE(height
),
490 /* Configures tib size */
491 .unk1
= (pan_tib_shift(batch
) << 9) | 0x80,
493 .rt_count_1
= MALI_POSITIVE(MAX2(batch
->key
.nr_cbufs
, 1)),
497 if (dev
->quirks
& IS_BIFROST
) {
498 mfbd
.msaa
.sample_locations
= panfrost_emit_sample_locations(batch
);
499 mfbd
.tiler_meta
= panfrost_batch_get_tiler_meta(batch
, vertex_count
);
501 unsigned shift
= panfrost_get_stack_shift(batch
->stack_size
);
502 struct panfrost_bo
*bo
= panfrost_batch_get_scratchpad(batch
,
504 dev
->thread_tls_alloc
,
506 mfbd
.shared_memory
.stack_shift
= shift
;
507 mfbd
.shared_memory
.scratchpad
= bo
->gpu
;
508 mfbd
.shared_memory
.shared_workgroup_count
= ~0;
510 mfbd
.tiler
= panfrost_emit_midg_tiler(batch
, vertex_count
);
517 panfrost_attach_mfbd(struct panfrost_batch
*batch
, unsigned vertex_count
)
519 struct mali_framebuffer mfbd
=
520 panfrost_emit_mfbd(batch
, vertex_count
);
522 memcpy(batch
->framebuffer
.cpu
, &mfbd
, sizeof(mfbd
));
525 /* Creates an MFBD for the FRAGMENT section of the bound framebuffer */
528 panfrost_mfbd_fragment(struct panfrost_batch
*batch
, bool has_draws
)
530 struct panfrost_device
*dev
= pan_device(batch
->ctx
->base
.screen
);
531 bool is_bifrost
= dev
->quirks
& IS_BIFROST
;
533 struct mali_framebuffer fb
= panfrost_emit_mfbd(batch
, has_draws
);
534 struct mali_framebuffer_extra fbx
= {0};
535 struct mali_render_target rts
[8] = {0};
537 /* We always upload at least one dummy GL_NONE render target */
539 unsigned rt_descriptors
= MAX2(batch
->key
.nr_cbufs
, 1);
541 fb
.rt_count_1
= MALI_POSITIVE(rt_descriptors
);
542 fb
.mfbd_flags
= 0x100;
544 panfrost_mfbd_clear(batch
, &fb
, &fbx
, rts
, rt_descriptors
);
546 /* Upload either the render target or a dummy GL_NONE target */
549 unsigned tib_shift
= pan_tib_shift(batch
);
551 for (int cb
= 0; cb
< rt_descriptors
; ++cb
) {
552 struct pipe_surface
*surf
= batch
->key
.cbufs
[cb
];
553 unsigned rt_offset
= offset
<< tib_shift
;
555 if (surf
&& ((batch
->clear
| batch
->draws
) & (PIPE_CLEAR_COLOR0
<< cb
))) {
556 if (MAX2(surf
->nr_samples
, surf
->texture
->nr_samples
) > 1)
557 batch
->requirements
|= PAN_REQ_MSAA
;
559 panfrost_mfbd_set_cbuf(&rts
[cb
], surf
);
561 offset
+= pan_bytes_per_pixel_tib(surf
->format
);
563 struct mali_rt_format null_rt
= {
573 rts
[cb
].format
= null_rt
;
574 rts
[cb
].framebuffer
= 0;
575 rts
[cb
].framebuffer_stride
= 0;
578 /* TODO: Break out the field */
579 rts
[cb
].format
.unk1
|= rt_offset
;
582 fb
.rt_count_2
= MAX2(DIV_ROUND_UP(offset
, 1 << (10 - tib_shift
)), 1);
584 if (batch
->key
.zsbuf
&& ((batch
->clear
| batch
->draws
) & PIPE_CLEAR_DEPTHSTENCIL
)) {
585 if (MAX2(batch
->key
.zsbuf
->nr_samples
, batch
->key
.zsbuf
->nr_samples
) > 1)
586 batch
->requirements
|= PAN_REQ_MSAA
;
588 panfrost_mfbd_set_zsbuf(&fb
, &fbx
, batch
->key
.zsbuf
);
591 /* When scanning out, the depth buffer is immediately invalidated, so
592 * we don't need to waste bandwidth writing it out. This can improve
593 * performance substantially (Z24X8_UNORM 1080p @ 60fps is 475 MB/s of
594 * memory bandwidth!).
596 * The exception is ReadPixels, but this is not supported on GLES so we
597 * can safely ignore it. */
599 if (panfrost_batch_is_scanout(batch
))
600 batch
->requirements
&= ~PAN_REQ_DEPTH_WRITE
;
602 /* Actualize the requirements */
604 if (batch
->requirements
& PAN_REQ_MSAA
) {
606 fb
.unk1
|= (1 << 4) | (1 << 1);
610 if (batch
->requirements
& PAN_REQ_DEPTH_WRITE
)
611 fb
.mfbd_flags
|= MALI_MFBD_DEPTH_WRITE
;
613 /* Checksumming only works with a single render target */
615 if (batch
->key
.nr_cbufs
== 1) {
616 struct pipe_surface
*surf
= batch
->key
.cbufs
[0];
617 struct panfrost_resource
*rsrc
= pan_resource(surf
->texture
);
619 if (rsrc
->checksummed
) {
620 unsigned level
= surf
->u
.tex
.level
;
621 struct panfrost_slice
*slice
= &rsrc
->slices
[level
];
623 fb
.mfbd_flags
|= MALI_MFBD_EXTRA
;
624 fbx
.flags_hi
|= MALI_EXTRA_PRESENT
;
625 fbx
.checksum_stride
= slice
->checksum_stride
;
626 if (slice
->checksum_bo
)
627 fbx
.checksum
= slice
->checksum_bo
->gpu
;
629 fbx
.checksum
= rsrc
->bo
->gpu
+ slice
->checksum_offset
;
633 return panfrost_mfbd_upload(batch
, &fb
, &fbx
, rts
, rt_descriptors
);