/*
 * © Copyright 2018 Alyssa Rosenzweig
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "pan_context.h"
#include "pan_swizzle.h"
#include "pan_format.h"

#include "util/macros.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "util/u_memory.h"
#include "util/half_float.h"
#include "indices/u_primconvert.h"
#include "tgsi/tgsi_parse.h"

#include "pan_screen.h"
#include "pan_blending.h"
#include "pan_blend_shaders.h"
#include "pan_wallpaper.h"
static int performance_counter_number = 0;
extern const char *pan_counters_base;

/* Do not actually send anything to the GPU; merely generate the cmdstream as fast as possible. Disables framebuffer writes */
//#define DRY_RUN

#define SET_BIT(lval, bit, cond) \
        if (cond) \
                lval |= (bit); \
        else \
                lval &= ~(bit);
/* TODO: Sample size, etc */

static void
panfrost_set_framebuffer_msaa(struct panfrost_context *ctx, bool enabled)
{
        SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_HAS_MSAA, enabled);
        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !enabled);

        if (ctx->require_sfbd) {
                SET_BIT(ctx->fragment_sfbd.format, MALI_FRAMEBUFFER_MSAA_A | MALI_FRAMEBUFFER_MSAA_B, enabled);
        } else {
                SET_BIT(ctx->fragment_rts[0].format.flags, MALI_MFBD_FORMAT_MSAA, enabled);
                SET_BIT(ctx->fragment_mfbd.unk1, (1 << 4) | (1 << 1), enabled);

                ctx->fragment_mfbd.rt_count_2 = enabled ? 4 : 1;
        }
}
/* AFBC is enabled on a per-resource basis (AFBC enabling is theoretically
 * independent between color buffers and depth/stencil). To enable, we allocate
 * the AFBC metadata buffer and mark that it is enabled. We do -not- actually
 * edit the fragment job here. This routine should be called ONCE per
 * AFBC-compressed buffer, rather than on every frame. */
static void
panfrost_enable_afbc(struct panfrost_context *ctx, struct panfrost_resource *rsrc, bool ds)
{
        if (ctx->require_sfbd) {
                printf("AFBC not supported yet on SFBD\n");
                assert(0);
        }

        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);

        /* AFBC metadata is 16 bytes per tile */
        int tile_w = (rsrc->base.width0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
        int tile_h = (rsrc->base.height0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
        int bytes_per_pixel = util_format_get_blocksize(rsrc->base.format);
        int stride = bytes_per_pixel * rsrc->base.width0; /* TODO: Alignment? */

        stride *= 2; /* TODO: Should this be carried over? */
        int main_size = stride * rsrc->base.height0;
        rsrc->bo->afbc_metadata_size = tile_w * tile_h * 16;
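
        /* Rough worked example, assuming the usual 16x16 Mali tile
         * (MALI_TILE_LENGTH = 16): a 1920x1080 surface rounds up to
         * 120x68 tiles, so the metadata occupies 120 * 68 * 16 = 130560
         * bytes */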
        /* Allocate the AFBC slab itself, large enough to hold the above */
        screen->driver->allocate_slab(screen, &rsrc->bo->afbc_slab,
                                      (rsrc->bo->afbc_metadata_size + main_size + 4095) / 4096,
                                      true, 0, 0, 0);

        rsrc->bo->has_afbc = true;
        rsrc->bo->gem_handle = rsrc->bo->afbc_slab.gem_handle;

        /* Compressed textured reads use a tagged pointer to the metadata */

        rsrc->bo->gpu[0] = rsrc->bo->afbc_slab.gpu | (ds ? 0 : 1);
        rsrc->bo->cpu[0] = rsrc->bo->afbc_slab.cpu;
}
static void
panfrost_enable_checksum(struct panfrost_context *ctx, struct panfrost_resource *rsrc)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);
        int tile_w = (rsrc->base.width0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
        int tile_h = (rsrc->base.height0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;

        /* 8 byte checksum per tile */
        rsrc->bo->checksum_stride = tile_w * 8;
        int pages = (((rsrc->bo->checksum_stride * tile_h) + 4095) / 4096);
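
        /* Worked example under the same 16x16-tile assumption: 1920x1080 is
         * 120x68 tiles, giving a checksum_stride of 960 bytes and
         * 960 * 68 = 65280 bytes total, i.e. 16 whole 4k pages */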
        screen->driver->allocate_slab(screen, &rsrc->bo->checksum_slab, pages, false, 0, 0, 0);

        rsrc->bo->has_checksum = true;
}
/* ..by contrast, this routine runs for every FRAGMENT job, but does no
 * allocation. AFBC is enabled on a per-surface basis */

static void
panfrost_set_fragment_afbc(struct panfrost_context *ctx)
{
        for (int cb = 0; cb < ctx->pipe_framebuffer.nr_cbufs; ++cb) {
                struct panfrost_resource *rsrc = (struct panfrost_resource *) ctx->pipe_framebuffer.cbufs[cb]->texture;

                /* Non-AFBC is the default */
                if (!rsrc->bo->has_afbc)
                        continue;

                if (ctx->require_sfbd) {
                        fprintf(stderr, "Color AFBC not supported on SFBD\n");
                        assert(0);
                }

                /* Enable AFBC for the render target */
                ctx->fragment_rts[0].afbc.metadata = rsrc->bo->afbc_slab.gpu;
                ctx->fragment_rts[0].afbc.stride = 0;
                ctx->fragment_rts[0].afbc.unk = 0x30009;

                ctx->fragment_rts[0].format.flags |= MALI_MFBD_FORMAT_AFBC;

                /* Point rendering to our special framebuffer */
                ctx->fragment_rts[0].framebuffer = rsrc->bo->afbc_slab.gpu + rsrc->bo->afbc_metadata_size;

                /* WAT? Stride is diff from the scanout case */
                ctx->fragment_rts[0].framebuffer_stride = ctx->pipe_framebuffer.width * 2 * 4;
        }
        /* Enable depth/stencil AFBC for the framebuffer (not the render target) */
        if (ctx->pipe_framebuffer.zsbuf) {
                struct panfrost_resource *rsrc = (struct panfrost_resource *) ctx->pipe_framebuffer.zsbuf->texture;

                if (rsrc->bo->has_afbc) {
                        if (ctx->require_sfbd) {
                                fprintf(stderr, "Depth AFBC not supported on SFBD\n");
                                assert(0);
                        }

                        ctx->fragment_mfbd.unk3 |= MALI_MFBD_EXTRA;

                        ctx->fragment_extra.ds_afbc.depth_stencil_afbc_metadata = rsrc->bo->afbc_slab.gpu;
                        ctx->fragment_extra.ds_afbc.depth_stencil_afbc_stride = 0;

                        ctx->fragment_extra.ds_afbc.depth_stencil = rsrc->bo->afbc_slab.gpu + rsrc->bo->afbc_metadata_size;

                        ctx->fragment_extra.ds_afbc.zero1 = 0x10009;
                        ctx->fragment_extra.ds_afbc.padding = 0x1000;

                        ctx->fragment_extra.unk = 0x435; /* General 0x400 in all unks. 0x5 for depth/stencil. 0x10 for AFBC encoded depth stencil. Unclear where the 0x20 is from */

                        ctx->fragment_mfbd.unk3 |= 0x400;
                }
        }
        /* For the special case of a depth-only FBO, we need to attach a dummy render target */

        if (ctx->pipe_framebuffer.nr_cbufs == 0) {
                if (ctx->require_sfbd) {
                        fprintf(stderr, "Depth-only FBO not supported on SFBD\n");
                        assert(0);
                }

                struct mali_rt_format null_rt = { 0 };

                ctx->fragment_rts[0].format = null_rt;
                ctx->fragment_rts[0].framebuffer = 0;
                ctx->fragment_rts[0].framebuffer_stride = 0;
        }
}
/* Framebuffer descriptor */

static void
panfrost_set_framebuffer_resolution(struct mali_single_framebuffer *fb, int w, int h)
{
        fb->width = MALI_POSITIVE(w);
        fb->height = MALI_POSITIVE(h);
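
        /* MALI_POSITIVE(n) encodes n - 1, so e.g. a 1920x1080 framebuffer is
         * written out as 1919x1079 */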

        /* No idea why this is needed, but it's how resolution_check is
         * calculated. It's not clear to us yet why the hardware wants this.
         * The formula itself was discovered mostly by manual bruteforce and
         * aggressive algebraic simplification. */

        fb->resolution_check = ((w + h) / 3) << 4;
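        /* e.g. at 1920x1080: ((1920 + 1080) / 3) << 4 = 1000 << 4 = 16000 */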
}

static struct mali_single_framebuffer
panfrost_emit_sfbd(struct panfrost_context *ctx)
{
        struct mali_single_framebuffer framebuffer = {
                .format = 0x30000000,
                .clear_flags = 0x1000,
                .unknown_address_0 = ctx->scratchpad.gpu,
                .unknown_address_1 = ctx->misc_0.gpu,
                .unknown_address_2 = ctx->misc_0.gpu + 40960,
                .tiler_heap_free = ctx->tiler_heap.gpu,
                .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size,
        };

        panfrost_set_framebuffer_resolution(&framebuffer, ctx->pipe_framebuffer.width, ctx->pipe_framebuffer.height);

        return framebuffer;
}
static struct bifrost_framebuffer
panfrost_emit_mfbd(struct panfrost_context *ctx)
{
        struct bifrost_framebuffer framebuffer = {
                /* It is not yet clear what tiler_meta means or how it's
                 * calculated, but we can tell the lower 32-bits are a
                 * (monotonically increasing?) function of tile count and
                 * geometry complexity; I suspect it defines a memory size of
                 * some kind? for the tiler. It's really unclear at the
                 * moment... but to add to the confusion, the hardware is happy
                 * enough to accept a zero in this field, so we don't even have
                 * to worry about it right now.
                 *
                 * The byte (just after the 32-bit mark) is much more
                 * interesting. The higher nibble I've only ever seen as 0xF,
                 * but the lower one I've seen as 0x0 or 0xF, and it's not
                 * obvious what the difference is. But what -is- obvious is
                 * that when the lower nibble is zero, performance is severely
                 * degraded compared to when the lower nibble is set.
                 * Evidently, that nibble enables some sort of fast path,
                 * perhaps relating to caching or tile flush? Regardless, at
                 * this point there's no clear reason not to set it, aside from
                 * substantially increased memory requirements (of the misc_0
                 * buffer) */

                .tiler_meta = ((uint64_t) 0xff << 32) | 0x0,

                .width1 = MALI_POSITIVE(ctx->pipe_framebuffer.width),
                .height1 = MALI_POSITIVE(ctx->pipe_framebuffer.height),
                .width2 = MALI_POSITIVE(ctx->pipe_framebuffer.width),
                .height2 = MALI_POSITIVE(ctx->pipe_framebuffer.height),

                .rt_count_1 = MALI_POSITIVE(1),

                /* Corresponds to unknown_address_X of SFBD */
                .scratchpad = ctx->scratchpad.gpu,
                .tiler_scratch_start = ctx->misc_0.gpu,

                /* The constant added here is, like the lower word of
                 * tiler_meta, (loosely) another product of framebuffer size
                 * and geometry complexity. It must be sufficiently large for
                 * the tiler_meta fast path to work; if it's too small, there
                 * will be DATA_INVALID_FAULTs. Conversely, it must be less
                 * than the total size of misc_0, or else there's no room. It's
                 * possible this constant configures a partition between two
                 * parts of misc_0? We haven't investigated the functionality,
                 * as these buffers are internally used by the hardware
                 * (presumably by the tiler) but not seemingly touched by the
                 * driver */

                .tiler_scratch_middle = ctx->misc_0.gpu + 0xf0000,

                .tiler_heap_start = ctx->tiler_heap.gpu,
                .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size,
        };

        return framebuffer;
}
/* Are we currently rendering to the screen (rather than an FBO)? */

static bool
panfrost_is_scanout(struct panfrost_context *ctx)
{
        /* If there is no color buffer, it's an FBO */
        if (!ctx->pipe_framebuffer.nr_cbufs)
                return false;

        /* If we're too early that no framebuffer was sent, it's scanout */
        if (!ctx->pipe_framebuffer.cbufs[0])
                return true;

        return ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_DISPLAY_TARGET ||
               ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SCANOUT ||
               ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
}
/* The above function is for generalised fbd emission, used in both fragment as
 * well as vertex/tiler payloads. This payload is specific to fragment
 * payloads. */

static void
panfrost_new_frag_framebuffer(struct panfrost_context *ctx)
{
        mali_ptr framebuffer;
        int stride;

        if (ctx->pipe_framebuffer.nr_cbufs > 0) {
                framebuffer = ((struct panfrost_resource *) ctx->pipe_framebuffer.cbufs[0]->texture)->bo->gpu[0];
                stride = util_format_get_stride(ctx->pipe_framebuffer.cbufs[0]->format, ctx->pipe_framebuffer.width);
        } else {
                /* Depth-only framebuffer -> dummy RT */
                framebuffer = 0;
                stride = 0;
        }

        /* The default is upside down from OpenGL's perspective. */
        if (panfrost_is_scanout(ctx)) {
                framebuffer += stride * (ctx->pipe_framebuffer.height - 1);
                stride = -stride;
        }

        if (ctx->require_sfbd) {
                struct mali_single_framebuffer fb = panfrost_emit_sfbd(ctx);

                fb.framebuffer = framebuffer;
                fb.stride = stride;

                fb.format = 0xb84e0281; /* RGB32, no MSAA */
                memcpy(&ctx->fragment_sfbd, &fb, sizeof(fb));
        } else {
                struct bifrost_framebuffer fb = panfrost_emit_mfbd(ctx);

                /* By default, Gallium seems to need a BGR framebuffer */
                unsigned char bgra[4] = {
                        PIPE_SWIZZLE_Z, PIPE_SWIZZLE_Y, PIPE_SWIZZLE_X, PIPE_SWIZZLE_W
                };

                struct bifrost_render_target rt = {
                        .nr_channels = MALI_POSITIVE(4),
                        .swizzle = panfrost_translate_swizzle_4(bgra),
                        .framebuffer = framebuffer,
                        .framebuffer_stride = (stride / 16) & 0xfffffff,
                };

                memcpy(&ctx->fragment_rts[0], &rt, sizeof(rt));

                memset(&ctx->fragment_extra, 0, sizeof(ctx->fragment_extra));

                memcpy(&ctx->fragment_mfbd, &fb, sizeof(fb));
        }
}
/* Maps float 0.0-1.0 to int 0x00-0xFF */
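/* Note the cast truncates: 0.5 maps to 127, and only exactly 1.0 maps to 255 */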
static uint8_t
normalised_float_to_u8(float f)
{
        return (uint8_t) (int) (f * 255.0f);
}
static void
panfrost_clear_sfbd(struct panfrost_job *job)
{
        struct panfrost_context *ctx = job->ctx;
        struct mali_single_framebuffer *sfbd = &ctx->fragment_sfbd;

        if (job->clear & PIPE_CLEAR_COLOR) {
                sfbd->clear_color_1 = job->clear_color;
                sfbd->clear_color_2 = job->clear_color;
                sfbd->clear_color_3 = job->clear_color;
                sfbd->clear_color_4 = job->clear_color;
        }

        if (job->clear & PIPE_CLEAR_DEPTH) {
                sfbd->clear_depth_1 = job->clear_depth;
                sfbd->clear_depth_2 = job->clear_depth;
                sfbd->clear_depth_3 = job->clear_depth;
                sfbd->clear_depth_4 = job->clear_depth;

                sfbd->depth_buffer = ctx->depth_stencil_buffer.gpu;
                sfbd->depth_buffer_enable = MALI_DEPTH_STENCIL_ENABLE;
        }

        if (job->clear & PIPE_CLEAR_STENCIL) {
                sfbd->clear_stencil = job->clear_stencil;

                sfbd->stencil_buffer = ctx->depth_stencil_buffer.gpu;
                sfbd->stencil_buffer_enable = MALI_DEPTH_STENCIL_ENABLE;
        }

        /* Set flags based on what has been cleared, for the SFBD case */
        /* XXX: What do these flags mean? */
        int clear_flags = 0x101100;

        if (!(job->clear & ~(PIPE_CLEAR_COLOR | PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))) {
                /* On a tiler like this, it's fastest to clear all three buffers at once */

                clear_flags |= MALI_CLEAR_FAST;
        } else {
                clear_flags |= MALI_CLEAR_SLOW;

                if (job->clear & PIPE_CLEAR_STENCIL)
                        clear_flags |= MALI_CLEAR_SLOW_STENCIL;
        }

        sfbd->clear_flags = clear_flags;
}
static void
panfrost_clear_mfbd(struct panfrost_job *job)
{
        struct panfrost_context *ctx = job->ctx;
        struct bifrost_render_target *buffer_color = &ctx->fragment_rts[0];
        struct bifrost_framebuffer *buffer_ds = &ctx->fragment_mfbd;

        if (job->clear & PIPE_CLEAR_COLOR) {
                buffer_color->clear_color_1 = job->clear_color;
                buffer_color->clear_color_2 = job->clear_color;
                buffer_color->clear_color_3 = job->clear_color;
                buffer_color->clear_color_4 = job->clear_color;
        }

        if (job->clear & PIPE_CLEAR_DEPTH) {
                buffer_ds->clear_depth = job->clear_depth;
        }

        if (job->clear & PIPE_CLEAR_STENCIL) {
                buffer_ds->clear_stencil = job->clear_stencil;
        }

        if (job->clear & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
                /* Setup combined 24/8 depth/stencil */
                ctx->fragment_mfbd.unk3 |= MALI_MFBD_EXTRA;
                ctx->fragment_extra.unk = 0x405;
                ctx->fragment_extra.ds_linear.depth = ctx->depth_stencil_buffer.gpu;
                ctx->fragment_extra.ds_linear.depth_stride = ctx->pipe_framebuffer.width * 4;
        }
}
static void
panfrost_clear(
        struct pipe_context *pipe,
        unsigned buffers,
        const union pipe_color_union *color,
        double depth, unsigned stencil)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);

        if (buffers & PIPE_CLEAR_COLOR) {
                /* Alpha clear only meaningful without alpha channel, TODO less ad hoc */
                bool has_alpha = util_format_has_alpha(ctx->pipe_framebuffer.cbufs[0]->format);
                float clear_alpha = has_alpha ? color->f[3] : 1.0f;

                uint32_t packed_color =
                        (normalised_float_to_u8(clear_alpha) << 24) |
                        (normalised_float_to_u8(color->f[2]) << 16) |
                        (normalised_float_to_u8(color->f[1]) << 8) |
                        (normalised_float_to_u8(color->f[0]) << 0);
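
                /* The packing is ABGR word order (RGBA byte order in memory
                 * on little-endian): opaque red (1, 0, 0, 1) becomes
                 * 0xff0000ff */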
                job->clear_color = packed_color;
        }

        if (buffers & PIPE_CLEAR_DEPTH) {
                job->clear_depth = depth;
        }

        if (buffers & PIPE_CLEAR_STENCIL) {
                job->clear_stencil = stencil;
        }

        job->clear |= buffers;
}
static mali_ptr
panfrost_attach_vt_mfbd(struct panfrost_context *ctx)
{
        /* MFBD needs a sequential semi-render target upload, but what exactly this is, is beyond me for now */
        struct bifrost_render_target rts_list[] = {
                {
                        .framebuffer = ctx->misc_0.gpu,
                },
        };

        /* Allocate memory for the three components */
        int size = 1024 + sizeof(ctx->vt_framebuffer_mfbd) + sizeof(rts_list);
        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);

        /* Opaque 1024-block */
        rts_list[0].chunknown.pointer = transfer.gpu;

        memcpy(transfer.cpu + 1024, &ctx->vt_framebuffer_mfbd, sizeof(ctx->vt_framebuffer_mfbd));
        memcpy(transfer.cpu + 1024 + sizeof(ctx->vt_framebuffer_mfbd), rts_list, sizeof(rts_list));

        return (transfer.gpu + 1024) | MALI_MFBD;
}
static mali_ptr
panfrost_attach_vt_sfbd(struct panfrost_context *ctx)
{
        return panfrost_upload_transient(ctx, &ctx->vt_framebuffer_sfbd, sizeof(ctx->vt_framebuffer_sfbd)) | MALI_SFBD;
}
static void
panfrost_attach_vt_framebuffer(struct panfrost_context *ctx)
{
        mali_ptr framebuffer = ctx->require_sfbd ?
                               panfrost_attach_vt_sfbd(ctx) :
                               panfrost_attach_vt_mfbd(ctx);

        ctx->payload_vertex.postfix.framebuffer = framebuffer;
        ctx->payload_tiler.postfix.framebuffer = framebuffer;
}
static void
panfrost_viewport(struct panfrost_context *ctx,
                  float depth_clip_near,
                  float depth_clip_far,
                  int viewport_x0, int viewport_y0,
                  int viewport_x1, int viewport_y1)
{
        /* Clip bounds are encoded as floats. The viewport itself is encoded as
         * (somewhat) asymmetric ints. */

        struct mali_viewport ret = {
                /* By default, do no viewport clipping, i.e. clip to (-inf,
                 * inf) in each direction. Clipping to the viewport in theory
                 * should work, but in practice causes issues when we're not
                 * explicitly trying to scissor */

                .clip_minx = -INFINITY,
                .clip_miny = -INFINITY,
                .clip_maxx = INFINITY,
                .clip_maxy = INFINITY,

                /* We always perform depth clipping (TODO: Can this be disabled?) */

                .clip_minz = depth_clip_near,
                .clip_maxz = depth_clip_far,

                .viewport0 = { viewport_x0, viewport_y0 },
                .viewport1 = { MALI_POSITIVE(viewport_x1), MALI_POSITIVE(viewport_y1) },
        };

        memcpy(ctx->viewport, &ret, sizeof(ret));
}
/* Reset per-frame context, called on context initialisation as well as after
 * flushing a frame */

static void
panfrost_invalidate_frame(struct panfrost_context *ctx)
{
        unsigned transient_count = ctx->transient_pools[ctx->cmdstream_i].entry_index * ctx->transient_pools[0].entry_size + ctx->transient_pools[ctx->cmdstream_i].entry_offset;
        printf("Uploaded transient %d bytes\n", transient_count);

        /* Rotate cmdstream */
        if ((++ctx->cmdstream_i) == (sizeof(ctx->transient_pools) / sizeof(ctx->transient_pools[0])))
                ctx->cmdstream_i = 0;

        if (ctx->require_sfbd)
                ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
        else
                ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);

        panfrost_new_frag_framebuffer(ctx);

        /* Reset varyings allocated */
        ctx->varying_height = 0;

        /* The transient cmdstream is dirty every frame; the only bits worth preserving
         * (textures, shaders, etc) are in other buffers anyways */

        ctx->transient_pools[ctx->cmdstream_i].entry_index = 0;
        ctx->transient_pools[ctx->cmdstream_i].entry_offset = 0;

        /* Regenerate payloads */
        panfrost_attach_vt_framebuffer(ctx);

        if (ctx->rasterizer)
                ctx->dirty |= PAN_DIRTY_RASTERIZER;

        ctx->dirty |= PAN_DIRTY_SAMPLERS | PAN_DIRTY_TEXTURES;
}
/* In practice, every field of these payloads should be configurable
 * arbitrarily, which means these functions are basically catch-all's for
 * as-of-yet unwavering unknowns */

static void
panfrost_emit_vertex_payload(struct panfrost_context *ctx)
{
        struct midgard_payload_vertex_tiler payload = {
                .prefix = {
                        .workgroups_z_shift = 32,
                        .workgroups_x_shift_2 = 0x2,
                        .workgroups_x_shift_3 = 0x5,
                },
                .gl_enables = 0x4 | (ctx->is_t6xx ? 0 : 0x2),
        };

        memcpy(&ctx->payload_vertex, &payload, sizeof(payload));
}
static void
panfrost_emit_tiler_payload(struct panfrost_context *ctx)
{
        struct midgard_payload_vertex_tiler payload = {
                .prefix = {
                        .workgroups_z_shift = 32,
                        .workgroups_x_shift_2 = 0x2,
                        .workgroups_x_shift_3 = 0x6,

                        .zero1 = 0xffff, /* Why is this only seen on test-quad-textured? */
                },
        };

        /* Reserve the viewport */
        struct panfrost_transfer t = panfrost_allocate_chunk(ctx, sizeof(struct mali_viewport), HEAP_DESCRIPTOR);
        ctx->viewport = (struct mali_viewport *) t.cpu;
        payload.postfix.viewport = t.gpu;

        memcpy(&ctx->payload_tiler, &payload, sizeof(payload));
}
static unsigned
translate_tex_wrap(enum pipe_tex_wrap w)
{
        switch (w) {
        case PIPE_TEX_WRAP_REPEAT:
                return MALI_WRAP_REPEAT;

        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return MALI_WRAP_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return MALI_WRAP_CLAMP_TO_BORDER;

        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return MALI_WRAP_MIRRORED_REPEAT;

        default:
                assert(0);
                return 0;
        }
}
static unsigned
translate_tex_filter(enum pipe_tex_filter f)
{
        switch (f) {
        case PIPE_TEX_FILTER_NEAREST:
                return MALI_NEAREST;

        case PIPE_TEX_FILTER_LINEAR:
                return MALI_LINEAR;

        default:
                assert(0);
                return 0;
        }
}
static unsigned
translate_mip_filter(enum pipe_tex_mipfilter f)
{
        return (f == PIPE_TEX_MIPFILTER_LINEAR) ? MALI_MIP_LINEAR : 0;
}
static unsigned
panfrost_translate_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_FUNC_ALWAYS;
        }

        return 0; /* Unreachable */
}
static unsigned
panfrost_translate_alt_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_ALT_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_ALT_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_ALT_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_ALT_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_ALT_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_ALT_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_ALT_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_ALT_FUNC_ALWAYS;
        }

        return 0; /* Unreachable */
}
static unsigned
panfrost_translate_stencil_op(enum pipe_stencil_op in)
{
        switch (in) {
        case PIPE_STENCIL_OP_KEEP:
                return MALI_STENCIL_KEEP;

        case PIPE_STENCIL_OP_ZERO:
                return MALI_STENCIL_ZERO;

        case PIPE_STENCIL_OP_REPLACE:
                return MALI_STENCIL_REPLACE;

        case PIPE_STENCIL_OP_INCR:
                return MALI_STENCIL_INCR;

        case PIPE_STENCIL_OP_DECR:
                return MALI_STENCIL_DECR;

        case PIPE_STENCIL_OP_INCR_WRAP:
                return MALI_STENCIL_INCR_WRAP;

        case PIPE_STENCIL_OP_DECR_WRAP:
                return MALI_STENCIL_DECR_WRAP;

        case PIPE_STENCIL_OP_INVERT:
                return MALI_STENCIL_INVERT;
        }

        return 0; /* Unreachable */
}
static void
panfrost_make_stencil_state(const struct pipe_stencil_state *in, struct mali_stencil_test *out)
{
        out->ref = 0; /* Gallium gets it from elsewhere */

        out->mask = in->valuemask;
        out->func = panfrost_translate_compare_func(in->func);
        out->sfail = panfrost_translate_stencil_op(in->fail_op);
        out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
        out->dppass = panfrost_translate_stencil_op(in->zpass_op);
}
static void
panfrost_default_shader_backend(struct panfrost_context *ctx)
{
        struct mali_shader_meta shader = {
                .alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000),

                .unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010,
                .unknown2_4 = MALI_NO_MSAA | 0x4e0,
        };

        if (ctx->is_t6xx) {
                shader.unknown2_4 |= 0x10;
        }

        struct pipe_stencil_state default_stencil = {
                .enabled = 0,
                .func = PIPE_FUNC_ALWAYS,
                .fail_op = MALI_STENCIL_KEEP,
                .zfail_op = MALI_STENCIL_KEEP,
                .zpass_op = MALI_STENCIL_KEEP,
                .writemask = 0xFF,
                .valuemask = 0xFF
        };

        panfrost_make_stencil_state(&default_stencil, &shader.stencil_front);
        shader.stencil_mask_front = default_stencil.writemask;

        panfrost_make_stencil_state(&default_stencil, &shader.stencil_back);
        shader.stencil_mask_back = default_stencil.writemask;

        if (default_stencil.enabled)
                shader.unknown2_4 |= MALI_STENCIL_TEST;

        memcpy(&ctx->fragment_shader_core, &shader, sizeof(shader));
}
/* Generates a vertex/tiler job. This is, in some sense, the heart of the
 * graphics command stream. It should be called once per draw, according to
 * presentations. Set is_tiler for "tiler" jobs (fragment shader jobs, but in
 * Mali parlance, "fragment" refers to framebuffer writeout). Clear it for
 * vertex jobs. */

struct panfrost_transfer
panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler, bool is_elided_tiler)
{
        /* Each draw call corresponds to two jobs, and we want to offset to leave room for the set-value job */
        int draw_job_index = 1 + (2 * ctx->draw_count);
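
        /* So the first draw gets vertex index 1 and tiler index 2, the second
         * gets 3 and 4, and so on; for N draws the indices 1..2N are used,
         * leaving the set-value job emitted at flush time to take the next
         * index after them */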
        struct mali_job_descriptor_header job = {
                .job_type = is_tiler ? JOB_TYPE_TILER : JOB_TYPE_VERTEX,
                .job_index = draw_job_index + (is_tiler ? 1 : 0),
                .job_descriptor_size = 1,
        };

        /* Only non-elided tiler jobs have dependencies which are known at this point */

        if (is_tiler && !is_elided_tiler) {
                /* Tiler jobs depend on vertex jobs */

                job.job_dependency_index_1 = draw_job_index;

                /* Tiler jobs also depend on the previous tiler job */

                if (ctx->draw_count)
                        job.job_dependency_index_2 = draw_job_index - 1;
        }

        struct midgard_payload_vertex_tiler *payload = is_tiler ? &ctx->payload_tiler : &ctx->payload_vertex;

        /* There's some padding hacks on 32-bit */

#ifdef __LP64__
        int offset = 0;
#else
        int offset = 4;
#endif

        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(*payload));
        memcpy(transfer.cpu, &job, sizeof(job));
        memcpy(transfer.cpu + sizeof(job) - offset, payload, sizeof(*payload));
        return transfer;
}
/* Generates a set value job. It's unclear what exactly this does, why it's
 * necessary, and when to call it. */

static void
panfrost_set_value_job(struct panfrost_context *ctx)
{
        struct mali_job_descriptor_header job = {
                .job_type = JOB_TYPE_SET_VALUE,
                .job_descriptor_size = 1,
                .job_index = 1 + (2 * ctx->draw_count),
        };

        struct mali_payload_set_value payload = {
                .out = ctx->misc_0.gpu,
        };

        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(payload));
        memcpy(transfer.cpu, &job, sizeof(job));
        memcpy(transfer.cpu + sizeof(job), &payload, sizeof(payload));

        ctx->u_set_value_job = (struct mali_job_descriptor_header *) transfer.cpu;
        ctx->set_value_job = transfer.gpu;
}
/* Generate a fragment job. This should be called once per frame. (According to
 * presentations, this is supposed to correspond to eglSwapBuffers) */

mali_ptr
panfrost_fragment_job(struct panfrost_context *ctx)
{
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);

        /* Actualize the clear late; TODO: Fix order dependency between clear
         * and afbc */

        if (ctx->require_sfbd) {
                panfrost_clear_sfbd(job);
        } else {
                panfrost_clear_mfbd(job);
        }

        panfrost_set_fragment_afbc(ctx);

        if (ctx->pipe_framebuffer.nr_cbufs == 1) {
                struct panfrost_resource *rsrc = (struct panfrost_resource *) ctx->pipe_framebuffer.cbufs[0]->texture;

                if (rsrc->bo->has_checksum) {
                        if (ctx->require_sfbd) {
                                fprintf(stderr, "Checksumming not supported on SFBD\n");
                                assert(0);
                        }

                        int stride = util_format_get_stride(rsrc->base.format, rsrc->base.width0);

                        ctx->fragment_mfbd.unk3 |= MALI_MFBD_EXTRA;
                        ctx->fragment_extra.unk |= 0x420;
                        ctx->fragment_extra.checksum_stride = rsrc->bo->checksum_stride;
                        ctx->fragment_extra.checksum = rsrc->bo->gpu[0] + stride * rsrc->base.height0;
                }
        }

        /* The frame is complete and therefore the framebuffer descriptor is
         * ready for linkage and upload */

        size_t sz = ctx->require_sfbd ? sizeof(struct mali_single_framebuffer) : (sizeof(struct bifrost_framebuffer) + sizeof(struct bifrost_fb_extra) + sizeof(struct bifrost_render_target) * 1);
        struct panfrost_transfer fbd_t = panfrost_allocate_transient(ctx, sz);
        off_t offset = 0;

        if (ctx->require_sfbd) {
                /* Upload just the SFBD all at once */
                memcpy(fbd_t.cpu, &ctx->fragment_sfbd, sizeof(ctx->fragment_sfbd));
                offset += sizeof(ctx->fragment_sfbd);
        } else {
                /* Upload the MFBD header */
                memcpy(fbd_t.cpu, &ctx->fragment_mfbd, sizeof(ctx->fragment_mfbd));
                offset += sizeof(ctx->fragment_mfbd);

                /* Upload extra framebuffer info if necessary */
                if (ctx->fragment_mfbd.unk3 & MALI_MFBD_EXTRA) {
                        memcpy(fbd_t.cpu + offset, &ctx->fragment_extra, sizeof(struct bifrost_fb_extra));
                        offset += sizeof(struct bifrost_fb_extra);
                }

                /* Upload (single) render target */
                memcpy(fbd_t.cpu + offset, &ctx->fragment_rts[0], sizeof(struct bifrost_render_target) * 1);
        }

        /* Generate the fragment (frame) job */

        struct mali_job_descriptor_header header = {
                .job_type = JOB_TYPE_FRAGMENT,
                .job_descriptor_size = 1,
        };

        struct mali_payload_fragment payload = {
                .min_tile_coord = MALI_COORDINATE_TO_TILE_MIN(0, 0),
                .max_tile_coord = MALI_COORDINATE_TO_TILE_MAX(ctx->pipe_framebuffer.width, ctx->pipe_framebuffer.height),
                .framebuffer = fbd_t.gpu | (ctx->require_sfbd ? MALI_SFBD : MALI_MFBD),
        };

        if (!ctx->require_sfbd && ctx->fragment_mfbd.unk3 & MALI_MFBD_EXTRA) {
                /* Signal that there is an extra portion of the framebuffer
                 * descriptor */

                payload.framebuffer |= 2;
        }

        /* Normally, there should be no padding. However, fragment jobs are
         * shared with 64-bit Bifrost systems, and accordingly there is 4-bytes
         * of zero padding in between. */

        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(header) + sizeof(payload));
        memcpy(transfer.cpu, &header, sizeof(header));
        memcpy(transfer.cpu + sizeof(header), &payload, sizeof(payload));
        return transfer.gpu;
}
/* Emits attributes and varying descriptors, which should be called every draw,
 * excepting some obscure circumstances */

static void
panfrost_emit_vertex_data(struct panfrost_context *ctx)
{
        /* TODO: Only update the dirtied buffers */
        union mali_attr attrs[PIPE_MAX_ATTRIBS];
        union mali_attr varyings[PIPE_MAX_ATTRIBS];

        unsigned invocation_count = MALI_NEGATIVE(ctx->payload_tiler.prefix.invocation_count);

        for (int i = 0; i < ctx->vertex_buffer_count; ++i) {
                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
                struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);

                /* Let's figure out the layout of the attributes in memory so
                 * we can be smart about size computation. The idea is to
                 * figure out the maximum src_offset, which tells us the latest
                 * spot a vertex could start. Meanwhile, we figure out the size
                 * of the attribute memory (assuming interleaved
                 * representation) and tack on the max src_offset for a
                 * reasonably good upper bound on the size.
                 *
                 * Proving correctness is left as an exercise to the reader.
                 */

                unsigned max_src_offset = 0;

                for (unsigned j = 0; j < ctx->vertex->num_elements; ++j) {
                        if (ctx->vertex->pipe[j].vertex_buffer_index != i) continue;
                        max_src_offset = MAX2(max_src_offset, ctx->vertex->pipe[j].src_offset);
                }

                /* Offset vertex count by draw_start to make sure we upload enough */
                attrs[i].stride = buf->stride;
                attrs[i].size = buf->stride * (ctx->payload_vertex.draw_start + invocation_count) + max_src_offset;
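
                /* e.g. a 16-byte stride, draw_start 0, 100 invocations, and a
                 * max src_offset of 12 bounds the buffer at
                 * 16 * 100 + 12 = 1612 bytes */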
                /* Vertex elements are -already- GPU-visible, at
                 * rsrc->gpu. However, attribute buffers must be 64 aligned. If
                 * it is not, for now we have to duplicate the buffer. */

                mali_ptr effective_address = (rsrc->bo->gpu[0] + buf->buffer_offset);

                if (effective_address & 0x3F) {
                        attrs[i].elements = panfrost_upload_transient(ctx, rsrc->bo->cpu[0] + buf->buffer_offset, attrs[i].size) | 1;
                } else {
                        attrs[i].elements = effective_address | 1;
                }
        }

        struct panfrost_varyings *vars = &ctx->vs->variants[ctx->vs->active_variant].varyings;

        for (int i = 0; i < vars->varying_buffer_count; ++i) {
                mali_ptr varying_address = ctx->varying_mem.gpu + ctx->varying_height;

                varyings[i].elements = varying_address | 1;
                varyings[i].stride = vars->varyings_stride[i];
                varyings[i].size = vars->varyings_stride[i] * invocation_count;

                /* If this varying has to be linked somewhere, do it now. See
                 * pan_assemble.c for the indices. TODO: Use a more generic
                 * linking interface */

                if (i == 1) {
                        ctx->payload_tiler.postfix.position_varying = varying_address;
                } else if (i == 2) {
                        ctx->payload_tiler.primitive_size.pointer = varying_address;
                }

                /* Varyings appear to need 64-byte alignment */
                ctx->varying_height += ALIGN(varyings[i].size, 64);

                /* Ensure that we fit */
                assert(ctx->varying_height < ctx->varying_mem.size);
        }

        ctx->payload_vertex.postfix.attributes = panfrost_upload_transient(ctx, attrs, ctx->vertex_buffer_count * sizeof(union mali_attr));

        mali_ptr varyings_p = panfrost_upload_transient(ctx, &varyings, vars->varying_buffer_count * sizeof(union mali_attr));
        ctx->payload_vertex.postfix.varyings = varyings_p;
        ctx->payload_tiler.postfix.varyings = varyings_p;
}
/* Go through dirty flags and actualise them in the cmdstream. */

void
panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
{
        if (with_vertex_data) {
                panfrost_emit_vertex_data(ctx);
        }

        if (ctx->dirty & PAN_DIRTY_RASTERIZER) {
                ctx->payload_tiler.gl_enables = ctx->rasterizer->tiler_gl_enables;
                panfrost_set_framebuffer_msaa(ctx, ctx->rasterizer->base.multisample);
        }

        if (ctx->occlusion_query) {
                ctx->payload_tiler.gl_enables |= MALI_OCCLUSION_QUERY | MALI_OCCLUSION_PRECISE;
                ctx->payload_tiler.postfix.occlusion_counter = ctx->occlusion_query->transfer.gpu;
        }

        if (ctx->dirty & PAN_DIRTY_VS) {
                struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];

                /* Late shader descriptor assignments */
                vs->tripipe->texture_count = ctx->sampler_view_count[PIPE_SHADER_VERTEX];
                vs->tripipe->sampler_count = ctx->sampler_count[PIPE_SHADER_VERTEX];

                vs->tripipe->midgard1.unknown1 = 0x2201;

                ctx->payload_vertex.postfix._shader_upper = vs->tripipe_gpu >> 4;

                /* Varying descriptor is tied to the vertex shader. Also the
                 * fragment shader, I suppose, but it's generated with the
                 * vertex shader so */

                struct panfrost_varyings *varyings = &ctx->vs->variants[ctx->vs->active_variant].varyings;

                ctx->payload_vertex.postfix.varying_meta = varyings->varyings_descriptor;
                ctx->payload_tiler.postfix.varying_meta = varyings->varyings_descriptor_fragment;
        }

        if (ctx->dirty & (PAN_DIRTY_RASTERIZER | PAN_DIRTY_VS)) {
                /* Check if we need to link the gl_PointSize varying */

                struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];

                bool needs_gl_point_size = vs->writes_point_size && ctx->payload_tiler.prefix.draw_mode == MALI_POINTS;

                if (!needs_gl_point_size) {
                        /* If the size is constant, write it out. Otherwise,
                         * don't touch primitive_size (since we would clobber
                         * the pointer there) */

                        ctx->payload_tiler.primitive_size.constant = ctx->rasterizer->base.line_width;
                }

                /* Set the flag for varying (pointer) point size if the shader needs that */
                SET_BIT(ctx->payload_tiler.prefix.unknown_draw, MALI_DRAW_VARYING_SIZE, needs_gl_point_size);
        }

        /* TODO: Maybe dirty track FS, maybe not. For now, it's transient. */
        ctx->dirty |= PAN_DIRTY_FS;

        if (ctx->dirty & PAN_DIRTY_FS) {
                struct panfrost_shader_state *variant = &ctx->fs->variants[ctx->fs->active_variant];

#define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name

                COPY(attribute_count);
                COPY(varying_count);
                COPY(midgard1.uniform_count);
                COPY(midgard1.work_count);
                COPY(midgard1.unknown2);

#undef COPY

                /* If there is a blend shader, work registers are shared */

                if (ctx->blend->has_blend_shader)
                        ctx->fragment_shader_core.midgard1.work_count = /*MAX2(ctx->fragment_shader_core.midgard1.work_count, ctx->blend->blend_work_count)*/16;

                /* Set late due to depending on render state */
                /* The one at the end seems to mean "1 UBO" */
                ctx->fragment_shader_core.midgard1.unknown1 = MALI_NO_ALPHA_TO_COVERAGE | 0x200 | 0x2201;

                /* Assign texture/sample count right before upload */
                ctx->fragment_shader_core.texture_count = ctx->sampler_view_count[PIPE_SHADER_FRAGMENT];
                ctx->fragment_shader_core.sampler_count = ctx->sampler_count[PIPE_SHADER_FRAGMENT];

                /* Assign the stencil refs late */
                ctx->fragment_shader_core.stencil_front.ref = ctx->stencil_ref.ref_value[0];
                ctx->fragment_shader_core.stencil_back.ref = ctx->stencil_ref.ref_value[1];

                /* CAN_DISCARD should be set if the fragment shader possibly
                 * contains a 'discard' instruction. It is likely this is
                 * related to optimizations related to forward-pixel kill, as
                 * per "Mali Performance 3: Is EGL_BUFFER_PRESERVED a good
                 * thing?" by Peter Harris
                 */

                if (variant->can_discard) {
                        ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
                        ctx->fragment_shader_core.midgard1.unknown1 &= ~MALI_NO_ALPHA_TO_COVERAGE;
                        ctx->fragment_shader_core.midgard1.unknown1 |= 0x4000;
                        ctx->fragment_shader_core.midgard1.unknown1 = 0x4200;
                }

                /* Check if we're using the default blend descriptor (fast path) */

                bool no_blending =
                        !ctx->blend->has_blend_shader &&
                        (ctx->blend->equation.rgb_mode == 0x122) &&
                        (ctx->blend->equation.alpha_mode == 0x122) &&
                        (ctx->blend->equation.color_mask == 0xf);

                if (ctx->require_sfbd) {
                        /* When only a single render target platform is used, the blend
                         * information is inside the shader meta itself. We
                         * additionally need to signal CAN_DISCARD for nontrivial blend
                         * modes (so we're able to read back the destination buffer) */

                        if (ctx->blend->has_blend_shader) {
                                ctx->fragment_shader_core.blend_shader = ctx->blend->blend_shader;
                        } else {
                                memcpy(&ctx->fragment_shader_core.blend_equation, &ctx->blend->equation, sizeof(ctx->blend->equation));
                        }

                        if (!no_blending) {
                                ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
                        }
                }

                size_t size = sizeof(struct mali_shader_meta) + sizeof(struct mali_blend_meta);
                struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);
                memcpy(transfer.cpu, &ctx->fragment_shader_core, sizeof(struct mali_shader_meta));

                ctx->payload_tiler.postfix._shader_upper = (transfer.gpu) >> 4;

                if (!ctx->require_sfbd) {
                        /* Additional blend descriptor tacked on for jobs using MFBD */

                        unsigned blend_count = 0;

                        if (ctx->blend->has_blend_shader) {
                                /* For a blend shader, the bottom nibble corresponds to
                                 * the number of work registers used, which signals the
                                 * -existence- of a blend shader */

                                assert(ctx->blend->blend_work_count >= 2);
                                blend_count |= MIN2(ctx->blend->blend_work_count, 3);
                        } else {
                                /* Otherwise, the bottom bit simply specifies if
                                 * blending (anything other than REPLACE) is enabled */

                                if (!no_blending)
                                        blend_count |= 0x1;
                        }

                        /* Second blend equation is always a simple replace */

                        uint64_t replace_magic = 0xf0122122;
                        struct mali_blend_equation replace_mode;
                        memcpy(&replace_mode, &replace_magic, sizeof(replace_mode));

                        struct mali_blend_meta blend_meta[] = {
                                {
                                        .unk1 = 0x200 | blend_count,
                                        .blend_equation_1 = ctx->blend->equation,
                                        .blend_equation_2 = replace_mode,
                                },
                        };

                        if (ctx->blend->has_blend_shader)
                                memcpy(&blend_meta[0].blend_equation_1, &ctx->blend->blend_shader, sizeof(ctx->blend->blend_shader));

                        memcpy(transfer.cpu + sizeof(struct mali_shader_meta), blend_meta, sizeof(blend_meta));
                }
        }

        if (ctx->dirty & PAN_DIRTY_VERTEX) {
                ctx->payload_vertex.postfix.attribute_meta = ctx->vertex->descriptor_ptr;
        }

        if (ctx->dirty & PAN_DIRTY_SAMPLERS) {
                /* Upload samplers back to back, no padding */

                for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
                        if (!ctx->sampler_count[t]) continue;

                        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(struct mali_sampler_descriptor) * ctx->sampler_count[t]);
                        struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *) transfer.cpu;

                        for (int i = 0; i < ctx->sampler_count[t]; ++i) {
                                desc[i] = ctx->samplers[t][i]->hw;
                        }

                        if (t == PIPE_SHADER_FRAGMENT)
                                ctx->payload_tiler.postfix.sampler_descriptor = transfer.gpu;
                        else if (t == PIPE_SHADER_VERTEX)
                                ctx->payload_vertex.postfix.sampler_descriptor = transfer.gpu;
                }
        }

        if (ctx->dirty & PAN_DIRTY_TEXTURES) {
                for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
                        if (!ctx->sampler_view_count[t]) continue;

                        uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];

                        for (int i = 0; i < ctx->sampler_view_count[t]; ++i) {
                                if (!ctx->sampler_views[t][i])
                                        continue;

                                struct pipe_resource *tex_rsrc = ctx->sampler_views[t][i]->base.texture;
                                struct panfrost_resource *rsrc = (struct panfrost_resource *) tex_rsrc;

                                /* Inject the address in. */
                                for (int l = 0; l < (tex_rsrc->last_level + 1); ++l)
                                        ctx->sampler_views[t][i]->hw.swizzled_bitmaps[l] = rsrc->bo->gpu[l];

                                /* Workaround maybe-errata (?) with non-mipmaps */
                                int s = ctx->sampler_views[t][i]->hw.nr_mipmap_levels;

                                if (!rsrc->bo->is_mipmap) {
                                        if (ctx->is_t6xx) {
                                                /* HW ERRATA, not needed after t6XX */
                                                ctx->sampler_views[t][i]->hw.swizzled_bitmaps[1] = rsrc->bo->gpu[0];

                                                ctx->sampler_views[t][i]->hw.unknown3A = 1;
                                        }

                                        ctx->sampler_views[t][i]->hw.nr_mipmap_levels = 0;
                                }

                                trampolines[i] = panfrost_upload_transient(ctx, &ctx->sampler_views[t][i]->hw, sizeof(struct mali_texture_descriptor));

                                /* Restore */
                                ctx->sampler_views[t][i]->hw.nr_mipmap_levels = s;

                                if (ctx->is_t6xx)
                                        ctx->sampler_views[t][i]->hw.unknown3A = 0;
                        }

                        mali_ptr trampoline = panfrost_upload_transient(ctx, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]);

                        if (t == PIPE_SHADER_FRAGMENT)
                                ctx->payload_tiler.postfix.texture_trampoline = trampoline;
                        else if (t == PIPE_SHADER_VERTEX)
                                ctx->payload_vertex.postfix.texture_trampoline = trampoline;
                }
        }

        /* Generate the viewport vector of the form: <width/2, height/2, centerx, centery> */
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        float viewport_vec4[] = {
                vp->scale[0],
                fabsf(vp->scale[1]),
                vp->translate[0],
                /* -1.0 * vp->translate[1] */ fabs(1.0 * vp->scale[1]) /* XXX */
        };

        for (int i = 0; i < PIPE_SHADER_TYPES; ++i) {
                struct panfrost_constant_buffer *buf = &ctx->constant_buffer[i];

                if (i == PIPE_SHADER_VERTEX || i == PIPE_SHADER_FRAGMENT) {
                        /* It doesn't matter if we don't use all the memory;
                         * we'd need a dummy UBO anyway. Compute the max */

                        size_t size = sizeof(viewport_vec4) + buf->size;
                        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);

                        /* Keep track how much we've uploaded */
                        off_t offset = 0;

                        if (i == PIPE_SHADER_VERTEX) {
                                /* Upload viewport */
                                memcpy(transfer.cpu + offset, viewport_vec4, sizeof(viewport_vec4));
                                offset += sizeof(viewport_vec4);
                        }

                        /* Upload uniforms */
                        memcpy(transfer.cpu + offset, buf->buffer, buf->size);

                        int uniform_count = 0;

                        struct mali_vertex_tiler_postfix *postfix;

                        switch (i) {
                        case PIPE_SHADER_VERTEX:
                                uniform_count = ctx->vs->variants[ctx->vs->active_variant].uniform_count;
                                postfix = &ctx->payload_vertex.postfix;
                                break;

                        case PIPE_SHADER_FRAGMENT:
                                uniform_count = ctx->fs->variants[ctx->fs->active_variant].uniform_count;
                                postfix = &ctx->payload_tiler.postfix;
                                break;

                        default:
                                printf("Unknown shader stage %d in uniform upload\n", i);
                                assert(0);
                        }

                        /* Also attach the same buffer as a UBO for extended access */

                        struct mali_uniform_buffer_meta uniform_buffers[] = {
                                {
                                        .size = MALI_POSITIVE((2 + uniform_count)),
                                        .ptr = transfer.gpu >> 2,
                                },
                        };

                        mali_ptr ubufs = panfrost_upload_transient(ctx, uniform_buffers, sizeof(uniform_buffers));
                        postfix->uniforms = transfer.gpu;
                        postfix->uniform_buffers = ubufs;
                }
        }

        ctx->dirty = 0;
}
/* Corresponds to exactly one draw, but does not submit anything */

static void
panfrost_queue_draw(struct panfrost_context *ctx)
{
        /* TODO: Expand the array? */
        if (ctx->draw_count >= MAX_DRAW_CALLS) {
                printf("Job buffer overflow, ignoring draw\n");
                assert(0);
        }

        /* Handle dirty flags now */
        panfrost_emit_for_draw(ctx, true);

        struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false, false);
        struct panfrost_transfer tiler = panfrost_vertex_tiler_job(ctx, true, false);

        ctx->u_vertex_jobs[ctx->vertex_job_count] = (struct mali_job_descriptor_header *) vertex.cpu;
        ctx->vertex_jobs[ctx->vertex_job_count++] = vertex.gpu;

        ctx->u_tiler_jobs[ctx->tiler_job_count] = (struct mali_job_descriptor_header *) tiler.cpu;
        ctx->tiler_jobs[ctx->tiler_job_count++] = tiler.gpu;

        ctx->draw_count++;
}
/* At the end of the frame, the vertex and tiler jobs are linked together and
 * then the fragment job is plonked at the end. Set value job is first for
 * unknown reasons. */
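/* The resulting chain for a frame with n draws is thus:
 *
 *   set_value -> vertex_1 -> ... -> vertex_n -> tiler_1 -> ... -> tiler_n
 */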

static void
panfrost_link_job_pair(struct mali_job_descriptor_header *first, mali_ptr next)
{
        if (first->job_descriptor_size)
                first->next_job_64 = (u64) (uintptr_t) next;
        else
                first->next_job_32 = (u32) (uintptr_t) next;
}

static void
panfrost_link_jobs(struct panfrost_context *ctx)
{
        if (ctx->draw_count) {
                /* Generate the set_value_job */
                panfrost_set_value_job(ctx);

                /* Have the first vertex job depend on the set value job */
                ctx->u_vertex_jobs[0]->job_dependency_index_1 = ctx->u_set_value_job->job_index;

                panfrost_link_job_pair(ctx->u_set_value_job, ctx->vertex_jobs[0]);
        }

        /* V -> V/T ; T -> T/null */
        for (int i = 0; i < ctx->vertex_job_count; ++i) {
                bool isLast = (i + 1) == ctx->vertex_job_count;

                panfrost_link_job_pair(ctx->u_vertex_jobs[i], isLast ? ctx->tiler_jobs[0] : ctx->vertex_jobs[i + 1]);
        }

        for (int i = 0; i < ctx->tiler_job_count; ++i) {
                bool isLast = (i + 1) == ctx->tiler_job_count;
                panfrost_link_job_pair(ctx->u_tiler_jobs[i], isLast ? 0 : ctx->tiler_jobs[i + 1]);
        }
}
/* The entire frame is in memory -- send it off to the kernel! */

static void
panfrost_submit_frame(struct panfrost_context *ctx, bool flush_immediate,
                      struct pipe_fence_handle **fence)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);

        /* Edge case if screen is cleared and nothing else */
        bool has_draws = ctx->draw_count > 0;

        /* Workaround a bizarre lockup (a hardware errata?) */
        if (!has_draws)
                flush_immediate = true;

        /* A number of jobs are batched -- this must be linked and cleared */
        panfrost_link_jobs(ctx);

        ctx->draw_count = 0;
        ctx->vertex_job_count = 0;
        ctx->tiler_job_count = 0;

#ifndef DRY_RUN
        bool is_scanout = panfrost_is_scanout(ctx);
        int fragment_id = screen->driver->submit_vs_fs_job(ctx, has_draws, is_scanout);

        /* If visual, we can stall a frame */

        if (!flush_immediate)
                screen->driver->force_flush_fragment(ctx, fence);

        screen->last_fragment_id = fragment_id;
        screen->last_fragment_flushed = false;

        /* If readback, flush now (hurts the pipelined performance) */
        if (flush_immediate)
                screen->driver->force_flush_fragment(ctx, fence);

        if (screen->driver->dump_counters && pan_counters_base) {
                screen->driver->dump_counters(screen);

                char filename[128];
                snprintf(filename, sizeof(filename), "%s/frame%d.mdgprf", pan_counters_base, ++performance_counter_number);
                FILE *fp = fopen(filename, "wb");
                fwrite(screen->perf_counters.cpu, 4096, sizeof(uint32_t), fp);
                fclose(fp);
        }
#endif
}
static void
panfrost_flush(
        struct pipe_context *pipe,
        struct pipe_fence_handle **fence,
        unsigned flags)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);

        /* Nothing to do! */
        if (!ctx->draw_count && !job->clear) return;

        /* Whether to stall the pipeline for immediately correct results */
        bool flush_immediate = flags & PIPE_FLUSH_END_OF_FRAME;

        /* Submit the frame itself */
        panfrost_submit_frame(ctx, flush_immediate, fence);

        /* Prepare for the next frame */
        panfrost_invalidate_frame(ctx);
}
#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c;

static int
g2m_draw_mode(enum pipe_prim_type mode)
{
        switch (mode) {
                DEFINE_CASE(POINTS);
                DEFINE_CASE(LINES);
                DEFINE_CASE(LINE_LOOP);
                DEFINE_CASE(LINE_STRIP);
                DEFINE_CASE(TRIANGLES);
                DEFINE_CASE(TRIANGLE_STRIP);
                DEFINE_CASE(TRIANGLE_FAN);
                DEFINE_CASE(QUADS);
                DEFINE_CASE(QUAD_STRIP);
                DEFINE_CASE(POLYGON);

        default:
                printf("Illegal draw mode %d\n", mode);
                assert(0);
                return MALI_LINE_LOOP;
        }
}

#undef DEFINE_CASE
static unsigned
panfrost_translate_index_size(unsigned size)
{
        switch (size) {
        case 1:
                return MALI_DRAW_INDEXED_UINT8;

        case 2:
                return MALI_DRAW_INDEXED_UINT16;

        case 4:
                return MALI_DRAW_INDEXED_UINT32;

        default:
                printf("Unknown index size %d\n", size);
                assert(0);
                return 0;
        }
}
static const uint8_t *
panfrost_get_index_buffer_raw(const struct pipe_draw_info *info)
{
        if (info->has_user_indices) {
                return (const uint8_t *) info->index.user;
        } else {
                struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);
                return (const uint8_t *) rsrc->bo->cpu[0];
        }
}
/* Gets a GPU address for the associated index buffer. Only guaranteed to be
 * good for the duration of the draw (transient), could last longer */

static mali_ptr
panfrost_get_index_buffer_mapped(struct panfrost_context *ctx, const struct pipe_draw_info *info)
{
        struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);

        off_t offset = info->start * info->index_size;

        if (!info->has_user_indices) {
                /* Only resources can be directly mapped */
                return rsrc->bo->gpu[0] + offset;
        } else {
                /* Otherwise, we need to upload to transient memory */
                const uint8_t *ibuf8 = panfrost_get_index_buffer_raw(info);
                return panfrost_upload_transient(ctx, ibuf8 + offset, info->count * info->index_size);
        }
}
#define CALCULATE_MIN_MAX_INDEX(T, buffer, start, count) \
        for (unsigned _idx = (start); _idx < (start + count); ++_idx) { \
                T idx = buffer[_idx]; \
                if (idx > max_index) max_index = idx; \
                if (idx < min_index) min_index = idx; \
        }
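
/* Note the macro expands in place and relies on min_index/max_index
 * variables already being declared in the enclosing scope, as in the
 * indexed-draw path of panfrost_draw_vbo below */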

static void
panfrost_draw_vbo(
        struct pipe_context *pipe,
        const struct pipe_draw_info *info)
{
        struct panfrost_context *ctx = pan_context(pipe);

        ctx->payload_vertex.draw_start = info->start;
        ctx->payload_tiler.draw_start = info->start;

        int mode = info->mode;

        /* Fallback for unsupported modes */

        if (!(ctx->draw_modes & (1 << mode))) {
                if (mode == PIPE_PRIM_QUADS && info->count == 4 && ctx->rasterizer && !ctx->rasterizer->base.flatshade) {
                        mode = PIPE_PRIM_TRIANGLE_FAN;
                } else {
                        if (info->count < 4) {
                                /* Degenerate case? */
                                return;
                        }

                        util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
                        util_primconvert_draw_vbo(ctx->primconvert, info);
                        return;
                }
        }

        /* Now that we have a guaranteed terminating path, find the job.
         * Assignment commented out to prevent unused warning */

        /* struct panfrost_job *job = */ panfrost_get_job_for_fbo(ctx);

        ctx->payload_tiler.prefix.draw_mode = g2m_draw_mode(mode);

        ctx->vertex_count = info->count;

        /* For non-indexed draws, they're the same */
        unsigned invocation_count = ctx->vertex_count;

        /* For higher amounts of vertices (greater than what fits in a 16-bit
         * short), the other value is needed, otherwise there will be bizarre
         * rendering artefacts. It's not clear what these values mean yet. */

        ctx->payload_tiler.prefix.unknown_draw &= ~(0x3000 | 0x18000);
        ctx->payload_tiler.prefix.unknown_draw |= (mode == PIPE_PRIM_POINTS || ctx->vertex_count > 65535) ? 0x3000 : 0x18000;

        if (info->index_size) {
                /* Calculate the min/max index used so we can figure out how
                 * many times to invoke the vertex shader */

                const uint8_t *ibuf8 = panfrost_get_index_buffer_raw(info);

                int min_index = INT_MAX;
                int max_index = 0;

                if (info->index_size == 1) {
                        CALCULATE_MIN_MAX_INDEX(uint8_t, ibuf8, info->start, info->count);
                } else if (info->index_size == 2) {
                        const uint16_t *ibuf16 = (const uint16_t *) ibuf8;
                        CALCULATE_MIN_MAX_INDEX(uint16_t, ibuf16, info->start, info->count);
                } else if (info->index_size == 4) {
                        const uint32_t *ibuf32 = (const uint32_t *) ibuf8;
                        CALCULATE_MIN_MAX_INDEX(uint32_t, ibuf32, info->start, info->count);
                }

                /* Make sure we didn't go crazy */
                assert(min_index < INT_MAX);
                assert(max_index > 0);
                assert(max_index > min_index);

                /* Use the corresponding values */
                invocation_count = max_index - min_index + 1;
                ctx->payload_vertex.draw_start = min_index;
                ctx->payload_tiler.draw_start = min_index;

                ctx->payload_tiler.prefix.negative_start = -min_index;
                ctx->payload_tiler.prefix.index_count = MALI_POSITIVE(info->count);

                //assert(!info->restart_index); /* TODO: Research */
                assert(!info->index_bias);
                //assert(!info->min_index); /* TODO: Use value */

                ctx->payload_tiler.prefix.unknown_draw |= panfrost_translate_index_size(info->index_size);
                ctx->payload_tiler.prefix.indices = panfrost_get_index_buffer_mapped(ctx, info);
        } else {
                /* Index count == vertex count, if no indexing is applied, as
                 * if it is internally indexed in the expected order */

                ctx->payload_tiler.prefix.negative_start = 0;
                ctx->payload_tiler.prefix.index_count = MALI_POSITIVE(ctx->vertex_count);

                /* Reverse index state */
                ctx->payload_tiler.prefix.unknown_draw &= ~MALI_DRAW_INDEXED_UINT32;
                ctx->payload_tiler.prefix.indices = (uintptr_t) NULL;
        }

        ctx->payload_vertex.prefix.invocation_count = MALI_POSITIVE(invocation_count);
        ctx->payload_tiler.prefix.invocation_count = MALI_POSITIVE(invocation_count);

        /* Fire off the draw itself */
        panfrost_queue_draw(ctx);
}
static void
panfrost_generic_cso_delete(struct pipe_context *pctx, void *hwcso)
{
        free(hwcso);
}
static void
panfrost_set_scissor(struct panfrost_context *ctx)
{
        const struct pipe_scissor_state *ss = &ctx->scissor;

        if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor && 0) {
                ctx->viewport->viewport0[0] = ss->minx;
                ctx->viewport->viewport0[1] = ss->miny;
                ctx->viewport->viewport1[0] = MALI_POSITIVE(ss->maxx);
                ctx->viewport->viewport1[1] = MALI_POSITIVE(ss->maxy);
        } else {
                ctx->viewport->viewport0[0] = 0;
                ctx->viewport->viewport0[1] = 0;
                ctx->viewport->viewport1[0] = MALI_POSITIVE(ctx->pipe_framebuffer.width);
                ctx->viewport->viewport1[1] = MALI_POSITIVE(ctx->pipe_framebuffer.height);
        }
}
static void *
panfrost_create_rasterizer_state(
        struct pipe_context *pctx,
        const struct pipe_rasterizer_state *cso)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_rasterizer *so = CALLOC_STRUCT(panfrost_rasterizer);

        so->base = *cso;

        /* Bitmask, unknown meaning of the start value */
        so->tiler_gl_enables = ctx->is_t6xx ? 0x105 : 0x7;

        so->tiler_gl_enables |= MALI_FRONT_FACE(
                                        cso->front_ccw ? MALI_CCW : MALI_CW);

        if (cso->cull_face & PIPE_FACE_FRONT)
                so->tiler_gl_enables |= MALI_CULL_FACE_FRONT;

        if (cso->cull_face & PIPE_FACE_BACK)
                so->tiler_gl_enables |= MALI_CULL_FACE_BACK;

        return so;
}
static void
panfrost_bind_rasterizer_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct pipe_rasterizer_state *cso = hwcso;

        /* TODO: Why can't rasterizer be NULL ever? Other drivers are fine.. */
        if (!hwcso)
                return;

        /* If scissor test has changed, we'll need to update that now */
        bool update_scissor = !ctx->rasterizer || ctx->rasterizer->base.scissor != cso->scissor;

        ctx->rasterizer = hwcso;

        /* Actualise late changes */
        if (update_scissor)
                panfrost_set_scissor(ctx);

        ctx->dirty |= PAN_DIRTY_RASTERIZER;
}
static void *
panfrost_create_vertex_elements_state(
        struct pipe_context *pctx,
        unsigned num_elements,
        const struct pipe_vertex_element *elements)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state);

        so->num_elements = num_elements;
        memcpy(so->pipe, elements, sizeof(*elements) * num_elements);

        /* Allocate memory for the descriptor state */
        struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_attr_meta) * num_elements, HEAP_DESCRIPTOR);
        so->hw = (struct mali_attr_meta *) transfer.cpu;
        so->descriptor_ptr = transfer.gpu;

        for (int i = 0; i < num_elements; ++i) {
                so->hw[i].index = elements[i].vertex_buffer_index;

                enum pipe_format fmt = elements[i].src_format;
                const struct util_format_description *desc = util_format_description(fmt);
                so->hw[i].unknown1 = 0x2;
                so->hw[i].swizzle = panfrost_get_default_swizzle(desc->nr_channels);

                so->hw[i].format = panfrost_find_format(desc);

                /* The field itself should probably be shifted over */
                so->hw[i].src_offset = elements[i].src_offset;
        }

        return so;
}
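/* For reference, a typical Gallium caller (illustrative only) creates a
 * two-attribute layout -- position then colour, interleaved in buffer 0 --
 * roughly as:
 *
 *     struct pipe_vertex_element elems[2] = {
 *             { .src_offset = 0,  .vertex_buffer_index = 0,
 *               .src_format = PIPE_FORMAT_R32G32B32_FLOAT },
 *             { .src_offset = 12, .vertex_buffer_index = 0,
 *               .src_format = PIPE_FORMAT_R32G32B32A32_FLOAT },
 *     };
 *     void *cso = pctx->create_vertex_elements_state(pctx, 2, elems);
 *     pctx->bind_vertex_elements_state(pctx, cso);
 *
 * The translation to mali_attr_meta happens once here, so binding is just a
 * pointer swap. */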
static void
panfrost_bind_vertex_elements_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        ctx->vertex = hwcso;
        ctx->dirty |= PAN_DIRTY_VERTEX;
}
static void
panfrost_delete_vertex_elements_state(struct pipe_context *pctx, void *hwcso)
{
        struct panfrost_vertex_state *so = (struct panfrost_vertex_state *) hwcso;
        unsigned bytes = sizeof(struct mali_attr_meta) * so->num_elements;
        printf("Vertex elements delete leaks descriptor (%d bytes)\n", bytes);
        free(hwcso);
}
static void *
panfrost_create_shader_state(
        struct pipe_context *pctx,
        const struct pipe_shader_state *cso)
{
        struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
        so->base = *cso;

        /* Token deep copy to prevent memory corruption */

        if (cso->type == PIPE_SHADER_IR_TGSI)
                so->base.tokens = tgsi_dup_tokens(so->base.tokens);

        return so;
}
static void
panfrost_delete_shader_state(
        struct pipe_context *pctx,
        void *so)
{
        struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;

        if (cso->base.type == PIPE_SHADER_IR_TGSI) {
                printf("Deleting TGSI shader leaks duplicated tokens\n");
        }

        unsigned leak = cso->variant_count * sizeof(struct mali_shader_meta);
        printf("Deleting shader state leaks descriptors (%d bytes), and shader bytecode\n", leak);

        free(so);
}
static void *
panfrost_create_sampler_state(
        struct pipe_context *pctx,
        const struct pipe_sampler_state *cso)
{
        struct panfrost_sampler_state *so = CALLOC_STRUCT(panfrost_sampler_state);
        so->base = *cso;

        /* sampler_state corresponds to mali_sampler_descriptor, which we can generate entirely here */

        struct mali_sampler_descriptor sampler_descriptor = {
                .filter_mode = MALI_TEX_MIN(translate_tex_filter(cso->min_img_filter))
                               | MALI_TEX_MAG(translate_tex_filter(cso->mag_img_filter))
                               | translate_mip_filter(cso->min_mip_filter),

                .wrap_s = translate_tex_wrap(cso->wrap_s),
                .wrap_t = translate_tex_wrap(cso->wrap_t),
                .wrap_r = translate_tex_wrap(cso->wrap_r),
                .compare_func = panfrost_translate_alt_compare_func(cso->compare_func),

                .border_color = {
                        cso->border_color.f[0],
                        cso->border_color.f[1],
                        cso->border_color.f[2],
                        cso->border_color.f[3]
                },

                .min_lod = FIXED_16(0.0),
                .max_lod = FIXED_16(31.0),
        };

        so->hw = sampler_descriptor;

        return so;
}
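/* FIXED_16 is assumed to encode LODs as 8:8 fixed point, i.e. roughly
 * (int) (lod * 256), so the clamp above spans the whole representable range:
 * FIXED_16(0.0) = 0 up to FIXED_16(31.0) = 7936. */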
static void
panfrost_bind_sampler_states(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_sampler,
        void **sampler)
{
        assert(start_slot == 0);

        struct panfrost_context *ctx = pan_context(pctx);

        /* XXX: Should upload, not just copy? */
        ctx->sampler_count[shader] = num_sampler;
        memcpy(ctx->samplers[shader], sampler, num_sampler * sizeof (void *));

        ctx->dirty |= PAN_DIRTY_SAMPLERS;
}
static bool
panfrost_variant_matches(struct panfrost_context *ctx, struct panfrost_shader_state *variant)
{
        struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;

        if (alpha->enabled || variant->alpha_state.enabled) {
                /* Make sure enable state is at least the same */
                if (alpha->enabled != variant->alpha_state.enabled) {
                        return false;
                }

                /* Check that the contents of the test are the same */
                bool same_func = alpha->func == variant->alpha_state.func;
                bool same_ref = alpha->ref_value == variant->alpha_state.ref_value;

                if (!(same_func && same_ref)) {
                        return false;
                }
        }

        /* Otherwise, we're good to go */
        return true;
}
static void
panfrost_bind_fs_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        ctx->fs = hwcso;

        if (!hwcso) return;

        /* Match the appropriate variant */

        signed variant = -1;

        struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;

        for (unsigned i = 0; i < variants->variant_count; ++i) {
                if (panfrost_variant_matches(ctx, &variants->variants[i])) {
                        variant = i;
                        break;
                }
        }

        if (variant == -1) {
                /* No variant matched, so create a new one */
                variant = variants->variant_count++;
                assert(variants->variant_count < MAX_SHADER_VARIANTS);

                variants->variants[variant].base = hwcso;
                variants->variants[variant].alpha_state = ctx->depth_stencil->alpha;

                /* Allocate the mapped descriptor ahead-of-time. TODO: Use for FS as well as VS */
                struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR);

                variants->variants[variant].tripipe = (struct mali_shader_meta *) transfer.cpu;
                variants->variants[variant].tripipe_gpu = transfer.gpu;
        }

        /* Select this variant */
        variants->active_variant = variant;

        struct panfrost_shader_state *shader_state = &variants->variants[variant];
        assert(panfrost_variant_matches(ctx, shader_state));

        /* Now we have a variant selected, so compile and go */

        if (!shader_state->compiled) {
                panfrost_shader_compile(ctx, shader_state->tripipe, NULL, JOB_TYPE_TILER, shader_state);
                shader_state->compiled = true;
        }

        ctx->dirty |= PAN_DIRTY_FS;
}
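/* Example of the variant cache in action: binding a fragment shader with the
 * alpha test disabled compiles variant 0. If the alpha test is later enabled
 * (panfrost_bind_depth_stencil_state re-binds ctx->fs), variant 0 fails
 * panfrost_variant_matches and a second variant is compiled; from then on,
 * toggling the test only flips active_variant between the two cached
 * descriptors, with no recompilation. */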
static void
panfrost_bind_vs_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        ctx->vs = hwcso;

        if (!hwcso) return;

        if (!ctx->vs->variants[0].compiled) {
                ctx->vs->variants[0].base = hwcso;

                /* TODO DRY from above */
                struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR);
                ctx->vs->variants[0].tripipe = (struct mali_shader_meta *) transfer.cpu;
                ctx->vs->variants[0].tripipe_gpu = transfer.gpu;

                panfrost_shader_compile(ctx, ctx->vs->variants[0].tripipe, NULL, JOB_TYPE_VERTEX, &ctx->vs->variants[0]);
                ctx->vs->variants[0].compiled = true;
        }

        ctx->dirty |= PAN_DIRTY_VS;
}
static void
panfrost_set_vertex_buffers(
        struct pipe_context *pctx,
        unsigned start_slot,
        unsigned num_buffers,
        const struct pipe_vertex_buffer *buffers)
{
        struct panfrost_context *ctx = pan_context(pctx);
        assert(num_buffers <= PIPE_MAX_ATTRIBS);

        /* XXX: Dirty tracking? etc */
        if (buffers) {
                size_t sz = sizeof(buffers[0]) * num_buffers;
                ctx->vertex_buffers = malloc(sz);
                ctx->vertex_buffer_count = num_buffers;
                memcpy(ctx->vertex_buffers, buffers, sz);
        } else {
                if (ctx->vertex_buffers) {
                        free(ctx->vertex_buffers);
                        ctx->vertex_buffers = NULL;
                }

                ctx->vertex_buffer_count = 0;
        }
}
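/* Since the buffers array is deep-copied above, callers may pass a
 * stack-allocated pipe_vertex_buffer array; only the underlying
 * pipe_resource objects it references are assumed to need to outlive the
 * draws that use them. */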
static void
panfrost_set_constant_buffer(
        struct pipe_context *pctx,
        enum pipe_shader_type shader, uint index,
        const struct pipe_constant_buffer *buf)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];

        size_t sz = buf ? buf->buffer_size : 0;

        /* Free previous buffer */
        if (pbuf->buffer) {
                free(pbuf->buffer);
                pbuf->buffer = NULL;
        }

        /* If unbinding, we're done */
        if (!buf)
                return;

        /* Multiple constant buffers not yet supported */
        assert(index == 0);

        const uint8_t *cpu;

        struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer);

        if (rsrc) {
                cpu = rsrc->bo->cpu[0];
        } else if (buf->user_buffer) {
                cpu = buf->user_buffer;
        } else {
                printf("No constant buffer?\n");
                return;
        }

        /* Copy the constant buffer into the driver context for later upload */
        pbuf->buffer = malloc(sz);
        memcpy(pbuf->buffer, cpu + buf->buffer_offset, sz);
}
static void
panfrost_set_stencil_ref(
        struct pipe_context *pctx,
        const struct pipe_stencil_ref *ref)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->stencil_ref = *ref;

        /* Shader core dirty */
        ctx->dirty |= PAN_DIRTY_FS;
}
static struct pipe_sampler_view *
panfrost_create_sampler_view(
        struct pipe_context *pctx,
        struct pipe_resource *texture,
        const struct pipe_sampler_view *template)
{
        struct panfrost_sampler_view *so = CALLOC_STRUCT(panfrost_sampler_view);
        int bytes_per_pixel = util_format_get_blocksize(texture->format);

        pipe_reference(NULL, &texture->reference);

        struct panfrost_resource *prsrc = (struct panfrost_resource *) texture;

        so->base = *template;
        so->base.texture = texture;
        so->base.reference.count = 1;
        so->base.context = pctx;

        /* sampler_views correspond to texture descriptors, minus the texture
         * (data) itself. So, we serialise the descriptor here and cache it for
         * later use */

        /* TODO: Other types of textures */
        assert(template->target == PIPE_TEXTURE_2D);

        /* Make sure it's something with which we're familiar */
        assert(bytes_per_pixel >= 1 && bytes_per_pixel <= 4);

        /* TODO: Detect from format better */
        const struct util_format_description *desc = util_format_description(prsrc->base.format);

        unsigned char user_swizzle[4] = {
                template->swizzle_r,
                template->swizzle_g,
                template->swizzle_b,
                template->swizzle_a
        };

        enum mali_format format = panfrost_find_format(desc);

        struct mali_texture_descriptor texture_descriptor = {
                .width = MALI_POSITIVE(texture->width0),
                .height = MALI_POSITIVE(texture->height0),
                .depth = MALI_POSITIVE(texture->depth0),

                .format = {
                        .swizzle = panfrost_translate_swizzle_4(desc->swizzle),
                        .format = format,

                        .is_not_cubemap = 1,

                        /* 0x11 - regular texture 2d, uncompressed tiled */
                        /* 0x12 - regular texture 2d, uncompressed linear */
                        /* 0x1c - AFBC compressed (internally tiled, probably) texture 2D */
                        .usage2 = prsrc->bo->has_afbc ? 0x1c : (prsrc->bo->tiled ? 0x11 : 0x12),
                },

                .swizzle = panfrost_translate_swizzle_4(user_swizzle)
        };

        /* TODO: Other base levels require adjusting dimensions / level numbers / etc */
        assert(template->u.tex.first_level == 0);

        texture_descriptor.nr_mipmap_levels = template->u.tex.last_level - template->u.tex.first_level;

        so->hw = texture_descriptor;

        return (struct pipe_sampler_view *) so;
}
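/* Worked example: a 256x256 texture with a full mip chain has
 * u.tex.last_level = 8, so with first_level pinned to 0 the descriptor
 * stores nr_mipmap_levels = 8 -- the level count minus one, matching the
 * off-by-one MALI_POSITIVE convention used for the dimensions. */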
static void
panfrost_set_sampler_views(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_views,
        struct pipe_sampler_view **views)
{
        struct panfrost_context *ctx = pan_context(pctx);

        assert(start_slot == 0);

        ctx->sampler_view_count[shader] = num_views;
        memcpy(ctx->sampler_views[shader], views, num_views * sizeof (void *));

        ctx->dirty |= PAN_DIRTY_TEXTURES;
}
static void
panfrost_sampler_view_destroy(
        struct pipe_context *pctx,
        struct pipe_sampler_view *views)
{
        //struct panfrost_context *ctx = pan_context(pctx);

        free(views);
}
static void
panfrost_set_framebuffer_state(struct pipe_context *pctx,
                               const struct pipe_framebuffer_state *fb)
{
        struct panfrost_context *ctx = pan_context(pctx);

        /* Flush when switching away from an FBO */

        if (!panfrost_is_scanout(ctx)) {
                panfrost_flush(pctx, NULL, 0);
        }

        ctx->pipe_framebuffer.nr_cbufs = fb->nr_cbufs;
        ctx->pipe_framebuffer.samples = fb->samples;
        ctx->pipe_framebuffer.layers = fb->layers;
        ctx->pipe_framebuffer.width = fb->width;
        ctx->pipe_framebuffer.height = fb->height;

        for (int i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
                struct pipe_surface *cb = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;

                /* check if changing cbuf */
                if (ctx->pipe_framebuffer.cbufs[i] == cb) continue;

                if (cb && (i != 0)) {
                        printf("XXX: Multiple render targets not supported before t7xx!\n");
                        assert(0);
                }

                pipe_surface_reference(&ctx->pipe_framebuffer.cbufs[i], cb);

                if (!cb)
                        continue;

                if (ctx->require_sfbd)
                        ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
                else
                        ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);

                panfrost_attach_vt_framebuffer(ctx);
                panfrost_new_frag_framebuffer(ctx);
                panfrost_set_scissor(ctx);

                struct panfrost_resource *tex = ((struct panfrost_resource *) ctx->pipe_framebuffer.cbufs[i]->texture);
                bool is_scanout = panfrost_is_scanout(ctx);

                if (!is_scanout && !tex->bo->has_afbc) {
                        /* The blob is aggressive about enabling AFBC. As such,
                         * it's pretty much necessary to use it here, since we
                         * have no traces of non-compressed FBO. */

                        panfrost_enable_afbc(ctx, tex, false);
                }

                if (!is_scanout && !tex->bo->has_checksum) {
                        /* Enable transaction elimination if we can */
                        panfrost_enable_checksum(ctx, tex);
                }
        }

        {
                struct pipe_surface *zb = fb->zsbuf;

                if (ctx->pipe_framebuffer.zsbuf != zb) {
                        pipe_surface_reference(&ctx->pipe_framebuffer.zsbuf, zb);

                        if (zb) {
                                if (ctx->require_sfbd)
                                        ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
                                else
                                        ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);

                                panfrost_attach_vt_framebuffer(ctx);
                                panfrost_new_frag_framebuffer(ctx);
                                panfrost_set_scissor(ctx);

                                struct panfrost_resource *tex = ((struct panfrost_resource *) ctx->pipe_framebuffer.zsbuf->texture);

                                if (!tex->bo->has_afbc && !panfrost_is_scanout(ctx))
                                        panfrost_enable_afbc(ctx, tex, true);
                        }
                }
        }
}
static void *
panfrost_create_blend_state(struct pipe_context *pipe,
                            const struct pipe_blend_state *blend)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_blend_state *so = CALLOC_STRUCT(panfrost_blend_state);
        so->base = *blend;

        /* TODO: The following features are not yet implemented */
        assert(!blend->logicop_enable);
        assert(!blend->alpha_to_coverage);
        assert(!blend->alpha_to_one);

        /* Compile the blend state, first as fixed-function if we can */

        if (panfrost_make_fixed_blend_mode(&blend->rt[0], &so->equation, blend->rt[0].colormask, &ctx->blend_color))
                return so;

        /* If we can't, compile a blend shader instead */

        panfrost_make_blend_shader(ctx, so, &ctx->blend_color);

        return so;
}
static void
panfrost_bind_blend_state(struct pipe_context *pipe,
                          void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct pipe_blend_state *blend = (struct pipe_blend_state *) cso;
        struct panfrost_blend_state *pblend = (struct panfrost_blend_state *) cso;
        ctx->blend = pblend;

        if (!blend)
                return;

        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_DITHER, !blend->dither);

        /* TODO: Attach color */

        /* Shader itself is not dirty, but the shader core is */
        ctx->dirty |= PAN_DIRTY_FS;
}
static void
panfrost_delete_blend_state(struct pipe_context *pipe,
                            void *blend)
{
        struct panfrost_blend_state *so = (struct panfrost_blend_state *) blend;

        if (so->has_blend_shader) {
                printf("Deleting blend state leaks blend shader bytecode\n");
        }

        free(blend);
}
static void
panfrost_set_blend_color(struct pipe_context *pipe,
                         const struct pipe_blend_color *blend_color)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* If blend_color is NULL, we're unbinding, so ctx->blend_color is now undefined -> nothing to do */

        if (blend_color) {
                ctx->blend_color = *blend_color;

                /* The blend mode depends on the blend constant color, due to the
                 * fixed/programmable split. So, we're forced to regenerate the blend
                 * equation */

                /* TODO: Attach color */
        }
}
static void *
panfrost_create_depth_stencil_state(struct pipe_context *pipe,
                                    const struct pipe_depth_stencil_alpha_state *depth_stencil)
{
        return mem_dup(depth_stencil, sizeof(*depth_stencil));
}
static void
panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
                                  void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct pipe_depth_stencil_alpha_state *depth_stencil = cso;
        ctx->depth_stencil = depth_stencil;

        if (!depth_stencil)
                return;

        /* Alpha does not exist in the hardware (it's not in ES3), so it's
         * emulated in the fragment shader */

        if (depth_stencil->alpha.enabled) {
                /* We need to trigger a new shader (maybe) */
                ctx->base.bind_fs_state(&ctx->base, ctx->fs);
        }

        /* Stencil state */
        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_STENCIL_TEST, depth_stencil->stencil[0].enabled); /* XXX: which one? */

        panfrost_make_stencil_state(&depth_stencil->stencil[0], &ctx->fragment_shader_core.stencil_front);
        ctx->fragment_shader_core.stencil_mask_front = depth_stencil->stencil[0].writemask;

        panfrost_make_stencil_state(&depth_stencil->stencil[1], &ctx->fragment_shader_core.stencil_back);
        ctx->fragment_shader_core.stencil_mask_back = depth_stencil->stencil[1].writemask;

        /* Depth state (TODO: Refactor) */
        SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_DEPTH_TEST, depth_stencil->depth.enabled);

        int func = depth_stencil->depth.enabled ? depth_stencil->depth.func : PIPE_FUNC_ALWAYS;

        ctx->fragment_shader_core.unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
        ctx->fragment_shader_core.unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(func));

        /* Bounds test not implemented */
        assert(!depth_stencil->depth.bounds_test);

        ctx->dirty |= PAN_DIRTY_FS;
}
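/* Worked example of the depth-func packing above: with the depth test
 * disabled, func falls back to PIPE_FUNC_ALWAYS, so after the mask-and-set
 * sequence the shader core carries MALI_DEPTH_FUNC(always) and every
 * fragment passes, regardless of the stale bits that were cleared. */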
static void
panfrost_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
{
        free(depth);
}
static void
panfrost_set_sample_mask(struct pipe_context *pipe,
                         unsigned sample_mask)
{
}
static void
panfrost_set_clip_state(struct pipe_context *pipe,
                        const struct pipe_clip_state *clip)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}
static void
panfrost_set_viewport_states(struct pipe_context *pipe,
                             unsigned start_slot,
                             unsigned num_viewports,
                             const struct pipe_viewport_state *viewports)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_viewports == 1);

        ctx->pipe_viewport = *viewports;

#if 0
        /* TODO: What if not centered? */
        float w = abs(viewports->scale[0]) * 2.0;
        float h = abs(viewports->scale[1]) * 2.0;

        ctx->viewport.viewport1[0] = MALI_POSITIVE((int) w);
        ctx->viewport.viewport1[1] = MALI_POSITIVE((int) h);
#endif
}
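/* Worked example of the (currently disabled) arithmetic above: a standard
 * 800x600 viewport at the origin arrives as scale = { 400.0, +/-300.0, ... },
 * giving w = 800 and h = 600, which would be stored as 799 and 599 under the
 * assumed MALI_POSITIVE(n) == n - 1 encoding. The translate component is
 * ignored, hence the TODO about non-centered viewports. */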
static void
panfrost_set_scissor_states(struct pipe_context *pipe,
                            unsigned start_slot,
                            unsigned num_scissors,
                            const struct pipe_scissor_state *scissors)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_scissors == 1);

        ctx->scissor = *scissors;

        panfrost_set_scissor(ctx);
}
static void
panfrost_set_polygon_stipple(struct pipe_context *pipe,
                             const struct pipe_poly_stipple *stipple)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}
static void
panfrost_set_active_query_state(struct pipe_context *pipe,
                                bool enable)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}
static void
panfrost_destroy(struct pipe_context *pipe)
{
        struct panfrost_context *panfrost = pan_context(pipe);
        struct panfrost_screen *screen = pan_screen(pipe->screen);

        if (panfrost->blitter)
                util_blitter_destroy(panfrost->blitter);

        screen->driver->free_slab(screen, &panfrost->scratchpad);
        screen->driver->free_slab(screen, &panfrost->varying_mem);
        screen->driver->free_slab(screen, &panfrost->shaders);
        screen->driver->free_slab(screen, &panfrost->tiler_heap);
        screen->driver->free_slab(screen, &panfrost->misc_0);
}
static struct pipe_query *
panfrost_create_query(struct pipe_context *pipe,
                      unsigned type,
                      unsigned index)
{
        struct panfrost_query *q = CALLOC_STRUCT(panfrost_query);

        q->type = type;
        q->index = index;

        return (struct pipe_query *) q;
}
static void
panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
        FREE(q);
}
static bool
panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
                /* Allocate a word for the query results to be stored */
                query->transfer = panfrost_allocate_chunk(ctx, sizeof(unsigned), HEAP_DESCRIPTOR);

                ctx->occlusion_query = query;
                break;
        }

        default:
                fprintf(stderr, "Skipping query %d\n", query->type);
                break;
        }

        return true;
}
static bool
panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->occlusion_query = NULL;
        return true;
}
static bool
panfrost_get_query_result(struct pipe_context *pipe,
                          struct pipe_query *q,
                          bool wait,
                          union pipe_query_result *vresult)
{
        struct panfrost_query *query = (struct panfrost_query *) q;

        /* We need to flush out the jobs to actually run the counter, TODO
         * check wait, TODO wallpaper after if needed */

        panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
                /* Read back the query results */
                unsigned *result = (unsigned *) query->transfer.cpu;
                unsigned passed = *result;

                if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
                        vresult->u64 = passed;
                } else {
                        vresult->b = !!passed;
                }

                break;
        }

        default:
                fprintf(stderr, "Skipped query get %d\n", query->type);
                break;
        }

        return true;
}
static void
panfrost_setup_hardware(struct panfrost_context *ctx)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);

        for (int i = 0; i < ARRAY_SIZE(ctx->transient_pools); ++i) {
                /* Allocate the beginning of the transient pool */
                int entry_size = (1 << 22); /* 4MB */

                ctx->transient_pools[i].entry_size = entry_size;
                ctx->transient_pools[i].entry_count = 1;

                ctx->transient_pools[i].entries[0] = (struct panfrost_memory_entry *) pb_slab_alloc(&screen->slabs, entry_size, HEAP_TRANSIENT);
        }

        screen->driver->allocate_slab(screen, &ctx->scratchpad, 64, false, 0, 0, 0);
        screen->driver->allocate_slab(screen, &ctx->varying_mem, 16384, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_COHERENT_LOCAL, 0, 0);
        screen->driver->allocate_slab(screen, &ctx->shaders, 4096, true, PAN_ALLOCATE_EXECUTE, 0, 0);
        screen->driver->allocate_slab(screen, &ctx->tiler_heap, 32768, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
        screen->driver->allocate_slab(screen, &ctx->misc_0, 128*128, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
}
/* New context creation, which also does hardware initialisation since I don't
 * know the better way to structure this :smirk: */

struct pipe_context *
panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
{
        struct panfrost_context *ctx = CALLOC_STRUCT(panfrost_context);
        struct panfrost_screen *pscreen = pan_screen(screen);
        memset(ctx, 0, sizeof(*ctx));
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        unsigned gpu_id;

        gpu_id = pscreen->driver->query_gpu_version(pscreen);

        ctx->is_t6xx = gpu_id <= 0x0750; /* For now, this flag means t76x or less */
        ctx->require_sfbd = gpu_id < 0x0750; /* t76x is the first to support MFBD */
        gallium->screen = screen;

        gallium->destroy = panfrost_destroy;

        gallium->set_framebuffer_state = panfrost_set_framebuffer_state;

        gallium->flush = panfrost_flush;
        gallium->clear = panfrost_clear;
        gallium->draw_vbo = panfrost_draw_vbo;

        gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
        gallium->set_constant_buffer = panfrost_set_constant_buffer;

        gallium->set_stencil_ref = panfrost_set_stencil_ref;

        gallium->create_sampler_view = panfrost_create_sampler_view;
        gallium->set_sampler_views = panfrost_set_sampler_views;
        gallium->sampler_view_destroy = panfrost_sampler_view_destroy;

        gallium->create_rasterizer_state = panfrost_create_rasterizer_state;
        gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
        gallium->delete_rasterizer_state = panfrost_generic_cso_delete;

        gallium->create_vertex_elements_state = panfrost_create_vertex_elements_state;
        gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
        gallium->delete_vertex_elements_state = panfrost_delete_vertex_elements_state;

        gallium->create_fs_state = panfrost_create_shader_state;
        gallium->delete_fs_state = panfrost_delete_shader_state;
        gallium->bind_fs_state = panfrost_bind_fs_state;

        gallium->create_vs_state = panfrost_create_shader_state;
        gallium->delete_vs_state = panfrost_delete_shader_state;
        gallium->bind_vs_state = panfrost_bind_vs_state;

        gallium->create_sampler_state = panfrost_create_sampler_state;
        gallium->delete_sampler_state = panfrost_generic_cso_delete;
        gallium->bind_sampler_states = panfrost_bind_sampler_states;

        gallium->create_blend_state = panfrost_create_blend_state;
        gallium->bind_blend_state = panfrost_bind_blend_state;
        gallium->delete_blend_state = panfrost_delete_blend_state;

        gallium->set_blend_color = panfrost_set_blend_color;

        gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
        gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
        gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;

        gallium->set_sample_mask = panfrost_set_sample_mask;

        gallium->set_clip_state = panfrost_set_clip_state;
        gallium->set_viewport_states = panfrost_set_viewport_states;
        gallium->set_scissor_states = panfrost_set_scissor_states;
        gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
        gallium->set_active_query_state = panfrost_set_active_query_state;

        gallium->create_query = panfrost_create_query;
        gallium->destroy_query = panfrost_destroy_query;
        gallium->begin_query = panfrost_begin_query;
        gallium->end_query = panfrost_end_query;
        gallium->get_query_result = panfrost_get_query_result;
        panfrost_resource_context_init(gallium);

        pscreen->driver->init_context(ctx);

        panfrost_setup_hardware(ctx);

        gallium->stream_uploader = u_upload_create_default(gallium);
        gallium->const_uploader = gallium->stream_uploader;
        assert(gallium->stream_uploader);

        /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */
        ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1;
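        /* Worked example: PIPE_PRIM_POLYGON is the last of the ten basic
         * Gallium primitive types (PIPE_PRIM_POINTS = 0 ... PIPE_PRIM_POLYGON
         * = 9), so this evaluates to (1 << 10) - 1 = 0x3ff: one accept bit per
         * primitive type. u_primconvert (created below) is assumed to lower
         * anything outside this set. */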
        ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);

        ctx->blitter = util_blitter_create(gallium);
        assert(ctx->blitter);

        /* Prepare for render! */

        panfrost_job_init(ctx);
        panfrost_emit_vertex_payload(ctx);
        panfrost_emit_tiler_payload(ctx);
        panfrost_invalidate_frame(ctx);
        panfrost_viewport(ctx, 0.0, 1.0, 0, 0, ctx->pipe_framebuffer.width, ctx->pipe_framebuffer.height);
        panfrost_default_shader_backend(ctx);
        panfrost_generate_space_filler_indices();

        return gallium;
}