/*
 * © Copyright 2018 Alyssa Rosenzweig
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "pan_context.h"
#include "pan_swizzle.h"
#include "pan_format.h"

#include "util/macros.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "util/u_memory.h"
#include "util/half_float.h"
#include "indices/u_primconvert.h"
#include "tgsi/tgsi_parse.h"

#include "pan_screen.h"
#include "pan_blending.h"
#include "pan_blend_shaders.h"
#include "pan_wallpaper.h"
static int performance_counter_number = 0;
extern const char *pan_counters_base;

/* Do not actually send anything to the GPU; merely generate the cmdstream as
 * fast as possible. Disables framebuffer writes */
//#define DRY_RUN

#define SET_BIT(lval, bit, cond) \
        if (cond) \
                lval |= (bit); \
        else \
                lval &= ~(bit);

/* TODO: Sample size, etc */

/* True for t6XX, false for t8xx. TODO: Run-time settable for automatic
 * hardware configuration. */

static bool is_t6xx = false;

/* If set, we'll require the use of single render-target framebuffer
 * descriptors (SFBD), for older hardware -- specifically, <T760 hardware. If
 * false, we'll use the MFBD no matter what. New hardware -does- retain support
 * for SFBD, and in theory we could flip between them on a per-RT basis, but
 * there's no real advantage to doing so */

static bool require_sfbd = false;
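/* Commentary (ours, not in the original): SET_BIT expands to a bare if/else
 * statement, not an expression, so every use below must stand as a complete
 * statement; nesting an invocation inside an unbraced outer if would attach
 * the macro's else-clause to the wrong condition. */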
static void
panfrost_set_framebuffer_msaa(struct panfrost_context *ctx, bool enabled)
{
        SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_HAS_MSAA, enabled);
        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !enabled);

        if (require_sfbd) {
                SET_BIT(ctx->fragment_sfbd.format, MALI_FRAMEBUFFER_MSAA_A | MALI_FRAMEBUFFER_MSAA_B, enabled);
        } else {
                SET_BIT(ctx->fragment_rts[0].format.flags, MALI_MFBD_FORMAT_MSAA, enabled);

                SET_BIT(ctx->fragment_mfbd.unk1, (1 << 4) | (1 << 1), enabled);

                ctx->fragment_mfbd.rt_count_2 = enabled ? 4 : 1;
        }
}
/* AFBC is enabled on a per-resource basis (AFBC enabling is theoretically
 * independent between color buffers and depth/stencil). To enable, we allocate
 * the AFBC metadata buffer and mark that it is enabled. We do -not- actually
 * edit the fragment job here. This routine should be called ONCE per
 * AFBC-compressed buffer, rather than on every frame. */

static void
panfrost_enable_afbc(struct panfrost_context *ctx, struct panfrost_resource *rsrc, bool ds)
{
        if (require_sfbd) {
                printf("AFBC not supported yet on SFBD\n");
                assert(0);
        }

        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);

        /* AFBC metadata is 16 bytes per tile */
        int tile_w = (rsrc->base.width0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
        int tile_h = (rsrc->base.height0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
        int bytes_per_pixel = util_format_get_blocksize(rsrc->base.format);
        int stride = bytes_per_pixel * rsrc->base.width0; /* TODO: Alignment? */

        stride *= 2;  /* TODO: Should this be carried over? */

        int main_size = stride * rsrc->base.height0;
        rsrc->bo->afbc_metadata_size = tile_w * tile_h * 16;

        /* Allocate the AFBC slab itself, large enough to hold the above */
        screen->driver->allocate_slab(screen, &rsrc->bo->afbc_slab,
                                      (rsrc->bo->afbc_metadata_size + main_size + 4095) / 4096,
                                      true, 0, 0, 0);

        rsrc->bo->has_afbc = true;

        /* Compressed textured reads use a tagged pointer to the metadata */

        rsrc->bo->gpu[0] = rsrc->bo->afbc_slab.gpu | (ds ? 0 : 1);
        rsrc->bo->cpu[0] = rsrc->bo->afbc_slab.cpu;
}
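/* Worked example (ours, assuming MALI_TILE_LENGTH is 16 and hence
 * MALI_TILE_SHIFT is 4): a 1920x1080 RGBA8 surface rounds up to 120x68
 * tiles, i.e. 8160 tiles * 16 bytes = 130560 bytes of AFBC metadata,
 * followed by the doubled-stride main body within the same slab. */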
static void
panfrost_enable_checksum(struct panfrost_context *ctx, struct panfrost_resource *rsrc)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);
        int tile_w = (rsrc->base.width0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
        int tile_h = (rsrc->base.height0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;

        /* 8 byte checksum per tile */
        rsrc->bo->checksum_stride = tile_w * 8;
        int pages = (((rsrc->bo->checksum_stride * tile_h) + 4095) / 4096);
        screen->driver->allocate_slab(screen, &rsrc->bo->checksum_slab, pages, false, 0, 0, 0);

        rsrc->bo->has_checksum = true;
}
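/* Worked example (ours, under the same 16x16-tile assumption as above): at
 * 1920x1080, tile_w = 120 and tile_h = 68, so checksum_stride = 960 bytes
 * and the slab needs (960 * 68 + 4095) / 4096 = 16 pages. */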
/* ..by contrast, this routine runs for every FRAGMENT job, but does no
 * allocation. AFBC is enabled on a per-surface basis */

static void
panfrost_set_fragment_afbc(struct panfrost_context *ctx)
{
        for (int cb = 0; cb < ctx->pipe_framebuffer.nr_cbufs; ++cb) {
                struct panfrost_resource *rsrc = (struct panfrost_resource *) ctx->pipe_framebuffer.cbufs[cb]->texture;

                /* Non-AFBC is the default */
                if (!rsrc->bo->has_afbc)
                        continue;

                if (require_sfbd) {
                        fprintf(stderr, "Color AFBC not supported on SFBD\n");
                        assert(0);
                }

                /* Enable AFBC for the render target */
                ctx->fragment_rts[0].afbc.metadata = rsrc->bo->afbc_slab.gpu;
                ctx->fragment_rts[0].afbc.stride = 0;
                ctx->fragment_rts[0].afbc.unk = 0x30009;

                ctx->fragment_rts[0].format.flags |= MALI_MFBD_FORMAT_AFBC;

                /* Point rendering to our special framebuffer */
                ctx->fragment_rts[0].framebuffer = rsrc->bo->afbc_slab.gpu + rsrc->bo->afbc_metadata_size;

                /* WAT? Stride is diff from the scanout case */
                ctx->fragment_rts[0].framebuffer_stride = ctx->pipe_framebuffer.width * 2 * 4;
        }

        /* Enable depth/stencil AFBC for the framebuffer (not the render target) */
        if (ctx->pipe_framebuffer.zsbuf) {
                struct panfrost_resource *rsrc = (struct panfrost_resource *) ctx->pipe_framebuffer.zsbuf->texture;

                if (rsrc->bo->has_afbc) {
                        if (require_sfbd) {
                                fprintf(stderr, "Depth AFBC not supported on SFBD\n");
                                assert(0);
                        }

                        ctx->fragment_mfbd.unk3 |= MALI_MFBD_EXTRA;

                        ctx->fragment_extra.ds_afbc.depth_stencil_afbc_metadata = rsrc->bo->afbc_slab.gpu;
                        ctx->fragment_extra.ds_afbc.depth_stencil_afbc_stride = 0;

                        ctx->fragment_extra.ds_afbc.depth_stencil = rsrc->bo->afbc_slab.gpu + rsrc->bo->afbc_metadata_size;

                        ctx->fragment_extra.ds_afbc.zero1 = 0x10009;
                        ctx->fragment_extra.ds_afbc.padding = 0x1000;

                        ctx->fragment_extra.unk = 0x435; /* General 0x400 in all unks. 0x5 for depth/stencil. 0x10 for AFBC encoded depth stencil. Unclear where the 0x20 is from */

                        ctx->fragment_mfbd.unk3 |= 0x400;
                }
        }
        /* For the special case of a depth-only FBO, we need to attach a dummy render target */

        if (ctx->pipe_framebuffer.nr_cbufs == 0) {
                if (require_sfbd) {
                        fprintf(stderr, "Depth-only FBO not supported on SFBD\n");
                        assert(0);
                }

                struct mali_rt_format null_rt = {
                        .unk1 = 0x4000000,
                        .unk4 = 0x8
                };

                ctx->fragment_rts[0].format = null_rt;
                ctx->fragment_rts[0].framebuffer = 0;
                ctx->fragment_rts[0].framebuffer_stride = 0;
        }
}
/* Framebuffer descriptor */

static void
panfrost_set_framebuffer_resolution(struct mali_single_framebuffer *fb, int w, int h)
{
        fb->width = MALI_POSITIVE(w);
        fb->height = MALI_POSITIVE(h);

        /* No idea why this is needed, but it's how resolution_check is
         * calculated. It's not clear to us yet why the hardware wants this.
         * The formula itself was discovered mostly by manual bruteforce and
         * aggressive algebraic simplification. */

        fb->resolution_check = ((w + h) / 3) << 4;
}
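/* Sanity check of the formula (worked example, ours): for a 1920x1080
 * framebuffer, resolution_check = ((1920 + 1080) / 3) << 4
 *                               = 1000 << 4 = 16000. */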
static struct mali_single_framebuffer
panfrost_emit_sfbd(struct panfrost_context *ctx)
{
        struct mali_single_framebuffer framebuffer = {
                .format = 0x30000000,
                .clear_flags = 0x1000,
                .unknown_address_0 = ctx->scratchpad.gpu,
                .unknown_address_1 = ctx->misc_0.gpu,
                .unknown_address_2 = ctx->misc_0.gpu + 40960,

                .tiler_heap_free = ctx->tiler_heap.gpu,
                .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size,
        };

        panfrost_set_framebuffer_resolution(&framebuffer, ctx->pipe_framebuffer.width, ctx->pipe_framebuffer.height);

        return framebuffer;
}
static struct bifrost_framebuffer
panfrost_emit_mfbd(struct panfrost_context *ctx)
{
        struct bifrost_framebuffer framebuffer = {
                /* It is not yet clear what tiler_meta means or how it's
                 * calculated, but we can tell the lower 32-bits are a
                 * (monotonically increasing?) function of tile count and
                 * geometry complexity; I suspect it defines a memory size of
                 * some kind? for the tiler. It's really unclear at the
                 * moment... but to add to the confusion, the hardware is happy
                 * enough to accept a zero in this field, so we don't even have
                 * to worry about it right now.
                 *
                 * The byte (just after the 32-bit mark) is much more
                 * interesting. The higher nibble I've only ever seen as 0xF,
                 * but the lower one I've seen as 0x0 or 0xF, and it's not
                 * obvious what the difference is. But what -is- obvious is
                 * that when the lower nibble is zero, performance is severely
                 * degraded compared to when the lower nibble is set.
                 * Evidently, that nibble enables some sort of fast path,
                 * perhaps relating to caching or tile flush? Regardless, at
                 * this point there's no clear reason not to set it, aside from
                 * substantially increased memory requirements (of the misc_0
                 * buffer). */

                .tiler_meta = ((uint64_t) 0xff << 32) | 0x0,

                .width1 = MALI_POSITIVE(ctx->pipe_framebuffer.width),
                .height1 = MALI_POSITIVE(ctx->pipe_framebuffer.height),
                .width2 = MALI_POSITIVE(ctx->pipe_framebuffer.width),
                .height2 = MALI_POSITIVE(ctx->pipe_framebuffer.height),

                .rt_count_1 = MALI_POSITIVE(1),

                /* Corresponds to unknown_address_X of SFBD */
                .scratchpad = ctx->scratchpad.gpu,
                .tiler_scratch_start = ctx->misc_0.gpu,

                /* The constant added here is, like the lower word of
                 * tiler_meta, (loosely) another product of framebuffer size
                 * and geometry complexity. It must be sufficiently large for
                 * the tiler_meta fast path to work; if it's too small, there
                 * will be DATA_INVALID_FAULTs. Conversely, it must be less
                 * than the total size of misc_0, or else there's no room. It's
                 * possible this constant configures a partition between two
                 * parts of misc_0? We haven't investigated the functionality,
                 * as these buffers are internally used by the hardware
                 * (presumably by the tiler) but not seemingly touched by the
                 * driver. */

                .tiler_scratch_middle = ctx->misc_0.gpu + 0xf0000,

                .tiler_heap_start = ctx->tiler_heap.gpu,
                .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size,
        };

        return framebuffer;
}
/* Are we currently rendering to the screen (rather than an FBO)? */

static bool
panfrost_is_scanout(struct panfrost_context *ctx)
{
        /* If there is no color buffer, it's an FBO */
        if (!ctx->pipe_framebuffer.nr_cbufs)
                return false;

        /* If we're too early that no framebuffer was sent, it's scanout */
        if (!ctx->pipe_framebuffer.cbufs[0])
                return true;

        return ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_DISPLAY_TARGET ||
               ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SCANOUT ||
               ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
}
/* The above function is for generalised fbd emission, used in both fragment as
 * well as vertex/tiler payloads. This payload is specific to fragment
 * payloads. */

static void
panfrost_new_frag_framebuffer(struct panfrost_context *ctx)
{
        mali_ptr framebuffer;
        int stride;

        if (ctx->pipe_framebuffer.nr_cbufs > 0) {
                framebuffer = ((struct panfrost_resource *) ctx->pipe_framebuffer.cbufs[0]->texture)->bo->gpu[0];
                stride = util_format_get_stride(ctx->pipe_framebuffer.cbufs[0]->format, ctx->pipe_framebuffer.width);
        } else {
                /* Depth-only framebuffer -> dummy RT */
                framebuffer = 0;
                stride = 0;
        }

        /* The default is upside down from OpenGL's perspective. */
        if (panfrost_is_scanout(ctx)) {
                framebuffer += stride * (ctx->pipe_framebuffer.height - 1);
                stride = -stride;
        }

        if (require_sfbd) {
                struct mali_single_framebuffer fb = panfrost_emit_sfbd(ctx);

                fb.framebuffer = framebuffer;
                fb.stride = stride;

                fb.format = 0xb84e0281; /* RGB32, no MSAA */
                memcpy(&ctx->fragment_sfbd, &fb, sizeof(fb));
        } else {
                struct bifrost_framebuffer fb = panfrost_emit_mfbd(ctx);

                /* By default, Gallium seems to need a BGR framebuffer */
                unsigned char bgra[4] = {
                        PIPE_SWIZZLE_Z, PIPE_SWIZZLE_Y, PIPE_SWIZZLE_X, PIPE_SWIZZLE_W
                };

                struct bifrost_render_target rt = {
                        .nr_channels = MALI_POSITIVE(4),
                        .swizzle = panfrost_translate_swizzle_4(bgra),
                        .framebuffer = framebuffer,
                        .framebuffer_stride = (stride / 16) & 0xfffffff,
                };

                memcpy(&ctx->fragment_rts[0], &rt, sizeof(rt));

                memset(&ctx->fragment_extra, 0, sizeof(ctx->fragment_extra));

                memcpy(&ctx->fragment_mfbd, &fb, sizeof(fb));
        }
}
/* Maps float 0.0-1.0 to int 0x00-0xFF */

static uint8_t
normalised_float_to_u8(float f)
{
        return (uint8_t) (int) (f * 255.0f);
}
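/* E.g. 0.5f maps to 127 (truncation, not rounding). Note (ours): the cast
 * does not clamp, so callers are expected to pass values already in [0, 1]. */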
static void
panfrost_clear_sfbd(struct panfrost_job *job)
{
        struct panfrost_context *ctx = job->ctx;
        struct mali_single_framebuffer *sfbd = &ctx->fragment_sfbd;

        if (job->clear & PIPE_CLEAR_COLOR) {
                sfbd->clear_color_1 = job->clear_color;
                sfbd->clear_color_2 = job->clear_color;
                sfbd->clear_color_3 = job->clear_color;
                sfbd->clear_color_4 = job->clear_color;
        }

        if (job->clear & PIPE_CLEAR_DEPTH) {
                sfbd->clear_depth_1 = job->clear_depth;
                sfbd->clear_depth_2 = job->clear_depth;
                sfbd->clear_depth_3 = job->clear_depth;
                sfbd->clear_depth_4 = job->clear_depth;

                sfbd->depth_buffer = ctx->depth_stencil_buffer.gpu;
                sfbd->depth_buffer_enable = MALI_DEPTH_STENCIL_ENABLE;
        }

        if (job->clear & PIPE_CLEAR_STENCIL) {
                sfbd->clear_stencil = job->clear_stencil;

                sfbd->stencil_buffer = ctx->depth_stencil_buffer.gpu;
                sfbd->stencil_buffer_enable = MALI_DEPTH_STENCIL_ENABLE;
        }

        /* Set flags based on what has been cleared, for the SFBD case */
        /* XXX: What do these flags mean? */
        int clear_flags = 0x101100;

        if (!(job->clear & ~(PIPE_CLEAR_COLOR | PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))) {
                /* On a tiler like this, it's fastest to clear all three buffers at once */

                clear_flags |= MALI_CLEAR_FAST;
        } else {
                clear_flags |= MALI_CLEAR_SLOW;

                if (job->clear & PIPE_CLEAR_STENCIL)
                        clear_flags |= MALI_CLEAR_SLOW_STENCIL;
        }

        sfbd->clear_flags = clear_flags;
}
static void
panfrost_clear_mfbd(struct panfrost_job *job)
{
        struct panfrost_context *ctx = job->ctx;
        struct bifrost_render_target *buffer_color = &ctx->fragment_rts[0];
        struct bifrost_framebuffer *buffer_ds = &ctx->fragment_mfbd;

        if (job->clear & PIPE_CLEAR_COLOR) {
                buffer_color->clear_color_1 = job->clear_color;
                buffer_color->clear_color_2 = job->clear_color;
                buffer_color->clear_color_3 = job->clear_color;
                buffer_color->clear_color_4 = job->clear_color;
        }

        if (job->clear & PIPE_CLEAR_DEPTH) {
                buffer_ds->clear_depth = job->clear_depth;
        }

        if (job->clear & PIPE_CLEAR_STENCIL) {
                buffer_ds->clear_stencil = job->clear_stencil;
        }

        if (job->clear & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
                /* Setup combined 24/8 depth/stencil */
                ctx->fragment_mfbd.unk3 |= MALI_MFBD_EXTRA;
                ctx->fragment_extra.unk = 0x405;
                ctx->fragment_extra.ds_linear.depth = ctx->depth_stencil_buffer.gpu;
                ctx->fragment_extra.ds_linear.depth_stride = ctx->pipe_framebuffer.width * 4;
        }
}
static void
panfrost_clear(
        struct pipe_context *pipe,
        unsigned buffers,
        const union pipe_color_union *color,
        double depth, unsigned stencil)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);

        if (buffers & PIPE_CLEAR_COLOR) {
                /* Alpha clear only meaningful without alpha channel, TODO less ad hoc */
                bool has_alpha = util_format_has_alpha(ctx->pipe_framebuffer.cbufs[0]->format);
                float clear_alpha = has_alpha ? color->f[3] : 1.0f;

                uint32_t packed_color =
                        (normalised_float_to_u8(clear_alpha) << 24) |
                        (normalised_float_to_u8(color->f[2]) << 16) |
                        (normalised_float_to_u8(color->f[1]) << 8) |
                        (normalised_float_to_u8(color->f[0]) << 0);

                job->clear_color = packed_color;
        }

        if (buffers & PIPE_CLEAR_DEPTH) {
                job->clear_depth = depth;
        }

        if (buffers & PIPE_CLEAR_STENCIL) {
                job->clear_stencil = stencil;
        }

        job->clear |= buffers;
}
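/* Packing example (ours): clearing to opaque red, color->f = {1, 0, 0, 1},
 * yields packed_color = 0xFF0000FF -- alpha in the top byte, then blue,
 * green, red in descending bytes. */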
static mali_ptr
panfrost_attach_vt_mfbd(struct panfrost_context *ctx)
{
        /* MFBD needs a sequential semi-render target upload, but what exactly this is, is beyond me for now */
        struct bifrost_render_target rts_list[] = {
                {
                        .framebuffer = ctx->misc_0.gpu,
                },
        };

        /* Allocate memory for the three components */
        int size = 1024 + sizeof(ctx->vt_framebuffer_mfbd) + sizeof(rts_list);
        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);

        /* Opaque 1024-block */
        rts_list[0].chunknown.pointer = transfer.gpu;

        memcpy(transfer.cpu + 1024, &ctx->vt_framebuffer_mfbd, sizeof(ctx->vt_framebuffer_mfbd));
        memcpy(transfer.cpu + 1024 + sizeof(ctx->vt_framebuffer_mfbd), rts_list, sizeof(rts_list));

        return (transfer.gpu + 1024) | MALI_MFBD;
}
static mali_ptr
panfrost_attach_vt_sfbd(struct panfrost_context *ctx)
{
        return panfrost_upload_transient(ctx, &ctx->vt_framebuffer_sfbd, sizeof(ctx->vt_framebuffer_sfbd)) | MALI_SFBD;
}
static void
panfrost_attach_vt_framebuffer(struct panfrost_context *ctx)
{
        mali_ptr framebuffer = require_sfbd ?
                               panfrost_attach_vt_sfbd(ctx) :
                               panfrost_attach_vt_mfbd(ctx);

        ctx->payload_vertex.postfix.framebuffer = framebuffer;
        ctx->payload_tiler.postfix.framebuffer = framebuffer;
}
static void
panfrost_viewport(struct panfrost_context *ctx,
                  float depth_clip_near,
                  float depth_clip_far,
                  int viewport_x0, int viewport_y0,
                  int viewport_x1, int viewport_y1)
{
        /* Clip bounds are encoded as floats. The viewport itself is encoded as
         * (somewhat) asymmetric ints. */

        struct mali_viewport ret = {
                /* By default, do no viewport clipping, i.e. clip to (-inf,
                 * inf) in each direction. Clipping to the viewport in theory
                 * should work, but in practice causes issues when we're not
                 * explicitly trying to scissor */

                /* We always perform depth clipping (TODO: Can this be disabled?) */

                .clip_minz = depth_clip_near,
                .clip_maxz = depth_clip_far,

                .viewport0 = { viewport_x0, viewport_y0 },
                .viewport1 = { MALI_POSITIVE(viewport_x1), MALI_POSITIVE(viewport_y1) },
        };

        memcpy(ctx->viewport, &ret, sizeof(ret));
}
/* Reset per-frame context, called on context initialisation as well as after
 * flushing a frame */

static void
panfrost_invalidate_frame(struct panfrost_context *ctx)
{
        unsigned transient_count = ctx->transient_pools[ctx->cmdstream_i].entry_index * ctx->transient_pools[0].entry_size + ctx->transient_pools[ctx->cmdstream_i].entry_offset;
        printf("Uploaded transient %d bytes\n", transient_count);

        /* Rotate cmdstream */
        if ((++ctx->cmdstream_i) == (sizeof(ctx->transient_pools) / sizeof(ctx->transient_pools[0])))
                ctx->cmdstream_i = 0;

        if (require_sfbd)
                ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
        else
                ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);

        panfrost_new_frag_framebuffer(ctx);

        /* Reset varyings allocated */
        ctx->varying_height = 0;

        /* The transient cmdstream is dirty every frame; the only bits worth preserving
         * (textures, shaders, etc) are in other buffers anyways */

        ctx->transient_pools[ctx->cmdstream_i].entry_index = 0;
        ctx->transient_pools[ctx->cmdstream_i].entry_offset = 0;

        /* Regenerate payloads */
        panfrost_attach_vt_framebuffer(ctx);

        if (ctx->rasterizer)
                ctx->dirty |= PAN_DIRTY_RASTERIZER;

        ctx->dirty |= PAN_DIRTY_SAMPLERS | PAN_DIRTY_TEXTURES;
}
/* In practice, every field of these payloads should be configurable
 * arbitrarily, which means these functions are basically catch-alls for
 * as-of-yet unwavering unknowns */

static void
panfrost_emit_vertex_payload(struct panfrost_context *ctx)
{
        struct midgard_payload_vertex_tiler payload = {
                .prefix = {
                        .workgroups_z_shift = 32,
                        .workgroups_x_shift_2 = 0x2,
                        .workgroups_x_shift_3 = 0x5,
                },

                .gl_enables = 0x4 | (is_t6xx ? 0 : 0x2),
        };

        memcpy(&ctx->payload_vertex, &payload, sizeof(payload));
}

static void
panfrost_emit_tiler_payload(struct panfrost_context *ctx)
{
        struct midgard_payload_vertex_tiler payload = {
                .prefix = {
                        .workgroups_z_shift = 32,
                        .workgroups_x_shift_2 = 0x2,
                        .workgroups_x_shift_3 = 0x6,
                },

                .zero1 = 0xffff, /* Why is this only seen on test-quad-textured? */
        };

        /* Reserve the viewport */
        struct panfrost_transfer t = panfrost_allocate_chunk(ctx, sizeof(struct mali_viewport), HEAP_DESCRIPTOR);
        ctx->viewport = (struct mali_viewport *) t.cpu;
        payload.postfix.viewport = t.gpu;

        memcpy(&ctx->payload_tiler, &payload, sizeof(payload));
}
static unsigned
translate_tex_wrap(enum pipe_tex_wrap w)
{
        switch (w) {
        case PIPE_TEX_WRAP_REPEAT:
                return MALI_WRAP_REPEAT;

        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return MALI_WRAP_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return MALI_WRAP_CLAMP_TO_BORDER;

        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return MALI_WRAP_MIRRORED_REPEAT;

        default:
                assert(0);
                return 0; /* Unreachable */
        }
}
static unsigned
translate_tex_filter(enum pipe_tex_filter f)
{
        switch (f) {
        case PIPE_TEX_FILTER_NEAREST:
                return MALI_NEAREST;

        case PIPE_TEX_FILTER_LINEAR:
                return MALI_LINEAR;

        default:
                return 0; /* Unreachable */
        }
}
)
726 return (f
== PIPE_TEX_MIPFILTER_LINEAR
) ? MALI_MIP_LINEAR
: 0;
static unsigned
panfrost_translate_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_FUNC_ALWAYS;
        }

        return 0; /* Unreachable */
}
static unsigned
panfrost_translate_alt_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_ALT_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_ALT_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_ALT_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_ALT_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_ALT_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_ALT_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_ALT_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_ALT_FUNC_ALWAYS;
        }

        return 0; /* Unreachable */
}
static unsigned
panfrost_translate_stencil_op(enum pipe_stencil_op in)
{
        switch (in) {
        case PIPE_STENCIL_OP_KEEP:
                return MALI_STENCIL_KEEP;

        case PIPE_STENCIL_OP_ZERO:
                return MALI_STENCIL_ZERO;

        case PIPE_STENCIL_OP_REPLACE:
                return MALI_STENCIL_REPLACE;

        case PIPE_STENCIL_OP_INCR:
                return MALI_STENCIL_INCR;

        case PIPE_STENCIL_OP_DECR:
                return MALI_STENCIL_DECR;

        case PIPE_STENCIL_OP_INCR_WRAP:
                return MALI_STENCIL_INCR_WRAP;

        case PIPE_STENCIL_OP_DECR_WRAP:
                return MALI_STENCIL_DECR_WRAP;

        case PIPE_STENCIL_OP_INVERT:
                return MALI_STENCIL_INVERT;
        }

        return 0; /* Unreachable */
}
static void
panfrost_make_stencil_state(const struct pipe_stencil_state *in, struct mali_stencil_test *out)
{
        out->ref = 0; /* Gallium gets it from elsewhere */

        out->mask = in->valuemask;
        out->func = panfrost_translate_compare_func(in->func);
        out->sfail = panfrost_translate_stencil_op(in->fail_op);
        out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
        out->dppass = panfrost_translate_stencil_op(in->zpass_op);
}
static void
panfrost_default_shader_backend(struct panfrost_context *ctx)
{
        struct mali_shader_meta shader = {
                .alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000),

                .unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010,
                .unknown2_4 = MALI_NO_MSAA | 0x4e0,
        };

        if (is_t6xx)
                shader.unknown2_4 |= 0x10;

        struct pipe_stencil_state default_stencil = {
                .enabled = 0,
                .func = PIPE_FUNC_ALWAYS,
                .fail_op = MALI_STENCIL_KEEP,
                .zfail_op = MALI_STENCIL_KEEP,
                .zpass_op = MALI_STENCIL_KEEP,
                .writemask = 0xFF,
                .valuemask = 0xFF
        };

        panfrost_make_stencil_state(&default_stencil, &shader.stencil_front);
        shader.stencil_mask_front = default_stencil.writemask;

        panfrost_make_stencil_state(&default_stencil, &shader.stencil_back);
        shader.stencil_mask_back = default_stencil.writemask;

        if (default_stencil.enabled)
                shader.unknown2_4 |= MALI_STENCIL_TEST;

        memcpy(&ctx->fragment_shader_core, &shader, sizeof(shader));
}
/* Generates a vertex/tiler job. This is, in some sense, the heart of the
 * graphics command stream. It should be called once per draw, according to
 * presentations. Set is_tiler for "tiler" jobs (fragment shader jobs, but in
 * Mali parlance, "fragment" refers to framebuffer writeout). Clear it for
 * vertex jobs. */

struct panfrost_transfer
panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler, bool is_elided_tiler)
{
        /* Each draw call corresponds to two jobs, and we want to offset to leave room for the set-value job */
        int draw_job_index = 1 + (2 * ctx->draw_count);

        struct mali_job_descriptor_header job = {
                .job_type = is_tiler ? JOB_TYPE_TILER : JOB_TYPE_VERTEX,
                .job_index = draw_job_index + (is_tiler ? 1 : 0),
                .job_descriptor_size = 1,
        };

        /* Only non-elided tiler jobs have dependencies which are known at this point */

        if (is_tiler && !is_elided_tiler) {
                /* Tiler jobs depend on vertex jobs */

                job.job_dependency_index_1 = draw_job_index;

                /* Tiler jobs also depend on the previous tiler job */

                if (ctx->draw_count)
                        job.job_dependency_index_2 = draw_job_index - 1;
        }

        struct midgard_payload_vertex_tiler *payload = is_tiler ? &ctx->payload_tiler : &ctx->payload_vertex;

        /* There's some padding hacks on 32-bit */

#ifdef __LP64__
        int offset = 0;
#else
        int offset = 4;
#endif

        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(*payload));
        memcpy(transfer.cpu, &job, sizeof(job));
        memcpy(transfer.cpu + sizeof(job) - offset, payload, sizeof(*payload));
        return transfer;
}
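/* Indexing sketch (ours, derived from the arithmetic above): for the first
 * draw (draw_count == 0), the vertex job gets job_index 1 and the tiler job
 * job_index 2, with the tiler depending on the vertex job
 * (dependency_index_1 = 1). The second draw uses indices 3 and 4, and so on;
 * after N draws, 1 + 2N is the next free index, which the set-value job
 * below claims for itself. */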
/* Generates a set value job. It's unclear what exactly this does, why it's
 * necessary, and when to call it. */

static void
panfrost_set_value_job(struct panfrost_context *ctx)
{
        struct mali_job_descriptor_header job = {
                .job_type = JOB_TYPE_SET_VALUE,
                .job_descriptor_size = 1,
                .job_index = 1 + (2 * ctx->draw_count),
        };

        struct mali_payload_set_value payload = {
                .out = ctx->misc_0.gpu,
        };

        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(payload));
        memcpy(transfer.cpu, &job, sizeof(job));
        memcpy(transfer.cpu + sizeof(job), &payload, sizeof(payload));

        ctx->u_set_value_job = (struct mali_job_descriptor_header *) transfer.cpu;
        ctx->set_value_job = transfer.gpu;
}
/* Generate a fragment job. This should be called once per frame. (According to
 * presentations, this is supposed to correspond to eglSwapBuffers) */

mali_ptr
panfrost_fragment_job(struct panfrost_context *ctx)
{
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);

        /* Actualize the clear late; TODO: Fix order dependency between clear
         * and AFBC */

        if (require_sfbd)
                panfrost_clear_sfbd(job);
        else
                panfrost_clear_mfbd(job);

        panfrost_set_fragment_afbc(ctx);

        if (ctx->pipe_framebuffer.nr_cbufs == 1) {
                struct panfrost_resource *rsrc = (struct panfrost_resource *) ctx->pipe_framebuffer.cbufs[0]->texture;

                if (rsrc->bo->has_checksum) {
                        if (require_sfbd) {
                                fprintf(stderr, "Checksumming not supported on SFBD\n");
                                assert(0);
                        }

                        int stride = util_format_get_stride(rsrc->base.format, rsrc->base.width0);

                        ctx->fragment_mfbd.unk3 |= MALI_MFBD_EXTRA;
                        ctx->fragment_extra.unk |= 0x420;
                        ctx->fragment_extra.checksum_stride = rsrc->bo->checksum_stride;
                        ctx->fragment_extra.checksum = rsrc->bo->gpu[0] + stride * rsrc->base.height0;
                }
        }

        /* The frame is complete and therefore the framebuffer descriptor is
         * ready for linkage and upload */

        size_t sz = require_sfbd ? sizeof(struct mali_single_framebuffer) : (sizeof(struct bifrost_framebuffer) + sizeof(struct bifrost_fb_extra) + sizeof(struct bifrost_render_target) * 1);
        struct panfrost_transfer fbd_t = panfrost_allocate_transient(ctx, sz);

        off_t offset = 0;

        if (require_sfbd) {
                /* Upload just the SFBD all at once */
                memcpy(fbd_t.cpu, &ctx->fragment_sfbd, sizeof(ctx->fragment_sfbd));
                offset += sizeof(ctx->fragment_sfbd);
        } else {
                /* Upload the MFBD header */
                memcpy(fbd_t.cpu, &ctx->fragment_mfbd, sizeof(ctx->fragment_mfbd));
                offset += sizeof(ctx->fragment_mfbd);

                /* Upload extra framebuffer info if necessary */
                if (ctx->fragment_mfbd.unk3 & MALI_MFBD_EXTRA) {
                        memcpy(fbd_t.cpu + offset, &ctx->fragment_extra, sizeof(struct bifrost_fb_extra));
                        offset += sizeof(struct bifrost_fb_extra);
                }

                /* Upload (single) render target */
                memcpy(fbd_t.cpu + offset, &ctx->fragment_rts[0], sizeof(struct bifrost_render_target) * 1);
        }

        /* Generate the fragment (frame) job */

        struct mali_job_descriptor_header header = {
                .job_type = JOB_TYPE_FRAGMENT,
                .job_descriptor_size = 1
        };

        struct mali_payload_fragment payload = {
                .min_tile_coord = MALI_COORDINATE_TO_TILE_MIN(0, 0),
                .max_tile_coord = MALI_COORDINATE_TO_TILE_MAX(ctx->pipe_framebuffer.width, ctx->pipe_framebuffer.height),
                .framebuffer = fbd_t.gpu | (require_sfbd ? MALI_SFBD : MALI_MFBD),
        };

        if (!require_sfbd && ctx->fragment_mfbd.unk3 & MALI_MFBD_EXTRA) {
                /* Signal that there is an extra portion of the framebuffer
                 * descriptor */

                payload.framebuffer |= 2;
        }

        /* Normally, there should be no padding. However, fragment jobs are
         * shared with 64-bit Bifrost systems, and accordingly there is 4-bytes
         * of zero padding in between. */

        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(header) + sizeof(payload));
        memcpy(transfer.cpu, &header, sizeof(header));
        memcpy(transfer.cpu + sizeof(header), &payload, sizeof(payload));
        return transfer.gpu;
}
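/* Layout of the transient FBD upload in the MFBD case (sketch, ours):
 *
 *   fbd_t.cpu + 0                bifrost_framebuffer    (header)
 *   fbd_t.cpu + sizeof(header)   bifrost_fb_extra       (only if MALI_MFBD_EXTRA)
 *   fbd_t.cpu + offset           bifrost_render_target  (single RT)
 *
 * The low bits OR'd into payload.framebuffer (MALI_MFBD/MALI_SFBD, and the
 * |= 2 for the extra section) appear to tag which variant follows. */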
/* Emits attributes and varying descriptors, which should be called every draw,
 * excepting some obscure circumstances */

static void
panfrost_emit_vertex_data(struct panfrost_context *ctx)
{
        /* TODO: Only update the dirtied buffers */
        union mali_attr attrs[PIPE_MAX_ATTRIBS];
        union mali_attr varyings[PIPE_MAX_ATTRIBS];

        unsigned invocation_count = MALI_NEGATIVE(ctx->payload_tiler.prefix.invocation_count);

        for (int i = 0; i < ctx->vertex_buffer_count; ++i) {
                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
                struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);

                /* Let's figure out the layout of the attributes in memory so
                 * we can be smart about size computation. The idea is to
                 * figure out the maximum src_offset, which tells us the latest
                 * spot a vertex could start. Meanwhile, we figure out the size
                 * of the attribute memory (assuming interleaved
                 * representation) and tack on the max src_offset for a
                 * reasonably good upper bound on the size.
                 *
                 * Proving correctness is left as an exercise to the reader.
                 */

                unsigned max_src_offset = 0;

                for (unsigned j = 0; j < ctx->vertex->num_elements; ++j) {
                        if (ctx->vertex->pipe[j].vertex_buffer_index != i) continue;
                        max_src_offset = MAX2(max_src_offset, ctx->vertex->pipe[j].src_offset);
                }

                /* Offset vertex count by draw_start to make sure we upload enough */
                attrs[i].stride = buf->stride;
                attrs[i].size = buf->stride * (ctx->payload_vertex.draw_start + invocation_count) + max_src_offset;

                /* Vertex elements are -already- GPU-visible, at
                 * rsrc->gpu. However, attribute buffers must be 64 aligned. If
                 * it is not, for now we have to duplicate the buffer. */

                mali_ptr effective_address = (rsrc->bo->gpu[0] + buf->buffer_offset);

                if (effective_address & 0x3F) {
                        attrs[i].elements = panfrost_upload_transient(ctx, rsrc->bo->cpu[0] + buf->buffer_offset, attrs[i].size) | 1;
                } else {
                        attrs[i].elements = effective_address | 1;
                }
        }

        struct panfrost_varyings *vars = &ctx->vs->variants[ctx->vs->active_variant].varyings;

        for (int i = 0; i < vars->varying_buffer_count; ++i) {
                mali_ptr varying_address = ctx->varying_mem.gpu + ctx->varying_height;

                varyings[i].elements = varying_address | 1;
                varyings[i].stride = vars->varyings_stride[i];
                varyings[i].size = vars->varyings_stride[i] * invocation_count;

                /* If this varying has to be linked somewhere, do it now. See
                 * pan_assemble.c for the indices. TODO: Use a more generic
                 * linking interface */

                if (i == 1) {
                        /* gl_Position */
                        ctx->payload_tiler.postfix.position_varying = varying_address;
                } else if (i == 2) {
                        /* gl_PointSize */
                        ctx->payload_tiler.primitive_size.pointer = varying_address;
                }

                /* Varyings appear to need 64-byte alignment */
                ctx->varying_height += ALIGN(varyings[i].size, 64);

                /* Ensure that we fit */
                assert(ctx->varying_height < ctx->varying_mem.size);
        }

        ctx->payload_vertex.postfix.attributes = panfrost_upload_transient(ctx, attrs, ctx->vertex_buffer_count * sizeof(union mali_attr));

        mali_ptr varyings_p = panfrost_upload_transient(ctx, &varyings, vars->varying_buffer_count * sizeof(union mali_attr));
        ctx->payload_vertex.postfix.varyings = varyings_p;
        ctx->payload_tiler.postfix.varyings = varyings_p;
}
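/* Size-bound example (ours): with buf->stride = 16, draw_start = 0,
 * invocation_count = 100 and max_src_offset = 12, attrs[i].size =
 * 16 * 100 + 12 = 1612 bytes -- an upper bound on any attribute read the
 * hardware can issue under the interleaving assumption described above. */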
/* Go through dirty flags and actualise them in the cmdstream. */

void
panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
{
        if (with_vertex_data) {
                panfrost_emit_vertex_data(ctx);
        }

        if (ctx->dirty & PAN_DIRTY_RASTERIZER) {
                ctx->payload_tiler.gl_enables = ctx->rasterizer->tiler_gl_enables;
                panfrost_set_framebuffer_msaa(ctx, ctx->rasterizer->base.multisample);
        }

        if (ctx->occlusion_query) {
                ctx->payload_tiler.gl_enables |= MALI_OCCLUSION_QUERY | MALI_OCCLUSION_PRECISE;
                ctx->payload_tiler.postfix.occlusion_counter = ctx->occlusion_query->transfer.gpu;
        }

        if (ctx->dirty & PAN_DIRTY_VS) {
                struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];

                /* Late shader descriptor assignments */
                vs->tripipe->texture_count = ctx->sampler_view_count[PIPE_SHADER_VERTEX];
                vs->tripipe->sampler_count = ctx->sampler_count[PIPE_SHADER_VERTEX];

                vs->tripipe->midgard1.unknown1 = 0x2201;

                ctx->payload_vertex.postfix._shader_upper = vs->tripipe_gpu >> 4;

                /* Varying descriptor is tied to the vertex shader. Also the
                 * fragment shader, I suppose, but it's generated with the
                 * vertex shader so */

                struct panfrost_varyings *varyings = &ctx->vs->variants[ctx->vs->active_variant].varyings;

                ctx->payload_vertex.postfix.varying_meta = varyings->varyings_descriptor;
                ctx->payload_tiler.postfix.varying_meta = varyings->varyings_descriptor_fragment;
        }

        if (ctx->dirty & (PAN_DIRTY_RASTERIZER | PAN_DIRTY_VS)) {
                /* Check if we need to link the gl_PointSize varying */

                struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];

                bool needs_gl_point_size = vs->writes_point_size && ctx->payload_tiler.prefix.draw_mode == MALI_POINTS;

                if (!needs_gl_point_size) {
                        /* If the size is constant, write it out. Otherwise,
                         * don't touch primitive_size (since we would clobber
                         * the pointer there) */

                        ctx->payload_tiler.primitive_size.constant = ctx->rasterizer->base.line_width;
                }

                /* Set the flag for varying (pointer) point size if the shader needs that */
                SET_BIT(ctx->payload_tiler.prefix.unknown_draw, MALI_DRAW_VARYING_SIZE, needs_gl_point_size);
        }
        /* TODO: Maybe dirty track FS, maybe not. For now, it's transient. */
        if (ctx->fs)
                ctx->dirty |= PAN_DIRTY_FS;

        if (ctx->dirty & PAN_DIRTY_FS) {
                struct panfrost_shader_state *variant = &ctx->fs->variants[ctx->fs->active_variant];

#define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name

                COPY(shader);
                COPY(attribute_count);
                COPY(varying_count);
                COPY(midgard1.uniform_count);
                COPY(midgard1.work_count);
                COPY(midgard1.unknown2);

#undef COPY

                /* If there is a blend shader, work registers are shared */

                if (ctx->blend->has_blend_shader)
                        ctx->fragment_shader_core.midgard1.work_count = /*MAX2(ctx->fragment_shader_core.midgard1.work_count, ctx->blend->blend_work_count)*/16;

                /* Set late due to depending on render state */
                /* The one at the end seems to mean "1 UBO" */
                ctx->fragment_shader_core.midgard1.unknown1 = MALI_NO_ALPHA_TO_COVERAGE | 0x200 | 0x2201;

                /* Assign texture/sample count right before upload */
                ctx->fragment_shader_core.texture_count = ctx->sampler_view_count[PIPE_SHADER_FRAGMENT];
                ctx->fragment_shader_core.sampler_count = ctx->sampler_count[PIPE_SHADER_FRAGMENT];

                /* Assign the stencil refs late */
                ctx->fragment_shader_core.stencil_front.ref = ctx->stencil_ref.ref_value[0];
                ctx->fragment_shader_core.stencil_back.ref = ctx->stencil_ref.ref_value[1];

                /* CAN_DISCARD should be set if the fragment shader possibly
                 * contains a 'discard' instruction. It is likely this is
                 * related to optimizations related to forward-pixel kill, as
                 * per "Mali Performance 3: Is EGL_BUFFER_PRESERVED a good
                 * thing?" by Peter Harris
                 */

                if (variant->can_discard) {
                        ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
                        ctx->fragment_shader_core.midgard1.unknown1 &= ~MALI_NO_ALPHA_TO_COVERAGE;
                        ctx->fragment_shader_core.midgard1.unknown1 |= 0x4000;
                        ctx->fragment_shader_core.midgard1.unknown1 = 0x4200;
                }

                /* Check if we're using the default blend descriptor (fast path) */

                bool no_blending =
                        !ctx->blend->has_blend_shader &&
                        (ctx->blend->equation.rgb_mode == 0x122) &&
                        (ctx->blend->equation.alpha_mode == 0x122) &&
                        (ctx->blend->equation.color_mask == 0xf);

                if (require_sfbd) {
                        /* When only a single render target platform is used, the blend
                         * information is inside the shader meta itself. We
                         * additionally need to signal CAN_DISCARD for nontrivial blend
                         * modes (so we're able to read back the destination buffer) */

                        if (ctx->blend->has_blend_shader) {
                                ctx->fragment_shader_core.blend_shader = ctx->blend->blend_shader;
                        } else {
                                memcpy(&ctx->fragment_shader_core.blend_equation, &ctx->blend->equation, sizeof(ctx->blend->equation));
                        }

                        if (!no_blending)
                                ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
                }

                size_t size = sizeof(struct mali_shader_meta) + sizeof(struct mali_blend_meta);
                struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);
                memcpy(transfer.cpu, &ctx->fragment_shader_core, sizeof(struct mali_shader_meta));

                ctx->payload_tiler.postfix._shader_upper = (transfer.gpu) >> 4;

                if (!require_sfbd) {
                        /* Additional blend descriptor tacked on for jobs using MFBD */

                        unsigned blend_count = 0;

                        if (ctx->blend->has_blend_shader) {
                                /* For a blend shader, the bottom nibble corresponds to
                                 * the number of work registers used, which signals the
                                 * -existence- of a blend shader */

                                assert(ctx->blend->blend_work_count >= 2);
                                blend_count |= MIN2(ctx->blend->blend_work_count, 3);
                        } else {
                                /* Otherwise, the bottom bit simply specifies if
                                 * blending (anything other than REPLACE) is enabled */

                                if (!no_blending)
                                        blend_count |= 0x1;
                        }

                        /* Second blend equation is always a simple replace */

                        uint64_t replace_magic = 0xf0122122;
                        struct mali_blend_equation replace_mode;
                        memcpy(&replace_mode, &replace_magic, sizeof(replace_mode));

                        struct mali_blend_meta blend_meta[] = {
                                {
                                        .unk1 = 0x200 | blend_count,
                                        .blend_equation_1 = ctx->blend->equation,
                                        .blend_equation_2 = replace_mode
                                },
                        };

                        if (ctx->blend->has_blend_shader)
                                memcpy(&blend_meta[0].blend_equation_1, &ctx->blend->blend_shader, sizeof(ctx->blend->blend_shader));

                        memcpy(transfer.cpu + sizeof(struct mali_shader_meta), blend_meta, sizeof(blend_meta));
                }
        }
        if (ctx->dirty & PAN_DIRTY_VERTEX) {
                ctx->payload_vertex.postfix.attribute_meta = ctx->vertex->descriptor_ptr;
        }

        if (ctx->dirty & PAN_DIRTY_SAMPLERS) {
                /* Upload samplers back to back, no padding */

                for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
                        if (!ctx->sampler_count[t]) continue;

                        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(struct mali_sampler_descriptor) * ctx->sampler_count[t]);
                        struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *) transfer.cpu;

                        for (int i = 0; i < ctx->sampler_count[t]; ++i) {
                                desc[i] = ctx->samplers[t][i]->hw;
                        }

                        if (t == PIPE_SHADER_FRAGMENT)
                                ctx->payload_tiler.postfix.sampler_descriptor = transfer.gpu;
                        else if (t == PIPE_SHADER_VERTEX)
                                ctx->payload_vertex.postfix.sampler_descriptor = transfer.gpu;
                }
        }
        if (ctx->dirty & PAN_DIRTY_TEXTURES) {
                for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
                        if (!ctx->sampler_view_count[t]) continue;

                        uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];

                        for (int i = 0; i < ctx->sampler_view_count[t]; ++i) {
                                if (!ctx->sampler_views[t][i])
                                        continue;

                                struct pipe_resource *tex_rsrc = ctx->sampler_views[t][i]->base.texture;
                                struct panfrost_resource *rsrc = (struct panfrost_resource *) tex_rsrc;

                                /* Inject the address in. */
                                for (int l = 0; l < (tex_rsrc->last_level + 1); ++l)
                                        ctx->sampler_views[t][i]->hw.swizzled_bitmaps[l] = rsrc->bo->gpu[l];

                                /* Workaround maybe-errata (?) with non-mipmaps */
                                int s = ctx->sampler_views[t][i]->hw.nr_mipmap_levels;

                                if (!rsrc->bo->is_mipmap) {
                                        if (is_t6xx) {
                                                /* HW ERRATA, not needed after t6XX */
                                                ctx->sampler_views[t][i]->hw.swizzled_bitmaps[1] = rsrc->bo->gpu[0];

                                                ctx->sampler_views[t][i]->hw.unknown3A = 1;
                                        }

                                        ctx->sampler_views[t][i]->hw.nr_mipmap_levels = 0;
                                }

                                trampolines[i] = panfrost_upload_transient(ctx, &ctx->sampler_views[t][i]->hw, sizeof(struct mali_texture_descriptor));

                                ctx->sampler_views[t][i]->hw.nr_mipmap_levels = s;

                                if (is_t6xx)
                                        ctx->sampler_views[t][i]->hw.unknown3A = 0;
                        }

                        mali_ptr trampoline = panfrost_upload_transient(ctx, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]);

                        if (t == PIPE_SHADER_FRAGMENT)
                                ctx->payload_tiler.postfix.texture_trampoline = trampoline;
                        else if (t == PIPE_SHADER_VERTEX)
                                ctx->payload_vertex.postfix.texture_trampoline = trampoline;
                }
        }
        /* Generate the viewport vector of the form: <width/2, height/2, centerx, centery> */
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        float viewport_vec4[] = {
                vp->scale[0],
                fabsf(vp->scale[1]),
                vp->translate[0],
                /* -1.0 * vp->translate[1] */ fabs(1.0 * vp->scale[1]) /* XXX */
        };

        for (int i = 0; i < PIPE_SHADER_TYPES; ++i) {
                struct panfrost_constant_buffer *buf = &ctx->constant_buffer[i];

                if (i == PIPE_SHADER_VERTEX || i == PIPE_SHADER_FRAGMENT) {
                        /* It doesn't matter if we don't use all the memory;
                         * we'd need a dummy UBO anyway. Compute the max */

                        size_t size = sizeof(viewport_vec4) + buf->size;
                        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);

                        /* Keep track how much we've uploaded */
                        off_t offset = 0;

                        if (i == PIPE_SHADER_VERTEX) {
                                /* Upload viewport */
                                memcpy(transfer.cpu + offset, viewport_vec4, sizeof(viewport_vec4));
                                offset += sizeof(viewport_vec4);
                        }

                        /* Upload uniforms */
                        memcpy(transfer.cpu + offset, buf->buffer, buf->size);

                        int uniform_count = 0;

                        struct mali_vertex_tiler_postfix *postfix;

                        switch (i) {
                        case PIPE_SHADER_VERTEX:
                                uniform_count = ctx->vs->variants[ctx->vs->active_variant].uniform_count;
                                postfix = &ctx->payload_vertex.postfix;
                                break;

                        case PIPE_SHADER_FRAGMENT:
                                uniform_count = ctx->fs->variants[ctx->fs->active_variant].uniform_count;
                                postfix = &ctx->payload_tiler.postfix;
                                break;

                        default:
                                printf("Unknown shader stage %d in uniform upload\n", i);
                                assert(0);
                        }

                        /* Also attach the same buffer as a UBO for extended access */

                        struct mali_uniform_buffer_meta uniform_buffers[] = {
                                {
                                        .size = MALI_POSITIVE((2 + uniform_count)),
                                        .ptr = transfer.gpu >> 2,
                                },
                        };

                        mali_ptr ubufs = panfrost_upload_transient(ctx, uniform_buffers, sizeof(uniform_buffers));
                        postfix->uniforms = transfer.gpu;
                        postfix->uniform_buffers = ubufs;
                }
        }

        ctx->dirty = 0;
}
/* Corresponds to exactly one draw, but does not submit anything */

static void
panfrost_queue_draw(struct panfrost_context *ctx)
{
        /* TODO: Expand the array? */
        if (ctx->draw_count >= MAX_DRAW_CALLS) {
                printf("Job buffer overflow, ignoring draw\n");
                assert(0);
        }

        /* Handle dirty flags now */
        panfrost_emit_for_draw(ctx, true);

        struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false, false);
        struct panfrost_transfer tiler = panfrost_vertex_tiler_job(ctx, true, false);

        ctx->u_vertex_jobs[ctx->vertex_job_count] = (struct mali_job_descriptor_header *) vertex.cpu;
        ctx->vertex_jobs[ctx->vertex_job_count++] = vertex.gpu;

        ctx->u_tiler_jobs[ctx->tiler_job_count] = (struct mali_job_descriptor_header *) tiler.cpu;
        ctx->tiler_jobs[ctx->tiler_job_count++] = tiler.gpu;

        ctx->draw_count++;
}
/* At the end of the frame, the vertex and tiler jobs are linked together and
 * then the fragment job is plonked at the end. Set value job is first for
 * unknown reasons. */

static void
panfrost_link_job_pair(struct mali_job_descriptor_header *first, mali_ptr next)
{
        if (first->job_descriptor_size)
                first->next_job_64 = (u64) (uintptr_t) next;
        else
                first->next_job_32 = (u32) (uintptr_t) next;
}

static void
panfrost_link_jobs(struct panfrost_context *ctx)
{
        if (ctx->draw_count) {
                /* Generate the set_value_job */
                panfrost_set_value_job(ctx);

                /* Have the first vertex job depend on the set value job */
                ctx->u_vertex_jobs[0]->job_dependency_index_1 = ctx->u_set_value_job->job_index;

                /* SV -> V */
                panfrost_link_job_pair(ctx->u_set_value_job, ctx->vertex_jobs[0]);
        }

        /* V -> V/T ; T -> T/null */
        for (int i = 0; i < ctx->vertex_job_count; ++i) {
                bool isLast = (i + 1) == ctx->vertex_job_count;

                panfrost_link_job_pair(ctx->u_vertex_jobs[i], isLast ? ctx->tiler_jobs[0] : ctx->vertex_jobs[i + 1]);
        }

        for (int i = 0; i < ctx->tiler_job_count; ++i) {
                bool isLast = (i + 1) == ctx->tiler_job_count;
                panfrost_link_job_pair(ctx->u_tiler_jobs[i], isLast ? 0 : ctx->tiler_jobs[i + 1]);
        }
}
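/* Resulting chain, as linked above (diagram ours):
 *
 *   set_value -> vertex_1 -> ... -> vertex_n -> tiler_1 -> ... -> tiler_n -> NULL
 */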
/* The entire frame is in memory -- send it off to the kernel! */

static void
panfrost_submit_frame(struct panfrost_context *ctx, bool flush_immediate)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);

        /* Edge case if screen is cleared and nothing else */
        bool has_draws = ctx->draw_count > 0;

        /* Workaround a bizarre lockup (a hardware errata?) */
        if (!has_draws)
                flush_immediate = true;

        /* A number of jobs are batched -- this must be linked and cleared */
        panfrost_link_jobs(ctx);

        ctx->draw_count = 0;
        ctx->vertex_job_count = 0;
        ctx->tiler_job_count = 0;

#ifndef DRY_RUN
        bool is_scanout = panfrost_is_scanout(ctx);
        int fragment_id = screen->driver->submit_vs_fs_job(ctx, has_draws, is_scanout);

        /* If visual, we can stall a frame */

        if (!flush_immediate)
                screen->driver->force_flush_fragment(ctx);

        screen->last_fragment_id = fragment_id;
        screen->last_fragment_flushed = false;

        /* If readback, flush now (hurts the pipelined performance) */
        if (flush_immediate)
                screen->driver->force_flush_fragment(ctx);

        if (screen->driver->dump_counters && pan_counters_base) {
                screen->driver->dump_counters(screen);

                char filename[128];
                snprintf(filename, sizeof(filename), "%s/frame%d.mdgprf", pan_counters_base, ++performance_counter_number);
                FILE *fp = fopen(filename, "wb");
                fwrite(screen->perf_counters.cpu, 4096, sizeof(uint32_t), fp);
                fclose(fp);
        }
#endif
}
static void
panfrost_flush(
        struct pipe_context *pipe,
        struct pipe_fence_handle **fence,
        unsigned flags)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);

        /* Nothing to do! */
        if (!ctx->draw_count && !job->clear) return;

        /* Whether to stall the pipeline for immediately correct results */
        bool flush_immediate = flags & PIPE_FLUSH_END_OF_FRAME;

        /* Submit the frame itself */
        panfrost_submit_frame(ctx, flush_immediate);

        /* Prepare for the next frame */
        panfrost_invalidate_frame(ctx);
}
#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c;

static int
g2m_draw_mode(enum pipe_prim_type mode)
{
        switch (mode) {
                DEFINE_CASE(POINTS);
                DEFINE_CASE(LINES);
                DEFINE_CASE(LINE_LOOP);
                DEFINE_CASE(LINE_STRIP);
                DEFINE_CASE(TRIANGLES);
                DEFINE_CASE(TRIANGLE_STRIP);
                DEFINE_CASE(TRIANGLE_FAN);
                DEFINE_CASE(QUADS);
                DEFINE_CASE(QUAD_STRIP);
                DEFINE_CASE(POLYGON);

        default:
                printf("Illegal draw mode %d\n", mode);
                assert(0);
                return MALI_LINE_LOOP;
        }
}

#undef DEFINE_CASE
static unsigned
panfrost_translate_index_size(unsigned size)
{
        switch (size) {
        case 1:
                return MALI_DRAW_INDEXED_UINT8;

        case 2:
                return MALI_DRAW_INDEXED_UINT16;

        case 4:
                return MALI_DRAW_INDEXED_UINT32;

        default:
                printf("Unknown index size %d\n", size);
                assert(0);
                return 0;
        }
}
1655 panfrost_get_index_buffer_raw(const struct pipe_draw_info
*info
)
1657 if (info
->has_user_indices
) {
1658 return (const uint8_t *) info
->index
.user
;
1660 struct panfrost_resource
*rsrc
= (struct panfrost_resource
*) (info
->index
.resource
);
1661 return (const uint8_t *) rsrc
->bo
->cpu
[0];
1665 /* Gets a GPU address for the associated index buffer. Only gauranteed to be
1666 * good for the duration of the draw (transient), could last longer */
1669 panfrost_get_index_buffer_mapped(struct panfrost_context
*ctx
, const struct pipe_draw_info
*info
)
1671 struct panfrost_resource
*rsrc
= (struct panfrost_resource
*) (info
->index
.resource
);
1673 off_t offset
= info
->start
* info
->index_size
;
1675 if (!info
->has_user_indices
) {
1676 /* Only resources can be directly mapped */
1677 return rsrc
->bo
->gpu
[0] + offset
;
1679 /* Otherwise, we need to upload to transient memory */
1680 const uint8_t *ibuf8
= panfrost_get_index_buffer_raw(info
);
1681 return panfrost_upload_transient(ctx
, ibuf8
+ offset
, info
->count
* info
->index_size
);
#define CALCULATE_MIN_MAX_INDEX(T, buffer, start, count) \
        for (unsigned _idx = (start); _idx < (start + count); ++_idx) { \
                T idx = buffer[_idx]; \
                if (idx > max_index) max_index = idx; \
                if (idx < min_index) min_index = idx; \
        }
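/* Usage sketch (ours): the macro expects min_index/max_index in scope, e.g.
 *
 *   int min_index = INT_MAX, max_index = 0;
 *   CALCULATE_MIN_MAX_INDEX(uint16_t, ibuf16, info->start, info->count);
 *
 * With indices {5, 2, 9} this yields min_index = 2 and max_index = 9, so the
 * draw code below invokes the vertex shader 9 - 2 + 1 = 8 times. */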
static void
panfrost_draw_vbo(
        struct pipe_context *pipe,
        const struct pipe_draw_info *info)
{
        struct panfrost_context *ctx = pan_context(pipe);

        ctx->payload_vertex.draw_start = info->start;
        ctx->payload_tiler.draw_start = info->start;

        int mode = info->mode;

        /* Fallback for unsupported modes */

        if (!(ctx->draw_modes & (1 << mode))) {
                if (mode == PIPE_PRIM_QUADS && info->count == 4 && ctx->rasterizer && !ctx->rasterizer->base.flatshade) {
                        mode = PIPE_PRIM_TRIANGLE_FAN;
                } else {
                        if (info->count < 4) {
                                /* Degenerate case? */
                                return;
                        }

                        util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
                        util_primconvert_draw_vbo(ctx->primconvert, info);
                        return;
                }
        }

        /* Now that we have a guaranteed terminating path, find the job.
         * Assignment commented out to prevent unused warning */

        /* struct panfrost_job *job = */ panfrost_get_job_for_fbo(ctx);

        ctx->payload_tiler.prefix.draw_mode = g2m_draw_mode(mode);

        ctx->vertex_count = info->count;

        /* For non-indexed draws, they're the same */
        unsigned invocation_count = ctx->vertex_count;

        /* For higher amounts of vertices (greater than what fits in a 16-bit
         * short), the other value is needed, otherwise there will be bizarre
         * rendering artefacts. It's not clear what these values mean yet. */

        ctx->payload_tiler.prefix.unknown_draw &= ~(0x3000 | 0x18000);
        ctx->payload_tiler.prefix.unknown_draw |= (mode == PIPE_PRIM_POINTS || ctx->vertex_count > 65535) ? 0x3000 : 0x18000;

        if (info->index_size) {
                /* Calculate the min/max index used so we can figure out how
                 * many times to invoke the vertex shader */

                const uint8_t *ibuf8 = panfrost_get_index_buffer_raw(info);

                int min_index = INT_MAX;
                int max_index = 0;

                if (info->index_size == 1) {
                        CALCULATE_MIN_MAX_INDEX(uint8_t, ibuf8, info->start, info->count);
                } else if (info->index_size == 2) {
                        const uint16_t *ibuf16 = (const uint16_t *) ibuf8;
                        CALCULATE_MIN_MAX_INDEX(uint16_t, ibuf16, info->start, info->count);
                } else if (info->index_size == 4) {
                        const uint32_t *ibuf32 = (const uint32_t *) ibuf8;
                        CALCULATE_MIN_MAX_INDEX(uint32_t, ibuf32, info->start, info->count);
                }

                /* Make sure we didn't go crazy */
                assert(min_index < INT_MAX);
                assert(max_index > 0);
                assert(max_index > min_index);

                /* Use the corresponding values */
                invocation_count = max_index - min_index + 1;
                ctx->payload_vertex.draw_start = min_index;
                ctx->payload_tiler.draw_start = min_index;

                ctx->payload_tiler.prefix.negative_start = -min_index;
                ctx->payload_tiler.prefix.index_count = MALI_POSITIVE(info->count);

                //assert(!info->restart_index); /* TODO: Research */
                assert(!info->index_bias);
                //assert(!info->min_index); /* TODO: Use value */

                ctx->payload_tiler.prefix.unknown_draw |= panfrost_translate_index_size(info->index_size);
                ctx->payload_tiler.prefix.indices = panfrost_get_index_buffer_mapped(ctx, info);
        } else {
                /* Index count == vertex count, if no indexing is applied, as
                 * if it is internally indexed in the expected order */

                ctx->payload_tiler.prefix.negative_start = 0;
                ctx->payload_tiler.prefix.index_count = MALI_POSITIVE(ctx->vertex_count);

                /* Reverse index state */
                ctx->payload_tiler.prefix.unknown_draw &= ~MALI_DRAW_INDEXED_UINT32;
                ctx->payload_tiler.prefix.indices = (uintptr_t) NULL;
        }

        ctx->payload_vertex.prefix.invocation_count = MALI_POSITIVE(invocation_count);
        ctx->payload_tiler.prefix.invocation_count = MALI_POSITIVE(invocation_count);

        /* Fire off the draw itself */
        panfrost_queue_draw(ctx);
}
static void
panfrost_generic_cso_delete(struct pipe_context *pctx, void *hwcso)
{
        free(hwcso);
}

static void
panfrost_set_scissor(struct panfrost_context *ctx)
{
        const struct pipe_scissor_state *ss = &ctx->scissor;

        if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor && 0) {
                ctx->viewport->viewport0[0] = ss->minx;
                ctx->viewport->viewport0[1] = ss->miny;
                ctx->viewport->viewport1[0] = MALI_POSITIVE(ss->maxx);
                ctx->viewport->viewport1[1] = MALI_POSITIVE(ss->maxy);
        } else {
                ctx->viewport->viewport0[0] = 0;
                ctx->viewport->viewport0[1] = 0;
                ctx->viewport->viewport1[0] = MALI_POSITIVE(ctx->pipe_framebuffer.width);
                ctx->viewport->viewport1[1] = MALI_POSITIVE(ctx->pipe_framebuffer.height);
        }
}
static void *
panfrost_create_rasterizer_state(
        struct pipe_context *pctx,
        const struct pipe_rasterizer_state *cso)
{
        struct panfrost_rasterizer *so = CALLOC_STRUCT(panfrost_rasterizer);

        so->base = *cso;

        /* Bitmask, unknown meaning of the start value */
        so->tiler_gl_enables = is_t6xx ? 0x105 : 0x7;

        so->tiler_gl_enables |= MALI_FRONT_FACE(
                                        cso->front_ccw ? MALI_CCW : MALI_CW);

        if (cso->cull_face & PIPE_FACE_FRONT)
                so->tiler_gl_enables |= MALI_CULL_FACE_FRONT;

        if (cso->cull_face & PIPE_FACE_BACK)
                so->tiler_gl_enables |= MALI_CULL_FACE_BACK;

        return so;
}
static void
panfrost_bind_rasterizer_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct pipe_rasterizer_state *cso = hwcso;

        /* TODO: Why can't rasterizer be NULL ever? Other drivers are fine.. */
        if (!hwcso)
                return;

        /* If scissor test has changed, we'll need to update that now */
        bool update_scissor = !ctx->rasterizer || ctx->rasterizer->base.scissor != cso->scissor;

        ctx->rasterizer = hwcso;

        /* Actualise late changes */
        if (update_scissor)
                panfrost_set_scissor(ctx);

        ctx->dirty |= PAN_DIRTY_RASTERIZER;
}
static void *
panfrost_create_vertex_elements_state(
        struct pipe_context *pctx,
        unsigned num_elements,
        const struct pipe_vertex_element *elements)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state);

        so->num_elements = num_elements;
        memcpy(so->pipe, elements, sizeof(*elements) * num_elements);

        /* Allocate memory for the hardware descriptor state up front */
        struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_attr_meta) * num_elements, HEAP_DESCRIPTOR);
        so->hw = (struct mali_attr_meta *) transfer.cpu;
        so->descriptor_ptr = transfer.gpu;

        /* Now fill out each element's descriptor */
        for (unsigned i = 0; i < num_elements; ++i) {
                so->hw[i].index = elements[i].vertex_buffer_index;

                enum pipe_format fmt = elements[i].src_format;
                const struct util_format_description *desc = util_format_description(fmt);
                so->hw[i].unknown1 = 0x2;
                so->hw[i].swizzle = panfrost_get_default_swizzle(desc->nr_channels);

                so->hw[i].format = panfrost_find_format(desc);

                /* The field itself should probably be shifted over */
                so->hw[i].src_offset = elements[i].src_offset;
        }

        return so;
}
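/* Sketch of the mapping (hypothetical layout, not taken from a trace): a
 * vertex of { vec3 position; vec4 colour; } interleaved in buffer 0 arrives
 * as two pipe_vertex_elements and would serialise roughly to:
 *
 *     hw[0] = { .index = 0, .src_offset = 0,  .format = <3-channel float> }
 *     hw[1] = { .index = 0, .src_offset = 12, .format = <4-channel float> }
 *
 * with swizzles defaulted from the channel counts (3 and 4). */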
static void
panfrost_bind_vertex_elements_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        ctx->vertex = hwcso;
        ctx->dirty |= PAN_DIRTY_VERTEX;
}
static void
panfrost_delete_vertex_elements_state(struct pipe_context *pctx, void *hwcso)
{
        struct panfrost_vertex_state *so = (struct panfrost_vertex_state *) hwcso;
        unsigned bytes = sizeof(struct mali_attr_meta) * so->num_elements;
        printf("Vertex elements delete leaks descriptor (%d bytes)\n", bytes);
        free(hwcso);
}
static void *
panfrost_create_shader_state(
        struct pipe_context *pctx,
        const struct pipe_shader_state *cso)
{
        struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
        so->base = *cso;

        /* Token deep copy to prevent memory corruption */

        if (cso->type == PIPE_SHADER_IR_TGSI)
                so->base.tokens = tgsi_dup_tokens(so->base.tokens);

        return so;
}
static void
panfrost_delete_shader_state(
        struct pipe_context *pctx,
        void *so)
{
        struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;

        if (cso->base.type == PIPE_SHADER_IR_TGSI) {
                printf("Deleting TGSI shader leaks duplicated tokens\n");
        }

        unsigned leak = cso->variant_count * sizeof(struct mali_shader_meta);
        printf("Deleting shader state leaks descriptors (%d bytes), and shader bytecode\n", leak);

        free(so);
}
static void *
panfrost_create_sampler_state(
        struct pipe_context *pctx,
        const struct pipe_sampler_state *cso)
{
        struct panfrost_sampler_state *so = CALLOC_STRUCT(panfrost_sampler_state);
        so->base = *cso;

        /* sampler_state corresponds to mali_sampler_descriptor, which we can generate entirely here */

        struct mali_sampler_descriptor sampler_descriptor = {
                .filter_mode = MALI_TEX_MIN(translate_tex_filter(cso->min_img_filter))
                               | MALI_TEX_MAG(translate_tex_filter(cso->mag_img_filter))
                               | translate_mip_filter(cso->min_mip_filter),

                .wrap_s = translate_tex_wrap(cso->wrap_s),
                .wrap_t = translate_tex_wrap(cso->wrap_t),
                .wrap_r = translate_tex_wrap(cso->wrap_r),
                .compare_func = panfrost_translate_alt_compare_func(cso->compare_func),

                .border_color = {
                        cso->border_color.f[0],
                        cso->border_color.f[1],
                        cso->border_color.f[2],
                        cso->border_color.f[3]
                },

                .min_lod = FIXED_16(0.0),
                .max_lod = FIXED_16(31.0),
        };

        so->hw = sampler_descriptor;

        return so;
}
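/* On the LOD clamps above: FIXED_16 presumably stores LODs in 8.8 fixed
 * point (an assumption consistent with the 0.0 .. 31.0 range used here),
 * so FIXED_16(0.0) == 0x0000 and FIXED_16(31.0) == 31 * 256 == 0x1F00,
 * covering the full mip range. */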
static void
panfrost_bind_sampler_states(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_sampler,
        void **sampler)
{
        assert(start_slot == 0);

        struct panfrost_context *ctx = pan_context(pctx);

        /* XXX: Should upload, not just copy? */
        ctx->sampler_count[shader] = num_sampler;
        memcpy(ctx->samplers[shader], sampler, num_sampler * sizeof (void *));

        ctx->dirty |= PAN_DIRTY_SAMPLERS;
}
static bool
panfrost_variant_matches(struct panfrost_context *ctx, struct panfrost_shader_state *variant)
{
        struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;

        if (alpha->enabled || variant->alpha_state.enabled) {
                /* Make sure enable state is at least the same */
                if (alpha->enabled != variant->alpha_state.enabled) {
                        return false;
                }

                /* Check that the contents of the test are the same */
                bool same_func = alpha->func == variant->alpha_state.func;
                bool same_ref = alpha->ref_value == variant->alpha_state.ref_value;

                if (!(same_func && same_ref)) {
                        return false;
                }
        }

        /* Otherwise, we're good to go */
        return true;
}
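/* Concretely: binding the same fragment shader first with alpha testing
 * disabled and later with, say, (func = PIPE_FUNC_GREATER, ref = 0.5)
 * enabled fails this match, so panfrost_bind_fs_state below ends up
 * compiling a second variant with the emulated test baked in. */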
static void
panfrost_bind_fs_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        ctx->fs = hwcso;

        if (!hwcso) return;

        /* Match the appropriate variant */

        signed variant = -1;

        struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;

        for (unsigned i = 0; i < variants->variant_count; ++i) {
                if (panfrost_variant_matches(ctx, &variants->variants[i])) {
                        variant = i;
                        break;
                }
        }

        if (variant == -1) {
                /* No variant matched, so create a new one */
                variant = variants->variant_count++;
                assert(variants->variant_count < MAX_SHADER_VARIANTS);

                variants->variants[variant].base = hwcso;
                variants->variants[variant].alpha_state = ctx->depth_stencil->alpha;

                /* Allocate the mapped descriptor ahead-of-time. TODO: Use for FS as well as VS */
                struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR);

                variants->variants[variant].tripipe = (struct mali_shader_meta *) transfer.cpu;
                variants->variants[variant].tripipe_gpu = transfer.gpu;
        }

        /* Select this variant */
        variants->active_variant = variant;

        struct panfrost_shader_state *shader_state = &variants->variants[variant];
        assert(panfrost_variant_matches(ctx, shader_state));

        /* Now we have a variant selected, so compile and go */

        if (!shader_state->compiled) {
                panfrost_shader_compile(ctx, shader_state->tripipe, NULL, JOB_TYPE_TILER, shader_state);
                shader_state->compiled = true;
        }

        ctx->dirty |= PAN_DIRTY_FS;
}
static void
panfrost_bind_vs_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        ctx->vs = hwcso;

        if (!hwcso) return;

        if (!ctx->vs->variants[0].compiled) {
                ctx->vs->variants[0].base = hwcso;

                /* TODO DRY from above */
                struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR);
                ctx->vs->variants[0].tripipe = (struct mali_shader_meta *) transfer.cpu;
                ctx->vs->variants[0].tripipe_gpu = transfer.gpu;

                panfrost_shader_compile(ctx, ctx->vs->variants[0].tripipe, NULL, JOB_TYPE_VERTEX, &ctx->vs->variants[0]);
                ctx->vs->variants[0].compiled = true;
        }

        ctx->dirty |= PAN_DIRTY_VS;
}
static void
panfrost_set_vertex_buffers(
        struct pipe_context *pctx,
        unsigned start_slot,
        unsigned num_buffers,
        const struct pipe_vertex_buffer *buffers)
{
        struct panfrost_context *ctx = pan_context(pctx);
        assert(num_buffers <= PIPE_MAX_ATTRIBS);

        /* XXX: Dirty tracking? etc */

        /* Free the CPU-side shadow copy from the previous bind, if any,
         * so rebinding does not leak it */
        if (ctx->vertex_buffers) {
                free(ctx->vertex_buffers);
                ctx->vertex_buffers = NULL;
        }

        if (buffers) {
                size_t sz = sizeof(buffers[0]) * num_buffers;
                ctx->vertex_buffers = malloc(sz);
                ctx->vertex_buffer_count = num_buffers;
                memcpy(ctx->vertex_buffers, buffers, sz);
        } else {
                ctx->vertex_buffer_count = 0;
        }
}
static void
panfrost_set_constant_buffer(
        struct pipe_context *pctx,
        enum pipe_shader_type shader, uint index,
        const struct pipe_constant_buffer *buf)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];

        size_t sz = buf ? buf->buffer_size : 0;

        /* Free previous buffer */

        if (pbuf->buffer) {
                free(pbuf->buffer);
                pbuf->buffer = NULL;
        }

        /* If unbinding, we're done */

        if (!buf)
                return;

        /* Multiple constant buffers not yet supported */
        assert(index == 0);

        const uint8_t *cpu;

        struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer);

        if (rsrc) {
                cpu = rsrc->bo->cpu[0];
        } else if (buf->user_buffer) {
                cpu = buf->user_buffer;
        } else {
                printf("No constant buffer?\n");
                return;
        }

        /* Copy the constant buffer into the driver context for later upload */

        pbuf->buffer = malloc(sz);
        memcpy(pbuf->buffer, cpu + buf->buffer_offset, sz);
}
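/* Usage note: Gallium hands us either a real resource (buf->buffer) or a
 * raw pointer (buf->user_buffer) -- state trackers commonly pass GL uniform
 * data as a user buffer -- which is why both paths above funnel into the
 * same CPU-side copy at buffer_offset. */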
static void
panfrost_set_stencil_ref(
        struct pipe_context *pctx,
        const struct pipe_stencil_ref *ref)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->stencil_ref = *ref;

        /* Shader core dirty */
        ctx->dirty |= PAN_DIRTY_FS;
}
static struct pipe_sampler_view *
panfrost_create_sampler_view(
        struct pipe_context *pctx,
        struct pipe_resource *texture,
        const struct pipe_sampler_view *template)
{
        struct panfrost_sampler_view *so = CALLOC_STRUCT(panfrost_sampler_view);
        int bytes_per_pixel = util_format_get_blocksize(texture->format);

        pipe_reference(NULL, &texture->reference);

        struct panfrost_resource *prsrc = (struct panfrost_resource *) texture;

        so->base = *template;
        so->base.texture = texture;
        so->base.reference.count = 1;
        so->base.context = pctx;

        /* sampler_views correspond to texture descriptors, minus the texture
         * (data) itself. So, we serialise the descriptor here and cache it for
         * later use */

        /* TODO: Other types of textures */
        assert(template->target == PIPE_TEXTURE_2D);

        /* Make sure it's something with which we're familiar */
        assert(bytes_per_pixel >= 1 && bytes_per_pixel <= 4);

        /* TODO: Detect from format better */
        const struct util_format_description *desc = util_format_description(prsrc->base.format);

        unsigned char user_swizzle[4] = {
                template->swizzle_r,
                template->swizzle_g,
                template->swizzle_b,
                template->swizzle_a
        };

        enum mali_format format = panfrost_find_format(desc);

        struct mali_texture_descriptor texture_descriptor = {
                .width = MALI_POSITIVE(texture->width0),
                .height = MALI_POSITIVE(texture->height0),
                .depth = MALI_POSITIVE(texture->depth0),

                .format = {
                        .swizzle = panfrost_translate_swizzle_4(desc->swizzle),
                        .format = format,

                        .is_not_cubemap = 1,

                        /* 0x11 - regular texture 2d, uncompressed tiled */
                        /* 0x12 - regular texture 2d, uncompressed linear */
                        /* 0x1c - AFBC compressed (internally tiled, probably) texture 2D */

                        .usage2 = prsrc->bo->has_afbc ? 0x1c : (prsrc->bo->tiled ? 0x11 : 0x12),
                },

                .swizzle = panfrost_translate_swizzle_4(user_swizzle)
        };

        /* TODO: Other base levels require adjusting dimensions / level numbers / etc */
        assert (template->u.tex.first_level == 0);

        texture_descriptor.nr_mipmap_levels = template->u.tex.last_level - template->u.tex.first_level;

        so->hw = texture_descriptor;

        return (struct pipe_sampler_view *) so;
}
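/* Worked example: a 256x256 2D texture with a full mip chain has
 * last_level = 8 and first_level = 0, so nr_mipmap_levels = 8 -- apparently
 * the count of levels beyond the base -- and width/height are stored as
 * MALI_POSITIVE(256) = 255. */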
static void
panfrost_set_sampler_views(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_views,
        struct pipe_sampler_view **views)
{
        struct panfrost_context *ctx = pan_context(pctx);

        assert(start_slot == 0);

        ctx->sampler_view_count[shader] = num_views;
        memcpy(ctx->sampler_views[shader], views, num_views * sizeof (void *));

        ctx->dirty |= PAN_DIRTY_TEXTURES;
}
static void
panfrost_sampler_view_destroy(
        struct pipe_context *pctx,
        struct pipe_sampler_view *views)
{
        //struct panfrost_context *ctx = pan_context(pctx);

        free(views);
}
static void
panfrost_set_framebuffer_state(struct pipe_context *pctx,
                               const struct pipe_framebuffer_state *fb)
{
        struct panfrost_context *ctx = pan_context(pctx);

        /* Flush when switching away from an FBO */

        if (!panfrost_is_scanout(ctx)) {
                panfrost_flush(pctx, NULL, 0);
        }

        ctx->pipe_framebuffer.nr_cbufs = fb->nr_cbufs;
        ctx->pipe_framebuffer.samples = fb->samples;
        ctx->pipe_framebuffer.layers = fb->layers;
        ctx->pipe_framebuffer.width = fb->width;
        ctx->pipe_framebuffer.height = fb->height;

        for (int i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) {
                struct pipe_surface *cb = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;

                /* check if changing cbuf */
                if (ctx->pipe_framebuffer.cbufs[i] == cb) continue;

                if (cb && (i != 0)) {
                        printf("XXX: Multiple render targets not supported before t7xx!\n");
                        assert(0);
                }

                /* Assign the new cbuf */
                pipe_surface_reference(&ctx->pipe_framebuffer.cbufs[i], cb);

                if (!cb)
                        continue;

                if (require_sfbd)
                        ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
                else
                        ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);

                panfrost_attach_vt_framebuffer(ctx);
                panfrost_new_frag_framebuffer(ctx);
                panfrost_set_scissor(ctx);

                struct panfrost_resource *tex = ((struct panfrost_resource *) ctx->pipe_framebuffer.cbufs[i]->texture);
                bool is_scanout = panfrost_is_scanout(ctx);

                if (!is_scanout && !tex->bo->has_afbc) {
                        /* The blob is aggressive about enabling AFBC. As such,
                         * it's pretty much necessary to use it here, since we
                         * have no traces of non-compressed FBO. */

                        panfrost_enable_afbc(ctx, tex, false);
                }

                if (!is_scanout && !tex->bo->has_checksum) {
                        /* Enable transaction elimination if we can */
                        panfrost_enable_checksum(ctx, tex);
                }
        }

        struct pipe_surface *zb = fb->zsbuf;

        if (ctx->pipe_framebuffer.zsbuf != zb) {
                pipe_surface_reference(&ctx->pipe_framebuffer.zsbuf, zb);

                if (zb) {
                        if (require_sfbd)
                                ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
                        else
                                ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);

                        panfrost_attach_vt_framebuffer(ctx);
                        panfrost_new_frag_framebuffer(ctx);
                        panfrost_set_scissor(ctx);

                        struct panfrost_resource *tex = ((struct panfrost_resource *) ctx->pipe_framebuffer.zsbuf->texture);

                        if (!tex->bo->has_afbc && !panfrost_is_scanout(ctx))
                                panfrost_enable_afbc(ctx, tex, true);
                }
        }
}
static void *
panfrost_create_blend_state(struct pipe_context *pipe,
                            const struct pipe_blend_state *blend)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_blend_state *so = CALLOC_STRUCT(panfrost_blend_state);
        so->base = *blend;

        /* TODO: The following features are not yet implemented */
        assert(!blend->logicop_enable);
        assert(!blend->alpha_to_coverage);
        assert(!blend->alpha_to_one);

        /* Compile the blend state, first as fixed-function if we can */

        if (panfrost_make_fixed_blend_mode(&blend->rt[0], &so->equation, blend->rt[0].colormask, &ctx->blend_color))
                return so;

        /* If we can't, compile a blend shader instead */

        panfrost_make_blend_shader(ctx, so, &ctx->blend_color);

        return so;
}
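/* For instance, plain alpha blending (src_alpha * src +
 * (1 - src_alpha) * dst) should be expressible as a fixed-function
 * equation, while combinations the hardware blender cannot encode fall
 * through to the generated blend shader above. This split is also why
 * panfrost_set_blend_color below has to worry about regenerating state. */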
static void
panfrost_bind_blend_state(struct pipe_context *pipe,
                          void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct pipe_blend_state *blend = (struct pipe_blend_state *) cso;
        struct panfrost_blend_state *pblend = (struct panfrost_blend_state *) cso;
        ctx->blend = pblend;

        if (!blend)
                return;

        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_DITHER, !blend->dither);

        /* TODO: Attach color */

        /* Shader itself is not dirty, but the shader core is */
        ctx->dirty |= PAN_DIRTY_FS;
}
static void
panfrost_delete_blend_state(struct pipe_context *pipe,
                            void *blend)
{
        struct panfrost_blend_state *so = (struct panfrost_blend_state *) blend;

        if (so->has_blend_shader) {
                printf("Deleting blend state leaks blend shader bytecode\n");
        }

        free(blend);
}
static void
panfrost_set_blend_color(struct pipe_context *pipe,
                         const struct pipe_blend_color *blend_color)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* If blend_color is NULL we're unbinding, so ctx->blend_color is now undefined -> nothing to do */

        if (blend_color) {
                ctx->blend_color = *blend_color;

                /* The blend mode depends on the blend constant color, due to the
                 * fixed/programmable split. So, we're forced to regenerate the blend
                 * equation */

                /* TODO: Attach color */
        }
}
static void *
panfrost_create_depth_stencil_state(struct pipe_context *pipe,
                                    const struct pipe_depth_stencil_alpha_state *depth_stencil)
{
        return mem_dup(depth_stencil, sizeof(*depth_stencil));
}
static void
panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
                                  void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct pipe_depth_stencil_alpha_state *depth_stencil = cso;
        ctx->depth_stencil = depth_stencil;

        if (!depth_stencil)
                return;

        /* Alpha does not exist in the hardware (it's not in ES3), so it's
         * emulated in the fragment shader */

        if (depth_stencil->alpha.enabled) {
                /* We need to trigger a new shader (maybe) */
                ctx->base.bind_fs_state(&ctx->base, ctx->fs);
        }

        /* Stencil state */
        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_STENCIL_TEST, depth_stencil->stencil[0].enabled); /* XXX: which one? */

        panfrost_make_stencil_state(&depth_stencil->stencil[0], &ctx->fragment_shader_core.stencil_front);
        ctx->fragment_shader_core.stencil_mask_front = depth_stencil->stencil[0].writemask;

        panfrost_make_stencil_state(&depth_stencil->stencil[1], &ctx->fragment_shader_core.stencil_back);
        ctx->fragment_shader_core.stencil_mask_back = depth_stencil->stencil[1].writemask;

        /* Depth state (TODO: Refactor) */
        SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_DEPTH_TEST, depth_stencil->depth.enabled);

        int func = depth_stencil->depth.enabled ? depth_stencil->depth.func : PIPE_FUNC_ALWAYS;

        ctx->fragment_shader_core.unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
        ctx->fragment_shader_core.unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(func));

        /* Bounds test not implemented */
        assert(!depth_stencil->depth.bounds_test);

        ctx->dirty |= PAN_DIRTY_FS;
}
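/* Example of the read-modify-write above: with depth testing enabled and
 * func = PIPE_FUNC_LESS, the MALI_DEPTH_FUNC field of unknown2_3 is first
 * cleared via ~MALI_DEPTH_FUNC_MASK and then ORed back in as
 * MALI_DEPTH_FUNC(panfrost_translate_compare_func(PIPE_FUNC_LESS)), leaving
 * the register's other (still unidentified) bits intact. */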
static void
panfrost_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
{
        free(depth);
}
static void
panfrost_set_sample_mask(struct pipe_context *pipe,
                         unsigned sample_mask)
{
}

static void
panfrost_set_clip_state(struct pipe_context *pipe,
                        const struct pipe_clip_state *clip)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}
static void
panfrost_set_viewport_states(struct pipe_context *pipe,
                             unsigned start_slot,
                             unsigned num_viewports,
                             const struct pipe_viewport_state *viewports)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_viewports == 1);

        ctx->pipe_viewport = *viewports;

#if 0
        /* TODO: What if not centered? */
        float w = fabsf(viewports->scale[0]) * 2.0;
        float h = fabsf(viewports->scale[1]) * 2.0;

        ctx->viewport.viewport1[0] = MALI_POSITIVE((int) w);
        ctx->viewport.viewport1[1] = MALI_POSITIVE((int) h);
#endif
}
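/* Sanity check on the maths above: Gallium encodes a viewport of width w as
 * scale[0] = w / 2 (negated when Y is flipped), so a 1920x1080 viewport
 * gives |scale[0]| * 2 = 1920 and |scale[1]| * 2 = 1080, matching the
 * framebuffer dimensions fed to MALI_POSITIVE elsewhere. */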
static void
panfrost_set_scissor_states(struct pipe_context *pipe,
                            unsigned start_slot,
                            unsigned num_scissors,
                            const struct pipe_scissor_state *scissors)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_scissors == 1);

        ctx->scissor = *scissors;

        panfrost_set_scissor(ctx);
}
static void
panfrost_set_polygon_stipple(struct pipe_context *pipe,
                             const struct pipe_poly_stipple *stipple)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}

static void
panfrost_set_active_query_state(struct pipe_context *pipe,
                                boolean enable)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}
static void
panfrost_destroy(struct pipe_context *pipe)
{
        struct panfrost_context *panfrost = pan_context(pipe);

        if (panfrost->blitter)
                util_blitter_destroy(panfrost->blitter);
}
static struct pipe_query *
panfrost_create_query(struct pipe_context *pipe,
                      unsigned type,
                      unsigned index)
{
        struct panfrost_query *q = CALLOC_STRUCT(panfrost_query);

        q->type = type;
        q->index = index;

        return (struct pipe_query *) q;
}
static void
panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
        free(q);
}
static boolean
panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
        {
                /* Allocate a word for the query results to be stored */
                query->transfer = panfrost_allocate_chunk(ctx, sizeof(unsigned), HEAP_DESCRIPTOR);

                ctx->occlusion_query = query;

                break;
        }

        default:
                fprintf(stderr, "Skipping query %d\n", query->type);
                break;
        }

        return true;
}
static boolean
panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->occlusion_query = NULL;
        return true;
}
static boolean
panfrost_get_query_result(struct pipe_context *pipe,
                          struct pipe_query *q,
                          boolean wait,
                          union pipe_query_result *vresult)
{
        struct panfrost_query *query = (struct panfrost_query *) q;

        /* We need to flush out the jobs to actually run the counter, TODO
         * check wait, TODO wallpaper after if needed */

        panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
                /* Read back the query results */
                unsigned *result = (unsigned *) query->transfer.cpu;
                unsigned passed = *result;

                if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
                        vresult->u64 = passed;
                } else {
                        vresult->b = !!passed;
                }

                break;
        }

        default:
                fprintf(stderr, "Skipped query get %d\n", query->type);
                break;
        }

        return true;
}
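/* Sketch of the full query flow from a state tracker's point of view
 * (hypothetical caller, shown for orientation only):
 *
 *     struct pipe_query *q =
 *             pipe->create_query(pipe, PIPE_QUERY_OCCLUSION_COUNTER, 0);
 *     pipe->begin_query(pipe, q);
 *     // ... emit draws ...
 *     pipe->end_query(pipe, q);
 *
 *     union pipe_query_result result;
 *     pipe->get_query_result(pipe, q, true, &result);
 *     // result.u64 now holds the number of samples that passed
 *
 * The panfrost_flush() above is what actually retires the jobs that write
 * the result word allocated in panfrost_begin_query. */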
static void
panfrost_setup_hardware(struct panfrost_context *ctx)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);

        for (int i = 0; i < ARRAY_SIZE(ctx->transient_pools); ++i) {
                /* Allocate the beginning of the transient pool */
                int entry_size = (1 << 22); /* 4MB */

                ctx->transient_pools[i].entry_size = entry_size;
                ctx->transient_pools[i].entry_count = 1;

                ctx->transient_pools[i].entries[0] = (struct panfrost_memory_entry *) pb_slab_alloc(&screen->slabs, entry_size, HEAP_TRANSIENT);
        }

        screen->driver->allocate_slab(screen, &ctx->scratchpad, 64, false, 0, 0, 0);
        screen->driver->allocate_slab(screen, &ctx->varying_mem, 16384, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_COHERENT_LOCAL, 0, 0);
        screen->driver->allocate_slab(screen, &ctx->shaders, 4096, true, PAN_ALLOCATE_EXECUTE, 0, 0);
        screen->driver->allocate_slab(screen, &ctx->tiler_heap, 32768, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
        screen->driver->allocate_slab(screen, &ctx->misc_0, 128*128, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
}
/* New context creation, which also does hardware initialisation since I don't
 * know the better way to structure this :smirk: */

struct pipe_context *
panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
{
        struct panfrost_context *ctx = CALLOC_STRUCT(panfrost_context);
        struct pipe_context *gallium = (struct pipe_context *) ctx;

        gallium->screen = screen;

        gallium->destroy = panfrost_destroy;

        gallium->set_framebuffer_state = panfrost_set_framebuffer_state;

        gallium->flush = panfrost_flush;
        gallium->clear = panfrost_clear;
        gallium->draw_vbo = panfrost_draw_vbo;

        gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
        gallium->set_constant_buffer = panfrost_set_constant_buffer;

        gallium->set_stencil_ref = panfrost_set_stencil_ref;

        gallium->create_sampler_view = panfrost_create_sampler_view;
        gallium->set_sampler_views = panfrost_set_sampler_views;
        gallium->sampler_view_destroy = panfrost_sampler_view_destroy;

        gallium->create_rasterizer_state = panfrost_create_rasterizer_state;
        gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
        gallium->delete_rasterizer_state = panfrost_generic_cso_delete;

        gallium->create_vertex_elements_state = panfrost_create_vertex_elements_state;
        gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
        gallium->delete_vertex_elements_state = panfrost_delete_vertex_elements_state;

        gallium->create_fs_state = panfrost_create_shader_state;
        gallium->delete_fs_state = panfrost_delete_shader_state;
        gallium->bind_fs_state = panfrost_bind_fs_state;

        gallium->create_vs_state = panfrost_create_shader_state;
        gallium->delete_vs_state = panfrost_delete_shader_state;
        gallium->bind_vs_state = panfrost_bind_vs_state;

        gallium->create_sampler_state = panfrost_create_sampler_state;
        gallium->delete_sampler_state = panfrost_generic_cso_delete;
        gallium->bind_sampler_states = panfrost_bind_sampler_states;

        gallium->create_blend_state = panfrost_create_blend_state;
        gallium->bind_blend_state = panfrost_bind_blend_state;
        gallium->delete_blend_state = panfrost_delete_blend_state;

        gallium->set_blend_color = panfrost_set_blend_color;

        gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
        gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
        gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;

        gallium->set_sample_mask = panfrost_set_sample_mask;

        gallium->set_clip_state = panfrost_set_clip_state;
        gallium->set_viewport_states = panfrost_set_viewport_states;
        gallium->set_scissor_states = panfrost_set_scissor_states;
        gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
        gallium->set_active_query_state = panfrost_set_active_query_state;

        gallium->create_query = panfrost_create_query;
        gallium->destroy_query = panfrost_destroy_query;
        gallium->begin_query = panfrost_begin_query;
        gallium->end_query = panfrost_end_query;
        gallium->get_query_result = panfrost_get_query_result;
        panfrost_resource_context_init(gallium);

        panfrost_setup_hardware(ctx);

        gallium->stream_uploader = u_upload_create_default(gallium);
        gallium->const_uploader = gallium->stream_uploader;
        assert(gallium->stream_uploader);

        /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */
        ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1;
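        /* I.e. one bit per primitive type from PIPE_PRIM_POINTS (0) up to and
         * including PIPE_PRIM_POLYGON: (1 << (POLYGON + 1)) - 1 is a mask of
         * POLYGON + 1 ones. Anything outside that mask (e.g. the adjacency
         * primitives) is lowered by the u_primconvert instance below. */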
        ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);

        ctx->blitter = util_blitter_create(gallium);
        assert(ctx->blitter);

        /* Prepare for render! */

        panfrost_job_init(ctx);
        panfrost_emit_vertex_payload(ctx);
        panfrost_emit_tiler_payload(ctx);
        panfrost_invalidate_frame(ctx);
        panfrost_viewport(ctx, 0.0, 1.0, 0, 0, ctx->pipe_framebuffer.width, ctx->pipe_framebuffer.height);
        panfrost_default_shader_backend(ctx);
        panfrost_generate_space_filler_indices();

        return gallium;
}