2 * © Copyright 2018 Alyssa Rosenzweig
3 * Copyright © 2014-2017 Broadcom
4 * Copyright (C) 2017 Intel Corporation
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 #include "pan_context.h"
32 #include "pan_format.h"
33 #include "panfrost-quirks.h"
35 #include "util/macros.h"
36 #include "util/format/u_format.h"
37 #include "util/u_inlines.h"
38 #include "util/u_upload_mgr.h"
39 #include "util/u_memory.h"
40 #include "util/u_vbuf.h"
41 #include "util/half_float.h"
42 #include "util/u_helpers.h"
43 #include "util/format/u_format.h"
44 #include "util/u_prim.h"
45 #include "util/u_prim_restart.h"
46 #include "indices/u_primconvert.h"
47 #include "tgsi/tgsi_parse.h"
48 #include "tgsi/tgsi_from_mesa.h"
49 #include "util/u_math.h"
51 #include "pan_screen.h"
52 #include "pan_blending.h"
53 #include "pan_blend_shaders.h"
56 struct midgard_tiler_descriptor
57 panfrost_emit_midg_tiler(struct panfrost_batch
*batch
, unsigned vertex_count
)
59 struct panfrost_screen
*screen
= pan_screen(batch
->ctx
->base
.screen
);
60 bool hierarchy
= !(screen
->quirks
& MIDGARD_NO_HIER_TILING
);
61 struct midgard_tiler_descriptor t
= {0};
62 unsigned height
= batch
->key
.height
;
63 unsigned width
= batch
->key
.width
;
66 panfrost_choose_hierarchy_mask(width
, height
, vertex_count
, hierarchy
);
68 /* Compute the polygon header size and use that to offset the body */
70 unsigned header_size
= panfrost_tiler_header_size(
71 width
, height
, t
.hierarchy_mask
, hierarchy
);
73 t
.polygon_list_size
= panfrost_tiler_full_size(
74 width
, height
, t
.hierarchy_mask
, hierarchy
);
79 struct panfrost_bo
*tiler_heap
;
81 tiler_heap
= panfrost_batch_get_tiler_heap(batch
);
82 t
.polygon_list
= panfrost_batch_get_polygon_list(batch
,
87 /* Allow the entire tiler heap */
88 t
.heap_start
= tiler_heap
->gpu
;
89 t
.heap_end
= tiler_heap
->gpu
+ tiler_heap
->size
;
91 struct panfrost_bo
*tiler_dummy
;
93 tiler_dummy
= panfrost_batch_get_tiler_dummy(batch
);
94 header_size
= MALI_TILER_MINIMUM_HEADER_SIZE
;
96 /* The tiler is disabled, so don't allow the tiler heap */
97 t
.heap_start
= tiler_dummy
->gpu
;
98 t
.heap_end
= t
.heap_start
;
100 /* Use a dummy polygon list */
101 t
.polygon_list
= tiler_dummy
->gpu
;
103 /* Disable the tiler */
105 t
.hierarchy_mask
|= MALI_TILER_DISABLED
;
107 t
.hierarchy_mask
= MALI_TILER_USER
;
108 t
.polygon_list_size
= MALI_TILER_MINIMUM_HEADER_SIZE
+ 4;
110 /* We don't have a WRITE_VALUE job, so write the polygon list manually */
111 uint32_t *polygon_list_body
= (uint32_t *) (tiler_dummy
->cpu
+ header_size
);
112 polygon_list_body
[0] = 0xa0000000; /* TODO: Just that? */
116 t
.polygon_list_body
=
117 t
.polygon_list
+ header_size
;
124 struct pipe_context
*pipe
,
126 const union pipe_color_union
*color
,
127 double depth
, unsigned stencil
)
129 struct panfrost_context
*ctx
= pan_context(pipe
);
131 /* TODO: panfrost_get_fresh_batch_for_fbo() instantiates a new batch if
132 * the existing batch targeting this FBO has draws. We could probably
133 * avoid that by replacing plain clears by quad-draws with a specific
134 * color/depth/stencil value, thus avoiding the generation of extra
137 struct panfrost_batch
*batch
= panfrost_get_fresh_batch_for_fbo(ctx
);
139 panfrost_batch_add_fbo_bos(batch
);
140 panfrost_batch_clear(batch
, buffers
, color
, depth
, stencil
);
144 panfrost_attach_vt_framebuffer(struct panfrost_context
*ctx
)
146 struct panfrost_screen
*screen
= pan_screen(ctx
->base
.screen
);
147 struct panfrost_batch
*batch
= panfrost_get_batch_for_fbo(ctx
);
149 /* If we haven't, reserve space for the framebuffer */
151 if (!batch
->framebuffer
.gpu
) {
152 unsigned size
= (screen
->quirks
& MIDGARD_SFBD
) ?
153 sizeof(struct mali_single_framebuffer
) :
154 sizeof(struct bifrost_framebuffer
);
156 batch
->framebuffer
= panfrost_allocate_transient(batch
, size
);
158 /* Tag the pointer */
159 if (!(screen
->quirks
& MIDGARD_SFBD
))
160 batch
->framebuffer
.gpu
|= MALI_MFBD
;
163 for (unsigned i
= 0; i
< PIPE_SHADER_TYPES
; ++i
)
164 ctx
->payloads
[i
].postfix
.framebuffer
= batch
->framebuffer
.gpu
;
167 /* Reset per-frame context, called on context initialisation as well as after
168 * flushing a frame */
171 panfrost_invalidate_frame(struct panfrost_context
*ctx
)
173 for (unsigned i
= 0; i
< PIPE_SHADER_TYPES
; ++i
)
174 ctx
->payloads
[i
].postfix
.framebuffer
= 0;
177 ctx
->dirty
|= PAN_DIRTY_RASTERIZER
;
180 ctx
->dirty
|= PAN_DIRTY_SAMPLERS
| PAN_DIRTY_TEXTURES
;
182 /* TODO: When does this need to be handled? */
183 ctx
->active_queries
= true;
186 /* In practice, every field of these payloads should be configurable
187 * arbitrarily, which means these functions are basically catch-all's for
188 * as-of-yet unwavering unknowns */
191 panfrost_emit_vertex_payload(struct panfrost_context
*ctx
)
193 /* 0x2 bit clear on 32-bit T6XX */
195 struct midgard_payload_vertex_tiler payload
= {
196 .gl_enables
= 0x4 | 0x2,
199 /* Vertex and compute are closely coupled, so share a payload */
201 memcpy(&ctx
->payloads
[PIPE_SHADER_VERTEX
], &payload
, sizeof(payload
));
202 memcpy(&ctx
->payloads
[PIPE_SHADER_COMPUTE
], &payload
, sizeof(payload
));
206 panfrost_emit_tiler_payload(struct panfrost_context
*ctx
)
208 struct midgard_payload_vertex_tiler payload
= {
210 .zero1
= 0xffff, /* Why is this only seen on test-quad-textured? */
214 memcpy(&ctx
->payloads
[PIPE_SHADER_FRAGMENT
], &payload
, sizeof(payload
));
218 translate_tex_wrap(enum pipe_tex_wrap w
)
221 case PIPE_TEX_WRAP_REPEAT
:
222 return MALI_WRAP_REPEAT
;
224 case PIPE_TEX_WRAP_CLAMP
:
225 return MALI_WRAP_CLAMP
;
227 case PIPE_TEX_WRAP_CLAMP_TO_EDGE
:
228 return MALI_WRAP_CLAMP_TO_EDGE
;
230 case PIPE_TEX_WRAP_CLAMP_TO_BORDER
:
231 return MALI_WRAP_CLAMP_TO_BORDER
;
233 case PIPE_TEX_WRAP_MIRROR_REPEAT
:
234 return MALI_WRAP_MIRRORED_REPEAT
;
236 case PIPE_TEX_WRAP_MIRROR_CLAMP
:
237 return MALI_WRAP_MIRRORED_CLAMP
;
239 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE
:
240 return MALI_WRAP_MIRRORED_CLAMP_TO_EDGE
;
242 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER
:
243 return MALI_WRAP_MIRRORED_CLAMP_TO_BORDER
;
246 unreachable("Invalid wrap");
251 panfrost_translate_compare_func(enum pipe_compare_func in
)
254 case PIPE_FUNC_NEVER
:
255 return MALI_FUNC_NEVER
;
258 return MALI_FUNC_LESS
;
260 case PIPE_FUNC_EQUAL
:
261 return MALI_FUNC_EQUAL
;
263 case PIPE_FUNC_LEQUAL
:
264 return MALI_FUNC_LEQUAL
;
266 case PIPE_FUNC_GREATER
:
267 return MALI_FUNC_GREATER
;
269 case PIPE_FUNC_NOTEQUAL
:
270 return MALI_FUNC_NOTEQUAL
;
272 case PIPE_FUNC_GEQUAL
:
273 return MALI_FUNC_GEQUAL
;
275 case PIPE_FUNC_ALWAYS
:
276 return MALI_FUNC_ALWAYS
;
279 unreachable("Invalid func");
284 panfrost_translate_stencil_op(enum pipe_stencil_op in
)
287 case PIPE_STENCIL_OP_KEEP
:
288 return MALI_STENCIL_KEEP
;
290 case PIPE_STENCIL_OP_ZERO
:
291 return MALI_STENCIL_ZERO
;
293 case PIPE_STENCIL_OP_REPLACE
:
294 return MALI_STENCIL_REPLACE
;
296 case PIPE_STENCIL_OP_INCR
:
297 return MALI_STENCIL_INCR
;
299 case PIPE_STENCIL_OP_DECR
:
300 return MALI_STENCIL_DECR
;
302 case PIPE_STENCIL_OP_INCR_WRAP
:
303 return MALI_STENCIL_INCR_WRAP
;
305 case PIPE_STENCIL_OP_DECR_WRAP
:
306 return MALI_STENCIL_DECR_WRAP
;
308 case PIPE_STENCIL_OP_INVERT
:
309 return MALI_STENCIL_INVERT
;
312 unreachable("Invalid stencil op");
317 panfrost_make_stencil_state(const struct pipe_stencil_state
*in
, struct mali_stencil_test
*out
)
319 out
->ref
= 0; /* Gallium gets it from elsewhere */
321 out
->mask
= in
->valuemask
;
322 out
->func
= panfrost_translate_compare_func(in
->func
);
323 out
->sfail
= panfrost_translate_stencil_op(in
->fail_op
);
324 out
->dpfail
= panfrost_translate_stencil_op(in
->zfail_op
);
325 out
->dppass
= panfrost_translate_stencil_op(in
->zpass_op
);
329 panfrost_default_shader_backend(struct panfrost_context
*ctx
)
331 struct panfrost_screen
*screen
= pan_screen(ctx
->base
.screen
);
332 struct mali_shader_meta shader
= {
333 .alpha_coverage
= ~MALI_ALPHA_COVERAGE(0.000000),
335 .unknown2_3
= MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS
) | 0x3010,
336 .unknown2_4
= MALI_NO_MSAA
| 0x4e0,
339 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this is
340 * required (independent of 32-bit/64-bit descriptors), or why it's not
341 * used on later GPU revisions. Otherwise, all shader jobs fault on
342 * these earlier chips (perhaps this is a chicken bit of some kind).
343 * More investigation is needed. */
345 if (screen
->quirks
& MIDGARD_SFBD
)
346 shader
.unknown2_4
|= 0x10;
348 struct pipe_stencil_state default_stencil
= {
350 .func
= PIPE_FUNC_ALWAYS
,
351 .fail_op
= MALI_STENCIL_KEEP
,
352 .zfail_op
= MALI_STENCIL_KEEP
,
353 .zpass_op
= MALI_STENCIL_KEEP
,
358 panfrost_make_stencil_state(&default_stencil
, &shader
.stencil_front
);
359 shader
.stencil_mask_front
= default_stencil
.writemask
;
361 panfrost_make_stencil_state(&default_stencil
, &shader
.stencil_back
);
362 shader
.stencil_mask_back
= default_stencil
.writemask
;
364 if (default_stencil
.enabled
)
365 shader
.unknown2_4
|= MALI_STENCIL_TEST
;
367 memcpy(&ctx
->fragment_shader_core
, &shader
, sizeof(shader
));
370 /* Generates a vertex/tiler job. This is, in some sense, the heart of the
371 * graphics command stream. It should be called once per draw, accordding to
372 * presentations. Set is_tiler for "tiler" jobs (fragment shader jobs, but in
373 * Mali parlance, "fragment" refers to framebuffer writeout). Clear it for
376 struct panfrost_transfer
377 panfrost_vertex_tiler_job(struct panfrost_context
*ctx
, bool is_tiler
)
379 struct panfrost_batch
*batch
= panfrost_get_batch_for_fbo(ctx
);
380 struct mali_job_descriptor_header job
= {
381 .job_type
= is_tiler
? JOB_TYPE_TILER
: JOB_TYPE_VERTEX
,
382 .job_descriptor_size
= 1,
385 struct midgard_payload_vertex_tiler
*payload
= is_tiler
? &ctx
->payloads
[PIPE_SHADER_FRAGMENT
] : &ctx
->payloads
[PIPE_SHADER_VERTEX
];
387 struct panfrost_transfer transfer
= panfrost_allocate_transient(batch
, sizeof(job
) + sizeof(*payload
));
388 memcpy(transfer
.cpu
, &job
, sizeof(job
));
389 memcpy(transfer
.cpu
+ sizeof(job
), payload
, sizeof(*payload
));
394 panfrost_vertex_buffer_address(struct panfrost_context
*ctx
, unsigned i
)
396 struct pipe_vertex_buffer
*buf
= &ctx
->vertex_buffers
[i
];
397 struct panfrost_resource
*rsrc
= (struct panfrost_resource
*) (buf
->buffer
.resource
);
399 return rsrc
->bo
->gpu
+ buf
->buffer_offset
;
403 panfrost_writes_point_size(struct panfrost_context
*ctx
)
405 assert(ctx
->shader
[PIPE_SHADER_VERTEX
]);
406 struct panfrost_shader_state
*vs
= &ctx
->shader
[PIPE_SHADER_VERTEX
]->variants
[ctx
->shader
[PIPE_SHADER_VERTEX
]->active_variant
];
408 return vs
->writes_point_size
&& ctx
->payloads
[PIPE_SHADER_FRAGMENT
].prefix
.draw_mode
== MALI_POINTS
;
411 /* Stage the attribute descriptors so we can adjust src_offset
412 * to let BOs align nicely */
415 panfrost_stage_attributes(struct panfrost_context
*ctx
)
417 struct panfrost_batch
*batch
= panfrost_get_batch_for_fbo(ctx
);
418 struct panfrost_vertex_state
*so
= ctx
->vertex
;
420 size_t sz
= sizeof(struct mali_attr_meta
) * PAN_MAX_ATTRIBUTE
;
421 struct panfrost_transfer transfer
= panfrost_allocate_transient(batch
, sz
);
422 struct mali_attr_meta
*target
= (struct mali_attr_meta
*) transfer
.cpu
;
424 /* Copy as-is for the first pass */
425 memcpy(target
, so
->hw
, sz
);
427 /* Fixup offsets for the second pass. Recall that the hardware
428 * calculates attribute addresses as:
430 * addr = base + (stride * vtx) + src_offset;
432 * However, on Mali, base must be aligned to 64-bytes, so we
435 * base' = base & ~63 = base - (base & 63)
437 * To compensate when using base' (see emit_vertex_data), we have
438 * to adjust src_offset by the masked off piece:
440 * addr' = base' + (stride * vtx) + (src_offset + (base & 63))
441 * = base - (base & 63) + (stride * vtx) + src_offset + (base & 63)
442 * = base + (stride * vtx) + src_offset
448 unsigned start
= ctx
->payloads
[PIPE_SHADER_VERTEX
].offset_start
;
450 for (unsigned i
= 0; i
< so
->num_elements
; ++i
) {
451 unsigned vbi
= so
->pipe
[i
].vertex_buffer_index
;
452 struct pipe_vertex_buffer
*buf
= &ctx
->vertex_buffers
[vbi
];
453 mali_ptr addr
= panfrost_vertex_buffer_address(ctx
, vbi
);
455 /* Adjust by the masked off bits of the offset */
456 target
[i
].src_offset
+= (addr
& 63);
458 /* Also, somewhat obscurely per-instance data needs to be
459 * offset in response to a delayed start in an indexed draw */
461 if (so
->pipe
[i
].instance_divisor
&& ctx
->instance_count
> 1 && start
)
462 target
[i
].src_offset
-= buf
->stride
* start
;
465 /* Let's also include vertex builtins */
467 target
[PAN_VERTEX_ID
].format
= MALI_R32UI
;
468 target
[PAN_VERTEX_ID
].swizzle
= panfrost_get_default_swizzle(1);
470 target
[PAN_INSTANCE_ID
].format
= MALI_R32UI
;
471 target
[PAN_INSTANCE_ID
].swizzle
= panfrost_get_default_swizzle(1);
473 ctx
->payloads
[PIPE_SHADER_VERTEX
].postfix
.attribute_meta
= transfer
.gpu
;
477 panfrost_upload_sampler_descriptors(struct panfrost_context
*ctx
)
479 struct panfrost_batch
*batch
= panfrost_get_batch_for_fbo(ctx
);
480 size_t desc_size
= sizeof(struct mali_sampler_descriptor
);
482 for (int t
= 0; t
<= PIPE_SHADER_FRAGMENT
; ++t
) {
485 if (ctx
->sampler_count
[t
]) {
486 size_t transfer_size
= desc_size
* ctx
->sampler_count
[t
];
488 struct panfrost_transfer transfer
=
489 panfrost_allocate_transient(batch
, transfer_size
);
491 struct mali_sampler_descriptor
*desc
=
492 (struct mali_sampler_descriptor
*) transfer
.cpu
;
494 for (int i
= 0; i
< ctx
->sampler_count
[t
]; ++i
)
495 desc
[i
] = ctx
->samplers
[t
][i
]->hw
;
497 upload
= transfer
.gpu
;
500 ctx
->payloads
[t
].postfix
.sampler_descriptor
= upload
;
504 static enum mali_texture_layout
505 panfrost_layout_for_texture(struct panfrost_resource
*rsrc
)
507 /* TODO: other linear depth textures */
508 bool is_depth
= rsrc
->base
.format
== PIPE_FORMAT_Z32_UNORM
;
510 switch (rsrc
->layout
) {
512 return MALI_TEXTURE_AFBC
;
515 return MALI_TEXTURE_TILED
;
517 return is_depth
? MALI_TEXTURE_TILED
: MALI_TEXTURE_LINEAR
;
519 unreachable("Invalid texture layout");
525 struct panfrost_context
*ctx
,
526 enum pipe_shader_type st
,
527 struct panfrost_sampler_view
*view
)
532 struct pipe_sampler_view
*pview
= &view
->base
;
533 struct panfrost_resource
*rsrc
= pan_resource(pview
->texture
);
534 mali_ptr descriptor_gpu
;
537 /* Do we interleave an explicit stride with every element? */
539 bool has_manual_stride
= view
->manual_stride
;
541 /* For easy access */
543 bool is_buffer
= pview
->target
== PIPE_BUFFER
;
544 unsigned first_level
= is_buffer
? 0 : pview
->u
.tex
.first_level
;
545 unsigned last_level
= is_buffer
? 0 : pview
->u
.tex
.last_level
;
546 unsigned first_layer
= is_buffer
? 0 : pview
->u
.tex
.first_layer
;
547 unsigned last_layer
= is_buffer
? 0 : pview
->u
.tex
.last_layer
;
548 unsigned first_face
= 0;
549 unsigned last_face
= 0;
550 unsigned face_mult
= 1;
552 /* Cubemaps have 6 faces as layers in between each actual layer.
553 * There's a bit of an impedence mismatch between Gallium and the
554 * hardware, let's fixup for it */
556 if (pview
->target
== PIPE_TEXTURE_CUBE
|| pview
->target
== PIPE_TEXTURE_CUBE_ARRAY
) {
557 /* TODO: logic wrong in the asserted out cases ... can they happen? */
559 first_face
= first_layer
% 6;
560 last_face
= last_layer
% 6;
564 assert((first_layer
== last_layer
) || (first_face
== 0 && last_face
== 5));
568 /* Lower-bit is set when sampling from colour AFBC */
569 bool is_afbc
= rsrc
->layout
== PAN_AFBC
;
570 bool is_zs
= rsrc
->base
.bind
& PIPE_BIND_DEPTH_STENCIL
;
571 unsigned afbc_bit
= (is_afbc
&& !is_zs
) ? 1 : 0;
573 /* Add the BO to the job so it's retained until the job is done. */
574 struct panfrost_batch
*batch
= panfrost_get_batch_for_fbo(ctx
);
575 panfrost_batch_add_bo(batch
, rsrc
->bo
,
576 PAN_BO_ACCESS_SHARED
| PAN_BO_ACCESS_READ
|
577 panfrost_bo_access_for_stage(st
));
579 /* Add the usage flags in, since they can change across the CSO
580 * lifetime due to layout switches */
582 view
->hw
.format
.layout
= panfrost_layout_for_texture(rsrc
);
583 view
->hw
.format
.manual_stride
= has_manual_stride
;
585 /* Inject the addresses in, interleaving array indices, mip levels,
586 * cube faces, and strides in that order */
589 unsigned levels
= 1 + last_level
- first_level
;
590 unsigned layers
= 1 + last_layer
- first_layer
;
591 unsigned faces
= 1 + last_face
- first_face
;
592 unsigned num_elements
= levels
* layers
* faces
;
593 if (has_manual_stride
)
596 descriptor
= malloc(sizeof(struct mali_texture_descriptor
) +
597 sizeof(mali_ptr
) * num_elements
);
598 memcpy(descriptor
, &view
->hw
, sizeof(struct mali_texture_descriptor
));
600 mali_ptr
*pointers_and_strides
= descriptor
+
601 sizeof(struct mali_texture_descriptor
);
603 for (unsigned w
= first_layer
; w
<= last_layer
; ++w
) {
604 for (unsigned l
= first_level
; l
<= last_level
; ++l
) {
605 for (unsigned f
= first_face
; f
<= last_face
; ++f
) {
606 pointers_and_strides
[idx
++] =
607 panfrost_get_texture_address(rsrc
, l
, w
*face_mult
+ f
)
610 if (has_manual_stride
) {
611 pointers_and_strides
[idx
++] =
612 rsrc
->slices
[l
].stride
;
618 descriptor_gpu
= panfrost_upload_transient(batch
, descriptor
,
619 sizeof(struct mali_texture_descriptor
) +
620 num_elements
* sizeof(*pointers_and_strides
));
623 return descriptor_gpu
;
627 panfrost_upload_texture_descriptors(struct panfrost_context
*ctx
)
629 struct panfrost_batch
*batch
= panfrost_get_batch_for_fbo(ctx
);
631 for (int t
= 0; t
<= PIPE_SHADER_FRAGMENT
; ++t
) {
632 mali_ptr trampoline
= 0;
634 if (ctx
->sampler_view_count
[t
]) {
635 uint64_t trampolines
[PIPE_MAX_SHADER_SAMPLER_VIEWS
];
637 for (int i
= 0; i
< ctx
->sampler_view_count
[t
]; ++i
)
639 panfrost_upload_tex(ctx
, t
, ctx
->sampler_views
[t
][i
]);
641 trampoline
= panfrost_upload_transient(batch
, trampolines
, sizeof(uint64_t) * ctx
->sampler_view_count
[t
]);
644 ctx
->payloads
[t
].postfix
.texture_trampoline
= trampoline
;
648 struct sysval_uniform
{
657 static void panfrost_upload_viewport_scale_sysval(struct panfrost_context
*ctx
,
658 struct sysval_uniform
*uniform
)
660 const struct pipe_viewport_state
*vp
= &ctx
->pipe_viewport
;
662 uniform
->f
[0] = vp
->scale
[0];
663 uniform
->f
[1] = vp
->scale
[1];
664 uniform
->f
[2] = vp
->scale
[2];
667 static void panfrost_upload_viewport_offset_sysval(struct panfrost_context
*ctx
,
668 struct sysval_uniform
*uniform
)
670 const struct pipe_viewport_state
*vp
= &ctx
->pipe_viewport
;
672 uniform
->f
[0] = vp
->translate
[0];
673 uniform
->f
[1] = vp
->translate
[1];
674 uniform
->f
[2] = vp
->translate
[2];
677 static void panfrost_upload_txs_sysval(struct panfrost_context
*ctx
,
678 enum pipe_shader_type st
,
679 unsigned int sysvalid
,
680 struct sysval_uniform
*uniform
)
682 unsigned texidx
= PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid
);
683 unsigned dim
= PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid
);
684 bool is_array
= PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid
);
685 struct pipe_sampler_view
*tex
= &ctx
->sampler_views
[st
][texidx
]->base
;
688 uniform
->i
[0] = u_minify(tex
->texture
->width0
, tex
->u
.tex
.first_level
);
691 uniform
->i
[1] = u_minify(tex
->texture
->height0
,
692 tex
->u
.tex
.first_level
);
695 uniform
->i
[2] = u_minify(tex
->texture
->depth0
,
696 tex
->u
.tex
.first_level
);
699 uniform
->i
[dim
] = tex
->texture
->array_size
;
702 static void panfrost_upload_ssbo_sysval(
703 struct panfrost_context
*ctx
,
704 enum pipe_shader_type st
,
706 struct sysval_uniform
*uniform
)
708 assert(ctx
->ssbo_mask
[st
] & (1 << ssbo_id
));
709 struct pipe_shader_buffer sb
= ctx
->ssbo
[st
][ssbo_id
];
711 /* Compute address */
712 struct panfrost_batch
*batch
= panfrost_get_batch_for_fbo(ctx
);
713 struct panfrost_bo
*bo
= pan_resource(sb
.buffer
)->bo
;
715 panfrost_batch_add_bo(batch
, bo
,
716 PAN_BO_ACCESS_SHARED
| PAN_BO_ACCESS_RW
|
717 panfrost_bo_access_for_stage(st
));
719 /* Upload address and size as sysval */
720 uniform
->du
[0] = bo
->gpu
+ sb
.buffer_offset
;
721 uniform
->u
[2] = sb
.buffer_size
;
725 panfrost_upload_sampler_sysval(
726 struct panfrost_context
*ctx
,
727 enum pipe_shader_type st
,
728 unsigned sampler_index
,
729 struct sysval_uniform
*uniform
)
731 struct pipe_sampler_state
*sampl
=
732 &ctx
->samplers
[st
][sampler_index
]->base
;
734 uniform
->f
[0] = sampl
->min_lod
;
735 uniform
->f
[1] = sampl
->max_lod
;
736 uniform
->f
[2] = sampl
->lod_bias
;
738 /* Even without any errata, Midgard represents "no mipmapping" as
739 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
740 * panfrost_create_sampler_state which also explains our choice of
741 * epsilon value (again to keep behaviour consistent) */
743 if (sampl
->min_mip_filter
== PIPE_TEX_MIPFILTER_NONE
)
744 uniform
->f
[1] = uniform
->f
[0] + (1.0/256.0);
747 static void panfrost_upload_num_work_groups_sysval(struct panfrost_context
*ctx
,
748 struct sysval_uniform
*uniform
)
750 uniform
->u
[0] = ctx
->compute_grid
->grid
[0];
751 uniform
->u
[1] = ctx
->compute_grid
->grid
[1];
752 uniform
->u
[2] = ctx
->compute_grid
->grid
[2];
755 static void panfrost_upload_sysvals(struct panfrost_context
*ctx
, void *buf
,
756 struct panfrost_shader_state
*ss
,
757 enum pipe_shader_type st
)
759 struct sysval_uniform
*uniforms
= (void *)buf
;
761 for (unsigned i
= 0; i
< ss
->sysval_count
; ++i
) {
762 int sysval
= ss
->sysval
[i
];
764 switch (PAN_SYSVAL_TYPE(sysval
)) {
765 case PAN_SYSVAL_VIEWPORT_SCALE
:
766 panfrost_upload_viewport_scale_sysval(ctx
, &uniforms
[i
]);
768 case PAN_SYSVAL_VIEWPORT_OFFSET
:
769 panfrost_upload_viewport_offset_sysval(ctx
, &uniforms
[i
]);
771 case PAN_SYSVAL_TEXTURE_SIZE
:
772 panfrost_upload_txs_sysval(ctx
, st
, PAN_SYSVAL_ID(sysval
),
775 case PAN_SYSVAL_SSBO
:
776 panfrost_upload_ssbo_sysval(ctx
, st
, PAN_SYSVAL_ID(sysval
),
779 case PAN_SYSVAL_NUM_WORK_GROUPS
:
780 panfrost_upload_num_work_groups_sysval(ctx
, &uniforms
[i
]);
782 case PAN_SYSVAL_SAMPLER
:
783 panfrost_upload_sampler_sysval(ctx
, st
, PAN_SYSVAL_ID(sysval
),
793 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer
*buf
, unsigned index
)
795 struct pipe_constant_buffer
*cb
= &buf
->cb
[index
];
796 struct panfrost_resource
*rsrc
= pan_resource(cb
->buffer
);
799 return rsrc
->bo
->cpu
;
800 else if (cb
->user_buffer
)
801 return cb
->user_buffer
;
803 unreachable("No constant buffer");
807 panfrost_map_constant_buffer_gpu(
808 struct panfrost_context
*ctx
,
809 enum pipe_shader_type st
,
810 struct panfrost_constant_buffer
*buf
,
813 struct pipe_constant_buffer
*cb
= &buf
->cb
[index
];
814 struct panfrost_resource
*rsrc
= pan_resource(cb
->buffer
);
815 struct panfrost_batch
*batch
= panfrost_get_batch_for_fbo(ctx
);
818 panfrost_batch_add_bo(batch
, rsrc
->bo
,
819 PAN_BO_ACCESS_SHARED
|
821 panfrost_bo_access_for_stage(st
));
823 /* Alignment gauranteed by PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
824 return rsrc
->bo
->gpu
+ cb
->buffer_offset
;
825 } else if (cb
->user_buffer
) {
826 return panfrost_upload_transient(batch
, cb
->user_buffer
+ cb
->buffer_offset
, cb
->buffer_size
);
828 unreachable("No constant buffer");
832 /* Compute number of UBOs active (more specifically, compute the highest UBO
833 * number addressable -- if there are gaps, include them in the count anyway).
834 * We always include UBO #0 in the count, since we *need* uniforms enabled for
838 panfrost_ubo_count(struct panfrost_context
*ctx
, enum pipe_shader_type stage
)
840 unsigned mask
= ctx
->constant_buffer
[stage
].enabled_mask
| 1;
841 return 32 - __builtin_clz(mask
);
844 /* Fixes up a shader state with current state */
847 panfrost_patch_shader_state(struct panfrost_context
*ctx
,
848 enum pipe_shader_type stage
)
850 struct panfrost_shader_variants
*all
= ctx
->shader
[stage
];
853 ctx
->payloads
[stage
].postfix
.shader
= 0;
857 struct panfrost_shader_state
*ss
= &all
->variants
[all
->active_variant
];
859 ss
->tripipe
->texture_count
= ctx
->sampler_view_count
[stage
];
860 ss
->tripipe
->sampler_count
= ctx
->sampler_count
[stage
];
862 ss
->tripipe
->midgard1
.flags
= 0x220;
864 unsigned ubo_count
= panfrost_ubo_count(ctx
, stage
);
865 ss
->tripipe
->midgard1
.uniform_buffer_count
= ubo_count
;
867 struct panfrost_batch
*batch
= panfrost_get_batch_for_fbo(ctx
);
869 /* Add the shader BO to the batch. */
870 panfrost_batch_add_bo(batch
, ss
->bo
,
871 PAN_BO_ACCESS_PRIVATE
|
873 panfrost_bo_access_for_stage(stage
));
875 ctx
->payloads
[stage
].postfix
.shader
= panfrost_upload_transient(batch
,
877 sizeof(struct mali_shader_meta
));
880 /* Go through dirty flags and actualise them in the cmdstream. */
883 panfrost_emit_for_draw(struct panfrost_context
*ctx
, bool with_vertex_data
)
885 struct panfrost_batch
*batch
= panfrost_get_batch_for_fbo(ctx
);
886 struct panfrost_screen
*screen
= pan_screen(ctx
->base
.screen
);
888 panfrost_batch_add_fbo_bos(batch
);
889 panfrost_attach_vt_framebuffer(ctx
);
891 if (with_vertex_data
) {
892 panfrost_emit_vertex_data(batch
);
894 /* Varyings emitted for -all- geometry */
895 unsigned total_count
= ctx
->padded_count
* ctx
->instance_count
;
896 panfrost_emit_varying_descriptor(ctx
, total_count
);
899 bool msaa
= ctx
->rasterizer
->base
.multisample
;
901 if (ctx
->dirty
& PAN_DIRTY_RASTERIZER
) {
902 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].gl_enables
= ctx
->rasterizer
->tiler_gl_enables
;
904 /* TODO: Sample size */
905 SET_BIT(ctx
->fragment_shader_core
.unknown2_3
, MALI_HAS_MSAA
, msaa
);
906 SET_BIT(ctx
->fragment_shader_core
.unknown2_4
, MALI_NO_MSAA
, !msaa
);
909 panfrost_batch_set_requirements(batch
);
911 if (ctx
->occlusion_query
) {
912 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].gl_enables
|= MALI_OCCLUSION_QUERY
;
913 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].postfix
.occlusion_counter
= ctx
->occlusion_query
->bo
->gpu
;
916 panfrost_patch_shader_state(ctx
, PIPE_SHADER_VERTEX
);
917 panfrost_patch_shader_state(ctx
, PIPE_SHADER_COMPUTE
);
919 if (ctx
->dirty
& (PAN_DIRTY_RASTERIZER
| PAN_DIRTY_VS
)) {
920 /* Check if we need to link the gl_PointSize varying */
921 if (!panfrost_writes_point_size(ctx
)) {
922 /* If the size is constant, write it out. Otherwise,
923 * don't touch primitive_size (since we would clobber
924 * the pointer there) */
926 bool points
= ctx
->payloads
[PIPE_SHADER_FRAGMENT
].prefix
.draw_mode
== MALI_POINTS
;
928 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].primitive_size
.constant
= points
?
929 ctx
->rasterizer
->base
.point_size
:
930 ctx
->rasterizer
->base
.line_width
;
934 /* TODO: Maybe dirty track FS, maybe not. For now, it's transient. */
935 if (ctx
->shader
[PIPE_SHADER_FRAGMENT
])
936 ctx
->dirty
|= PAN_DIRTY_FS
;
938 if (ctx
->dirty
& PAN_DIRTY_FS
) {
939 assert(ctx
->shader
[PIPE_SHADER_FRAGMENT
]);
940 struct panfrost_shader_state
*variant
= &ctx
->shader
[PIPE_SHADER_FRAGMENT
]->variants
[ctx
->shader
[PIPE_SHADER_FRAGMENT
]->active_variant
];
942 panfrost_patch_shader_state(ctx
, PIPE_SHADER_FRAGMENT
);
944 #define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name
947 COPY(attribute_count
);
951 COPY(midgard1
.uniform_count
);
952 COPY(midgard1
.uniform_buffer_count
);
953 COPY(midgard1
.work_count
);
954 COPY(midgard1
.flags
);
955 COPY(midgard1
.unknown2
);
959 /* Get blending setup */
960 unsigned rt_count
= MAX2(ctx
->pipe_framebuffer
.nr_cbufs
, 1);
962 struct panfrost_blend_final blend
[PIPE_MAX_COLOR_BUFS
];
963 unsigned shader_offset
= 0;
964 struct panfrost_bo
*shader_bo
= NULL
;
966 for (unsigned c
= 0; c
< rt_count
; ++c
) {
967 blend
[c
] = panfrost_get_blend_for_context(ctx
, c
, &shader_bo
, &shader_offset
);
970 /* If there is a blend shader, work registers are shared. XXX: opt */
972 for (unsigned c
= 0; c
< rt_count
; ++c
) {
973 if (blend
[c
].is_shader
)
974 ctx
->fragment_shader_core
.midgard1
.work_count
= 16;
977 /* Depending on whether it's legal to in the given shader, we
978 * try to enable early-z testing (or forward-pixel kill?) */
980 SET_BIT(ctx
->fragment_shader_core
.midgard1
.flags
, MALI_EARLY_Z
, !variant
->can_discard
);
982 /* Any time texturing is used, derivatives are implicitly
983 * calculated, so we need to enable helper invocations */
985 SET_BIT(ctx
->fragment_shader_core
.midgard1
.flags
, MALI_HELPER_INVOCATIONS
, variant
->helper_invocations
);
987 /* Assign the stencil refs late */
989 unsigned front_ref
= ctx
->stencil_ref
.ref_value
[0];
990 unsigned back_ref
= ctx
->stencil_ref
.ref_value
[1];
991 bool back_enab
= ctx
->depth_stencil
->stencil
[1].enabled
;
993 ctx
->fragment_shader_core
.stencil_front
.ref
= front_ref
;
994 ctx
->fragment_shader_core
.stencil_back
.ref
= back_enab
? back_ref
: front_ref
;
996 /* CAN_DISCARD should be set if the fragment shader possibly
997 * contains a 'discard' instruction. It is likely this is
998 * related to optimizations related to forward-pixel kill, as
999 * per "Mali Performance 3: Is EGL_BUFFER_PRESERVED a good
1000 * thing?" by Peter Harris
1003 SET_BIT(ctx
->fragment_shader_core
.unknown2_3
, MALI_CAN_DISCARD
, variant
->can_discard
);
1004 SET_BIT(ctx
->fragment_shader_core
.midgard1
.flags
, 0x400, variant
->can_discard
);
1006 /* Even on MFBD, the shader descriptor gets blend shaders. It's
1007 * *also* copied to the blend_meta appended (by convention),
1008 * but this is the field actually read by the hardware. (Or
1009 * maybe both are read...?). Specify the last RTi with a blend
1012 ctx
->fragment_shader_core
.blend
.shader
= 0;
1014 for (signed rt
= (rt_count
- 1); rt
>= 0; --rt
) {
1015 if (blend
[rt
].is_shader
) {
1016 ctx
->fragment_shader_core
.blend
.shader
=
1017 blend
[rt
].shader
.gpu
| blend
[rt
].shader
.first_tag
;
1022 if (screen
->quirks
& MIDGARD_SFBD
) {
1023 /* When only a single render target platform is used, the blend
1024 * information is inside the shader meta itself. We
1025 * additionally need to signal CAN_DISCARD for nontrivial blend
1026 * modes (so we're able to read back the destination buffer) */
1028 SET_BIT(ctx
->fragment_shader_core
.unknown2_3
, MALI_HAS_BLEND_SHADER
, blend
[0].is_shader
);
1030 if (!blend
[0].is_shader
) {
1031 ctx
->fragment_shader_core
.blend
.equation
=
1032 *blend
[0].equation
.equation
;
1033 ctx
->fragment_shader_core
.blend
.constant
=
1034 blend
[0].equation
.constant
;
1037 SET_BIT(ctx
->fragment_shader_core
.unknown2_3
, MALI_CAN_DISCARD
, !blend
[0].no_blending
);
1040 size_t size
= sizeof(struct mali_shader_meta
) + (sizeof(struct midgard_blend_rt
) * rt_count
);
1041 struct panfrost_transfer transfer
= panfrost_allocate_transient(batch
, size
);
1042 memcpy(transfer
.cpu
, &ctx
->fragment_shader_core
, sizeof(struct mali_shader_meta
));
1044 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].postfix
.shader
= transfer
.gpu
;
1046 if (!(screen
->quirks
& MIDGARD_SFBD
)) {
1047 /* Additional blend descriptor tacked on for jobs using MFBD */
1049 struct midgard_blend_rt rts
[4];
1051 for (unsigned i
= 0; i
< rt_count
; ++i
) {
1052 rts
[i
].flags
= 0x200;
1055 (ctx
->pipe_framebuffer
.nr_cbufs
> i
) &&
1056 (ctx
->pipe_framebuffer
.cbufs
[i
]) &&
1057 util_format_is_srgb(ctx
->pipe_framebuffer
.cbufs
[i
]->format
);
1059 SET_BIT(rts
[i
].flags
, MALI_BLEND_MRT_SHADER
, blend
[i
].is_shader
);
1060 SET_BIT(rts
[i
].flags
, MALI_BLEND_LOAD_TIB
, !blend
[i
].no_blending
);
1061 SET_BIT(rts
[i
].flags
, MALI_BLEND_SRGB
, is_srgb
);
1062 SET_BIT(rts
[i
].flags
, MALI_BLEND_NO_DITHER
, !ctx
->blend
->base
.dither
);
1064 if (blend
[i
].is_shader
) {
1065 rts
[i
].blend
.shader
= blend
[i
].shader
.gpu
| blend
[i
].shader
.first_tag
;
1067 rts
[i
].blend
.equation
= *blend
[i
].equation
.equation
;
1068 rts
[i
].blend
.constant
= blend
[i
].equation
.constant
;
1072 memcpy(transfer
.cpu
+ sizeof(struct mali_shader_meta
), rts
, sizeof(rts
[0]) * rt_count
);
1076 /* We stage to transient, so always dirty.. */
1078 panfrost_stage_attributes(ctx
);
1080 if (ctx
->dirty
& PAN_DIRTY_SAMPLERS
)
1081 panfrost_upload_sampler_descriptors(ctx
);
1083 if (ctx
->dirty
& PAN_DIRTY_TEXTURES
)
1084 panfrost_upload_texture_descriptors(ctx
);
1086 const struct pipe_viewport_state
*vp
= &ctx
->pipe_viewport
;
1088 for (int i
= 0; i
< PIPE_SHADER_TYPES
; ++i
) {
1089 struct panfrost_shader_variants
*all
= ctx
->shader
[i
];
1094 struct panfrost_constant_buffer
*buf
= &ctx
->constant_buffer
[i
];
1096 struct panfrost_shader_state
*ss
= &all
->variants
[all
->active_variant
];
1098 /* Uniforms are implicitly UBO #0 */
1099 bool has_uniforms
= buf
->enabled_mask
& (1 << 0);
1101 /* Allocate room for the sysval and the uniforms */
1102 size_t sys_size
= sizeof(float) * 4 * ss
->sysval_count
;
1103 size_t uniform_size
= has_uniforms
? (buf
->cb
[0].buffer_size
) : 0;
1104 size_t size
= sys_size
+ uniform_size
;
1105 struct panfrost_transfer transfer
= panfrost_allocate_transient(batch
, size
);
1107 /* Upload sysvals requested by the shader */
1108 panfrost_upload_sysvals(ctx
, transfer
.cpu
, ss
, i
);
1110 /* Upload uniforms */
1112 const void *cpu
= panfrost_map_constant_buffer_cpu(buf
, 0);
1113 memcpy(transfer
.cpu
+ sys_size
, cpu
, uniform_size
);
1117 ctx
->shader
[i
]->variants
[ctx
->shader
[i
]->active_variant
].uniform_count
;
1119 struct mali_vertex_tiler_postfix
*postfix
=
1120 &ctx
->payloads
[i
].postfix
;
1122 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1125 unsigned ubo_count
= panfrost_ubo_count(ctx
, i
);
1126 assert(ubo_count
>= 1);
1128 size_t sz
= sizeof(struct mali_uniform_buffer_meta
) * ubo_count
;
1129 struct mali_uniform_buffer_meta ubos
[PAN_MAX_CONST_BUFFERS
];
1131 /* Upload uniforms as a UBO */
1132 ubos
[0].size
= MALI_POSITIVE((2 + uniform_count
));
1133 ubos
[0].ptr
= transfer
.gpu
>> 2;
1135 /* The rest are honest-to-goodness UBOs */
1137 for (unsigned ubo
= 1; ubo
< ubo_count
; ++ubo
) {
1138 size_t usz
= buf
->cb
[ubo
].buffer_size
;
1140 bool enabled
= buf
->enabled_mask
& (1 << ubo
);
1141 bool empty
= usz
== 0;
1143 if (!enabled
|| empty
) {
1144 /* Stub out disabled UBOs to catch accesses */
1147 ubos
[ubo
].ptr
= 0xDEAD0000;
1151 mali_ptr gpu
= panfrost_map_constant_buffer_gpu(ctx
, i
, buf
, ubo
);
1153 unsigned bytes_per_field
= 16;
1154 unsigned aligned
= ALIGN_POT(usz
, bytes_per_field
);
1155 unsigned fields
= aligned
/ bytes_per_field
;
1157 ubos
[ubo
].size
= MALI_POSITIVE(fields
);
1158 ubos
[ubo
].ptr
= gpu
>> 2;
1161 mali_ptr ubufs
= panfrost_upload_transient(batch
, ubos
, sz
);
1162 postfix
->uniforms
= transfer
.gpu
;
1163 postfix
->uniform_buffers
= ubufs
;
1165 buf
->dirty_mask
= 0;
1168 /* TODO: Upload the viewport somewhere more appropriate */
1170 /* Clip bounds are encoded as floats. The viewport itself is encoded as
1171 * (somewhat) asymmetric ints. */
1172 const struct pipe_scissor_state
*ss
= &ctx
->scissor
;
1174 struct mali_viewport view
= {
1175 /* By default, do no viewport clipping, i.e. clip to (-inf,
1176 * inf) in each direction. Clipping to the viewport in theory
1177 * should work, but in practice causes issues when we're not
1178 * explicitly trying to scissor */
1180 .clip_minx
= -INFINITY
,
1181 .clip_miny
= -INFINITY
,
1182 .clip_maxx
= INFINITY
,
1183 .clip_maxy
= INFINITY
,
1186 /* Always scissor to the viewport by default. */
1187 float vp_minx
= (int) (vp
->translate
[0] - fabsf(vp
->scale
[0]));
1188 float vp_maxx
= (int) (vp
->translate
[0] + fabsf(vp
->scale
[0]));
1190 float vp_miny
= (int) (vp
->translate
[1] - fabsf(vp
->scale
[1]));
1191 float vp_maxy
= (int) (vp
->translate
[1] + fabsf(vp
->scale
[1]));
1193 float minz
= (vp
->translate
[2] - fabsf(vp
->scale
[2]));
1194 float maxz
= (vp
->translate
[2] + fabsf(vp
->scale
[2]));
1196 /* Apply the scissor test */
1198 unsigned minx
, miny
, maxx
, maxy
;
1200 if (ss
&& ctx
->rasterizer
&& ctx
->rasterizer
->base
.scissor
) {
1201 minx
= MAX2(ss
->minx
, vp_minx
);
1202 miny
= MAX2(ss
->miny
, vp_miny
);
1203 maxx
= MIN2(ss
->maxx
, vp_maxx
);
1204 maxy
= MIN2(ss
->maxy
, vp_maxy
);
1212 /* Hardware needs the min/max to be strictly ordered, so flip if we
1213 * need to. The viewport transformation in the vertex shader will
1214 * handle the negatives if we don't */
1217 unsigned temp
= miny
;
1223 unsigned temp
= minx
;
1234 /* Clamp to the framebuffer size as a last check */
1236 minx
= MIN2(ctx
->pipe_framebuffer
.width
, minx
);
1237 maxx
= MIN2(ctx
->pipe_framebuffer
.width
, maxx
);
1239 miny
= MIN2(ctx
->pipe_framebuffer
.height
, miny
);
1240 maxy
= MIN2(ctx
->pipe_framebuffer
.height
, maxy
);
1242 /* Update the job, unless we're doing wallpapering (whose lack of
1243 * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
1244 * just... be faster :) */
1246 if (!ctx
->wallpaper_batch
)
1247 panfrost_batch_union_scissor(batch
, minx
, miny
, maxx
, maxy
);
1251 view
.viewport0
[0] = minx
;
1252 view
.viewport1
[0] = MALI_POSITIVE(maxx
);
1254 view
.viewport0
[1] = miny
;
1255 view
.viewport1
[1] = MALI_POSITIVE(maxy
);
1257 view
.clip_minz
= minz
;
1258 view
.clip_maxz
= maxz
;
1260 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].postfix
.viewport
=
1261 panfrost_upload_transient(batch
,
1263 sizeof(struct mali_viewport
));
1268 /* Corresponds to exactly one draw, but does not submit anything */
1271 panfrost_queue_draw(struct panfrost_context
*ctx
)
1273 /* Handle dirty flags now */
1274 panfrost_emit_for_draw(ctx
, true);
1276 /* If rasterizer discard is enable, only submit the vertex */
1278 bool rasterizer_discard
= ctx
->rasterizer
1279 && ctx
->rasterizer
->base
.rasterizer_discard
;
1281 struct panfrost_transfer vertex
= panfrost_vertex_tiler_job(ctx
, false);
1282 struct panfrost_transfer tiler
;
1284 if (!rasterizer_discard
)
1285 tiler
= panfrost_vertex_tiler_job(ctx
, true);
1287 struct panfrost_batch
*batch
= panfrost_get_batch_for_fbo(ctx
);
1289 if (rasterizer_discard
)
1290 panfrost_scoreboard_queue_vertex_job(batch
, vertex
, FALSE
);
1291 else if (ctx
->wallpaper_batch
&& batch
->first_tiler
.gpu
)
1292 panfrost_scoreboard_queue_fused_job_prepend(batch
, vertex
, tiler
);
1294 panfrost_scoreboard_queue_fused_job(batch
, vertex
, tiler
);
1296 for (unsigned i
= 0; i
< PIPE_SHADER_TYPES
; ++i
) {
1297 struct panfrost_shader_variants
*all
= ctx
->shader
[i
];
1302 struct panfrost_shader_state
*ss
= &all
->variants
[all
->active_variant
];
1303 batch
->stack_size
= MAX2(batch
->stack_size
, ss
->stack_size
);
1307 /* The entire frame is in memory -- send it off to the kernel! */
1311 struct pipe_context
*pipe
,
1312 struct pipe_fence_handle
**fence
,
1315 struct panfrost_context
*ctx
= pan_context(pipe
);
1316 struct util_dynarray fences
;
1318 /* We must collect the fences before the flush is done, otherwise we'll
1319 * lose track of them.
1322 util_dynarray_init(&fences
, NULL
);
1323 hash_table_foreach(ctx
->batches
, hentry
) {
1324 struct panfrost_batch
*batch
= hentry
->data
;
1326 panfrost_batch_fence_reference(batch
->out_sync
);
1327 util_dynarray_append(&fences
,
1328 struct panfrost_batch_fence
*,
1333 /* Submit all pending jobs */
1334 panfrost_flush_all_batches(ctx
, false);
1337 struct panfrost_fence
*f
= panfrost_fence_create(ctx
, &fences
);
1338 pipe
->screen
->fence_reference(pipe
->screen
, fence
, NULL
);
1339 *fence
= (struct pipe_fence_handle
*)f
;
1341 util_dynarray_foreach(&fences
, struct panfrost_batch_fence
*, fence
)
1342 panfrost_batch_fence_unreference(*fence
);
1344 util_dynarray_fini(&fences
);
1348 #define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c;
1351 g2m_draw_mode(enum pipe_prim_type mode
)
1354 DEFINE_CASE(POINTS
);
1356 DEFINE_CASE(LINE_LOOP
);
1357 DEFINE_CASE(LINE_STRIP
);
1358 DEFINE_CASE(TRIANGLES
);
1359 DEFINE_CASE(TRIANGLE_STRIP
);
1360 DEFINE_CASE(TRIANGLE_FAN
);
1362 DEFINE_CASE(QUAD_STRIP
);
1363 DEFINE_CASE(POLYGON
);
1366 unreachable("Invalid draw mode");
1373 panfrost_translate_index_size(unsigned size
)
1377 return MALI_DRAW_INDEXED_UINT8
;
1380 return MALI_DRAW_INDEXED_UINT16
;
1383 return MALI_DRAW_INDEXED_UINT32
;
1386 unreachable("Invalid index size");
1390 /* Gets a GPU address for the associated index buffer. Only gauranteed to be
1391 * good for the duration of the draw (transient), could last longer */
1394 panfrost_get_index_buffer_mapped(struct panfrost_context
*ctx
, const struct pipe_draw_info
*info
)
1396 struct panfrost_resource
*rsrc
= (struct panfrost_resource
*) (info
->index
.resource
);
1398 off_t offset
= info
->start
* info
->index_size
;
1399 struct panfrost_batch
*batch
= panfrost_get_batch_for_fbo(ctx
);
1401 if (!info
->has_user_indices
) {
1402 /* Only resources can be directly mapped */
1403 panfrost_batch_add_bo(batch
, rsrc
->bo
,
1404 PAN_BO_ACCESS_SHARED
|
1405 PAN_BO_ACCESS_READ
|
1406 PAN_BO_ACCESS_VERTEX_TILER
);
1407 return rsrc
->bo
->gpu
+ offset
;
1409 /* Otherwise, we need to upload to transient memory */
1410 const uint8_t *ibuf8
= (const uint8_t *) info
->index
.user
;
1411 return panfrost_upload_transient(batch
, ibuf8
+ offset
, info
->count
* info
->index_size
);
1416 panfrost_scissor_culls_everything(struct panfrost_context
*ctx
)
1418 const struct pipe_scissor_state
*ss
= &ctx
->scissor
;
1420 /* Check if we're scissoring at all */
1422 if (!(ctx
->rasterizer
&& ctx
->rasterizer
->base
.scissor
))
1425 return (ss
->minx
== ss
->maxx
) || (ss
->miny
== ss
->maxy
);
1428 /* Count generated primitives (when there is no geom/tess shaders) for
1429 * transform feedback */
1432 panfrost_statistics_record(
1433 struct panfrost_context
*ctx
,
1434 const struct pipe_draw_info
*info
)
1436 if (!ctx
->active_queries
)
1439 uint32_t prims
= u_prims_for_vertices(info
->mode
, info
->count
);
1440 ctx
->prims_generated
+= prims
;
1442 if (!ctx
->streamout
.num_targets
)
1445 ctx
->tf_prims_generated
+= prims
;
1450 struct pipe_context
*pipe
,
1451 const struct pipe_draw_info
*info
)
1453 struct panfrost_context
*ctx
= pan_context(pipe
);
1455 /* First of all, check the scissor to see if anything is drawn at all.
1456 * If it's not, we drop the draw (mostly a conformance issue;
1457 * well-behaved apps shouldn't hit this) */
1459 if (panfrost_scissor_culls_everything(ctx
))
1462 int mode
= info
->mode
;
1464 /* Fallback unsupported restart index */
1465 unsigned primitive_index
= (1 << (info
->index_size
* 8)) - 1;
1467 if (info
->primitive_restart
&& info
->index_size
1468 && info
->restart_index
!= primitive_index
) {
1469 util_draw_vbo_without_prim_restart(pipe
, info
);
1473 /* Fallback for unsupported modes */
1475 assert(ctx
->rasterizer
!= NULL
);
1477 if (!(ctx
->draw_modes
& (1 << mode
))) {
1478 if (mode
== PIPE_PRIM_QUADS
&& info
->count
== 4 && !ctx
->rasterizer
->base
.flatshade
) {
1479 mode
= PIPE_PRIM_TRIANGLE_FAN
;
1481 if (info
->count
< 4) {
1482 /* Degenerate case? */
1486 util_primconvert_save_rasterizer_state(ctx
->primconvert
, &ctx
->rasterizer
->base
);
1487 util_primconvert_draw_vbo(ctx
->primconvert
, info
);
1492 ctx
->payloads
[PIPE_SHADER_VERTEX
].offset_start
= info
->start
;
1493 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].offset_start
= info
->start
;
1495 /* Now that we have a guaranteed terminating path, find the job.
1496 * Assignment commented out to prevent unused warning */
1498 /* struct panfrost_batch *batch = */ panfrost_get_batch_for_fbo(ctx
);
1500 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].prefix
.draw_mode
= g2m_draw_mode(mode
);
1502 /* Take into account a negative bias */
1503 ctx
->vertex_count
= info
->count
+ abs(info
->index_bias
);
1504 ctx
->instance_count
= info
->instance_count
;
1505 ctx
->active_prim
= info
->mode
;
1507 /* For non-indexed draws, they're the same */
1508 unsigned vertex_count
= ctx
->vertex_count
;
1510 unsigned draw_flags
= 0;
1512 /* The draw flags interpret how primitive size is interpreted */
1514 if (panfrost_writes_point_size(ctx
))
1515 draw_flags
|= MALI_DRAW_VARYING_SIZE
;
1517 if (info
->primitive_restart
)
1518 draw_flags
|= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX
;
1520 /* These doesn't make much sense */
1522 draw_flags
|= 0x3000;
1524 if (ctx
->rasterizer
&& ctx
->rasterizer
->base
.flatshade_first
)
1525 draw_flags
|= MALI_DRAW_FLATSHADE_FIRST
;
1527 panfrost_statistics_record(ctx
, info
);
1529 if (info
->index_size
) {
1530 /* Calculate the min/max index used so we can figure out how
1531 * many times to invoke the vertex shader */
1533 /* Fetch / calculate index bounds */
1534 unsigned min_index
= 0, max_index
= 0;
1536 if (info
->max_index
== ~0u) {
1537 u_vbuf_get_minmax_index(pipe
, info
, &min_index
, &max_index
);
1539 min_index
= info
->min_index
;
1540 max_index
= info
->max_index
;
1543 /* Use the corresponding values */
1544 vertex_count
= max_index
- min_index
+ 1;
1545 ctx
->payloads
[PIPE_SHADER_VERTEX
].offset_start
= min_index
+ info
->index_bias
;
1546 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].offset_start
= min_index
+ info
->index_bias
;
1548 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].prefix
.offset_bias_correction
= -min_index
;
1549 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].prefix
.index_count
= MALI_POSITIVE(info
->count
);
1551 //assert(!info->restart_index); /* TODO: Research */
1553 draw_flags
|= panfrost_translate_index_size(info
->index_size
);
1554 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].prefix
.indices
= panfrost_get_index_buffer_mapped(ctx
, info
);
1556 /* Index count == vertex count, if no indexing is applied, as
1557 * if it is internally indexed in the expected order */
1559 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].prefix
.offset_bias_correction
= 0;
1560 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].prefix
.index_count
= MALI_POSITIVE(ctx
->vertex_count
);
1562 /* Reverse index state */
1563 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].prefix
.indices
= (u64
) NULL
;
1566 /* Dispatch "compute jobs" for the vertex/tiler pair as (1,
1567 * vertex_count, 1) */
1569 panfrost_pack_work_groups_fused(
1570 &ctx
->payloads
[PIPE_SHADER_VERTEX
].prefix
,
1571 &ctx
->payloads
[PIPE_SHADER_FRAGMENT
].prefix
,
1572 1, vertex_count
, info
->instance_count
,
1575 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].prefix
.unknown_draw
= draw_flags
;
1577 /* Encode the padded vertex count */
1579 if (info
->instance_count
> 1) {
1580 ctx
->padded_count
= panfrost_padded_vertex_count(vertex_count
);
1582 unsigned shift
= __builtin_ctz(ctx
->padded_count
);
1583 unsigned k
= ctx
->padded_count
>> (shift
+ 1);
1585 ctx
->payloads
[PIPE_SHADER_VERTEX
].instance_shift
= shift
;
1586 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].instance_shift
= shift
;
1588 ctx
->payloads
[PIPE_SHADER_VERTEX
].instance_odd
= k
;
1589 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].instance_odd
= k
;
1591 ctx
->padded_count
= vertex_count
;
1593 /* Reset instancing state */
1594 ctx
->payloads
[PIPE_SHADER_VERTEX
].instance_shift
= 0;
1595 ctx
->payloads
[PIPE_SHADER_VERTEX
].instance_odd
= 0;
1596 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].instance_shift
= 0;
1597 ctx
->payloads
[PIPE_SHADER_FRAGMENT
].instance_odd
= 0;
1600 /* Fire off the draw itself */
1601 panfrost_queue_draw(ctx
);
1603 /* Increment transform feedback offsets */
1605 for (unsigned i
= 0; i
< ctx
->streamout
.num_targets
; ++i
) {
1606 unsigned output_count
= u_stream_outputs_for_vertices(
1607 ctx
->active_prim
, ctx
->vertex_count
);
1609 ctx
->streamout
.offsets
[i
] += output_count
;
1616 panfrost_generic_cso_delete(struct pipe_context
*pctx
, void *hwcso
)
1622 panfrost_create_rasterizer_state(
1623 struct pipe_context
*pctx
,
1624 const struct pipe_rasterizer_state
*cso
)
1626 struct panfrost_rasterizer
*so
= CALLOC_STRUCT(panfrost_rasterizer
);
1630 /* Bitmask, unknown meaning of the start value. 0x105 on 32-bit T6XX */
1631 so
->tiler_gl_enables
= 0x7;
1634 so
->tiler_gl_enables
|= MALI_FRONT_CCW_TOP
;
1636 if (cso
->cull_face
& PIPE_FACE_FRONT
)
1637 so
->tiler_gl_enables
|= MALI_CULL_FACE_FRONT
;
1639 if (cso
->cull_face
& PIPE_FACE_BACK
)
1640 so
->tiler_gl_enables
|= MALI_CULL_FACE_BACK
;
1646 panfrost_bind_rasterizer_state(
1647 struct pipe_context
*pctx
,
1650 struct panfrost_context
*ctx
= pan_context(pctx
);
1652 /* TODO: Why can't rasterizer be NULL ever? Other drivers are fine.. */
1656 ctx
->rasterizer
= hwcso
;
1657 ctx
->dirty
|= PAN_DIRTY_RASTERIZER
;
1659 ctx
->fragment_shader_core
.depth_units
= ctx
->rasterizer
->base
.offset_units
* 2.0f
;
1660 ctx
->fragment_shader_core
.depth_factor
= ctx
->rasterizer
->base
.offset_scale
;
1662 /* Gauranteed with the core GL call, so don't expose ARB_polygon_offset */
1663 assert(ctx
->rasterizer
->base
.offset_clamp
== 0.0);
1665 /* XXX: Which bit is which? Does this maybe allow offseting not-tri? */
1667 SET_BIT(ctx
->fragment_shader_core
.unknown2_4
, MALI_DEPTH_RANGE_A
, ctx
->rasterizer
->base
.offset_tri
);
1668 SET_BIT(ctx
->fragment_shader_core
.unknown2_4
, MALI_DEPTH_RANGE_B
, ctx
->rasterizer
->base
.offset_tri
);
1670 /* Point sprites are emulated */
1672 struct panfrost_shader_state
*variant
=
1673 ctx
->shader
[PIPE_SHADER_FRAGMENT
] ? &ctx
->shader
[PIPE_SHADER_FRAGMENT
]->variants
[ctx
->shader
[PIPE_SHADER_FRAGMENT
]->active_variant
] : NULL
;
1675 if (ctx
->rasterizer
->base
.sprite_coord_enable
|| (variant
&& variant
->point_sprite_mask
))
1676 ctx
->base
.bind_fs_state(&ctx
->base
, ctx
->shader
[PIPE_SHADER_FRAGMENT
]);
1680 panfrost_create_vertex_elements_state(
1681 struct pipe_context
*pctx
,
1682 unsigned num_elements
,
1683 const struct pipe_vertex_element
*elements
)
1685 struct panfrost_vertex_state
*so
= CALLOC_STRUCT(panfrost_vertex_state
);
1687 so
->num_elements
= num_elements
;
1688 memcpy(so
->pipe
, elements
, sizeof(*elements
) * num_elements
);
1690 for (int i
= 0; i
< num_elements
; ++i
) {
1691 so
->hw
[i
].index
= i
;
1693 enum pipe_format fmt
= elements
[i
].src_format
;
1694 const struct util_format_description
*desc
= util_format_description(fmt
);
1695 so
->hw
[i
].unknown1
= 0x2;
1696 so
->hw
[i
].swizzle
= panfrost_get_default_swizzle(desc
->nr_channels
);
1698 so
->hw
[i
].format
= panfrost_find_format(desc
);
1700 /* The field itself should probably be shifted over */
1701 so
->hw
[i
].src_offset
= elements
[i
].src_offset
;
1708 panfrost_bind_vertex_elements_state(
1709 struct pipe_context
*pctx
,
1712 struct panfrost_context
*ctx
= pan_context(pctx
);
1714 ctx
->vertex
= hwcso
;
1715 ctx
->dirty
|= PAN_DIRTY_VERTEX
;
1719 panfrost_create_shader_state(
1720 struct pipe_context
*pctx
,
1721 const struct pipe_shader_state
*cso
,
1722 enum pipe_shader_type stage
)
1724 struct panfrost_shader_variants
*so
= CALLOC_STRUCT(panfrost_shader_variants
);
1727 /* Token deep copy to prevent memory corruption */
1729 if (cso
->type
== PIPE_SHADER_IR_TGSI
)
1730 so
->base
.tokens
= tgsi_dup_tokens(so
->base
.tokens
);
1732 /* Precompile for shader-db if we need to */
1733 if (unlikely((pan_debug
& PAN_DBG_PRECOMPILE
) && cso
->type
== PIPE_SHADER_IR_NIR
)) {
1734 struct panfrost_context
*ctx
= pan_context(pctx
);
1736 struct mali_shader_meta meta
;
1737 struct panfrost_shader_state state
;
1738 uint64_t outputs_written
;
1740 panfrost_shader_compile(ctx
, &meta
,
1743 tgsi_processor_to_shader_stage(stage
), &state
,
1751 panfrost_delete_shader_state(
1752 struct pipe_context
*pctx
,
1755 struct panfrost_shader_variants
*cso
= (struct panfrost_shader_variants
*) so
;
1757 if (cso
->base
.type
== PIPE_SHADER_IR_TGSI
) {
1758 DBG("Deleting TGSI shader leaks duplicated tokens\n");
1761 for (unsigned i
= 0; i
< cso
->variant_count
; ++i
) {
1762 struct panfrost_shader_state
*shader_state
= &cso
->variants
[i
];
1763 panfrost_bo_unreference(shader_state
->bo
);
1764 shader_state
->bo
= NULL
;
1771 panfrost_create_sampler_state(
1772 struct pipe_context
*pctx
,
1773 const struct pipe_sampler_state
*cso
)
1775 struct panfrost_sampler_state
*so
= CALLOC_STRUCT(panfrost_sampler_state
);
1778 /* sampler_state corresponds to mali_sampler_descriptor, which we can generate entirely here */
1780 bool min_nearest
= cso
->min_img_filter
== PIPE_TEX_FILTER_NEAREST
;
1781 bool mag_nearest
= cso
->mag_img_filter
== PIPE_TEX_FILTER_NEAREST
;
1782 bool mip_linear
= cso
->min_mip_filter
== PIPE_TEX_MIPFILTER_LINEAR
;
1784 unsigned min_filter
= min_nearest
? MALI_SAMP_MIN_NEAREST
: 0;
1785 unsigned mag_filter
= mag_nearest
? MALI_SAMP_MAG_NEAREST
: 0;
1786 unsigned mip_filter
= mip_linear
?
1787 (MALI_SAMP_MIP_LINEAR_1
| MALI_SAMP_MIP_LINEAR_2
) : 0;
1788 unsigned normalized
= cso
->normalized_coords
? MALI_SAMP_NORM_COORDS
: 0;
1790 struct mali_sampler_descriptor sampler_descriptor
= {
1791 .filter_mode
= min_filter
| mag_filter
| mip_filter
| normalized
,
1792 .wrap_s
= translate_tex_wrap(cso
->wrap_s
),
1793 .wrap_t
= translate_tex_wrap(cso
->wrap_t
),
1794 .wrap_r
= translate_tex_wrap(cso
->wrap_r
),
1795 .compare_func
= panfrost_flip_compare_func(
1796 panfrost_translate_compare_func(
1797 cso
->compare_func
)),
1799 cso
->border_color
.f
[0],
1800 cso
->border_color
.f
[1],
1801 cso
->border_color
.f
[2],
1802 cso
->border_color
.f
[3]
1804 .min_lod
= FIXED_16(cso
->min_lod
, false), /* clamp at 0 */
1805 .max_lod
= FIXED_16(cso
->max_lod
, false),
1806 .lod_bias
= FIXED_16(cso
->lod_bias
, true), /* can be negative */
1807 .seamless_cube_map
= cso
->seamless_cube_map
,
1810 /* If necessary, we disable mipmapping in the sampler descriptor by
1811 * clamping the LOD as tight as possible (from 0 to epsilon,
1812 * essentially -- remember these are fixed point numbers, so
1815 if (cso
->min_mip_filter
== PIPE_TEX_MIPFILTER_NONE
)
1816 sampler_descriptor
.max_lod
= sampler_descriptor
.min_lod
;
1818 /* Enforce that there is something in the middle by adding epsilon*/
1820 if (sampler_descriptor
.min_lod
== sampler_descriptor
.max_lod
)
1821 sampler_descriptor
.max_lod
++;
1824 assert(sampler_descriptor
.max_lod
> sampler_descriptor
.min_lod
);
1826 so
->hw
= sampler_descriptor
;
1832 panfrost_bind_sampler_states(
1833 struct pipe_context
*pctx
,
1834 enum pipe_shader_type shader
,
1835 unsigned start_slot
, unsigned num_sampler
,
1838 assert(start_slot
== 0);
1840 struct panfrost_context
*ctx
= pan_context(pctx
);
1842 /* XXX: Should upload, not just copy? */
1843 ctx
->sampler_count
[shader
] = num_sampler
;
1844 memcpy(ctx
->samplers
[shader
], sampler
, num_sampler
* sizeof (void *));
1846 ctx
->dirty
|= PAN_DIRTY_SAMPLERS
;
1850 panfrost_variant_matches(
1851 struct panfrost_context
*ctx
,
1852 struct panfrost_shader_state
*variant
,
1853 enum pipe_shader_type type
)
1855 struct pipe_rasterizer_state
*rasterizer
= &ctx
->rasterizer
->base
;
1856 struct pipe_alpha_state
*alpha
= &ctx
->depth_stencil
->alpha
;
1858 bool is_fragment
= (type
== PIPE_SHADER_FRAGMENT
);
1860 if (is_fragment
&& (alpha
->enabled
|| variant
->alpha_state
.enabled
)) {
1861 /* Make sure enable state is at least the same */
1862 if (alpha
->enabled
!= variant
->alpha_state
.enabled
) {
1866 /* Check that the contents of the test are the same */
1867 bool same_func
= alpha
->func
== variant
->alpha_state
.func
;
1868 bool same_ref
= alpha
->ref_value
== variant
->alpha_state
.ref_value
;
1870 if (!(same_func
&& same_ref
)) {
1875 if (is_fragment
&& rasterizer
&& (rasterizer
->sprite_coord_enable
|
1876 variant
->point_sprite_mask
)) {
1877 /* Ensure the same varyings are turned to point sprites */
1878 if (rasterizer
->sprite_coord_enable
!= variant
->point_sprite_mask
)
1881 /* Ensure the orientation is correct */
1883 rasterizer
->sprite_coord_mode
==
1884 PIPE_SPRITE_COORD_UPPER_LEFT
;
1886 if (variant
->point_sprite_upper_left
!= upper_left
)
1890 /* Otherwise, we're good to go */

/*
 * Fix an uncompiled shader's stream output info, and produce a bitmask
 * of which VARYING_SLOT_* are captured for stream output.
 *
 * Core Gallium stores output->register_index as a "slot" number, where
 * slots are assigned consecutively to all outputs in info->outputs_written.
 * This naive packing of outputs doesn't work for us - we too have slots,
 * but the layout is defined by the VUE map, which we won't have until we
 * compile a specific shader variant. So, we remap these and simply store
 * VARYING_SLOT_* in our copy's output->register_index fields.
 *
 * We then produce a bitmask of outputs which are used for SO.
 *
 * Implementation from iris.
 */
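
/* Illustrative example (not from the original comment): if
 * outputs_written = (1 << VARYING_SLOT_POS) | (1 << VARYING_SLOT_VAR0),
 * u_bit_scan64 yields the set bits in ascending order, so
 * reverse_map[0] = VARYING_SLOT_POS and reverse_map[1] = VARYING_SLOT_VAR0;
 * an output with register_index = 1 is then rewritten to
 * VARYING_SLOT_VAR0. */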
static uint64_t
update_so_info(struct pipe_stream_output_info *so_info,
               uint64_t outputs_written)
{
        uint64_t so_outputs = 0;
        uint8_t reverse_map[64] = {0};
        unsigned slot = 0;

        while (outputs_written)
                reverse_map[slot++] = u_bit_scan64(&outputs_written);

        for (unsigned i = 0; i < so_info->num_outputs; i++) {
                struct pipe_stream_output *output = &so_info->output[i];

                /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
                output->register_index = reverse_map[output->register_index];

                so_outputs |= 1ull << output->register_index;
        }

        return so_outputs;
}
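
/* Binds a shader CSO, reusing a compatible compiled variant if one exists
 * and otherwise creating (and lazily compiling) a fresh one. */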
static void
panfrost_bind_shader_state(
        struct pipe_context *pctx,
        void *hwcso,
        enum pipe_shader_type type)
{
        struct panfrost_context *ctx = pan_context(pctx);

        ctx->shader[type] = hwcso;

        if (type == PIPE_SHADER_FRAGMENT)
                ctx->dirty |= PAN_DIRTY_FS;
        else
                ctx->dirty |= PAN_DIRTY_VS;

        if (!hwcso) return;

        /* Match the appropriate variant */

        signed variant = -1;
        struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;

        for (unsigned i = 0; i < variants->variant_count; ++i) {
                if (panfrost_variant_matches(ctx, &variants->variants[i], type)) {
                        variant = i;
                        break;
                }
        }

        if (variant == -1) {
                /* No variant matched, so create a new one */
                variant = variants->variant_count++;
                assert(variants->variant_count < MAX_SHADER_VARIANTS);

                struct panfrost_shader_state *v =
                        &variants->variants[variant];

                if (type == PIPE_SHADER_FRAGMENT) {
                        v->alpha_state = ctx->depth_stencil->alpha;

                        if (ctx->rasterizer) {
                                v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable;
                                v->point_sprite_upper_left =
                                        ctx->rasterizer->base.sprite_coord_mode ==
                                        PIPE_SPRITE_COORD_UPPER_LEFT;
                        }
                }

                variants->variants[variant].tripipe = calloc(1, sizeof(struct mali_shader_meta));
        }

        /* Select this variant */
        variants->active_variant = variant;

        struct panfrost_shader_state *shader_state = &variants->variants[variant];
        assert(panfrost_variant_matches(ctx, shader_state, type));

        /* We finally have a variant, so compile it */

        if (!shader_state->compiled) {
                uint64_t outputs_written = 0;

                panfrost_shader_compile(ctx, shader_state->tripipe,
                                        variants->base.type,
                                        variants->base.type == PIPE_SHADER_IR_NIR ?
                                                variants->base.ir.nir :
                                                variants->base.tokens,
                                        tgsi_processor_to_shader_stage(type), shader_state,
                                        &outputs_written);

                shader_state->compiled = true;

                /* Fixup the stream out information, since what Gallium returns
                 * normally is mildly insane */

                shader_state->stream_output = variants->base.stream_output;
                shader_state->so_mask =
                        update_so_info(&shader_state->stream_output, outputs_written);
        }
}

static void *
panfrost_create_vs_state(struct pipe_context *pctx, const struct pipe_shader_state *hwcso)
{
        return panfrost_create_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
}

static void *
panfrost_create_fs_state(struct pipe_context *pctx, const struct pipe_shader_state *hwcso)
{
        return panfrost_create_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
}

static void
panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
{
        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
}

static void
panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
{
        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
}

static void
panfrost_set_vertex_buffers(
        struct pipe_context *pctx,
        unsigned start_slot,
        unsigned num_buffers,
        const struct pipe_vertex_buffer *buffers)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers, start_slot, num_buffers);
}
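
/* Constant buffers are captured by reference here; the enabled/dirty masks
 * track which UBO slots the rest of the driver needs to (re)emit in the
 * descriptors later. */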
static void
panfrost_set_constant_buffer(
        struct pipe_context *pctx,
        enum pipe_shader_type shader, uint index,
        const struct pipe_constant_buffer *buf)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];

        util_copy_constant_buffer(&pbuf->cb[index], buf);

        unsigned mask = (1 << index);

        if (unlikely(!buf)) {
                pbuf->enabled_mask &= ~mask;
                pbuf->dirty_mask &= ~mask;
                return;
        }

        pbuf->enabled_mask |= mask;
        pbuf->dirty_mask |= mask;
}

static void
panfrost_set_stencil_ref(
        struct pipe_context *pctx,
        const struct pipe_stencil_ref *ref)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->stencil_ref = *ref;

        /* Shader core dirty */
        ctx->dirty |= PAN_DIRTY_FS;
}
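
/* Gallium's texture targets collapse onto the hardware's four texture
 * types (1D/2D/3D/cube); array-ness is expressed separately through the
 * descriptor's array_size field. */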
static enum mali_texture_type
panfrost_translate_texture_type(enum pipe_texture_target t) {
        switch (t) {
        case PIPE_BUFFER:
        case PIPE_TEXTURE_1D:
        case PIPE_TEXTURE_1D_ARRAY:
                return MALI_TEX_1D;

        case PIPE_TEXTURE_2D:
        case PIPE_TEXTURE_2D_ARRAY:
        case PIPE_TEXTURE_RECT:
                return MALI_TEX_2D;

        case PIPE_TEXTURE_3D:
                return MALI_TEX_3D;

        case PIPE_TEXTURE_CUBE:
        case PIPE_TEXTURE_CUBE_ARRAY:
                return MALI_TEX_CUBE;

        default:
                unreachable("Unknown target");
        }
}

static struct pipe_sampler_view *
panfrost_create_sampler_view(
        struct pipe_context *pctx,
        struct pipe_resource *texture,
        const struct pipe_sampler_view *template)
{
        struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view);
        int bytes_per_pixel = util_format_get_blocksize(texture->format);

        pipe_reference(NULL, &texture->reference);

        struct panfrost_resource *prsrc = (struct panfrost_resource *) texture;

        so->base = *template;
        so->base.texture = texture;
        so->base.reference.count = 1;
        so->base.context = pctx;

        /* sampler_views correspond to texture descriptors, minus the texture
         * (data) itself. So, we serialise the descriptor here and cache it for
         * later */

        const struct util_format_description *desc = util_format_description(prsrc->base.format);

        unsigned char user_swizzle[4] = {
                template->swizzle_r,
                template->swizzle_g,
                template->swizzle_b,
                template->swizzle_a
        };

        enum mali_format format = panfrost_find_format(desc);

        /* Check if we need to set a custom stride by computing the "expected"
         * stride and comparing it to what the BO actually wants. Only applies
         * to linear textures, since tiled/compressed textures have strict
         * alignment requirements for their strides as it is */

        unsigned first_level = template->u.tex.first_level;
        unsigned last_level = template->u.tex.last_level;

        if (prsrc->layout == PAN_LINEAR) {
                for (unsigned l = first_level; l <= last_level; ++l) {
                        unsigned actual_stride = prsrc->slices[l].stride;
                        unsigned width = u_minify(texture->width0, l);
                        unsigned comp_stride = width * bytes_per_pixel;

                        if (comp_stride != actual_stride) {
                                so->manual_stride = true;
                                break;
                        }
                }
        }
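
        /* Hypothetical example: a 3-texel-wide RGBA8 level computes
         * comp_stride = 3 * 4 = 12 bytes, but the BO's rows may be padded
         * (e.g. to 16 bytes), in which case the stride must be supplied
         * explicitly. */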

        /* In the hardware, array_size refers specifically to array textures,
         * whereas in Gallium, it also covers cubemaps */

        unsigned array_size = texture->array_size;

        if (template->target == PIPE_TEXTURE_CUBE) {
                /* TODO: Cubemap arrays */
                assert(array_size == 6);
                array_size /= 6;
        }

        struct mali_texture_descriptor texture_descriptor = {
                .width = MALI_POSITIVE(u_minify(texture->width0, first_level)),
                .height = MALI_POSITIVE(u_minify(texture->height0, first_level)),
                .depth = MALI_POSITIVE(u_minify(texture->depth0, first_level)),
                .array_size = MALI_POSITIVE(array_size),

                .format = {
                        .swizzle = panfrost_translate_swizzle_4(desc->swizzle),
                        .format = format,
                        .srgb = desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB,
                        .type = panfrost_translate_texture_type(template->target),
                },

                .swizzle = panfrost_translate_swizzle_4(user_swizzle)
        };

        texture_descriptor.levels = last_level - first_level;

        so->hw = texture_descriptor;

        return (struct pipe_sampler_view *) so;
}

static void
panfrost_set_sampler_views(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_views,
        struct pipe_sampler_view **views)
{
        struct panfrost_context *ctx = pan_context(pctx);
        unsigned new_nr = 0;
        unsigned i;

        assert(start_slot == 0);

        for (i = 0; i < num_views; ++i) {
                if (views[i])
                        new_nr = i + 1;

                pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
                                            views[i]);
        }

        for (; i < ctx->sampler_view_count[shader]; i++) {
                pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
                                            NULL);
        }

        ctx->sampler_view_count[shader] = new_nr;

        ctx->dirty |= PAN_DIRTY_TEXTURES;
}

static void
panfrost_sampler_view_destroy(
        struct pipe_context *pctx,
        struct pipe_sampler_view *view)
{
        pipe_resource_reference(&view->texture, NULL);
        ralloc_free(view);
}

static void
panfrost_set_shader_buffers(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start, unsigned count,
        const struct pipe_shader_buffer *buffers,
        unsigned writable_bitmask)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_set_shader_buffers_mask(ctx->ssbo[shader], &ctx->ssbo_mask[shader],
                                     buffers, start, count);
}

/* Hints that a framebuffer should use AFBC where possible */

static void
panfrost_hint_afbc(
        struct panfrost_screen *screen,
        const struct pipe_framebuffer_state *fb)
{
        /* AFBC implementation incomplete; hide it */
        if (!(pan_debug & PAN_DBG_AFBC)) return;

        /* Hint AFBC to the resources bound to each color buffer */

        for (unsigned i = 0; i < fb->nr_cbufs; ++i) {
                struct pipe_surface *surf = fb->cbufs[i];
                struct panfrost_resource *rsrc = pan_resource(surf->texture);
                panfrost_resource_hint_layout(screen, rsrc, PAN_AFBC, 1);
        }

        /* Also hint it to the depth buffer */

        if (fb->zsbuf) {
                struct panfrost_resource *rsrc = pan_resource(fb->zsbuf->texture);
                panfrost_resource_hint_layout(screen, rsrc, PAN_AFBC, 1);
        }
}

static void
panfrost_set_framebuffer_state(struct pipe_context *pctx,
                               const struct pipe_framebuffer_state *fb)
{
        struct panfrost_context *ctx = pan_context(pctx);

        panfrost_hint_afbc(pan_screen(pctx->screen), fb);
        util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
        panfrost_invalidate_frame(ctx);
}

static void *
panfrost_create_depth_stencil_state(struct pipe_context *pipe,
                                    const struct pipe_depth_stencil_alpha_state *depth_stencil)
{
        return mem_dup(depth_stencil, sizeof(*depth_stencil));
}
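
/* The bound depth/stencil CSO is translated eagerly: binding it rewrites
 * the relevant fragment_shader_core descriptor bits rather than deferring
 * the translation to draw time. */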
static void
panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
                                  void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct pipe_depth_stencil_alpha_state *depth_stencil = cso;
        ctx->depth_stencil = depth_stencil;

        if (!depth_stencil)
                return;

        /* Alpha does not exist in the hardware (it's not in ES3), so it's
         * emulated in the fragment shader */

        if (depth_stencil->alpha.enabled) {
                /* We need to trigger a new shader (maybe) */
                ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
        }

        /* Stencil state */
        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_STENCIL_TEST, depth_stencil->stencil[0].enabled);

        panfrost_make_stencil_state(&depth_stencil->stencil[0], &ctx->fragment_shader_core.stencil_front);
        ctx->fragment_shader_core.stencil_mask_front = depth_stencil->stencil[0].writemask;

        /* If back-stencil is not enabled, use the front values */
        bool back_enab = ctx->depth_stencil->stencil[1].enabled;
        unsigned back_index = back_enab ? 1 : 0;

        panfrost_make_stencil_state(&depth_stencil->stencil[back_index], &ctx->fragment_shader_core.stencil_back);
        ctx->fragment_shader_core.stencil_mask_back = depth_stencil->stencil[back_index].writemask;

        /* Depth state (TODO: Refactor) */
        SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_DEPTH_WRITEMASK,
                depth_stencil->depth.writemask);

        int func = depth_stencil->depth.enabled ? depth_stencil->depth.func : PIPE_FUNC_ALWAYS;

        ctx->fragment_shader_core.unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
        ctx->fragment_shader_core.unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(func));

        /* Bounds test not implemented */
        assert(!depth_stencil->depth.bounds_test);

        ctx->dirty |= PAN_DIRTY_FS;
}

static void
panfrost_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
{
        free(depth);
}

static void
panfrost_set_sample_mask(struct pipe_context *pipe,
                         unsigned sample_mask)
{
}

static void
panfrost_set_clip_state(struct pipe_context *pipe,
                        const struct pipe_clip_state *clip)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}

static void
panfrost_set_viewport_states(struct pipe_context *pipe,
                             unsigned start_slot,
                             unsigned num_viewports,
                             const struct pipe_viewport_state *viewports)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_viewports == 1);

        ctx->pipe_viewport = *viewports;
}

static void
panfrost_set_scissor_states(struct pipe_context *pipe,
                            unsigned start_slot,
                            unsigned num_scissors,
                            const struct pipe_scissor_state *scissors)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_scissors == 1);

        ctx->scissor = *scissors;
}

static void
panfrost_set_polygon_stipple(struct pipe_context *pipe,
                             const struct pipe_poly_stipple *stipple)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}

static void
panfrost_set_active_query_state(struct pipe_context *pipe,
                                bool enable)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->active_queries = enable;
}

static void
panfrost_destroy(struct pipe_context *pipe)
{
        struct panfrost_context *panfrost = pan_context(pipe);

        if (panfrost->blitter)
                util_blitter_destroy(panfrost->blitter);

        if (panfrost->blitter_wallpaper)
                util_blitter_destroy(panfrost->blitter_wallpaper);

        util_unreference_framebuffer_state(&panfrost->pipe_framebuffer);
        u_upload_destroy(pipe->stream_uploader);

        ralloc_free(pipe);
}
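
/* Query support. Occlusion results are read back from a BO the hardware
 * writes, while primitive counters are accumulated on the CPU by the
 * driver itself (prims_generated / tf_prims_generated). */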
static struct pipe_query *
panfrost_create_query(struct pipe_context *pipe,
                      unsigned type,
                      unsigned index)
{
        struct panfrost_query *q = rzalloc(pipe, struct panfrost_query);

        q->type = type;
        q->index = index;

        return (struct pipe_query *) q;
}

static void
panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_query *query = (struct panfrost_query *) q;

        if (query->bo) {
                panfrost_bo_unreference(query->bo);
                query->bo = NULL;
        }

        ralloc_free(q);
}

static bool
panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
                /* Allocate a bo for the query results to be stored */
                if (!query->bo) {
                        query->bo = panfrost_bo_create(
                                        pan_screen(ctx->base.screen),
                                        sizeof(unsigned), 0);
                }

                unsigned *result = (unsigned *)query->bo->cpu;
                *result = 0; /* Default to 0 if nothing at all drawn. */
                ctx->occlusion_query = query;
                break;

        /* Geometry statistics are computed in the driver. XXX: geom/tess
         * shaders.. */

        case PIPE_QUERY_PRIMITIVES_GENERATED:
                query->start = ctx->prims_generated;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                query->start = ctx->tf_prims_generated;
                break;

        default:
                fprintf(stderr, "Skipping query %u\n", query->type);
                break;
        }

        return true;
}

static bool
panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
                ctx->occlusion_query = NULL;
                break;
        case PIPE_QUERY_PRIMITIVES_GENERATED:
                query->end = ctx->prims_generated;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                query->end = ctx->tf_prims_generated;
                break;
        }

        return true;
}

static bool
panfrost_get_query_result(struct pipe_context *pipe,
                          struct pipe_query *q,
                          bool wait,
                          union pipe_query_result *vresult)
{
        struct panfrost_query *query = (struct panfrost_query *) q;
        struct panfrost_context *ctx = pan_context(pipe);

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
                panfrost_flush_all_batches(ctx, true);

                /* Read back the query results */
                unsigned *result = (unsigned *) query->bo->cpu;
                unsigned passed = *result;

                if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
                        vresult->u64 = passed;
                } else {
                        vresult->b = !!passed;
                }

                break;

        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                panfrost_flush_all_batches(ctx, true);
                vresult->u64 = query->end - query->start;
                break;

        default:
                DBG("Skipped query get %u\n", query->type);
                break;
        }

        return true;
}

static struct pipe_stream_output_target *
panfrost_create_stream_output_target(struct pipe_context *pctx,
                                     struct pipe_resource *prsc,
                                     unsigned buffer_offset,
                                     unsigned buffer_size)
{
        struct pipe_stream_output_target *target;

        target = rzalloc(pctx, struct pipe_stream_output_target);

        if (!target)
                return NULL;

        pipe_reference_init(&target->reference, 1);
        pipe_resource_reference(&target->buffer, prsc);

        target->context = pctx;
        target->buffer_offset = buffer_offset;
        target->buffer_size = buffer_size;

        return target;
}

static void
panfrost_stream_output_target_destroy(struct pipe_context *pctx,
                                      struct pipe_stream_output_target *target)
{
        pipe_resource_reference(&target->buffer, NULL);
        ralloc_free(target);
}
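
/* Per Gallium's interface, an offset of (unsigned)-1 means "append at the
 * current offset", so in that case the offset tracked in
 * panfrost_streamout is left untouched. */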
static void
panfrost_set_stream_output_targets(struct pipe_context *pctx,
                                   unsigned num_targets,
                                   struct pipe_stream_output_target **targets,
                                   const unsigned *offsets)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_streamout *so = &ctx->streamout;

        assert(num_targets <= ARRAY_SIZE(so->targets));

        for (unsigned i = 0; i < num_targets; i++) {
                if (offsets[i] != -1)
                        so->offsets[i] = offsets[i];

                pipe_so_target_reference(&so->targets[i], targets[i]);
        }

        for (unsigned i = num_targets; i < so->num_targets; i++)
                pipe_so_target_reference(&so->targets[i], NULL);

        so->num_targets = num_targets;
}

struct pipe_context *
panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
{
        struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context);
        struct pipe_context *gallium = (struct pipe_context *) ctx;

        gallium->screen = screen;

        gallium->destroy = panfrost_destroy;

        gallium->set_framebuffer_state = panfrost_set_framebuffer_state;

        gallium->flush = panfrost_flush;
        gallium->clear = panfrost_clear;
        gallium->draw_vbo = panfrost_draw_vbo;

        gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
        gallium->set_constant_buffer = panfrost_set_constant_buffer;
        gallium->set_shader_buffers = panfrost_set_shader_buffers;

        gallium->set_stencil_ref = panfrost_set_stencil_ref;

        gallium->create_sampler_view = panfrost_create_sampler_view;
        gallium->set_sampler_views = panfrost_set_sampler_views;
        gallium->sampler_view_destroy = panfrost_sampler_view_destroy;

        gallium->create_rasterizer_state = panfrost_create_rasterizer_state;
        gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
        gallium->delete_rasterizer_state = panfrost_generic_cso_delete;

        gallium->create_vertex_elements_state = panfrost_create_vertex_elements_state;
        gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
        gallium->delete_vertex_elements_state = panfrost_generic_cso_delete;

        gallium->create_fs_state = panfrost_create_fs_state;
        gallium->delete_fs_state = panfrost_delete_shader_state;
        gallium->bind_fs_state = panfrost_bind_fs_state;

        gallium->create_vs_state = panfrost_create_vs_state;
        gallium->delete_vs_state = panfrost_delete_shader_state;
        gallium->bind_vs_state = panfrost_bind_vs_state;

        gallium->create_sampler_state = panfrost_create_sampler_state;
        gallium->delete_sampler_state = panfrost_generic_cso_delete;
        gallium->bind_sampler_states = panfrost_bind_sampler_states;

        gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
        gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
        gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;

        gallium->set_sample_mask = panfrost_set_sample_mask;

        gallium->set_clip_state = panfrost_set_clip_state;
        gallium->set_viewport_states = panfrost_set_viewport_states;
        gallium->set_scissor_states = panfrost_set_scissor_states;
        gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
        gallium->set_active_query_state = panfrost_set_active_query_state;

        gallium->create_query = panfrost_create_query;
        gallium->destroy_query = panfrost_destroy_query;
        gallium->begin_query = panfrost_begin_query;
        gallium->end_query = panfrost_end_query;
        gallium->get_query_result = panfrost_get_query_result;

        gallium->create_stream_output_target = panfrost_create_stream_output_target;
        gallium->stream_output_target_destroy = panfrost_stream_output_target_destroy;
        gallium->set_stream_output_targets = panfrost_set_stream_output_targets;

        panfrost_resource_context_init(gallium);
        panfrost_blend_context_init(gallium);
        panfrost_compute_context_init(gallium);

        gallium->stream_uploader = u_upload_create_default(gallium);
        gallium->const_uploader = gallium->stream_uploader;
        assert(gallium->stream_uploader);

        /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */
        ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1;
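        /* PIPE_PRIM_* values are consecutive starting at POINTS = 0, so this
         * mask sets one bit per primitive type up to and including POLYGON. */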

        ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);

        ctx->blitter = util_blitter_create(gallium);
        ctx->blitter_wallpaper = util_blitter_create(gallium);

        assert(ctx->blitter);
        assert(ctx->blitter_wallpaper);

        /* Prepare for render! */

        panfrost_batch_init(ctx);
        panfrost_emit_vertex_payload(ctx);
        panfrost_emit_tiler_payload(ctx);
        panfrost_invalidate_frame(ctx);
        panfrost_default_shader_backend(ctx);

        return gallium;
}