/*
 * Copyright (C) 2018 Alyssa Rosenzweig
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "util/macros.h"
#include "util/u_prim.h"
#include "util/u_vbuf.h"

#include "panfrost-quirks.h"

#include "pan_cmdstream.h"
#include "pan_context.h"

/* If a BO is accessed for a particular shader stage, will it be in the primary
 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
 * fragment will be primary, e.g. compute jobs will be considered
 * "vertex/tiler" by analogy */

static inline uint32_t
panfrost_bo_access_for_stage(enum pipe_shader_type stage)
{
        assert(stage == PIPE_SHADER_FRAGMENT ||
               stage == PIPE_SHADER_VERTEX ||
               stage == PIPE_SHADER_COMPUTE);

        return stage == PIPE_SHADER_FRAGMENT ?
               PAN_BO_ACCESS_FRAGMENT :
               PAN_BO_ACCESS_VERTEX_TILER;
}

static void
panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
                               struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        unsigned shift = panfrost_get_stack_shift(batch->stack_size);
        struct mali_shared_memory shared = {
                .stack_shift = shift,
                .scratchpad = panfrost_batch_get_scratchpad(batch, shift, dev->thread_tls_alloc, dev->core_count)->gpu,
                .shared_workgroup_count = ~0,
        };
        postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
}

static void
panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
                               struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        /* If we haven't, reserve space for the framebuffer */

        if (!batch->framebuffer.gpu) {
                unsigned size = (dev->quirks & MIDGARD_SFBD) ?
                                sizeof(struct mali_single_framebuffer) :
                                sizeof(struct mali_framebuffer);

                batch->framebuffer = panfrost_pool_alloc(&batch->pool, size);

                /* Tag the pointer */
                if (!(dev->quirks & MIDGARD_SFBD))
                        batch->framebuffer.gpu |= MALI_MFBD;
        }

        postfix->shared_memory = batch->framebuffer.gpu;
}

static void
panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
                              struct mali_vertex_tiler_prefix *prefix,
                              struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_rasterizer *rasterizer = ctx->rasterizer;

        postfix->gl_enables |= 0x7;
        SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
                rasterizer && rasterizer->base.front_ccw);
        SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
                rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
        SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
                rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
        SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
                rasterizer && rasterizer->base.flatshade_first);
}

void
panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
                                  struct mali_vertex_tiler_prefix *prefix,
                                  union midgard_primitive_size *primitive_size)
{
        struct panfrost_rasterizer *rasterizer = ctx->rasterizer;

        if (!panfrost_writes_point_size(ctx)) {
                bool points = prefix->draw_mode == MALI_POINTS;
                float val = 0.0f;

                if (rasterizer)
                        val = points ?
                              rasterizer->base.point_size :
                              rasterizer->base.line_width;

                primitive_size->constant = val;
        }
}

static void
panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
                                   struct mali_vertex_tiler_postfix *postfix)
{
        SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
        if (ctx->occlusion_query) {
                postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
                panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_RW |
                                      PAN_BO_ACCESS_FRAGMENT);
        } else {
                postfix->occlusion_counter = 0;
        }
}

void
panfrost_vt_init(struct panfrost_context *ctx,
                 enum pipe_shader_type stage,
                 struct mali_vertex_tiler_prefix *prefix,
                 struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_device *device = pan_device(ctx->base.screen);

        if (!ctx->shader[stage])
                return;

        memset(prefix, 0, sizeof(*prefix));
        memset(postfix, 0, sizeof(*postfix));

        if (device->quirks & IS_BIFROST) {
                postfix->gl_enables = 0x2;
                panfrost_vt_emit_shared_memory(ctx, postfix);
        } else {
                postfix->gl_enables = 0x6;
                panfrost_vt_attach_framebuffer(ctx, postfix);
        }

        if (stage == PIPE_SHADER_FRAGMENT) {
                panfrost_vt_update_occlusion_query(ctx, postfix);
                panfrost_vt_update_rasterizer(ctx, prefix, postfix);
        }
}

static unsigned
panfrost_translate_index_size(unsigned size)
{
        switch (size) {
        case 1:
                return MALI_DRAW_INDEXED_UINT8;

        case 2:
                return MALI_DRAW_INDEXED_UINT16;

        case 4:
                return MALI_DRAW_INDEXED_UINT32;

        default:
                unreachable("Invalid index size");
        }
}

/* Gets a GPU address for the associated index buffer. Only guaranteed to be
 * good for the duration of the draw (transient), could last longer. Also get
 * the bounds on the index buffer for the range accessed by the draw. We do
 * these operations together because there are natural optimizations which
 * require them to be together. */

static mali_ptr
panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
                                  const struct pipe_draw_info *info,
                                  unsigned *min_index, unsigned *max_index)
{
        struct panfrost_resource *rsrc = pan_resource(info->index.resource);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        off_t offset = info->start * info->index_size;
        bool needs_indices = true;
        mali_ptr out = 0;

        if (info->max_index != ~0u) {
                *min_index = info->min_index;
                *max_index = info->max_index;
                needs_indices = false;
        }

        if (!info->has_user_indices) {
                /* Only resources can be directly mapped */
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      PAN_BO_ACCESS_VERTEX_TILER);
                out = rsrc->bo->gpu + offset;

                /* Check the cache */
                needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
                                                           info->start,
                                                           info->count,
                                                           min_index,
                                                           max_index);
        } else {
                /* Otherwise, we need to upload to transient memory */
                const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
                out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
                                           info->count * info->index_size);
        }

        if (needs_indices) {
                /* Fallback */
                u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);

                if (!info->has_user_indices)
                        panfrost_minmax_cache_add(rsrc->index_cache,
                                                  info->start, info->count,
                                                  *min_index, *max_index);
        }

        return out;
}

void
panfrost_vt_set_draw_info(struct panfrost_context *ctx,
                          const struct pipe_draw_info *info,
                          enum mali_draw_mode draw_mode,
                          struct mali_vertex_tiler_postfix *vertex_postfix,
                          struct mali_vertex_tiler_prefix *tiler_prefix,
                          struct mali_vertex_tiler_postfix *tiler_postfix,
                          unsigned *vertex_count,
                          unsigned *padded_count)
{
        tiler_prefix->draw_mode = draw_mode;

        unsigned draw_flags = 0;

        if (panfrost_writes_point_size(ctx))
                draw_flags |= MALI_DRAW_VARYING_SIZE;

        if (info->primitive_restart)
                draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;

        /* These don't make much sense */

        draw_flags |= 0x3000;

        if (info->index_size) {
                unsigned min_index = 0, max_index = 0;

                tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
                                                                          info,
                                                                          &min_index,
                                                                          &max_index);

                /* Use the corresponding values */
                *vertex_count = max_index - min_index + 1;
                tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
                tiler_prefix->offset_bias_correction = -min_index;
                tiler_prefix->index_count = MALI_POSITIVE(info->count);
                draw_flags |= panfrost_translate_index_size(info->index_size);
        } else {
                tiler_prefix->indices = 0;
                *vertex_count = ctx->vertex_count;
                tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
                tiler_prefix->offset_bias_correction = 0;
                tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
        }

        tiler_prefix->unknown_draw = draw_flags;

        /* Encode the padded vertex count */

        if (info->instance_count > 1) {
                *padded_count = panfrost_padded_vertex_count(*vertex_count);

                unsigned shift = __builtin_ctz(ctx->padded_count);
                unsigned k = ctx->padded_count >> (shift + 1);
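
                /* A worked sanity-check of this encoding: padded_count = 40 =
                 * 0b101000 gives shift = ctz(40) = 3 and k = 40 >> 4 = 2, so
                 * the hardware reconstructs (2k + 1) << shift = 5 << 3 = 40.
                 * This assumes the padded count is always an odd multiple of
                 * a power of two, which panfrost_padded_vertex_count appears
                 * to guarantee. */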

                tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
                tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
        } else {
                *padded_count = *vertex_count;

                /* Reset instancing state */
                tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
                tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
        }
}

static void
panfrost_shader_meta_init(struct panfrost_context *ctx,
                          enum pipe_shader_type st,
                          struct mali_shader_meta *meta)
{
        const struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);

        memset(meta, 0, sizeof(*meta));
        meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
        meta->attribute_count = ss->attribute_count;
        meta->varying_count = ss->varying_count;
        meta->texture_count = ctx->sampler_view_count[st];
        meta->sampler_count = ctx->sampler_count[st];

        if (dev->quirks & IS_BIFROST) {
                if (st == PIPE_SHADER_VERTEX)
                        meta->bifrost1.unk1 = 0x800000;
                else {
                        /* First clause ATEST |= 0x4000000.
                         * Less than 32 regs |= 0x200 */
                        meta->bifrost1.unk1 = 0x950020;
                }

                meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
                if (st == PIPE_SHADER_VERTEX)
                        meta->bifrost2.preload_regs = 0xC0;
                else {
                        meta->bifrost2.preload_regs = 0x1;
                        SET_BIT(meta->bifrost2.preload_regs, 0x10,
                                ss->reads_frag_coord);
                }

                meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
                                                    ss->uniform_cutoff);
        } else {
                meta->midgard1.uniform_count = MIN2(ss->uniform_count,
                                                    ss->uniform_cutoff);
                meta->midgard1.work_count = ss->work_reg_count;

                /* TODO: This is not conformant on ES3 */
                meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;

                meta->midgard1.flags_lo = 0x20;
                meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);

                SET_BIT(meta->midgard1.flags_hi, MALI_WRITES_GLOBAL,
                        ss->writes_global);
        }
}

static unsigned
panfrost_translate_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_FUNC_ALWAYS;

        default:
                unreachable("Invalid func");
        }
}

static unsigned
panfrost_translate_stencil_op(enum pipe_stencil_op in)
{
        switch (in) {
        case PIPE_STENCIL_OP_KEEP:
                return MALI_STENCIL_KEEP;

        case PIPE_STENCIL_OP_ZERO:
                return MALI_STENCIL_ZERO;

        case PIPE_STENCIL_OP_REPLACE:
                return MALI_STENCIL_REPLACE;

        case PIPE_STENCIL_OP_INCR:
                return MALI_STENCIL_INCR;

        case PIPE_STENCIL_OP_DECR:
                return MALI_STENCIL_DECR;

        case PIPE_STENCIL_OP_INCR_WRAP:
                return MALI_STENCIL_INCR_WRAP;

        case PIPE_STENCIL_OP_DECR_WRAP:
                return MALI_STENCIL_DECR_WRAP;

        case PIPE_STENCIL_OP_INVERT:
                return MALI_STENCIL_INVERT;

        default:
                unreachable("Invalid stencil op");
        }
}

static unsigned
translate_tex_wrap(enum pipe_tex_wrap w)
{
        switch (w) {
        case PIPE_TEX_WRAP_REPEAT:
                return MALI_WRAP_REPEAT;

        case PIPE_TEX_WRAP_CLAMP:
                return MALI_WRAP_CLAMP;

        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return MALI_WRAP_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return MALI_WRAP_CLAMP_TO_BORDER;

        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return MALI_WRAP_MIRRORED_REPEAT;

        case PIPE_TEX_WRAP_MIRROR_CLAMP:
                return MALI_WRAP_MIRRORED_CLAMP;

        case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
                return MALI_WRAP_MIRRORED_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
                return MALI_WRAP_MIRRORED_CLAMP_TO_BORDER;

        default:
                unreachable("Invalid wrap");
        }
}

void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
                                struct mali_sampler_descriptor *hw)
{
        unsigned func = panfrost_translate_compare_func(cso->compare_func);
        bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
        bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
        bool mip_linear = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;
        unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
        unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
        unsigned mip_filter = mip_linear ?
                              (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
        unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;

        *hw = (struct mali_sampler_descriptor) {
                .filter_mode = min_filter | mag_filter | mip_filter |
                               normalized,
                .wrap_s = translate_tex_wrap(cso->wrap_s),
                .wrap_t = translate_tex_wrap(cso->wrap_t),
                .wrap_r = translate_tex_wrap(cso->wrap_r),
                .compare_func = panfrost_flip_compare_func(func),
                .border_color = {
                        cso->border_color.f[0],
                        cso->border_color.f[1],
                        cso->border_color.f[2],
                        cso->border_color.f[3]
                },
                .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
                .max_lod = FIXED_16(cso->max_lod, false),
                .lod_bias = FIXED_16(cso->lod_bias, true), /* can be negative */
                .seamless_cube_map = cso->seamless_cube_map,
        };

        /* If necessary, we disable mipmapping in the sampler descriptor by
         * clamping the LOD as tight as possible (from 0 to epsilon,
         * essentially -- remember these are fixed point numbers, so
         * epsilon=1/256) */

        if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
                hw->max_lod = hw->min_lod + 1;
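
        /* Worked example, assuming FIXED_16 is 8.8 fixed point (consistent
         * with the 1/256 epsilon noted above): min_lod = 2.0 encodes as 512,
         * so max_lod becomes 513 ~= 2.004, pinning sampling to level 2. */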
}

void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
                                        struct bifrost_sampler_descriptor *hw)
{
        *hw = (struct bifrost_sampler_descriptor) {
                .wrap_s = translate_tex_wrap(cso->wrap_s),
                .wrap_t = translate_tex_wrap(cso->wrap_t),
                .wrap_r = translate_tex_wrap(cso->wrap_r),
                .min_filter = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST,
                .norm_coords = cso->normalized_coords,
                .mip_filter = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR,
                .mag_filter = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR,
                .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
                .max_lod = FIXED_16(cso->max_lod, false),
        };

        /* If necessary, we disable mipmapping in the sampler descriptor by
         * clamping the LOD as tight as possible (from 0 to epsilon,
         * essentially -- remember these are fixed point numbers, so
         * epsilon=1/256) */

        if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
                hw->max_lod = hw->min_lod + 1;
}

static void
panfrost_make_stencil_state(const struct pipe_stencil_state *in,
                            struct mali_stencil_test *out)
{
        out->ref = 0; /* Gallium gets it from elsewhere */

        out->mask = in->valuemask;
        out->func = panfrost_translate_compare_func(in->func);
        out->sfail = panfrost_translate_stencil_op(in->fail_op);
        out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
        out->dppass = panfrost_translate_stencil_op(in->zpass_op);
}

static void
panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
                                     struct mali_shader_meta *fragmeta)
{
        if (!ctx->rasterizer) {
                SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
                SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
                fragmeta->depth_units = 0.0f;
                fragmeta->depth_factor = 0.0f;
                SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
                SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, true);
                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, true);
                return;
        }

        struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;

        bool msaa = rast->multisample;

        /* TODO: Sample size */
        SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
        SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);

        SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE,
                msaa && ctx->min_samples > 1);

        fragmeta->depth_units = rast->offset_units * 2.0f;
        fragmeta->depth_factor = rast->offset_scale;

        /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */

        SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
        SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);

        SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
        SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
}

static void
panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
                              struct mali_shader_meta *fragmeta)
{
        const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
        int zfunc = PIPE_FUNC_ALWAYS;

        if (!zsa) {
                struct pipe_stencil_state default_stencil = {
                        .enabled = 0,
                        .func = PIPE_FUNC_ALWAYS,
                        .fail_op = MALI_STENCIL_KEEP,
                        .zfail_op = MALI_STENCIL_KEEP,
                        .zpass_op = MALI_STENCIL_KEEP,
                        .writemask = 0xFF
                };

                panfrost_make_stencil_state(&default_stencil,
                                            &fragmeta->stencil_front);
                fragmeta->stencil_mask_front = default_stencil.writemask;
                fragmeta->stencil_back = fragmeta->stencil_front;
                fragmeta->stencil_mask_back = default_stencil.writemask;
                SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
        } else {
                SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
                        zsa->stencil[0].enabled);
                panfrost_make_stencil_state(&zsa->stencil[0],
                                            &fragmeta->stencil_front);
                fragmeta->stencil_mask_front = zsa->stencil[0].writemask;
                fragmeta->stencil_front.ref = ctx->stencil_ref.ref_value[0];

                /* If back-stencil is not enabled, use the front values */

                if (zsa->stencil[1].enabled) {
                        panfrost_make_stencil_state(&zsa->stencil[1],
                                                    &fragmeta->stencil_back);
                        fragmeta->stencil_mask_back = zsa->stencil[1].writemask;
                        fragmeta->stencil_back.ref = ctx->stencil_ref.ref_value[1];
                } else {
                        fragmeta->stencil_back = fragmeta->stencil_front;
                        fragmeta->stencil_mask_back = fragmeta->stencil_mask_front;
                        fragmeta->stencil_back.ref = fragmeta->stencil_front.ref;
                }

                if (zsa->depth.enabled)
                        zfunc = zsa->depth.func;

                /* Depth state (TODO: Refactor) */

                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
                        zsa->depth.writemask);
        }

        fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
        fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
}

static bool
panfrost_fs_required(
                struct panfrost_shader_state *fs,
                struct panfrost_blend_final *blend,
                unsigned rt_count)
{
        /* If we generally have side effects */
        if (fs->fs_sidefx)
                return true;

        /* If colour is written we need to execute */
        for (unsigned i = 0; i < rt_count; ++i) {
                if (!blend[i].no_colour)
                        return true;
        }

        /* If depth is written and not implied we need to execute.
         * TODO: Predicate on Z/S writes being enabled */
        return (fs->writes_depth || fs->writes_stencil);
}

static void
panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
                                struct mali_shader_meta *fragmeta,
                                void *rts)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        const struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_shader_state *fs;
        fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);

        SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
                (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
                !ctx->blend->base.dither);

        SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
                ctx->blend->base.alpha_to_coverage);

        /* Get blending setup */
        unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);

        struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
        unsigned shader_offset = 0;
        struct panfrost_bo *shader_bo = NULL;

        for (unsigned c = 0; c < rt_count; ++c)
                blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
                                                          &shader_offset);

        /* Disable shader execution if we can */
        if (dev->quirks & MIDGARD_SHADERLESS
            && !panfrost_fs_required(fs, blend, rt_count)) {
                fragmeta->shader = 0;
                fragmeta->attribute_count = 0;
                fragmeta->varying_count = 0;
                fragmeta->texture_count = 0;
                fragmeta->sampler_count = 0;

                /* This feature is not known to work on Bifrost */
                fragmeta->midgard1.work_count = 1;
                fragmeta->midgard1.uniform_count = 0;
                fragmeta->midgard1.uniform_buffer_count = 0;
        }

        /* If there is a blend shader, work registers are shared. We impose 8
         * work registers as a limit for blend shaders. Should be lower XXX */

        if (!(dev->quirks & IS_BIFROST)) {
                for (unsigned c = 0; c < rt_count; ++c) {
                        if (blend[c].is_shader) {
                                fragmeta->midgard1.work_count =
                                        MAX2(fragmeta->midgard1.work_count, 8);
                        }
                }
        }

        /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
         * copied to the blend_meta appended (by convention), but this is the
         * field actually read by the hardware. (Or maybe both are read...?).
         * Specify the last RTi with a blend shader. */

        fragmeta->blend.shader = 0;

        for (signed rt = (rt_count - 1); rt >= 0; --rt) {
                if (!blend[rt].is_shader)
                        continue;

                fragmeta->blend.shader = blend[rt].shader.gpu |
                                         blend[rt].shader.first_tag;
                break;
        }

        if (dev->quirks & MIDGARD_SFBD) {
                /* When only a single render target platform is used, the blend
                 * information is inside the shader meta itself. We additionally
                 * need to signal CAN_DISCARD for nontrivial blend modes (so
                 * we're able to read back the destination buffer) */

                SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
                        blend[0].is_shader);

                if (!blend[0].is_shader) {
                        fragmeta->blend.equation = *blend[0].equation.equation;
                        fragmeta->blend.constant = blend[0].equation.constant;
                }

                SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
                        !blend[0].no_blending || fs->can_discard);

                batch->draws |= PIPE_CLEAR_COLOR0;
                return;
        }

        if (dev->quirks & IS_BIFROST) {
                bool no_blend = true;

                for (unsigned i = 0; i < rt_count; ++i)
                        no_blend &= (blend[i].no_blending | blend[i].no_colour);

                SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
                        !fs->can_discard && !fs->writes_depth && no_blend);
        }

        /* Additional blend descriptor tacked on for jobs using MFBD */

        for (unsigned i = 0; i < rt_count; ++i) {
                unsigned flags = 0;

                if (ctx->pipe_framebuffer.nr_cbufs > i && !blend[i].no_colour) {
                        flags = 0x200;
                        batch->draws |= (PIPE_CLEAR_COLOR0 << i);

                        bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
                                       (ctx->pipe_framebuffer.cbufs[i]) &&
                                       util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);

                        SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
                        SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
                        SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
                        SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
                }

                if (dev->quirks & IS_BIFROST) {
                        struct bifrost_blend_rt *brts = rts;

                        brts[i].flags = flags;

                        if (blend[i].is_shader) {
                                /* The blend shader's address needs to be at
                                 * the same top 32 bit as the fragment shader.
                                 * TODO: Ensure that's always the case.
                                 */
                                assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
                                       (fs->bo->gpu & (0xffffffffull << 32)));
                                brts[i].shader = blend[i].shader.gpu;
                                brts[i].unk2 = 0x0;
                        } else if (ctx->pipe_framebuffer.nr_cbufs > i) {
                                enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
                                const struct util_format_description *format_desc;
                                format_desc = util_format_description(format);

                                brts[i].equation = *blend[i].equation.equation;

                                /* TODO: this is a bit more complicated */
                                brts[i].constant = blend[i].equation.constant;

                                brts[i].format = panfrost_format_to_bifrost_blend(format_desc);

                                /* 0x19 disables blending and forces REPLACE
                                 * mode (equivalent to rgb_mode = alpha_mode =
                                 * x122, colour mask = 0xF). 0x1a allows
                                 * blending. */
                                brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;

                                brts[i].shader_type = fs->blend_types[i];
                        } else {
                                /* Dummy attachment for depth-only */
                                brts[i].unk2 = 0x3;
                                brts[i].shader_type = fs->blend_types[i];
                        }
                } else {
                        struct midgard_blend_rt *mrts = rts;
                        mrts[i].flags = flags;

                        if (blend[i].is_shader) {
                                mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
                        } else {
                                mrts[i].blend.equation = *blend[i].equation.equation;
                                mrts[i].blend.constant = blend[i].equation.constant;
                        }
                }
        }
}

static void
panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
                               struct mali_shader_meta *fragmeta,
                               void *rts)
{
        const struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_shader_state *fs;

        fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);

        bool msaa = ctx->rasterizer && ctx->rasterizer->base.multisample;
        fragmeta->coverage_mask = (msaa ? ctx->sample_mask : ~0) & 0xF;

        fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
        fragmeta->unknown2_4 = 0x4e0;

        /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
         * is required (independent of 32-bit/64-bit descriptors), or why it's
         * not used on later GPU revisions. Otherwise, all shader jobs fault on
         * these earlier chips (perhaps this is a chicken bit of some kind).
         * More investigation is needed. */

        SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);

        if (dev->quirks & IS_BIFROST) {
                /* TODO */
        } else {
                /* Depending on whether it's legal to do so in the given
                 * shader, we try to enable early-z testing. TODO: respect
                 * e-z force */

                SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
                        !fs->can_discard && !fs->writes_global &&
                        !fs->writes_depth && !fs->writes_stencil &&
                        !ctx->blend->base.alpha_to_coverage);

                /* Add the writes Z/S flags if needed. */
                SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
                SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);

                /* Any time texturing is used, derivatives are implicitly calculated,
                 * so we need to enable helper invocations */

                SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
                        fs->helper_invocations);

                const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;

                bool depth_enabled = fs->writes_depth ||
                        (zsa && zsa->depth.enabled &&
                         zsa->depth.func != PIPE_FUNC_ALWAYS);

                SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
                        fs->outputs_read || (!depth_enabled && fs->can_discard));
                SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS,
                        depth_enabled && fs->can_discard);
        }

        panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
        panfrost_frag_meta_zsa_update(ctx, fragmeta);
        panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
}

void
panfrost_emit_shader_meta(struct panfrost_batch *batch,
                          enum pipe_shader_type st,
                          struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);

        if (!ss) {
                postfix->shader = 0;
                return;
        }

        struct mali_shader_meta meta;

        panfrost_shader_meta_init(ctx, st, &meta);

        /* Add the shader BO to the batch. */
        panfrost_batch_add_bo(batch, ss->bo,
                              PAN_BO_ACCESS_PRIVATE |
                              PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        mali_ptr shader_ptr;

        if (st == PIPE_SHADER_FRAGMENT) {
                struct panfrost_device *dev = pan_device(ctx->base.screen);
                unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
                size_t desc_size = sizeof(meta);
                void *rts = NULL;
                struct panfrost_transfer xfer;
                unsigned rt_size;

                if (dev->quirks & MIDGARD_SFBD)
                        rt_size = 0;
                else if (dev->quirks & IS_BIFROST)
                        rt_size = sizeof(struct bifrost_blend_rt);
                else
                        rt_size = sizeof(struct midgard_blend_rt);

                desc_size += rt_size * rt_count;

                if (rt_size)
                        rts = rzalloc_size(ctx, rt_size * rt_count);

                panfrost_frag_shader_meta_init(ctx, &meta, rts);

                xfer = panfrost_pool_alloc(&batch->pool, desc_size);

                memcpy(xfer.cpu, &meta, sizeof(meta));
                memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);

                if (rt_size)
                        ralloc_free(rts);

                shader_ptr = xfer.gpu;
        } else {
                shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
                                                  sizeof(meta));
        }

        postfix->shader = shader_ptr;
}

static void
panfrost_mali_viewport_init(struct panfrost_context *ctx,
                            struct mali_viewport *mvp)
{
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        /* Clip bounds are encoded as floats. The viewport itself is encoded as
         * (somewhat) asymmetric ints. */

        const struct pipe_scissor_state *ss = &ctx->scissor;

        memset(mvp, 0, sizeof(*mvp));

        /* By default, do no viewport clipping, i.e. clip to (-inf, inf) in
         * each direction. Clipping to the viewport in theory should work, but
         * in practice causes issues when we're not explicitly trying to
         * scissor */

        *mvp = (struct mali_viewport) {
                .clip_minx = -INFINITY,
                .clip_miny = -INFINITY,
                .clip_maxx = INFINITY,
                .clip_maxy = INFINITY,
        };

        /* Always scissor to the viewport by default. */
        float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
        float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));

        float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
        float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));

        float minz = (vp->translate[2] - fabsf(vp->scale[2]));
        float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
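
        /* As a concrete example, a standard 800x600 viewport comes from
         * Gallium as translate = (400, 300, ...) and scale = (400, -300, ...),
         * so the fabsf() math above yields [0, 800] x [0, 600] whether or not
         * the viewport is Y-flipped. */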

        /* Apply the scissor test */

        unsigned minx, miny, maxx, maxy;

        if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
                minx = MAX2(ss->minx, vp_minx);
                miny = MAX2(ss->miny, vp_miny);
                maxx = MIN2(ss->maxx, vp_maxx);
                maxy = MIN2(ss->maxy, vp_maxy);
        } else {
                minx = vp_minx;
                miny = vp_miny;
                maxx = vp_maxx;
                maxy = vp_maxy;
        }

        /* Hardware needs the min/max to be strictly ordered, so flip if we
         * need to. The viewport transformation in the vertex shader will
         * handle the negatives if we don't */

        if (miny > maxy) {
                unsigned temp = miny;
                miny = maxy;
                maxy = temp;
        }

        if (minx > maxx) {
                unsigned temp = minx;
                minx = maxx;
                maxx = temp;
        }

        if (minz > maxz) {
                float temp = minz;
                minz = maxz;
                maxz = temp;
        }

        /* Clamp to the framebuffer size as a last check */

        minx = MIN2(ctx->pipe_framebuffer.width, minx);
        maxx = MIN2(ctx->pipe_framebuffer.width, maxx);

        miny = MIN2(ctx->pipe_framebuffer.height, miny);
        maxy = MIN2(ctx->pipe_framebuffer.height, maxy);

        mvp->viewport0[0] = minx;
        mvp->viewport1[0] = MALI_POSITIVE(maxx);

        mvp->viewport0[1] = miny;
        mvp->viewport1[1] = MALI_POSITIVE(maxy);

        bool clip_near = true;
        bool clip_far = true;

        if (ctx->rasterizer) {
                clip_near = ctx->rasterizer->base.depth_clip_near;
                clip_far = ctx->rasterizer->base.depth_clip_far;
        }

        mvp->clip_minz = clip_near ? minz : -INFINITY;
        mvp->clip_maxz = clip_far ? maxz : INFINITY;
}

void
panfrost_emit_viewport(struct panfrost_batch *batch,
                       struct mali_vertex_tiler_postfix *tiler_postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct mali_viewport mvp;

        panfrost_mali_viewport_init(batch->ctx, &mvp);

        /* Update the job, unless we're doing wallpapering (whose lack of
         * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
         * just... be faster :) */

        if (!ctx->wallpaper_batch)
                panfrost_batch_union_scissor(batch, mvp.viewport0[0],
                                             mvp.viewport0[1],
                                             mvp.viewport1[0] + 1,
                                             mvp.viewport1[1] + 1);

        tiler_postfix->viewport = panfrost_pool_upload(&batch->pool, &mvp,
                                                       sizeof(mvp));
}

static mali_ptr
panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
                                 enum pipe_shader_type st,
                                 struct panfrost_constant_buffer *buf,
                                 unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);

        if (rsrc) {
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      panfrost_bo_access_for_stage(st));

                /* Alignment guaranteed by
                 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
                return rsrc->bo->gpu + cb->buffer_offset;
        } else if (cb->user_buffer) {
                return panfrost_pool_upload(&batch->pool,
                                            cb->user_buffer +
                                            cb->buffer_offset,
                                            cb->buffer_size);
        } else {
                unreachable("No constant buffer");
        }
}

struct sysval_uniform {
        union {
                float f[4];
                int32_t i[4];
                uint32_t u[4];
                uint64_t du[2];
        };
};

static void
panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
                                      struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->scale[0];
        uniform->f[1] = vp->scale[1];
        uniform->f[2] = vp->scale[2];
}

static void
panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
                                       struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->translate[0];
        uniform->f[1] = vp->translate[1];
        uniform->f[2] = vp->translate[2];
}

static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
                                       enum pipe_shader_type st,
                                       unsigned int sysvalid,
                                       struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
        unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
        bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
        struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;

        assert(dim);
        uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);

        if (dim > 1)
                uniform->i[1] = u_minify(tex->texture->height0,
                                         tex->u.tex.first_level);

        if (dim > 2)
                uniform->i[2] = u_minify(tex->texture->depth0,
                                         tex->u.tex.first_level);

        if (is_array)
                uniform->i[dim] = tex->texture->array_size;
}

static void
panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
                            enum pipe_shader_type st,
                            unsigned ssbo_id,
                            struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;

        assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
        struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];

        /* Compute address */
        struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;

        panfrost_batch_add_bo(batch, bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
                              panfrost_bo_access_for_stage(st));

        /* Upload address and size as sysval */
        uniform->du[0] = bo->gpu + sb.buffer_offset;
        uniform->u[2] = sb.buffer_size;
}

static void
panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
                               enum pipe_shader_type st,
                               unsigned samp_idx,
                               struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;

        uniform->f[0] = sampl->min_lod;
        uniform->f[1] = sampl->max_lod;
        uniform->f[2] = sampl->lod_bias;

        /* Even without any errata, Midgard represents "no mipmapping" as
         * fixing the LOD with the clamps; keep behaviour consistent. c.f.
         * panfrost_create_sampler_state which also explains our choice of
         * epsilon value (again to keep behaviour consistent) */

        if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
                uniform->f[1] = uniform->f[0] + (1.0/256.0);
}

static void
panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
                                       struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;

        uniform->u[0] = ctx->compute_grid->grid[0];
        uniform->u[1] = ctx->compute_grid->grid[1];
        uniform->u[2] = ctx->compute_grid->grid[2];
}

static void
panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
                        struct panfrost_shader_state *ss,
                        enum pipe_shader_type st)
{
        struct sysval_uniform *uniforms = (void *)buf;

        for (unsigned i = 0; i < ss->sysval_count; ++i) {
                int sysval = ss->sysval[i];

                switch (PAN_SYSVAL_TYPE(sysval)) {
                case PAN_SYSVAL_VIEWPORT_SCALE:
                        panfrost_upload_viewport_scale_sysval(batch,
                                                              &uniforms[i]);
                        break;
                case PAN_SYSVAL_VIEWPORT_OFFSET:
                        panfrost_upload_viewport_offset_sysval(batch,
                                                               &uniforms[i]);
                        break;
                case PAN_SYSVAL_TEXTURE_SIZE:
                        panfrost_upload_txs_sysval(batch, st,
                                                   PAN_SYSVAL_ID(sysval),
                                                   &uniforms[i]);
                        break;
                case PAN_SYSVAL_SSBO:
                        panfrost_upload_ssbo_sysval(batch, st,
                                                    PAN_SYSVAL_ID(sysval),
                                                    &uniforms[i]);
                        break;
                case PAN_SYSVAL_NUM_WORK_GROUPS:
                        panfrost_upload_num_work_groups_sysval(batch,
                                                               &uniforms[i]);
                        break;
                case PAN_SYSVAL_SAMPLER:
                        panfrost_upload_sampler_sysval(batch, st,
                                                       PAN_SYSVAL_ID(sysval),
                                                       &uniforms[i]);
                        break;
                default:
                        assert(0);
                }
        }
}

static const void *
panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
                                 unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);

        if (rsrc)
                return rsrc->bo->cpu;
        else if (cb->user_buffer)
                return cb->user_buffer;
        else
                unreachable("No constant buffer");
}

void
panfrost_emit_const_buf(struct panfrost_batch *batch,
                        enum pipe_shader_type stage,
                        struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_variants *all = ctx->shader[stage];

        if (!all)
                return;

        struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];

        struct panfrost_shader_state *ss = &all->variants[all->active_variant];

        /* Uniforms are implicitly UBO #0 */
        bool has_uniforms = buf->enabled_mask & (1 << 0);

        /* Allocate room for the sysval and the uniforms */
        size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
        size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
        size_t size = sys_size + uniform_size;
        struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
                                                                size);

        /* Upload sysvals requested by the shader */
        panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);

        /* Upload uniforms */
        if (has_uniforms && uniform_size) {
                const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
                memcpy(transfer.cpu + sys_size, cpu, uniform_size);
        }

        /* Next up, attach UBOs. UBO #0 is the uniforms we just
         * uploaded */

        unsigned ubo_count = panfrost_ubo_count(ctx, stage);
        assert(ubo_count >= 1);

        size_t sz = sizeof(uint64_t) * ubo_count;
        uint64_t ubos[PAN_MAX_CONST_BUFFERS];
        int uniform_count = ss->uniform_count;

        /* Upload uniforms as a UBO */
        ubos[0] = MALI_MAKE_UBO(2 + uniform_count, transfer.gpu);

        /* The rest are honest-to-goodness UBOs */

        for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
                size_t usz = buf->cb[ubo].buffer_size;
                bool enabled = buf->enabled_mask & (1 << ubo);
                bool empty = usz == 0;

                if (!enabled || empty) {
                        /* Stub out disabled UBOs to catch accesses */
                        ubos[ubo] = MALI_MAKE_UBO(0, 0xDEAD0000);
                        continue;
                }

                mali_ptr gpu = panfrost_map_constant_buffer_gpu(batch, stage,
                                                                buf, ubo);

                unsigned bytes_per_field = 16;
                unsigned aligned = ALIGN_POT(usz, bytes_per_field);
                ubos[ubo] = MALI_MAKE_UBO(aligned / bytes_per_field, gpu);
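
                /* For instance, a 100-byte UBO is padded to 112 bytes and
                 * described as 112 / 16 = 7 entries: the size in the
                 * descriptor is counted in 16-byte fields, not bytes. */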
        }

        mali_ptr ubufs = panfrost_pool_upload(&batch->pool, ubos, sz);
        postfix->uniforms = transfer.gpu;
        postfix->uniform_buffers = ubufs;

        buf->dirty_mask = 0;
}

void
panfrost_emit_shared_memory(struct panfrost_batch *batch,
                            const struct pipe_grid_info *info,
                            struct midgard_payload_vertex_tiler *vtp)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
        struct panfrost_shader_state *ss = &all->variants[all->active_variant];
        unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
                                                           128));
        unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
                               info->grid[2] * 4;
        struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
                                                                  shared_size,
                                                                  1);

        struct mali_shared_memory shared = {
                .shared_memory = bo->gpu,
                .shared_workgroup_count =
                        util_logbase2_ceil(info->grid[0]) +
                        util_logbase2_ceil(info->grid[1]) +
                        util_logbase2_ceil(info->grid[2]),
                .shared_unk1 = 0x2,
                .shared_shift = util_logbase2(single_size) - 1
        };
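
        /* The workgroup count is encoded as a sum of ceil-log2s: a 4x3x1
         * grid, say, gives 2 + 2 + 0 = 4, reserving 2^4 = 16 >= 12 slots.
         * Presumably the hardware wants a power-of-two allocation. */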

        vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
                                                          sizeof(shared));
}

static mali_ptr
panfrost_get_tex_desc(struct panfrost_batch *batch,
                      enum pipe_shader_type st,
                      struct panfrost_sampler_view *view)
{
        if (!view)
                return (mali_ptr) 0;

        struct pipe_sampler_view *pview = &view->base;
        struct panfrost_resource *rsrc = pan_resource(pview->texture);

        /* Add the BO to the job so it's retained until the job is done. */

        panfrost_batch_add_bo(batch, rsrc->bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        panfrost_batch_add_bo(batch, view->bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        return view->bo->gpu;
}

static void
panfrost_update_sampler_view(struct panfrost_sampler_view *view,
                             struct pipe_context *pctx)
{
        struct panfrost_resource *rsrc = pan_resource(view->base.texture);
        if (view->texture_bo != rsrc->bo->gpu ||
            view->layout != rsrc->layout) {
                panfrost_bo_unreference(view->bo);
                panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
        }
}

void
panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
                                  enum pipe_shader_type stage,
                                  struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_device *device = pan_device(ctx->base.screen);

        if (!ctx->sampler_view_count[stage])
                return;

        if (device->quirks & IS_BIFROST) {
                struct bifrost_texture_descriptor *descriptors;

                descriptors = malloc(sizeof(struct bifrost_texture_descriptor) *
                                     ctx->sampler_view_count[stage]);

                for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
                        struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
                        struct pipe_sampler_view *pview = &view->base;
                        struct panfrost_resource *rsrc = pan_resource(pview->texture);
                        panfrost_update_sampler_view(view, &ctx->base);

                        /* Add the BOs to the job so they are retained until the job is done. */

                        panfrost_batch_add_bo(batch, rsrc->bo,
                                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                                              panfrost_bo_access_for_stage(stage));

                        panfrost_batch_add_bo(batch, view->bo,
                                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                                              panfrost_bo_access_for_stage(stage));

                        memcpy(&descriptors[i], view->bifrost_descriptor, sizeof(*view->bifrost_descriptor));
                }

                postfix->textures = panfrost_pool_upload(&batch->pool,
                                                         descriptors,
                                                         sizeof(struct bifrost_texture_descriptor) *
                                                         ctx->sampler_view_count[stage]);

                free(descriptors);
        } else {
                uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];

                for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
                        struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];

                        panfrost_update_sampler_view(view, &ctx->base);

                        trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
                }

                postfix->textures = panfrost_pool_upload(&batch->pool,
                                                         trampolines,
                                                         sizeof(uint64_t) *
                                                         ctx->sampler_view_count[stage]);
        }
}

void
panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
                                  enum pipe_shader_type stage,
                                  struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_device *device = pan_device(ctx->base.screen);

        if (!ctx->sampler_count[stage])
                return;

        if (device->quirks & IS_BIFROST) {
                size_t desc_size = sizeof(struct bifrost_sampler_descriptor);
                size_t transfer_size = desc_size * ctx->sampler_count[stage];
                struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
                                                                        transfer_size);
                struct bifrost_sampler_descriptor *desc = (struct bifrost_sampler_descriptor *)transfer.cpu;

                for (int i = 0; i < ctx->sampler_count[stage]; ++i)
                        desc[i] = ctx->samplers[stage][i]->bifrost_hw;

                postfix->sampler_descriptor = transfer.gpu;
        } else {
                size_t desc_size = sizeof(struct mali_sampler_descriptor);
                size_t transfer_size = desc_size * ctx->sampler_count[stage];
                struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
                                                                        transfer_size);
                struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *)transfer.cpu;

                for (int i = 0; i < ctx->sampler_count[stage]; ++i)
                        desc[i] = ctx->samplers[stage][i]->midgard_hw;

                postfix->sampler_descriptor = transfer.gpu;
        }
}

void
panfrost_emit_vertex_attr_meta(struct panfrost_batch *batch,
                               struct mali_vertex_tiler_postfix *vertex_postfix)
{
        struct panfrost_context *ctx = batch->ctx;

        if (!ctx->vertex)
                return;

        struct panfrost_vertex_state *so = ctx->vertex;

        panfrost_vertex_state_upd_attr_offs(ctx, vertex_postfix);
        vertex_postfix->attribute_meta = panfrost_pool_upload(&batch->pool, so->hw,
                                                              sizeof(*so->hw) *
                                                              PAN_MAX_ATTRIBUTE);
}

void
panfrost_emit_vertex_data(struct panfrost_batch *batch,
                          struct mali_vertex_tiler_postfix *vertex_postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_vertex_state *so = ctx->vertex;

        /* Staged mali_attr, and index into them. i =/= k, depending on the
         * vertex buffer mask and instancing. Twice as much room is allocated,
         * for a worst case of NPOT_DIVIDEs which take up extra slot */
        union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
        unsigned k = 0;

        for (unsigned i = 0; i < so->num_elements; ++i) {
                /* We map a mali_attr to be 1:1 with the mali_attr_meta, which
                 * means duplicating some vertex buffers (who cares? aside from
                 * maybe some caching implications but I somehow doubt that
                 * matters) */

                struct pipe_vertex_element *elem = &so->pipe[i];
                unsigned vbi = elem->vertex_buffer_index;

                /* The exception to 1:1 mapping is that we can have multiple
                 * entries (NPOT divisors), so we fixup anyways */

                so->hw[i].index = k;

                if (!(ctx->vb_mask & (1 << vbi)))
                        continue;

                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
                struct panfrost_resource *rsrc;

                rsrc = pan_resource(buf->buffer.resource);
                if (!rsrc)
                        continue;

                /* Align to 64 bytes by masking off the lower bits. This
                 * will be adjusted back when we fixup the src_offset in
                 * mali_attr_meta */

                mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
                mali_ptr addr = raw_addr & ~63;
                unsigned chopped_addr = raw_addr - addr;

                /* Add a dependency of the batch on the vertex buffer */
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      PAN_BO_ACCESS_VERTEX_TILER);

                /* Set common fields */
                attrs[k].elements = addr;
                attrs[k].stride = buf->stride;

                /* Since we advanced the base pointer, we shrink the buffer
                 * size */
                attrs[k].size = rsrc->base.width0 - buf->buffer_offset;

                /* We need to add the extra size we masked off (for
                 * correctness) so the data doesn't get clamped away */
                attrs[k].size += chopped_addr;

                /* For non-instancing make sure we initialize */
                attrs[k].shift = attrs[k].extra_flags = 0;

                /* Instancing uses a dramatically different code path than
                 * linear, so dispatch for the actual emission now that the
                 * common code is finished */

                unsigned divisor = elem->instance_divisor;

                if (divisor && ctx->instance_count == 1) {
                        /* Silly corner case where there's a divisor(=1) but
                         * there's no legitimate instancing. So we want *every*
                         * attribute to be the same. So set stride to zero so
                         * we don't go anywhere. */

                        attrs[k].size = attrs[k].stride + chopped_addr;
                        attrs[k].stride = 0;
                        attrs[k++].elements |= MALI_ATTR_LINEAR;
                } else if (ctx->instance_count <= 1) {
                        /* Normal, non-instanced attributes */
                        attrs[k++].elements |= MALI_ATTR_LINEAR;
                } else {
                        unsigned instance_shift = vertex_postfix->instance_shift;
                        unsigned instance_odd = vertex_postfix->instance_odd;

                        k += panfrost_vertex_instanced(ctx->padded_count,
                                                       instance_shift,
                                                       instance_odd,
                                                       divisor, &attrs[k]);
                }
        }

        /* Add special gl_VertexID/gl_InstanceID buffers */

        panfrost_vertex_id(ctx->padded_count, &attrs[k]);
        so->hw[PAN_VERTEX_ID].index = k++;
        panfrost_instance_id(ctx->padded_count, &attrs[k]);
        so->hw[PAN_INSTANCE_ID].index = k++;

        /* Upload whatever we emitted and go */

        vertex_postfix->attributes = panfrost_pool_upload(&batch->pool, attrs,
                                                          k * sizeof(*attrs));
}

static mali_ptr
panfrost_emit_varyings(struct panfrost_batch *batch, union mali_attr *slot,
                       unsigned stride, unsigned count)
{
        /* Fill out the descriptor */
        slot->stride = stride;
        slot->size = stride * count;
        slot->shift = slot->extra_flags = 0;

        struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
                                                                slot->size);

        slot->elements = transfer.gpu | MALI_ATTR_LINEAR;

        return transfer.gpu;
}

static unsigned
panfrost_streamout_offset(unsigned stride, unsigned offset,
                          struct pipe_stream_output_target *target)
{
        return (target->buffer_offset + (offset * stride * 4)) & 63;
}
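
/* Note the & 63 mirrors the & ~63 in panfrost_emit_streamout below: the
 * record's src_offset re-applies exactly the sub-64-byte remainder that the
 * address alignment chops off. */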

static void
panfrost_emit_streamout(struct panfrost_batch *batch, union mali_attr *slot,
                        unsigned stride, unsigned offset, unsigned count,
                        struct pipe_stream_output_target *target)
{
        /* Fill out the descriptor */
        slot->stride = stride * 4;
        slot->shift = slot->extra_flags = 0;

        unsigned max_size = target->buffer_size;
        unsigned expected_size = slot->stride * count;

        /* Grab the BO and bind it to the batch */
        struct panfrost_bo *bo = pan_resource(target->buffer)->bo;

        /* Varyings are WRITE from the perspective of the VERTEX but READ from
         * the perspective of the TILER and FRAGMENT.
         */
        panfrost_batch_add_bo(batch, bo,
                              PAN_BO_ACCESS_SHARED |
                              PAN_BO_ACCESS_RW |
                              PAN_BO_ACCESS_VERTEX_TILER |
                              PAN_BO_ACCESS_FRAGMENT);

        /* We will have an offset applied to get alignment */
        mali_ptr addr = bo->gpu + target->buffer_offset + (offset * slot->stride);
        slot->elements = (addr & ~63) | MALI_ATTR_LINEAR;
        slot->size = MIN2(max_size, expected_size) + (addr & 63);
}

static bool
has_point_coord(unsigned mask, gl_varying_slot loc)
{
        if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
                return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
        else if (loc == VARYING_SLOT_PNTC)
                return (mask & (1 << 8));
        else
                return false;
}

/* Helpers for manipulating stream out information so we can pack varyings
 * accordingly. Compute the src_offset for a given captured varying */

static struct pipe_stream_output *
pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
{
        for (unsigned i = 0; i < info->num_outputs; ++i) {
                if (info->output[i].register_index == loc)
                        return &info->output[i];
        }

        unreachable("Varying not captured");
}

static unsigned
pan_varying_size(enum mali_format fmt)
{
        unsigned type = MALI_EXTRACT_TYPE(fmt);
        unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
        unsigned bits = MALI_EXTRACT_BITS(fmt);
        unsigned bpc = 0;

        if (bits == MALI_CHANNEL_FLOAT) {
                /* No doubles */
                bool fp16 = (type == MALI_FORMAT_SINT);
                assert(fp16 || (type == MALI_FORMAT_UNORM));

                bpc = fp16 ? 2 : 4;
        } else {
                assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);

                bits = 1 << bits;
                bpc = bits / 8;
        }

        return bpc * chan;
}

/* Indices for named (non-XFB) varyings that are present. These are packed
 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
 * PAN_VARY_*). This has the nice property that you can lookup the buffer index
 * of a given special field given a shift S by:
 *
 *      idx = popcount(P & ((1 << S) - 1))
 *
 * That is, count the varyings that come earlier; that count is this varying's
 * buffer index. Likewise, the total number of special buffers required is
 * simply popcount(P) */

enum pan_special_varying {
        PAN_VARY_GENERAL = 0,
        PAN_VARY_POSITION = 1,
        PAN_VARY_PSIZ = 2,
        PAN_VARY_PNTCOORD = 3,
        PAN_VARY_FACE = 4,
        PAN_VARY_FRAGCOORD = 5,

        /* Keep last */
        PAN_VARY_MAX
};

/* Given a varying, figure out which index it corresponds to */

static inline unsigned
pan_varying_index(unsigned present, enum pan_special_varying v)
{
        unsigned mask = (1 << v) - 1;
        return util_bitcount(present & mask);
}
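
/* A quick worked example: with present = 0b1011 (GENERAL, POSITION and
 * PNTCOORD), pan_varying_index(present, PAN_VARY_PNTCOORD) masks with
 * (1 << 3) - 1 = 0b111 and returns popcount(0b011) = 2, so the point coord
 * record is the third buffer. */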

/* Get the base offset for XFB buffers, which by convention come after
 * everything else. Wrapper function for semantic reasons; by construction this
 * is just popcount. */

static inline unsigned
pan_xfb_base(unsigned present)
{
        return util_bitcount(present);
}
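
/* Continuing the example above, popcount(0b1011) = 3, so the first XFB buffer
 * would land at index 3, immediately after the special buffers. */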

/* Computes the present mask for varyings so we can start emitting varying records */

static inline unsigned
pan_varying_present(
        struct panfrost_shader_state *vs,
        struct panfrost_shader_state *fs,
        unsigned quirks)
{
        /* At the moment we always emit general and position buffers. Not
         * strictly necessary but usually harmless */

        unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);

        /* Enable special buffers by the shader info */

        if (vs->writes_point_size)
                present |= (1 << PAN_VARY_PSIZ);

        if (fs->reads_point_coord)
                present |= (1 << PAN_VARY_PNTCOORD);

        if (fs->reads_face)
                present |= (1 << PAN_VARY_FACE);

        if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
                present |= (1 << PAN_VARY_FRAGCOORD);

        /* Also, if we have a point sprite, we need a point coord buffer */

        for (unsigned i = 0; i < fs->varying_count; i++) {
                gl_varying_slot loc = fs->varyings_loc[i];

                if (has_point_coord(fs->point_sprite_mask, loc))
                        present |= (1 << PAN_VARY_PNTCOORD);
        }

        return present;
}

/* Emitters for varying records */

static struct mali_attr_meta
pan_emit_vary(unsigned present, enum pan_special_varying buf,
              unsigned quirks, enum mali_format format,
              unsigned offset)
{
        unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);

        struct mali_attr_meta meta = {
                .index = pan_varying_index(present, buf),
                .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
                .swizzle = quirks & HAS_SWIZZLES ?
                           panfrost_get_default_swizzle(nr_channels) :
                           panfrost_bifrost_swizzle(nr_channels),
                .format = format,
                .src_offset = offset
        };

        return meta;
}

/* General varying that is unused */

static struct mali_attr_meta
pan_emit_vary_only(unsigned present, unsigned quirks)
{
        return pan_emit_vary(present, 0, quirks, MALI_VARYING_DISCARD, 0);
}

/* Special records */

static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
        [PAN_VARY_POSITION]     = MALI_VARYING_POS,
        [PAN_VARY_PSIZ]         = MALI_R16F,
        [PAN_VARY_PNTCOORD]     = MALI_R16F,
        [PAN_VARY_FACE]         = MALI_R32I,
        [PAN_VARY_FRAGCOORD]    = MALI_RGBA32F
};

static struct mali_attr_meta
pan_emit_vary_special(unsigned present, enum pan_special_varying buf,
                      unsigned quirks)
{
        assert(buf < PAN_VARY_MAX);
        return pan_emit_vary(present, buf, quirks, pan_varying_formats[buf], 0);
}

static enum mali_format
pan_xfb_format(enum mali_format format, unsigned nr)
{
        if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
                return MALI_R32F | MALI_NR_CHANNELS(nr);
        else
                return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) |
                       MALI_CHANNEL_32;
}
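
/* So a varying interpolated at fp16 is still captured as 32-bit floats with
 * the captured component count, since XFB output is specified in highp. */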

/* Transform feedback records. Note struct pipe_stream_output is (if packed as
 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
 * value */

static struct mali_attr_meta
pan_emit_vary_xfb(unsigned present,
                  unsigned max_xfb,
                  unsigned *streamout_offsets,
                  unsigned quirks,
                  enum mali_format format,
                  struct pipe_stream_output o)
{
        /* Otherwise construct a record for it */
        struct mali_attr_meta meta = {
                /* XFB buffers come after everything else */
                .index = pan_xfb_base(present) + o.output_buffer,

                /* As usual unknown bit */
                .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,

                /* Override swizzle with number of channels */
                .swizzle = quirks & HAS_SWIZZLES ?
                           panfrost_get_default_swizzle(o.num_components) :
                           panfrost_bifrost_swizzle(o.num_components),

                /* Override number of channels and precision to highp */
                .format = pan_xfb_format(format, o.num_components),

                /* Apply given offsets together */
                .src_offset = (o.dst_offset * 4) /* dwords */
                              + streamout_offsets[o.output_buffer]
        };

        return meta;
}

/* Determine if we should capture a varying for XFB. This requires actually
 * having a buffer for it. If we don't capture it, we'll fallback to a general
 * varying path (linked or unlinked, possibly discarding the write) */

static bool
panfrost_xfb_captured(struct panfrost_shader_state *xfb,
                      unsigned loc, unsigned max_xfb)
{
        if (!(xfb->so_mask & (1ll << loc)))
                return false;

        struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
        return o->output_buffer < max_xfb;
}
/* Higher-level wrapper around all of the above, classifying a varying into one
 * of the above types */

static struct mali_attr_meta
panfrost_emit_varying(
        struct panfrost_shader_state *stage,
        struct panfrost_shader_state *other,
        struct panfrost_shader_state *xfb,
        unsigned present,
        unsigned max_xfb,
        unsigned *streamout_offsets,
        unsigned quirks,
        unsigned *gen_offsets,
        enum mali_format *gen_formats,
        unsigned *gen_stride,
        unsigned idx,
        bool should_alloc,
        bool is_fragment)
{
        gl_varying_slot loc = stage->varyings_loc[idx];
        enum mali_format format = stage->varyings[idx];

        /* Override format to match linkage */
        if (!should_alloc && gen_formats[idx])
                format = gen_formats[idx];

        if (has_point_coord(stage->point_sprite_mask, loc)) {
                return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
        } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
                struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
                return pan_emit_vary_xfb(present, max_xfb, streamout_offsets,
                                         quirks, format, *o);
        } else if (loc == VARYING_SLOT_POS) {
                if (is_fragment)
                        return pan_emit_vary_special(present, PAN_VARY_FRAGCOORD, quirks);
                else
                        return pan_emit_vary_special(present, PAN_VARY_POSITION, quirks);
        } else if (loc == VARYING_SLOT_PSIZ) {
                return pan_emit_vary_special(present, PAN_VARY_PSIZ, quirks);
        } else if (loc == VARYING_SLOT_PNTC) {
                return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
        } else if (loc == VARYING_SLOT_FACE) {
                return pan_emit_vary_special(present, PAN_VARY_FACE, quirks);
        }

        /* We've exhausted special cases, so it's a general varying. Check if
         * we're linked */

        signed other_idx = -1;

        for (unsigned j = 0; j < other->varying_count; ++j) {
                if (other->varyings_loc[j] == loc) {
                        other_idx = j;
                        break;
                }
        }

        if (other_idx < 0)
                return pan_emit_vary_only(present, quirks);

        unsigned offset = gen_offsets[other_idx];

        if (should_alloc) {
                /* We're linked, so allocate a space via a watermark allocation */
                enum mali_format alt = other->varyings[other_idx];

                /* Do interpolation at minimum precision */
                unsigned size_main = pan_varying_size(format);
                unsigned size_alt = pan_varying_size(alt);
                unsigned size = MIN2(size_main, size_alt);

                /* If a varying is marked for XFB but not actually captured, we
                 * should match the format to the format that would otherwise
                 * be used for XFB, since dEQP checks for invariance here. It's
                 * unclear if this is required by the spec. */

                if (xfb->so_mask & (1ull << loc)) {
                        struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
                        format = pan_xfb_format(format, o->num_components);
                        size = pan_varying_size(format);
                } else if (size == size_alt) {
                        format = alt;
                }

                gen_offsets[idx] = *gen_stride;
                gen_formats[other_idx] = format;
                offset = *gen_stride;
                *gen_stride += size;
        }

        return pan_emit_vary(present, PAN_VARY_GENERAL, quirks, format, offset);
}
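
/* Usage sketch (inferred from the call sites in
 * panfrost_emit_varying_descriptor below): this runs in two passes. The
 * vertex pass uses should_alloc = true, performing the watermark allocation
 * and recording each slot's offset/format in gen_offsets/gen_formats; the
 * fragment pass then uses should_alloc = false and is_fragment = true,
 * reusing the recorded linkage so both stages agree on where each general
 * varying lives. */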
static void
pan_emit_special_input(union mali_attr *varyings,
                       unsigned present,
                       enum pan_special_varying v,
                       mali_ptr addr)
{
        if (present & (1 << v)) {
                /* Ensure we write exactly once for performance and with fields
                 * zeroed appropriately to avoid flakes */

                union mali_attr s = {
                        .elements = addr
                };

                varyings[pan_varying_index(present, v)] = s;
        }
}
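
/* For example, pan_emit_special_input(varyings, present, PAN_VARY_FACE,
 * MALI_VARYING_FRONT_FACING) points the FACE record at the front-facing
 * magic address, and is a no-op when the fragment shader never reads
 * gl_FrontFacing (the bit being absent from `present`). */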
void
panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
                                 unsigned vertex_count,
                                 struct mali_vertex_tiler_postfix *vertex_postfix,
                                 struct mali_vertex_tiler_postfix *tiler_postfix,
                                 union midgard_primitive_size *primitive_size)
{
        /* Load the shaders */
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_shader_state *vs, *fs;
        size_t vs_size, fs_size;

        /* Allocate the varying descriptor */

        vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
        fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
        vs_size = sizeof(struct mali_attr_meta) * vs->varying_count;
        fs_size = sizeof(struct mali_attr_meta) * fs->varying_count;

        struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
                                                             vs_size + fs_size);

        struct pipe_stream_output_info *so = &vs->stream_output;
        unsigned present = pan_varying_present(vs, fs, dev->quirks);

        /* Check if this varying is linked by us. This is the case for
         * general-purpose, non-captured varyings. If it is, link it. If it's
         * not, use the provided stream out information to determine the
         * offset, since it was already linked for us. */

        unsigned gen_offsets[32];
        enum mali_format gen_formats[32];
        memset(gen_offsets, 0, sizeof(gen_offsets));
        memset(gen_formats, 0, sizeof(gen_formats));

        unsigned gen_stride = 0;
        assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
        assert(fs->varying_count < ARRAY_SIZE(gen_offsets));

        unsigned streamout_offsets[32];

        for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
                streamout_offsets[i] = panfrost_streamout_offset(
                                        so->stride[i],
                                        ctx->streamout.offsets[i],
                                        ctx->streamout.targets[i]);
        }

        struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
        struct mali_attr_meta *ofs = ovs + vs->varying_count;

        for (unsigned i = 0; i < vs->varying_count; i++) {
                ovs[i] = panfrost_emit_varying(vs, fs, vs, present,
                                               ctx->streamout.num_targets,
                                               streamout_offsets, dev->quirks,
                                               gen_offsets, gen_formats,
                                               &gen_stride, i, true, false);
        }

        for (unsigned i = 0; i < fs->varying_count; i++) {
                ofs[i] = panfrost_emit_varying(fs, vs, vs, present,
                                               ctx->streamout.num_targets,
                                               streamout_offsets, dev->quirks,
                                               gen_offsets, gen_formats,
                                               &gen_stride, i, false, true);
        }

        unsigned xfb_base = pan_xfb_base(present);
        struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
                        sizeof(union mali_attr) * (xfb_base + ctx->streamout.num_targets));
        union mali_attr *varyings = (union mali_attr *) T.cpu;

        /* Emit the stream out buffers */

        unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
                                                           ctx->vertex_count);

        for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
                panfrost_emit_streamout(batch, &varyings[xfb_base + i],
                                        so->stride[i],
                                        ctx->streamout.offsets[i],
                                        out_count,
                                        ctx->streamout.targets[i]);
        }

        panfrost_emit_varyings(batch,
                        &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
                        gen_stride, vertex_count);

        /* fp32 vec4 gl_Position */
        tiler_postfix->position_varying = panfrost_emit_varyings(batch,
                        &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
                        sizeof(float) * 4, vertex_count);

        if (present & (1 << PAN_VARY_PSIZ)) {
                /* fp16 point size, two bytes per vertex */
                primitive_size->pointer = panfrost_emit_varyings(batch,
                                &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
                                2, vertex_count);
        }

        pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_VARYING_POINT_COORD);
        pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_VARYING_FRONT_FACING);
        pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_VARYING_FRAG_COORD);

        vertex_postfix->varyings = T.gpu;
        tiler_postfix->varyings = T.gpu;

        vertex_postfix->varying_meta = trans.gpu;
        tiler_postfix->varying_meta = trans.gpu + vs_size;
}
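
/* Resulting layout of T (summary of the code above): records [0, xfb_base)
 * are the present special/general varying buffers in pan_varying_index()
 * order, followed by one stream-out record per target in
 * [xfb_base, xfb_base + num_targets), matching the indices emitted by
 * pan_emit_vary() and pan_emit_vary_xfb(). */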
void
panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
                                struct mali_vertex_tiler_prefix *vertex_prefix,
                                struct mali_vertex_tiler_postfix *vertex_postfix,
                                struct mali_vertex_tiler_prefix *tiler_prefix,
                                struct mali_vertex_tiler_postfix *tiler_postfix,
                                union midgard_primitive_size *primitive_size)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_device *device = pan_device(ctx->base.screen);
        bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
        struct bifrost_payload_vertex bifrost_vertex = {0,};
        struct bifrost_payload_tiler bifrost_tiler = {0,};
        struct midgard_payload_vertex_tiler midgard_vertex = {0,};
        struct midgard_payload_vertex_tiler midgard_tiler = {0,};
        void *vp, *tp;
        size_t vp_size, tp_size;

        if (device->quirks & IS_BIFROST) {
                bifrost_vertex.prefix = *vertex_prefix;
                bifrost_vertex.postfix = *vertex_postfix;
                vp = &bifrost_vertex;
                vp_size = sizeof(bifrost_vertex);

                bifrost_tiler.prefix = *tiler_prefix;
                bifrost_tiler.tiler.primitive_size = *primitive_size;
                bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
                bifrost_tiler.postfix = *tiler_postfix;
                tp = &bifrost_tiler;
                tp_size = sizeof(bifrost_tiler);
        } else {
                midgard_vertex.prefix = *vertex_prefix;
                midgard_vertex.postfix = *vertex_postfix;
                vp = &midgard_vertex;
                vp_size = sizeof(midgard_vertex);

                midgard_tiler.prefix = *tiler_prefix;
                midgard_tiler.postfix = *tiler_postfix;
                midgard_tiler.primitive_size = *primitive_size;
                tp = &midgard_tiler;
                tp_size = sizeof(midgard_tiler);
        }

        if (wallpapering) {
                /* Inject in reverse order, with "predicted" job indices.
                 * THIS IS A HACK XXX */
                panfrost_new_job(&batch->pool, &batch->scoreboard, JOB_TYPE_TILER, false,
                                 batch->scoreboard.job_index + 2, tp, tp_size, true);
                panfrost_new_job(&batch->pool, &batch->scoreboard, JOB_TYPE_VERTEX, false, 0,
                                 vp, vp_size, true);
                return;
        }

        /* If rasterizer discard is enabled, only submit the vertex job */

        bool rasterizer_discard = ctx->rasterizer &&
                                  ctx->rasterizer->base.rasterizer_discard;

        unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard,
                                           JOB_TYPE_VERTEX, false, 0,
                                           vp, vp_size, false);

        if (rasterizer_discard)
                return;

        panfrost_new_job(&batch->pool, &batch->scoreboard, JOB_TYPE_TILER, false,
                         vertex, tp, tp_size, false);
}
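
/* Scoreboarding note (a reading of the calls above): in the normal path the
 * tiler job lists the freshly created vertex job's index as its dependency,
 * so the hardware runs vertex then tiler. The wallpapering path injects the
 * jobs in reverse and has to predict the vertex job's index as
 * job_index + 2 before either job exists, hence the HACK marker. */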
/* TODO: stop hardcoding this */
static mali_ptr
panfrost_emit_sample_locations(struct panfrost_batch *batch)
{
        uint16_t locations[] = {
                /* Hardcoded sample-position table elided in this excerpt;
                 * 96 uint16_t entries are uploaded below */
        };

        return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
}