/*
 * Copyright (C) 2018 Alyssa Rosenzweig
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "util/macros.h"
#include "util/u_prim.h"
#include "util/u_vbuf.h"

#include "panfrost-quirks.h"

#include "pan_cmdstream.h"
#include "pan_context.h"
/* If a BO is accessed for a particular shader stage, will it be in the primary
 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
 * fragment will be primary, e.g. compute jobs will be considered
 * "vertex/tiler" by analogy */

static inline uint32_t
panfrost_bo_access_for_stage(enum pipe_shader_type stage)
{
        assert(stage == PIPE_SHADER_FRAGMENT ||
               stage == PIPE_SHADER_VERTEX ||
               stage == PIPE_SHADER_COMPUTE);

        return stage == PIPE_SHADER_FRAGMENT ?
               PAN_BO_ACCESS_FRAGMENT :
               PAN_BO_ACCESS_VERTEX_TILER;
}
static void
panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
                               struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        unsigned shift = panfrost_get_stack_shift(batch->stack_size);
        struct mali_shared_memory shared = {
                .scratchpad = panfrost_batch_get_scratchpad(batch, shift,
                                dev->thread_tls_alloc, dev->core_count)->gpu,
                .shared_workgroup_count = ~0,
        };
        postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared,
                                                      sizeof(shared));
}
static void
panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
                               struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
}
static void
panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
                              struct mali_vertex_tiler_prefix *prefix,
                              struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_rasterizer *rasterizer = ctx->rasterizer;

        postfix->gl_enables |= 0x7;
        SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
                rasterizer && rasterizer->base.front_ccw);
        SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
                rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
        SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
                rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
        SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
                rasterizer && rasterizer->base.flatshade_first);
}
void
panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
                                  struct mali_vertex_tiler_prefix *prefix,
                                  union midgard_primitive_size *primitive_size)
{
        struct panfrost_rasterizer *rasterizer = ctx->rasterizer;

        if (!panfrost_writes_point_size(ctx)) {
                bool points = prefix->draw_mode == MALI_DRAW_MODE_POINTS;
                float val = 0.0f;

                if (rasterizer)
                        val = points ?
                              rasterizer->base.point_size :
                              rasterizer->base.line_width;

                primitive_size->constant = val;
        }
}
static void
panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
                                   struct mali_vertex_tiler_postfix *postfix)
{
        SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
        if (ctx->occlusion_query) {
                postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
                panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_RW |
                                      PAN_BO_ACCESS_FRAGMENT);
        } else {
                postfix->occlusion_counter = 0;
        }
}
void
panfrost_vt_init(struct panfrost_context *ctx,
                 enum pipe_shader_type stage,
                 struct mali_vertex_tiler_prefix *prefix,
                 struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_device *device = pan_device(ctx->base.screen);

        if (!ctx->shader[stage])
                return;

        memset(prefix, 0, sizeof(*prefix));
        memset(postfix, 0, sizeof(*postfix));

        if (device->quirks & IS_BIFROST) {
                postfix->gl_enables = 0x2;
                panfrost_vt_emit_shared_memory(ctx, postfix);
        } else {
                postfix->gl_enables = 0x6;
                panfrost_vt_attach_framebuffer(ctx, postfix);
        }

        if (stage == PIPE_SHADER_FRAGMENT) {
                panfrost_vt_update_occlusion_query(ctx, postfix);
                panfrost_vt_update_rasterizer(ctx, prefix, postfix);
        }
}
static unsigned
panfrost_translate_index_size(unsigned size)
{
        switch (size) {
        case 1:
                return MALI_DRAW_INDEXED_UINT8;

        case 2:
                return MALI_DRAW_INDEXED_UINT16;

        case 4:
                return MALI_DRAW_INDEXED_UINT32;

        default:
                unreachable("Invalid index size");
        }
}
/* Gets a GPU address for the associated index buffer. Only guaranteed to be
 * good for the duration of the draw (transient), though it could last longer.
 * Also gets the bounds on the index buffer for the range accessed by the
 * draw. We do these operations together because there are natural
 * optimizations which require them to be together. */
static mali_ptr
panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
                                  const struct pipe_draw_info *info,
                                  unsigned *min_index, unsigned *max_index)
{
        struct panfrost_resource *rsrc = pan_resource(info->index.resource);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        off_t offset = info->start * info->index_size;
        bool needs_indices = true;
        mali_ptr out = 0;

        if (info->max_index != ~0u) {
                /* Driver state already provides the range */
                *min_index = info->min_index;
                *max_index = info->max_index;
                needs_indices = false;
        }

        if (!info->has_user_indices) {
                /* Only resources can be directly mapped */
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      PAN_BO_ACCESS_VERTEX_TILER);
                out = rsrc->bo->gpu + offset;

                /* Check the cache */
                needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
                                                           info->start,
                                                           info->count,
                                                           min_index,
                                                           max_index);
        } else {
                /* Otherwise, we need to upload to transient memory */
                const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
                out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
                                           info->count * info->index_size);
        }

        if (needs_indices) {
                /* Fallback */
                u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);

                if (!info->has_user_indices)
                        panfrost_minmax_cache_add(rsrc->index_cache,
                                                  info->start, info->count,
                                                  *min_index, *max_index);
        }

        return out;
}
void
panfrost_vt_set_draw_info(struct panfrost_context *ctx,
                          const struct pipe_draw_info *info,
                          enum mali_draw_mode draw_mode,
                          struct mali_vertex_tiler_postfix *vertex_postfix,
                          struct mali_vertex_tiler_prefix *tiler_prefix,
                          struct mali_vertex_tiler_postfix *tiler_postfix,
                          unsigned *vertex_count,
                          unsigned *padded_count)
{
        tiler_prefix->draw_mode = draw_mode;

        unsigned draw_flags = 0;

        if (panfrost_writes_point_size(ctx))
                draw_flags |= MALI_DRAW_VARYING_SIZE;

        if (info->primitive_restart)
                draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;

        /* These don't make much sense */

        draw_flags |= 0x3000;

        if (info->index_size) {
                unsigned min_index = 0, max_index = 0;

                tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
                                                                          info,
                                                                          &min_index,
                                                                          &max_index);

                /* Use the corresponding values */
                *vertex_count = max_index - min_index + 1;
                tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
                tiler_prefix->offset_bias_correction = -min_index;
                tiler_prefix->index_count = MALI_POSITIVE(info->count);
                draw_flags |= panfrost_translate_index_size(info->index_size);
        } else {
                tiler_prefix->indices = 0;
                *vertex_count = ctx->vertex_count;
                tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
                tiler_prefix->offset_bias_correction = 0;
                tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
        }

        tiler_prefix->unknown_draw = draw_flags;

        /* Encode the padded vertex count */

        if (info->instance_count > 1) {
                *padded_count = panfrost_padded_vertex_count(*vertex_count);

                unsigned shift = __builtin_ctz(ctx->padded_count);
                unsigned k = ctx->padded_count >> (shift + 1);

                tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
                tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
        } else {
                *padded_count = *vertex_count;

                /* Reset instancing state */
                tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
                tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
        }
}
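/* A worked example of the encoding above, for illustration only, assuming
 * ctx->padded_count holds the padded count just computed and that
 * panfrost_padded_vertex_count() returns a value of the form (2k + 1) << shift:
 *
 *     padded_count = 12 = 0b1100
 *     shift        = __builtin_ctz(12)  = 2
 *     k            = 12 >> (2 + 1)      = 1
 *     decode       = (2*1 + 1) << 2     = 12   (round-trips)
 *
 * i.e. an odd factor times a power of two, so instance_shift/instance_odd
 * are enough for the hardware to reconstruct the full padded count. */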
static void
panfrost_shader_meta_init(struct panfrost_context *ctx,
                          enum pipe_shader_type st,
                          struct mali_shader_meta *meta)
{
        const struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);

        memset(meta, 0, sizeof(*meta));
        meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
        meta->attribute_count = ss->attribute_count;
        meta->varying_count = ss->varying_count;
        meta->texture_count = ctx->sampler_view_count[st];
        meta->sampler_count = ctx->sampler_count[st];

        if (dev->quirks & IS_BIFROST) {
                if (st == PIPE_SHADER_VERTEX)
                        meta->bifrost1.unk1 = 0x800000;
                else {
                        /* First clause ATEST |= 0x4000000.
                         * Less than 32 regs |= 0x200 */
                        meta->bifrost1.unk1 = 0x950020;
                }

                meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
                if (st == PIPE_SHADER_VERTEX)
                        meta->bifrost2.preload_regs = 0xC0;
                else {
                        meta->bifrost2.preload_regs = 0x1;
                        SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
                }

                meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
                                                    ss->uniform_cutoff);
        } else {
                meta->midgard1.uniform_count = MIN2(ss->uniform_count,
                                                    ss->uniform_cutoff);
                meta->midgard1.work_count = ss->work_reg_count;

                /* TODO: This is not conformant on ES3 */
                meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;

                meta->midgard1.flags_lo = 0x20;
                meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);

                SET_BIT(meta->midgard1.flags_hi, MALI_WRITES_GLOBAL, ss->writes_global);
        }
}
static enum mali_wrap_mode
translate_tex_wrap(enum pipe_tex_wrap w)
{
        switch (w) {
        case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
        case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
        case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
        case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
        case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
        case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
        case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
        case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
        default: unreachable("Invalid wrap");
        }
}
/* The hardware compares in the wrong order, so we have to flip before
 * encoding. Yes, really. */
static enum mali_func
panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
{
        if (!cso->compare_mode)
                return MALI_FUNC_NEVER;

        enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
        return panfrost_flip_compare_func(f);
}
static enum mali_mipmap_mode
pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
{
        switch (f) {
        case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
        case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
        case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
        default: unreachable("Invalid");
        }
}
void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
                                struct mali_midgard_sampler_packed *hw)
{
        pan_pack(hw, MIDGARD_SAMPLER, cfg) {
                cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
                cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
                cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
                        MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
                cfg.normalized_coordinates = cso->normalized_coords;

                cfg.lod_bias = FIXED_16(cso->lod_bias, true);

                cfg.minimum_lod = FIXED_16(cso->min_lod, false);

                /* If necessary, we disable mipmapping in the sampler descriptor by
                 * clamping the LOD as tight as possible (from 0 to epsilon,
                 * essentially -- remember these are fixed point numbers, so
                 * epsilon = 1/256) */

                cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
                        cfg.minimum_lod + 1 :
                        FIXED_16(cso->max_lod, false);

                cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
                cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
                cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);

                cfg.compare_function = panfrost_sampler_compare_func(cso);
                cfg.seamless_cube_map = cso->seamless_cube_map;

                cfg.border_color_r = cso->border_color.f[0];
                cfg.border_color_g = cso->border_color.f[1];
                cfg.border_color_b = cso->border_color.f[2];
                cfg.border_color_a = cso->border_color.f[3];
        }
}
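/* For illustration: with MIPFILTER_NONE, the clamp above collapses the LOD
 * range to a single fixed-point step. Assuming FIXED_16 packs an 8.8
 * fixed-point value (8 fractional bits), one ULP is 1/256 of a mip level:
 *
 *     minimum_lod = FIXED_16(0.0, false) = 0
 *     maximum_lod = minimum_lod + 1      = epsilon = 1/256 of a level
 *
 * which pins sampling to a single mip level while keeping min < max. */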
void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
                                        struct mali_bifrost_sampler_packed *hw)
{
        pan_pack(hw, BIFROST_SAMPLER, cfg) {
                cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
                cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
                cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
                cfg.normalized_coordinates = cso->normalized_coords;

                cfg.lod_bias = FIXED_16(cso->lod_bias, true);
                cfg.minimum_lod = FIXED_16(cso->min_lod, false);
                cfg.maximum_lod = FIXED_16(cso->max_lod, false);

                cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
                cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
                cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);

                cfg.compare_function = panfrost_sampler_compare_func(cso);
                cfg.seamless_cube_map = cso->seamless_cube_map;
        }
}
static void
panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
                                     struct mali_shader_meta *fragmeta)
{
        if (!ctx->rasterizer) {
                SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
                SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
                fragmeta->depth_units = 0.0f;
                fragmeta->depth_factor = 0.0f;
                SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
                SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, true);
                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, true);
                return;
        }

        struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;

        bool msaa = rast->multisample;

        /* TODO: Sample size */
        SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
        SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);

        struct panfrost_shader_state *fs;
        fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);

        /* EXT_shader_framebuffer_fetch requires the shader to be run
         * per-sample when outputs are read. */
        bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
        SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);

        fragmeta->depth_units = rast->offset_units * 2.0f;
        fragmeta->depth_factor = rast->offset_scale;

        /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */

        SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
        SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);

        SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
        SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
}
static void
panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
                              struct mali_shader_meta *fragmeta)
{
        const struct panfrost_zsa_state *so = ctx->depth_stencil;
        int zfunc = PIPE_FUNC_ALWAYS;

        if (!so) {
                /* If stenciling is disabled, the state is irrelevant */
                SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
        } else {
                SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
                        so->base.stencil[0].enabled);

                fragmeta->stencil_mask_front = so->stencil_mask_front;
                fragmeta->stencil_mask_back = so->stencil_mask_back;

                /* Bottom bits for stencil ref, exactly one word */
                fragmeta->stencil_front.opaque[0] = so->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];

                /* If back-stencil is not enabled, use the front values */

                if (so->base.stencil[1].enabled)
                        fragmeta->stencil_back.opaque[0] = so->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
                else
                        fragmeta->stencil_back = fragmeta->stencil_front;

                if (so->base.depth.enabled)
                        zfunc = so->base.depth.func;

                /* Depth state (TODO: Refactor) */

                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
                        so->base.depth.writemask);
        }

        fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
        fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
}
static bool
panfrost_fs_required(
                struct panfrost_shader_state *fs,
                struct panfrost_blend_final *blend,
                unsigned rt_count)
{
        /* If we generally have side effects */
        if (fs->fs_sidefx)
                return true;

        /* If colour is written we need to execute */
        for (unsigned i = 0; i < rt_count; ++i) {
                if (!blend[i].no_colour)
                        return true;
        }

        /* If depth is written and not implied we need to execute.
         * TODO: Predicate on Z/S writes being enabled */
        return (fs->writes_depth || fs->writes_stencil);
}
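/* For example (hypothetical state, for illustration only): a depth-only
 * shadow-map pass where every render target sets no_colour, the shader has
 * no side effects, and depth comes from fixed-function rasterization would
 * return false here, letting MIDGARD_SHADERLESS hardware skip fragment
 * shader execution entirely (see panfrost_frag_meta_blend_update below). */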
static void
panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
                                struct mali_shader_meta *fragmeta,
                                void *rts)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        const struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_shader_state *fs;
        fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);

        SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
                (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
                !ctx->blend->base.dither);

        SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
                ctx->blend->base.alpha_to_coverage);

        /* Get blending setup */
        unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);

        struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
        unsigned shader_offset = 0;
        struct panfrost_bo *shader_bo = NULL;

        for (unsigned c = 0; c < rt_count; ++c)
                blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
                                                          &shader_offset);

        /* Disable shader execution if we can */
        if (dev->quirks & MIDGARD_SHADERLESS
            && !panfrost_fs_required(fs, blend, rt_count)) {
                fragmeta->shader = 0;
                fragmeta->attribute_count = 0;
                fragmeta->varying_count = 0;
                fragmeta->texture_count = 0;
                fragmeta->sampler_count = 0;

                /* This feature is not known to work on Bifrost */
                fragmeta->midgard1.work_count = 1;
                fragmeta->midgard1.uniform_count = 0;
                fragmeta->midgard1.uniform_buffer_count = 0;
        }

        /* If there is a blend shader, work registers are shared. We impose 8
         * work registers as a limit for blend shaders. Should be lower XXX */

        if (!(dev->quirks & IS_BIFROST)) {
                for (unsigned c = 0; c < rt_count; ++c) {
                        if (blend[c].is_shader) {
                                fragmeta->midgard1.work_count =
                                        MAX2(fragmeta->midgard1.work_count, 8);
                        }
                }
        }

        /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
         * copied to the blend_meta appended (by convention), but this is the
         * field actually read by the hardware. (Or maybe both are read...?).
         * Specify the last RTi with a blend shader. */

        fragmeta->blend.shader = 0;

        for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
                if (!blend[rt].is_shader)
                        continue;

                fragmeta->blend.shader = blend[rt].shader.gpu |
                                         blend[rt].shader.first_tag;
                break;
        }

        if (dev->quirks & MIDGARD_SFBD) {
                /* When only a single render target platform is used, the blend
                 * information is inside the shader meta itself. We additionally
                 * need to signal CAN_DISCARD for nontrivial blend modes (so
                 * we're able to read back the destination buffer) */

                SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
                        blend[0].is_shader);

                if (!blend[0].is_shader) {
                        fragmeta->blend.equation = *blend[0].equation.equation;
                        fragmeta->blend.constant = blend[0].equation.constant;
                }

                SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
                        !blend[0].no_blending || fs->can_discard);

                batch->draws |= PIPE_CLEAR_COLOR0;
                return;
        }

        if (dev->quirks & IS_BIFROST) {
                bool no_blend = true;

                for (unsigned i = 0; i < rt_count; ++i)
                        no_blend &= (blend[i].no_blending | blend[i].no_colour);

                SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
                        !fs->can_discard && !fs->writes_depth && no_blend);
        }

        /* Additional blend descriptor tacked on for jobs using MFBD */

        for (unsigned i = 0; i < rt_count; ++i) {
                unsigned flags = 0;

                if (ctx->pipe_framebuffer.nr_cbufs > i && !blend[i].no_colour) {
                        flags = 0x200;
                        batch->draws |= (PIPE_CLEAR_COLOR0 << i);

                        bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
                                       (ctx->pipe_framebuffer.cbufs[i]) &&
                                       util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);

                        SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
                        SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
                        SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
                        SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
                }

                if (dev->quirks & IS_BIFROST) {
                        struct bifrost_blend_rt *brts = rts;

                        brts[i].flags = flags;

                        if (blend[i].is_shader) {
                                /* The blend shader's address needs to be at
                                 * the same top 32 bit as the fragment shader.
                                 * TODO: Ensure that's always the case.
                                 */
                                assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
                                       (fs->bo->gpu & (0xffffffffull << 32)));
                                brts[i].shader = blend[i].shader.gpu;
                                brts[i].unk2 = 0x0;
                        } else if (ctx->pipe_framebuffer.nr_cbufs > i) {
                                enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
                                const struct util_format_description *format_desc;
                                format_desc = util_format_description(format);

                                brts[i].equation = *blend[i].equation.equation;

                                /* TODO: this is a bit more complicated */
                                brts[i].constant = blend[i].equation.constant;

                                brts[i].format = panfrost_format_to_bifrost_blend(format_desc);

                                /* 0x19 disables blending and forces REPLACE
                                 * mode (equivalent to rgb_mode = alpha_mode =
                                 * x122, colour mask = 0xF). 0x1a allows
                                 * blending. */
                                brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;

                                brts[i].shader_type = fs->blend_types[i];
                        } else {
                                /* Dummy attachment for depth-only */
                                brts[i].unk2 = 0x3;
                                brts[i].shader_type = fs->blend_types[i];
                        }
                } else {
                        struct midgard_blend_rt *mrts = rts;
                        mrts[i].flags = flags;

                        if (blend[i].is_shader) {
                                mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
                        } else {
                                mrts[i].blend.equation = *blend[i].equation.equation;
                                mrts[i].blend.constant = blend[i].equation.constant;
                        }
                }
        }
}
static void
panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
                               struct mali_shader_meta *fragmeta,
                               void *rts)
{
        const struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_shader_state *fs;

        fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);

        bool msaa = ctx->rasterizer && ctx->rasterizer->base.multisample;
        fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;

        fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
        fragmeta->unknown2_4 = 0x4e0;

        /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
         * is required (independent of 32-bit/64-bit descriptors), or why it's
         * not used on later GPU revisions. Otherwise, all shader jobs fault on
         * these earlier chips (perhaps this is a chicken bit of some kind).
         * More investigation is needed. */

        SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);

        if (dev->quirks & IS_BIFROST) {
                /* TODO */
        } else {
                /* Depending on whether it's legal to do so in the given
                 * shader, we try to enable early-z testing.
                 * TODO: respect e-z force */

                SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
                        !fs->can_discard && !fs->writes_global &&
                        !fs->writes_depth && !fs->writes_stencil &&
                        !ctx->blend->base.alpha_to_coverage);

                /* Add the writes Z/S flags if needed. */
                SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
                SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);

                /* Any time texturing is used, derivatives are implicitly calculated,
                 * so we need to enable helper invocations */

                SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
                        fs->helper_invocations);

                /* If discard is enabled, which bit we set to convey this
                 * depends on if depth/stencil is used for the draw or not.
                 * Just one of depth OR stencil is enough to trigger this. */

                const struct pipe_depth_stencil_alpha_state *zsa = &ctx->depth_stencil->base;
                bool zs_enabled = fs->writes_depth || fs->writes_stencil;

                zs_enabled |= (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS);
                zs_enabled |= zsa->stencil[0].enabled;

                SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
                        fs->outputs_read || (!zs_enabled && fs->can_discard));
                SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
        }

        panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
        panfrost_frag_meta_zsa_update(ctx, fragmeta);
        panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
}
void
panfrost_emit_shader_meta(struct panfrost_batch *batch,
                          enum pipe_shader_type st,
                          struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);

        if (!ss) {
                postfix->shader = 0;
                return;
        }

        struct mali_shader_meta meta;

        panfrost_shader_meta_init(ctx, st, &meta);

        /* Add the shader BO to the batch. */
        panfrost_batch_add_bo(batch, ss->bo,
                              PAN_BO_ACCESS_PRIVATE |
                              PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        mali_ptr shader_ptr;

        if (st == PIPE_SHADER_FRAGMENT) {
                struct panfrost_device *dev = pan_device(ctx->base.screen);
                unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
                size_t desc_size = sizeof(meta);
                void *rts = NULL;
                struct panfrost_transfer xfer;
                unsigned rt_size;

                if (dev->quirks & MIDGARD_SFBD)
                        rt_size = 0;
                else if (dev->quirks & IS_BIFROST)
                        rt_size = sizeof(struct bifrost_blend_rt);
                else
                        rt_size = sizeof(struct midgard_blend_rt);

                desc_size += rt_size * rt_count;

                if (rt_size)
                        rts = rzalloc_size(ctx, rt_size * rt_count);

                panfrost_frag_shader_meta_init(ctx, &meta, rts);

                xfer = panfrost_pool_alloc(&batch->pool, desc_size);

                memcpy(xfer.cpu, &meta, sizeof(meta));
                memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);

                if (rt_size)
                        ralloc_free(rts);

                shader_ptr = xfer.gpu;
        } else {
                shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
                                                  sizeof(meta));
        }

        postfix->shader = shader_ptr;
}
void
panfrost_emit_viewport(struct panfrost_batch *batch,
                       struct mali_vertex_tiler_postfix *tiler_postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
        const struct pipe_scissor_state *ss = &ctx->scissor;
        const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
        const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;

        /* Derive min/max from translate/scale. Note since |x| >= 0 by
         * definition, we have that -|x| <= |x| hence translate - |scale| <=
         * translate + |scale|, so the ordering is correct here. */

        float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
        float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
        float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
        float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
        float minz = (vp->translate[2] - fabsf(vp->scale[2]));
        float maxz = (vp->translate[2] + fabsf(vp->scale[2]));

        /* Scissor to the intersection of viewport and to the scissor, clamped
         * to the framebuffer */

        unsigned minx = MIN2(fb->width, vp_minx);
        unsigned maxx = MIN2(fb->width, vp_maxx);
        unsigned miny = MIN2(fb->height, vp_miny);
        unsigned maxy = MIN2(fb->height, vp_maxy);

        if (ss && rast && rast->scissor) {
                minx = MAX2(ss->minx, minx);
                miny = MAX2(ss->miny, miny);
                maxx = MIN2(ss->maxx, maxx);
                maxy = MIN2(ss->maxy, maxy);
        }

        struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);

        pan_pack(T.cpu, VIEWPORT, cfg) {
                cfg.scissor_minimum_x = minx;
                cfg.scissor_minimum_y = miny;
                cfg.scissor_maximum_x = maxx - 1;
                cfg.scissor_maximum_y = maxy - 1;

                cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
                cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
        }

        tiler_postfix->viewport = T.gpu;
        panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
}
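/* For illustration (values assumed, not from the driver): a standard GL
 * viewport of 800x600 at the origin has translate = { 400, 300, z } and
 * scale = { 400, -300, w } (negative Y for the flip), so:
 *
 *     vp_minx = 400 - |400| = 0      vp_maxx = 400 + |400| = 800
 *     vp_miny = 300 - |300| = 0      vp_maxy = 300 + |300| = 600
 *
 * i.e. taking absolute values makes the bounds insensitive to axis flips. */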
static mali_ptr
panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
                                 enum pipe_shader_type st,
                                 struct panfrost_constant_buffer *buf,
                                 unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);

        if (rsrc) {
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      panfrost_bo_access_for_stage(st));

                /* Alignment guaranteed by
                 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
                return rsrc->bo->gpu + cb->buffer_offset;
        } else if (cb->user_buffer) {
                return panfrost_pool_upload(&batch->pool,
                                            cb->user_buffer + cb->buffer_offset,
                                            cb->buffer_size);
        } else {
                unreachable("No constant buffer");
        }
}
struct sysval_uniform {
        union {
                float f[4];
                int32_t i[4];
                uint32_t u[4];
                uint64_t du[2];
        };
};
static void
panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
                                      struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->scale[0];
        uniform->f[1] = vp->scale[1];
        uniform->f[2] = vp->scale[2];
}
static void
panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
                                       struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->translate[0];
        uniform->f[1] = vp->translate[1];
        uniform->f[2] = vp->translate[2];
}
static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
                                       enum pipe_shader_type st,
                                       unsigned int sysvalid,
                                       struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
        unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
        bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
        struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;

        assert(dim);
        uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);

        if (dim > 1)
                uniform->i[1] = u_minify(tex->texture->height0,
                                         tex->u.tex.first_level);

        if (dim > 2)
                uniform->i[2] = u_minify(tex->texture->depth0,
                                         tex->u.tex.first_level);

        if (is_array)
                uniform->i[dim] = tex->texture->array_size;
}
static void
panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
                            enum pipe_shader_type st,
                            unsigned ssbo_id,
                            struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;

        assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
        struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];

        /* Compute address */
        struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;

        panfrost_batch_add_bo(batch, bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
                              panfrost_bo_access_for_stage(st));

        /* Upload address and size as sysval */
        uniform->du[0] = bo->gpu + sb.buffer_offset;
        uniform->u[2] = sb.buffer_size;
}
static void
panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
                               enum pipe_shader_type st,
                               unsigned samp_idx,
                               struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;

        uniform->f[0] = sampl->min_lod;
        uniform->f[1] = sampl->max_lod;
        uniform->f[2] = sampl->lod_bias;

        /* Even without any errata, Midgard represents "no mipmapping" as
         * fixing the LOD with the clamps; keep behaviour consistent. c.f.
         * panfrost_create_sampler_state which also explains our choice of
         * epsilon value (again to keep behaviour consistent) */

        if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
                uniform->f[1] = uniform->f[0] + (1.0/256.0);
}
static void
panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
                                       struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;

        uniform->u[0] = ctx->compute_grid->grid[0];
        uniform->u[1] = ctx->compute_grid->grid[1];
        uniform->u[2] = ctx->compute_grid->grid[2];
}
static void
panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
                        struct panfrost_shader_state *ss,
                        enum pipe_shader_type st)
{
        struct sysval_uniform *uniforms = (void *)buf;

        for (unsigned i = 0; i < ss->sysval_count; ++i) {
                int sysval = ss->sysval[i];

                switch (PAN_SYSVAL_TYPE(sysval)) {
                case PAN_SYSVAL_VIEWPORT_SCALE:
                        panfrost_upload_viewport_scale_sysval(batch,
                                                              &uniforms[i]);
                        break;
                case PAN_SYSVAL_VIEWPORT_OFFSET:
                        panfrost_upload_viewport_offset_sysval(batch,
                                                               &uniforms[i]);
                        break;
                case PAN_SYSVAL_TEXTURE_SIZE:
                        panfrost_upload_txs_sysval(batch, st,
                                                   PAN_SYSVAL_ID(sysval),
                                                   &uniforms[i]);
                        break;
                case PAN_SYSVAL_SSBO:
                        panfrost_upload_ssbo_sysval(batch, st,
                                                    PAN_SYSVAL_ID(sysval),
                                                    &uniforms[i]);
                        break;
                case PAN_SYSVAL_NUM_WORK_GROUPS:
                        panfrost_upload_num_work_groups_sysval(batch,
                                                               &uniforms[i]);
                        break;
                case PAN_SYSVAL_SAMPLER:
                        panfrost_upload_sampler_sysval(batch, st,
                                                       PAN_SYSVAL_ID(sysval),
                                                       &uniforms[i]);
                        break;
                default:
                        assert(0);
                }
        }
}
static const void *
panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
                                 unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);

        if (rsrc)
                return rsrc->bo->cpu;
        else if (cb->user_buffer)
                return cb->user_buffer;
        else
                unreachable("No constant buffer");
}
void
panfrost_emit_const_buf(struct panfrost_batch *batch,
                        enum pipe_shader_type stage,
                        struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_variants *all = ctx->shader[stage];

        if (!all)
                return;

        struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];

        struct panfrost_shader_state *ss = &all->variants[all->active_variant];

        /* Uniforms are implicitly UBO #0 */
        bool has_uniforms = buf->enabled_mask & (1 << 0);

        /* Allocate room for the sysval and the uniforms */
        size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
        size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
        size_t size = sys_size + uniform_size;
        struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
                                                                size);

        /* Upload sysvals requested by the shader */
        panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);

        /* Upload uniforms */
        if (has_uniforms && uniform_size) {
                const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
                memcpy(transfer.cpu + sys_size, cpu, uniform_size);
        }

        /* Next up, attach UBOs. UBO #0 is the uniforms we just
         * uploaded */

        unsigned ubo_count = panfrost_ubo_count(ctx, stage);
        assert(ubo_count >= 1);

        size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
        struct panfrost_transfer ubos = panfrost_pool_alloc(&batch->pool, sz);
        uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;

        /* Upload uniforms as a UBO */

        if (ss->uniform_count) {
                pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
                        cfg.entries = ss->uniform_count;
                        cfg.pointer = transfer.gpu;
                }
        } else {
                *ubo_ptr = 0;
        }

        /* The rest are honest-to-goodness UBOs */

        for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
                size_t usz = buf->cb[ubo].buffer_size;
                bool enabled = buf->enabled_mask & (1 << ubo);
                bool empty = usz == 0;

                if (!enabled || empty) {
                        ubo_ptr[ubo] = 0;
                        continue;
                }

                pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
                        cfg.entries = DIV_ROUND_UP(usz, 16);
                        cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
                                        stage, buf, ubo);
                }
        }

        postfix->uniforms = transfer.gpu;
        postfix->uniform_buffers = ubos.gpu;

        buf->dirty_mask = 0;
}
void
panfrost_emit_shared_memory(struct panfrost_batch *batch,
                            const struct pipe_grid_info *info,
                            struct midgard_payload_vertex_tiler *vtp)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
        struct panfrost_shader_state *ss = &all->variants[all->active_variant];
        unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
                                                           128));
        unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
                               info->grid[2] * 4;
        struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
                                                                  shared_size,
                                                                  1);
        struct mali_shared_memory shared = {
                .shared_memory = bo->gpu,
                .shared_workgroup_count =
                        util_logbase2_ceil(info->grid[0]) +
                        util_logbase2_ceil(info->grid[1]) +
                        util_logbase2_ceil(info->grid[2]),
                .shared_shift = util_logbase2(single_size) - 1
        };

        vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
                                                          sizeof(shared));
}
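/* For illustration: a dispatch with grid = { 8, 4, 1 } yields
 *
 *     shared_workgroup_count = log2_ceil(8) + log2_ceil(4) + log2_ceil(1)
 *                            = 3 + 2 + 0 = 5,
 *
 * telling the hardware the workgroup space fits in 2^5 slots; the per-slot
 * allocation granularity is conveyed via shared_shift, derived from
 * single_size above. */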
static mali_ptr
panfrost_get_tex_desc(struct panfrost_batch *batch,
                      enum pipe_shader_type st,
                      struct panfrost_sampler_view *view)
{
        if (!view)
                return (mali_ptr) 0;

        struct pipe_sampler_view *pview = &view->base;
        struct panfrost_resource *rsrc = pan_resource(pview->texture);

        /* Add the BO to the job so it's retained until the job is done. */

        panfrost_batch_add_bo(batch, rsrc->bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        panfrost_batch_add_bo(batch, view->bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        return view->bo->gpu;
}
static void
panfrost_update_sampler_view(struct panfrost_sampler_view *view,
                             struct pipe_context *pctx)
{
        struct panfrost_resource *rsrc = pan_resource(view->base.texture);
        if (view->texture_bo != rsrc->bo->gpu ||
            view->modifier != rsrc->modifier) {
                panfrost_bo_unreference(view->bo);
                panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
        }
}
void
panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
                                  enum pipe_shader_type stage,
                                  struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_device *device = pan_device(ctx->base.screen);

        if (!ctx->sampler_view_count[stage])
                return;

        if (device->quirks & IS_BIFROST) {
                struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
                                MALI_BIFROST_TEXTURE_LENGTH *
                                ctx->sampler_view_count[stage]);

                struct mali_bifrost_texture_packed *out =
                        (struct mali_bifrost_texture_packed *) T.cpu;

                for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
                        struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
                        struct pipe_sampler_view *pview = &view->base;
                        struct panfrost_resource *rsrc = pan_resource(pview->texture);

                        panfrost_update_sampler_view(view, &ctx->base);
                        out[i] = view->bifrost_descriptor;

                        /* Add the BOs to the job so they are retained until the job is done. */

                        panfrost_batch_add_bo(batch, rsrc->bo,
                                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                                              panfrost_bo_access_for_stage(stage));

                        panfrost_batch_add_bo(batch, view->bo,
                                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                                              panfrost_bo_access_for_stage(stage));
                }

                postfix->textures = T.gpu;
        } else {
                uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];

                for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
                        struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];

                        panfrost_update_sampler_view(view, &ctx->base);

                        trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
                }

                postfix->textures = panfrost_pool_upload(&batch->pool,
                                                         trampolines,
                                                         sizeof(uint64_t) *
                                                         ctx->sampler_view_count[stage]);
        }
}
void
panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
                                  enum pipe_shader_type stage,
                                  struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_context *ctx = batch->ctx;

        if (!ctx->sampler_count[stage])
                return;

        size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
        assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);

        size_t sz = desc_size * ctx->sampler_count[stage];
        struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, sz);
        struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;

        for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
                out[i] = ctx->samplers[stage][i]->hw;

        postfix->sampler_descriptor = T.gpu;
}
void
panfrost_emit_vertex_data(struct panfrost_batch *batch,
                          struct mali_vertex_tiler_postfix *vertex_postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_vertex_state *so = ctx->vertex;

        /* Staged mali_attr, and index into them. i =/= k, depending on the
         * vertex buffer mask and instancing. Twice as much room is allocated,
         * for a worst case of NPOT_DIVIDEs which take up an extra slot */
        union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
        unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
        unsigned k = 0;

        for (unsigned i = 0; i < so->num_elements; ++i) {
                /* We map a mali_attr to be 1:1 with the mali_attr_meta, which
                 * means duplicating some vertex buffers (who cares? aside from
                 * maybe some caching implications but I somehow doubt that
                 * matters) */

                struct pipe_vertex_element *elem = &so->pipe[i];
                unsigned vbi = elem->vertex_buffer_index;
                attrib_to_buffer[i] = k;

                if (!(ctx->vb_mask & (1 << vbi)))
                        continue;

                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
                struct panfrost_resource *rsrc;

                rsrc = pan_resource(buf->buffer.resource);
                if (!rsrc)
                        continue;

                /* Align to 64 bytes by masking off the lower bits. This
                 * will be adjusted back when we fixup the src_offset in
                 * mali_attr_meta */

                mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
                mali_ptr addr = raw_addr & ~63;
                unsigned chopped_addr = raw_addr - addr;

                /* Add a dependency of the batch on the vertex buffer */
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      PAN_BO_ACCESS_VERTEX_TILER);

                /* Set common fields */
                attrs[k].elements = addr;
                attrs[k].stride = buf->stride;

                /* Since we advanced the base pointer, we shrink the buffer
                 * size */
                attrs[k].size = rsrc->base.width0 - buf->buffer_offset;

                /* We need to add the extra size we masked off (for
                 * correctness) so the data doesn't get clamped away */
                attrs[k].size += chopped_addr;

                /* For non-instancing make sure we initialize */
                attrs[k].shift = attrs[k].extra_flags = 0;

                /* Instancing uses a dramatically different code path than
                 * linear, so dispatch for the actual emission now that the
                 * common code is finished */

                unsigned divisor = elem->instance_divisor;

                if (divisor && ctx->instance_count == 1) {
                        /* Silly corner case where there's a divisor(=1) but
                         * there's no legitimate instancing. So we want *every*
                         * attribute to be the same. So set stride to zero so
                         * we don't go anywhere. */

                        attrs[k].size = attrs[k].stride + chopped_addr;
                        attrs[k].stride = 0;
                        attrs[k++].elements |= MALI_ATTR_LINEAR;
                } else if (ctx->instance_count <= 1) {
                        /* Normal, non-instanced attributes */
                        attrs[k++].elements |= MALI_ATTR_LINEAR;
                } else {
                        unsigned instance_shift = vertex_postfix->instance_shift;
                        unsigned instance_odd = vertex_postfix->instance_odd;

                        k += panfrost_vertex_instanced(ctx->padded_count,
                                                       instance_shift,
                                                       instance_odd,
                                                       divisor, &attrs[k]);
                }
        }

        /* Add special gl_VertexID/gl_InstanceID buffers */

        struct mali_attr_meta hw[PIPE_MAX_ATTRIBS];

        panfrost_vertex_id(ctx->padded_count, &attrs[k]);
        hw[PAN_VERTEX_ID].index = k++;
        hw[PAN_VERTEX_ID].format = so->formats[PAN_VERTEX_ID];
        hw[PAN_VERTEX_ID].unknown1 = 0x2;

        panfrost_instance_id(ctx->padded_count, &attrs[k]);
        hw[PAN_INSTANCE_ID].index = k++;
        hw[PAN_INSTANCE_ID].format = so->formats[PAN_VERTEX_ID];
        hw[PAN_INSTANCE_ID].unknown1 = 0x2;

        /* Attribute addresses require 64-byte alignment, so let:
         *
         *      base' = base & ~63 = base - (base & 63)
         *      offset' = offset + (base & 63)
         *
         * Since base' + offset' = base + offset, these are equivalent
         * addressing modes and now base is 64 aligned.
         */

        unsigned start = vertex_postfix->offset_start;

        for (unsigned i = 0; i < so->num_elements; ++i) {
                unsigned vbi = so->pipe[i].vertex_buffer_index;
                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];

                /* Adjust by the masked off bits of the offset. Make sure we
                 * read src_offset from so->hw (which is not GPU visible)
                 * rather than target (which is) due to caching effects */

                unsigned src_offset = so->pipe[i].src_offset;

                /* BOs aligned to 4k so guaranteed aligned to 64 */
                src_offset += (buf->buffer_offset & 63);

                /* Also, somewhat obscurely per-instance data needs to be
                 * offset in response to a delayed start in an indexed draw */

                if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
                        src_offset -= buf->stride * start;

                hw[i].src_offset = src_offset;
                hw[i].index = attrib_to_buffer[i];
                hw[i].format = so->formats[i];
                hw[i].unknown1 = 0x2;
        }

        vertex_postfix->attributes = panfrost_pool_upload(&batch->pool, attrs,
                                                          k * sizeof(*attrs));

        vertex_postfix->attribute_meta = panfrost_pool_upload(&batch->pool, hw,
                                                              so->num_elements * sizeof(*hw));
}
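/* For illustration of the 64-byte alignment fixup used above (values
 * hypothetical): if a vertex buffer lives at raw_addr = 0x1004C, then
 * raw_addr & 63 = 0xC, and
 *
 *     addr         = 0x1004C & ~63 = 0x10040   (what the GPU sees)
 *     chopped_addr = 0xC                       (folded back into size)
 *
 * with src_offset bumped by the same 0xC, so base' + offset' still equals
 * base + offset. */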
static mali_ptr
panfrost_emit_varyings(struct panfrost_batch *batch, union mali_attr *slot,
                       unsigned stride, unsigned count)
{
        /* Fill out the descriptor */
        slot->stride = stride;
        slot->size = stride * count;
        slot->shift = slot->extra_flags = 0;

        struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
                                                                slot->size);

        slot->elements = transfer.gpu | MALI_ATTR_LINEAR;

        return transfer.gpu;
}
static unsigned
panfrost_streamout_offset(unsigned stride, unsigned offset,
                          struct pipe_stream_output_target *target)
{
        return (target->buffer_offset + (offset * stride * 4)) & 63;
}
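/* For illustration (values hypothetical): with stride = 4 dwords per vertex
 * and offset = 7 vertices into a target whose buffer_offset is 16 bytes,
 * the byte position is 16 + 7 * 4 * 4 = 128, and 128 & 63 = 0 -- the
 * descriptor base can be 64-byte aligned with no residual src_offset. A
 * buffer_offset of 20 would instead leave 132 & 63 = 4 to apply as offset. */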
static void
panfrost_emit_streamout(struct panfrost_batch *batch, union mali_attr *slot,
                        unsigned stride, unsigned offset, unsigned count,
                        struct pipe_stream_output_target *target)
{
        /* Fill out the descriptor */
        slot->stride = stride * 4;
        slot->shift = slot->extra_flags = 0;

        unsigned max_size = target->buffer_size;
        unsigned expected_size = slot->stride * count;

        /* Grab the BO and bind it to the batch */
        struct panfrost_bo *bo = pan_resource(target->buffer)->bo;

        /* Varyings are WRITE from the perspective of the VERTEX but READ from
         * the perspective of the TILER and FRAGMENT.
         */
        panfrost_batch_add_bo(batch, bo,
                              PAN_BO_ACCESS_SHARED |
                              PAN_BO_ACCESS_RW |
                              PAN_BO_ACCESS_VERTEX_TILER |
                              PAN_BO_ACCESS_FRAGMENT);

        /* We will have an offset applied to get alignment */
        mali_ptr addr = bo->gpu + target->buffer_offset + (offset * slot->stride);
        slot->elements = (addr & ~63) | MALI_ATTR_LINEAR;
        slot->size = MIN2(max_size, expected_size) + (addr & 63);
}
static bool
has_point_coord(unsigned mask, gl_varying_slot loc)
{
        if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
                return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
        else if (loc == VARYING_SLOT_PNTC)
                return (mask & (1 << 8));
        else
                return false;
}
/* Helpers for manipulating stream out information so we can pack varyings
 * accordingly. Compute the src_offset for a given captured varying */

static struct pipe_stream_output *
pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
{
        for (unsigned i = 0; i < info->num_outputs; ++i) {
                if (info->output[i].register_index == loc)
                        return &info->output[i];
        }

        unreachable("Varying not captured");
}
static unsigned
pan_varying_size(enum mali_format fmt)
{
        unsigned type = MALI_EXTRACT_TYPE(fmt);
        unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
        unsigned bits = MALI_EXTRACT_BITS(fmt);
        unsigned bpc = 0;

        if (bits == MALI_CHANNEL_FLOAT) {
                /* No doubles */
                bool fp16 = (type == MALI_FORMAT_SINT);
                assert(fp16 || (type == MALI_FORMAT_UNORM));

                bpc = fp16 ? 2 : 4;
        } else {
                assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);

                bpc = 1 << (bits - MALI_CHANNEL_8);
        }

        return bpc * chan;
}
/* Indices for named (non-XFB) varyings that are present. These are packed
 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
 * PAN_VARY_*). This has the nice property that you can lookup the buffer index
 * of a given special field given a shift S by:
 *
 *      idx = popcount(P & ((1 << S) - 1))
 *
 * That is, count all of the varyings that come earlier; the count is the new
 * index. Likewise, the total number of special buffers required is simply
 * popcount(P) */

enum pan_special_varying {
        PAN_VARY_GENERAL = 0,
        PAN_VARY_POSITION = 1,
        PAN_VARY_PSIZ = 2,
        PAN_VARY_PNTCOORD = 3,
        PAN_VARY_FACE = 4,
        PAN_VARY_FRAGCOORD = 5,

        /* Keep last */
        PAN_VARY_MAX,
};
/* Given a varying, figure out which index it corresponds to */

static inline unsigned
pan_varying_index(unsigned present, enum pan_special_varying v)
{
        unsigned mask = (1 << v) - 1;
        return util_bitcount(present & mask);
}
);
1629 /* Get the base offset for XFB buffers, which by convention come after
1630 * everything else. Wrapper function for semantic reasons; by construction this
1631 * is just popcount. */
1633 static inline unsigned
1634 pan_xfb_base(unsigned present
)
1636 return util_bitcount(present
);
1639 /* Computes the present mask for varyings so we can start emitting varying records */
1641 static inline unsigned
1642 pan_varying_present(
1643 struct panfrost_shader_state
*vs
,
1644 struct panfrost_shader_state
*fs
,
1647 /* At the moment we always emit general and position buffers. Not
1648 * strictly necessary but usually harmless */
1650 unsigned present
= (1 << PAN_VARY_GENERAL
) | (1 << PAN_VARY_POSITION
);
1652 /* Enable special buffers by the shader info */
1654 if (vs
->writes_point_size
)
1655 present
|= (1 << PAN_VARY_PSIZ
);
1657 if (fs
->reads_point_coord
)
1658 present
|= (1 << PAN_VARY_PNTCOORD
);
1661 present
|= (1 << PAN_VARY_FACE
);
1663 if (fs
->reads_frag_coord
&& !(quirks
& IS_BIFROST
))
1664 present
|= (1 << PAN_VARY_FRAGCOORD
);
1666 /* Also, if we have a point sprite, we need a point coord buffer */
1668 for (unsigned i
= 0; i
< fs
->varying_count
; i
++) {
1669 gl_varying_slot loc
= fs
->varyings_loc
[i
];
1671 if (has_point_coord(fs
->point_sprite_mask
, loc
))
1672 present
|= (1 << PAN_VARY_PNTCOORD
);
1678 /* Emitters for varying records */
1680 static struct mali_attr_meta
1681 pan_emit_vary(unsigned present
, enum pan_special_varying buf
,
1682 unsigned quirks
, enum mali_format format
,
1685 unsigned nr_channels
= MALI_EXTRACT_CHANNELS(format
);
1686 unsigned swizzle
= quirks
& HAS_SWIZZLES
?
1687 panfrost_get_default_swizzle(nr_channels
) :
1688 panfrost_bifrost_swizzle(nr_channels
);
1690 struct mali_attr_meta meta
= {
1691 .index
= pan_varying_index(present
, buf
),
1692 .unknown1
= quirks
& IS_BIFROST
? 0x0 : 0x2,
1693 .format
= (format
<< 12) | swizzle
,
1694 .src_offset
= offset
1700 /* General varying that is unused */
1702 static struct mali_attr_meta
1703 pan_emit_vary_only(unsigned present
, unsigned quirks
)
1705 return pan_emit_vary(present
, 0, quirks
, MALI_VARYING_DISCARD
, 0);
1708 /* Special records */
1710 static const enum mali_format pan_varying_formats
[PAN_VARY_MAX
] = {
1711 [PAN_VARY_POSITION
] = MALI_VARYING_POS
,
1712 [PAN_VARY_PSIZ
] = MALI_R16F
,
1713 [PAN_VARY_PNTCOORD
] = MALI_R16F
,
1714 [PAN_VARY_FACE
] = MALI_R32I
,
1715 [PAN_VARY_FRAGCOORD
] = MALI_RGBA32F
1718 static struct mali_attr_meta
1719 pan_emit_vary_special(unsigned present
, enum pan_special_varying buf
,
1722 assert(buf
< PAN_VARY_MAX
);
1723 return pan_emit_vary(present
, buf
, quirks
, pan_varying_formats
[buf
], 0);
1726 static enum mali_format
1727 pan_xfb_format(enum mali_format format
, unsigned nr
)
1729 if (MALI_EXTRACT_BITS(format
) == MALI_CHANNEL_FLOAT
)
1730 return MALI_R32F
| MALI_NR_CHANNELS(nr
);
1732 return MALI_EXTRACT_TYPE(format
) | MALI_NR_CHANNELS(nr
) | MALI_CHANNEL_32
;
1735 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1736 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1739 static struct mali_attr_meta
1740 pan_emit_vary_xfb(unsigned present
,
1742 unsigned *streamout_offsets
,
1744 enum mali_format format
,
1745 struct pipe_stream_output o
)
1747 unsigned swizzle
= quirks
& HAS_SWIZZLES
?
1748 panfrost_get_default_swizzle(o
.num_components
) :
1749 panfrost_bifrost_swizzle(o
.num_components
);
1751 /* Otherwise construct a record for it */
1752 struct mali_attr_meta meta
= {
1753 /* XFB buffers come after everything else */
1754 .index
= pan_xfb_base(present
) + o
.output_buffer
,
1756 /* As usual unknown bit */
1757 .unknown1
= quirks
& IS_BIFROST
? 0x0 : 0x2,
1759 /* Override number of channels and precision to highp */
1760 .format
= (pan_xfb_format(format
, o
.num_components
) << 12) | swizzle
,
1762 /* Apply given offsets together */
1763 .src_offset
= (o
.dst_offset
* 4) /* dwords */
1764 + streamout_offsets
[o
.output_buffer
]
1770 /* Determine if we should capture a varying for XFB. This requires actually
1771 * having a buffer for it. If we don't capture it, we'll fallback to a general
1772 * varying path (linked or unlinked, possibly discarding the write) */
1775 panfrost_xfb_captured(struct panfrost_shader_state
*xfb
,
1776 unsigned loc
, unsigned max_xfb
)
1778 if (!(xfb
->so_mask
& (1ll << loc
)))
1781 struct pipe_stream_output
*o
= pan_get_so(&xfb
->stream_output
, loc
);
1782 return o
->output_buffer
< max_xfb
;
1785 /* Higher-level wrapper around all of the above, classifying a varying into one
1786 * of the above types */
1788 static struct mali_attr_meta
1789 panfrost_emit_varying(
1790 struct panfrost_shader_state
*stage
,
1791 struct panfrost_shader_state
*other
,
1792 struct panfrost_shader_state
*xfb
,
1795 unsigned *streamout_offsets
,
1797 unsigned *gen_offsets
,
1798 enum mali_format
*gen_formats
,
1799 unsigned *gen_stride
,
1804 gl_varying_slot loc
= stage
->varyings_loc
[idx
];
1805 enum mali_format format
= stage
->varyings
[idx
];
1807 /* Override format to match linkage */
1808 if (!should_alloc
&& gen_formats
[idx
])
1809 format
= gen_formats
[idx
];
1811 if (has_point_coord(stage
->point_sprite_mask
, loc
)) {
1812 return pan_emit_vary_special(present
, PAN_VARY_PNTCOORD
, quirks
);
1813 } else if (panfrost_xfb_captured(xfb
, loc
, max_xfb
)) {
1814 struct pipe_stream_output
*o
= pan_get_so(&xfb
->stream_output
, loc
);
1815 return pan_emit_vary_xfb(present
, max_xfb
, streamout_offsets
, quirks
, format
, *o
);
1816 } else if (loc
== VARYING_SLOT_POS
) {
1818 return pan_emit_vary_special(present
, PAN_VARY_FRAGCOORD
, quirks
);
1820 return pan_emit_vary_special(present
, PAN_VARY_POSITION
, quirks
);
1821 } else if (loc
== VARYING_SLOT_PSIZ
) {
1822 return pan_emit_vary_special(present
, PAN_VARY_PSIZ
, quirks
);
1823 } else if (loc
== VARYING_SLOT_PNTC
) {
1824 return pan_emit_vary_special(present
, PAN_VARY_PNTCOORD
, quirks
);
1825 } else if (loc
== VARYING_SLOT_FACE
) {
1826 return pan_emit_vary_special(present
, PAN_VARY_FACE
, quirks
);
1829 /* We've exhausted special cases, so it's otherwise a general varying. Check if we're linked */
1830 signed other_idx
= -1;
1832 for (unsigned j
= 0; j
< other
->varying_count
; ++j
) {
1833 if (other
->varyings_loc
[j
] == loc
) {
1840 return pan_emit_vary_only(present
, quirks
);
1842 unsigned offset
= gen_offsets
[other_idx
];
1845 /* We're linked, so allocate a space via a watermark allocation */
1846 enum mali_format alt
= other
->varyings
[other_idx
];
1848 /* Do interpolation at minimum precision */
1849 unsigned size_main
= pan_varying_size(format
);
1850 unsigned size_alt
= pan_varying_size(alt
);
1851 unsigned size
= MIN2(size_main
, size_alt
);
1853 /* If a varying is marked for XFB but not actually captured, we
1854 * should match the format to the format that would otherwise
1855 * be used for XFB, since dEQP checks for invariance here. It's
1856 * unclear if this is required by the spec. */
1858 if (xfb
->so_mask
& (1ull << loc
)) {
1859 struct pipe_stream_output
*o
= pan_get_so(&xfb
->stream_output
, loc
);
1860 format
= pan_xfb_format(format
, o
->num_components
);
1861 size
= pan_varying_size(format
);
1862 } else if (size
== size_alt
) {
1866 gen_offsets
[idx
] = *gen_stride
;
1867 gen_formats
[other_idx
] = format
;
1868 offset
= *gen_stride
;
1869 *gen_stride
+= size
;
1872 return pan_emit_vary(present
, PAN_VARY_GENERAL
,
1873 quirks
, format
, offset
);
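/* The linking protocol above assumes two passes: the producing stage runs
 * first with should_alloc set, bumping *gen_stride and recording placement in
 * gen_offsets/gen_formats; the consuming stage then runs with should_alloc
 * clear and reads the recorded placement back, so both stages agree on where
 * each linked varying lives */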
static void
pan_emit_special_input(union mali_attr *varyings,
                unsigned present,
                enum pan_special_varying v,
                mali_ptr addr)
{
        if (present & (1 << v)) {
                /* Ensure we write exactly once for performance and with fields
                 * zeroed appropriately to avoid flakes */

                union mali_attr s = {
                        .elements = addr
                };

                varyings[pan_varying_index(present, v)] = s;
        }
}
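/* The MALI_VARYING_* values passed as addr appear to be magic addresses
 * recognized by the hardware rather than pointers to real buffers, hence no
 * allocation is needed for these inputs */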
void
panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
                                 unsigned vertex_count,
                                 struct mali_vertex_tiler_postfix *vertex_postfix,
                                 struct mali_vertex_tiler_postfix *tiler_postfix,
                                 union midgard_primitive_size *primitive_size)
{
        /* Load the shaders */
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_shader_state *vs, *fs;
        size_t vs_size, fs_size;

        /* Allocate the varying descriptor */

        vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
        fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
        vs_size = sizeof(struct mali_attr_meta) * vs->varying_count;
        fs_size = sizeof(struct mali_attr_meta) * fs->varying_count;

        struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
                        vs_size + fs_size);

        struct pipe_stream_output_info *so = &vs->stream_output;
        unsigned present = pan_varying_present(vs, fs, dev->quirks);
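        /* present is a bitmask over enum pan_special_varying: general
         * varyings, position, point size, etc. each get one buffer slot, with
         * the XFB buffers appended after them (see pan_xfb_base) */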
        /* Check if this varying is linked by us. This is the case for
         * general-purpose, non-captured varyings. If it is, link it. If it's
         * not, use the provided stream out information to determine the
         * offset, since it was already linked for us. */

        unsigned gen_offsets[32];
        enum mali_format gen_formats[32];
        memset(gen_offsets, 0, sizeof(gen_offsets));
        memset(gen_formats, 0, sizeof(gen_formats));

        unsigned gen_stride = 0;
        assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
        assert(fs->varying_count < ARRAY_SIZE(gen_offsets));

        unsigned streamout_offsets[32];

        for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
                streamout_offsets[i] = panfrost_streamout_offset(
                                so->stride[i],
                                ctx->streamout.offsets[i],
                                ctx->streamout.targets[i]);
        }

        struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
        struct mali_attr_meta *ofs = ovs + vs->varying_count;

        for (unsigned i = 0; i < vs->varying_count; i++) {
                ovs[i] = panfrost_emit_varying(vs, fs, vs, present,
                                ctx->streamout.num_targets, streamout_offsets,
                                dev->quirks,
                                gen_offsets, gen_formats, &gen_stride, i, true, false);
        }

        for (unsigned i = 0; i < fs->varying_count; i++) {
                ofs[i] = panfrost_emit_varying(fs, vs, vs, present,
                                ctx->streamout.num_targets, streamout_offsets,
                                dev->quirks,
                                gen_offsets, gen_formats, &gen_stride, i, false, true);
        }
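        /* Note vs is passed as the xfb shader in both loops: only the last
         * vertex-pipeline stage can be captured by transform feedback, so
         * even the fragment pass consults the vertex shader's stream output
         * info */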
        unsigned xfb_base = pan_xfb_base(present);
        struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
                        sizeof(union mali_attr) * (xfb_base + ctx->streamout.num_targets));
        union mali_attr *varyings = (union mali_attr *) T.cpu;

        /* Emit the stream out buffers */

        unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
                        ctx->vertex_count);

        for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
                panfrost_emit_streamout(batch, &varyings[xfb_base + i],
                                so->stride[i],
                                ctx->streamout.offsets[i],
                                out_count,
                                ctx->streamout.targets[i]);
        }

        panfrost_emit_varyings(batch,
                        &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
                        gen_stride, vertex_count);

        /* fp32 vec4 gl_Position */
        tiler_postfix->position_varying = panfrost_emit_varyings(batch,
                        &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
                        sizeof(float) * 4, vertex_count);

        if (present & (1 << PAN_VARY_PSIZ)) {
                primitive_size->pointer = panfrost_emit_varyings(batch,
                                &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
                                2, vertex_count);
        }

        pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_VARYING_POINT_COORD);
        pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_VARYING_FRONT_FACING);
        pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_VARYING_FRAG_COORD);

        vertex_postfix->varyings = T.gpu;
        tiler_postfix->varyings = T.gpu;

        vertex_postfix->varying_meta = trans.gpu;
        tiler_postfix->varying_meta = trans.gpu + vs_size;
}
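/* To summarize the layout: trans holds the mali_attr_meta records, the vertex
 * shader's first and the fragment shader's immediately after (hence the
 * + vs_size for the tiler), while T holds the mali_attr buffer descriptors,
 * indexed by pan_varying_index with the stream-out targets at the end */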
void
panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
                                struct mali_vertex_tiler_prefix *vertex_prefix,
                                struct mali_vertex_tiler_postfix *vertex_postfix,
                                struct mali_vertex_tiler_prefix *tiler_prefix,
                                struct mali_vertex_tiler_postfix *tiler_postfix,
                                union midgard_primitive_size *primitive_size)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_device *device = pan_device(ctx->base.screen);
        bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
        struct bifrost_payload_vertex bifrost_vertex = {0,};
        struct bifrost_payload_tiler bifrost_tiler = {0,};
        struct midgard_payload_vertex_tiler midgard_vertex = {0,};
        struct midgard_payload_vertex_tiler midgard_tiler = {0,};
        void *vp, *tp;
        size_t vp_size, tp_size;

        if (device->quirks & IS_BIFROST) {
                bifrost_vertex.prefix = *vertex_prefix;
                bifrost_vertex.postfix = *vertex_postfix;
                vp = &bifrost_vertex;
                vp_size = sizeof(bifrost_vertex);

                bifrost_tiler.prefix = *tiler_prefix;
                bifrost_tiler.tiler.primitive_size = *primitive_size;
                bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
                bifrost_tiler.postfix = *tiler_postfix;
                tp = &bifrost_tiler;
                tp_size = sizeof(bifrost_tiler);
        } else {
                midgard_vertex.prefix = *vertex_prefix;
                midgard_vertex.postfix = *vertex_postfix;
                vp = &midgard_vertex;
                vp_size = sizeof(midgard_vertex);

                midgard_tiler.prefix = *tiler_prefix;
                midgard_tiler.postfix = *tiler_postfix;
                midgard_tiler.primitive_size = *primitive_size;
                tp = &midgard_tiler;
                tp_size = sizeof(midgard_tiler);
        }
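        /* The payloads above live on the stack; panfrost_new_job copies them
         * into pool memory, so vp/tp only need to remain valid for the calls
         * below */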
        if (wallpapering) {
                /* Inject in reverse order, with "predicted" job indices.
                 * THIS IS A HACK XXX */
                panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
                                 batch->scoreboard.job_index + 2, tp, tp_size, true);
                panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
                                 vp, vp_size, true);
                return;
        }

        /* If rasterizer discard is enabled, only submit the vertex job */

        bool rasterizer_discard = ctx->rasterizer &&
                                  ctx->rasterizer->base.rasterizer_discard;

        unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
                                           vp, vp_size, false);

        if (rasterizer_discard)
                return;

        panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
                         false);
}
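/* In the normal path the tiler job names the vertex job's index as its
 * dependency, letting the hardware scoreboard order them; the wallpaper path
 * instead predicts indices since the jobs are injected in reverse */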
/* TODO: stop hardcoding this */
static mali_ptr
panfrost_emit_sample_locations(struct panfrost_batch *batch)
{
        uint16_t locations[] = {
                /* ... hardcoded table of 96 sample positions ... */
        };

        return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
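        /* 96 uint16_t entries is 48 (x, y) pairs, presumably covering sample
         * positions for each supported MSAA mode */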