/*
 * Copyright (C) 2018 Alyssa Rosenzweig
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "util/macros.h"
#include "util/u_prim.h"
#include "util/u_vbuf.h"

#include "panfrost-quirks.h"

#include "pan_allocate.h"
#include "pan_bo.h"
#include "pan_cmdstream.h"
#include "pan_context.h"
#include "pan_job.h"

/* TODO: Bifrost requires just a mali_shared_memory, without the rest of the
 * framebuffer */

static void
panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
                               struct midgard_payload_vertex_tiler *vt)
{
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        /* If we haven't, reserve space for the framebuffer */

        if (!batch->framebuffer.gpu) {
                unsigned size = (dev->quirks & MIDGARD_SFBD) ?
                                sizeof(struct mali_single_framebuffer) :
                                sizeof(struct mali_framebuffer);

                batch->framebuffer = panfrost_allocate_transient(batch, size);

                /* Tag the pointer */
                if (!(dev->quirks & MIDGARD_SFBD))
                        batch->framebuffer.gpu |= MALI_MFBD;
        }

        vt->postfix.shared_memory = batch->framebuffer.gpu;
}

static void
panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
                              struct midgard_payload_vertex_tiler *tp)
{
        struct panfrost_rasterizer *rasterizer = ctx->rasterizer;

        tp->gl_enables |= 0x7;
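
        /* SET_BIT(word, flag, cond) sets or clears `flag` in `word` depending
         * on `cond`, so every state bit below is written unconditionally --
         * with a NULL rasterizer the bits are explicitly cleared rather than
         * left stale. */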
        SET_BIT(tp->gl_enables, MALI_FRONT_CCW_TOP,
                rasterizer && rasterizer->base.front_ccw);
        SET_BIT(tp->gl_enables, MALI_CULL_FACE_FRONT,
                rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
        SET_BIT(tp->gl_enables, MALI_CULL_FACE_BACK,
                rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
        SET_BIT(tp->prefix.unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
                rasterizer && rasterizer->base.flatshade_first);

        if (!panfrost_writes_point_size(ctx)) {
                bool points = tp->prefix.draw_mode == MALI_POINTS;
                float val = 0.0f;

                if (rasterizer)
                        val = points ?
                              rasterizer->base.point_size :
                              rasterizer->base.line_width;

                tp->primitive_size.constant = val;
        }
}

static void
panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
                                   struct midgard_payload_vertex_tiler *tp)
{
        SET_BIT(tp->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
        if (ctx->occlusion_query)
                tp->postfix.occlusion_counter = ctx->occlusion_query->bo->gpu;
        else
                tp->postfix.occlusion_counter = 0;
}

void
panfrost_vt_init(struct panfrost_context *ctx,
                 enum pipe_shader_type stage,
                 struct midgard_payload_vertex_tiler *vtp)
{
        if (!ctx->shader[stage])
                return;

        memset(vtp, 0, sizeof(*vtp));
        vtp->gl_enables = 0x6;
        panfrost_vt_attach_framebuffer(ctx, vtp);

        if (stage == PIPE_SHADER_FRAGMENT) {
                panfrost_vt_update_occlusion_query(ctx, vtp);
                panfrost_vt_update_rasterizer(ctx, vtp);
        }
}

static unsigned
panfrost_translate_index_size(unsigned size)
{
        switch (size) {
        case 1:
                return MALI_DRAW_INDEXED_UINT8;

        case 2:
                return MALI_DRAW_INDEXED_UINT16;

        case 4:
                return MALI_DRAW_INDEXED_UINT32;

        default:
                unreachable("Invalid index size");
        }
}

/* Gets a GPU address for the associated index buffer. Only guaranteed to be
 * good for the duration of the draw (transient), could last longer. Also get
 * the bounds on the index buffer for the range accessed by the draw. We do
 * these operations together because there are natural optimizations which
 * require them to be together. */

static mali_ptr
panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
                                  const struct pipe_draw_info *info,
                                  unsigned *min_index, unsigned *max_index)
{
        struct panfrost_resource *rsrc = pan_resource(info->index.resource);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        off_t offset = info->start * info->index_size;
        bool needs_indices = true;
        mali_ptr out = 0;
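
        /* Note the units: offset is in bytes, e.g. info->start = 100 with
         * 16-bit indices means the draw starts reading at byte 200. */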

        if (info->max_index != ~0u) {
                *min_index = info->min_index;
                *max_index = info->max_index;
                needs_indices = false;
        }

        if (!info->has_user_indices) {
                /* Only resources can be directly mapped */
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      PAN_BO_ACCESS_VERTEX_TILER);
                out = rsrc->bo->gpu + offset;

                /* Check the cache */
                needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
                                                           info->start,
                                                           info->count,
                                                           min_index,
                                                           max_index);
        } else {
                /* Otherwise, we need to upload to transient memory */
                const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
                out = panfrost_upload_transient(batch, ibuf8 + offset,
                                                info->count *
                                                info->index_size);
        }

        if (needs_indices) {
                /* Fallback */
                u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);

                if (!info->has_user_indices)
                        panfrost_minmax_cache_add(rsrc->index_cache,
                                                  info->start, info->count,
                                                  *min_index, *max_index);
        }

        return out;
}

void
panfrost_vt_set_draw_info(struct panfrost_context *ctx,
                          const struct pipe_draw_info *info,
                          enum mali_draw_mode draw_mode,
                          struct midgard_payload_vertex_tiler *vp,
                          struct midgard_payload_vertex_tiler *tp,
                          unsigned *vertex_count,
                          unsigned *padded_count)
{
        tp->prefix.draw_mode = draw_mode;

        unsigned draw_flags = 0;

        if (panfrost_writes_point_size(ctx))
                draw_flags |= MALI_DRAW_VARYING_SIZE;

        if (info->primitive_restart)
                draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;

        /* These don't make much sense */

        draw_flags |= 0x3000;

        if (info->index_size) {
                unsigned min_index = 0, max_index = 0;

                tp->prefix.indices = panfrost_get_index_buffer_bounded(ctx,
                                                                       info,
                                                                       &min_index,
                                                                       &max_index);

                /* Use the corresponding values */
                *vertex_count = max_index - min_index + 1;
                tp->offset_start = vp->offset_start = min_index + info->index_bias;
                tp->prefix.offset_bias_correction = -min_index;
                tp->prefix.index_count = MALI_POSITIVE(info->count);
                draw_flags |= panfrost_translate_index_size(info->index_size);
        } else {
                tp->prefix.indices = 0;
                *vertex_count = ctx->vertex_count;
                tp->offset_start = vp->offset_start = info->start;
                tp->prefix.offset_bias_correction = 0;
                tp->prefix.index_count = MALI_POSITIVE(ctx->vertex_count);
        }

        tp->prefix.unknown_draw = draw_flags;

        /* Encode the padded vertex count */

        if (info->instance_count > 1) {
                *padded_count = panfrost_padded_vertex_count(*vertex_count);

                unsigned shift = __builtin_ctz(ctx->padded_count);
                unsigned k = ctx->padded_count >> (shift + 1);
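
                /* A sketch of the encoding: padded = (2k + 1) << shift, an odd
                 * factor times a power of two. E.g. a padded count of 12 gives
                 * shift = ctz(12) = 2 and k = 12 >> 3 = 1, and indeed
                 * (2*1 + 1) << 2 = 12. */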

                tp->instance_shift = vp->instance_shift = shift;
                tp->instance_odd = vp->instance_odd = k;
        } else {
                *padded_count = *vertex_count;

                /* Reset instancing state */
                tp->instance_shift = vp->instance_shift = 0;
                tp->instance_odd = vp->instance_odd = 0;
        }
}

static void
panfrost_shader_meta_init(struct panfrost_context *ctx,
                          enum pipe_shader_type st,
                          struct mali_shader_meta *meta)
{
        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);

        memset(meta, 0, sizeof(*meta));
        meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
        meta->midgard1.uniform_count = MIN2(ss->uniform_count,
                                            ss->uniform_cutoff);
        meta->midgard1.work_count = ss->work_reg_count;
        meta->attribute_count = ss->attribute_count;
        meta->varying_count = ss->varying_count;
        meta->midgard1.flags_hi = 0x8; /* XXX */
        meta->midgard1.flags_lo = 0x220;
        meta->texture_count = ctx->sampler_view_count[st];
        meta->sampler_count = ctx->sampler_count[st];
        meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
}

static unsigned
panfrost_translate_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_FUNC_ALWAYS;

        default:
                unreachable("Invalid func");
        }
}

static unsigned
panfrost_translate_stencil_op(enum pipe_stencil_op in)
{
        switch (in) {
        case PIPE_STENCIL_OP_KEEP:
                return MALI_STENCIL_KEEP;

        case PIPE_STENCIL_OP_ZERO:
                return MALI_STENCIL_ZERO;

        case PIPE_STENCIL_OP_REPLACE:
                return MALI_STENCIL_REPLACE;

        case PIPE_STENCIL_OP_INCR:
                return MALI_STENCIL_INCR;

        case PIPE_STENCIL_OP_DECR:
                return MALI_STENCIL_DECR;

        case PIPE_STENCIL_OP_INCR_WRAP:
                return MALI_STENCIL_INCR_WRAP;

        case PIPE_STENCIL_OP_DECR_WRAP:
                return MALI_STENCIL_DECR_WRAP;

        case PIPE_STENCIL_OP_INVERT:
                return MALI_STENCIL_INVERT;

        default:
                unreachable("Invalid stencil op");
        }
}

static enum mali_wrap_mode
translate_tex_wrap(enum pipe_tex_wrap w)
{
        switch (w) {
        case PIPE_TEX_WRAP_REPEAT:
                return MALI_WRAP_REPEAT;

        case PIPE_TEX_WRAP_CLAMP:
                return MALI_WRAP_CLAMP;

        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return MALI_WRAP_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return MALI_WRAP_CLAMP_TO_BORDER;

        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return MALI_WRAP_MIRRORED_REPEAT;

        case PIPE_TEX_WRAP_MIRROR_CLAMP:
                return MALI_WRAP_MIRRORED_CLAMP;

        case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
                return MALI_WRAP_MIRRORED_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
                return MALI_WRAP_MIRRORED_CLAMP_TO_BORDER;

        default:
                unreachable("Invalid wrap");
        }
}

void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
                                struct mali_sampler_descriptor *hw)
{
        unsigned func = panfrost_translate_compare_func(cso->compare_func);
        bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
        bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
        bool mip_linear = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;
        unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
        unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
        unsigned mip_filter = mip_linear ?
                              (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
        unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;

        *hw = (struct mali_sampler_descriptor) {
                .filter_mode = min_filter | mag_filter | mip_filter |
                               normalized,
                .wrap_s = translate_tex_wrap(cso->wrap_s),
                .wrap_t = translate_tex_wrap(cso->wrap_t),
                .wrap_r = translate_tex_wrap(cso->wrap_r),
                .compare_func = panfrost_flip_compare_func(func),
                .border_color = {
                        cso->border_color.f[0],
                        cso->border_color.f[1],
                        cso->border_color.f[2],
                        cso->border_color.f[3]
                },
                .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
                .max_lod = FIXED_16(cso->max_lod, false),
                .lod_bias = FIXED_16(cso->lod_bias, true), /* can be negative */
                .seamless_cube_map = cso->seamless_cube_map,
        };

        /* If necessary, we disable mipmapping in the sampler descriptor by
         * clamping the LOD as tight as possible (from 0 to epsilon,
         * essentially -- remember these are fixed point numbers, so
         * epsilon=1/256) */

        if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
                hw->max_lod = hw->min_lod + 1;
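
        /* These LOD fields appear to be 8:8 fixed point (cf. the 1.0/256.0
         * epsilon used for the sampler sysvals below), so the "+ 1" is the
         * smallest representable step: e.g. min_lod = 2.5 encodes as
         * 640 = 0x280, and max_lod = 641 clamps LOD to [2.5, 2.5 + 1/256]. */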
}

static void
panfrost_make_stencil_state(const struct pipe_stencil_state *in,
                            struct mali_stencil_test *out)
{
        out->ref = 0; /* Gallium gets it from elsewhere */

        out->mask = in->valuemask;
        out->func = panfrost_translate_compare_func(in->func);
        out->sfail = panfrost_translate_stencil_op(in->fail_op);
        out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
        out->dppass = panfrost_translate_stencil_op(in->zpass_op);
}

static void
panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
                                     struct mali_shader_meta *fragmeta)
{
        if (!ctx->rasterizer) {
                SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
                SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
                fragmeta->depth_units = 0.0f;
                fragmeta->depth_factor = 0.0f;
                SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
                SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
                return;
        }

        bool msaa = ctx->rasterizer->base.multisample;

        /* TODO: Sample size */
        SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
        SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
        fragmeta->depth_units = ctx->rasterizer->base.offset_units * 2.0f;
        fragmeta->depth_factor = ctx->rasterizer->base.offset_scale;

        /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */

        SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A,
                ctx->rasterizer->base.offset_tri);
        SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B,
                ctx->rasterizer->base.offset_tri);
}

static void
panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
                              struct mali_shader_meta *fragmeta)
{
        const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
        int zfunc = PIPE_FUNC_ALWAYS;

        if (!zsa) {
                struct pipe_stencil_state default_stencil = {
                        .enabled = 0,
                        .func = PIPE_FUNC_ALWAYS,
                        .fail_op = MALI_STENCIL_KEEP,
                        .zfail_op = MALI_STENCIL_KEEP,
                        .zpass_op = MALI_STENCIL_KEEP,
                        .writemask = 0xFF,
                        .valuemask = 0xFF
                };

                panfrost_make_stencil_state(&default_stencil,
                                            &fragmeta->stencil_front);
                fragmeta->stencil_mask_front = default_stencil.writemask;
                fragmeta->stencil_back = fragmeta->stencil_front;
                fragmeta->stencil_mask_back = default_stencil.writemask;
                SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
        } else {
                SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
                        zsa->stencil[0].enabled);
                panfrost_make_stencil_state(&zsa->stencil[0],
                                            &fragmeta->stencil_front);
                fragmeta->stencil_mask_front = zsa->stencil[0].writemask;
                fragmeta->stencil_front.ref = ctx->stencil_ref.ref_value[0];

                /* If back-stencil is not enabled, use the front values */

                if (zsa->stencil[1].enabled) {
                        panfrost_make_stencil_state(&zsa->stencil[1],
                                                    &fragmeta->stencil_back);
                        fragmeta->stencil_mask_back = zsa->stencil[1].writemask;
                        fragmeta->stencil_back.ref = ctx->stencil_ref.ref_value[1];
                } else {
                        fragmeta->stencil_back = fragmeta->stencil_front;
                        fragmeta->stencil_mask_back = fragmeta->stencil_mask_front;
                        fragmeta->stencil_back.ref = fragmeta->stencil_front.ref;
                }

                if (zsa->depth.enabled)
                        zfunc = zsa->depth.func;

                /* Depth state (TODO: Refactor) */

                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
                        zsa->depth.writemask);
        }

        fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
        fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
}

static void
panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
                                struct mali_shader_meta *fragmeta,
                                struct midgard_blend_rt *rts)
{
        const struct panfrost_device *dev = pan_device(ctx->base.screen);

        SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
                (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
                !ctx->blend->base.dither);

        /* Get blending setup */
        unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);

        struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
        unsigned shader_offset = 0;
        struct panfrost_bo *shader_bo = NULL;

        for (unsigned c = 0; c < rt_count; ++c)
                blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
                                                          &shader_offset);

        /* If there is a blend shader, work registers are shared. XXX: opt */

        for (unsigned c = 0; c < rt_count; ++c) {
                if (blend[c].is_shader)
                        fragmeta->midgard1.work_count = 16;
        }

        /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
         * copied to the blend_meta appended (by convention), but this is the
         * field actually read by the hardware. (Or maybe both are read...?).
         * Specify the last RTi with a blend shader. */

        fragmeta->blend.shader = 0;

        for (signed rt = (rt_count - 1); rt >= 0; --rt) {
                if (!blend[rt].is_shader)
                        continue;

                fragmeta->blend.shader = blend[rt].shader.gpu |
                                         blend[rt].shader.first_tag;
                break;
        }

        if (dev->quirks & MIDGARD_SFBD) {
                /* When only a single render target platform is used, the blend
                 * information is inside the shader meta itself. We additionally
                 * need to signal CAN_DISCARD for nontrivial blend modes (so
                 * we're able to read back the destination buffer) */

                SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
                        blend[0].is_shader);

                if (!blend[0].is_shader) {
                        fragmeta->blend.equation = *blend[0].equation.equation;
                        fragmeta->blend.constant = blend[0].equation.constant;
                }

                SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
                        !blend[0].no_blending);
                return;
        }

        /* Additional blend descriptor tacked on for jobs using MFBD */

        for (unsigned i = 0; i < rt_count; ++i) {
                rts[i].flags = 0x200;

                bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
                               (ctx->pipe_framebuffer.cbufs[i]) &&
                               util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);

                SET_BIT(rts[i].flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
                SET_BIT(rts[i].flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
                SET_BIT(rts[i].flags, MALI_BLEND_SRGB, is_srgb);
                SET_BIT(rts[i].flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);

                if (blend[i].is_shader) {
                        rts[i].blend.shader = blend[i].shader.gpu |
                                              blend[i].shader.first_tag;
                } else {
                        rts[i].blend.equation = *blend[i].equation.equation;
                        rts[i].blend.constant = blend[i].equation.constant;
                }
        }
}

static void
panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
                               struct mali_shader_meta *fragmeta,
                               struct midgard_blend_rt *rts)
{
        const struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_shader_state *fs;

        fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);

        fragmeta->alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000);
        fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010;
        fragmeta->unknown2_4 = 0x4e0;

        /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
         * is required (independent of 32-bit/64-bit descriptors), or why it's
         * not used on later GPU revisions. Otherwise, all shader jobs fault on
         * these earlier chips (perhaps this is a chicken bit of some kind).
         * More investigation is needed. */

        SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);

        /* Depending on whether it's legal to do so in the given shader, we try
         * to enable early-z testing (or forward-pixel kill?) */

        SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
                !fs->can_discard && !fs->writes_depth);

        /* Add the writes Z/S flags if needed. */
        SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
        SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);

        /* Any time texturing is used, derivatives are implicitly calculated,
         * so we need to enable helper invocations */

        SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
                fs->helper_invocations);

        /* CAN_DISCARD should be set if the fragment shader possibly contains a
         * 'discard' instruction. It is likely this is related to optimizations
         * related to forward-pixel kill, as per "Mali Performance 3: Is
         * EGL_BUFFER_PRESERVED a good thing?" by Peter Harris */

        SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD, fs->can_discard);
        SET_BIT(fragmeta->midgard1.flags_lo, 0x400, fs->can_discard);

        panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
        panfrost_frag_meta_zsa_update(ctx, fragmeta);
        panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
}

void
panfrost_emit_shader_meta(struct panfrost_batch *batch,
                          enum pipe_shader_type st,
                          struct midgard_payload_vertex_tiler *vtp)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);

        if (!ss) {
                vtp->postfix.shader = 0;
                return;
        }

        struct mali_shader_meta meta;

        panfrost_shader_meta_init(ctx, st, &meta);

        /* Add the shader BO to the batch. */
        panfrost_batch_add_bo(batch, ss->bo,
                              PAN_BO_ACCESS_PRIVATE |
                              PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        mali_ptr shader_ptr;

        if (st == PIPE_SHADER_FRAGMENT) {
                struct panfrost_device *dev = pan_device(ctx->base.screen);
                unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
                size_t desc_size = sizeof(meta);
                struct midgard_blend_rt rts[4];
                struct panfrost_transfer xfer;

                assert(rt_count <= ARRAY_SIZE(rts));

                panfrost_frag_shader_meta_init(ctx, &meta, rts);

                if (!(dev->quirks & MIDGARD_SFBD))
                        desc_size += sizeof(*rts) * rt_count;

                xfer = panfrost_allocate_transient(batch, desc_size);

                memcpy(xfer.cpu, &meta, sizeof(meta));
                memcpy(xfer.cpu + sizeof(meta), rts, sizeof(*rts) * rt_count);

                shader_ptr = xfer.gpu;
        } else {
                shader_ptr = panfrost_upload_transient(batch, &meta,
                                                       sizeof(meta));
        }

        vtp->postfix.shader = shader_ptr;
}

static void
panfrost_mali_viewport_init(struct panfrost_context *ctx,
                            struct mali_viewport *mvp)
{
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        /* Clip bounds are encoded as floats. The viewport itself is encoded as
         * (somewhat) asymmetric ints. */

        const struct pipe_scissor_state *ss = &ctx->scissor;

        memset(mvp, 0, sizeof(*mvp));

        /* By default, do no viewport clipping, i.e. clip to (-inf, inf) in
         * each direction. Clipping to the viewport in theory should work, but
         * in practice causes issues when we're not explicitly trying to
         * scissor */

        *mvp = (struct mali_viewport) {
                .clip_minx = -INFINITY,
                .clip_miny = -INFINITY,
                .clip_maxx = INFINITY,
                .clip_maxy = INFINITY,
        };

        /* Always scissor to the viewport by default. */
        float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
        float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));

        float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
        float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));

        float minz = (vp->translate[2] - fabsf(vp->scale[2]));
        float maxz = (vp->translate[2] + fabsf(vp->scale[2]));

        /* Apply the scissor test */

        unsigned minx, miny, maxx, maxy;

        if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
                minx = MAX2(ss->minx, vp_minx);
                miny = MAX2(ss->miny, vp_miny);
                maxx = MIN2(ss->maxx, vp_maxx);
                maxy = MIN2(ss->maxy, vp_maxy);
        } else {
                minx = vp_minx;
                miny = vp_miny;
                maxx = vp_maxx;
                maxy = vp_maxy;
        }

        /* Hardware needs the min/max to be strictly ordered, so flip if we
         * need to. The viewport transformation in the vertex shader will
         * handle the negatives if we don't */

        if (miny > maxy) {
                unsigned temp = miny;
                miny = maxy;
                maxy = temp;
        }

        if (minx > maxx) {
                unsigned temp = minx;
                minx = maxx;
                maxx = temp;
        }

        if (minz > maxz) {
                float temp = minz;
                minz = maxz;
                maxz = temp;
        }

        /* Clamp to the framebuffer size as a last check */

        minx = MIN2(ctx->pipe_framebuffer.width, minx);
        maxx = MIN2(ctx->pipe_framebuffer.width, maxx);

        miny = MIN2(ctx->pipe_framebuffer.height, miny);
        maxy = MIN2(ctx->pipe_framebuffer.height, maxy);

        /* Upload */

        mvp->viewport0[0] = minx;
        mvp->viewport1[0] = MALI_POSITIVE(maxx);

        mvp->viewport0[1] = miny;
        mvp->viewport1[1] = MALI_POSITIVE(maxy);
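
        /* MALI_POSITIVE(n) stores n - 1, making the max bounds inclusive:
         * continuing the example above, maxx = 320 encodes as 319. */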

        mvp->clip_minz = minz;
        mvp->clip_maxz = maxz;
}

void
panfrost_emit_viewport(struct panfrost_batch *batch,
                       struct midgard_payload_vertex_tiler *tp)
{
        struct panfrost_context *ctx = batch->ctx;
        struct mali_viewport mvp;

        panfrost_mali_viewport_init(batch->ctx, &mvp);

        /* Update the job, unless we're doing wallpapering (whose lack of
         * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
         * just... be faster :) */

        if (!ctx->wallpaper_batch)
                panfrost_batch_union_scissor(batch, mvp.viewport0[0],
                                             mvp.viewport0[1],
                                             mvp.viewport1[0] + 1,
                                             mvp.viewport1[1] + 1);

        tp->postfix.viewport = panfrost_upload_transient(batch, &mvp,
                                                         sizeof(mvp));
}

static mali_ptr
panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
                                 enum pipe_shader_type st,
                                 struct panfrost_constant_buffer *buf,
                                 unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);

        if (rsrc) {
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      panfrost_bo_access_for_stage(st));

                /* Alignment guaranteed by
                 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
                return rsrc->bo->gpu + cb->buffer_offset;
        } else if (cb->user_buffer) {
                return panfrost_upload_transient(batch,
                                                 cb->user_buffer +
                                                 cb->buffer_offset,
                                                 cb->buffer_size);
        } else {
                unreachable("No constant buffer");
        }
}

struct sysval_uniform {
        union {
                float f[4];
                int32_t i[4];
                uint32_t u[4];
                uint64_t du[2];
        };
};

static void
panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
                                      struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->scale[0];
        uniform->f[1] = vp->scale[1];
        uniform->f[2] = vp->scale[2];
}

static void
panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
                                       struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->translate[0];
        uniform->f[1] = vp->translate[1];
        uniform->f[2] = vp->translate[2];
}

static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
                                       enum pipe_shader_type st,
                                       unsigned int sysvalid,
                                       struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
        unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
        bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
        struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;

        assert(dim);
        uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);

        if (dim > 1)
                uniform->i[1] = u_minify(tex->texture->height0,
                                         tex->u.tex.first_level);

        if (dim > 2)
                uniform->i[2] = u_minify(tex->texture->depth0,
                                         tex->u.tex.first_level);
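
        /* e.g. for a 2D array texture this fills i[0] = width and i[1] =
         * height, and the layer count (not minified) lands in i[2] below. */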

        if (is_array)
                uniform->i[dim] = tex->texture->array_size;
}

static void
panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
                            enum pipe_shader_type st,
                            unsigned ssbo_id,
                            struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;

        assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
        struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];

        /* Compute address */
        struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;

        panfrost_batch_add_bo(batch, bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
                              panfrost_bo_access_for_stage(st));

        /* Upload address and size as sysval */
        uniform->du[0] = bo->gpu + sb.buffer_offset;
        uniform->u[2] = sb.buffer_size;
}

static void
panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
                               enum pipe_shader_type st,
                               unsigned samp_idx,
                               struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;

        uniform->f[0] = sampl->min_lod;
        uniform->f[1] = sampl->max_lod;
        uniform->f[2] = sampl->lod_bias;

        /* Even without any errata, Midgard represents "no mipmapping" as
         * fixing the LOD with the clamps; keep behaviour consistent. c.f.
         * panfrost_create_sampler_state which also explains our choice of
         * epsilon value (again to keep behaviour consistent) */

        if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
                uniform->f[1] = uniform->f[0] + (1.0/256.0);
}

static void
panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
                                       struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;

        uniform->u[0] = ctx->compute_grid->grid[0];
        uniform->u[1] = ctx->compute_grid->grid[1];
        uniform->u[2] = ctx->compute_grid->grid[2];
}

void
panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
                        struct panfrost_shader_state *ss,
                        enum pipe_shader_type st)
{
        struct sysval_uniform *uniforms = (void *)buf;

        for (unsigned i = 0; i < ss->sysval_count; ++i) {
                int sysval = ss->sysval[i];

                switch (PAN_SYSVAL_TYPE(sysval)) {
                case PAN_SYSVAL_VIEWPORT_SCALE:
                        panfrost_upload_viewport_scale_sysval(batch,
                                                              &uniforms[i]);
                        break;
                case PAN_SYSVAL_VIEWPORT_OFFSET:
                        panfrost_upload_viewport_offset_sysval(batch,
                                                               &uniforms[i]);
                        break;
                case PAN_SYSVAL_TEXTURE_SIZE:
                        panfrost_upload_txs_sysval(batch, st,
                                                   PAN_SYSVAL_ID(sysval),
                                                   &uniforms[i]);
                        break;
                case PAN_SYSVAL_SSBO:
                        panfrost_upload_ssbo_sysval(batch, st,
                                                    PAN_SYSVAL_ID(sysval),
                                                    &uniforms[i]);
                        break;
                case PAN_SYSVAL_NUM_WORK_GROUPS:
                        panfrost_upload_num_work_groups_sysval(batch,
                                                               &uniforms[i]);
                        break;
                case PAN_SYSVAL_SAMPLER:
                        panfrost_upload_sampler_sysval(batch, st,
                                                       PAN_SYSVAL_ID(sysval),
                                                       &uniforms[i]);
                        break;
                default:
                        assert(0);
                }
        }
}

static const void *
panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
                                 unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);

        if (rsrc)
                return rsrc->bo->cpu;
        else if (cb->user_buffer)
                return cb->user_buffer;
        else
                unreachable("No constant buffer");
}

void
panfrost_emit_const_buf(struct panfrost_batch *batch,
                        enum pipe_shader_type stage,
                        struct midgard_payload_vertex_tiler *vtp)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_variants *all = ctx->shader[stage];

        if (!all)
                return;

        struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];

        struct panfrost_shader_state *ss = &all->variants[all->active_variant];

        /* Uniforms are implicitly UBO #0 */
        bool has_uniforms = buf->enabled_mask & (1 << 0);

        /* Allocate room for the sysval and the uniforms */
        size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
        size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
        size_t size = sys_size + uniform_size;
        struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
                                                                        size);

        /* Upload sysvals requested by the shader */
        panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);

        /* Upload uniforms */
        if (has_uniforms && uniform_size) {
                const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
                memcpy(transfer.cpu + sys_size, cpu, uniform_size);
        }

        struct mali_vertex_tiler_postfix *postfix = &vtp->postfix;

        /* Next up, attach UBOs. UBO #0 is the uniforms we just
         * uploaded */

        unsigned ubo_count = panfrost_ubo_count(ctx, stage);
        assert(ubo_count >= 1);

        size_t sz = sizeof(uint64_t) * ubo_count;
        uint64_t ubos[PAN_MAX_CONST_BUFFERS];
        int uniform_count = ss->uniform_count;

        /* Upload uniforms as a UBO */
        ubos[0] = MALI_MAKE_UBO(2 + uniform_count, transfer.gpu);

        /* The rest are honest-to-goodness UBOs */

        for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
                size_t usz = buf->cb[ubo].buffer_size;
                bool enabled = buf->enabled_mask & (1 << ubo);
                bool empty = usz == 0;

                if (!enabled || empty) {
                        /* Stub out disabled UBOs to catch accesses */
                        ubos[ubo] = MALI_MAKE_UBO(0, 0xDEAD0000);
                        continue;
                }

                mali_ptr gpu = panfrost_map_constant_buffer_gpu(batch, stage,
                                                                buf, ubo);

                unsigned bytes_per_field = 16;
                unsigned aligned = ALIGN_POT(usz, bytes_per_field);
                ubos[ubo] = MALI_MAKE_UBO(aligned / bytes_per_field, gpu);
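
                /* e.g. a 100-byte UBO: ALIGN_POT(100, 16) = 112, so the
                 * descriptor advertises 112 / 16 = 7 vec4-sized fields. */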
        }

        mali_ptr ubufs = panfrost_upload_transient(batch, ubos, sz);
        postfix->uniforms = transfer.gpu;
        postfix->uniform_buffers = ubufs;

        buf->dirty_mask = 0;
}

void
panfrost_emit_shared_memory(struct panfrost_batch *batch,
                            const struct pipe_grid_info *info,
                            struct midgard_payload_vertex_tiler *vtp)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
        struct panfrost_shader_state *ss = &all->variants[all->active_variant];
        unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
                                                           128));
        unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
                               info->grid[2] * 4;
        struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
                                                                  shared_size,
                                                                  1);

        struct mali_shared_memory shared = {
                .shared_memory = bo->gpu,
                .shared_workgroup_count =
                        util_logbase2_ceil(info->grid[0]) +
                        util_logbase2_ceil(info->grid[1]) +
                        util_logbase2_ceil(info->grid[2]),
                .shared_unk1 = 0x2,
                .shared_shift = util_logbase2(single_size) - 1
        };
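
        /* e.g. a (4, 3, 1) grid: the ceil-log2 terms are 2 + 2 + 0, so
         * shared_workgroup_count = 4, presumably enough address bits to
         * index 2^4 workgroups. */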

        vtp->postfix.shared_memory = panfrost_upload_transient(batch, &shared,
                                                               sizeof(shared));
}

static mali_ptr
panfrost_get_tex_desc(struct panfrost_batch *batch,
                      enum pipe_shader_type st,
                      struct panfrost_sampler_view *view)
{
        if (!view)
                return (mali_ptr) 0;

        struct pipe_sampler_view *pview = &view->base;
        struct panfrost_resource *rsrc = pan_resource(pview->texture);

        /* Add the BO to the job so it's retained until the job is done. */

        panfrost_batch_add_bo(batch, rsrc->bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        panfrost_batch_add_bo(batch, view->bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        return view->bo->gpu;
}

void
panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
                                  enum pipe_shader_type stage,
                                  struct midgard_payload_vertex_tiler *vtp)
{
        struct panfrost_context *ctx = batch->ctx;

        if (!ctx->sampler_view_count[stage])
                return;

        uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];

        for (int i = 0; i < ctx->sampler_view_count[stage]; ++i)
                trampolines[i] = panfrost_get_tex_desc(batch, stage,
                                                       ctx->sampler_views[stage][i]);

        vtp->postfix.texture_trampoline = panfrost_upload_transient(batch,
                                                                    trampolines,
                                                                    sizeof(uint64_t) *
                                                                    ctx->sampler_view_count[stage]);
}

void
panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
                                  enum pipe_shader_type stage,
                                  struct midgard_payload_vertex_tiler *vtp)
{
        struct panfrost_context *ctx = batch->ctx;

        if (!ctx->sampler_count[stage])
                return;

        size_t desc_size = sizeof(struct mali_sampler_descriptor);
        size_t transfer_size = desc_size * ctx->sampler_count[stage];
        struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
                                                                        transfer_size);
        struct mali_sampler_descriptor *desc =
                (struct mali_sampler_descriptor *) transfer.cpu;

        for (int i = 0; i < ctx->sampler_count[stage]; ++i)
                desc[i] = ctx->samplers[stage][i]->hw;

        vtp->postfix.sampler_descriptor = transfer.gpu;
}

void
panfrost_emit_vertex_attr_meta(struct panfrost_batch *batch,
                               struct midgard_payload_vertex_tiler *vp)
{
        struct panfrost_context *ctx = batch->ctx;

        if (!ctx->vertex)
                return;

        struct panfrost_vertex_state *so = ctx->vertex;

        panfrost_vertex_state_upd_attr_offs(ctx, vp);
        vp->postfix.attribute_meta = panfrost_upload_transient(batch, so->hw,
                                                               sizeof(*so->hw) *
                                                               PAN_MAX_ATTRIBUTE);
}

void
panfrost_emit_vertex_data(struct panfrost_batch *batch,
                          struct midgard_payload_vertex_tiler *vp)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_vertex_state *so = ctx->vertex;

        /* Staged mali_attr, and index into them. i =/= k, depending on the
         * vertex buffer mask and instancing. Twice as much room is allocated,
         * for a worst case of NPOT_DIVIDEs which take up extra slot */
        union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
        unsigned k = 0;

        for (unsigned i = 0; i < so->num_elements; ++i) {
                /* We map a mali_attr to be 1:1 with the mali_attr_meta, which
                 * means duplicating some vertex buffers (who cares? aside from
                 * maybe some caching implications but I somehow doubt that
                 * matters) */

                struct pipe_vertex_element *elem = &so->pipe[i];
                unsigned vbi = elem->vertex_buffer_index;

                /* The exception to 1:1 mapping is that we can have multiple
                 * entries (NPOT divisors), so we fixup anyways */

                so->hw[i].index = k;

                if (!(ctx->vb_mask & (1 << vbi)))
                        continue;

                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
                struct panfrost_resource *rsrc;

                rsrc = pan_resource(buf->buffer.resource);
                if (!rsrc)
                        continue;

                /* Align to 64 bytes by masking off the lower bits. This
                 * will be adjusted back when we fixup the src_offset in
                 * mali_attr_meta */

                mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
                mali_ptr addr = raw_addr & ~63;
                unsigned chopped_addr = raw_addr - addr;
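
                /* e.g. raw_addr = 0x10043 chops to addr = 0x10040 with
                 * chopped_addr = 3; those 3 bytes are added back to the
                 * attribute size below and to src_offset at fixup time. */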

                /* Add a dependency of the batch on the vertex buffer */
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      PAN_BO_ACCESS_VERTEX_TILER);

                /* Set common fields */
                attrs[k].elements = addr;
                attrs[k].stride = buf->stride;

                /* Since we advanced the base pointer, we shrink the buffer
                 * size */
                attrs[k].size = rsrc->base.width0 - buf->buffer_offset;

                /* We need to add the extra size we masked off (for
                 * correctness) so the data doesn't get clamped away */
                attrs[k].size += chopped_addr;

                /* For non-instancing make sure we initialize */
                attrs[k].shift = attrs[k].extra_flags = 0;

                /* Instancing uses a dramatically different code path than
                 * linear, so dispatch for the actual emission now that the
                 * common code is finished */

                unsigned divisor = elem->instance_divisor;

                if (divisor && ctx->instance_count == 1) {
                        /* Silly corner case where there's a divisor(=1) but
                         * there's no legitimate instancing. So we want *every*
                         * attribute to be the same. So set stride to zero so
                         * we don't go anywhere. */

                        attrs[k].size = attrs[k].stride + chopped_addr;
                        attrs[k].stride = 0;
                        attrs[k++].elements |= MALI_ATTR_LINEAR;
                } else if (ctx->instance_count <= 1) {
                        /* Normal, non-instanced attributes */
                        attrs[k++].elements |= MALI_ATTR_LINEAR;
                } else {
                        unsigned instance_shift = vp->instance_shift;
                        unsigned instance_odd = vp->instance_odd;

                        k += panfrost_vertex_instanced(ctx->padded_count,
                                                       instance_shift,
                                                       instance_odd,
                                                       divisor, &attrs[k]);
                }
        }

        /* Add special gl_VertexID/gl_InstanceID buffers */

        panfrost_vertex_id(ctx->padded_count, &attrs[k]);
        so->hw[PAN_VERTEX_ID].index = k++;
        panfrost_instance_id(ctx->padded_count, &attrs[k]);
        so->hw[PAN_INSTANCE_ID].index = k++;

        /* Upload whatever we emitted and go */

        vp->postfix.attributes = panfrost_upload_transient(batch, attrs,
                                                           k * sizeof(*attrs));
}

static mali_ptr
panfrost_emit_varyings(struct panfrost_batch *batch, union mali_attr *slot,
                       unsigned stride, unsigned count)
{
        /* Fill out the descriptor */
        slot->stride = stride;
        slot->size = stride * count;
        slot->shift = slot->extra_flags = 0;

        struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
                                                                        slot->size);

        slot->elements = transfer.gpu | MALI_ATTR_LINEAR;

        return transfer.gpu;
}

static void
panfrost_emit_streamout(struct panfrost_batch *batch, union mali_attr *slot,
                        unsigned stride, unsigned offset, unsigned count,
                        struct pipe_stream_output_target *target)
{
        /* Fill out the descriptor */
        slot->stride = stride * 4;
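        /* Gallium stream-out strides are in dwords but the hardware wants
         * bytes, hence the * 4 above; a dst_offset of 2 dwords likewise
         * becomes byte offset 8 when captured varyings are linked. */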
        slot->shift = slot->extra_flags = 0;

        unsigned max_size = target->buffer_size;
        unsigned expected_size = slot->stride * count;

        slot->size = MIN2(max_size, expected_size);

        /* Grab the BO and bind it to the batch */
        struct panfrost_bo *bo = pan_resource(target->buffer)->bo;

        /* Varyings are WRITE from the perspective of the VERTEX but READ from
         * the perspective of the TILER and FRAGMENT.
         */
        panfrost_batch_add_bo(batch, bo,
                              PAN_BO_ACCESS_SHARED |
                              PAN_BO_ACCESS_RW |
                              PAN_BO_ACCESS_VERTEX_TILER |
                              PAN_BO_ACCESS_FRAGMENT);

        mali_ptr addr = bo->gpu + target->buffer_offset + (offset * slot->stride);
        slot->elements = addr;
}

/* Given a shader and buffer indices, link varying metadata together */

static bool
is_special_varying(gl_varying_slot loc)
{
        switch (loc) {
        case VARYING_SLOT_POS:
        case VARYING_SLOT_PSIZ:
        case VARYING_SLOT_PNTC:
        case VARYING_SLOT_FACE:
                return true;
        default:
                return false;
        }
}
, struct panfrost_shader_state
*ss
,
1402 signed general
, signed gl_Position
,
1403 signed gl_PointSize
, signed gl_PointCoord
,
1404 signed gl_FrontFacing
)
1406 struct mali_attr_meta
*out
= (struct mali_attr_meta
*) outptr
;
1408 for (unsigned i
= 0; i
< ss
->varying_count
; ++i
) {
1409 gl_varying_slot location
= ss
->varyings_loc
[i
];
1413 case VARYING_SLOT_POS
:
1414 index
= gl_Position
;
1416 case VARYING_SLOT_PSIZ
:
1417 index
= gl_PointSize
;
1419 case VARYING_SLOT_PNTC
:
1420 index
= gl_PointCoord
;
1422 case VARYING_SLOT_FACE
:
1423 index
= gl_FrontFacing
;
1431 out
[i
].index
= index
;

static bool
has_point_coord(unsigned mask, gl_varying_slot loc)
{
        if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
                return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
        else if (loc == VARYING_SLOT_PNTC)
                return (mask & (1 << 8));
        else
                return false;
}

/* Helpers for manipulating stream out information so we can pack varyings
 * accordingly. Compute the src_offset for a given captured varying */

static struct pipe_stream_output *
pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
{
        for (unsigned i = 0; i < info->num_outputs; ++i) {
                if (info->output[i].register_index == loc)
                        return &info->output[i];
        }

        unreachable("Varying not captured");
}
1460 /* TODO: Integers */
1461 static enum mali_format
1462 pan_xfb_format(unsigned nr_components
)
1464 switch (nr_components
) {
1465 case 1: return MALI_R32F
;
1466 case 2: return MALI_RG32F
;
1467 case 3: return MALI_RGB32F
;
1468 case 4: return MALI_RGBA32F
;
1469 default: unreachable("Invalid format");

void
panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
                                 unsigned vertex_count,
                                 struct midgard_payload_vertex_tiler *vp,
                                 struct midgard_payload_vertex_tiler *tp)
{
        /* Load the shaders */
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_state *vs, *fs;
        unsigned int num_gen_varyings = 0;
        size_t vs_size, fs_size;

        /* Allocate the varying descriptor */

        vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
        fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
        vs_size = sizeof(struct mali_attr_meta) * vs->varying_count;
        fs_size = sizeof(struct mali_attr_meta) * fs->varying_count;

        struct panfrost_transfer trans = panfrost_allocate_transient(batch,
                                                                     vs_size +
                                                                     fs_size);

        struct pipe_stream_output_info *so = &vs->stream_output;

        /* Check if this varying is linked by us. This is the case for
         * general-purpose, non-captured varyings. If it is, link it. If it's
         * not, use the provided stream out information to determine the
         * offset, since it was already linked for us. */

        for (unsigned i = 0; i < vs->varying_count; i++) {
                gl_varying_slot loc = vs->varyings_loc[i];

                bool special = is_special_varying(loc);
                bool captured = ((vs->so_mask & (1ll << loc)) ? true : false);

                if (captured) {
                        struct pipe_stream_output *o = pan_get_so(so, loc);

                        unsigned dst_offset = o->dst_offset * 4; /* dwords */
                        vs->varyings[i].src_offset = dst_offset;
                } else if (!special) {
                        vs->varyings[i].src_offset = 16 * (num_gen_varyings++);
                }
        }

        /* Conversely, we need to set src_offset for the captured varyings.
         * Here, the layout is defined by the stream out info, not us */

        /* Link up with fragment varyings */
        bool reads_point_coord = fs->reads_point_coord;

        for (unsigned i = 0; i < fs->varying_count; i++) {
                gl_varying_slot loc = fs->varyings_loc[i];
                unsigned src_offset;
                signed vs_idx = -1;

                /* Link up */
                for (unsigned j = 0; j < vs->varying_count; ++j) {
                        if (vs->varyings_loc[j] == loc) {
                                vs_idx = j;
                                break;
                        }
                }

                /* Either assign or reuse */
                if (vs_idx >= 0)
                        src_offset = vs->varyings[vs_idx].src_offset;
                else
                        src_offset = 16 * (num_gen_varyings++);

                fs->varyings[i].src_offset = src_offset;

                if (has_point_coord(fs->point_sprite_mask, loc))
                        reads_point_coord = true;
        }

        memcpy(trans.cpu, vs->varyings, vs_size);
        memcpy(trans.cpu + vs_size, fs->varyings, fs_size);
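
        /* Each general varying linked above occupies a 16-byte (fp32 vec4)
         * slot: with two such varyings the src_offsets are 0 and 16, and the
         * shared buffer is later emitted with stride num_gen_varyings * 16. */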

        union mali_attr varyings[PIPE_MAX_ATTRIBS] = {0};

        /* Figure out how many streamout buffers could be bound */
        unsigned so_count = ctx->streamout.num_targets;
        for (unsigned i = 0; i < vs->varying_count; i++) {
                gl_varying_slot loc = vs->varyings_loc[i];

                bool captured = ((vs->so_mask & (1ll << loc)) ? true : false);
                if (!captured) continue;

                struct pipe_stream_output *o = pan_get_so(so, loc);
                so_count = MAX2(so_count, o->output_buffer + 1);
        }

        signed idx = so_count;
        signed general = idx++;
        signed gl_Position = idx++;
        signed gl_PointSize = vs->writes_point_size ? (idx++) : -1;
        signed gl_PointCoord = reads_point_coord ? (idx++) : -1;
        signed gl_FrontFacing = fs->reads_face ? (idx++) : -1;
        signed gl_FragCoord = fs->reads_frag_coord ? (idx++) : -1;

        /* Emit the stream out buffers */

        unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
                                                           ctx->vertex_count);

        for (unsigned i = 0; i < so_count; ++i) {
                if (i < ctx->streamout.num_targets) {
                        panfrost_emit_streamout(batch, &varyings[i],
                                                so->stride[i],
                                                ctx->streamout.offsets[i],
                                                out_count,
                                                ctx->streamout.targets[i]);
                } else {
                        /* Emit a dummy buffer */
                        panfrost_emit_varyings(batch, &varyings[i],
                                               so->stride[i] * 4,
                                               out_count);

                        /* Clear the attribute type */
                        varyings[i].elements &= ~0xF;
                }
        }

        panfrost_emit_varyings(batch, &varyings[general],
                               num_gen_varyings * 16,
                               vertex_count);

        mali_ptr varyings_p;

        /* fp32 vec4 gl_Position */
        varyings_p = panfrost_emit_varyings(batch, &varyings[gl_Position],
                                            sizeof(float) * 4, vertex_count);
        tp->postfix.position_varying = varyings_p;

        if (panfrost_writes_point_size(ctx)) {
                varyings_p = panfrost_emit_varyings(batch,
                                                    &varyings[gl_PointSize],
                                                    2, vertex_count);
                tp->primitive_size.pointer = varyings_p;
        }

        if (reads_point_coord)
                varyings[gl_PointCoord].elements = MALI_VARYING_POINT_COORD;

        if (fs->reads_face)
                varyings[gl_FrontFacing].elements = MALI_VARYING_FRONT_FACING;

        if (fs->reads_frag_coord)
                varyings[gl_FragCoord].elements = MALI_VARYING_FRAG_COORD;

        /* Let's go ahead and link varying meta to the buffer in question, now
         * that that information is available. VARYING_SLOT_POS is mapped to
         * gl_FragCoord for fragment shaders but gl_Position for vertex
         * shaders. */

        panfrost_emit_varying_meta(trans.cpu, vs, general, gl_Position,
                                   gl_PointSize, gl_PointCoord,
                                   gl_FrontFacing);

        panfrost_emit_varying_meta(trans.cpu + vs_size, fs, general,
                                   gl_FragCoord, gl_PointSize,
                                   gl_PointCoord, gl_FrontFacing);

        /* Replace streamout */

        struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
        struct mali_attr_meta *ofs = ovs + vs->varying_count;

        for (unsigned i = 0; i < vs->varying_count; i++) {
                gl_varying_slot loc = vs->varyings_loc[i];

                bool captured = ((vs->so_mask & (1ll << loc)) ? true : false);
                if (!captured)
                        continue;

                struct pipe_stream_output *o = pan_get_so(so, loc);
                ovs[i].index = o->output_buffer;

                /* Set the type appropriately. TODO: Integer varyings XXX */
                assert(o->stream == 0);
                ovs[i].format = pan_xfb_format(o->num_components);
                ovs[i].swizzle = panfrost_get_default_swizzle(o->num_components);

                /* Link to the fragment */
                signed fs_idx = -1;

                for (unsigned j = 0; j < fs->varying_count; ++j) {
                        if (fs->varyings_loc[j] == loc) {
                                fs_idx = j;
                                break;
                        }
                }

                if (fs_idx >= 0) {
                        ofs[fs_idx].index = ovs[i].index;
                        ofs[fs_idx].format = ovs[i].format;
                        ofs[fs_idx].swizzle = ovs[i].swizzle;
                }
        }

        /* Replace point sprite */
        for (unsigned i = 0; i < fs->varying_count; i++) {
                /* If we have a point sprite replacement, handle that here. We
                 * have to translate location first. TODO: Flip y in shader.
                 * We're already keying ... just time crunch .. */

                if (has_point_coord(fs->point_sprite_mask,
                                    fs->varyings_loc[i])) {
                        ofs[i].index = gl_PointCoord;

                        /* Swizzle out the z/w to 0/1 */
                        ofs[i].format = MALI_RG16F;
                        ofs[i].swizzle = panfrost_get_default_swizzle(2);
                }
        }
1694 for (unsigned i
= 0; i
< so_count
; ++i
) {
1695 if (varyings
[i
].elements
< MALI_RECORD_SPECIAL
)
1698 unsigned align
= (varyings
[i
].elements
& 63);
1700 /* While we're at it, the SO buffers are linear */
1703 varyings
[i
].elements
|= MALI_ATTR_LINEAR
;
1707 /* We need to adjust alignment */
1708 varyings
[i
].elements
&= ~63;
1709 varyings
[i
].elements
|= MALI_ATTR_LINEAR
;
1710 varyings
[i
].size
+= align
;
1712 for (unsigned v
= 0; v
< vs
->varying_count
; ++v
) {
1713 if (ovs
[v
].index
!= i
)
1716 ovs
[v
].src_offset
= vs
->varyings
[v
].src_offset
+ align
;
1719 for (unsigned f
= 0; f
< fs
->varying_count
; ++f
) {
1720 if (ofs
[f
].index
!= i
)
1723 ofs
[f
].src_offset
= fs
->varyings
[f
].src_offset
+ align
;
1727 varyings_p
= panfrost_upload_transient(batch
, varyings
,
1728 idx
* sizeof(*varyings
));
1729 vp
->postfix
.varyings
= varyings_p
;
1730 tp
->postfix
.varyings
= varyings_p
;
1732 vp
->postfix
.varying_meta
= trans
.gpu
;
1733 tp
->postfix
.varying_meta
= trans
.gpu
+ vs_size
;

void
panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
                                struct midgard_payload_vertex_tiler *vp,
                                struct midgard_payload_vertex_tiler *tp)
{
        struct panfrost_context *ctx = batch->ctx;
        bool wallpapering = ctx->wallpaper_batch && batch->tiler_dep;

        if (wallpapering) {
                /* Inject in reverse order, with "predicted" job indices.
                 * THIS IS A HACK XXX */
                panfrost_new_job(batch, JOB_TYPE_TILER, false,
                                 batch->job_index + 2, tp, sizeof(*tp), true);
                panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0,
                                 vp, sizeof(*vp), true);
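
                /* Assuming panfrost_new_job assigns indices sequentially: the
                 * tiler job created first takes job_index + 1 and declares a
                 * dependency on job_index + 2, the index the vertex job
                 * created just above then receives -- hence "predicted". */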
                return;
        }

        /* If rasterizer discard is enabled, only submit the vertex job */

        bool rasterizer_discard = ctx->rasterizer &&
                                  ctx->rasterizer->base.rasterizer_discard;

        unsigned vertex = panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0,
                                           vp, sizeof(*vp), false);

        if (rasterizer_discard)
                return;

        panfrost_new_job(batch, JOB_TYPE_TILER, false, vertex, tp, sizeof(*tp),
                         false);
}