panfrost: Move pool routines to common code
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
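/* On Bifrost, the vertex/tiler postfix points at a thread-local storage
 * descriptor rather than a framebuffer. Build that descriptor (scratchpad
 * sized from the batch's stack requirements) and upload it transiently. */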
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 unsigned shift = panfrost_get_stack_shift(batch->stack_size);
62 struct mali_shared_memory shared = {
63 .stack_shift = shift,
64 .scratchpad = panfrost_batch_get_scratchpad(batch, shift, dev->thread_tls_alloc, dev->core_count)->gpu,
65 .shared_workgroup_count = ~0,
66 };
67 postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
68 }
69
70 static void
71 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
72 struct mali_vertex_tiler_postfix *postfix)
73 {
74 struct panfrost_device *dev = pan_device(ctx->base.screen);
75 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
76
77 /* If we haven't already, reserve space for the framebuffer descriptor */
78
79 if (!batch->framebuffer.gpu) {
80 unsigned size = (dev->quirks & MIDGARD_SFBD) ?
81 sizeof(struct mali_single_framebuffer) :
82 sizeof(struct mali_framebuffer);
83
84 batch->framebuffer = panfrost_pool_alloc(&batch->pool, size);
85
86 /* Tag the pointer */
87 if (!(dev->quirks & MIDGARD_SFBD))
88 batch->framebuffer.gpu |= MALI_MFBD;
89 }
90
91 postfix->shared_memory = batch->framebuffer.gpu;
92 }
93
94 static void
95 panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
96 struct mali_vertex_tiler_prefix *prefix,
97 struct mali_vertex_tiler_postfix *postfix)
98 {
99 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
100
101 postfix->gl_enables |= 0x7;
102 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
103 rasterizer && rasterizer->base.front_ccw);
104 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
105 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
106 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
107 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
108 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
109 rasterizer && rasterizer->base.flatshade_first);
110 }
111
112 void
113 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
114 struct mali_vertex_tiler_prefix *prefix,
115 union midgard_primitive_size *primitive_size)
116 {
117 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
118
119 if (!panfrost_writes_point_size(ctx)) {
120 bool points = prefix->draw_mode == MALI_POINTS;
121 float val = 0.0f;
122
123 if (rasterizer)
124 val = points ?
125 rasterizer->base.point_size :
126 rasterizer->base.line_width;
127
128 primitive_size->constant = val;
129 }
130 }
131
132 static void
133 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
134 struct mali_vertex_tiler_postfix *postfix)
135 {
136 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
137 if (ctx->occlusion_query) {
138 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
139 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
140 PAN_BO_ACCESS_SHARED |
141 PAN_BO_ACCESS_RW |
142 PAN_BO_ACCESS_FRAGMENT);
143 } else {
144 postfix->occlusion_counter = 0;
145 }
146 }
147
148 void
149 panfrost_vt_init(struct panfrost_context *ctx,
150 enum pipe_shader_type stage,
151 struct mali_vertex_tiler_prefix *prefix,
152 struct mali_vertex_tiler_postfix *postfix)
153 {
154 struct panfrost_device *device = pan_device(ctx->base.screen);
155
156 if (!ctx->shader[stage])
157 return;
158
159 memset(prefix, 0, sizeof(*prefix));
160 memset(postfix, 0, sizeof(*postfix));
161
162 if (device->quirks & IS_BIFROST) {
163 postfix->gl_enables = 0x2;
164 panfrost_vt_emit_shared_memory(ctx, postfix);
165 } else {
166 postfix->gl_enables = 0x6;
167 panfrost_vt_attach_framebuffer(ctx, postfix);
168 }
169
170 if (stage == PIPE_SHADER_FRAGMENT) {
171 panfrost_vt_update_occlusion_query(ctx, postfix);
172 panfrost_vt_update_rasterizer(ctx, prefix, postfix);
173 }
174 }
175
176 static unsigned
177 panfrost_translate_index_size(unsigned size)
178 {
179 switch (size) {
180 case 1:
181 return MALI_DRAW_INDEXED_UINT8;
182
183 case 2:
184 return MALI_DRAW_INDEXED_UINT16;
185
186 case 4:
187 return MALI_DRAW_INDEXED_UINT32;
188
189 default:
190 unreachable("Invalid index size");
191 }
192 }
193
194 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
195 * good for the duration of the draw (transient), though it could last longer.
196 * Also gets the bounds on the index buffer for the range accessed by the draw.
197 * We do these operations together because there are natural optimizations
198 * which require them to be together. */
199
200 static mali_ptr
201 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
202 const struct pipe_draw_info *info,
203 unsigned *min_index, unsigned *max_index)
204 {
205 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
206 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
207 off_t offset = info->start * info->index_size;
208 bool needs_indices = true;
209 mali_ptr out = 0;
210
211 if (info->max_index != ~0u) {
212 *min_index = info->min_index;
213 *max_index = info->max_index;
214 needs_indices = false;
215 }
216
217 if (!info->has_user_indices) {
218 /* Only resources can be directly mapped */
219 panfrost_batch_add_bo(batch, rsrc->bo,
220 PAN_BO_ACCESS_SHARED |
221 PAN_BO_ACCESS_READ |
222 PAN_BO_ACCESS_VERTEX_TILER);
223 out = rsrc->bo->gpu + offset;
224
225 /* Check the cache */
226 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
227 info->start,
228 info->count,
229 min_index,
230 max_index);
231 } else {
232 /* Otherwise, we need to upload to transient memory */
233 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
234 out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
235 info->count *
236 info->index_size);
237 }
238
239 if (needs_indices) {
240 /* Fallback */
241 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
242
243 if (!info->has_user_indices)
244 panfrost_minmax_cache_add(rsrc->index_cache,
245 info->start, info->count,
246 *min_index, *max_index);
247 }
248
249 return out;
250 }
251
252 void
253 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
254 const struct pipe_draw_info *info,
255 enum mali_draw_mode draw_mode,
256 struct mali_vertex_tiler_postfix *vertex_postfix,
257 struct mali_vertex_tiler_prefix *tiler_prefix,
258 struct mali_vertex_tiler_postfix *tiler_postfix,
259 unsigned *vertex_count,
260 unsigned *padded_count)
261 {
262 tiler_prefix->draw_mode = draw_mode;
263
264 unsigned draw_flags = 0;
265
266 if (panfrost_writes_point_size(ctx))
267 draw_flags |= MALI_DRAW_VARYING_SIZE;
268
269 if (info->primitive_restart)
270 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
271
272 /* These don't make much sense */
273
274 draw_flags |= 0x3000;
275
276 if (info->index_size) {
277 unsigned min_index = 0, max_index = 0;
278
279 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
280 info,
281 &min_index,
282 &max_index);
283
284 /* Use the corresponding values */
285 *vertex_count = max_index - min_index + 1;
286 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
287 tiler_prefix->offset_bias_correction = -min_index;
288 tiler_prefix->index_count = MALI_POSITIVE(info->count);
289 draw_flags |= panfrost_translate_index_size(info->index_size);
290 } else {
291 tiler_prefix->indices = 0;
292 *vertex_count = ctx->vertex_count;
293 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
294 tiler_prefix->offset_bias_correction = 0;
295 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
296 }
297
298 tiler_prefix->unknown_draw = draw_flags;
299
300 /* Encode the padded vertex count */
301
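/* The padded count is passed to the hardware as an odd factor and a
 * power-of-two shift, i.e. padded_count = (2k + 1) << shift. For example,
 * padded_count = 12 = 0b1100 gives shift = 2 and k = 1, since
 * (2*1 + 1) << 2 = 12. */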
302 if (info->instance_count > 1) {
303 *padded_count = panfrost_padded_vertex_count(*vertex_count);
304
305 unsigned shift = __builtin_ctz(ctx->padded_count);
306 unsigned k = ctx->padded_count >> (shift + 1);
307
308 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
309 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
310 } else {
311 *padded_count = *vertex_count;
312
313 /* Reset instancing state */
314 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
315 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
316 }
317 }
318
319 static void
320 panfrost_shader_meta_init(struct panfrost_context *ctx,
321 enum pipe_shader_type st,
322 struct mali_shader_meta *meta)
323 {
324 const struct panfrost_device *dev = pan_device(ctx->base.screen);
325 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
326
327 memset(meta, 0, sizeof(*meta));
328 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
329 meta->attribute_count = ss->attribute_count;
330 meta->varying_count = ss->varying_count;
331 meta->texture_count = ctx->sampler_view_count[st];
332 meta->sampler_count = ctx->sampler_count[st];
333
334 if (dev->quirks & IS_BIFROST) {
335 if (st == PIPE_SHADER_VERTEX)
336 meta->bifrost1.unk1 = 0x800000;
337 else {
338 /* First clause ATEST |= 0x4000000.
339 * Less than 32 regs |= 0x200 */
340 meta->bifrost1.unk1 = 0x950020;
341 }
342
343 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
344 if (st == PIPE_SHADER_VERTEX)
345 meta->bifrost2.preload_regs = 0xC0;
346 else {
347 meta->bifrost2.preload_regs = 0x1;
348 SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
349 }
350
351 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
352 ss->uniform_cutoff);
353 } else {
354 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
355 ss->uniform_cutoff);
356 meta->midgard1.work_count = ss->work_reg_count;
357
358 /* TODO: This is not conformant on ES3 */
359 meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
360
361 meta->midgard1.flags_lo = 0x20;
362 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
363
364 SET_BIT(meta->midgard1.flags_hi, MALI_WRITES_GLOBAL, ss->writes_global);
365 }
366 }
367
368 static unsigned
369 panfrost_translate_compare_func(enum pipe_compare_func in)
370 {
371 switch (in) {
372 case PIPE_FUNC_NEVER:
373 return MALI_FUNC_NEVER;
374
375 case PIPE_FUNC_LESS:
376 return MALI_FUNC_LESS;
377
378 case PIPE_FUNC_EQUAL:
379 return MALI_FUNC_EQUAL;
380
381 case PIPE_FUNC_LEQUAL:
382 return MALI_FUNC_LEQUAL;
383
384 case PIPE_FUNC_GREATER:
385 return MALI_FUNC_GREATER;
386
387 case PIPE_FUNC_NOTEQUAL:
388 return MALI_FUNC_NOTEQUAL;
389
390 case PIPE_FUNC_GEQUAL:
391 return MALI_FUNC_GEQUAL;
392
393 case PIPE_FUNC_ALWAYS:
394 return MALI_FUNC_ALWAYS;
395
396 default:
397 unreachable("Invalid func");
398 }
399 }
400
401 static unsigned
402 panfrost_translate_stencil_op(enum pipe_stencil_op in)
403 {
404 switch (in) {
405 case PIPE_STENCIL_OP_KEEP:
406 return MALI_STENCIL_KEEP;
407
408 case PIPE_STENCIL_OP_ZERO:
409 return MALI_STENCIL_ZERO;
410
411 case PIPE_STENCIL_OP_REPLACE:
412 return MALI_STENCIL_REPLACE;
413
414 case PIPE_STENCIL_OP_INCR:
415 return MALI_STENCIL_INCR;
416
417 case PIPE_STENCIL_OP_DECR:
418 return MALI_STENCIL_DECR;
419
420 case PIPE_STENCIL_OP_INCR_WRAP:
421 return MALI_STENCIL_INCR_WRAP;
422
423 case PIPE_STENCIL_OP_DECR_WRAP:
424 return MALI_STENCIL_DECR_WRAP;
425
426 case PIPE_STENCIL_OP_INVERT:
427 return MALI_STENCIL_INVERT;
428
429 default:
430 unreachable("Invalid stencil op");
431 }
432 }
433
434 static unsigned
435 translate_tex_wrap(enum pipe_tex_wrap w)
436 {
437 switch (w) {
438 case PIPE_TEX_WRAP_REPEAT:
439 return MALI_WRAP_REPEAT;
440
441 case PIPE_TEX_WRAP_CLAMP:
442 return MALI_WRAP_CLAMP;
443
444 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
445 return MALI_WRAP_CLAMP_TO_EDGE;
446
447 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
448 return MALI_WRAP_CLAMP_TO_BORDER;
449
450 case PIPE_TEX_WRAP_MIRROR_REPEAT:
451 return MALI_WRAP_MIRRORED_REPEAT;
452
453 case PIPE_TEX_WRAP_MIRROR_CLAMP:
454 return MALI_WRAP_MIRRORED_CLAMP;
455
456 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
457 return MALI_WRAP_MIRRORED_CLAMP_TO_EDGE;
458
459 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
460 return MALI_WRAP_MIRRORED_CLAMP_TO_BORDER;
461
462 default:
463 unreachable("Invalid wrap");
464 }
465 }
466
467 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
468 struct mali_sampler_descriptor *hw)
469 {
470 unsigned func = panfrost_translate_compare_func(cso->compare_func);
471 bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
472 bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
473 bool mip_linear = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;
474 unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
475 unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
476 unsigned mip_filter = mip_linear ?
477 (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
478 unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;
479
480 *hw = (struct mali_sampler_descriptor) {
481 .filter_mode = min_filter | mag_filter | mip_filter |
482 normalized,
483 .wrap_s = translate_tex_wrap(cso->wrap_s),
484 .wrap_t = translate_tex_wrap(cso->wrap_t),
485 .wrap_r = translate_tex_wrap(cso->wrap_r),
486 .compare_func = panfrost_flip_compare_func(func),
487 .border_color = {
488 cso->border_color.f[0],
489 cso->border_color.f[1],
490 cso->border_color.f[2],
491 cso->border_color.f[3]
492 },
493 .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
494 .max_lod = FIXED_16(cso->max_lod, false),
495 .lod_bias = FIXED_16(cso->lod_bias, true), /* can be negative */
496 .seamless_cube_map = cso->seamless_cube_map,
497 };
498
499 /* If necessary, we disable mipmapping in the sampler descriptor by
500 * clamping the LOD as tight as possible (from 0 to epsilon,
501 * essentially -- remember these are fixed point numbers, so
502 * epsilon=1/256) */
503
504 if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
505 hw->max_lod = hw->min_lod + 1;
506 }
507
508 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
509 struct bifrost_sampler_descriptor *hw)
510 {
511 *hw = (struct bifrost_sampler_descriptor) {
512 .unk1 = 0x1,
513 .wrap_s = translate_tex_wrap(cso->wrap_s),
514 .wrap_t = translate_tex_wrap(cso->wrap_t),
515 .wrap_r = translate_tex_wrap(cso->wrap_r),
516 .unk8 = 0x8,
517 .min_filter = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST,
518 .norm_coords = cso->normalized_coords,
519 .mip_filter = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR,
520 .mag_filter = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR,
521 .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
522 .max_lod = FIXED_16(cso->max_lod, false),
523 };
524
525 /* If necessary, we disable mipmapping in the sampler descriptor by
526 * clamping the LOD as tight as possible (from 0 to epsilon,
527 * essentially -- remember these are fixed point numbers, so
528 * epsilon=1/256) */
529
530 if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
531 hw->max_lod = hw->min_lod + 1;
532 }
533
534 static void
535 panfrost_make_stencil_state(const struct pipe_stencil_state *in,
536 struct mali_stencil_test *out)
537 {
538 out->ref = 0; /* Gallium gets it from elsewhere */
539
540 out->mask = in->valuemask;
541 out->func = panfrost_translate_compare_func(in->func);
542 out->sfail = panfrost_translate_stencil_op(in->fail_op);
543 out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
544 out->dppass = panfrost_translate_stencil_op(in->zpass_op);
545 }
546
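/* Fold the rasterizer state (MSAA, polygon offset, depth clipping) into the
 * fragment shader descriptor, falling back to safe defaults when no
 * rasterizer is bound */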
547 static void
548 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
549 struct mali_shader_meta *fragmeta)
550 {
551 if (!ctx->rasterizer) {
552 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
553 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
554 fragmeta->depth_units = 0.0f;
555 fragmeta->depth_factor = 0.0f;
556 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
557 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
558 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, true);
559 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, true);
560 return;
561 }
562
563 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
564
565 bool msaa = rast->multisample;
566
567 /* TODO: Sample size */
568 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
569 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
570 fragmeta->depth_units = rast->offset_units * 2.0f;
571 fragmeta->depth_factor = rast->offset_scale;
572
573 /* XXX: Which bit is which? Does this maybe allow offsetting non-triangle primitives? */
574
575 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
576 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
577
578 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
579 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
580 }
581
582 static void
583 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
584 struct mali_shader_meta *fragmeta)
585 {
586 const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
587 int zfunc = PIPE_FUNC_ALWAYS;
588
589 if (!zsa) {
590 struct pipe_stencil_state default_stencil = {
591 .enabled = 0,
592 .func = PIPE_FUNC_ALWAYS,
593 .fail_op = MALI_STENCIL_KEEP,
594 .zfail_op = MALI_STENCIL_KEEP,
595 .zpass_op = MALI_STENCIL_KEEP,
596 .writemask = 0xFF,
597 .valuemask = 0xFF
598 };
599
600 panfrost_make_stencil_state(&default_stencil,
601 &fragmeta->stencil_front);
602 fragmeta->stencil_mask_front = default_stencil.writemask;
603 fragmeta->stencil_back = fragmeta->stencil_front;
604 fragmeta->stencil_mask_back = default_stencil.writemask;
605 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
606 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
607 } else {
608 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
609 zsa->stencil[0].enabled);
610 panfrost_make_stencil_state(&zsa->stencil[0],
611 &fragmeta->stencil_front);
612 fragmeta->stencil_mask_front = zsa->stencil[0].writemask;
613 fragmeta->stencil_front.ref = ctx->stencil_ref.ref_value[0];
614
615 /* If back-stencil is not enabled, use the front values */
616
617 if (zsa->stencil[1].enabled) {
618 panfrost_make_stencil_state(&zsa->stencil[1],
619 &fragmeta->stencil_back);
620 fragmeta->stencil_mask_back = zsa->stencil[1].writemask;
621 fragmeta->stencil_back.ref = ctx->stencil_ref.ref_value[1];
622 } else {
623 fragmeta->stencil_back = fragmeta->stencil_front;
624 fragmeta->stencil_mask_back = fragmeta->stencil_mask_front;
625 fragmeta->stencil_back.ref = fragmeta->stencil_front.ref;
626 }
627
628 if (zsa->depth.enabled)
629 zfunc = zsa->depth.func;
630
631 /* Depth state (TODO: Refactor) */
632
633 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
634 zsa->depth.writemask);
635 }
636
637 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
638 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
639 }
640
641 static bool
642 panfrost_fs_required(
643 struct panfrost_shader_state *fs,
644 struct panfrost_blend_final *blend,
645 unsigned rt_count)
646 {
647 /* If we generally have side effects */
648 if (fs->fs_sidefx)
649 return true;
650
651 /* If colour is written we need to execute */
652 for (unsigned i = 0; i < rt_count; ++i) {
653 if (!blend[i].no_colour)
654 return true;
655 }
656
657 /* If depth is written and not implied we need to execute.
658 * TODO: Predicate on Z/S writes being enabled */
659 return (fs->writes_depth || fs->writes_stencil);
660 }
661
662 static void
663 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
664 struct mali_shader_meta *fragmeta,
665 void *rts)
666 {
667 const struct panfrost_device *dev = pan_device(ctx->base.screen);
668 struct panfrost_shader_state *fs;
669 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
670
671 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
672 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
673 !ctx->blend->base.dither);
674
675 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
676 ctx->blend->base.alpha_to_coverage);
677
678 /* Get blending setup */
679 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
680
681 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
682 unsigned shader_offset = 0;
683 struct panfrost_bo *shader_bo = NULL;
684
685 for (unsigned c = 0; c < rt_count; ++c)
686 blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
687 &shader_offset);
688
689 /* Disable shader execution if we can */
690 if (dev->quirks & MIDGARD_SHADERLESS
691 && !panfrost_fs_required(fs, blend, rt_count)) {
692 fragmeta->shader = 0;
693 fragmeta->attribute_count = 0;
694 fragmeta->varying_count = 0;
695 fragmeta->texture_count = 0;
696 fragmeta->sampler_count = 0;
697
698 /* This feature is not known to work on Bifrost */
699 fragmeta->midgard1.work_count = 1;
700 fragmeta->midgard1.uniform_count = 0;
701 fragmeta->midgard1.uniform_buffer_count = 0;
702 }
703
704 /* If there is a blend shader, work registers are shared. We impose 8
705 * work registers as a limit for blend shaders. Should be lower XXX */
706
707 if (!(dev->quirks & IS_BIFROST)) {
708 for (unsigned c = 0; c < rt_count; ++c) {
709 if (blend[c].is_shader) {
710 fragmeta->midgard1.work_count =
711 MAX2(fragmeta->midgard1.work_count, 8);
712 }
713 }
714 }
715
716 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
717 * copied to the blend_meta appended (by convention), but this is the
718 * field actually read by the hardware. (Or maybe both are read...?).
719 * Specify the last RTi with a blend shader. */
720
721 fragmeta->blend.shader = 0;
722
723 for (signed rt = (rt_count - 1); rt >= 0; --rt) {
724 if (!blend[rt].is_shader)
725 continue;
726
727 fragmeta->blend.shader = blend[rt].shader.gpu |
728 blend[rt].shader.first_tag;
729 break;
730 }
731
732 if (dev->quirks & MIDGARD_SFBD) {
733 /* On platforms with only a single render target (SFBD), the blend
734 * information is inside the shader meta itself. We additionally
735 * need to signal CAN_DISCARD for nontrivial blend modes (so
736 * we're able to read back the destination buffer) */
737
738 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
739 blend[0].is_shader);
740
741 if (!blend[0].is_shader) {
742 fragmeta->blend.equation = *blend[0].equation.equation;
743 fragmeta->blend.constant = blend[0].equation.constant;
744 }
745
746 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
747 !blend[0].no_blending || fs->can_discard);
748 return;
749 }
750
751 if (dev->quirks & IS_BIFROST) {
752 bool no_blend = true;
753
754 for (unsigned i = 0; i < rt_count; ++i)
755 no_blend &= (blend[i].no_blending | blend[i].no_colour);
756
757 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
758 !fs->can_discard && !fs->writes_depth && no_blend);
759 }
760
761 /* Additional blend descriptor tacked on for jobs using MFBD */
762
763 for (unsigned i = 0; i < rt_count; ++i) {
764 unsigned flags = 0;
765
766 if (ctx->pipe_framebuffer.nr_cbufs > i && !blend[i].no_colour) {
767 flags = 0x200;
768
769 bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
770 (ctx->pipe_framebuffer.cbufs[i]) &&
771 util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
772
773 SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
774 SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
775 SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
776 SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
777 }
778
779 if (dev->quirks & IS_BIFROST) {
780 struct bifrost_blend_rt *brts = rts;
781
782 brts[i].flags = flags;
783
784 if (blend[i].is_shader) {
785 /* The blend shader's address needs to be at
786 * the same top 32 bits as the fragment shader.
787 * TODO: Ensure that's always the case.
788 */
789 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
790 (fs->bo->gpu & (0xffffffffull << 32)));
791 brts[i].shader = blend[i].shader.gpu;
792 brts[i].unk2 = 0x0;
793 } else if (ctx->pipe_framebuffer.nr_cbufs > i) {
794 enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
795 const struct util_format_description *format_desc;
796 format_desc = util_format_description(format);
797
798 brts[i].equation = *blend[i].equation.equation;
799
800 /* TODO: this is a bit more complicated */
801 brts[i].constant = blend[i].equation.constant;
802
803 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
804
805 /* 0x19 disables blending and forces REPLACE
806 * mode (equivalent to rgb_mode = alpha_mode =
807 * 0x122, colour mask = 0xF). 0x1a allows
808 * blending. */
809 brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
810
811 brts[i].shader_type = fs->blend_types[i];
812 } else {
813 /* Dummy attachment for depth-only */
814 brts[i].unk2 = 0x3;
815 brts[i].shader_type = fs->blend_types[i];
816 }
817 } else {
818 struct midgard_blend_rt *mrts = rts;
819 mrts[i].flags = flags;
820
821 if (blend[i].is_shader) {
822 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
823 } else {
824 mrts[i].blend.equation = *blend[i].equation.equation;
825 mrts[i].blend.constant = blend[i].equation.constant;
826 }
827 }
828 }
829 }
830
831 static void
832 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
833 struct mali_shader_meta *fragmeta,
834 void *rts)
835 {
836 const struct panfrost_device *dev = pan_device(ctx->base.screen);
837 struct panfrost_shader_state *fs;
838
839 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
840
841 bool msaa = ctx->rasterizer && ctx->rasterizer->base.multisample;
842 fragmeta->coverage_mask = (msaa ? ctx->sample_mask : ~0) & 0xF;
843
844 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
845 fragmeta->unknown2_4 = 0x4e0;
846
847 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
848 * is required (independent of 32-bit/64-bit descriptors), or why it's
849 * not used on later GPU revisions. Otherwise, all shader jobs fault on
850 * these earlier chips (perhaps this is a chicken bit of some kind).
851 * More investigation is needed. */
852
853 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
854
855 if (dev->quirks & IS_BIFROST) {
856 /* TODO */
857 } else {
858 /* Depending on whether it's legal to do so in the given shader, we try to
859 * enable early-z testing. TODO: respect e-z force */
860
861 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
862 !fs->can_discard && !fs->writes_global &&
863 !fs->writes_depth && !fs->writes_stencil &&
864 !ctx->blend->base.alpha_to_coverage);
865
866 /* Add the writes Z/S flags if needed. */
867 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
868 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
869
870 /* Any time texturing is used, derivatives are implicitly calculated,
871 * so we need to enable helper invocations */
872
873 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
874 fs->helper_invocations);
875
876 const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
877
878 bool depth_enabled = fs->writes_depth ||
879 (zsa && zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS);
880
881 SET_BIT(fragmeta->midgard1.flags_lo, 0x400, !depth_enabled && fs->can_discard);
882 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, depth_enabled && fs->can_discard);
883 }
884
885 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
886 panfrost_frag_meta_zsa_update(ctx, fragmeta);
887 panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
888 }
889
890 void
891 panfrost_emit_shader_meta(struct panfrost_batch *batch,
892 enum pipe_shader_type st,
893 struct mali_vertex_tiler_postfix *postfix)
894 {
895 struct panfrost_context *ctx = batch->ctx;
896 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
897
898 if (!ss) {
899 postfix->shader = 0;
900 return;
901 }
902
903 struct mali_shader_meta meta;
904
905 panfrost_shader_meta_init(ctx, st, &meta);
906
907 /* Add the shader BO to the batch. */
908 panfrost_batch_add_bo(batch, ss->bo,
909 PAN_BO_ACCESS_PRIVATE |
910 PAN_BO_ACCESS_READ |
911 panfrost_bo_access_for_stage(st));
912
913 mali_ptr shader_ptr;
914
915 if (st == PIPE_SHADER_FRAGMENT) {
916 struct panfrost_device *dev = pan_device(ctx->base.screen);
917 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
918 size_t desc_size = sizeof(meta);
919 void *rts = NULL;
920 struct panfrost_transfer xfer;
921 unsigned rt_size;
922
923 if (dev->quirks & MIDGARD_SFBD)
924 rt_size = 0;
925 else if (dev->quirks & IS_BIFROST)
926 rt_size = sizeof(struct bifrost_blend_rt);
927 else
928 rt_size = sizeof(struct midgard_blend_rt);
929
930 desc_size += rt_size * rt_count;
931
932 if (rt_size)
933 rts = rzalloc_size(ctx, rt_size * rt_count);
934
935 panfrost_frag_shader_meta_init(ctx, &meta, rts);
936
937 xfer = panfrost_pool_alloc(&batch->pool, desc_size);
938
939 memcpy(xfer.cpu, &meta, sizeof(meta));
940 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
941
942 if (rt_size)
943 ralloc_free(rts);
944
945 shader_ptr = xfer.gpu;
946 } else {
947 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
948 sizeof(meta));
949 }
950
951 postfix->shader = shader_ptr;
952 }
953
954 static void
955 panfrost_mali_viewport_init(struct panfrost_context *ctx,
956 struct mali_viewport *mvp)
957 {
958 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
959
960 /* Clip bounds are encoded as floats. The viewport itself is encoded as
961 * (somewhat) asymmetric ints. */
962
963 const struct pipe_scissor_state *ss = &ctx->scissor;
964
965 memset(mvp, 0, sizeof(*mvp));
966
967 /* By default, do no viewport clipping, i.e. clip to (-inf, inf) in
968 * each direction. Clipping to the viewport in theory should work, but
969 * in practice causes issues when we're not explicitly trying to
970 * scissor */
971
972 *mvp = (struct mali_viewport) {
973 .clip_minx = -INFINITY,
974 .clip_miny = -INFINITY,
975 .clip_maxx = INFINITY,
976 .clip_maxy = INFINITY,
977 };
978
979 /* Always scissor to the viewport by default. */
980 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
981 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
982
983 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
984 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
985
986 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
987 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
988
989 /* Apply the scissor test */
990
991 unsigned minx, miny, maxx, maxy;
992
993 if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
994 minx = MAX2(ss->minx, vp_minx);
995 miny = MAX2(ss->miny, vp_miny);
996 maxx = MIN2(ss->maxx, vp_maxx);
997 maxy = MIN2(ss->maxy, vp_maxy);
998 } else {
999 minx = vp_minx;
1000 miny = vp_miny;
1001 maxx = vp_maxx;
1002 maxy = vp_maxy;
1003 }
1004
1005 /* Hardware needs the min/max to be strictly ordered, so flip if we
1006 * need to. The viewport transformation in the vertex shader will
1007 * handle the negatives if we don't */
1008
1009 if (miny > maxy) {
1010 unsigned temp = miny;
1011 miny = maxy;
1012 maxy = temp;
1013 }
1014
1015 if (minx > maxx) {
1016 unsigned temp = minx;
1017 minx = maxx;
1018 maxx = temp;
1019 }
1020
1021 if (minz > maxz) {
1022 float temp = minz;
1023 minz = maxz;
1024 maxz = temp;
1025 }
1026
1027 /* Clamp to the framebuffer size as a last check */
1028
1029 minx = MIN2(ctx->pipe_framebuffer.width, minx);
1030 maxx = MIN2(ctx->pipe_framebuffer.width, maxx);
1031
1032 miny = MIN2(ctx->pipe_framebuffer.height, miny);
1033 maxy = MIN2(ctx->pipe_framebuffer.height, maxy);
1034
1035 /* Upload */
1036
1037 mvp->viewport0[0] = minx;
1038 mvp->viewport1[0] = MALI_POSITIVE(maxx);
1039
1040 mvp->viewport0[1] = miny;
1041 mvp->viewport1[1] = MALI_POSITIVE(maxy);
1042
1043 bool clip_near = true;
1044 bool clip_far = true;
1045
1046 if (ctx->rasterizer) {
1047 clip_near = ctx->rasterizer->base.depth_clip_near;
1048 clip_far = ctx->rasterizer->base.depth_clip_far;
1049 }
1050
1051 mvp->clip_minz = clip_near ? minz : -INFINITY;
1052 mvp->clip_maxz = clip_far ? maxz : INFINITY;
1053 }
1054
1055 void
1056 panfrost_emit_viewport(struct panfrost_batch *batch,
1057 struct mali_vertex_tiler_postfix *tiler_postfix)
1058 {
1059 struct panfrost_context *ctx = batch->ctx;
1060 struct mali_viewport mvp;
1061
1062 panfrost_mali_viewport_init(batch->ctx, &mvp);
1063
1064 /* Update the job, unless we're doing wallpapering (whose lack of
1065 * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
1066 * just... be faster :) */
1067
1068 if (!ctx->wallpaper_batch)
1069 panfrost_batch_union_scissor(batch, mvp.viewport0[0],
1070 mvp.viewport0[1],
1071 mvp.viewport1[0] + 1,
1072 mvp.viewport1[1] + 1);
1073
1074 tiler_postfix->viewport = panfrost_pool_upload(&batch->pool, &mvp,
1075 sizeof(mvp));
1076 }
1077
1078 static mali_ptr
1079 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
1080 enum pipe_shader_type st,
1081 struct panfrost_constant_buffer *buf,
1082 unsigned index)
1083 {
1084 struct pipe_constant_buffer *cb = &buf->cb[index];
1085 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1086
1087 if (rsrc) {
1088 panfrost_batch_add_bo(batch, rsrc->bo,
1089 PAN_BO_ACCESS_SHARED |
1090 PAN_BO_ACCESS_READ |
1091 panfrost_bo_access_for_stage(st));
1092
1093 /* Alignment guaranteed by
1094 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
1095 return rsrc->bo->gpu + cb->buffer_offset;
1096 } else if (cb->user_buffer) {
1097 return panfrost_pool_upload(&batch->pool,
1098 cb->user_buffer +
1099 cb->buffer_offset,
1100 cb->buffer_size);
1101 } else {
1102 unreachable("No constant buffer");
1103 }
1104 }
1105
1106 struct sysval_uniform {
1107 union {
1108 float f[4];
1109 int32_t i[4];
1110 uint32_t u[4];
1111 uint64_t du[2];
1112 };
1113 };
1114
1115 static void
1116 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
1117 struct sysval_uniform *uniform)
1118 {
1119 struct panfrost_context *ctx = batch->ctx;
1120 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
1121
1122 uniform->f[0] = vp->scale[0];
1123 uniform->f[1] = vp->scale[1];
1124 uniform->f[2] = vp->scale[2];
1125 }
1126
1127 static void
1128 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
1129 struct sysval_uniform *uniform)
1130 {
1131 struct panfrost_context *ctx = batch->ctx;
1132 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
1133
1134 uniform->f[0] = vp->translate[0];
1135 uniform->f[1] = vp->translate[1];
1136 uniform->f[2] = vp->translate[2];
1137 }
1138
1139 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
1140 enum pipe_shader_type st,
1141 unsigned int sysvalid,
1142 struct sysval_uniform *uniform)
1143 {
1144 struct panfrost_context *ctx = batch->ctx;
1145 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
1146 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
1147 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
1148 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
1149
1150 assert(dim);
1151 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
1152
1153 if (dim > 1)
1154 uniform->i[1] = u_minify(tex->texture->height0,
1155 tex->u.tex.first_level);
1156
1157 if (dim > 2)
1158 uniform->i[2] = u_minify(tex->texture->depth0,
1159 tex->u.tex.first_level);
1160
1161 if (is_array)
1162 uniform->i[dim] = tex->texture->array_size;
1163 }
1164
1165 static void
1166 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
1167 enum pipe_shader_type st,
1168 unsigned ssbo_id,
1169 struct sysval_uniform *uniform)
1170 {
1171 struct panfrost_context *ctx = batch->ctx;
1172
1173 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
1174 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
1175
1176 /* Compute address */
1177 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
1178
1179 panfrost_batch_add_bo(batch, bo,
1180 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
1181 panfrost_bo_access_for_stage(st));
1182
1183 /* Upload address and size as sysval */
1184 uniform->du[0] = bo->gpu + sb.buffer_offset;
1185 uniform->u[2] = sb.buffer_size;
1186 }
1187
1188 static void
1189 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
1190 enum pipe_shader_type st,
1191 unsigned samp_idx,
1192 struct sysval_uniform *uniform)
1193 {
1194 struct panfrost_context *ctx = batch->ctx;
1195 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1196
1197 uniform->f[0] = sampl->min_lod;
1198 uniform->f[1] = sampl->max_lod;
1199 uniform->f[2] = sampl->lod_bias;
1200
1201 /* Even without any errata, Midgard represents "no mipmapping" as
1202 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1203 * panfrost_create_sampler_state which also explains our choice of
1204 * epsilon value (again to keep behaviour consistent) */
1205
1206 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1207 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1208 }
1209
1210 static void
1211 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1212 struct sysval_uniform *uniform)
1213 {
1214 struct panfrost_context *ctx = batch->ctx;
1215
1216 uniform->u[0] = ctx->compute_grid->grid[0];
1217 uniform->u[1] = ctx->compute_grid->grid[1];
1218 uniform->u[2] = ctx->compute_grid->grid[2];
1219 }
1220
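/* Write the sysvals requested by the compiled shader into the start of the
 * uniform staging buffer, one 16-byte (vec4) slot per sysval */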
1221 static void
1222 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1223 struct panfrost_shader_state *ss,
1224 enum pipe_shader_type st)
1225 {
1226 struct sysval_uniform *uniforms = (void *)buf;
1227
1228 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1229 int sysval = ss->sysval[i];
1230
1231 switch (PAN_SYSVAL_TYPE(sysval)) {
1232 case PAN_SYSVAL_VIEWPORT_SCALE:
1233 panfrost_upload_viewport_scale_sysval(batch,
1234 &uniforms[i]);
1235 break;
1236 case PAN_SYSVAL_VIEWPORT_OFFSET:
1237 panfrost_upload_viewport_offset_sysval(batch,
1238 &uniforms[i]);
1239 break;
1240 case PAN_SYSVAL_TEXTURE_SIZE:
1241 panfrost_upload_txs_sysval(batch, st,
1242 PAN_SYSVAL_ID(sysval),
1243 &uniforms[i]);
1244 break;
1245 case PAN_SYSVAL_SSBO:
1246 panfrost_upload_ssbo_sysval(batch, st,
1247 PAN_SYSVAL_ID(sysval),
1248 &uniforms[i]);
1249 break;
1250 case PAN_SYSVAL_NUM_WORK_GROUPS:
1251 panfrost_upload_num_work_groups_sysval(batch,
1252 &uniforms[i]);
1253 break;
1254 case PAN_SYSVAL_SAMPLER:
1255 panfrost_upload_sampler_sysval(batch, st,
1256 PAN_SYSVAL_ID(sysval),
1257 &uniforms[i]);
1258 break;
1259 default:
1260 assert(0);
1261 }
1262 }
1263 }
1264
1265 static const void *
1266 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1267 unsigned index)
1268 {
1269 struct pipe_constant_buffer *cb = &buf->cb[index];
1270 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1271
1272 if (rsrc)
1273 return rsrc->bo->cpu;
1274 else if (cb->user_buffer)
1275 return cb->user_buffer;
1276 else
1277 unreachable("No constant buffer");
1278 }
1279
1280 void
1281 panfrost_emit_const_buf(struct panfrost_batch *batch,
1282 enum pipe_shader_type stage,
1283 struct mali_vertex_tiler_postfix *postfix)
1284 {
1285 struct panfrost_context *ctx = batch->ctx;
1286 struct panfrost_shader_variants *all = ctx->shader[stage];
1287
1288 if (!all)
1289 return;
1290
1291 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1292
1293 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1294
1295 /* Uniforms are implicitly UBO #0 */
1296 bool has_uniforms = buf->enabled_mask & (1 << 0);
1297
1298 /* Allocate room for the sysval and the uniforms */
1299 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1300 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1301 size_t size = sys_size + uniform_size;
1302 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1303 size);
1304
1305 /* Upload sysvals requested by the shader */
1306 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1307
1308 /* Upload uniforms */
1309 if (has_uniforms && uniform_size) {
1310 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1311 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1312 }
1313
1314 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1315 * uploaded */
1316
1317 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1318 assert(ubo_count >= 1);
1319
1320 size_t sz = sizeof(uint64_t) * ubo_count;
1321 uint64_t ubos[PAN_MAX_CONST_BUFFERS];
1322 int uniform_count = ss->uniform_count;
1323
1324 /* Upload uniforms as a UBO */
1325 ubos[0] = MALI_MAKE_UBO(2 + uniform_count, transfer.gpu);
1326
1327 /* The rest are honest-to-goodness UBOs */
1328
1329 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1330 size_t usz = buf->cb[ubo].buffer_size;
1331 bool enabled = buf->enabled_mask & (1 << ubo);
1332 bool empty = usz == 0;
1333
1334 if (!enabled || empty) {
1335 /* Stub out disabled UBOs to catch accesses */
1336 ubos[ubo] = MALI_MAKE_UBO(0, 0xDEAD0000);
1337 continue;
1338 }
1339
1340 mali_ptr gpu = panfrost_map_constant_buffer_gpu(batch, stage,
1341 buf, ubo);
1342
1343 unsigned bytes_per_field = 16;
1344 unsigned aligned = ALIGN_POT(usz, bytes_per_field);
1345 ubos[ubo] = MALI_MAKE_UBO(aligned / bytes_per_field, gpu);
1346 }
1347
1348 mali_ptr ubufs = panfrost_pool_upload(&batch->pool, ubos, sz);
1349 postfix->uniforms = transfer.gpu;
1350 postfix->uniform_buffers = ubufs;
1351
1352 buf->dirty_mask = 0;
1353 }
1354
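/* For compute jobs, allocate enough workgroup-local storage for the whole
 * grid (a power-of-two slice per workgroup) and emit the corresponding
 * mali_shared_memory descriptor */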
1355 void
1356 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1357 const struct pipe_grid_info *info,
1358 struct midgard_payload_vertex_tiler *vtp)
1359 {
1360 struct panfrost_context *ctx = batch->ctx;
1361 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1362 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1363 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1364 128));
1365 unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
1366 info->grid[2] * 4;
1367 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1368 shared_size,
1369 1);
1370
1371 struct mali_shared_memory shared = {
1372 .shared_memory = bo->gpu,
1373 .shared_workgroup_count =
1374 util_logbase2_ceil(info->grid[0]) +
1375 util_logbase2_ceil(info->grid[1]) +
1376 util_logbase2_ceil(info->grid[2]),
1377 .shared_unk1 = 0x2,
1378 .shared_shift = util_logbase2(single_size) - 1
1379 };
1380
1381 vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
1382 sizeof(shared));
1383 }
1384
1385 static mali_ptr
1386 panfrost_get_tex_desc(struct panfrost_batch *batch,
1387 enum pipe_shader_type st,
1388 struct panfrost_sampler_view *view)
1389 {
1390 if (!view)
1391 return (mali_ptr) 0;
1392
1393 struct pipe_sampler_view *pview = &view->base;
1394 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1395
1396 /* Add the BO to the job so it's retained until the job is done. */
1397
1398 panfrost_batch_add_bo(batch, rsrc->bo,
1399 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1400 panfrost_bo_access_for_stage(st));
1401
1402 panfrost_batch_add_bo(batch, view->bo,
1403 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1404 panfrost_bo_access_for_stage(st));
1405
1406 return view->bo->gpu;
1407 }
1408
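/* Recreate the texture descriptor if the backing BO or the resource layout
 * changed since the sampler view was created */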
1409 static void
1410 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1411 struct pipe_context *pctx)
1412 {
1413 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1414 if (view->texture_bo != rsrc->bo->gpu ||
1415 view->layout != rsrc->layout) {
1416 panfrost_bo_unreference(view->bo);
1417 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1418 }
1419 }
1420
1421 void
1422 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1423 enum pipe_shader_type stage,
1424 struct mali_vertex_tiler_postfix *postfix)
1425 {
1426 struct panfrost_context *ctx = batch->ctx;
1427 struct panfrost_device *device = pan_device(ctx->base.screen);
1428
1429 if (!ctx->sampler_view_count[stage])
1430 return;
1431
1432 if (device->quirks & IS_BIFROST) {
1433 struct bifrost_texture_descriptor *descriptors;
1434
1435 descriptors = malloc(sizeof(struct bifrost_texture_descriptor) *
1436 ctx->sampler_view_count[stage]);
1437
1438 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1439 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1440 struct pipe_sampler_view *pview = &view->base;
1441 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1442 panfrost_update_sampler_view(view, &ctx->base);
1443
1444 /* Add the BOs to the job so they are retained until the job is done. */
1445
1446 panfrost_batch_add_bo(batch, rsrc->bo,
1447 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1448 panfrost_bo_access_for_stage(stage));
1449
1450 panfrost_batch_add_bo(batch, view->bo,
1451 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1452 panfrost_bo_access_for_stage(stage));
1453
1454 memcpy(&descriptors[i], view->bifrost_descriptor, sizeof(*view->bifrost_descriptor));
1455 }
1456
1457 postfix->textures = panfrost_pool_upload(&batch->pool,
1458 descriptors,
1459 sizeof(struct bifrost_texture_descriptor) *
1460 ctx->sampler_view_count[stage]);
1461
1462 free(descriptors);
1463 } else {
1464 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1465
1466 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1467 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1468
1469 panfrost_update_sampler_view(view, &ctx->base);
1470
1471 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1472 }
1473
1474 postfix->textures = panfrost_pool_upload(&batch->pool,
1475 trampolines,
1476 sizeof(uint64_t) *
1477 ctx->sampler_view_count[stage]);
1478 }
1479 }
1480
1481 void
1482 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1483 enum pipe_shader_type stage,
1484 struct mali_vertex_tiler_postfix *postfix)
1485 {
1486 struct panfrost_context *ctx = batch->ctx;
1487 struct panfrost_device *device = pan_device(ctx->base.screen);
1488
1489 if (!ctx->sampler_count[stage])
1490 return;
1491
1492 if (device->quirks & IS_BIFROST) {
1493 size_t desc_size = sizeof(struct bifrost_sampler_descriptor);
1494 size_t transfer_size = desc_size * ctx->sampler_count[stage];
1495 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1496 transfer_size);
1497 struct bifrost_sampler_descriptor *desc = (struct bifrost_sampler_descriptor *)transfer.cpu;
1498
1499 for (int i = 0; i < ctx->sampler_count[stage]; ++i)
1500 desc[i] = ctx->samplers[stage][i]->bifrost_hw;
1501
1502 postfix->sampler_descriptor = transfer.gpu;
1503 } else {
1504 size_t desc_size = sizeof(struct mali_sampler_descriptor);
1505 size_t transfer_size = desc_size * ctx->sampler_count[stage];
1506 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1507 transfer_size);
1508 struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *)transfer.cpu;
1509
1510 for (int i = 0; i < ctx->sampler_count[stage]; ++i)
1511 desc[i] = ctx->samplers[stage][i]->midgard_hw;
1512
1513 postfix->sampler_descriptor = transfer.gpu;
1514 }
1515 }
1516
1517 void
1518 panfrost_emit_vertex_attr_meta(struct panfrost_batch *batch,
1519 struct mali_vertex_tiler_postfix *vertex_postfix)
1520 {
1521 struct panfrost_context *ctx = batch->ctx;
1522
1523 if (!ctx->vertex)
1524 return;
1525
1526 struct panfrost_vertex_state *so = ctx->vertex;
1527
1528 panfrost_vertex_state_upd_attr_offs(ctx, vertex_postfix);
1529 vertex_postfix->attribute_meta = panfrost_pool_upload(&batch->pool, so->hw,
1530 sizeof(*so->hw) *
1531 PAN_MAX_ATTRIBUTE);
1532 }
1533
1534 void
1535 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1536 struct mali_vertex_tiler_postfix *vertex_postfix)
1537 {
1538 struct panfrost_context *ctx = batch->ctx;
1539 struct panfrost_vertex_state *so = ctx->vertex;
1540
1541 /* Staged mali_attr, and index into them. i =/= k, depending on the
1542 * vertex buffer mask and instancing. Twice as much room is allocated,
1543 * for a worst case of NPOT_DIVIDEs, which take up an extra slot */
1544 union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
1545 unsigned k = 0;
1546
1547 for (unsigned i = 0; i < so->num_elements; ++i) {
1548 /* We map a mali_attr to be 1:1 with the mali_attr_meta, which
1549 * means duplicating some vertex buffers (who cares? aside from
1550 * maybe some caching implications but I somehow doubt that
1551 * matters) */
1552
1553 struct pipe_vertex_element *elem = &so->pipe[i];
1554 unsigned vbi = elem->vertex_buffer_index;
1555
1556 /* The exception to 1:1 mapping is that we can have multiple
1557 * entries (NPOT divisors), so we fix up anyway */
1558
1559 so->hw[i].index = k;
1560
1561 if (!(ctx->vb_mask & (1 << vbi)))
1562 continue;
1563
1564 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1565 struct panfrost_resource *rsrc;
1566
1567 rsrc = pan_resource(buf->buffer.resource);
1568 if (!rsrc)
1569 continue;
1570
1571 /* Align to 64 bytes by masking off the lower bits. This
1572 * will be adjusted back when we fix up the src_offset in
1573 * mali_attr_meta */
1574
1575 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1576 mali_ptr addr = raw_addr & ~63;
1577 unsigned chopped_addr = raw_addr - addr;
1578
1579 /* Add a dependency of the batch on the vertex buffer */
1580 panfrost_batch_add_bo(batch, rsrc->bo,
1581 PAN_BO_ACCESS_SHARED |
1582 PAN_BO_ACCESS_READ |
1583 PAN_BO_ACCESS_VERTEX_TILER);
1584
1585 /* Set common fields */
1586 attrs[k].elements = addr;
1587 attrs[k].stride = buf->stride;
1588
1589 /* Since we advanced the base pointer, we shrink the buffer
1590 * size */
1591 attrs[k].size = rsrc->base.width0 - buf->buffer_offset;
1592
1593 /* We need to add the extra size we masked off (for
1594 * correctness) so the data doesn't get clamped away */
1595 attrs[k].size += chopped_addr;
1596
1597 /* For non-instancing make sure we initialize */
1598 attrs[k].shift = attrs[k].extra_flags = 0;
1599
1600 /* Instancing uses a dramatically different code path than
1601 * linear, so dispatch for the actual emission now that the
1602 * common code is finished */
1603
1604 unsigned divisor = elem->instance_divisor;
1605
1606 if (divisor && ctx->instance_count == 1) {
1607 /* Silly corner case where there's a divisor(=1) but
1608 * there's no legitimate instancing. So we want *every*
1609 * attribute to be the same. So set stride to zero so
1610 * we don't go anywhere. */
1611
1612 attrs[k].size = attrs[k].stride + chopped_addr;
1613 attrs[k].stride = 0;
1614 attrs[k++].elements |= MALI_ATTR_LINEAR;
1615 } else if (ctx->instance_count <= 1) {
1616 /* Normal, non-instanced attributes */
1617 attrs[k++].elements |= MALI_ATTR_LINEAR;
1618 } else {
1619 unsigned instance_shift = vertex_postfix->instance_shift;
1620 unsigned instance_odd = vertex_postfix->instance_odd;
1621
1622 k += panfrost_vertex_instanced(ctx->padded_count,
1623 instance_shift,
1624 instance_odd,
1625 divisor, &attrs[k]);
1626 }
1627 }
1628
1629 /* Add special gl_VertexID/gl_InstanceID buffers */
1630
1631 panfrost_vertex_id(ctx->padded_count, &attrs[k]);
1632 so->hw[PAN_VERTEX_ID].index = k++;
1633 panfrost_instance_id(ctx->padded_count, &attrs[k]);
1634 so->hw[PAN_INSTANCE_ID].index = k++;
1635
1636 /* Upload whatever we emitted and go */
1637
1638 vertex_postfix->attributes = panfrost_pool_upload(&batch->pool, attrs,
1639 k * sizeof(*attrs));
1640 }
1641
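/* Allocate a transient buffer for a single varying channel, describe it in
 * the given attribute record (linear addressing) and return its GPU address */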
1642 static mali_ptr
1643 panfrost_emit_varyings(struct panfrost_batch *batch, union mali_attr *slot,
1644 unsigned stride, unsigned count)
1645 {
1646 /* Fill out the descriptor */
1647 slot->stride = stride;
1648 slot->size = stride * count;
1649 slot->shift = slot->extra_flags = 0;
1650
1651 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1652 slot->size);
1653
1654 slot->elements = transfer.gpu | MALI_ATTR_LINEAR;
1655
1656 return transfer.gpu;
1657 }
1658
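/* Streamout buffers are bound 64-byte aligned, so return the sub-64-byte
 * remainder of the write offset for the caller to fold back in (e.g. via
 * src_offset) */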
1659 static unsigned
1660 panfrost_streamout_offset(unsigned stride, unsigned offset,
1661 struct pipe_stream_output_target *target)
1662 {
1663 return (target->buffer_offset + (offset * stride * 4)) & 63;
1664 }
1665
1666 static void
1667 panfrost_emit_streamout(struct panfrost_batch *batch, union mali_attr *slot,
1668 unsigned stride, unsigned offset, unsigned count,
1669 struct pipe_stream_output_target *target)
1670 {
1671 /* Fill out the descriptor */
1672 slot->stride = stride * 4;
1673 slot->shift = slot->extra_flags = 0;
1674
1675 unsigned max_size = target->buffer_size;
1676 unsigned expected_size = slot->stride * count;
1677
1678 /* Grab the BO and bind it to the batch */
1679 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1680
1681 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1682 * the perspective of the TILER and FRAGMENT.
1683 */
1684 panfrost_batch_add_bo(batch, bo,
1685 PAN_BO_ACCESS_SHARED |
1686 PAN_BO_ACCESS_RW |
1687 PAN_BO_ACCESS_VERTEX_TILER |
1688 PAN_BO_ACCESS_FRAGMENT);
1689
1690 /* We will have an offset applied to get alignment */
1691 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * slot->stride);
1692 slot->elements = (addr & ~63) | MALI_ATTR_LINEAR;
1693 slot->size = MIN2(max_size, expected_size) + (addr & 63);
1694 }
1695
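/* Given the point sprite mask (bits 0-7 for TEX0-TEX7, bit 8 for
 * gl_PointCoord), check whether a varying slot is replaced by the point
 * coordinate */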
1696 static bool
1697 has_point_coord(unsigned mask, gl_varying_slot loc)
1698 {
1699 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1700 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1701 else if (loc == VARYING_SLOT_PNTC)
1702 return (mask & (1 << 8));
1703 else
1704 return false;
1705 }
1706
1707 /* Helpers for manipulating stream out information so we can pack varyings
1708 * accordingly. Compute the src_offset for a given captured varying */
1709
1710 static struct pipe_stream_output *
1711 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1712 {
1713 for (unsigned i = 0; i < info->num_outputs; ++i) {
1714 if (info->output[i].register_index == loc)
1715 return &info->output[i];
1716 }
1717
1718 unreachable("Varying not captured");
1719 }
1720
1721 static unsigned
1722 pan_varying_size(enum mali_format fmt)
1723 {
1724 unsigned type = MALI_EXTRACT_TYPE(fmt);
1725 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1726 unsigned bits = MALI_EXTRACT_BITS(fmt);
1727 unsigned bpc = 0;
1728
1729 if (bits == MALI_CHANNEL_FLOAT) {
1730 /* No doubles */
1731 bool fp16 = (type == MALI_FORMAT_SINT);
1732 assert(fp16 || (type == MALI_FORMAT_UNORM));
1733
1734 bpc = fp16 ? 2 : 4;
1735 } else {
1736 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1737
1738 /* See the enums */
1739 bits = 1 << bits;
1740 assert(bits >= 8);
1741 bpc = bits / 8;
1742 }
1743
1744 return bpc * chan;
1745 }
1746
1747 /* Indices for named (non-XFB) varyings that are present. These are packed
1748 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1749 * PAN_VARY_*). This has the nice property that you can lookup the buffer index
1750 * of a given special field given a shift S by:
1751 *
1752 * idx = popcount(P & ((1 << S) - 1))
1753 *
1754 * That is, count the varyings that come before this one; since indices start
1755 * at zero, that count is this varying's buffer index. Likewise, the total
1756 * number of special buffers required is simply popcount(P)
1757 */
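/* For example, if P = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION) |
 * (1 << PAN_VARY_PSIZ) = 0b111, then the point size buffer (S = 2) lands at
 * index popcount(0b111 & 0b011) = 2, popcount(P) = 3 buffers are needed in
 * total, and any XFB buffers start at index 3 */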
1758
1759 enum pan_special_varying {
1760 PAN_VARY_GENERAL = 0,
1761 PAN_VARY_POSITION = 1,
1762 PAN_VARY_PSIZ = 2,
1763 PAN_VARY_PNTCOORD = 3,
1764 PAN_VARY_FACE = 4,
1765 PAN_VARY_FRAGCOORD = 5,
1766
1767 /* Keep last */
1768 PAN_VARY_MAX,
1769 };
1770
1771 /* Given a varying, figure out which index it corresponds to */
1772
1773 static inline unsigned
1774 pan_varying_index(unsigned present, enum pan_special_varying v)
1775 {
1776 unsigned mask = (1 << v) - 1;
1777 return util_bitcount(present & mask);
1778 }
1779
1780 /* Get the base offset for XFB buffers, which by convention come after
1781 * everything else. Wrapper function for semantic reasons; by construction this
1782 * is just popcount. */
1783
1784 static inline unsigned
1785 pan_xfb_base(unsigned present)
1786 {
1787 return util_bitcount(present);
1788 }
1789
1790 /* Computes the present mask for varyings so we can start emitting varying records */
1791
1792 static inline unsigned
1793 pan_varying_present(
1794 struct panfrost_shader_state *vs,
1795 struct panfrost_shader_state *fs,
1796 unsigned quirks)
1797 {
1798 /* At the moment we always emit general and position buffers. Not
1799 * strictly necessary but usually harmless */
1800
1801 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1802
1803 /* Enable special buffers by the shader info */
1804
1805 if (vs->writes_point_size)
1806 present |= (1 << PAN_VARY_PSIZ);
1807
1808 if (fs->reads_point_coord)
1809 present |= (1 << PAN_VARY_PNTCOORD);
1810
1811 if (fs->reads_face)
1812 present |= (1 << PAN_VARY_FACE);
1813
1814 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1815 present |= (1 << PAN_VARY_FRAGCOORD);
1816
1817 /* Also, if we have a point sprite, we need a point coord buffer */
1818
1819 for (unsigned i = 0; i < fs->varying_count; i++) {
1820 gl_varying_slot loc = fs->varyings_loc[i];
1821
1822 if (has_point_coord(fs->point_sprite_mask, loc))
1823 present |= (1 << PAN_VARY_PNTCOORD);
1824 }
1825
1826 return present;
1827 }
1828
1829 /* Emitters for varying records */
1830
1831 static struct mali_attr_meta
1832 pan_emit_vary(unsigned present, enum pan_special_varying buf,
1833 unsigned quirks, enum mali_format format,
1834 unsigned offset)
1835 {
1836 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1837
1838 struct mali_attr_meta meta = {
1839 .index = pan_varying_index(present, buf),
1840 .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
1841 .swizzle = quirks & HAS_SWIZZLES ?
1842 panfrost_get_default_swizzle(nr_channels) :
1843 panfrost_bifrost_swizzle(nr_channels),
1844 .format = format,
1845 .src_offset = offset
1846 };
1847
1848 return meta;
1849 }
1850
1851 /* General varying that is unused */
1852
1853 static struct mali_attr_meta
1854 pan_emit_vary_only(unsigned present, unsigned quirks)
1855 {
1856 return pan_emit_vary(present, 0, quirks, MALI_VARYING_DISCARD, 0);
1857 }
1858
1859 /* Special records */
1860
1861 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1862 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1863 [PAN_VARY_PSIZ] = MALI_R16F,
1864 [PAN_VARY_PNTCOORD] = MALI_R16F,
1865 [PAN_VARY_FACE] = MALI_R32I,
1866 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1867 };
1868
1869 static struct mali_attr_meta
1870 pan_emit_vary_special(unsigned present, enum pan_special_varying buf,
1871 unsigned quirks)
1872 {
1873 assert(buf < PAN_VARY_MAX);
1874 return pan_emit_vary(present, buf, quirks, pan_varying_formats[buf], 0);
1875 }
1876
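/* Widen a varying's format for transform feedback capture: floats become
 * 32-bit floats with the requested channel count, while other types keep
 * their base type but are forced to 32 bits per channel */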
1877 static enum mali_format
1878 pan_xfb_format(enum mali_format format, unsigned nr)
1879 {
1880 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1881 return MALI_R32F | MALI_NR_CHANNELS(nr);
1882 else
1883 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1884 }
1885
1886 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1887 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1888 * value. */
1889
1890 static struct mali_attr_meta
1891 pan_emit_vary_xfb(unsigned present,
1892 unsigned max_xfb,
1893 unsigned *streamout_offsets,
1894 unsigned quirks,
1895 enum mali_format format,
1896 struct pipe_stream_output o)
1897 {
1898 /* Construct an XFB record for the captured varying */
1899 struct mali_attr_meta meta = {
1900 /* XFB buffers come after everything else */
1901 .index = pan_xfb_base(present) + o.output_buffer,
1902
1903 /* As usual unknown bit */
1904 .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
1905
1906 /* Override swizzle with number of channels */
1907 .swizzle = quirks & HAS_SWIZZLES ?
1908 panfrost_get_default_swizzle(o.num_components) :
1909 panfrost_bifrost_swizzle(o.num_components),
1910
1911 /* Override number of channels and precision to highp */
1912 .format = pan_xfb_format(format, o.num_components),
1913
1914 /* Apply given offsets together */
1915 .src_offset = (o.dst_offset * 4) /* dwords */
1916 + streamout_offsets[o.output_buffer]
1917 };
1918
1919 return meta;
1920 }
1921
1922 /* Determine if we should capture a varying for XFB. This requires actually
1923 * having a buffer for it. If we don't capture it, we'll fall back to a general
1924 * varying path (linked or unlinked, possibly discarding the write) */
1925
1926 static bool
1927 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1928 unsigned loc, unsigned max_xfb)
1929 {
1930 if (!(xfb->so_mask & (1ll << loc)))
1931 return false;
1932
1933 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1934 return o->output_buffer < max_xfb;
1935 }
1936
1937 /* Higher-level wrapper around all of the above, classifying a varying into one
1938 * of the above types */
1939
1940 static struct mali_attr_meta
1941 panfrost_emit_varying(
1942 struct panfrost_shader_state *stage,
1943 struct panfrost_shader_state *other,
1944 struct panfrost_shader_state *xfb,
1945 unsigned present,
1946 unsigned max_xfb,
1947 unsigned *streamout_offsets,
1948 unsigned quirks,
1949 unsigned *gen_offsets,
1950 enum mali_format *gen_formats,
1951 unsigned *gen_stride,
1952 unsigned idx,
1953 bool should_alloc,
1954 bool is_fragment)
1955 {
1956 gl_varying_slot loc = stage->varyings_loc[idx];
1957 enum mali_format format = stage->varyings[idx];
1958
1959 /* Override format to match linkage */
1960 if (!should_alloc && gen_formats[idx])
1961 format = gen_formats[idx];
1962
1963 if (has_point_coord(stage->point_sprite_mask, loc)) {
1964 return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
1965 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1966 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1967 return pan_emit_vary_xfb(present, max_xfb, streamout_offsets, quirks, format, *o);
1968 } else if (loc == VARYING_SLOT_POS) {
1969 if (is_fragment)
1970 return pan_emit_vary_special(present, PAN_VARY_FRAGCOORD, quirks);
1971 else
1972 return pan_emit_vary_special(present, PAN_VARY_POSITION, quirks);
1973 } else if (loc == VARYING_SLOT_PSIZ) {
1974 return pan_emit_vary_special(present, PAN_VARY_PSIZ, quirks);
1975 } else if (loc == VARYING_SLOT_PNTC) {
1976 return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
1977 } else if (loc == VARYING_SLOT_FACE) {
1978 return pan_emit_vary_special(present, PAN_VARY_FACE, quirks);
1979 }
1980
1981 /* We've exhausted special cases, so it's otherwise a general varying. Check if we're linked */
1982 signed other_idx = -1;
1983
1984 for (unsigned j = 0; j < other->varying_count; ++j) {
1985 if (other->varyings_loc[j] == loc) {
1986 other_idx = j;
1987 break;
1988 }
1989 }
1990
1991 if (other_idx < 0)
1992 return pan_emit_vary_only(present, quirks);
1993
1994 unsigned offset = gen_offsets[other_idx];
1995
1996 if (should_alloc) {
1997 /* We're linked, so allocate space via a watermark allocation */
1998 enum mali_format alt = other->varyings[other_idx];
1999
2000 /* Do interpolation at minimum precision */
2001 unsigned size_main = pan_varying_size(format);
2002 unsigned size_alt = pan_varying_size(alt);
2003 unsigned size = MIN2(size_main, size_alt);
2004
2005 /* If a varying is marked for XFB but not actually captured, we
2006 * should match the format to the format that would otherwise
2007 * be used for XFB, since dEQP checks for invariance here. It's
2008 * unclear if this is required by the spec. */
2009
2010 if (xfb->so_mask & (1ull << loc)) {
2011 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
2012 format = pan_xfb_format(format, o->num_components);
2013 size = pan_varying_size(format);
2014 } else if (size == size_alt) {
2015 format = alt;
2016 }
2017
2018 gen_offsets[idx] = *gen_stride;
2019 gen_formats[other_idx] = format;
2020 offset = *gen_stride;
2021 *gen_stride += size;
2022 }
2023
2024 return pan_emit_vary(present, PAN_VARY_GENERAL,
2025 quirks, format, offset);
2026 }
2027
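/* Emit the record for a hardware-generated input (point coord, front facing,
 * fragment coord): no buffer is allocated, only the magic address is written
 * into the elements field and the rest of the record stays zeroed */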
2028 static void
2029 pan_emit_special_input(union mali_attr *varyings,
2030 unsigned present,
2031 enum pan_special_varying v,
2032 mali_ptr addr)
2033 {
2034 if (present & (1 << v)) {
2035 /* Ensure we write exactly once for performance and with fields
2036 * zeroed appropriately to avoid flakes */
2037
2038 union mali_attr s = {
2039 .elements = addr
2040 };
2041
2042 varyings[pan_varying_index(present, v)] = s;
2043 }
2044 }
2045
2046 void
2047 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
2048 unsigned vertex_count,
2049 struct mali_vertex_tiler_postfix *vertex_postfix,
2050 struct mali_vertex_tiler_postfix *tiler_postfix,
2051 union midgard_primitive_size *primitive_size)
2052 {
2053 /* Load the shaders */
2054 struct panfrost_context *ctx = batch->ctx;
2055 struct panfrost_device *dev = pan_device(ctx->base.screen);
2056 struct panfrost_shader_state *vs, *fs;
2057 size_t vs_size, fs_size;
2058
2059 /* Allocate the varying descriptor */
2060
2061 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
2062 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
2063 vs_size = sizeof(struct mali_attr_meta) * vs->varying_count;
2064 fs_size = sizeof(struct mali_attr_meta) * fs->varying_count;
2065
2066 struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
2067 vs_size +
2068 fs_size);
2069
2070 struct pipe_stream_output_info *so = &vs->stream_output;
2071 unsigned present = pan_varying_present(vs, fs, dev->quirks);
2072
2073 /* Check if this varying is linked by us. This is the case for
2074 * general-purpose, non-captured varyings. If it is, link it. If it's
2075 * not, use the provided stream out information to determine the
2076 * offset, since it was already linked for us. */
2077
2078 unsigned gen_offsets[32];
2079 enum mali_format gen_formats[32];
2080 memset(gen_offsets, 0, sizeof(gen_offsets));
2081 memset(gen_formats, 0, sizeof(gen_formats));
2082
2083 unsigned gen_stride = 0;
2084 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
2085 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
2086
2087 unsigned streamout_offsets[32];
2088
2089 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2090 streamout_offsets[i] = panfrost_streamout_offset(
2091 so->stride[i],
2092 ctx->streamout.offsets[i],
2093 ctx->streamout.targets[i]);
2094 }
2095
2096 struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
2097 struct mali_attr_meta *ofs = ovs + vs->varying_count;
2098
2099 for (unsigned i = 0; i < vs->varying_count; i++) {
2100 ovs[i] = panfrost_emit_varying(vs, fs, vs, present,
2101 ctx->streamout.num_targets, streamout_offsets,
2102 dev->quirks,
2103 gen_offsets, gen_formats, &gen_stride, i, true, false);
2104 }
2105
2106 for (unsigned i = 0; i < fs->varying_count; i++) {
2107 ofs[i] = panfrost_emit_varying(fs, vs, vs, present,
2108 ctx->streamout.num_targets, streamout_offsets,
2109 dev->quirks,
2110 gen_offsets, gen_formats, &gen_stride, i, false, true);
2111 }
2112
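/* Buffer layout: the special buffers (general, position, ...) present in the
 * mask come first, indexed by pan_varying_index(), followed by one buffer per
 * enabled transform feedback target starting at pan_xfb_base(present) */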
2113 unsigned xfb_base = pan_xfb_base(present);
2114 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
2115 sizeof(union mali_attr) * (xfb_base + ctx->streamout.num_targets));
2116 union mali_attr *varyings = (union mali_attr *) T.cpu;
2117
2118 /* Emit the stream out buffers */
2119
2120 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
2121 ctx->vertex_count);
2122
2123 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2124 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
2125 so->stride[i],
2126 ctx->streamout.offsets[i],
2127 out_count,
2128 ctx->streamout.targets[i]);
2129 }
2130
2131 panfrost_emit_varyings(batch,
2132 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2133 gen_stride, vertex_count);
2134
2135 /* fp32 vec4 gl_Position */
2136 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2137 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2138 sizeof(float) * 4, vertex_count);
2139
2140 if (present & (1 << PAN_VARY_PSIZ)) {
2141 primitive_size->pointer = panfrost_emit_varyings(batch,
2142 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2143 2, vertex_count);
2144 }
2145
2146 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_VARYING_POINT_COORD);
2147 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_VARYING_FRONT_FACING);
2148 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_VARYING_FRAG_COORD);
2149
2150 vertex_postfix->varyings = T.gpu;
2151 tiler_postfix->varyings = T.gpu;
2152
2153 vertex_postfix->varying_meta = trans.gpu;
2154 tiler_postfix->varying_meta = trans.gpu + vs_size;
2155 }
2156
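/* Build and submit the vertex job and, unless rasterizer discard is enabled,
 * a tiler job depending on it, since the tiler consumes the vertex shader's
 * outputs. Bifrost and Midgard use different payload layouts, hence the two
 * paths below */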
2157 void
2158 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2159 struct mali_vertex_tiler_prefix *vertex_prefix,
2160 struct mali_vertex_tiler_postfix *vertex_postfix,
2161 struct mali_vertex_tiler_prefix *tiler_prefix,
2162 struct mali_vertex_tiler_postfix *tiler_postfix,
2163 union midgard_primitive_size *primitive_size)
2164 {
2165 struct panfrost_context *ctx = batch->ctx;
2166 struct panfrost_device *device = pan_device(ctx->base.screen);
2167 bool wallpapering = ctx->wallpaper_batch && batch->tiler_dep;
2168 struct bifrost_payload_vertex bifrost_vertex = {0,};
2169 struct bifrost_payload_tiler bifrost_tiler = {0,};
2170 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2171 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2172 void *vp, *tp;
2173 size_t vp_size, tp_size;
2174
2175 if (device->quirks & IS_BIFROST) {
2176 bifrost_vertex.prefix = *vertex_prefix;
2177 bifrost_vertex.postfix = *vertex_postfix;
2178 vp = &bifrost_vertex;
2179 vp_size = sizeof(bifrost_vertex);
2180
2181 bifrost_tiler.prefix = *tiler_prefix;
2182 bifrost_tiler.tiler.primitive_size = *primitive_size;
2183 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2184 bifrost_tiler.postfix = *tiler_postfix;
2185 tp = &bifrost_tiler;
2186 tp_size = sizeof(bifrost_tiler);
2187 } else {
2188 midgard_vertex.prefix = *vertex_prefix;
2189 midgard_vertex.postfix = *vertex_postfix;
2190 vp = &midgard_vertex;
2191 vp_size = sizeof(midgard_vertex);
2192
2193 midgard_tiler.prefix = *tiler_prefix;
2194 midgard_tiler.postfix = *tiler_postfix;
2195 midgard_tiler.primitive_size = *primitive_size;
2196 tp = &midgard_tiler;
2197 tp_size = sizeof(midgard_tiler);
2198 }
2199
2200 if (wallpapering) {
2201 /* Inject in reverse order, with "predicted" job indices.
2202 * THIS IS A HACK XXX */
2203 panfrost_new_job(batch, JOB_TYPE_TILER, false,
2204 batch->job_index + 2, tp, tp_size, true);
2205 panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0,
2206 vp, vp_size, true);
2207 return;
2208 }
2209
2210 /* If rasterizer discard is enabled, only submit the vertex job */
2211
2212 bool rasterizer_discard = ctx->rasterizer &&
2213 ctx->rasterizer->base.rasterizer_discard;
2214
2215 unsigned vertex = panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0,
2216 vp, vp_size, false);
2217
2218 if (rasterizer_discard)
2219 return;
2220
2221 panfrost_new_job(batch, JOB_TYPE_TILER, false, vertex, tp, tp_size,
2222 false);
2223 }
2224
2225 /* TODO: stop hardcoding this */
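/* These are presumably fixed-point sample positions in 1/256th-pixel units,
 * with (128, 128) being the pixel centre; the exact encoding is not
 * documented here */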
2226 mali_ptr
2227 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2228 {
2229 uint16_t locations[] = {
2230 128, 128,
2231 0, 256,
2232 0, 256,
2233 0, 256,
2234 0, 256,
2235 0, 256,
2236 0, 256,
2237 0, 256,
2238 0, 256,
2239 0, 256,
2240 0, 256,
2241 0, 256,
2242 0, 256,
2243 0, 256,
2244 0, 256,
2245 0, 256,
2246 0, 256,
2247 0, 256,
2248 0, 256,
2249 0, 256,
2250 0, 256,
2251 0, 256,
2252 0, 256,
2253 0, 256,
2254 0, 256,
2255 0, 256,
2256 0, 256,
2257 0, 256,
2258 0, 256,
2259 0, 256,
2260 0, 256,
2261 0, 256,
2262 128, 128,
2263 0, 0,
2264 0, 0,
2265 0, 0,
2266 0, 0,
2267 0, 0,
2268 0, 0,
2269 0, 0,
2270 0, 0,
2271 0, 0,
2272 0, 0,
2273 0, 0,
2274 0, 0,
2275 0, 0,
2276 0, 0,
2277 0, 0,
2278 };
2279
2280 return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
2281 }