panfrost: Staticize a few cmdstream functions
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_allocate.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 /* TODO: Bifrost requires just a mali_shared_memory, without the rest of the
55 * framebuffer */
56
57 static void
58 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
59 struct midgard_payload_vertex_tiler *vt)
60 {
61 struct panfrost_device *dev = pan_device(ctx->base.screen);
62 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
63
64 /* If we haven't, reserve space for the framebuffer */
65
66 if (!batch->framebuffer.gpu) {
67 unsigned size = (dev->quirks & MIDGARD_SFBD) ?
68 sizeof(struct mali_single_framebuffer) :
69 sizeof(struct mali_framebuffer);
70
71 batch->framebuffer = panfrost_allocate_transient(batch, size);
72
73 /* Tag the pointer */
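/* (MALI_MFBD marks this as the multi-framebuffer descriptor, as opposed to the
 * older single-framebuffer SFBD layout) */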
74 if (!(dev->quirks & MIDGARD_SFBD))
75 batch->framebuffer.gpu |= MALI_MFBD;
76 }
77
78 vt->postfix.shared_memory = batch->framebuffer.gpu;
79 }
80
81 static void
82 panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
83 struct midgard_payload_vertex_tiler *tp)
84 {
85 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
86
87 tp->gl_enables |= 0x7;
88 SET_BIT(tp->gl_enables, MALI_FRONT_CCW_TOP,
89 rasterizer && rasterizer->base.front_ccw);
90 SET_BIT(tp->gl_enables, MALI_CULL_FACE_FRONT,
91 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
92 SET_BIT(tp->gl_enables, MALI_CULL_FACE_BACK,
93 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
94 SET_BIT(tp->prefix.unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
95 rasterizer && rasterizer->base.flatshade_first);
96
97 if (!panfrost_writes_point_size(ctx)) {
98 bool points = tp->prefix.draw_mode == MALI_POINTS;
99 float val = 0.0f;
100
101 if (rasterizer)
102 val = points ?
103 rasterizer->base.point_size :
104 rasterizer->base.line_width;
105
106 tp->primitive_size.constant = val;
107 }
108 }
109
110 static void
111 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
112 struct midgard_payload_vertex_tiler *tp)
113 {
114 SET_BIT(tp->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
115 if (ctx->occlusion_query)
116 tp->postfix.occlusion_counter = ctx->occlusion_query->bo->gpu;
117 else
118 tp->postfix.occlusion_counter = 0;
119 }
120
121 void
122 panfrost_vt_init(struct panfrost_context *ctx,
123 enum pipe_shader_type stage,
124 struct midgard_payload_vertex_tiler *vtp)
125 {
126 if (!ctx->shader[stage])
127 return;
128
129 memset(vtp, 0, sizeof(*vtp));
130 vtp->gl_enables = 0x6;
131 panfrost_vt_attach_framebuffer(ctx, vtp);
132
133 if (stage == PIPE_SHADER_FRAGMENT) {
134 panfrost_vt_update_occlusion_query(ctx, vtp);
135 panfrost_vt_update_rasterizer(ctx, vtp);
136 }
137 }
138
139
140 static unsigned
141 panfrost_translate_index_size(unsigned size)
142 {
143 switch (size) {
144 case 1:
145 return MALI_DRAW_INDEXED_UINT8;
146
147 case 2:
148 return MALI_DRAW_INDEXED_UINT16;
149
150 case 4:
151 return MALI_DRAW_INDEXED_UINT32;
152
153 default:
154 unreachable("Invalid index size");
155 }
156 }
157
158 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
159 * good for the duration of the draw (transient), though it may last longer. Also
160 * gets the bounds on the index buffer for the range accessed by the draw. We do
161 * these operations together because there are natural optimizations which
162 * require them to be together. */
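/* For indexed draws out of a real resource, a per-resource min/max cache keyed on
 * (start, count) lets us skip scanning the indices; on a miss we fall back to
 * u_vbuf_get_minmax_index and populate the cache for next time. */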
163
164 static mali_ptr
165 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
166 const struct pipe_draw_info *info,
167 unsigned *min_index, unsigned *max_index)
168 {
169 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
170 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
171 off_t offset = info->start * info->index_size;
172 bool needs_indices = true;
173 mali_ptr out = 0;
174
175 if (info->max_index != ~0u) {
176 *min_index = info->min_index;
177 *max_index = info->max_index;
178 needs_indices = false;
179 }
180
181 if (!info->has_user_indices) {
182 /* Only resources can be directly mapped */
183 panfrost_batch_add_bo(batch, rsrc->bo,
184 PAN_BO_ACCESS_SHARED |
185 PAN_BO_ACCESS_READ |
186 PAN_BO_ACCESS_VERTEX_TILER);
187 out = rsrc->bo->gpu + offset;
188
189 /* Check the cache */
190 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
191 info->start,
192 info->count,
193 min_index,
194 max_index);
195 } else {
196 /* Otherwise, we need to upload to transient memory */
197 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
198 out = panfrost_upload_transient(batch, ibuf8 + offset,
199 info->count *
200 info->index_size);
201 }
202
203 if (needs_indices) {
204 /* Fallback */
205 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
206
207 if (!info->has_user_indices)
208 panfrost_minmax_cache_add(rsrc->index_cache,
209 info->start, info->count,
210 *min_index, *max_index);
211 }
212
213 return out;
214 }
215
216 void
217 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
218 const struct pipe_draw_info *info,
219 enum mali_draw_mode draw_mode,
220 struct midgard_payload_vertex_tiler *vp,
221 struct midgard_payload_vertex_tiler *tp,
222 unsigned *vertex_count,
223 unsigned *padded_count)
224 {
225 tp->prefix.draw_mode = draw_mode;
226
227 unsigned draw_flags = 0;
228
229 if (panfrost_writes_point_size(ctx))
230 draw_flags |= MALI_DRAW_VARYING_SIZE;
231
232 if (info->primitive_restart)
233 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
234
235 /* These don't make much sense */
236
237 draw_flags |= 0x3000;
238
239 if (info->index_size) {
240 unsigned min_index = 0, max_index = 0;
241
242 tp->prefix.indices = panfrost_get_index_buffer_bounded(ctx,
243 info,
244 &min_index,
245 &max_index);
246
247 /* Use the corresponding values */
248 *vertex_count = max_index - min_index + 1;
249 tp->offset_start = vp->offset_start = min_index + info->index_bias;
250 tp->prefix.offset_bias_correction = -min_index;
251 tp->prefix.index_count = MALI_POSITIVE(info->count);
252 draw_flags |= panfrost_translate_index_size(info->index_size);
253 } else {
254 tp->prefix.indices = 0;
255 *vertex_count = ctx->vertex_count;
256 tp->offset_start = vp->offset_start = info->start;
257 tp->prefix.offset_bias_correction = 0;
258 tp->prefix.index_count = MALI_POSITIVE(ctx->vertex_count);
259 }
260
261 tp->prefix.unknown_draw = draw_flags;
262
263 /* Encode the padded vertex count */
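/* The padded count is passed along as an odd factor and a power-of-two shift:
 * padded = (2 * instance_odd + 1) << instance_shift. For example, a padded
 * count of 24 decomposes as shift = 3, odd = 1, since (2*1 + 1) << 3 = 24. */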
264
265 if (info->instance_count > 1) {
266 *padded_count = panfrost_padded_vertex_count(*vertex_count);
267
268 unsigned shift = __builtin_ctz(ctx->padded_count);
269 unsigned k = ctx->padded_count >> (shift + 1);
270
271 tp->instance_shift = vp->instance_shift = shift;
272 tp->instance_odd = vp->instance_odd = k;
273 } else {
274 *padded_count = *vertex_count;
275
276 /* Reset instancing state */
277 tp->instance_shift = vp->instance_shift = 0;
278 tp->instance_odd = vp->instance_odd = 0;
279 }
280 }
281
282 static void
283 panfrost_shader_meta_init(struct panfrost_context *ctx,
284 enum pipe_shader_type st,
285 struct mali_shader_meta *meta)
286 {
287 const struct panfrost_device *dev = pan_device(ctx->base.screen);
288 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
289
290 memset(meta, 0, sizeof(*meta));
291 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
292 meta->attribute_count = ss->attribute_count;
293 meta->varying_count = ss->varying_count;
294 meta->texture_count = ctx->sampler_view_count[st];
295 meta->sampler_count = ctx->sampler_count[st];
296
297 if (dev->quirks & IS_BIFROST) {
298 meta->bifrost1.unk1 = 0x800200;
299 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
300 meta->bifrost2.preload_regs = 0xC0;
301 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
302 ss->uniform_cutoff);
303 } else {
304 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
305 ss->uniform_cutoff);
306 meta->midgard1.work_count = ss->work_reg_count;
307 meta->midgard1.flags_hi = 0x8; /* XXX */
308 meta->midgard1.flags_lo = 0x220;
309 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
310 }
311
312 }
313
314 static unsigned
315 panfrost_translate_compare_func(enum pipe_compare_func in)
316 {
317 switch (in) {
318 case PIPE_FUNC_NEVER:
319 return MALI_FUNC_NEVER;
320
321 case PIPE_FUNC_LESS:
322 return MALI_FUNC_LESS;
323
324 case PIPE_FUNC_EQUAL:
325 return MALI_FUNC_EQUAL;
326
327 case PIPE_FUNC_LEQUAL:
328 return MALI_FUNC_LEQUAL;
329
330 case PIPE_FUNC_GREATER:
331 return MALI_FUNC_GREATER;
332
333 case PIPE_FUNC_NOTEQUAL:
334 return MALI_FUNC_NOTEQUAL;
335
336 case PIPE_FUNC_GEQUAL:
337 return MALI_FUNC_GEQUAL;
338
339 case PIPE_FUNC_ALWAYS:
340 return MALI_FUNC_ALWAYS;
341
342 default:
343 unreachable("Invalid func");
344 }
345 }
346
347 static unsigned
348 panfrost_translate_stencil_op(enum pipe_stencil_op in)
349 {
350 switch (in) {
351 case PIPE_STENCIL_OP_KEEP:
352 return MALI_STENCIL_KEEP;
353
354 case PIPE_STENCIL_OP_ZERO:
355 return MALI_STENCIL_ZERO;
356
357 case PIPE_STENCIL_OP_REPLACE:
358 return MALI_STENCIL_REPLACE;
359
360 case PIPE_STENCIL_OP_INCR:
361 return MALI_STENCIL_INCR;
362
363 case PIPE_STENCIL_OP_DECR:
364 return MALI_STENCIL_DECR;
365
366 case PIPE_STENCIL_OP_INCR_WRAP:
367 return MALI_STENCIL_INCR_WRAP;
368
369 case PIPE_STENCIL_OP_DECR_WRAP:
370 return MALI_STENCIL_DECR_WRAP;
371
372 case PIPE_STENCIL_OP_INVERT:
373 return MALI_STENCIL_INVERT;
374
375 default:
376 unreachable("Invalid stencil op");
377 }
378 }
379
380 static unsigned
381 translate_tex_wrap(enum pipe_tex_wrap w)
382 {
383 switch (w) {
384 case PIPE_TEX_WRAP_REPEAT:
385 return MALI_WRAP_REPEAT;
386
387 case PIPE_TEX_WRAP_CLAMP:
388 return MALI_WRAP_CLAMP;
389
390 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
391 return MALI_WRAP_CLAMP_TO_EDGE;
392
393 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
394 return MALI_WRAP_CLAMP_TO_BORDER;
395
396 case PIPE_TEX_WRAP_MIRROR_REPEAT:
397 return MALI_WRAP_MIRRORED_REPEAT;
398
399 case PIPE_TEX_WRAP_MIRROR_CLAMP:
400 return MALI_WRAP_MIRRORED_CLAMP;
401
402 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
403 return MALI_WRAP_MIRRORED_CLAMP_TO_EDGE;
404
405 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
406 return MALI_WRAP_MIRRORED_CLAMP_TO_BORDER;
407
408 default:
409 unreachable("Invalid wrap");
410 }
411 }
412
413 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
414 struct mali_sampler_descriptor *hw)
415 {
416 unsigned func = panfrost_translate_compare_func(cso->compare_func);
417 bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
418 bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
419 bool mip_linear = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;
420 unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
421 unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
422 unsigned mip_filter = mip_linear ?
423 (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
424 unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;
425
426 *hw = (struct mali_sampler_descriptor) {
427 .filter_mode = min_filter | mag_filter | mip_filter |
428 normalized,
429 .wrap_s = translate_tex_wrap(cso->wrap_s),
430 .wrap_t = translate_tex_wrap(cso->wrap_t),
431 .wrap_r = translate_tex_wrap(cso->wrap_r),
432 .compare_func = panfrost_flip_compare_func(func),
433 .border_color = {
434 cso->border_color.f[0],
435 cso->border_color.f[1],
436 cso->border_color.f[2],
437 cso->border_color.f[3]
438 },
439 .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
440 .max_lod = FIXED_16(cso->max_lod, false),
441 .lod_bias = FIXED_16(cso->lod_bias, true), /* can be negative */
442 .seamless_cube_map = cso->seamless_cube_map,
443 };
444
445 /* If necessary, we disable mipmapping in the sampler descriptor by
446 * clamping the LOD as tightly as possible (from 0 to epsilon,
447 * essentially -- remember these are fixed point numbers, so
448 * epsilon=1/256) */
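/* For example, with min_lod = 0 this clamps max_lod to 1/256, so only the base
 * mip level is ever sampled. */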
449
450 if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
451 hw->max_lod = hw->min_lod + 1;
452 }
453
454 static void
455 panfrost_make_stencil_state(const struct pipe_stencil_state *in,
456 struct mali_stencil_test *out)
457 {
458 out->ref = 0; /* Gallium gets it from elsewhere */
459
460 out->mask = in->valuemask;
461 out->func = panfrost_translate_compare_func(in->func);
462 out->sfail = panfrost_translate_stencil_op(in->fail_op);
463 out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
464 out->dppass = panfrost_translate_stencil_op(in->zpass_op);
465 }
466
467 static void
468 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
469 struct mali_shader_meta *fragmeta)
470 {
471 if (!ctx->rasterizer) {
472 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
473 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
474 fragmeta->depth_units = 0.0f;
475 fragmeta->depth_factor = 0.0f;
476 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
477 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
478 return;
479 }
480
481 bool msaa = ctx->rasterizer->base.multisample;
482
483 /* TODO: Sample size */
484 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
485 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
486 fragmeta->depth_units = ctx->rasterizer->base.offset_units * 2.0f;
487 fragmeta->depth_factor = ctx->rasterizer->base.offset_scale;
488
489 /* XXX: Which bit is which? Does this maybe allow offsetting non-tris? */
490
491 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A,
492 ctx->rasterizer->base.offset_tri);
493 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B,
494 ctx->rasterizer->base.offset_tri);
495 }
496
497 static void
498 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
499 struct mali_shader_meta *fragmeta)
500 {
501 const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
502 int zfunc = PIPE_FUNC_ALWAYS;
503
504 if (!zsa) {
505 struct pipe_stencil_state default_stencil = {
506 .enabled = 0,
507 .func = PIPE_FUNC_ALWAYS,
508 .fail_op = MALI_STENCIL_KEEP,
509 .zfail_op = MALI_STENCIL_KEEP,
510 .zpass_op = MALI_STENCIL_KEEP,
511 .writemask = 0xFF,
512 .valuemask = 0xFF
513 };
514
515 panfrost_make_stencil_state(&default_stencil,
516 &fragmeta->stencil_front);
517 fragmeta->stencil_mask_front = default_stencil.writemask;
518 fragmeta->stencil_back = fragmeta->stencil_front;
519 fragmeta->stencil_mask_back = default_stencil.writemask;
520 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
521 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
522 } else {
523 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
524 zsa->stencil[0].enabled);
525 panfrost_make_stencil_state(&zsa->stencil[0],
526 &fragmeta->stencil_front);
527 fragmeta->stencil_mask_front = zsa->stencil[0].writemask;
528 fragmeta->stencil_front.ref = ctx->stencil_ref.ref_value[0];
529
530 /* If back-stencil is not enabled, use the front values */
531
532 if (zsa->stencil[1].enabled) {
533 panfrost_make_stencil_state(&zsa->stencil[1],
534 &fragmeta->stencil_back);
535 fragmeta->stencil_mask_back = zsa->stencil[1].writemask;
536 fragmeta->stencil_back.ref = ctx->stencil_ref.ref_value[1];
537 } else {
538 fragmeta->stencil_back = fragmeta->stencil_front;
539 fragmeta->stencil_mask_back = fragmeta->stencil_mask_front;
540 fragmeta->stencil_back.ref = fragmeta->stencil_front.ref;
541 }
542
543 if (zsa->depth.enabled)
544 zfunc = zsa->depth.func;
545
546 /* Depth state (TODO: Refactor) */
547
548 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
549 zsa->depth.writemask);
550 }
551
552 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
553 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
554 }
555
556 static void
557 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
558 struct mali_shader_meta *fragmeta,
559 struct midgard_blend_rt *rts)
560 {
561 const struct panfrost_device *dev = pan_device(ctx->base.screen);
562
563 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
564 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
565 !ctx->blend->base.dither);
566
567 /* Get blending setup */
568 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
569
570 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
571 unsigned shader_offset = 0;
572 struct panfrost_bo *shader_bo = NULL;
573
574 for (unsigned c = 0; c < rt_count; ++c)
575 blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
576 &shader_offset);
577
578 /* If there is a blend shader, work registers are shared. XXX: opt */
579
580 for (unsigned c = 0; c < rt_count; ++c) {
581 if (blend[c].is_shader)
582 fragmeta->midgard1.work_count = 16;
583 }
584
585 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
586 * copied to the blend_meta appended (by convention), but this is the
587 * field actually read by the hardware. (Or maybe both are read...?).
588 * Specify the last RTi with a blend shader. */
589
590 fragmeta->blend.shader = 0;
591
592 for (signed rt = (rt_count - 1); rt >= 0; --rt) {
593 if (!blend[rt].is_shader)
594 continue;
595
596 fragmeta->blend.shader = blend[rt].shader.gpu |
597 blend[rt].shader.first_tag;
598 break;
599 }
600
601 if (dev->quirks & MIDGARD_SFBD) {
602 /* On single render target (SFBD) platforms, the blend
603 * information is inside the shader meta itself. We additionally
604 * need to signal CAN_DISCARD for nontrivial blend modes (so
605 * we're able to read back the destination buffer) */
606
607 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
608 blend[0].is_shader);
609
610 if (!blend[0].is_shader) {
611 fragmeta->blend.equation = *blend[0].equation.equation;
612 fragmeta->blend.constant = blend[0].equation.constant;
613 }
614
615 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
616 !blend[0].no_blending);
617 return;
618 }
619
620 /* Additional blend descriptor tacked on for jobs using MFBD */
621
622 for (unsigned i = 0; i < rt_count; ++i) {
623 rts[i].flags = 0x200;
624
625 bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
626 (ctx->pipe_framebuffer.cbufs[i]) &&
627 util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
628
629 SET_BIT(rts[i].flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
630 SET_BIT(rts[i].flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
631 SET_BIT(rts[i].flags, MALI_BLEND_SRGB, is_srgb);
632 SET_BIT(rts[i].flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
633
634 if (blend[i].is_shader) {
635 rts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
636 } else {
637 rts[i].blend.equation = *blend[i].equation.equation;
638 rts[i].blend.constant = blend[i].equation.constant;
639 }
640 }
641 }
642
643 static void
644 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
645 struct mali_shader_meta *fragmeta,
646 struct midgard_blend_rt *rts)
647 {
648 const struct panfrost_device *dev = pan_device(ctx->base.screen);
649 struct panfrost_shader_state *fs;
650
651 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
652
653 fragmeta->alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000);
654 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010;
655 fragmeta->unknown2_4 = 0x4e0;
656
657 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
658 * is required (independent of 32-bit/64-bit descriptors), or why it's
659 * not used on later GPU revisions. Otherwise, all shader jobs fault on
660 * these earlier chips (perhaps this is a chicken bit of some kind).
661 * More investigation is needed. */
662
663 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
664
665 /* Depending on whether it's legal to do so in the given shader, we try to
666 * enable early-z testing (or forward-pixel kill?) */
667
668 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
669 !fs->can_discard && !fs->writes_depth);
670
671 /* Add the writes Z/S flags if needed. */
672 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
673 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
674
675 /* Any time texturing is used, derivatives are implicitly calculated,
676 * so we need to enable helper invocations */
677
678 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
679 fs->helper_invocations);
680
681 /* CAN_DISCARD should be set if the fragment shader possibly contains a
682 * 'discard' instruction. It is likely this is related to optimizations
683 * related to forward-pixel kill, as per "Mali Performance 3: Is
684 * EGL_BUFFER_PRESERVED a good thing?" by Peter Harris */
685
686 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD, fs->can_discard);
687 SET_BIT(fragmeta->midgard1.flags_lo, 0x400, fs->can_discard);
688
689 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
690 panfrost_frag_meta_zsa_update(ctx, fragmeta);
691 panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
692 }
693
694 void
695 panfrost_emit_shader_meta(struct panfrost_batch *batch,
696 enum pipe_shader_type st,
697 struct midgard_payload_vertex_tiler *vtp)
698 {
699 struct panfrost_context *ctx = batch->ctx;
700 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
701
702 if (!ss) {
703 vtp->postfix.shader = 0;
704 return;
705 }
706
707 struct mali_shader_meta meta;
708
709 panfrost_shader_meta_init(ctx, st, &meta);
710
711 /* Add the shader BO to the batch. */
712 panfrost_batch_add_bo(batch, ss->bo,
713 PAN_BO_ACCESS_PRIVATE |
714 PAN_BO_ACCESS_READ |
715 panfrost_bo_access_for_stage(st));
716
717 mali_ptr shader_ptr;
718
719 if (st == PIPE_SHADER_FRAGMENT) {
720 struct panfrost_device *dev = pan_device(ctx->base.screen);
721 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
722 size_t desc_size = sizeof(meta);
723 struct midgard_blend_rt rts[4];
724 struct panfrost_transfer xfer;
725
726 assert(rt_count <= ARRAY_SIZE(rts));
727
728 panfrost_frag_shader_meta_init(ctx, &meta, rts);
729
730 if (!(dev->quirks & MIDGARD_SFBD))
731 desc_size += sizeof(*rts) * rt_count;
732
733 xfer = panfrost_allocate_transient(batch, desc_size);
734
735 memcpy(xfer.cpu, &meta, sizeof(meta));
736 memcpy(xfer.cpu + sizeof(meta), rts, desc_size - sizeof(meta)); /* zero-length on SFBD */
737
738 shader_ptr = xfer.gpu;
739 } else {
740 shader_ptr = panfrost_upload_transient(batch, &meta,
741 sizeof(meta));
742 }
743
744 vtp->postfix.shader = shader_ptr;
745 }
746
747 static void
748 panfrost_mali_viewport_init(struct panfrost_context *ctx,
749 struct mali_viewport *mvp)
750 {
751 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
752
753 /* Clip bounds are encoded as floats. The viewport itself is encoded as
754 * (somewhat) asymmetric ints. */
755
756 const struct pipe_scissor_state *ss = &ctx->scissor;
757
758 memset(mvp, 0, sizeof(*mvp));
759
760 /* By default, do no viewport clipping, i.e. clip to (-inf, inf) in
761 * each direction. Clipping to the viewport in theory should work, but
762 * in practice causes issues when we're not explicitly trying to
763 * scissor */
764
765 *mvp = (struct mali_viewport) {
766 .clip_minx = -INFINITY,
767 .clip_miny = -INFINITY,
768 .clip_maxx = INFINITY,
769 .clip_maxy = INFINITY,
770 };
771
772 /* Always scissor to the viewport by default. */
773 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
774 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
775
776 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
777 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
778
779 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
780 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
781
782 /* Apply the scissor test */
783
784 unsigned minx, miny, maxx, maxy;
785
786 if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
787 minx = MAX2(ss->minx, vp_minx);
788 miny = MAX2(ss->miny, vp_miny);
789 maxx = MIN2(ss->maxx, vp_maxx);
790 maxy = MIN2(ss->maxy, vp_maxy);
791 } else {
792 minx = vp_minx;
793 miny = vp_miny;
794 maxx = vp_maxx;
795 maxy = vp_maxy;
796 }
797
798 /* Hardware needs the min/max to be strictly ordered, so flip if we
799 * need to. The viewport transformation in the vertex shader will
800 * handle the negatives if we don't */
801
802 if (miny > maxy) {
803 unsigned temp = miny;
804 miny = maxy;
805 maxy = temp;
806 }
807
808 if (minx > maxx) {
809 unsigned temp = minx;
810 minx = maxx;
811 maxx = temp;
812 }
813
814 if (minz > maxz) {
815 float temp = minz;
816 minz = maxz;
817 maxz = temp;
818 }
819
820 /* Clamp to the framebuffer size as a last check */
821
822 minx = MIN2(ctx->pipe_framebuffer.width, minx);
823 maxx = MIN2(ctx->pipe_framebuffer.width, maxx);
824
825 miny = MIN2(ctx->pipe_framebuffer.height, miny);
826 maxy = MIN2(ctx->pipe_framebuffer.height, maxy);
827
828 /* Upload */
829
830 mvp->viewport0[0] = minx;
831 mvp->viewport1[0] = MALI_POSITIVE(maxx);
832
833 mvp->viewport0[1] = miny;
834 mvp->viewport1[1] = MALI_POSITIVE(maxy);
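/* viewport1 holds the inclusive maximum: MALI_POSITIVE stores the value minus
 * one, and panfrost_emit_viewport adds the one back when unioning the batch
 * scissor below. */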
835
836 mvp->clip_minz = minz;
837 mvp->clip_maxz = maxz;
838 }
839
840 void
841 panfrost_emit_viewport(struct panfrost_batch *batch,
842 struct midgard_payload_vertex_tiler *tp)
843 {
844 struct panfrost_context *ctx = batch->ctx;
845 struct mali_viewport mvp;
846
847 panfrost_mali_viewport_init(batch->ctx, &mvp);
848
849 /* Update the job, unless we're doing wallpapering (whose lack of
850 * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
851 * just... be faster :) */
852
853 if (!ctx->wallpaper_batch)
854 panfrost_batch_union_scissor(batch, mvp.viewport0[0],
855 mvp.viewport0[1],
856 mvp.viewport1[0] + 1,
857 mvp.viewport1[1] + 1);
858
859 tp->postfix.viewport = panfrost_upload_transient(batch, &mvp,
860 sizeof(mvp));
861 }
862
863 static mali_ptr
864 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
865 enum pipe_shader_type st,
866 struct panfrost_constant_buffer *buf,
867 unsigned index)
868 {
869 struct pipe_constant_buffer *cb = &buf->cb[index];
870 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
871
872 if (rsrc) {
873 panfrost_batch_add_bo(batch, rsrc->bo,
874 PAN_BO_ACCESS_SHARED |
875 PAN_BO_ACCESS_READ |
876 panfrost_bo_access_for_stage(st));
877
878 /* Alignment guaranteed by
879 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
880 return rsrc->bo->gpu + cb->buffer_offset;
881 } else if (cb->user_buffer) {
882 return panfrost_upload_transient(batch,
883 cb->user_buffer +
884 cb->buffer_offset,
885 cb->buffer_size);
886 } else {
887 unreachable("No constant buffer");
888 }
889 }
890
891 struct sysval_uniform {
892 union {
893 float f[4];
894 int32_t i[4];
895 uint32_t u[4];
896 uint64_t du[2];
897 };
898 };
899
900 static void
901 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
902 struct sysval_uniform *uniform)
903 {
904 struct panfrost_context *ctx = batch->ctx;
905 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
906
907 uniform->f[0] = vp->scale[0];
908 uniform->f[1] = vp->scale[1];
909 uniform->f[2] = vp->scale[2];
910 }
911
912 static void
913 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
914 struct sysval_uniform *uniform)
915 {
916 struct panfrost_context *ctx = batch->ctx;
917 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
918
919 uniform->f[0] = vp->translate[0];
920 uniform->f[1] = vp->translate[1];
921 uniform->f[2] = vp->translate[2];
922 }
923
924 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
925 enum pipe_shader_type st,
926 unsigned int sysvalid,
927 struct sysval_uniform *uniform)
928 {
929 struct panfrost_context *ctx = batch->ctx;
930 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
931 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
932 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
933 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
934
935 assert(dim);
936 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
937
938 if (dim > 1)
939 uniform->i[1] = u_minify(tex->texture->height0,
940 tex->u.tex.first_level);
941
942 if (dim > 2)
943 uniform->i[2] = u_minify(tex->texture->depth0,
944 tex->u.tex.first_level);
945
946 if (is_array)
947 uniform->i[dim] = tex->texture->array_size;
948 }
949
950 static void
951 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
952 enum pipe_shader_type st,
953 unsigned ssbo_id,
954 struct sysval_uniform *uniform)
955 {
956 struct panfrost_context *ctx = batch->ctx;
957
958 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
959 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
960
961 /* Compute address */
962 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
963
964 panfrost_batch_add_bo(batch, bo,
965 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
966 panfrost_bo_access_for_stage(st));
967
968 /* Upload address and size as sysval */
969 uniform->du[0] = bo->gpu + sb.buffer_offset;
970 uniform->u[2] = sb.buffer_size;
971 }
972
973 static void
974 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
975 enum pipe_shader_type st,
976 unsigned samp_idx,
977 struct sysval_uniform *uniform)
978 {
979 struct panfrost_context *ctx = batch->ctx;
980 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
981
982 uniform->f[0] = sampl->min_lod;
983 uniform->f[1] = sampl->max_lod;
984 uniform->f[2] = sampl->lod_bias;
985
986 /* Even without any errata, Midgard represents "no mipmapping" as
987 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
988 * panfrost_create_sampler_state which also explains our choice of
989 * epsilon value (again to keep behaviour consistent) */
990
991 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
992 uniform->f[1] = uniform->f[0] + (1.0/256.0);
993 }
994
995 static void
996 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
997 struct sysval_uniform *uniform)
998 {
999 struct panfrost_context *ctx = batch->ctx;
1000
1001 uniform->u[0] = ctx->compute_grid->grid[0];
1002 uniform->u[1] = ctx->compute_grid->grid[1];
1003 uniform->u[2] = ctx->compute_grid->grid[2];
1004 }
1005
1006 static void
1007 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1008 struct panfrost_shader_state *ss,
1009 enum pipe_shader_type st)
1010 {
1011 struct sysval_uniform *uniforms = (void *)buf;
1012
1013 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1014 int sysval = ss->sysval[i];
1015
1016 switch (PAN_SYSVAL_TYPE(sysval)) {
1017 case PAN_SYSVAL_VIEWPORT_SCALE:
1018 panfrost_upload_viewport_scale_sysval(batch,
1019 &uniforms[i]);
1020 break;
1021 case PAN_SYSVAL_VIEWPORT_OFFSET:
1022 panfrost_upload_viewport_offset_sysval(batch,
1023 &uniforms[i]);
1024 break;
1025 case PAN_SYSVAL_TEXTURE_SIZE:
1026 panfrost_upload_txs_sysval(batch, st,
1027 PAN_SYSVAL_ID(sysval),
1028 &uniforms[i]);
1029 break;
1030 case PAN_SYSVAL_SSBO:
1031 panfrost_upload_ssbo_sysval(batch, st,
1032 PAN_SYSVAL_ID(sysval),
1033 &uniforms[i]);
1034 break;
1035 case PAN_SYSVAL_NUM_WORK_GROUPS:
1036 panfrost_upload_num_work_groups_sysval(batch,
1037 &uniforms[i]);
1038 break;
1039 case PAN_SYSVAL_SAMPLER:
1040 panfrost_upload_sampler_sysval(batch, st,
1041 PAN_SYSVAL_ID(sysval),
1042 &uniforms[i]);
1043 break;
1044 default:
1045 assert(0);
1046 }
1047 }
1048 }
1049
1050 static const void *
1051 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1052 unsigned index)
1053 {
1054 struct pipe_constant_buffer *cb = &buf->cb[index];
1055 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1056
1057 if (rsrc)
1058 return rsrc->bo->cpu;
1059 else if (cb->user_buffer)
1060 return cb->user_buffer;
1061 else
1062 unreachable("No constant buffer");
1063 }
1064
1065 void
1066 panfrost_emit_const_buf(struct panfrost_batch *batch,
1067 enum pipe_shader_type stage,
1068 struct midgard_payload_vertex_tiler *vtp)
1069 {
1070 struct panfrost_context *ctx = batch->ctx;
1071 struct panfrost_shader_variants *all = ctx->shader[stage];
1072
1073 if (!all)
1074 return;
1075
1076 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1077
1078 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1079
1080 /* Uniforms are implicitly UBO #0 */
1081 bool has_uniforms = buf->enabled_mask & (1 << 0);
1082
1083 /* Allocate room for the sysval and the uniforms */
1084 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1085 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1086 size_t size = sys_size + uniform_size;
1087 struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
1088 size);
1089
1090 /* Upload sysvals requested by the shader */
1091 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1092
1093 /* Upload uniforms */
1094 if (has_uniforms && uniform_size) {
1095 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1096 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1097 }
1098
1099 struct mali_vertex_tiler_postfix *postfix = &vtp->postfix;
1100
1101 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1102 * uploaded */
1103
1104 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1105 assert(ubo_count >= 1);
1106
1107 size_t sz = sizeof(uint64_t) * ubo_count;
1108 uint64_t ubos[PAN_MAX_CONST_BUFFERS];
1109 int uniform_count = ss->uniform_count;
1110
1111 /* Upload uniforms as a UBO */
1112 ubos[0] = MALI_MAKE_UBO(2 + uniform_count, transfer.gpu);
1113
1114 /* The rest are honest-to-goodness UBOs */
1115
1116 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1117 size_t usz = buf->cb[ubo].buffer_size;
1118 bool enabled = buf->enabled_mask & (1 << ubo);
1119 bool empty = usz == 0;
1120
1121 if (!enabled || empty) {
1122 /* Stub out disabled UBOs to catch accesses */
1123 ubos[ubo] = MALI_MAKE_UBO(0, 0xDEAD0000);
1124 continue;
1125 }
1126
1127 mali_ptr gpu = panfrost_map_constant_buffer_gpu(batch, stage,
1128 buf, ubo);
1129
1130 unsigned bytes_per_field = 16;
1131 unsigned aligned = ALIGN_POT(usz, bytes_per_field);
1132 ubos[ubo] = MALI_MAKE_UBO(aligned / bytes_per_field, gpu);
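/* The UBO descriptor encodes the buffer size as a count of 16-byte fields next
 * to the GPU address; e.g. a 200-byte UBO becomes ALIGN_POT(200, 16) / 16 = 13
 * fields. */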
1133 }
1134
1135 mali_ptr ubufs = panfrost_upload_transient(batch, ubos, sz);
1136 postfix->uniforms = transfer.gpu;
1137 postfix->uniform_buffers = ubufs;
1138
1139 buf->dirty_mask = 0;
1140 }
1141
1142 void
1143 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1144 const struct pipe_grid_info *info,
1145 struct midgard_payload_vertex_tiler *vtp)
1146 {
1147 struct panfrost_context *ctx = batch->ctx;
1148 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1149 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1150 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1151 128));
1152 unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
1153 info->grid[2] * 4;
1154 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1155 shared_size,
1156 1);
1157
1158 struct mali_shared_memory shared = {
1159 .shared_memory = bo->gpu,
1160 .shared_workgroup_count =
1161 util_logbase2_ceil(info->grid[0]) +
1162 util_logbase2_ceil(info->grid[1]) +
1163 util_logbase2_ceil(info->grid[2]),
1164 .shared_unk1 = 0x2,
1165 .shared_shift = util_logbase2(single_size) - 1
1166 };
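/* shared_workgroup_count is the sum of the per-dimension ceil(log2(grid)) values,
 * i.e. log2 of a power-of-two upper bound on the total workgroup count, and
 * shared_shift is derived from the log2 of the power-of-two per-workgroup
 * allocation size computed above. */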
1167
1168 vtp->postfix.shared_memory = panfrost_upload_transient(batch, &shared,
1169 sizeof(shared));
1170 }
1171
1172 static mali_ptr
1173 panfrost_get_tex_desc(struct panfrost_batch *batch,
1174 enum pipe_shader_type st,
1175 struct panfrost_sampler_view *view)
1176 {
1177 if (!view)
1178 return (mali_ptr) 0;
1179
1180 struct pipe_sampler_view *pview = &view->base;
1181 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1182
1183 /* Add the BO to the job so it's retained until the job is done. */
1184
1185 panfrost_batch_add_bo(batch, rsrc->bo,
1186 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1187 panfrost_bo_access_for_stage(st));
1188
1189 panfrost_batch_add_bo(batch, view->bo,
1190 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1191 panfrost_bo_access_for_stage(st));
1192
1193 return view->bo->gpu;
1194 }
1195
1196 void
1197 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1198 enum pipe_shader_type stage,
1199 struct midgard_payload_vertex_tiler *vtp)
1200 {
1201 struct panfrost_context *ctx = batch->ctx;
1202
1203 if (!ctx->sampler_view_count[stage])
1204 return;
1205
1206 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1207
1208 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i)
1209 trampolines[i] = panfrost_get_tex_desc(batch, stage,
1210 ctx->sampler_views[stage][i]);
1211
1212 vtp->postfix.texture_trampoline = panfrost_upload_transient(batch,
1213 trampolines,
1214 sizeof(uint64_t) *
1215 ctx->sampler_view_count[stage]);
1216 }
1217
1218 void
1219 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1220 enum pipe_shader_type stage,
1221 struct midgard_payload_vertex_tiler *vtp)
1222 {
1223 struct panfrost_context *ctx = batch->ctx;
1224
1225 if (!ctx->sampler_count[stage])
1226 return;
1227
1228 size_t desc_size = sizeof(struct mali_sampler_descriptor);
1229 size_t transfer_size = desc_size * ctx->sampler_count[stage];
1230 struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
1231 transfer_size);
1232 struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *)transfer.cpu;
1233
1234 for (int i = 0; i < ctx->sampler_count[stage]; ++i)
1235 desc[i] = ctx->samplers[stage][i]->hw;
1236
1237 vtp->postfix.sampler_descriptor = transfer.gpu;
1238 }
1239
1240 void
1241 panfrost_emit_vertex_attr_meta(struct panfrost_batch *batch,
1242 struct midgard_payload_vertex_tiler *vp)
1243 {
1244 struct panfrost_context *ctx = batch->ctx;
1245
1246 if (!ctx->vertex)
1247 return;
1248
1249 struct panfrost_vertex_state *so = ctx->vertex;
1250
1251 panfrost_vertex_state_upd_attr_offs(ctx, vp);
1252 vp->postfix.attribute_meta = panfrost_upload_transient(batch, so->hw,
1253 sizeof(*so->hw) *
1254 PAN_MAX_ATTRIBUTE);
1255 }
1256
1257 void
1258 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1259 struct midgard_payload_vertex_tiler *vp)
1260 {
1261 struct panfrost_context *ctx = batch->ctx;
1262 struct panfrost_vertex_state *so = ctx->vertex;
1263
1264 /* Staged mali_attr, and index into them. i =/= k, depending on the
1265 * vertex buffer mask and instancing. Twice as much room is allocated,
1266 * for a worst case of NPOT_DIVIDEs, which take up an extra slot */
1267 union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
1268 unsigned k = 0;
1269
1270 for (unsigned i = 0; i < so->num_elements; ++i) {
1271 /* We map a mali_attr to be 1:1 with the mali_attr_meta, which
1272 * means duplicating some vertex buffers (who cares? aside from
1273 * maybe some caching implications but I somehow doubt that
1274 * matters) */
1275
1276 struct pipe_vertex_element *elem = &so->pipe[i];
1277 unsigned vbi = elem->vertex_buffer_index;
1278
1279 /* The exception to 1:1 mapping is that we can have multiple
1280 * entries (NPOT divisors), so we fixup anyways */
1281
1282 so->hw[i].index = k;
1283
1284 if (!(ctx->vb_mask & (1 << vbi)))
1285 continue;
1286
1287 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1288 struct panfrost_resource *rsrc;
1289
1290 rsrc = pan_resource(buf->buffer.resource);
1291 if (!rsrc)
1292 continue;
1293
1294 /* Align to 64 bytes by masking off the lower bits. This
1295 * will be adjusted back when we fixup the src_offset in
1296 * mali_attr_meta */
1297
1298 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1299 mali_ptr addr = raw_addr & ~63;
1300 unsigned chopped_addr = raw_addr - addr;
1301
1302 /* Add a dependency of the batch on the vertex buffer */
1303 panfrost_batch_add_bo(batch, rsrc->bo,
1304 PAN_BO_ACCESS_SHARED |
1305 PAN_BO_ACCESS_READ |
1306 PAN_BO_ACCESS_VERTEX_TILER);
1307
1308 /* Set common fields */
1309 attrs[k].elements = addr;
1310 attrs[k].stride = buf->stride;
1311
1312 /* Since we advanced the base pointer, we shrink the buffer
1313 * size */
1314 attrs[k].size = rsrc->base.width0 - buf->buffer_offset;
1315
1316 /* We need to add the extra size we masked off (for
1317 * correctness) so the data doesn't get clamped away */
1318 attrs[k].size += chopped_addr;
1319
1320 /* For non-instancing make sure we initialize */
1321 attrs[k].shift = attrs[k].extra_flags = 0;
1322
1323 /* Instancing uses a dramatically different code path than
1324 * linear, so dispatch for the actual emission now that the
1325 * common code is finished */
1326
1327 unsigned divisor = elem->instance_divisor;
1328
1329 if (divisor && ctx->instance_count == 1) {
1330 /* Silly corner case where there's a divisor(=1) but
1331 * there's no legitimate instancing. So we want *every*
1332 * attribute to be the same. So set stride to zero so
1333 * we don't go anywhere. */
1334
1335 attrs[k].size = attrs[k].stride + chopped_addr;
1336 attrs[k].stride = 0;
1337 attrs[k++].elements |= MALI_ATTR_LINEAR;
1338 } else if (ctx->instance_count <= 1) {
1339 /* Normal, non-instanced attributes */
1340 attrs[k++].elements |= MALI_ATTR_LINEAR;
1341 } else {
1342 unsigned instance_shift = vp->instance_shift;
1343 unsigned instance_odd = vp->instance_odd;
1344
1345 k += panfrost_vertex_instanced(ctx->padded_count,
1346 instance_shift,
1347 instance_odd,
1348 divisor, &attrs[k]);
1349 }
1350 }
1351
1352 /* Add special gl_VertexID/gl_InstanceID buffers */
1353
1354 panfrost_vertex_id(ctx->padded_count, &attrs[k]);
1355 so->hw[PAN_VERTEX_ID].index = k++;
1356 panfrost_instance_id(ctx->padded_count, &attrs[k]);
1357 so->hw[PAN_INSTANCE_ID].index = k++;
1358
1359 /* Upload whatever we emitted and go */
1360
1361 vp->postfix.attributes = panfrost_upload_transient(batch, attrs,
1362 k * sizeof(*attrs));
1363 }
1364
1365 static mali_ptr
1366 panfrost_emit_varyings(struct panfrost_batch *batch, union mali_attr *slot,
1367 unsigned stride, unsigned count)
1368 {
1369 /* Fill out the descriptor */
1370 slot->stride = stride;
1371 slot->size = stride * count;
1372 slot->shift = slot->extra_flags = 0;
1373
1374 struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
1375 slot->size);
1376
1377 slot->elements = transfer.gpu | MALI_ATTR_LINEAR;
1378
1379 return transfer.gpu;
1380 }
1381
1382 static void
1383 panfrost_emit_streamout(struct panfrost_batch *batch, union mali_attr *slot,
1384 unsigned stride, unsigned offset, unsigned count,
1385 struct pipe_stream_output_target *target)
1386 {
1387 /* Fill out the descriptor */
1388 slot->stride = stride * 4;
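/* Gallium expresses stream output strides in dwords; the descriptor wants bytes */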
1389 slot->shift = slot->extra_flags = 0;
1390
1391 unsigned max_size = target->buffer_size;
1392 unsigned expected_size = slot->stride * count;
1393
1394 slot->size = MIN2(max_size, expected_size);
1395
1396 /* Grab the BO and bind it to the batch */
1397 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1398
1399 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1400 * the perspective of the TILER and FRAGMENT.
1401 */
1402 panfrost_batch_add_bo(batch, bo,
1403 PAN_BO_ACCESS_SHARED |
1404 PAN_BO_ACCESS_RW |
1405 PAN_BO_ACCESS_VERTEX_TILER |
1406 PAN_BO_ACCESS_FRAGMENT);
1407
1408 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * slot->stride);
1409 slot->elements = addr;
1410 }
1411
1412 /* Given a shader and buffer indices, link varying metadata together */
1413
1414 static bool
1415 is_special_varying(gl_varying_slot loc)
1416 {
1417 switch (loc) {
1418 case VARYING_SLOT_POS:
1419 case VARYING_SLOT_PSIZ:
1420 case VARYING_SLOT_PNTC:
1421 case VARYING_SLOT_FACE:
1422 return true;
1423 default:
1424 return false;
1425 }
1426 }
1427
1428 static void
1429 panfrost_emit_varying_meta(void *outptr, struct panfrost_shader_state *ss,
1430 signed general, signed gl_Position,
1431 signed gl_PointSize, signed gl_PointCoord,
1432 signed gl_FrontFacing)
1433 {
1434 struct mali_attr_meta *out = (struct mali_attr_meta *) outptr;
1435
1436 for (unsigned i = 0; i < ss->varying_count; ++i) {
1437 gl_varying_slot location = ss->varyings_loc[i];
1438 int index = -1;
1439
1440 switch (location) {
1441 case VARYING_SLOT_POS:
1442 index = gl_Position;
1443 break;
1444 case VARYING_SLOT_PSIZ:
1445 index = gl_PointSize;
1446 break;
1447 case VARYING_SLOT_PNTC:
1448 index = gl_PointCoord;
1449 break;
1450 case VARYING_SLOT_FACE:
1451 index = gl_FrontFacing;
1452 break;
1453 default:
1454 index = general;
1455 break;
1456 }
1457
1458 assert(index >= 0);
1459 out[i].index = index;
1460 }
1461 }
1462
1463 static bool
1464 has_point_coord(unsigned mask, gl_varying_slot loc)
1465 {
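/* Bits 0-7 of the mask correspond to VARYING_SLOT_TEX0..TEX7 and bit 8 to
 * gl_PointCoord itself */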
1466 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1467 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1468 else if (loc == VARYING_SLOT_PNTC)
1469 return (mask & (1 << 8));
1470 else
1471 return false;
1472 }
1473
1474 /* Helpers for manipulating stream out information so we can pack varyings
1475 * accordingly. Compute the src_offset for a given captured varying */
1476
1477 static struct pipe_stream_output *
1478 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1479 {
1480 for (unsigned i = 0; i < info->num_outputs; ++i) {
1481 if (info->output[i].register_index == loc)
1482 return &info->output[i];
1483 }
1484
1485 unreachable("Varying not captured");
1486 }
1487
1488 /* TODO: Integers */
1489 static enum mali_format
1490 pan_xfb_format(unsigned nr_components)
1491 {
1492 switch (nr_components) {
1493 case 1: return MALI_R32F;
1494 case 2: return MALI_RG32F;
1495 case 3: return MALI_RGB32F;
1496 case 4: return MALI_RGBA32F;
1497 default: unreachable("Invalid format");
1498 }
1499 }
1500
1501 void
1502 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1503 unsigned vertex_count,
1504 struct midgard_payload_vertex_tiler *vp,
1505 struct midgard_payload_vertex_tiler *tp)
1506 {
1507 /* Load the shaders */
1508 struct panfrost_context *ctx = batch->ctx;
1509 struct panfrost_shader_state *vs, *fs;
1510 unsigned int num_gen_varyings = 0;
1511 size_t vs_size, fs_size;
1512
1513 /* Allocate the varying descriptor */
1514
1515 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1516 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1517 vs_size = sizeof(struct mali_attr_meta) * vs->varying_count;
1518 fs_size = sizeof(struct mali_attr_meta) * fs->varying_count;
1519
1520 struct panfrost_transfer trans = panfrost_allocate_transient(batch,
1521 vs_size +
1522 fs_size);
1523
1524 struct pipe_stream_output_info *so = &vs->stream_output;
1525
1526 /* Check if this varying is linked by us. This is the case for
1527 * general-purpose, non-captured varyings. If it is, link it. If it's
1528 * not, use the provided stream out information to determine the
1529 * offset, since it was already linked for us. */
1530
1531 for (unsigned i = 0; i < vs->varying_count; i++) {
1532 gl_varying_slot loc = vs->varyings_loc[i];
1533
1534 bool special = is_special_varying(loc);
1535 bool captured = ((vs->so_mask & (1ll << loc)) ? true : false);
1536
1537 if (captured) {
1538 struct pipe_stream_output *o = pan_get_so(so, loc);
1539
1540 unsigned dst_offset = o->dst_offset * 4; /* dwords */
1541 vs->varyings[i].src_offset = dst_offset;
1542 } else if (!special) {
1543 vs->varyings[i].src_offset = 16 * (num_gen_varyings++);
1544 }
1545 }
1546
1547 /* Conversely, we need to set src_offset for the captured varyings.
1548 * Here, the layout is defined by the stream out info, not us */
1549
1550 /* Link up with fragment varyings */
1551 bool reads_point_coord = fs->reads_point_coord;
1552
1553 for (unsigned i = 0; i < fs->varying_count; i++) {
1554 gl_varying_slot loc = fs->varyings_loc[i];
1555 unsigned src_offset;
1556 signed vs_idx = -1;
1557
1558 /* Link up */
1559 for (unsigned j = 0; j < vs->varying_count; ++j) {
1560 if (vs->varyings_loc[j] == loc) {
1561 vs_idx = j;
1562 break;
1563 }
1564 }
1565
1566 /* Either assign or reuse */
1567 if (vs_idx >= 0)
1568 src_offset = vs->varyings[vs_idx].src_offset;
1569 else
1570 src_offset = 16 * (num_gen_varyings++);
1571
1572 fs->varyings[i].src_offset = src_offset;
1573
1574 if (has_point_coord(fs->point_sprite_mask, loc))
1575 reads_point_coord = true;
1576 }
1577
1578 memcpy(trans.cpu, vs->varyings, vs_size);
1579 memcpy(trans.cpu + vs_size, fs->varyings, fs_size);
1580
1581 union mali_attr varyings[PIPE_MAX_ATTRIBS] = {0};
1582
1583 /* Figure out how many streamout buffers could be bound */
1584 unsigned so_count = ctx->streamout.num_targets;
1585 for (unsigned i = 0; i < vs->varying_count; i++) {
1586 gl_varying_slot loc = vs->varyings_loc[i];
1587
1588 bool captured = ((vs->so_mask & (1ll << loc)) ? true : false);
1589 if (!captured) continue;
1590
1591 struct pipe_stream_output *o = pan_get_so(so, loc);
1592 so_count = MAX2(so_count, o->output_buffer + 1);
1593 }
1594
1595 signed idx = so_count;
1596 signed general = idx++;
1597 signed gl_Position = idx++;
1598 signed gl_PointSize = vs->writes_point_size ? (idx++) : -1;
1599 signed gl_PointCoord = reads_point_coord ? (idx++) : -1;
1600 signed gl_FrontFacing = fs->reads_face ? (idx++) : -1;
1601 signed gl_FragCoord = fs->reads_frag_coord ? (idx++) : -1;
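/* Varying buffer layout: slots [0, so_count) are the transform feedback buffers,
 * followed by the general varying buffer, gl_Position, and then whichever of
 * gl_PointSize / gl_PointCoord / gl_FrontFacing / gl_FragCoord the shaders
 * actually use. */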
1602
1603 /* Emit the stream out buffers */
1604
1605 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
1606 ctx->vertex_count);
1607
1608 for (unsigned i = 0; i < so_count; ++i) {
1609 if (i < ctx->streamout.num_targets) {
1610 panfrost_emit_streamout(batch, &varyings[i],
1611 so->stride[i],
1612 ctx->streamout.offsets[i],
1613 out_count,
1614 ctx->streamout.targets[i]);
1615 } else {
1616 /* Emit a dummy buffer */
1617 panfrost_emit_varyings(batch, &varyings[i],
1618 so->stride[i] * 4,
1619 out_count);
1620
1621 /* Clear the attribute type */
1622 varyings[i].elements &= ~0xF;
1623 }
1624 }
1625
1626 panfrost_emit_varyings(batch, &varyings[general],
1627 num_gen_varyings * 16,
1628 vertex_count);
1629
1630 mali_ptr varyings_p;
1631
1632 /* fp32 vec4 gl_Position */
1633 varyings_p = panfrost_emit_varyings(batch, &varyings[gl_Position],
1634 sizeof(float) * 4, vertex_count);
1635 tp->postfix.position_varying = varyings_p;
1636
1637
1638 if (panfrost_writes_point_size(ctx)) {
1639 varyings_p = panfrost_emit_varyings(batch,
1640 &varyings[gl_PointSize],
1641 2, vertex_count);
1642 tp->primitive_size.pointer = varyings_p;
1643 }
1644
1645 if (reads_point_coord)
1646 varyings[gl_PointCoord].elements = MALI_VARYING_POINT_COORD;
1647
1648 if (fs->reads_face)
1649 varyings[gl_FrontFacing].elements = MALI_VARYING_FRONT_FACING;
1650
1651 if (fs->reads_frag_coord)
1652 varyings[gl_FragCoord].elements = MALI_VARYING_FRAG_COORD;
1653
1654 /* Let's go ahead and link varying meta to the buffer in question, now
1655 * that that information is available. VARYING_SLOT_POS is mapped to
1656 * gl_FragCoord for fragment shaders but gl_Position for vertex
1657 * shaders. */
1658
1659 panfrost_emit_varying_meta(trans.cpu, vs, general, gl_Position,
1660 gl_PointSize, gl_PointCoord,
1661 gl_FrontFacing);
1662
1663 panfrost_emit_varying_meta(trans.cpu + vs_size, fs, general,
1664 gl_FragCoord, gl_PointSize,
1665 gl_PointCoord, gl_FrontFacing);
1666
1667 /* Replace streamout */
1668
1669 struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
1670 struct mali_attr_meta *ofs = ovs + vs->varying_count;
1671
1672 for (unsigned i = 0; i < vs->varying_count; i++) {
1673 gl_varying_slot loc = vs->varyings_loc[i];
1674
1675 bool captured = ((vs->so_mask & (1ll << loc)) ? true : false);
1676 if (!captured)
1677 continue;
1678
1679 struct pipe_stream_output *o = pan_get_so(so, loc);
1680 ovs[i].index = o->output_buffer;
1681
1682 /* Set the type appropriately. TODO: Integer varyings XXX */
1683 assert(o->stream == 0);
1684 ovs[i].format = pan_xfb_format(o->num_components);
1685 ovs[i].swizzle = panfrost_get_default_swizzle(o->num_components);
1686
1687 /* Link to the fragment */
1688 signed fs_idx = -1;
1689
1690 /* Link up */
1691 for (unsigned j = 0; j < fs->varying_count; ++j) {
1692 if (fs->varyings_loc[j] == loc) {
1693 fs_idx = j;
1694 break;
1695 }
1696 }
1697
1698 if (fs_idx >= 0) {
1699 ofs[fs_idx].index = ovs[i].index;
1700 ofs[fs_idx].format = ovs[i].format;
1701 ofs[fs_idx].swizzle = ovs[i].swizzle;
1702 }
1703 }
1704
1705 /* Replace point sprite */
1706 for (unsigned i = 0; i < fs->varying_count; i++) {
1707 /* If we have a point sprite replacement, handle that here. We
1708 * have to translate location first. TODO: Flip y in shader.
1709 * We're already keying ... just time crunch .. */
1710
1711 if (has_point_coord(fs->point_sprite_mask,
1712 fs->varyings_loc[i])) {
1713 ofs[i].index = gl_PointCoord;
1714
1715 /* Swizzle out the z/w to 0/1 */
1716 ofs[i].format = MALI_RG16F;
1717 ofs[i].swizzle = panfrost_get_default_swizzle(2);
1718 }
1719 }
1720
1721 /* Fix up unaligned addresses */
1722 for (unsigned i = 0; i < so_count; ++i) {
1723 if (varyings[i].elements < MALI_RECORD_SPECIAL)
1724 continue;
1725
1726 unsigned align = (varyings[i].elements & 63);
1727
1728 /* While we're at it, the SO buffers are linear */
1729
1730 if (!align) {
1731 varyings[i].elements |= MALI_ATTR_LINEAR;
1732 continue;
1733 }
1734
1735 /* We need to adjust alignment */
1736 varyings[i].elements &= ~63;
1737 varyings[i].elements |= MALI_ATTR_LINEAR;
1738 varyings[i].size += align;
1739
1740 for (unsigned v = 0; v < vs->varying_count; ++v) {
1741 if (ovs[v].index != i)
1742 continue;
1743
1744 ovs[v].src_offset = vs->varyings[v].src_offset + align;
1745 }
1746
1747 for (unsigned f = 0; f < fs->varying_count; ++f) {
1748 if (ofs[f].index != i)
1749 continue;
1750
1751 ofs[f].src_offset = fs->varyings[f].src_offset + align;
1752 }
1753 }
1754
1755 varyings_p = panfrost_upload_transient(batch, varyings,
1756 idx * sizeof(*varyings));
1757 vp->postfix.varyings = varyings_p;
1758 tp->postfix.varyings = varyings_p;
1759
1760 vp->postfix.varying_meta = trans.gpu;
1761 tp->postfix.varying_meta = trans.gpu + vs_size;
1762 }
1763
1764 void
1765 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
1766 struct midgard_payload_vertex_tiler *vp,
1767 struct midgard_payload_vertex_tiler *tp)
1768 {
1769 struct panfrost_context *ctx = batch->ctx;
1770 bool wallpapering = ctx->wallpaper_batch && batch->tiler_dep;
1771
1772 if (wallpapering) {
1773 /* Inject in reverse order, with "predicted" job indices.
1774 * THIS IS A HACK XXX */
1775 panfrost_new_job(batch, JOB_TYPE_TILER, false,
1776 batch->job_index + 2, tp, sizeof(*tp), true);
1777 panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0,
1778 vp, sizeof(*vp), true);
1779 return;
1780 }
1781
1782 /* If rasterizer discard is enabled, only submit the vertex job */
1783
1784 bool rasterizer_discard = ctx->rasterizer &&
1785 ctx->rasterizer->base.rasterizer_discard;
1786
1787 unsigned vertex = panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0,
1788 vp, sizeof(*vp), false);
1789
1790 if (rasterizer_discard)
1791 return;
1792
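/* The tiler job consumes the vertex job's output, so pass the vertex job index
 * returned above as its dependency */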
1793 panfrost_new_job(batch, JOB_TYPE_TILER, false, vertex, tp, sizeof(*tp),
1794 false);
1795 }