panfrost: Prepare shader_meta descriptors at emission time
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26
27 #include "panfrost-quirks.h"
28
29 #include "pan_allocate.h"
30 #include "pan_bo.h"
31 #include "pan_cmdstream.h"
32 #include "pan_context.h"
33 #include "pan_job.h"
34
35 /* TODO: Bifrost requires just a mali_shared_memory, without the rest of the
36 * framebuffer */
37
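/* Lazily allocate the framebuffer descriptor for this batch (SFBD or MFBD
 * depending on the GPU) and point the vertex/tiler payload at it. MFBD
 * pointers get the MALI_MFBD tag, presumably so the hardware can tell the
 * two descriptor types apart. */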
38 void
39 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
40 struct midgard_payload_vertex_tiler *vt)
41 {
42 struct panfrost_screen *screen = pan_screen(ctx->base.screen);
43 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
44
45         /* If we haven't already, reserve space for the framebuffer descriptor */
46
47 if (!batch->framebuffer.gpu) {
48 unsigned size = (screen->quirks & MIDGARD_SFBD) ?
49 sizeof(struct mali_single_framebuffer) :
50 sizeof(struct mali_framebuffer);
51
52 batch->framebuffer = panfrost_allocate_transient(batch, size);
53
54 /* Tag the pointer */
55 if (!(screen->quirks & MIDGARD_SFBD))
56 batch->framebuffer.gpu |= MALI_MFBD;
57 }
58
59 vt->postfix.shared_memory = batch->framebuffer.gpu;
60 }
61
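/* Translate the bound rasterizer state (if any) into gl_enables bits
 * (winding, culling) and the FLATSHADE_FIRST draw flag, and pick the
 * constant point size / line width when the shader does not write
 * gl_PointSize. */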
62 void
63 panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
64 struct midgard_payload_vertex_tiler *tp)
65 {
66 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
67
68 tp->gl_enables |= 0x7;
69 SET_BIT(tp->gl_enables, MALI_FRONT_CCW_TOP,
70 rasterizer && rasterizer->base.front_ccw);
71 SET_BIT(tp->gl_enables, MALI_CULL_FACE_FRONT,
72 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
73 SET_BIT(tp->gl_enables, MALI_CULL_FACE_BACK,
74 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
75 SET_BIT(tp->prefix.unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
76 rasterizer && rasterizer->base.flatshade_first);
77
78 if (!panfrost_writes_point_size(ctx)) {
79 bool points = tp->prefix.draw_mode == MALI_POINTS;
80 float val = 0.0f;
81
82 if (rasterizer)
83 val = points ?
84 rasterizer->base.point_size :
85 rasterizer->base.line_width;
86
87 tp->primitive_size.constant = val;
88 }
89 }
90
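/* Point the payload at the active occlusion query's counter BO, or clear
 * the pointer when no query is bound. */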
91 void
92 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
93 struct midgard_payload_vertex_tiler *tp)
94 {
95 SET_BIT(tp->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
96 if (ctx->occlusion_query)
97 tp->postfix.occlusion_counter = ctx->occlusion_query->bo->gpu;
98 else
99 tp->postfix.occlusion_counter = 0;
100 }
101
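/* Fill in the parts of the shader descriptor (mali_shader_meta) that depend
 * only on the compiled variant and the bound textures/samplers. Fragment
 * shaders get additional state layered on top in
 * panfrost_frag_shader_meta_init. */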
102 static void
103 panfrost_shader_meta_init(struct panfrost_context *ctx,
104 enum pipe_shader_type st,
105 struct mali_shader_meta *meta)
106 {
107 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
108
109 memset(meta, 0, sizeof(*meta));
110 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
111 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
112 ss->uniform_cutoff);
113 meta->midgard1.work_count = ss->work_reg_count;
114 meta->attribute_count = ss->attribute_count;
115 meta->varying_count = ss->varying_count;
116 meta->midgard1.flags_hi = 0x8; /* XXX */
117 meta->midgard1.flags_lo = 0x220;
118 meta->texture_count = ctx->sampler_view_count[st];
119 meta->sampler_count = ctx->sampler_count[st];
120 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
121 }
122
123 unsigned
124 panfrost_translate_compare_func(enum pipe_compare_func in)
125 {
126 switch (in) {
127 case PIPE_FUNC_NEVER:
128 return MALI_FUNC_NEVER;
129
130 case PIPE_FUNC_LESS:
131 return MALI_FUNC_LESS;
132
133 case PIPE_FUNC_EQUAL:
134 return MALI_FUNC_EQUAL;
135
136 case PIPE_FUNC_LEQUAL:
137 return MALI_FUNC_LEQUAL;
138
139 case PIPE_FUNC_GREATER:
140 return MALI_FUNC_GREATER;
141
142 case PIPE_FUNC_NOTEQUAL:
143 return MALI_FUNC_NOTEQUAL;
144
145 case PIPE_FUNC_GEQUAL:
146 return MALI_FUNC_GEQUAL;
147
148 case PIPE_FUNC_ALWAYS:
149 return MALI_FUNC_ALWAYS;
150
151 default:
152 unreachable("Invalid func");
153 }
154 }
155
156 static unsigned
157 panfrost_translate_stencil_op(enum pipe_stencil_op in)
158 {
159 switch (in) {
160 case PIPE_STENCIL_OP_KEEP:
161 return MALI_STENCIL_KEEP;
162
163 case PIPE_STENCIL_OP_ZERO:
164 return MALI_STENCIL_ZERO;
165
166 case PIPE_STENCIL_OP_REPLACE:
167 return MALI_STENCIL_REPLACE;
168
169 case PIPE_STENCIL_OP_INCR:
170 return MALI_STENCIL_INCR;
171
172 case PIPE_STENCIL_OP_DECR:
173 return MALI_STENCIL_DECR;
174
175 case PIPE_STENCIL_OP_INCR_WRAP:
176 return MALI_STENCIL_INCR_WRAP;
177
178 case PIPE_STENCIL_OP_DECR_WRAP:
179 return MALI_STENCIL_DECR_WRAP;
180
181 case PIPE_STENCIL_OP_INVERT:
182 return MALI_STENCIL_INVERT;
183
184 default:
185 unreachable("Invalid stencil op");
186 }
187 }
188
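/* Translate one Gallium stencil face into the hardware stencil test
 * descriptor. The reference value is not part of Gallium's CSO and is
 * filled in later from ctx->stencil_ref. */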
189 static void
190 panfrost_make_stencil_state(const struct pipe_stencil_state *in,
191 struct mali_stencil_test *out)
192 {
193 out->ref = 0; /* Gallium gets it from elsewhere */
194
195 out->mask = in->valuemask;
196 out->func = panfrost_translate_compare_func(in->func);
197 out->sfail = panfrost_translate_stencil_op(in->fail_op);
198 out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
199 out->dppass = panfrost_translate_stencil_op(in->zpass_op);
200 }
201
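/* Apply rasterizer state (multisampling and polygon offset) to the fragment
 * shader descriptor, falling back to harmless defaults when no rasterizer
 * is bound. */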
202 static void
203 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
204 struct mali_shader_meta *fragmeta)
205 {
206 if (!ctx->rasterizer) {
207 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
208 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
209 fragmeta->depth_units = 0.0f;
210 fragmeta->depth_factor = 0.0f;
211 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
212 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
213 return;
214 }
215
216 bool msaa = ctx->rasterizer->base.multisample;
217
218 /* TODO: Sample size */
219 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
220 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
221 fragmeta->depth_units = ctx->rasterizer->base.offset_units * 2.0f;
222 fragmeta->depth_factor = ctx->rasterizer->base.offset_scale;
223
224         /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
225
226 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A,
227 ctx->rasterizer->base.offset_tri);
228 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B,
229 ctx->rasterizer->base.offset_tri);
230 }
231
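/* Apply depth/stencil state to the fragment shader descriptor. Without a
 * bound ZSA state we disable the stencil test and depth writes and leave
 * the depth function at ALWAYS. */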
232 static void
233 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
234 struct mali_shader_meta *fragmeta)
235 {
236 const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
237 int zfunc = PIPE_FUNC_ALWAYS;
238
239 if (!zsa) {
240 struct pipe_stencil_state default_stencil = {
241 .enabled = 0,
242 .func = PIPE_FUNC_ALWAYS,
243 .fail_op = MALI_STENCIL_KEEP,
244 .zfail_op = MALI_STENCIL_KEEP,
245 .zpass_op = MALI_STENCIL_KEEP,
246 .writemask = 0xFF,
247 .valuemask = 0xFF
248 };
249
250 panfrost_make_stencil_state(&default_stencil,
251 &fragmeta->stencil_front);
252 fragmeta->stencil_mask_front = default_stencil.writemask;
253 fragmeta->stencil_back = fragmeta->stencil_front;
254 fragmeta->stencil_mask_back = default_stencil.writemask;
255 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
256 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
257 } else {
258 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
259 zsa->stencil[0].enabled);
260 panfrost_make_stencil_state(&zsa->stencil[0],
261 &fragmeta->stencil_front);
262 fragmeta->stencil_mask_front = zsa->stencil[0].writemask;
263 fragmeta->stencil_front.ref = ctx->stencil_ref.ref_value[0];
264
265 /* If back-stencil is not enabled, use the front values */
266
267 if (zsa->stencil[1].enabled) {
268 panfrost_make_stencil_state(&zsa->stencil[1],
269 &fragmeta->stencil_back);
270 fragmeta->stencil_mask_back = zsa->stencil[1].writemask;
271 fragmeta->stencil_back.ref = ctx->stencil_ref.ref_value[1];
272 } else {
273 fragmeta->stencil_back = fragmeta->stencil_front;
274 fragmeta->stencil_mask_back = fragmeta->stencil_mask_front;
275 fragmeta->stencil_back.ref = fragmeta->stencil_front.ref;
276 }
277
278 if (zsa->depth.enabled)
279 zfunc = zsa->depth.func;
280
281 /* Depth state (TODO: Refactor) */
282
283 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
284 zsa->depth.writemask);
285 }
286
287 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
288 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
289 }
290
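/* Apply blend state to the fragment shader descriptor. On SFBD the blend
 * equation or shader lives in the shader descriptor itself; on MFBD it is
 * written to the midgard_blend_rt array appended after the descriptor, one
 * entry per render target. */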
291 static void
292 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
293 struct mali_shader_meta *fragmeta,
294 struct midgard_blend_rt *rts)
295 {
296 const struct panfrost_screen *screen = pan_screen(ctx->base.screen);
297
298 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
299 (screen->quirks & MIDGARD_SFBD) && ctx->blend &&
300 !ctx->blend->base.dither);
301
302 /* Get blending setup */
303 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
304
305 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
306 unsigned shader_offset = 0;
307 struct panfrost_bo *shader_bo = NULL;
308
309 for (unsigned c = 0; c < rt_count; ++c)
310 blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
311 &shader_offset);
312
313 /* If there is a blend shader, work registers are shared. XXX: opt */
314
315 for (unsigned c = 0; c < rt_count; ++c) {
316 if (blend[c].is_shader)
317 fragmeta->midgard1.work_count = 16;
318 }
319
320 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
321 * copied to the blend_meta appended (by convention), but this is the
322 * field actually read by the hardware. (Or maybe both are read...?).
323 * Specify the last RTi with a blend shader. */
324
325 fragmeta->blend.shader = 0;
326
327 for (signed rt = (rt_count - 1); rt >= 0; --rt) {
328 if (!blend[rt].is_shader)
329 continue;
330
331 fragmeta->blend.shader = blend[rt].shader.gpu |
332 blend[rt].shader.first_tag;
333 break;
334 }
335
336 if (screen->quirks & MIDGARD_SFBD) {
337                 /* On single render target (SFBD) platforms, the blend
338                  * information is inside the shader meta itself. We also
339                  * need to signal CAN_DISCARD for nontrivial blend modes (so
340                  * we're able to read back the destination buffer) */
341
342 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
343 blend[0].is_shader);
344
345 if (!blend[0].is_shader) {
346 fragmeta->blend.equation = *blend[0].equation.equation;
347 fragmeta->blend.constant = blend[0].equation.constant;
348 }
349
350 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
351 !blend[0].no_blending);
352 return;
353 }
354
355 /* Additional blend descriptor tacked on for jobs using MFBD */
356
357 for (unsigned i = 0; i < rt_count; ++i) {
358 rts[i].flags = 0x200;
359
360 bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
361 (ctx->pipe_framebuffer.cbufs[i]) &&
362 util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
363
364 SET_BIT(rts[i].flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
365 SET_BIT(rts[i].flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
366 SET_BIT(rts[i].flags, MALI_BLEND_SRGB, is_srgb);
367 SET_BIT(rts[i].flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
368
369 if (blend[i].is_shader) {
370 rts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
371 } else {
372 rts[i].blend.equation = *blend[i].equation.equation;
373 rts[i].blend.constant = blend[i].equation.constant;
374 }
375 }
376 }
377
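/* Fragment-specific shader descriptor setup: the magic flag bits, early-Z,
 * helper invocations and discard handling, plus the rasterizer-, ZSA- and
 * blend-dependent state filled in by the helpers above. */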
378 static void
379 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
380 struct mali_shader_meta *fragmeta,
381 struct midgard_blend_rt *rts)
382 {
383 const struct panfrost_screen *screen = pan_screen(ctx->base.screen);
384 struct panfrost_shader_state *fs;
385
386 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
387
388 fragmeta->alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000);
389 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010;
390 fragmeta->unknown2_4 = 0x4e0;
391
392 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
393 * is required (independent of 32-bit/64-bit descriptors), or why it's
394 * not used on later GPU revisions. Otherwise, all shader jobs fault on
395 * these earlier chips (perhaps this is a chicken bit of some kind).
396 * More investigation is needed. */
397
398 SET_BIT(fragmeta->unknown2_4, 0x10, screen->quirks & MIDGARD_SFBD);
399
400         /* Depending on whether it's legal to do so in the given shader, we
401          * try to enable early-z testing (or forward-pixel kill?) */
402
403 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
404 !fs->can_discard && !fs->writes_depth);
405
406 /* Add the writes Z/S flags if needed. */
407 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
408 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
409
410 /* Any time texturing is used, derivatives are implicitly calculated,
411 * so we need to enable helper invocations */
412
413 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
414 fs->helper_invocations);
415
416 /* CAN_DISCARD should be set if the fragment shader possibly contains a
417 * 'discard' instruction. It is likely this is related to optimizations
418 * related to forward-pixel kill, as per "Mali Performance 3: Is
419 * EGL_BUFFER_PRESERVED a good thing?" by Peter Harris */
420
421 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD, fs->can_discard);
422 SET_BIT(fragmeta->midgard1.flags_lo, 0x400, fs->can_discard);
423
424 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
425 panfrost_frag_meta_zsa_update(ctx, fragmeta);
426 panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
427 }
428
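/* Build the shader descriptor for a stage, upload it to transient memory
 * and point the vertex/tiler payload at it. For fragment shaders on MFBD
 * hardware, the per-render-target blend descriptors are appended to the
 * same allocation. */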
429 void
430 panfrost_emit_shader_meta(struct panfrost_batch *batch,
431 enum pipe_shader_type st,
432 struct midgard_payload_vertex_tiler *vtp)
433 {
434 struct panfrost_context *ctx = batch->ctx;
435 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
436
437 if (!ss) {
438 vtp->postfix.shader = 0;
439 return;
440 }
441
442 struct mali_shader_meta meta;
443
444 panfrost_shader_meta_init(ctx, st, &meta);
445
446 /* Add the shader BO to the batch. */
447 panfrost_batch_add_bo(batch, ss->bo,
448 PAN_BO_ACCESS_PRIVATE |
449 PAN_BO_ACCESS_READ |
450 panfrost_bo_access_for_stage(st));
451
452 mali_ptr shader_ptr;
453
454 if (st == PIPE_SHADER_FRAGMENT) {
455 struct panfrost_screen *screen = pan_screen(ctx->base.screen);
456 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
457 size_t desc_size = sizeof(meta);
458 struct midgard_blend_rt rts[4];
459 struct panfrost_transfer xfer;
460
461 assert(rt_count <= ARRAY_SIZE(rts));
462
463 panfrost_frag_shader_meta_init(ctx, &meta, rts);
464
465 if (!(screen->quirks & MIDGARD_SFBD))
466 desc_size += sizeof(*rts) * rt_count;
467
468 xfer = panfrost_allocate_transient(batch, desc_size);
469
470                 memcpy(xfer.cpu, &meta, sizeof(meta));
471                 if (!(screen->quirks & MIDGARD_SFBD))
                            memcpy(xfer.cpu + sizeof(meta), rts, sizeof(*rts) * rt_count);
472
473 shader_ptr = xfer.gpu;
474 } else {
475 shader_ptr = panfrost_upload_transient(batch, &meta,
476 sizeof(meta));
477 }
478
479 vtp->postfix.shader = shader_ptr;
480 }
481
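/* Build the viewport descriptor. X/Y clipping is left at +/- infinity (see
 * the comment below), the depth range comes from the viewport transform,
 * and the integer viewport box is the viewport intersected with the scissor
 * (when enabled) and clamped to the framebuffer. */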
482 static void
483 panfrost_mali_viewport_init(struct panfrost_context *ctx,
484 struct mali_viewport *mvp)
485 {
486 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
487
488 /* Clip bounds are encoded as floats. The viewport itself is encoded as
489 * (somewhat) asymmetric ints. */
490
491 const struct pipe_scissor_state *ss = &ctx->scissor;
492
493 memset(mvp, 0, sizeof(*mvp));
494
495 /* By default, do no viewport clipping, i.e. clip to (-inf, inf) in
496 * each direction. Clipping to the viewport in theory should work, but
497 * in practice causes issues when we're not explicitly trying to
498 * scissor */
499
500 *mvp = (struct mali_viewport) {
501 .clip_minx = -INFINITY,
502 .clip_miny = -INFINITY,
503 .clip_maxx = INFINITY,
504 .clip_maxy = INFINITY,
505 };
506
507 /* Always scissor to the viewport by default. */
508 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
509 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
510
511 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
512 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
513
514 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
515 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
516
517 /* Apply the scissor test */
518
519 unsigned minx, miny, maxx, maxy;
520
521 if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
522 minx = MAX2(ss->minx, vp_minx);
523 miny = MAX2(ss->miny, vp_miny);
524 maxx = MIN2(ss->maxx, vp_maxx);
525 maxy = MIN2(ss->maxy, vp_maxy);
526 } else {
527 minx = vp_minx;
528 miny = vp_miny;
529 maxx = vp_maxx;
530 maxy = vp_maxy;
531 }
532
533 /* Hardware needs the min/max to be strictly ordered, so flip if we
534 * need to. The viewport transformation in the vertex shader will
535 * handle the negatives if we don't */
536
537 if (miny > maxy) {
538 unsigned temp = miny;
539 miny = maxy;
540 maxy = temp;
541 }
542
543 if (minx > maxx) {
544 unsigned temp = minx;
545 minx = maxx;
546 maxx = temp;
547 }
548
549 if (minz > maxz) {
550 float temp = minz;
551 minz = maxz;
552 maxz = temp;
553 }
554
555 /* Clamp to the framebuffer size as a last check */
556
557 minx = MIN2(ctx->pipe_framebuffer.width, minx);
558 maxx = MIN2(ctx->pipe_framebuffer.width, maxx);
559
560 miny = MIN2(ctx->pipe_framebuffer.height, miny);
561 maxy = MIN2(ctx->pipe_framebuffer.height, maxy);
562
563 /* Upload */
564
565 mvp->viewport0[0] = minx;
566 mvp->viewport1[0] = MALI_POSITIVE(maxx);
567
568 mvp->viewport0[1] = miny;
569 mvp->viewport1[1] = MALI_POSITIVE(maxy);
570
571 mvp->clip_minz = minz;
572 mvp->clip_maxz = maxz;
573 }
574
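/* Upload the viewport descriptor and fold this draw's bounds into the
 * batch's scissor union (skipped for wallpaper draws, see below). */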
575 void
576 panfrost_emit_viewport(struct panfrost_batch *batch,
577 struct midgard_payload_vertex_tiler *tp)
578 {
579 struct panfrost_context *ctx = batch->ctx;
580 struct mali_viewport mvp;
581
582 panfrost_mali_viewport_init(batch->ctx, &mvp);
583
584 /* Update the job, unless we're doing wallpapering (whose lack of
585 * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
586 * just... be faster :) */
587
588 if (!ctx->wallpaper_batch)
589 panfrost_batch_union_scissor(batch, mvp.viewport0[0],
590 mvp.viewport0[1],
591 mvp.viewport1[0] + 1,
592 mvp.viewport1[1] + 1);
593
594 tp->postfix.viewport = panfrost_upload_transient(batch, &mvp,
595 sizeof(mvp));
596 }
597
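/* Return a GPU address for a constant buffer: either the backing resource's
 * BO (which is added to the batch) or a transient upload of a user-pointer
 * buffer. */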
598 static mali_ptr
599 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
600 enum pipe_shader_type st,
601 struct panfrost_constant_buffer *buf,
602 unsigned index)
603 {
604 struct pipe_constant_buffer *cb = &buf->cb[index];
605 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
606
607 if (rsrc) {
608 panfrost_batch_add_bo(batch, rsrc->bo,
609 PAN_BO_ACCESS_SHARED |
610 PAN_BO_ACCESS_READ |
611 panfrost_bo_access_for_stage(st));
612
613                 /* Alignment guaranteed by
614                  * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
615 return rsrc->bo->gpu + cb->buffer_offset;
616 } else if (cb->user_buffer) {
617 return panfrost_upload_transient(batch,
618 cb->user_buffer +
619 cb->buffer_offset,
620 cb->buffer_size);
621 } else {
622 unreachable("No constant buffer");
623 }
624 }
625
626 struct sysval_uniform {
627 union {
628 float f[4];
629 int32_t i[4];
630 uint32_t u[4];
631 uint64_t du[2];
632 };
633 };
634
635 static void
636 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
637 struct sysval_uniform *uniform)
638 {
639 struct panfrost_context *ctx = batch->ctx;
640 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
641
642 uniform->f[0] = vp->scale[0];
643 uniform->f[1] = vp->scale[1];
644 uniform->f[2] = vp->scale[2];
645 }
646
647 static void
648 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
649 struct sysval_uniform *uniform)
650 {
651 struct panfrost_context *ctx = batch->ctx;
652 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
653
654 uniform->f[0] = vp->translate[0];
655 uniform->f[1] = vp->translate[1];
656 uniform->f[2] = vp->translate[2];
657 }
658
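/* Texture size sysval: the width/height/depth of the selected texture at
 * its first level, with the layer count appended for array textures. */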
659 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
660 enum pipe_shader_type st,
661 unsigned int sysvalid,
662 struct sysval_uniform *uniform)
663 {
664 struct panfrost_context *ctx = batch->ctx;
665 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
666 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
667 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
668 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
669
670 assert(dim);
671 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
672
673 if (dim > 1)
674 uniform->i[1] = u_minify(tex->texture->height0,
675 tex->u.tex.first_level);
676
677 if (dim > 2)
678 uniform->i[2] = u_minify(tex->texture->depth0,
679 tex->u.tex.first_level);
680
681 if (is_array)
682 uniform->i[dim] = tex->texture->array_size;
683 }
684
685 static void
686 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
687 enum pipe_shader_type st,
688 unsigned ssbo_id,
689 struct sysval_uniform *uniform)
690 {
691 struct panfrost_context *ctx = batch->ctx;
692
693 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
694 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
695
696 /* Compute address */
697 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
698
699 panfrost_batch_add_bo(batch, bo,
700 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
701 panfrost_bo_access_for_stage(st));
702
703 /* Upload address and size as sysval */
704 uniform->du[0] = bo->gpu + sb.buffer_offset;
705 uniform->u[2] = sb.buffer_size;
706 }
707
708 static void
709 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
710 enum pipe_shader_type st,
711 unsigned samp_idx,
712 struct sysval_uniform *uniform)
713 {
714 struct panfrost_context *ctx = batch->ctx;
715 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
716
717 uniform->f[0] = sampl->min_lod;
718 uniform->f[1] = sampl->max_lod;
719 uniform->f[2] = sampl->lod_bias;
720
721 /* Even without any errata, Midgard represents "no mipmapping" as
722          * fixing the LOD with the clamps; keep behaviour consistent. cf.
723 * panfrost_create_sampler_state which also explains our choice of
724 * epsilon value (again to keep behaviour consistent) */
725
726 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
727 uniform->f[1] = uniform->f[0] + (1.0/256.0);
728 }
729
730 static void
731 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
732 struct sysval_uniform *uniform)
733 {
734 struct panfrost_context *ctx = batch->ctx;
735
736 uniform->u[0] = ctx->compute_grid->grid[0];
737 uniform->u[1] = ctx->compute_grid->grid[1];
738 uniform->u[2] = ctx->compute_grid->grid[2];
739 }
740
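/* Write the sysvals requested by the shader into the given buffer, one
 * 16-byte slot per sysval, in the order the compiler assigned them. */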
741 static void
742 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
743 struct panfrost_shader_state *ss,
744 enum pipe_shader_type st)
745 {
746 struct sysval_uniform *uniforms = (void *)buf;
747
748 for (unsigned i = 0; i < ss->sysval_count; ++i) {
749 int sysval = ss->sysval[i];
750
751 switch (PAN_SYSVAL_TYPE(sysval)) {
752 case PAN_SYSVAL_VIEWPORT_SCALE:
753 panfrost_upload_viewport_scale_sysval(batch,
754 &uniforms[i]);
755 break;
756 case PAN_SYSVAL_VIEWPORT_OFFSET:
757 panfrost_upload_viewport_offset_sysval(batch,
758 &uniforms[i]);
759 break;
760 case PAN_SYSVAL_TEXTURE_SIZE:
761 panfrost_upload_txs_sysval(batch, st,
762 PAN_SYSVAL_ID(sysval),
763 &uniforms[i]);
764 break;
765 case PAN_SYSVAL_SSBO:
766 panfrost_upload_ssbo_sysval(batch, st,
767 PAN_SYSVAL_ID(sysval),
768 &uniforms[i]);
769 break;
770 case PAN_SYSVAL_NUM_WORK_GROUPS:
771 panfrost_upload_num_work_groups_sysval(batch,
772 &uniforms[i]);
773 break;
774 case PAN_SYSVAL_SAMPLER:
775 panfrost_upload_sampler_sysval(batch, st,
776 PAN_SYSVAL_ID(sysval),
777 &uniforms[i]);
778 break;
779 default:
780 assert(0);
781 }
782 }
783 }
784
785 static const void *
786 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
787 unsigned index)
788 {
789 struct pipe_constant_buffer *cb = &buf->cb[index];
790 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
791
792 if (rsrc)
793 return rsrc->bo->cpu;
794 else if (cb->user_buffer)
795 return cb->user_buffer;
796 else
797 unreachable("No constant buffer");
798 }
799
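/* Upload uniforms and UBO descriptors for a stage. Sysvals and the contents
 * of UBO #0 are copied into a single transient buffer, which also serves as
 * the "uniforms" pointer; the remaining UBOs reference their backing
 * resources (or transient copies of user buffers). */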
800 void
801 panfrost_emit_const_buf(struct panfrost_batch *batch,
802 enum pipe_shader_type stage,
803 struct midgard_payload_vertex_tiler *vtp)
804 {
805 struct panfrost_context *ctx = batch->ctx;
806 struct panfrost_shader_variants *all = ctx->shader[stage];
807
808 if (!all)
809 return;
810
811 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
812
813 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
814
815 /* Uniforms are implicitly UBO #0 */
816 bool has_uniforms = buf->enabled_mask & (1 << 0);
817
818 /* Allocate room for the sysval and the uniforms */
819 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
820 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
821 size_t size = sys_size + uniform_size;
822 struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
823 size);
824
825 /* Upload sysvals requested by the shader */
826 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
827
828 /* Upload uniforms */
829 if (has_uniforms && uniform_size) {
830 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
831 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
832 }
833
834 struct mali_vertex_tiler_postfix *postfix = &vtp->postfix;
835
836 /* Next up, attach UBOs. UBO #0 is the uniforms we just
837 * uploaded */
838
839 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
840 assert(ubo_count >= 1);
841
842 size_t sz = sizeof(uint64_t) * ubo_count;
843 uint64_t ubos[PAN_MAX_CONST_BUFFERS];
844 int uniform_count = ss->uniform_count;
845
846 /* Upload uniforms as a UBO */
847 ubos[0] = MALI_MAKE_UBO(2 + uniform_count, transfer.gpu);
848
849 /* The rest are honest-to-goodness UBOs */
850
851 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
852 size_t usz = buf->cb[ubo].buffer_size;
853 bool enabled = buf->enabled_mask & (1 << ubo);
854 bool empty = usz == 0;
855
856 if (!enabled || empty) {
857 /* Stub out disabled UBOs to catch accesses */
858 ubos[ubo] = MALI_MAKE_UBO(0, 0xDEAD0000);
859 continue;
860 }
861
862 mali_ptr gpu = panfrost_map_constant_buffer_gpu(batch, stage,
863 buf, ubo);
864
865 unsigned bytes_per_field = 16;
866 unsigned aligned = ALIGN_POT(usz, bytes_per_field);
867 ubos[ubo] = MALI_MAKE_UBO(aligned / bytes_per_field, gpu);
868 }
869
870 mali_ptr ubufs = panfrost_upload_transient(batch, ubos, sz);
871 postfix->uniforms = transfer.gpu;
872 postfix->uniform_buffers = ubufs;
873
874 buf->dirty_mask = 0;
875 }
876
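/* Allocate the shared/workgroup memory for a compute job and emit the
 * mali_shared_memory descriptor describing it, sized from the shader's
 * declared shared size and the dispatch grid. */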
877 void
878 panfrost_emit_shared_memory(struct panfrost_batch *batch,
879 const struct pipe_grid_info *info,
880 struct midgard_payload_vertex_tiler *vtp)
881 {
882 struct panfrost_context *ctx = batch->ctx;
883 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
884 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
885 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
886 128));
887 unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
888 info->grid[2] * 4;
889 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
890 shared_size,
891 1);
892
893 struct mali_shared_memory shared = {
894 .shared_memory = bo->gpu,
895 .shared_workgroup_count =
896 util_logbase2_ceil(info->grid[0]) +
897 util_logbase2_ceil(info->grid[1]) +
898 util_logbase2_ceil(info->grid[2]),
899 .shared_unk1 = 0x2,
900 .shared_shift = util_logbase2(single_size) - 1
901 };
902
903 vtp->postfix.shared_memory = panfrost_upload_transient(batch, &shared,
904 sizeof(shared));
905 }