panfrost: Add a panfrost_sampler_desc_init() helper
[mesa.git] / src/gallium/drivers/panfrost/pan_cmdstream.c
/*
 * Copyright (C) 2018 Alyssa Rosenzweig
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "util/macros.h"

#include "panfrost-quirks.h"

#include "pan_allocate.h"
#include "pan_bo.h"
#include "pan_cmdstream.h"
#include "pan_context.h"
#include "pan_job.h"
/* TODO: Bifrost requires just a mali_shared_memory, without the rest of the
 * framebuffer */

void
panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
                               struct midgard_payload_vertex_tiler *vt)
{
        struct panfrost_screen *screen = pan_screen(ctx->base.screen);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        /* If we haven't already, reserve space for the framebuffer descriptor */

        if (!batch->framebuffer.gpu) {
                unsigned size = (screen->quirks & MIDGARD_SFBD) ?
                                sizeof(struct mali_single_framebuffer) :
                                sizeof(struct mali_framebuffer);

                batch->framebuffer = panfrost_allocate_transient(batch, size);

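                /* Descriptor allocations are aligned, leaving the low bits of
                 * the GPU address clear; the hardware appears to use them to
                 * distinguish MFBD from SFBD descriptors. This is our reading
                 * of the tagging convention; the exact alignment guarantee
                 * relied on here is an assumption. */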
                /* Tag the pointer */
                if (!(screen->quirks & MIDGARD_SFBD))
                        batch->framebuffer.gpu |= MALI_MFBD;
        }

        vt->postfix.shared_memory = batch->framebuffer.gpu;
}

void
panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
                              struct midgard_payload_vertex_tiler *tp)
{
        struct panfrost_rasterizer *rasterizer = ctx->rasterizer;

        tp->gl_enables |= 0x7;
        SET_BIT(tp->gl_enables, MALI_FRONT_CCW_TOP,
                rasterizer && rasterizer->base.front_ccw);
        SET_BIT(tp->gl_enables, MALI_CULL_FACE_FRONT,
                rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
        SET_BIT(tp->gl_enables, MALI_CULL_FACE_BACK,
                rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
        SET_BIT(tp->prefix.unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
                rasterizer && rasterizer->base.flatshade_first);

        if (!panfrost_writes_point_size(ctx)) {
                bool points = tp->prefix.draw_mode == MALI_POINTS;
                float val = 0.0f;

                if (rasterizer)
                        val = points ?
                              rasterizer->base.point_size :
                              rasterizer->base.line_width;

                tp->primitive_size.constant = val;
        }
}

void
panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
                                   struct midgard_payload_vertex_tiler *tp)
{
        SET_BIT(tp->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
        if (ctx->occlusion_query)
                tp->postfix.occlusion_counter = ctx->occlusion_query->bo->gpu;
        else
                tp->postfix.occlusion_counter = 0;
}

static void
panfrost_shader_meta_init(struct panfrost_context *ctx,
                          enum pipe_shader_type st,
                          struct mali_shader_meta *meta)
{
        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);

        memset(meta, 0, sizeof(*meta));
        meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
        meta->midgard1.uniform_count = MIN2(ss->uniform_count,
                                            ss->uniform_cutoff);
        meta->midgard1.work_count = ss->work_reg_count;
        meta->attribute_count = ss->attribute_count;
        meta->varying_count = ss->varying_count;
        meta->midgard1.flags_hi = 0x8; /* XXX */
        meta->midgard1.flags_lo = 0x220;
        meta->texture_count = ctx->sampler_view_count[st];
        meta->sampler_count = ctx->sampler_count[st];
        meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
}

static unsigned
panfrost_translate_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_FUNC_ALWAYS;

        default:
                unreachable("Invalid func");
        }
}

static unsigned
panfrost_translate_stencil_op(enum pipe_stencil_op in)
{
        switch (in) {
        case PIPE_STENCIL_OP_KEEP:
                return MALI_STENCIL_KEEP;

        case PIPE_STENCIL_OP_ZERO:
                return MALI_STENCIL_ZERO;

        case PIPE_STENCIL_OP_REPLACE:
                return MALI_STENCIL_REPLACE;

        case PIPE_STENCIL_OP_INCR:
                return MALI_STENCIL_INCR;

        case PIPE_STENCIL_OP_DECR:
                return MALI_STENCIL_DECR;

        case PIPE_STENCIL_OP_INCR_WRAP:
                return MALI_STENCIL_INCR_WRAP;

        case PIPE_STENCIL_OP_DECR_WRAP:
                return MALI_STENCIL_DECR_WRAP;

        case PIPE_STENCIL_OP_INVERT:
                return MALI_STENCIL_INVERT;

        default:
                unreachable("Invalid stencil op");
        }
}

static unsigned
translate_tex_wrap(enum pipe_tex_wrap w)
{
        switch (w) {
        case PIPE_TEX_WRAP_REPEAT:
                return MALI_WRAP_REPEAT;

        case PIPE_TEX_WRAP_CLAMP:
                return MALI_WRAP_CLAMP;

        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return MALI_WRAP_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return MALI_WRAP_CLAMP_TO_BORDER;

        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return MALI_WRAP_MIRRORED_REPEAT;

        case PIPE_TEX_WRAP_MIRROR_CLAMP:
                return MALI_WRAP_MIRRORED_CLAMP;

        case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
                return MALI_WRAP_MIRRORED_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
                return MALI_WRAP_MIRRORED_CLAMP_TO_BORDER;

        default:
                unreachable("Invalid wrap");
        }
}
void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
                                struct mali_sampler_descriptor *hw)
{
        unsigned func = panfrost_translate_compare_func(cso->compare_func);
        bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
        bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
        bool mip_linear = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;
        unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
        unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
        unsigned mip_filter = mip_linear ?
                              (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
        unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;

        *hw = (struct mali_sampler_descriptor) {
                .filter_mode = min_filter | mag_filter | mip_filter |
                               normalized,
                .wrap_s = translate_tex_wrap(cso->wrap_s),
                .wrap_t = translate_tex_wrap(cso->wrap_t),
                .wrap_r = translate_tex_wrap(cso->wrap_r),
                .compare_func = panfrost_flip_compare_func(func),
                .border_color = {
                        cso->border_color.f[0],
                        cso->border_color.f[1],
                        cso->border_color.f[2],
                        cso->border_color.f[3]
                },
                .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
                .max_lod = FIXED_16(cso->max_lod, false),
                .lod_bias = FIXED_16(cso->lod_bias, true), /* can be negative */
                .seamless_cube_map = cso->seamless_cube_map,
        };
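
        /* Note the compare function is routed through
         * panfrost_flip_compare_func() above: the sampler appears to compare
         * with its operands in the opposite order from Gallium, so LESS
         * becomes GREATER and vice versa (our reading of that helper's
         * purpose). */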

        /* If necessary, we disable mipmapping in the sampler descriptor by
         * clamping the LOD as tight as possible (from 0 to epsilon,
         * essentially -- remember these are fixed point numbers, so
         * epsilon=1/256) */
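        /* A sketch of that arithmetic, assuming FIXED_16() produces 8.8 fixed
         * point (consistent with the 1/256 epsilon above): min_lod = 0.0
         * encodes as 0, so max_lod = min_lod + 1 clamps the LOD range to
         * [0, 1/256], effectively forcing the base mip level. */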

        if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
                hw->max_lod = hw->min_lod + 1;
}
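
/* Expected usage (a sketch; the real caller lives in the sampler CSO code):
 *
 *     struct mali_sampler_descriptor hw;
 *     panfrost_sampler_desc_init(cso, &hw);
 *     mali_ptr gpu = panfrost_upload_transient(batch, &hw, sizeof(hw));
 */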

static void
panfrost_make_stencil_state(const struct pipe_stencil_state *in,
                            struct mali_stencil_test *out)
{
        out->ref = 0; /* Gallium gets it from elsewhere */

        out->mask = in->valuemask;
        out->func = panfrost_translate_compare_func(in->func);
        out->sfail = panfrost_translate_stencil_op(in->fail_op);
        out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
        out->dppass = panfrost_translate_stencil_op(in->zpass_op);
}

static void
panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
                                     struct mali_shader_meta *fragmeta)
{
        if (!ctx->rasterizer) {
                SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
                SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
                fragmeta->depth_units = 0.0f;
                fragmeta->depth_factor = 0.0f;
                SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
                SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
                return;
        }

        bool msaa = ctx->rasterizer->base.multisample;

        /* TODO: Sample size */
        SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
        SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
        fragmeta->depth_units = ctx->rasterizer->base.offset_units * 2.0f;
        fragmeta->depth_factor = ctx->rasterizer->base.offset_scale;

        /* XXX: Which bit is which? Does this maybe allow offsetting
         * non-triangle primitives? */

        SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A,
                ctx->rasterizer->base.offset_tri);
        SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B,
                ctx->rasterizer->base.offset_tri);
}

static void
panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
                              struct mali_shader_meta *fragmeta)
{
        const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
        int zfunc = PIPE_FUNC_ALWAYS;

        if (!zsa) {
                struct pipe_stencil_state default_stencil = {
                        .enabled = 0,
                        .func = PIPE_FUNC_ALWAYS,
                        .fail_op = PIPE_STENCIL_OP_KEEP,
                        .zfail_op = PIPE_STENCIL_OP_KEEP,
                        .zpass_op = PIPE_STENCIL_OP_KEEP,
                        .writemask = 0xFF,
                        .valuemask = 0xFF
                };

                panfrost_make_stencil_state(&default_stencil,
                                            &fragmeta->stencil_front);
                fragmeta->stencil_mask_front = default_stencil.writemask;
                fragmeta->stencil_back = fragmeta->stencil_front;
                fragmeta->stencil_mask_back = default_stencil.writemask;
                SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
        } else {
                SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
                        zsa->stencil[0].enabled);
                panfrost_make_stencil_state(&zsa->stencil[0],
                                            &fragmeta->stencil_front);
                fragmeta->stencil_mask_front = zsa->stencil[0].writemask;
                fragmeta->stencil_front.ref = ctx->stencil_ref.ref_value[0];

                /* If back-stencil is not enabled, use the front values */

                if (zsa->stencil[1].enabled) {
                        panfrost_make_stencil_state(&zsa->stencil[1],
                                                    &fragmeta->stencil_back);
                        fragmeta->stencil_mask_back = zsa->stencil[1].writemask;
                        fragmeta->stencil_back.ref = ctx->stencil_ref.ref_value[1];
                } else {
                        fragmeta->stencil_back = fragmeta->stencil_front;
                        fragmeta->stencil_mask_back = fragmeta->stencil_mask_front;
                        fragmeta->stencil_back.ref = fragmeta->stencil_front.ref;
                }

                if (zsa->depth.enabled)
                        zfunc = zsa->depth.func;

                /* Depth state (TODO: Refactor) */

                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
                        zsa->depth.writemask);
        }

        fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
        fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
}

static void
panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
                                struct mali_shader_meta *fragmeta,
                                struct midgard_blend_rt *rts)
{
        const struct panfrost_screen *screen = pan_screen(ctx->base.screen);

        SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
                (screen->quirks & MIDGARD_SFBD) && ctx->blend &&
                !ctx->blend->base.dither);

        /* Get blending setup */
        unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);

        struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
        unsigned shader_offset = 0;
        struct panfrost_bo *shader_bo = NULL;

        for (unsigned c = 0; c < rt_count; ++c)
                blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
                                                          &shader_offset);

        /* If there is a blend shader, work registers are shared. XXX: opt */

        for (unsigned c = 0; c < rt_count; ++c) {
                if (blend[c].is_shader)
                        fragmeta->midgard1.work_count = 16;
        }

        /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
         * copied to the blend_meta appended (by convention), but this is the
         * field actually read by the hardware. (Or maybe both are read...?).
         * Specify the last RTi with a blend shader. */

        fragmeta->blend.shader = 0;

        for (signed rt = (rt_count - 1); rt >= 0; --rt) {
                if (!blend[rt].is_shader)
                        continue;

                fragmeta->blend.shader = blend[rt].shader.gpu |
                                         blend[rt].shader.first_tag;
                break;
        }

        if (screen->quirks & MIDGARD_SFBD) {
                /* On platforms with only a single render target (SFBD), the
                 * blend information lives inside the shader meta itself. We
                 * additionally need to signal CAN_DISCARD for nontrivial
                 * blend modes (so we're able to read back the destination
                 * buffer) */

                SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
                        blend[0].is_shader);

                if (!blend[0].is_shader) {
                        fragmeta->blend.equation = *blend[0].equation.equation;
                        fragmeta->blend.constant = blend[0].equation.constant;
                }

                SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
                        !blend[0].no_blending);
                return;
        }

        /* Additional blend descriptor tacked on for jobs using MFBD */

        for (unsigned i = 0; i < rt_count; ++i) {
                rts[i].flags = 0x200;

                bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
                               (ctx->pipe_framebuffer.cbufs[i]) &&
                               util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);

                SET_BIT(rts[i].flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
                SET_BIT(rts[i].flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
                SET_BIT(rts[i].flags, MALI_BLEND_SRGB, is_srgb);
                SET_BIT(rts[i].flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);

                if (blend[i].is_shader) {
                        rts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
                } else {
                        rts[i].blend.equation = *blend[i].equation.equation;
                        rts[i].blend.constant = blend[i].equation.constant;
                }
        }
}

static void
panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
                               struct mali_shader_meta *fragmeta,
                               struct midgard_blend_rt *rts)
{
        const struct panfrost_screen *screen = pan_screen(ctx->base.screen);
        struct panfrost_shader_state *fs;

        fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);

        fragmeta->alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000);
        fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010;
        fragmeta->unknown2_4 = 0x4e0;

        /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
         * is required (independent of 32-bit/64-bit descriptors), or why it's
         * not used on later GPU revisions. Otherwise, all shader jobs fault on
         * these earlier chips (perhaps this is a chicken bit of some kind).
         * More investigation is needed. */

        SET_BIT(fragmeta->unknown2_4, 0x10, screen->quirks & MIDGARD_SFBD);

        /* Depending on whether it's legal in the given shader, we try to
         * enable early-z testing (or forward-pixel kill?) */

        SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
                !fs->can_discard && !fs->writes_depth);

        /* Add the writes Z/S flags if needed. */
        SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
        SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);

        /* Any time texturing is used, derivatives are implicitly calculated,
         * so we need to enable helper invocations */

        SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
                fs->helper_invocations);

        /* CAN_DISCARD should be set if the fragment shader possibly contains
         * a 'discard' instruction. It is likely related to forward-pixel kill
         * optimizations, as per "Mali Performance 3: Is EGL_BUFFER_PRESERVED
         * a good thing?" by Peter Harris */

        SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD, fs->can_discard);
        SET_BIT(fragmeta->midgard1.flags_lo, 0x400, fs->can_discard);

        panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
        panfrost_frag_meta_zsa_update(ctx, fragmeta);
        panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
}

void
panfrost_emit_shader_meta(struct panfrost_batch *batch,
                          enum pipe_shader_type st,
                          struct midgard_payload_vertex_tiler *vtp)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);

        if (!ss) {
                vtp->postfix.shader = 0;
                return;
        }

        struct mali_shader_meta meta;

        panfrost_shader_meta_init(ctx, st, &meta);

        /* Add the shader BO to the batch. */
        panfrost_batch_add_bo(batch, ss->bo,
                              PAN_BO_ACCESS_PRIVATE |
                              PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        mali_ptr shader_ptr;

        if (st == PIPE_SHADER_FRAGMENT) {
                struct panfrost_screen *screen = pan_screen(ctx->base.screen);
                unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
                size_t desc_size = sizeof(meta);
                struct midgard_blend_rt rts[4];
                struct panfrost_transfer xfer;

                assert(rt_count <= ARRAY_SIZE(rts));

                panfrost_frag_shader_meta_init(ctx, &meta, rts);

                if (!(screen->quirks & MIDGARD_SFBD))
                        desc_size += sizeof(*rts) * rt_count;

                xfer = panfrost_allocate_transient(batch, desc_size);

                memcpy(xfer.cpu, &meta, sizeof(meta));

                /* Only MFBD gets the per-RT blend descriptors; copying them
                 * on SFBD would overrun the allocation sized above */
                if (!(screen->quirks & MIDGARD_SFBD))
                        memcpy(xfer.cpu + sizeof(meta), rts,
                               sizeof(*rts) * rt_count);

                shader_ptr = xfer.gpu;
        } else {
                shader_ptr = panfrost_upload_transient(batch, &meta,
                                                       sizeof(meta));
        }

        vtp->postfix.shader = shader_ptr;
}

static void
panfrost_mali_viewport_init(struct panfrost_context *ctx,
                            struct mali_viewport *mvp)
{
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        /* Clip bounds are encoded as floats. The viewport itself is encoded as
         * (somewhat) asymmetric ints. */

        const struct pipe_scissor_state *ss = &ctx->scissor;

        memset(mvp, 0, sizeof(*mvp));

        /* By default, do no viewport clipping, i.e. clip to (-inf, inf) in
         * each direction. Clipping to the viewport in theory should work, but
         * in practice causes issues when we're not explicitly trying to
         * scissor */

        *mvp = (struct mali_viewport) {
                .clip_minx = -INFINITY,
                .clip_miny = -INFINITY,
                .clip_maxx = INFINITY,
                .clip_maxy = INFINITY,
        };

        /* By default, scissor to the viewport */
        float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
        float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));

        float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
        float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));

        float minz = (vp->translate[2] - fabsf(vp->scale[2]));
        float maxz = (vp->translate[2] + fabsf(vp->scale[2]));

        /* Apply the scissor test */

        unsigned minx, miny, maxx, maxy;

        if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
                minx = MAX2(ss->minx, vp_minx);
                miny = MAX2(ss->miny, vp_miny);
                maxx = MIN2(ss->maxx, vp_maxx);
                maxy = MIN2(ss->maxy, vp_maxy);
        } else {
                minx = vp_minx;
                miny = vp_miny;
                maxx = vp_maxx;
                maxy = vp_maxy;
        }

        /* Hardware needs the min/max to be strictly ordered, so flip if we
         * need to. The viewport transformation in the vertex shader will
         * handle the negatives if we don't */

        if (miny > maxy) {
                unsigned temp = miny;
                miny = maxy;
                maxy = temp;
        }

        if (minx > maxx) {
                unsigned temp = minx;
                minx = maxx;
                maxx = temp;
        }

        if (minz > maxz) {
                float temp = minz;
                minz = maxz;
                maxz = temp;
        }

        /* Clamp to the framebuffer size as a last check */

        minx = MIN2(ctx->pipe_framebuffer.width, minx);
        maxx = MIN2(ctx->pipe_framebuffer.width, maxx);

        miny = MIN2(ctx->pipe_framebuffer.height, miny);
        maxy = MIN2(ctx->pipe_framebuffer.height, maxy);

        /* Upload */

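        /* viewport1 holds the inclusive maximum; MALI_POSITIVE(v) encodes
         * v - 1 (per its definition in the panfrost headers), which is why
         * panfrost_emit_viewport() adds 1 back when unioning the scissor. */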
        mvp->viewport0[0] = minx;
        mvp->viewport1[0] = MALI_POSITIVE(maxx);

        mvp->viewport0[1] = miny;
        mvp->viewport1[1] = MALI_POSITIVE(maxy);

        mvp->clip_minz = minz;
        mvp->clip_maxz = maxz;
}

void
panfrost_emit_viewport(struct panfrost_batch *batch,
                       struct midgard_payload_vertex_tiler *tp)
{
        struct panfrost_context *ctx = batch->ctx;
        struct mali_viewport mvp;

        panfrost_mali_viewport_init(batch->ctx, &mvp);

        /* Update the job, unless we're doing wallpapering (whose lack of
         * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
         * just... be faster :) */

        if (!ctx->wallpaper_batch)
                panfrost_batch_union_scissor(batch, mvp.viewport0[0],
                                             mvp.viewport0[1],
                                             mvp.viewport1[0] + 1,
                                             mvp.viewport1[1] + 1);

        tp->postfix.viewport = panfrost_upload_transient(batch, &mvp,
                                                         sizeof(mvp));
}

static mali_ptr
panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
                                 enum pipe_shader_type st,
                                 struct panfrost_constant_buffer *buf,
                                 unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);

        if (rsrc) {
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      panfrost_bo_access_for_stage(st));

                /* Alignment guaranteed by
                 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
                return rsrc->bo->gpu + cb->buffer_offset;
        } else if (cb->user_buffer) {
                return panfrost_upload_transient(batch,
                                                 cb->user_buffer +
                                                 cb->buffer_offset,
                                                 cb->buffer_size);
        } else {
                unreachable("No constant buffer");
        }
}

struct sysval_uniform {
        union {
                float f[4];
                int32_t i[4];
                uint32_t u[4];
                uint64_t du[2];
        };
};

static void
panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
                                      struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->scale[0];
        uniform->f[1] = vp->scale[1];
        uniform->f[2] = vp->scale[2];
}

static void
panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
                                       struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->translate[0];
        uniform->f[1] = vp->translate[1];
        uniform->f[2] = vp->translate[2];
}

static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
                                       enum pipe_shader_type st,
                                       unsigned int sysvalid,
                                       struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
        unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
        bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
        struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;

        assert(dim);
        uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);

        if (dim > 1)
                uniform->i[1] = u_minify(tex->texture->height0,
                                         tex->u.tex.first_level);

        if (dim > 2)
                uniform->i[2] = u_minify(tex->texture->depth0,
                                         tex->u.tex.first_level);

        if (is_array)
                uniform->i[dim] = tex->texture->array_size;
}

static void
panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
                            enum pipe_shader_type st,
                            unsigned ssbo_id,
                            struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;

        assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
        struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];

        /* Compute address */
        struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;

        panfrost_batch_add_bo(batch, bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
                              panfrost_bo_access_for_stage(st));

        /* Upload address and size as sysval */
        uniform->du[0] = bo->gpu + sb.buffer_offset;
        uniform->u[2] = sb.buffer_size;
}

static void
panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
                               enum pipe_shader_type st,
                               unsigned samp_idx,
                               struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;
        struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;

        uniform->f[0] = sampl->min_lod;
        uniform->f[1] = sampl->max_lod;
        uniform->f[2] = sampl->lod_bias;

        /* Even without any errata, Midgard represents "no mipmapping" as
         * fixing the LOD with the clamps; keep behaviour consistent. c.f.
         * panfrost_create_sampler_state which also explains our choice of
         * epsilon value (again to keep behaviour consistent) */

        if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
                uniform->f[1] = uniform->f[0] + (1.0/256.0);
}

static void
panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
                                       struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;

        uniform->u[0] = ctx->compute_grid->grid[0];
        uniform->u[1] = ctx->compute_grid->grid[1];
        uniform->u[2] = ctx->compute_grid->grid[2];
}

static void
panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
                        struct panfrost_shader_state *ss,
                        enum pipe_shader_type st)
{
        struct sysval_uniform *uniforms = (void *)buf;

        for (unsigned i = 0; i < ss->sysval_count; ++i) {
                int sysval = ss->sysval[i];

                switch (PAN_SYSVAL_TYPE(sysval)) {
                case PAN_SYSVAL_VIEWPORT_SCALE:
                        panfrost_upload_viewport_scale_sysval(batch,
                                                              &uniforms[i]);
                        break;
                case PAN_SYSVAL_VIEWPORT_OFFSET:
                        panfrost_upload_viewport_offset_sysval(batch,
                                                               &uniforms[i]);
                        break;
                case PAN_SYSVAL_TEXTURE_SIZE:
                        panfrost_upload_txs_sysval(batch, st,
                                                   PAN_SYSVAL_ID(sysval),
                                                   &uniforms[i]);
                        break;
                case PAN_SYSVAL_SSBO:
                        panfrost_upload_ssbo_sysval(batch, st,
                                                    PAN_SYSVAL_ID(sysval),
                                                    &uniforms[i]);
                        break;
                case PAN_SYSVAL_NUM_WORK_GROUPS:
                        panfrost_upload_num_work_groups_sysval(batch,
                                                               &uniforms[i]);
                        break;
                case PAN_SYSVAL_SAMPLER:
                        panfrost_upload_sampler_sysval(batch, st,
                                                       PAN_SYSVAL_ID(sysval),
                                                       &uniforms[i]);
                        break;
                default:
                        assert(0);
                }
        }
}

static const void *
panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
                                 unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);

        if (rsrc)
                return rsrc->bo->cpu;
        else if (cb->user_buffer)
                return cb->user_buffer;
        else
                unreachable("No constant buffer");
}

void
panfrost_emit_const_buf(struct panfrost_batch *batch,
                        enum pipe_shader_type stage,
                        struct midgard_payload_vertex_tiler *vtp)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_variants *all = ctx->shader[stage];

        if (!all)
                return;

        struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];

        struct panfrost_shader_state *ss = &all->variants[all->active_variant];

        /* Uniforms are implicitly UBO #0 */
        bool has_uniforms = buf->enabled_mask & (1 << 0);

        /* Allocate room for the sysvals and the uniforms */
        size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
        size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
        size_t size = sys_size + uniform_size;
        struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
                                                                        size);
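
        /* Layout of the transient allocation (a sketch):
         *
         *   transfer.cpu -> [ sysvals, 16 bytes each | uniform data ]
         *
         * The whole range is exposed to the shader as UBO #0 below. */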

        /* Upload sysvals requested by the shader */
        panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);

        /* Upload uniforms */
        if (has_uniforms && uniform_size) {
                const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
                memcpy(transfer.cpu + sys_size, cpu, uniform_size);
        }

        struct mali_vertex_tiler_postfix *postfix = &vtp->postfix;

        /* Next up, attach UBOs. UBO #0 is the uniforms we just
         * uploaded */

        unsigned ubo_count = panfrost_ubo_count(ctx, stage);
        assert(ubo_count >= 1);

        size_t sz = sizeof(uint64_t) * ubo_count;
        uint64_t ubos[PAN_MAX_CONST_BUFFERS];
        int uniform_count = ss->uniform_count;

        /* Upload uniforms as a UBO */
        ubos[0] = MALI_MAKE_UBO(2 + uniform_count, transfer.gpu);

        /* The rest are honest-to-goodness UBOs */

        for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
                size_t usz = buf->cb[ubo].buffer_size;
                bool enabled = buf->enabled_mask & (1 << ubo);
                bool empty = usz == 0;

                if (!enabled || empty) {
                        /* Stub out disabled UBOs to catch accesses */
                        ubos[ubo] = MALI_MAKE_UBO(0, 0xDEAD0000);
                        continue;
                }

                mali_ptr gpu = panfrost_map_constant_buffer_gpu(batch, stage,
                                                                buf, ubo);

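                /* UBO sizes are encoded as a count of 16-byte fields, rounded
                 * up -- e.g. a 100-byte buffer pads to 112 bytes and encodes
                 * as 7 fields (a sketch of the encoding as we read
                 * MALI_MAKE_UBO) */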
                unsigned bytes_per_field = 16;
                unsigned aligned = ALIGN_POT(usz, bytes_per_field);
                ubos[ubo] = MALI_MAKE_UBO(aligned / bytes_per_field, gpu);
        }

        mali_ptr ubufs = panfrost_upload_transient(batch, ubos, sz);
        postfix->uniforms = transfer.gpu;
        postfix->uniform_buffers = ubufs;

        buf->dirty_mask = 0;
}

void
panfrost_emit_shared_memory(struct panfrost_batch *batch,
                            const struct pipe_grid_info *info,
                            struct midgard_payload_vertex_tiler *vtp)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
        struct panfrost_shader_state *ss = &all->variants[all->active_variant];
        unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
                                                           128));
        unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
                               info->grid[2] * 4;
        struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
                                                                  shared_size,
                                                                  1);

        struct mali_shared_memory shared = {
                .shared_memory = bo->gpu,
                .shared_workgroup_count =
                        util_logbase2_ceil(info->grid[0]) +
                        util_logbase2_ceil(info->grid[1]) +
                        util_logbase2_ceil(info->grid[2]),
                .shared_unk1 = 0x2,
                .shared_shift = util_logbase2(single_size) - 1
        };
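
        /* Worked example (a sketch): ss->shared_size = 100 rounds up to
         * single_size = 128, so an 8x8x1 grid allocates 128 * (8*8*1) * 4 =
         * 32768 bytes, with shared_workgroup_count = 3 + 3 + 0 = 6 and
         * shared_shift = log2(128) - 1 = 6. */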

        vtp->postfix.shared_memory = panfrost_upload_transient(batch, &shared,
                                                               sizeof(shared));
}