panfrost: Do per-sample shading when outputs are read
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 unsigned shift = panfrost_get_stack_shift(batch->stack_size);
62 struct mali_shared_memory shared = {
63 .stack_shift = shift,
64 .scratchpad = panfrost_batch_get_scratchpad(batch, shift, dev->thread_tls_alloc, dev->core_count)->gpu,
65 .shared_workgroup_count = ~0,
66 };
67 postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
68 }
69
70 static void
71 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
72 struct mali_vertex_tiler_postfix *postfix)
73 {
74 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
75 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
76 }
77
78 static void
79 panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_prefix *prefix,
81 struct mali_vertex_tiler_postfix *postfix)
82 {
83 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
84
85 postfix->gl_enables |= 0x7;
86 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
87 rasterizer && rasterizer->base.front_ccw);
88 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
89 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
90 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
91 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
92 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
93 rasterizer && rasterizer->base.flatshade_first);
94 }
95
96 void
97 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
98 struct mali_vertex_tiler_prefix *prefix,
99 union midgard_primitive_size *primitive_size)
100 {
101 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
102
103 if (!panfrost_writes_point_size(ctx)) {
104 bool points = prefix->draw_mode == MALI_POINTS;
105 float val = 0.0f;
106
107 if (rasterizer)
108 val = points ?
109 rasterizer->base.point_size :
110 rasterizer->base.line_width;
111
112 primitive_size->constant = val;
113 }
114 }
115
116 static void
117 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
118 struct mali_vertex_tiler_postfix *postfix)
119 {
120 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
121 if (ctx->occlusion_query) {
122 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
123 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
124 PAN_BO_ACCESS_SHARED |
125 PAN_BO_ACCESS_RW |
126 PAN_BO_ACCESS_FRAGMENT);
127 } else {
128 postfix->occlusion_counter = 0;
129 }
130 }
131
132 void
133 panfrost_vt_init(struct panfrost_context *ctx,
134 enum pipe_shader_type stage,
135 struct mali_vertex_tiler_prefix *prefix,
136 struct mali_vertex_tiler_postfix *postfix)
137 {
138 struct panfrost_device *device = pan_device(ctx->base.screen);
139
140 if (!ctx->shader[stage])
141 return;
142
143 memset(prefix, 0, sizeof(*prefix));
144 memset(postfix, 0, sizeof(*postfix));
145
146 if (device->quirks & IS_BIFROST) {
147 postfix->gl_enables = 0x2;
148 panfrost_vt_emit_shared_memory(ctx, postfix);
149 } else {
150 postfix->gl_enables = 0x6;
151 panfrost_vt_attach_framebuffer(ctx, postfix);
152 }
153
154 if (stage == PIPE_SHADER_FRAGMENT) {
155 panfrost_vt_update_occlusion_query(ctx, postfix);
156 panfrost_vt_update_rasterizer(ctx, prefix, postfix);
157 }
158 }
159
160 static unsigned
161 panfrost_translate_index_size(unsigned size)
162 {
163 switch (size) {
164 case 1:
165 return MALI_DRAW_INDEXED_UINT8;
166
167 case 2:
168 return MALI_DRAW_INDEXED_UINT16;
169
170 case 4:
171 return MALI_DRAW_INDEXED_UINT32;
172
173 default:
174 unreachable("Invalid index size");
175 }
176 }
177
178 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
179 * good for the duration of the draw (transient), though it may last longer.
180 * Also gets the bounds on the index buffer for the range accessed by the
181 * draw. We do these operations together because there are natural
182 * optimizations which require them to be together. */
183
184 static mali_ptr
185 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
186 const struct pipe_draw_info *info,
187 unsigned *min_index, unsigned *max_index)
188 {
189 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
190 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
191 off_t offset = info->start * info->index_size;
192 bool needs_indices = true;
193 mali_ptr out = 0;
194
195 if (info->max_index != ~0u) {
196 *min_index = info->min_index;
197 *max_index = info->max_index;
198 needs_indices = false;
199 }
200
201 if (!info->has_user_indices) {
202 /* Only resources can be directly mapped */
203 panfrost_batch_add_bo(batch, rsrc->bo,
204 PAN_BO_ACCESS_SHARED |
205 PAN_BO_ACCESS_READ |
206 PAN_BO_ACCESS_VERTEX_TILER);
207 out = rsrc->bo->gpu + offset;
208
209 /* Check the cache */
210 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
211 info->start,
212 info->count,
213 min_index,
214 max_index);
215 } else {
216 /* Otherwise, we need to upload to transient memory */
217 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
218 out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
219 info->count *
220 info->index_size);
221 }
222
223 if (needs_indices) {
224 /* Fallback */
225 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
226
227 if (!info->has_user_indices)
228 panfrost_minmax_cache_add(rsrc->index_cache,
229 info->start, info->count,
230 *min_index, *max_index);
231 }
232
233 return out;
234 }
235
236 void
237 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
238 const struct pipe_draw_info *info,
239 enum mali_draw_mode draw_mode,
240 struct mali_vertex_tiler_postfix *vertex_postfix,
241 struct mali_vertex_tiler_prefix *tiler_prefix,
242 struct mali_vertex_tiler_postfix *tiler_postfix,
243 unsigned *vertex_count,
244 unsigned *padded_count)
245 {
246 tiler_prefix->draw_mode = draw_mode;
247
248 unsigned draw_flags = 0;
249
250 if (panfrost_writes_point_size(ctx))
251 draw_flags |= MALI_DRAW_VARYING_SIZE;
252
253 if (info->primitive_restart)
254 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
255
256 /* These don't make much sense */
257
258 draw_flags |= 0x3000;
259
260 if (info->index_size) {
261 unsigned min_index = 0, max_index = 0;
262
263 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
264 info,
265 &min_index,
266 &max_index);
267
268 /* Use the corresponding values */
269 *vertex_count = max_index - min_index + 1;
270 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
271 tiler_prefix->offset_bias_correction = -min_index;
272 tiler_prefix->index_count = MALI_POSITIVE(info->count);
273 draw_flags |= panfrost_translate_index_size(info->index_size);
274 } else {
275 tiler_prefix->indices = 0;
276 *vertex_count = ctx->vertex_count;
277 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
278 tiler_prefix->offset_bias_correction = 0;
279 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
280 }
281
282 tiler_prefix->unknown_draw = draw_flags;
283
284 /* Encode the padded vertex count */
285
286 if (info->instance_count > 1) {
287 *padded_count = panfrost_padded_vertex_count(*vertex_count);
288
289 unsigned shift = __builtin_ctz(ctx->padded_count);
290 unsigned k = ctx->padded_count >> (shift + 1);
291
292 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
293 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
294 } else {
295 *padded_count = *vertex_count;
296
297 /* Reset instancing state */
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
300 }
301 }
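/* Purely illustrative note (not part of the original source): the instancing
 * fields above presumably encode padded_count as
 * (2 * instance_odd + 1) << instance_shift. Working a hypothetical
 * padded_count = 24 (0b11000) through the code: shift = ctz(24) = 3,
 * odd = 24 >> 4 = 1, and indeed (2 * 1 + 1) << 3 == 24. */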
302
303 static void
304 panfrost_shader_meta_init(struct panfrost_context *ctx,
305 enum pipe_shader_type st,
306 struct mali_shader_meta *meta)
307 {
308 const struct panfrost_device *dev = pan_device(ctx->base.screen);
309 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
310
311 memset(meta, 0, sizeof(*meta));
312 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
313 meta->attribute_count = ss->attribute_count;
314 meta->varying_count = ss->varying_count;
315 meta->texture_count = ctx->sampler_view_count[st];
316 meta->sampler_count = ctx->sampler_count[st];
317
318 if (dev->quirks & IS_BIFROST) {
319 if (st == PIPE_SHADER_VERTEX)
320 meta->bifrost1.unk1 = 0x800000;
321 else {
322 /* First clause ATEST |= 0x4000000.
323 * Less than 32 regs |= 0x200 */
324 meta->bifrost1.unk1 = 0x950020;
325 }
326
327 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
328 if (st == PIPE_SHADER_VERTEX)
329 meta->bifrost2.preload_regs = 0xC0;
330 else {
331 meta->bifrost2.preload_regs = 0x1;
332 SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
333 }
334
335 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
336 ss->uniform_cutoff);
337 } else {
338 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
339 ss->uniform_cutoff);
340 meta->midgard1.work_count = ss->work_reg_count;
341
342 /* TODO: This is not conformant on ES3 */
343 meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
344
345 meta->midgard1.flags_lo = 0x20;
346 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
347
348 SET_BIT(meta->midgard1.flags_hi, MALI_WRITES_GLOBAL, ss->writes_global);
349 }
350 }
351
352 static unsigned
353 panfrost_translate_compare_func(enum pipe_compare_func in)
354 {
355 switch (in) {
356 case PIPE_FUNC_NEVER:
357 return MALI_FUNC_NEVER;
358
359 case PIPE_FUNC_LESS:
360 return MALI_FUNC_LESS;
361
362 case PIPE_FUNC_EQUAL:
363 return MALI_FUNC_EQUAL;
364
365 case PIPE_FUNC_LEQUAL:
366 return MALI_FUNC_LEQUAL;
367
368 case PIPE_FUNC_GREATER:
369 return MALI_FUNC_GREATER;
370
371 case PIPE_FUNC_NOTEQUAL:
372 return MALI_FUNC_NOTEQUAL;
373
374 case PIPE_FUNC_GEQUAL:
375 return MALI_FUNC_GEQUAL;
376
377 case PIPE_FUNC_ALWAYS:
378 return MALI_FUNC_ALWAYS;
379
380 default:
381 unreachable("Invalid func");
382 }
383 }
384
385 static unsigned
386 panfrost_translate_stencil_op(enum pipe_stencil_op in)
387 {
388 switch (in) {
389 case PIPE_STENCIL_OP_KEEP:
390 return MALI_STENCIL_KEEP;
391
392 case PIPE_STENCIL_OP_ZERO:
393 return MALI_STENCIL_ZERO;
394
395 case PIPE_STENCIL_OP_REPLACE:
396 return MALI_STENCIL_REPLACE;
397
398 case PIPE_STENCIL_OP_INCR:
399 return MALI_STENCIL_INCR;
400
401 case PIPE_STENCIL_OP_DECR:
402 return MALI_STENCIL_DECR;
403
404 case PIPE_STENCIL_OP_INCR_WRAP:
405 return MALI_STENCIL_INCR_WRAP;
406
407 case PIPE_STENCIL_OP_DECR_WRAP:
408 return MALI_STENCIL_DECR_WRAP;
409
410 case PIPE_STENCIL_OP_INVERT:
411 return MALI_STENCIL_INVERT;
412
413 default:
414 unreachable("Invalid stencil op");
415 }
416 }
417
418 static unsigned
419 translate_tex_wrap(enum pipe_tex_wrap w)
420 {
421 switch (w) {
422 case PIPE_TEX_WRAP_REPEAT:
423 return MALI_WRAP_REPEAT;
424
425 case PIPE_TEX_WRAP_CLAMP:
426 return MALI_WRAP_CLAMP;
427
428 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
429 return MALI_WRAP_CLAMP_TO_EDGE;
430
431 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
432 return MALI_WRAP_CLAMP_TO_BORDER;
433
434 case PIPE_TEX_WRAP_MIRROR_REPEAT:
435 return MALI_WRAP_MIRRORED_REPEAT;
436
437 case PIPE_TEX_WRAP_MIRROR_CLAMP:
438 return MALI_WRAP_MIRRORED_CLAMP;
439
440 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
441 return MALI_WRAP_MIRRORED_CLAMP_TO_EDGE;
442
443 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
444 return MALI_WRAP_MIRRORED_CLAMP_TO_BORDER;
445
446 default:
447 unreachable("Invalid wrap");
448 }
449 }
450
451 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
452 struct mali_sampler_descriptor *hw)
453 {
454 unsigned func = panfrost_translate_compare_func(cso->compare_func);
455 bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
456 bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
457 bool mip_linear = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;
458 unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
459 unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
460 unsigned mip_filter = mip_linear ?
461 (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
462 unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;
463
464 *hw = (struct mali_sampler_descriptor) {
465 .filter_mode = min_filter | mag_filter | mip_filter |
466 normalized,
467 .wrap_s = translate_tex_wrap(cso->wrap_s),
468 .wrap_t = translate_tex_wrap(cso->wrap_t),
469 .wrap_r = translate_tex_wrap(cso->wrap_r),
470 .compare_func = cso->compare_mode ?
471 panfrost_flip_compare_func(func) :
472 MALI_FUNC_NEVER,
473 .border_color = {
474 cso->border_color.f[0],
475 cso->border_color.f[1],
476 cso->border_color.f[2],
477 cso->border_color.f[3]
478 },
479 .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
480 .max_lod = FIXED_16(cso->max_lod, false),
481 .lod_bias = FIXED_16(cso->lod_bias, true), /* can be negative */
482 .seamless_cube_map = cso->seamless_cube_map,
483 };
484
485 /* If necessary, we disable mipmapping in the sampler descriptor by
486 * clamping the LOD range as tightly as possible (to within epsilon of
487 * min_lod, essentially -- remember these are fixed point numbers, so
488 * epsilon=1/256) */
489
490 if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
491 hw->max_lod = hw->min_lod + 1;
492 }
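/* Illustrative sketch of the fixed-point LOD clamp above, assuming FIXED_16
 * produces 8.8 fixed point (1 unit == 1/256 of a mip level, matching the
 * epsilon noted in the comment): with cso->min_lod = 2.0 and mipmapping
 * disabled, min_lod becomes 512 and max_lod becomes 513, i.e. sampling is
 * clamped to the LOD range [2.0, 2.00390625]. */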
493
494 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
495 struct bifrost_sampler_descriptor *hw)
496 {
497 *hw = (struct bifrost_sampler_descriptor) {
498 .unk1 = 0x1,
499 .wrap_s = translate_tex_wrap(cso->wrap_s),
500 .wrap_t = translate_tex_wrap(cso->wrap_t),
501 .wrap_r = translate_tex_wrap(cso->wrap_r),
502 .unk8 = 0x8,
503 .min_filter = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST,
504 .norm_coords = cso->normalized_coords,
505 .mip_filter = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR,
506 .mag_filter = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR,
507 .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
508 .max_lod = FIXED_16(cso->max_lod, false),
509 };
510
511 /* If necessary, we disable mipmapping in the sampler descriptor by
512 * clamping the LOD range as tightly as possible (to within epsilon of
513 * min_lod, essentially -- remember these are fixed point numbers, so
514 * epsilon=1/256) */
515
516 if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
517 hw->max_lod = hw->min_lod + 1;
518 }
519
520 static void
521 panfrost_make_stencil_state(const struct pipe_stencil_state *in,
522 struct mali_stencil_test *out)
523 {
524 out->ref = 0; /* Gallium gets it from elsewhere */
525
526 out->mask = in->valuemask;
527 out->func = panfrost_translate_compare_func(in->func);
528 out->sfail = panfrost_translate_stencil_op(in->fail_op);
529 out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
530 out->dppass = panfrost_translate_stencil_op(in->zpass_op);
531 }
532
533 static void
534 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
535 struct mali_shader_meta *fragmeta)
536 {
537 if (!ctx->rasterizer) {
538 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
539 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
540 fragmeta->depth_units = 0.0f;
541 fragmeta->depth_factor = 0.0f;
542 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
543 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
544 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, true);
545 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, true);
546 return;
547 }
548
549 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
550
551 bool msaa = rast->multisample;
552
553 /* TODO: Sample size */
554 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
555 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
556
557 struct panfrost_shader_state *fs;
558 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
559
560 /* EXT_shader_framebuffer_fetch requires the shader to be run
561 * per-sample when outputs are read. */
562 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
563 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
564
565 fragmeta->depth_units = rast->offset_units * 2.0f;
566 fragmeta->depth_factor = rast->offset_scale;
567
568 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
569
570 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
571 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
572
573 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
574 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
575 }
576
577 static void
578 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
579 struct mali_shader_meta *fragmeta)
580 {
581 const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
582 int zfunc = PIPE_FUNC_ALWAYS;
583
584 if (!zsa) {
585 struct pipe_stencil_state default_stencil = {
586 .enabled = 0,
587 .func = PIPE_FUNC_ALWAYS,
588 .fail_op = MALI_STENCIL_KEEP,
589 .zfail_op = MALI_STENCIL_KEEP,
590 .zpass_op = MALI_STENCIL_KEEP,
591 .writemask = 0xFF,
592 .valuemask = 0xFF
593 };
594
595 panfrost_make_stencil_state(&default_stencil,
596 &fragmeta->stencil_front);
597 fragmeta->stencil_mask_front = default_stencil.writemask;
598 fragmeta->stencil_back = fragmeta->stencil_front;
599 fragmeta->stencil_mask_back = default_stencil.writemask;
600 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
601 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
602 } else {
603 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
604 zsa->stencil[0].enabled);
605 panfrost_make_stencil_state(&zsa->stencil[0],
606 &fragmeta->stencil_front);
607 fragmeta->stencil_mask_front = zsa->stencil[0].writemask;
608 fragmeta->stencil_front.ref = ctx->stencil_ref.ref_value[0];
609
610 /* If back-stencil is not enabled, use the front values */
611
612 if (zsa->stencil[1].enabled) {
613 panfrost_make_stencil_state(&zsa->stencil[1],
614 &fragmeta->stencil_back);
615 fragmeta->stencil_mask_back = zsa->stencil[1].writemask;
616 fragmeta->stencil_back.ref = ctx->stencil_ref.ref_value[1];
617 } else {
618 fragmeta->stencil_back = fragmeta->stencil_front;
619 fragmeta->stencil_mask_back = fragmeta->stencil_mask_front;
620 fragmeta->stencil_back.ref = fragmeta->stencil_front.ref;
621 }
622
623 if (zsa->depth.enabled)
624 zfunc = zsa->depth.func;
625
626 /* Depth state (TODO: Refactor) */
627
628 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
629 zsa->depth.writemask);
630 }
631
632 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
633 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
634 }
635
636 static bool
637 panfrost_fs_required(
638 struct panfrost_shader_state *fs,
639 struct panfrost_blend_final *blend,
640 unsigned rt_count)
641 {
642 /* If we generally have side effects */
643 if (fs->fs_sidefx)
644 return true;
645
646 /* If colour is written we need to execute */
647 for (unsigned i = 0; i < rt_count; ++i) {
648 if (!blend[i].no_colour)
649 return true;
650 }
651
652 /* If depth is written and not implied we need to execute.
653 * TODO: Predicate on Z/S writes being enabled */
654 return (fs->writes_depth || fs->writes_stencil);
655 }
656
657 static void
658 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
659 struct mali_shader_meta *fragmeta,
660 void *rts)
661 {
662 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
663 const struct panfrost_device *dev = pan_device(ctx->base.screen);
664 struct panfrost_shader_state *fs;
665 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
666
667 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
668 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
669 !ctx->blend->base.dither);
670
671 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
672 ctx->blend->base.alpha_to_coverage);
673
674 /* Get blending setup */
675 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
676
677 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
678 unsigned shader_offset = 0;
679 struct panfrost_bo *shader_bo = NULL;
680
681 for (unsigned c = 0; c < rt_count; ++c)
682 blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
683 &shader_offset);
684
685 /* Disable shader execution if we can */
686 if (dev->quirks & MIDGARD_SHADERLESS
687 && !panfrost_fs_required(fs, blend, rt_count)) {
688 fragmeta->shader = 0;
689 fragmeta->attribute_count = 0;
690 fragmeta->varying_count = 0;
691 fragmeta->texture_count = 0;
692 fragmeta->sampler_count = 0;
693
694 /* This feature is not known to work on Bifrost */
695 fragmeta->midgard1.work_count = 1;
696 fragmeta->midgard1.uniform_count = 0;
697 fragmeta->midgard1.uniform_buffer_count = 0;
698 }
699
700 /* If there is a blend shader, work registers are shared. We impose 8
701 * work registers as a limit for blend shaders. Should be lower XXX */
702
703 if (!(dev->quirks & IS_BIFROST)) {
704 for (unsigned c = 0; c < rt_count; ++c) {
705 if (blend[c].is_shader) {
706 fragmeta->midgard1.work_count =
707 MAX2(fragmeta->midgard1.work_count, 8);
708 }
709 }
710 }
711
712 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
713 * copied to the blend_meta appended (by convention), but this is the
714 * field actually read by the hardware. (Or maybe both are read...?).
715 * Specify the last RTi with a blend shader. */
716
717 fragmeta->blend.shader = 0;
718
719 for (signed rt = (rt_count - 1); rt >= 0; --rt) {
720 if (!blend[rt].is_shader)
721 continue;
722
723 fragmeta->blend.shader = blend[rt].shader.gpu |
724 blend[rt].shader.first_tag;
725 break;
726 }
727
728 if (dev->quirks & MIDGARD_SFBD) {
729 /* On platforms with only a single render target (SFBD), the blend
730 * information is inside the shader meta itself. We additionally
731 * need to signal CAN_DISCARD for nontrivial blend modes (so
732 * we're able to read back the destination buffer) */
733
734 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
735 blend[0].is_shader);
736
737 if (!blend[0].is_shader) {
738 fragmeta->blend.equation = *blend[0].equation.equation;
739 fragmeta->blend.constant = blend[0].equation.constant;
740 }
741
742 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
743 !blend[0].no_blending || fs->can_discard);
744
745 batch->draws |= PIPE_CLEAR_COLOR0;
746 return;
747 }
748
749 if (dev->quirks & IS_BIFROST) {
750 bool no_blend = true;
751
752 for (unsigned i = 0; i < rt_count; ++i)
753 no_blend &= (blend[i].no_blending | blend[i].no_colour);
754
755 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
756 !fs->can_discard && !fs->writes_depth && no_blend);
757 }
758
759 /* Additional blend descriptor tacked on for jobs using MFBD */
760
761 for (unsigned i = 0; i < rt_count; ++i) {
762 unsigned flags = 0;
763
764 if (ctx->pipe_framebuffer.nr_cbufs > i && !blend[i].no_colour) {
765 flags = 0x200;
766 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
767
768 bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
769 (ctx->pipe_framebuffer.cbufs[i]) &&
770 util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
771
772 SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
773 SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
774 SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
775 SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
776 }
777
778 if (dev->quirks & IS_BIFROST) {
779 struct bifrost_blend_rt *brts = rts;
780
781 brts[i].flags = flags;
782
783 if (blend[i].is_shader) {
784 /* The blend shader's address needs to be at
785 * the same top 32 bit as the fragment shader.
786 * TODO: Ensure that's always the case.
787 */
788 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
789 (fs->bo->gpu & (0xffffffffull << 32)));
790 brts[i].shader = blend[i].shader.gpu;
791 brts[i].unk2 = 0x0;
792 } else if (ctx->pipe_framebuffer.nr_cbufs > i) {
793 enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
794 const struct util_format_description *format_desc;
795 format_desc = util_format_description(format);
796
797 brts[i].equation = *blend[i].equation.equation;
798
799 /* TODO: this is a bit more complicated */
800 brts[i].constant = blend[i].equation.constant;
801
802 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
803
804 /* 0x19 disables blending and forces REPLACE
805 * mode (equivalent to rgb_mode = alpha_mode =
806 * x122, colour mask = 0xF). 0x1a allows
807 * blending. */
808 brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
809
810 brts[i].shader_type = fs->blend_types[i];
811 } else {
812 /* Dummy attachment for depth-only */
813 brts[i].unk2 = 0x3;
814 brts[i].shader_type = fs->blend_types[i];
815 }
816 } else {
817 struct midgard_blend_rt *mrts = rts;
818 mrts[i].flags = flags;
819
820 if (blend[i].is_shader) {
821 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
822 } else {
823 mrts[i].blend.equation = *blend[i].equation.equation;
824 mrts[i].blend.constant = blend[i].equation.constant;
825 }
826 }
827 }
828 }
829
830 static void
831 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
832 struct mali_shader_meta *fragmeta,
833 void *rts)
834 {
835 const struct panfrost_device *dev = pan_device(ctx->base.screen);
836 struct panfrost_shader_state *fs;
837
838 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
839
840 bool msaa = ctx->rasterizer && ctx->rasterizer->base.multisample;
841 fragmeta->coverage_mask = (msaa ? ctx->sample_mask : ~0) & 0xF;
842
843 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
844 fragmeta->unknown2_4 = 0x4e0;
845
846 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
847 * is required (independent of 32-bit/64-bit descriptors), or why it's
848 * not used on later GPU revisions. Otherwise, all shader jobs fault on
849 * these earlier chips (perhaps this is a chicken bit of some kind).
850 * More investigation is needed. */
851
852 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
853
854 if (dev->quirks & IS_BIFROST) {
855 /* TODO */
856 } else {
857 /* Depending on whether it's legal in the given shader, we try to
858 * enable early-z testing. TODO: respect e-z force */
859
860 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
861 !fs->can_discard && !fs->writes_global &&
862 !fs->writes_depth && !fs->writes_stencil &&
863 !ctx->blend->base.alpha_to_coverage);
864
865 /* Add the writes Z/S flags if needed. */
866 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
867 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
868
869 /* Any time texturing is used, derivatives are implicitly calculated,
870 * so we need to enable helper invocations */
871
872 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
873 fs->helper_invocations);
874
875 const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
876
877 bool depth_enabled = fs->writes_depth ||
878 (zsa && zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS);
879
880 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
881 fs->outputs_read || (!depth_enabled && fs->can_discard));
882 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, depth_enabled && fs->can_discard);
883 }
884
885 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
886 panfrost_frag_meta_zsa_update(ctx, fragmeta);
887 panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
888 }
889
890 void
891 panfrost_emit_shader_meta(struct panfrost_batch *batch,
892 enum pipe_shader_type st,
893 struct mali_vertex_tiler_postfix *postfix)
894 {
895 struct panfrost_context *ctx = batch->ctx;
896 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
897
898 if (!ss) {
899 postfix->shader = 0;
900 return;
901 }
902
903 struct mali_shader_meta meta;
904
905 panfrost_shader_meta_init(ctx, st, &meta);
906
907 /* Add the shader BO to the batch. */
908 panfrost_batch_add_bo(batch, ss->bo,
909 PAN_BO_ACCESS_PRIVATE |
910 PAN_BO_ACCESS_READ |
911 panfrost_bo_access_for_stage(st));
912
913 mali_ptr shader_ptr;
914
915 if (st == PIPE_SHADER_FRAGMENT) {
916 struct panfrost_device *dev = pan_device(ctx->base.screen);
917 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
918 size_t desc_size = sizeof(meta);
919 void *rts = NULL;
920 struct panfrost_transfer xfer;
921 unsigned rt_size;
922
923 if (dev->quirks & MIDGARD_SFBD)
924 rt_size = 0;
925 else if (dev->quirks & IS_BIFROST)
926 rt_size = sizeof(struct bifrost_blend_rt);
927 else
928 rt_size = sizeof(struct midgard_blend_rt);
929
930 desc_size += rt_size * rt_count;
931
932 if (rt_size)
933 rts = rzalloc_size(ctx, rt_size * rt_count);
934
935 panfrost_frag_shader_meta_init(ctx, &meta, rts);
936
937 xfer = panfrost_pool_alloc(&batch->pool, desc_size);
938
939 memcpy(xfer.cpu, &meta, sizeof(meta));
940 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
941
942 if (rt_size)
943 ralloc_free(rts);
944
945 shader_ptr = xfer.gpu;
946 } else {
947 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
948 sizeof(meta));
949 }
950
951 postfix->shader = shader_ptr;
952 }
953
954 static void
955 panfrost_mali_viewport_init(struct panfrost_context *ctx,
956 struct mali_viewport *mvp)
957 {
958 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
959
960 /* Clip bounds are encoded as floats. The viewport itself is encoded as
961 * (somewhat) asymmetric ints. */
962
963 const struct pipe_scissor_state *ss = &ctx->scissor;
964
965 memset(mvp, 0, sizeof(*mvp));
966
967 /* By default, do no viewport clipping, i.e. clip to (-inf, inf) in
968 * each direction. Clipping to the viewport in theory should work, but
969 * in practice causes issues when we're not explicitly trying to
970 * scissor */
971
972 *mvp = (struct mali_viewport) {
973 .clip_minx = -INFINITY,
974 .clip_miny = -INFINITY,
975 .clip_maxx = INFINITY,
976 .clip_maxy = INFINITY,
977 };
978
979 /* Always scissor to the viewport by default. */
980 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
981 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
982
983 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
984 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
985
986 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
987 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
988
989 /* Apply the scissor test */
990
991 unsigned minx, miny, maxx, maxy;
992
993 if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
994 minx = MAX2(ss->minx, vp_minx);
995 miny = MAX2(ss->miny, vp_miny);
996 maxx = MIN2(ss->maxx, vp_maxx);
997 maxy = MIN2(ss->maxy, vp_maxy);
998 } else {
999 minx = vp_minx;
1000 miny = vp_miny;
1001 maxx = vp_maxx;
1002 maxy = vp_maxy;
1003 }
1004
1005 /* Hardware needs the min/max to be strictly ordered, so flip if we
1006 * need to. The viewport transformation in the vertex shader will
1007 * handle the negatives if we don't */
1008
1009 if (miny > maxy) {
1010 unsigned temp = miny;
1011 miny = maxy;
1012 maxy = temp;
1013 }
1014
1015 if (minx > maxx) {
1016 unsigned temp = minx;
1017 minx = maxx;
1018 maxx = temp;
1019 }
1020
1021 if (minz > maxz) {
1022 float temp = minz;
1023 minz = maxz;
1024 maxz = temp;
1025 }
1026
1027 /* Clamp to the framebuffer size as a last check */
1028
1029 minx = MIN2(ctx->pipe_framebuffer.width, minx);
1030 maxx = MIN2(ctx->pipe_framebuffer.width, maxx);
1031
1032 miny = MIN2(ctx->pipe_framebuffer.height, miny);
1033 maxy = MIN2(ctx->pipe_framebuffer.height, maxy);
1034
1035 /* Upload */
1036
1037 mvp->viewport0[0] = minx;
1038 mvp->viewport1[0] = MALI_POSITIVE(maxx);
1039
1040 mvp->viewport0[1] = miny;
1041 mvp->viewport1[1] = MALI_POSITIVE(maxy);
1042
1043 bool clip_near = true;
1044 bool clip_far = true;
1045
1046 if (ctx->rasterizer) {
1047 clip_near = ctx->rasterizer->base.depth_clip_near;
1048 clip_far = ctx->rasterizer->base.depth_clip_far;
1049 }
1050
1051 mvp->clip_minz = clip_near ? minz : -INFINITY;
1052 mvp->clip_maxz = clip_far ? maxz : INFINITY;
1053 }
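/* For illustration only, assuming MALI_POSITIVE(n) encodes n - 1 (the "+ 1"
 * in panfrost_emit_viewport below suggests as much): a 1920x1080
 * framebuffer with no scissoring yields viewport0 = { 0, 0 } and
 * viewport1 = { 1919, 1079 }, i.e. an inclusive max corner. */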
1054
1055 void
1056 panfrost_emit_viewport(struct panfrost_batch *batch,
1057 struct mali_vertex_tiler_postfix *tiler_postfix)
1058 {
1059 struct panfrost_context *ctx = batch->ctx;
1060 struct mali_viewport mvp;
1061
1062 panfrost_mali_viewport_init(batch->ctx, &mvp);
1063
1064 /* Update the job, unless we're doing wallpapering (whose lack of
1065 * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
1066 * just... be faster :) */
1067
1068 if (!ctx->wallpaper_batch)
1069 panfrost_batch_union_scissor(batch, mvp.viewport0[0],
1070 mvp.viewport0[1],
1071 mvp.viewport1[0] + 1,
1072 mvp.viewport1[1] + 1);
1073
1074 tiler_postfix->viewport = panfrost_pool_upload(&batch->pool, &mvp,
1075 sizeof(mvp));
1076 }
1077
1078 static mali_ptr
1079 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
1080 enum pipe_shader_type st,
1081 struct panfrost_constant_buffer *buf,
1082 unsigned index)
1083 {
1084 struct pipe_constant_buffer *cb = &buf->cb[index];
1085 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1086
1087 if (rsrc) {
1088 panfrost_batch_add_bo(batch, rsrc->bo,
1089 PAN_BO_ACCESS_SHARED |
1090 PAN_BO_ACCESS_READ |
1091 panfrost_bo_access_for_stage(st));
1092
1093 /* Alignment guaranteed by
1094 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
1095 return rsrc->bo->gpu + cb->buffer_offset;
1096 } else if (cb->user_buffer) {
1097 return panfrost_pool_upload(&batch->pool,
1098 cb->user_buffer +
1099 cb->buffer_offset,
1100 cb->buffer_size);
1101 } else {
1102 unreachable("No constant buffer");
1103 }
1104 }
1105
1106 struct sysval_uniform {
1107 union {
1108 float f[4];
1109 int32_t i[4];
1110 uint32_t u[4];
1111 uint64_t du[2];
1112 };
1113 };
1114
1115 static void
1116 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
1117 struct sysval_uniform *uniform)
1118 {
1119 struct panfrost_context *ctx = batch->ctx;
1120 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
1121
1122 uniform->f[0] = vp->scale[0];
1123 uniform->f[1] = vp->scale[1];
1124 uniform->f[2] = vp->scale[2];
1125 }
1126
1127 static void
1128 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
1129 struct sysval_uniform *uniform)
1130 {
1131 struct panfrost_context *ctx = batch->ctx;
1132 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
1133
1134 uniform->f[0] = vp->translate[0];
1135 uniform->f[1] = vp->translate[1];
1136 uniform->f[2] = vp->translate[2];
1137 }
1138
1139 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
1140 enum pipe_shader_type st,
1141 unsigned int sysvalid,
1142 struct sysval_uniform *uniform)
1143 {
1144 struct panfrost_context *ctx = batch->ctx;
1145 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
1146 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
1147 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
1148 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
1149
1150 assert(dim);
1151 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
1152
1153 if (dim > 1)
1154 uniform->i[1] = u_minify(tex->texture->height0,
1155 tex->u.tex.first_level);
1156
1157 if (dim > 2)
1158 uniform->i[2] = u_minify(tex->texture->depth0,
1159 tex->u.tex.first_level);
1160
1161 if (is_array)
1162 uniform->i[dim] = tex->texture->array_size;
1163 }
1164
1165 static void
1166 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
1167 enum pipe_shader_type st,
1168 unsigned ssbo_id,
1169 struct sysval_uniform *uniform)
1170 {
1171 struct panfrost_context *ctx = batch->ctx;
1172
1173 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
1174 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
1175
1176 /* Compute address */
1177 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
1178
1179 panfrost_batch_add_bo(batch, bo,
1180 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
1181 panfrost_bo_access_for_stage(st));
1182
1183 /* Upload address and size as sysval */
1184 uniform->du[0] = bo->gpu + sb.buffer_offset;
1185 uniform->u[2] = sb.buffer_size;
1186 }
1187
1188 static void
1189 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
1190 enum pipe_shader_type st,
1191 unsigned samp_idx,
1192 struct sysval_uniform *uniform)
1193 {
1194 struct panfrost_context *ctx = batch->ctx;
1195 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1196
1197 uniform->f[0] = sampl->min_lod;
1198 uniform->f[1] = sampl->max_lod;
1199 uniform->f[2] = sampl->lod_bias;
1200
1201 /* Even without any errata, Midgard represents "no mipmapping" as
1202 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1203 * panfrost_create_sampler_state which also explains our choice of
1204 * epsilon value (again to keep behaviour consistent) */
1205
1206 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1207 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1208 }
1209
1210 static void
1211 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1212 struct sysval_uniform *uniform)
1213 {
1214 struct panfrost_context *ctx = batch->ctx;
1215
1216 uniform->u[0] = ctx->compute_grid->grid[0];
1217 uniform->u[1] = ctx->compute_grid->grid[1];
1218 uniform->u[2] = ctx->compute_grid->grid[2];
1219 }
1220
1221 static void
1222 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1223 struct panfrost_shader_state *ss,
1224 enum pipe_shader_type st)
1225 {
1226 struct sysval_uniform *uniforms = (void *)buf;
1227
1228 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1229 int sysval = ss->sysval[i];
1230
1231 switch (PAN_SYSVAL_TYPE(sysval)) {
1232 case PAN_SYSVAL_VIEWPORT_SCALE:
1233 panfrost_upload_viewport_scale_sysval(batch,
1234 &uniforms[i]);
1235 break;
1236 case PAN_SYSVAL_VIEWPORT_OFFSET:
1237 panfrost_upload_viewport_offset_sysval(batch,
1238 &uniforms[i]);
1239 break;
1240 case PAN_SYSVAL_TEXTURE_SIZE:
1241 panfrost_upload_txs_sysval(batch, st,
1242 PAN_SYSVAL_ID(sysval),
1243 &uniforms[i]);
1244 break;
1245 case PAN_SYSVAL_SSBO:
1246 panfrost_upload_ssbo_sysval(batch, st,
1247 PAN_SYSVAL_ID(sysval),
1248 &uniforms[i]);
1249 break;
1250 case PAN_SYSVAL_NUM_WORK_GROUPS:
1251 panfrost_upload_num_work_groups_sysval(batch,
1252 &uniforms[i]);
1253 break;
1254 case PAN_SYSVAL_SAMPLER:
1255 panfrost_upload_sampler_sysval(batch, st,
1256 PAN_SYSVAL_ID(sysval),
1257 &uniforms[i]);
1258 break;
1259 default:
1260 assert(0);
1261 }
1262 }
1263 }
1264
1265 static const void *
1266 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1267 unsigned index)
1268 {
1269 struct pipe_constant_buffer *cb = &buf->cb[index];
1270 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1271
1272 if (rsrc)
1273 return rsrc->bo->cpu;
1274 else if (cb->user_buffer)
1275 return cb->user_buffer;
1276 else
1277 unreachable("No constant buffer");
1278 }
1279
1280 void
1281 panfrost_emit_const_buf(struct panfrost_batch *batch,
1282 enum pipe_shader_type stage,
1283 struct mali_vertex_tiler_postfix *postfix)
1284 {
1285 struct panfrost_context *ctx = batch->ctx;
1286 struct panfrost_shader_variants *all = ctx->shader[stage];
1287
1288 if (!all)
1289 return;
1290
1291 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1292
1293 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1294
1295 /* Uniforms are implicitly UBO #0 */
1296 bool has_uniforms = buf->enabled_mask & (1 << 0);
1297
1298 /* Allocate room for the sysval and the uniforms */
1299 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1300 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1301 size_t size = sys_size + uniform_size;
1302 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1303 size);
1304
1305 /* Upload sysvals requested by the shader */
1306 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1307
1308 /* Upload uniforms */
1309 if (has_uniforms && uniform_size) {
1310 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1311 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1312 }
1313
1314 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1315 * uploaded */
1316
1317 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1318 assert(ubo_count >= 1);
1319
1320 size_t sz = sizeof(uint64_t) * ubo_count;
1321 uint64_t ubos[PAN_MAX_CONST_BUFFERS];
1322 int uniform_count = ss->uniform_count;
1323
1324 /* Upload uniforms as a UBO */
1325 ubos[0] = MALI_MAKE_UBO(2 + uniform_count, transfer.gpu);
1326
1327 /* The rest are honest-to-goodness UBOs */
1328
1329 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1330 size_t usz = buf->cb[ubo].buffer_size;
1331 bool enabled = buf->enabled_mask & (1 << ubo);
1332 bool empty = usz == 0;
1333
1334 if (!enabled || empty) {
1335 /* Stub out disabled UBOs to catch accesses */
1336 ubos[ubo] = MALI_MAKE_UBO(0, 0xDEAD0000);
1337 continue;
1338 }
1339
1340 mali_ptr gpu = panfrost_map_constant_buffer_gpu(batch, stage,
1341 buf, ubo);
1342
1343 unsigned bytes_per_field = 16;
1344 unsigned aligned = ALIGN_POT(usz, bytes_per_field);
1345 ubos[ubo] = MALI_MAKE_UBO(aligned / bytes_per_field, gpu);
1346 }
1347
1348 mali_ptr ubufs = panfrost_pool_upload(&batch->pool, ubos, sz);
1349 postfix->uniforms = transfer.gpu;
1350 postfix->uniform_buffers = ubufs;
1351
1352 buf->dirty_mask = 0;
1353 }
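/* Rough sketch of the buffer panfrost_emit_const_buf uploads, inferred from
 * the code above (not an authoritative layout description):
 *
 *   [ sysvals: 16 bytes * sysval_count ][ user uniforms: cb[0].buffer_size ]
 *   ^ transfer.gpu == postfix->uniforms
 *
 * The same address is also wrapped as UBO #0 (ubos[0] above), presumably so
 * the shader can reach the uniforms through either path. */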
1354
1355 void
1356 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1357 const struct pipe_grid_info *info,
1358 struct midgard_payload_vertex_tiler *vtp)
1359 {
1360 struct panfrost_context *ctx = batch->ctx;
1361 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1362 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1363 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1364 128));
1365 unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
1366 info->grid[2] * 4;
1367 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1368 shared_size,
1369 1);
1370
1371 struct mali_shared_memory shared = {
1372 .shared_memory = bo->gpu,
1373 .shared_workgroup_count =
1374 util_logbase2_ceil(info->grid[0]) +
1375 util_logbase2_ceil(info->grid[1]) +
1376 util_logbase2_ceil(info->grid[2]),
1377 .shared_unk1 = 0x2,
1378 .shared_shift = util_logbase2(single_size) - 1
1379 };
1380
1381 vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
1382 sizeof(shared));
1383 }
1384
1385 static mali_ptr
1386 panfrost_get_tex_desc(struct panfrost_batch *batch,
1387 enum pipe_shader_type st,
1388 struct panfrost_sampler_view *view)
1389 {
1390 if (!view)
1391 return (mali_ptr) 0;
1392
1393 struct pipe_sampler_view *pview = &view->base;
1394 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1395
1396 /* Add the BO to the job so it's retained until the job is done. */
1397
1398 panfrost_batch_add_bo(batch, rsrc->bo,
1399 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1400 panfrost_bo_access_for_stage(st));
1401
1402 panfrost_batch_add_bo(batch, view->bo,
1403 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1404 panfrost_bo_access_for_stage(st));
1405
1406 return view->bo->gpu;
1407 }
1408
1409 static void
1410 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1411 struct pipe_context *pctx)
1412 {
1413 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1414 if (view->texture_bo != rsrc->bo->gpu ||
1415 view->layout != rsrc->layout) {
1416 panfrost_bo_unreference(view->bo);
1417 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1418 }
1419 }
1420
1421 void
1422 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1423 enum pipe_shader_type stage,
1424 struct mali_vertex_tiler_postfix *postfix)
1425 {
1426 struct panfrost_context *ctx = batch->ctx;
1427 struct panfrost_device *device = pan_device(ctx->base.screen);
1428
1429 if (!ctx->sampler_view_count[stage])
1430 return;
1431
1432 if (device->quirks & IS_BIFROST) {
1433 struct bifrost_texture_descriptor *descriptors;
1434
1435 descriptors = malloc(sizeof(struct bifrost_texture_descriptor) *
1436 ctx->sampler_view_count[stage]);
1437
1438 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1439 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1440 struct pipe_sampler_view *pview = &view->base;
1441 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1442 panfrost_update_sampler_view(view, &ctx->base);
1443
1444 /* Add the BOs to the job so they are retained until the job is done. */
1445
1446 panfrost_batch_add_bo(batch, rsrc->bo,
1447 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1448 panfrost_bo_access_for_stage(stage));
1449
1450 panfrost_batch_add_bo(batch, view->bo,
1451 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1452 panfrost_bo_access_for_stage(stage));
1453
1454 memcpy(&descriptors[i], view->bifrost_descriptor, sizeof(*view->bifrost_descriptor));
1455 }
1456
1457 postfix->textures = panfrost_pool_upload(&batch->pool,
1458 descriptors,
1459 sizeof(struct bifrost_texture_descriptor) *
1460 ctx->sampler_view_count[stage]);
1461
1462 free(descriptors);
1463 } else {
1464 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1465
1466 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1467 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1468
1469 panfrost_update_sampler_view(view, &ctx->base);
1470
1471 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1472 }
1473
1474 postfix->textures = panfrost_pool_upload(&batch->pool,
1475 trampolines,
1476 sizeof(uint64_t) *
1477 ctx->sampler_view_count[stage]);
1478 }
1479 }
1480
1481 void
1482 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1483 enum pipe_shader_type stage,
1484 struct mali_vertex_tiler_postfix *postfix)
1485 {
1486 struct panfrost_context *ctx = batch->ctx;
1487 struct panfrost_device *device = pan_device(ctx->base.screen);
1488
1489 if (!ctx->sampler_count[stage])
1490 return;
1491
1492 if (device->quirks & IS_BIFROST) {
1493 size_t desc_size = sizeof(struct bifrost_sampler_descriptor);
1494 size_t transfer_size = desc_size * ctx->sampler_count[stage];
1495 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1496 transfer_size);
1497 struct bifrost_sampler_descriptor *desc = (struct bifrost_sampler_descriptor *)transfer.cpu;
1498
1499 for (int i = 0; i < ctx->sampler_count[stage]; ++i)
1500 desc[i] = ctx->samplers[stage][i]->bifrost_hw;
1501
1502 postfix->sampler_descriptor = transfer.gpu;
1503 } else {
1504 size_t desc_size = sizeof(struct mali_sampler_descriptor);
1505 size_t transfer_size = desc_size * ctx->sampler_count[stage];
1506 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1507 transfer_size);
1508 struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *)transfer.cpu;
1509
1510 for (int i = 0; i < ctx->sampler_count[stage]; ++i)
1511 desc[i] = ctx->samplers[stage][i]->midgard_hw;
1512
1513 postfix->sampler_descriptor = transfer.gpu;
1514 }
1515 }
1516
1517 void
1518 panfrost_emit_vertex_attr_meta(struct panfrost_batch *batch,
1519 struct mali_vertex_tiler_postfix *vertex_postfix)
1520 {
1521 struct panfrost_context *ctx = batch->ctx;
1522
1523 if (!ctx->vertex)
1524 return;
1525
1526 struct panfrost_vertex_state *so = ctx->vertex;
1527
1528 panfrost_vertex_state_upd_attr_offs(ctx, vertex_postfix);
1529 vertex_postfix->attribute_meta = panfrost_pool_upload(&batch->pool, so->hw,
1530 sizeof(*so->hw) *
1531 PAN_MAX_ATTRIBUTE);
1532 }
1533
1534 void
1535 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1536 struct mali_vertex_tiler_postfix *vertex_postfix)
1537 {
1538 struct panfrost_context *ctx = batch->ctx;
1539 struct panfrost_vertex_state *so = ctx->vertex;
1540
1541 /* Staged mali_attr, and index into them. i =/= k, depending on the
1542 * vertex buffer mask and instancing. Twice as much room is allocated,
1543 * for a worst case of NPOT_DIVIDEs, which take up an extra slot */
1544 union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
1545 unsigned k = 0;
1546
1547 for (unsigned i = 0; i < so->num_elements; ++i) {
1548 /* We map a mali_attr to be 1:1 with the mali_attr_meta, which
1549 * means duplicating some vertex buffers (who cares? aside from
1550 * maybe some caching implications but I somehow doubt that
1551 * matters) */
1552
1553 struct pipe_vertex_element *elem = &so->pipe[i];
1554 unsigned vbi = elem->vertex_buffer_index;
1555
1556 /* The exception to the 1:1 mapping is that we can have multiple
1557 * entries (NPOT divisors), so we fix up anyway */
1558
1559 so->hw[i].index = k;
1560
1561 if (!(ctx->vb_mask & (1 << vbi)))
1562 continue;
1563
1564 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1565 struct panfrost_resource *rsrc;
1566
1567 rsrc = pan_resource(buf->buffer.resource);
1568 if (!rsrc)
1569 continue;
1570
1571 /* Align to 64 bytes by masking off the lower bits. This
1572 * will be adjusted back when we fixup the src_offset in
1573 * mali_attr_meta */
1574
1575 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1576 mali_ptr addr = raw_addr & ~63;
1577 unsigned chopped_addr = raw_addr - addr;
1578
1579 /* Add a dependency of the batch on the vertex buffer */
1580 panfrost_batch_add_bo(batch, rsrc->bo,
1581 PAN_BO_ACCESS_SHARED |
1582 PAN_BO_ACCESS_READ |
1583 PAN_BO_ACCESS_VERTEX_TILER);
1584
1585 /* Set common fields */
1586 attrs[k].elements = addr;
1587 attrs[k].stride = buf->stride;
1588
1589 /* Since we advanced the base pointer, we shrink the buffer
1590 * size */
1591 attrs[k].size = rsrc->base.width0 - buf->buffer_offset;
1592
1593 /* We need to add the extra size we masked off (for
1594 * correctness) so the data doesn't get clamped away */
1595 attrs[k].size += chopped_addr;
1596
1597 /* For non-instancing make sure we initialize */
1598 attrs[k].shift = attrs[k].extra_flags = 0;
1599
1600 /* Instancing uses a dramatically different code path than
1601 * linear, so dispatch for the actual emission now that the
1602 * common code is finished */
1603
1604 unsigned divisor = elem->instance_divisor;
1605
1606 if (divisor && ctx->instance_count == 1) {
1607 /* Silly corner case where there's a divisor(=1) but
1608 * there's no legitimate instancing. So we want *every*
1609 * attribute to be the same. So set stride to zero so
1610 * we don't go anywhere. */
1611
1612 attrs[k].size = attrs[k].stride + chopped_addr;
1613 attrs[k].stride = 0;
1614 attrs[k++].elements |= MALI_ATTR_LINEAR;
1615 } else if (ctx->instance_count <= 1) {
1616 /* Normal, non-instanced attributes */
1617 attrs[k++].elements |= MALI_ATTR_LINEAR;
1618 } else {
1619 unsigned instance_shift = vertex_postfix->instance_shift;
1620 unsigned instance_odd = vertex_postfix->instance_odd;
1621
1622 k += panfrost_vertex_instanced(ctx->padded_count,
1623 instance_shift,
1624 instance_odd,
1625 divisor, &attrs[k]);
1626 }
1627 }
1628
1629 /* Add special gl_VertexID/gl_InstanceID buffers */
1630
1631 panfrost_vertex_id(ctx->padded_count, &attrs[k]);
1632 so->hw[PAN_VERTEX_ID].index = k++;
1633 panfrost_instance_id(ctx->padded_count, &attrs[k]);
1634 so->hw[PAN_INSTANCE_ID].index = k++;
1635
1636 /* Upload whatever we emitted and go */
1637
1638 vertex_postfix->attributes = panfrost_pool_upload(&batch->pool, attrs,
1639 k * sizeof(*attrs));
1640 }
1641
1642 static mali_ptr
1643 panfrost_emit_varyings(struct panfrost_batch *batch, union mali_attr *slot,
1644 unsigned stride, unsigned count)
1645 {
1646 /* Fill out the descriptor */
1647 slot->stride = stride;
1648 slot->size = stride * count;
1649 slot->shift = slot->extra_flags = 0;
1650
1651 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1652 slot->size);
1653
1654 slot->elements = transfer.gpu | MALI_ATTR_LINEAR;
1655
1656 return transfer.gpu;
1657 }
1658
1659 static unsigned
1660 panfrost_streamout_offset(unsigned stride, unsigned offset,
1661 struct pipe_stream_output_target *target)
1662 {
1663 return (target->buffer_offset + (offset * stride * 4)) & 63;
1664 }
1665
1666 static void
1667 panfrost_emit_streamout(struct panfrost_batch *batch, union mali_attr *slot,
1668 unsigned stride, unsigned offset, unsigned count,
1669 struct pipe_stream_output_target *target)
1670 {
1671 /* Fill out the descriptor */
1672 slot->stride = stride * 4;
1673 slot->shift = slot->extra_flags = 0;
1674
1675 unsigned max_size = target->buffer_size;
1676 unsigned expected_size = slot->stride * count;
1677
1678 /* Grab the BO and bind it to the batch */
1679 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1680
1681 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1682 * the perspective of the TILER and FRAGMENT.
1683 */
1684 panfrost_batch_add_bo(batch, bo,
1685 PAN_BO_ACCESS_SHARED |
1686 PAN_BO_ACCESS_RW |
1687 PAN_BO_ACCESS_VERTEX_TILER |
1688 PAN_BO_ACCESS_FRAGMENT);
1689
1690 /* We will have an offset applied to get alignment */
1691 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * slot->stride);
1692 slot->elements = (addr & ~63) | MALI_ATTR_LINEAR;
1693 slot->size = MIN2(max_size, expected_size) + (addr & 63);
1694 }
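/* Worked example for the two streamout helpers above (illustrative values
 * only): with buffer_offset = 100, a stride of 4 dwords (16 bytes) and
 * offset = 3 vertices already written, the target address is
 * bo->gpu + 100 + 48. The descriptor base is rounded down to 64 bytes
 * (addr & ~63), while the 64-byte remainder returned by
 * panfrost_streamout_offset (here 148 & 63 = 20) presumably ends up as the
 * matching record's src_offset, so the two must be computed consistently. */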
1695
1696 static bool
1697 has_point_coord(unsigned mask, gl_varying_slot loc)
1698 {
1699 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1700 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1701 else if (loc == VARYING_SLOT_PNTC)
1702 return (mask & (1 << 8));
1703 else
1704 return false;
1705 }
1706
1707 /* Helpers for manipulating stream out information so we can pack varyings
1708 * accordingly. Compute the src_offset for a given captured varying */
1709
1710 static struct pipe_stream_output *
1711 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1712 {
1713 for (unsigned i = 0; i < info->num_outputs; ++i) {
1714 if (info->output[i].register_index == loc)
1715 return &info->output[i];
1716 }
1717
1718 unreachable("Varying not captured");
1719 }
1720
1721 static unsigned
1722 pan_varying_size(enum mali_format fmt)
1723 {
1724 unsigned type = MALI_EXTRACT_TYPE(fmt);
1725 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1726 unsigned bits = MALI_EXTRACT_BITS(fmt);
1727 unsigned bpc = 0;
1728
1729 if (bits == MALI_CHANNEL_FLOAT) {
1730 /* No doubles */
1731 bool fp16 = (type == MALI_FORMAT_SINT);
1732 assert(fp16 || (type == MALI_FORMAT_UNORM));
1733
1734 bpc = fp16 ? 2 : 4;
1735 } else {
1736 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1737
1738 /* See the enums */
1739 bits = 1 << bits;
1740 assert(bits >= 8);
1741 bpc = bits / 8;
1742 }
1743
1744 return bpc * chan;
1745 }
1746
1747 /* Indices for named (non-XFB) varyings that are present. These are packed
1748 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1749 * PAN_VARY_*). This has the nice property that you can look up the buffer index
1750 * of a given special field given a shift S by:
1751 *
1752 * idx = popcount(P & ((1 << S) - 1))
1753 *
1754 * That is, count all of the varyings that come earlier; since indices are
1755 * zero-based, that count is this varying's index. Likewise, the total number of special
1756 * buffers required is simply popcount(P)
1757 */
1758
1759 enum pan_special_varying {
1760 PAN_VARY_GENERAL = 0,
1761 PAN_VARY_POSITION = 1,
1762 PAN_VARY_PSIZ = 2,
1763 PAN_VARY_PNTCOORD = 3,
1764 PAN_VARY_FACE = 4,
1765 PAN_VARY_FRAGCOORD = 5,
1766
1767 /* Keep last */
1768 PAN_VARY_MAX,
1769 };
1770
1771 /* Given a varying, figure out which index it corresponds to */
1772
1773 static inline unsigned
1774 pan_varying_index(unsigned present, enum pan_special_varying v)
1775 {
1776 unsigned mask = (1 << v) - 1;
1777 return util_bitcount(present & mask);
1778 }
1779
1780 /* Get the base offset for XFB buffers, which by convention come after
1781 * everything else. Wrapper function for semantic reasons; by construction this
1782 * is just popcount. */
1783
1784 static inline unsigned
1785 pan_xfb_base(unsigned present)
1786 {
1787 return util_bitcount(present);
1788 }
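
/* Illustrative example of the indexing scheme (hypothetical mask): with
 * present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_PSIZ) | (1 << PAN_VARY_FACE),
 *
 *    pan_varying_index(present, PAN_VARY_PSIZ) = popcount(0b00001) = 1
 *    pan_varying_index(present, PAN_VARY_FACE) = popcount(0b00101) = 2
 *    pan_xfb_base(present)                     = popcount(0b10101) = 3
 *
 * so PSIZ occupies the second record, FACE the third, and any XFB buffers
 * start at index 3. */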
1789
1790 /* Computes the present mask for varyings so we can start emitting varying records */
1791
1792 static inline unsigned
1793 pan_varying_present(
1794 struct panfrost_shader_state *vs,
1795 struct panfrost_shader_state *fs,
1796 unsigned quirks)
1797 {
1798 /* At the moment we always emit general and position buffers. Not
1799 * strictly necessary but usually harmless */
1800
1801 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1802
1803 /* Enable special buffers by the shader info */
1804
1805 if (vs->writes_point_size)
1806 present |= (1 << PAN_VARY_PSIZ);
1807
1808 if (fs->reads_point_coord)
1809 present |= (1 << PAN_VARY_PNTCOORD);
1810
1811 if (fs->reads_face)
1812 present |= (1 << PAN_VARY_FACE);
1813
1814 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1815 present |= (1 << PAN_VARY_FRAGCOORD);
1816
1817 /* Also, if we have a point sprite, we need a point coord buffer */
1818
1819 for (unsigned i = 0; i < fs->varying_count; i++) {
1820 gl_varying_slot loc = fs->varyings_loc[i];
1821
1822 if (has_point_coord(fs->point_sprite_mask, loc))
1823 present |= (1 << PAN_VARY_PNTCOORD);
1824 }
1825
1826 return present;
1827 }
1828
1829 /* Emitters for varying records */
1830
1831 static struct mali_attr_meta
1832 pan_emit_vary(unsigned present, enum pan_special_varying buf,
1833 unsigned quirks, enum mali_format format,
1834 unsigned offset)
1835 {
1836 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1837
1838 struct mali_attr_meta meta = {
1839 .index = pan_varying_index(present, buf),
1840 .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
1841 .swizzle = quirks & HAS_SWIZZLES ?
1842 panfrost_get_default_swizzle(nr_channels) :
1843 panfrost_bifrost_swizzle(nr_channels),
1844 .format = format,
1845 .src_offset = offset
1846 };
1847
1848 return meta;
1849 }
1850
1851 /* General varying with no counterpart in the other stage; its value is discarded */
1852
1853 static struct mali_attr_meta
1854 pan_emit_vary_only(unsigned present, unsigned quirks)
1855 {
1856 return pan_emit_vary(present, 0, quirks, MALI_VARYING_DISCARD, 0);
1857 }
1858
1859 /* Special records */
1860
1861 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1862 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1863 [PAN_VARY_PSIZ] = MALI_R16F,
1864 [PAN_VARY_PNTCOORD] = MALI_R16F,
1865 [PAN_VARY_FACE] = MALI_R32I,
1866 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1867 };
1868
1869 static struct mali_attr_meta
1870 pan_emit_vary_special(unsigned present, enum pan_special_varying buf,
1871 unsigned quirks)
1872 {
1873 assert(buf < PAN_VARY_MAX);
1874 return pan_emit_vary(present, buf, quirks, pan_varying_formats[buf], 0);
1875 }
1876
1877 static enum mali_format
1878 pan_xfb_format(enum mali_format format, unsigned nr)
1879 {
1880 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1881 return MALI_R32F | MALI_NR_CHANNELS(nr);
1882 else
1883 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1884 }
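
/* For example, a half-float varying captured with 3 components is written out
 * as three 32-bit floats, and an 8-bit integer varying captured with 2
 * components is widened to two 32-bit integers of the same signedness; only
 * the channel count and precision change, never the base type. */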
1885
1886 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1887 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1888 * value. */
1889
1890 static struct mali_attr_meta
1891 pan_emit_vary_xfb(unsigned present,
1892 unsigned max_xfb,
1893 unsigned *streamout_offsets,
1894 unsigned quirks,
1895 enum mali_format format,
1896 struct pipe_stream_output o)
1897 {
1898 /* Construct a record for the captured output */
1899 struct mali_attr_meta meta = {
1900 /* XFB buffers come after everything else */
1901 .index = pan_xfb_base(present) + o.output_buffer,
1902
1903 /* The usual unknown bit */
1904 .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
1905
1906 /* Override swizzle with number of channels */
1907 .swizzle = quirks & HAS_SWIZZLES ?
1908 panfrost_get_default_swizzle(o.num_components) :
1909 panfrost_bifrost_swizzle(o.num_components),
1910
1911 /* Override number of channels and precision to highp */
1912 .format = pan_xfb_format(format, o.num_components),
1913
1914 /* Apply given offsets together */
1915 .src_offset = (o.dst_offset * 4) /* dwords */
1916 + streamout_offsets[o.output_buffer]
1917 };
1918
1919 return meta;
1920 }
1921
1922 /* Determine if we should capture a varying for XFB. This requires actually
1923 * having a buffer for it. If we don't capture it, we'll fall back to a general
1924 * varying path (linked or unlinked, possibly discarding the write) */
1925
1926 static bool
1927 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1928 unsigned loc, unsigned max_xfb)
1929 {
1930 if (!(xfb->so_mask & (1ll << loc)))
1931 return false;
1932
1933 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1934 return o->output_buffer < max_xfb;
1935 }
1936
1937 /* Higher-level wrapper around the emitters above, classifying a varying into
1938 * one of the record types */
1939
1940 static struct mali_attr_meta
1941 panfrost_emit_varying(
1942 struct panfrost_shader_state *stage,
1943 struct panfrost_shader_state *other,
1944 struct panfrost_shader_state *xfb,
1945 unsigned present,
1946 unsigned max_xfb,
1947 unsigned *streamout_offsets,
1948 unsigned quirks,
1949 unsigned *gen_offsets,
1950 enum mali_format *gen_formats,
1951 unsigned *gen_stride,
1952 unsigned idx,
1953 bool should_alloc,
1954 bool is_fragment)
1955 {
1956 gl_varying_slot loc = stage->varyings_loc[idx];
1957 enum mali_format format = stage->varyings[idx];
1958
1959 /* Override format to match linkage */
1960 if (!should_alloc && gen_formats[idx])
1961 format = gen_formats[idx];
1962
1963 if (has_point_coord(stage->point_sprite_mask, loc)) {
1964 return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
1965 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1966 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1967 return pan_emit_vary_xfb(present, max_xfb, streamout_offsets, quirks, format, *o);
1968 } else if (loc == VARYING_SLOT_POS) {
1969 if (is_fragment)
1970 return pan_emit_vary_special(present, PAN_VARY_FRAGCOORD, quirks);
1971 else
1972 return pan_emit_vary_special(present, PAN_VARY_POSITION, quirks);
1973 } else if (loc == VARYING_SLOT_PSIZ) {
1974 return pan_emit_vary_special(present, PAN_VARY_PSIZ, quirks);
1975 } else if (loc == VARYING_SLOT_PNTC) {
1976 return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
1977 } else if (loc == VARYING_SLOT_FACE) {
1978 return pan_emit_vary_special(present, PAN_VARY_FACE, quirks);
1979 }
1980
1981 /* We've exhausted the special cases, so this is a general varying. Check if we're linked */
1982 signed other_idx = -1;
1983
1984 for (unsigned j = 0; j < other->varying_count; ++j) {
1985 if (other->varyings_loc[j] == loc) {
1986 other_idx = j;
1987 break;
1988 }
1989 }
1990
1991 if (other_idx < 0)
1992 return pan_emit_vary_only(present, quirks);
1993
1994 unsigned offset = gen_offsets[other_idx];
1995
1996 if (should_alloc) {
1997 /* We're linked, so allocate space via a watermark (bump) allocation */
1998 enum mali_format alt = other->varyings[other_idx];
1999
2000 /* Do interpolation at minimum precision */
2001 unsigned size_main = pan_varying_size(format);
2002 unsigned size_alt = pan_varying_size(alt);
2003 unsigned size = MIN2(size_main, size_alt);
2004
2005 /* If a varying is marked for XFB but not actually captured, we
2006 * should match the format to the format that would otherwise
2007 * be used for XFB, since dEQP checks for invariance here. It's
2008 * unclear if this is required by the spec. */
2009
2010 if (xfb->so_mask & (1ull << loc)) {
2011 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
2012 format = pan_xfb_format(format, o->num_components);
2013 size = pan_varying_size(format);
2014 } else if (size == size_alt) {
2015 format = alt;
2016 }
2017
2018 gen_offsets[idx] = *gen_stride;
2019 gen_formats[other_idx] = format;
2020 offset = *gen_stride;
2021 *gen_stride += size;
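/* e.g. (hypothetical link): a varying interpolated as an fp16 vec4 takes
 * 8 bytes, so the first such varying gets offset 0 and bumps *gen_stride
 * to 8, the next gets offset 8, and so on, packing the per-vertex layout
 * tightly. */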
2022 }
2023
2024 return pan_emit_vary(present, PAN_VARY_GENERAL,
2025 quirks, format, offset);
2026 }
2027
2028 static void
2029 pan_emit_special_input(union mali_attr *varyings,
2030 unsigned present,
2031 enum pan_special_varying v,
2032 mali_ptr addr)
2033 {
2034 if (present & (1 << v)) {
2035 /* Write the record exactly once for performance, with the remaining
2036 * fields zeroed to avoid flakes from stale data */
2037
2038 union mali_attr s = {
2039 .elements = addr
2040 };
2041
2042 varyings[pan_varying_index(present, v)] = s;
2043 }
2044 }
2045
2046 void
2047 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
2048 unsigned vertex_count,
2049 struct mali_vertex_tiler_postfix *vertex_postfix,
2050 struct mali_vertex_tiler_postfix *tiler_postfix,
2051 union midgard_primitive_size *primitive_size)
2052 {
2053 /* Load the shaders */
2054 struct panfrost_context *ctx = batch->ctx;
2055 struct panfrost_device *dev = pan_device(ctx->base.screen);
2056 struct panfrost_shader_state *vs, *fs;
2057 size_t vs_size, fs_size;
2058
2059 /* Allocate the varying descriptor */
2060
2061 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
2062 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
2063 vs_size = sizeof(struct mali_attr_meta) * vs->varying_count;
2064 fs_size = sizeof(struct mali_attr_meta) * fs->varying_count;
2065
2066 struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
2067 vs_size +
2068 fs_size);
2069
2070 struct pipe_stream_output_info *so = &vs->stream_output;
2071 unsigned present = pan_varying_present(vs, fs, dev->quirks);
2072
2073 /* Check if this varying is linked by us. This is the case for
2074 * general-purpose, non-captured varyings. If it is, link it. If it's
2075 * not, use the provided stream out information to determine the
2076 * offset, since it was already linked for us. */
2077
2078 unsigned gen_offsets[32];
2079 enum mali_format gen_formats[32];
2080 memset(gen_offsets, 0, sizeof(gen_offsets));
2081 memset(gen_formats, 0, sizeof(gen_formats));
2082
2083 unsigned gen_stride = 0;
2084 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
2085 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
2086
2087 unsigned streamout_offsets[32];
2088
2089 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2090 streamout_offsets[i] = panfrost_streamout_offset(
2091 so->stride[i],
2092 ctx->streamout.offsets[i],
2093 ctx->streamout.targets[i]);
2094 }
2095
2096 struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
2097 struct mali_attr_meta *ofs = ovs + vs->varying_count;
2098
2099 for (unsigned i = 0; i < vs->varying_count; i++) {
2100 ovs[i] = panfrost_emit_varying(vs, fs, vs, present,
2101 ctx->streamout.num_targets, streamout_offsets,
2102 dev->quirks,
2103 gen_offsets, gen_formats, &gen_stride, i, true, false);
2104 }
2105
2106 for (unsigned i = 0; i < fs->varying_count; i++) {
2107 ofs[i] = panfrost_emit_varying(fs, vs, vs, present,
2108 ctx->streamout.num_targets, streamout_offsets,
2109 dev->quirks,
2110 gen_offsets, gen_formats, &gen_stride, i, false, true);
2111 }
2112
2113 unsigned xfb_base = pan_xfb_base(present);
2114 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
2115 sizeof(union mali_attr) * (xfb_base + ctx->streamout.num_targets));
2116 union mali_attr *varyings = (union mali_attr *) T.cpu;
2117
2118 /* Emit the stream out buffers */
2119
2120 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
2121 ctx->vertex_count);
2122
2123 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2124 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
2125 so->stride[i],
2126 ctx->streamout.offsets[i],
2127 out_count,
2128 ctx->streamout.targets[i]);
2129 }
2130
2131 panfrost_emit_varyings(batch,
2132 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2133 gen_stride, vertex_count);
2134
2135 /* fp32 vec4 gl_Position */
2136 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2137 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2138 sizeof(float) * 4, vertex_count);
2139
2140 if (present & (1 << PAN_VARY_PSIZ)) {
2141 primitive_size->pointer = panfrost_emit_varyings(batch,
2142 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2143 2, vertex_count);
2144 }
2145
2146 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_VARYING_POINT_COORD);
2147 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_VARYING_FRONT_FACING);
2148 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_VARYING_FRAG_COORD);
2149
2150 vertex_postfix->varyings = T.gpu;
2151 tiler_postfix->varyings = T.gpu;
2152
2153 vertex_postfix->varying_meta = trans.gpu;
2154 tiler_postfix->varying_meta = trans.gpu + vs_size;
2155 }
2156
2157 void
2158 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2159 struct mali_vertex_tiler_prefix *vertex_prefix,
2160 struct mali_vertex_tiler_postfix *vertex_postfix,
2161 struct mali_vertex_tiler_prefix *tiler_prefix,
2162 struct mali_vertex_tiler_postfix *tiler_postfix,
2163 union midgard_primitive_size *primitive_size)
2164 {
2165 struct panfrost_context *ctx = batch->ctx;
2166 struct panfrost_device *device = pan_device(ctx->base.screen);
2167 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2168 struct bifrost_payload_vertex bifrost_vertex = {0,};
2169 struct bifrost_payload_tiler bifrost_tiler = {0,};
2170 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2171 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2172 void *vp, *tp;
2173 size_t vp_size, tp_size;
2174
2175 if (device->quirks & IS_BIFROST) {
2176 bifrost_vertex.prefix = *vertex_prefix;
2177 bifrost_vertex.postfix = *vertex_postfix;
2178 vp = &bifrost_vertex;
2179 vp_size = sizeof(bifrost_vertex);
2180
2181 bifrost_tiler.prefix = *tiler_prefix;
2182 bifrost_tiler.tiler.primitive_size = *primitive_size;
2183 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2184 bifrost_tiler.postfix = *tiler_postfix;
2185 tp = &bifrost_tiler;
2186 tp_size = sizeof(bifrost_tiler);
2187 } else {
2188 midgard_vertex.prefix = *vertex_prefix;
2189 midgard_vertex.postfix = *vertex_postfix;
2190 vp = &midgard_vertex;
2191 vp_size = sizeof(midgard_vertex);
2192
2193 midgard_tiler.prefix = *tiler_prefix;
2194 midgard_tiler.postfix = *tiler_postfix;
2195 midgard_tiler.primitive_size = *primitive_size;
2196 tp = &midgard_tiler;
2197 tp_size = sizeof(midgard_tiler);
2198 }
2199
2200 if (wallpapering) {
2201 /* Inject in reverse order, with "predicted" job indices.
2202 * THIS IS A HACK XXX */
2203 panfrost_new_job(&batch->pool, &batch->scoreboard, JOB_TYPE_TILER, false,
2204 batch->scoreboard.job_index + 2, tp, tp_size, true);
2205 panfrost_new_job(&batch->pool, &batch->scoreboard, JOB_TYPE_VERTEX, false, 0,
2206 vp, vp_size, true);
2207 return;
2208 }
2209
2210 /* If rasterizer discard is enabled, only submit the vertex job */
2211
2212 bool rasterizer_discard = ctx->rasterizer &&
2213 ctx->rasterizer->base.rasterizer_discard;
2214
2215 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, JOB_TYPE_VERTEX, false, 0,
2216 vp, vp_size, false);
2217
2218 if (rasterizer_discard)
2219 return;
2220
2221 panfrost_new_job(&batch->pool, &batch->scoreboard, JOB_TYPE_TILER, false, vertex, tp, tp_size,
2222 false);
2223 }
2224
2225 /* TODO: stop hardcoding this */
2226 mali_ptr
2227 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2228 {
2229 uint16_t locations[] = {
2230 128, 128,
2231 0, 256,
2232 0, 256,
2233 0, 256,
2234 0, 256,
2235 0, 256,
2236 0, 256,
2237 0, 256,
2238 0, 256,
2239 0, 256,
2240 0, 256,
2241 0, 256,
2242 0, 256,
2243 0, 256,
2244 0, 256,
2245 0, 256,
2246 0, 256,
2247 0, 256,
2248 0, 256,
2249 0, 256,
2250 0, 256,
2251 0, 256,
2252 0, 256,
2253 0, 256,
2254 0, 256,
2255 0, 256,
2256 0, 256,
2257 0, 256,
2258 0, 256,
2259 0, 256,
2260 0, 256,
2261 0, 256,
2262 128, 128,
2263 0, 0,
2264 0, 0,
2265 0, 0,
2266 0, 0,
2267 0, 0,
2268 0, 0,
2269 0, 0,
2270 0, 0,
2271 0, 0,
2272 0, 0,
2273 0, 0,
2274 0, 0,
2275 0, 0,
2276 0, 0,
2277 0, 0,
2278 };
2279
2280 return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
2281 }