panfrost: XMLify stencil test
mesa.git: src/gallium/drivers/panfrost/pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 unsigned shift = panfrost_get_stack_shift(batch->stack_size);
62 struct mali_shared_memory shared = {
63 .stack_shift = shift,
64 .scratchpad = panfrost_batch_get_scratchpad(batch, shift, dev->thread_tls_alloc, dev->core_count)->gpu,
65 .shared_workgroup_count = ~0,
66 };
67 postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
68 }
69
70 static void
71 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
72 struct mali_vertex_tiler_postfix *postfix)
73 {
74 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
75 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
76 }
77
78 static void
79 panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_prefix *prefix,
81 struct mali_vertex_tiler_postfix *postfix)
82 {
83 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
84
85 postfix->gl_enables |= 0x7;
86 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
87 rasterizer && rasterizer->base.front_ccw);
88 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
89 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
90 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
91 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
92 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
93 rasterizer && rasterizer->base.flatshade_first);
94 }
95
96 void
97 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
98 struct mali_vertex_tiler_prefix *prefix,
99 union midgard_primitive_size *primitive_size)
100 {
101 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
102
103 if (!panfrost_writes_point_size(ctx)) {
104 bool points = prefix->draw_mode == MALI_DRAW_MODE_POINTS;
105 float val = 0.0f;
106
107 if (rasterizer)
108 val = points ?
109 rasterizer->base.point_size :
110 rasterizer->base.line_width;
111
112 primitive_size->constant = val;
113 }
114 }
115
116 static void
117 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
118 struct mali_vertex_tiler_postfix *postfix)
119 {
120 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
121 if (ctx->occlusion_query) {
122 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
123 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
124 PAN_BO_ACCESS_SHARED |
125 PAN_BO_ACCESS_RW |
126 PAN_BO_ACCESS_FRAGMENT);
127 } else {
128 postfix->occlusion_counter = 0;
129 }
130 }
131
132 void
133 panfrost_vt_init(struct panfrost_context *ctx,
134 enum pipe_shader_type stage,
135 struct mali_vertex_tiler_prefix *prefix,
136 struct mali_vertex_tiler_postfix *postfix)
137 {
138 struct panfrost_device *device = pan_device(ctx->base.screen);
139
140 if (!ctx->shader[stage])
141 return;
142
143 memset(prefix, 0, sizeof(*prefix));
144 memset(postfix, 0, sizeof(*postfix));
145
146 if (device->quirks & IS_BIFROST) {
147 postfix->gl_enables = 0x2;
148 panfrost_vt_emit_shared_memory(ctx, postfix);
149 } else {
150 postfix->gl_enables = 0x6;
151 panfrost_vt_attach_framebuffer(ctx, postfix);
152 }
153
154 if (stage == PIPE_SHADER_FRAGMENT) {
155 panfrost_vt_update_occlusion_query(ctx, postfix);
156 panfrost_vt_update_rasterizer(ctx, prefix, postfix);
157 }
158 }
159
160 static unsigned
161 panfrost_translate_index_size(unsigned size)
162 {
163 switch (size) {
164 case 1:
165 return MALI_DRAW_INDEXED_UINT8;
166
167 case 2:
168 return MALI_DRAW_INDEXED_UINT16;
169
170 case 4:
171 return MALI_DRAW_INDEXED_UINT32;
172
173 default:
174 unreachable("Invalid index size");
175 }
176 }
177
178 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
179 * good for the duration of the draw (transient), though it could last longer. Also get
180 * the bounds on the index buffer for the range accessed by the draw. We do
181 * these operations together because there are natural optimizations which
182 * require them to be together. */
183
184 static mali_ptr
185 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
186 const struct pipe_draw_info *info,
187 unsigned *min_index, unsigned *max_index)
188 {
189 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
190 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
191 off_t offset = info->start * info->index_size;
192 bool needs_indices = true;
193 mali_ptr out = 0;
194
195 if (info->max_index != ~0u) {
196 *min_index = info->min_index;
197 *max_index = info->max_index;
198 needs_indices = false;
199 }
200
201 if (!info->has_user_indices) {
202 /* Only resources can be directly mapped */
203 panfrost_batch_add_bo(batch, rsrc->bo,
204 PAN_BO_ACCESS_SHARED |
205 PAN_BO_ACCESS_READ |
206 PAN_BO_ACCESS_VERTEX_TILER);
207 out = rsrc->bo->gpu + offset;
208
209 /* Check the cache */
210 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
211 info->start,
212 info->count,
213 min_index,
214 max_index);
215 } else {
216 /* Otherwise, we need to upload to transient memory */
217 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
218 out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
219 info->count *
220 info->index_size);
221 }
222
223 if (needs_indices) {
224 /* Fallback */
225 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
226
227 if (!info->has_user_indices)
228 panfrost_minmax_cache_add(rsrc->index_cache,
229 info->start, info->count,
230 *min_index, *max_index);
231 }
232
233 return out;
234 }
235
236 void
237 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
238 const struct pipe_draw_info *info,
239 enum mali_draw_mode draw_mode,
240 struct mali_vertex_tiler_postfix *vertex_postfix,
241 struct mali_vertex_tiler_prefix *tiler_prefix,
242 struct mali_vertex_tiler_postfix *tiler_postfix,
243 unsigned *vertex_count,
244 unsigned *padded_count)
245 {
246 tiler_prefix->draw_mode = draw_mode;
247
248 unsigned draw_flags = 0;
249
250 if (panfrost_writes_point_size(ctx))
251 draw_flags |= MALI_DRAW_VARYING_SIZE;
252
253 if (info->primitive_restart)
254 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
255
256 /* These don't make much sense */
257
258 draw_flags |= 0x3000;
259
260 if (info->index_size) {
261 unsigned min_index = 0, max_index = 0;
262
263 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
264 info,
265 &min_index,
266 &max_index);
267
268 /* Use the corresponding values */
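/* Illustrative note: offset_bias_correction rebases the indices so that
 * min_index maps to zero, while offset_start (min_index + index_bias)
 * restores the original base when attributes are fetched, so only
 * max_index - min_index + 1 vertices need to be processed. */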
269 *vertex_count = max_index - min_index + 1;
270 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
271 tiler_prefix->offset_bias_correction = -min_index;
272 tiler_prefix->index_count = MALI_POSITIVE(info->count);
273 draw_flags |= panfrost_translate_index_size(info->index_size);
274 } else {
275 tiler_prefix->indices = 0;
276 *vertex_count = ctx->vertex_count;
277 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
278 tiler_prefix->offset_bias_correction = 0;
279 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
280 }
281
282 tiler_prefix->unknown_draw = draw_flags;
283
284 /* Encode the padded vertex count */
285
286 if (info->instance_count > 1) {
287 *padded_count = panfrost_padded_vertex_count(*vertex_count);
288
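/* The padded count decomposes as (2k + 1) << shift. Worked example
 * (illustrative): padded_count = 12 gives shift = ctz(12) = 2 and
 * k = 12 >> 3 = 1, since 12 = (2*1 + 1) << 2. */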
289 unsigned shift = __builtin_ctz(ctx->padded_count);
290 unsigned k = ctx->padded_count >> (shift + 1);
291
292 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
293 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
294 } else {
295 *padded_count = *vertex_count;
296
297 /* Reset instancing state */
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
300 }
301 }
302
303 static void
304 panfrost_shader_meta_init(struct panfrost_context *ctx,
305 enum pipe_shader_type st,
306 struct mali_shader_meta *meta)
307 {
308 const struct panfrost_device *dev = pan_device(ctx->base.screen);
309 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
310
311 memset(meta, 0, sizeof(*meta));
312 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
313 meta->attribute_count = ss->attribute_count;
314 meta->varying_count = ss->varying_count;
315 meta->texture_count = ctx->sampler_view_count[st];
316 meta->sampler_count = ctx->sampler_count[st];
317
318 if (dev->quirks & IS_BIFROST) {
319 if (st == PIPE_SHADER_VERTEX)
320 meta->bifrost1.unk1 = 0x800000;
321 else {
322 /* First clause ATEST |= 0x4000000.
323 * Less than 32 regs |= 0x200 */
324 meta->bifrost1.unk1 = 0x950020;
325 }
326
327 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
328 if (st == PIPE_SHADER_VERTEX)
329 meta->bifrost2.preload_regs = 0xC0;
330 else {
331 meta->bifrost2.preload_regs = 0x1;
332 SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
333 }
334
335 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
336 ss->uniform_cutoff);
337 } else {
338 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
339 ss->uniform_cutoff);
340 meta->midgard1.work_count = ss->work_reg_count;
341
342 /* TODO: This is not conformant on ES3 */
343 meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
344
345 meta->midgard1.flags_lo = 0x20;
346 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
347
348 SET_BIT(meta->midgard1.flags_hi, MALI_WRITES_GLOBAL, ss->writes_global);
349 }
350 }
351
352 static unsigned
353 panfrost_translate_compare_func(enum pipe_compare_func in)
354 {
355 switch (in) {
356 case PIPE_FUNC_NEVER:
357 return MALI_FUNC_NEVER;
358
359 case PIPE_FUNC_LESS:
360 return MALI_FUNC_LESS;
361
362 case PIPE_FUNC_EQUAL:
363 return MALI_FUNC_EQUAL;
364
365 case PIPE_FUNC_LEQUAL:
366 return MALI_FUNC_LEQUAL;
367
368 case PIPE_FUNC_GREATER:
369 return MALI_FUNC_GREATER;
370
371 case PIPE_FUNC_NOTEQUAL:
372 return MALI_FUNC_NOT_EQUAL;
373
374 case PIPE_FUNC_GEQUAL:
375 return MALI_FUNC_GEQUAL;
376
377 case PIPE_FUNC_ALWAYS:
378 return MALI_FUNC_ALWAYS;
379
380 default:
381 unreachable("Invalid func");
382 }
383 }
384
385 static unsigned
386 panfrost_translate_stencil_op(enum pipe_stencil_op in)
387 {
388 switch (in) {
389 case PIPE_STENCIL_OP_KEEP:
390 return MALI_STENCIL_OP_KEEP;
391
392 case PIPE_STENCIL_OP_ZERO:
393 return MALI_STENCIL_OP_ZERO;
394
395 case PIPE_STENCIL_OP_REPLACE:
396 return MALI_STENCIL_OP_REPLACE;
397
398 case PIPE_STENCIL_OP_INCR:
399 return MALI_STENCIL_OP_INCR_SAT;
400
401 case PIPE_STENCIL_OP_DECR:
402 return MALI_STENCIL_OP_DECR_SAT;
403
404 case PIPE_STENCIL_OP_INCR_WRAP:
405 return MALI_STENCIL_OP_INCR_WRAP;
406
407 case PIPE_STENCIL_OP_DECR_WRAP:
408 return MALI_STENCIL_OP_DECR_WRAP;
409
410 case PIPE_STENCIL_OP_INVERT:
411 return MALI_STENCIL_OP_INVERT;
412
413 default:
414 unreachable("Invalid stencil op");
415 }
416 }
417
418 static unsigned
419 translate_tex_wrap(enum pipe_tex_wrap w)
420 {
421 switch (w) {
422 case PIPE_TEX_WRAP_REPEAT:
423 return MALI_WRAP_MODE_REPEAT;
424
425 case PIPE_TEX_WRAP_CLAMP:
426 return MALI_WRAP_MODE_CLAMP;
427
428 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
429 return MALI_WRAP_MODE_CLAMP_TO_EDGE;
430
431 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
432 return MALI_WRAP_MODE_CLAMP_TO_BORDER;
433
434 case PIPE_TEX_WRAP_MIRROR_REPEAT:
435 return MALI_WRAP_MODE_MIRRORED_REPEAT;
436
437 case PIPE_TEX_WRAP_MIRROR_CLAMP:
438 return MALI_WRAP_MODE_MIRRORED_CLAMP;
439
440 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
441 return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
442
443 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
444 return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
445
446 default:
447 unreachable("Invalid wrap");
448 }
449 }
450
451 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
452 struct mali_sampler_descriptor *hw)
453 {
454 unsigned func = panfrost_translate_compare_func(cso->compare_func);
455 bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
456 bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
457 bool mip_linear = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;
458 unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
459 unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
460 unsigned mip_filter = mip_linear ?
461 (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
462 unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;
463
464 *hw = (struct mali_sampler_descriptor) {
465 .filter_mode = min_filter | mag_filter | mip_filter |
466 normalized,
467 .wrap_s = translate_tex_wrap(cso->wrap_s),
468 .wrap_t = translate_tex_wrap(cso->wrap_t),
469 .wrap_r = translate_tex_wrap(cso->wrap_r),
470 .compare_func = cso->compare_mode ?
471 panfrost_flip_compare_func(func) :
472 MALI_FUNC_NEVER,
473 .border_color = {
474 cso->border_color.f[0],
475 cso->border_color.f[1],
476 cso->border_color.f[2],
477 cso->border_color.f[3]
478 },
479 .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
480 .max_lod = FIXED_16(cso->max_lod, false),
481 .lod_bias = FIXED_16(cso->lod_bias, true), /* can be negative */
482 .seamless_cube_map = cso->seamless_cube_map,
483 };
484
485 /* If necessary, we disable mipmapping in the sampler descriptor by
486 * clamping the LOD as tight as possible (from 0 to epsilon,
487 * essentially -- remember these are fixed point numbers, so
488 * epsilon=1/256) */
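/* For example (illustrative): with cso->min_lod = 2.0 the clamp becomes
 * [2, 2 + 1/256], so only mip level 2 is ever sampled. */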
489
490 if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
491 hw->max_lod = hw->min_lod + 1;
492 }
493
494 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
495 struct bifrost_sampler_descriptor *hw)
496 {
497 *hw = (struct bifrost_sampler_descriptor) {
498 .unk1 = 0x1,
499 .wrap_s = translate_tex_wrap(cso->wrap_s),
500 .wrap_t = translate_tex_wrap(cso->wrap_t),
501 .wrap_r = translate_tex_wrap(cso->wrap_r),
502 .unk8 = 0x8,
503 .min_filter = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST,
504 .norm_coords = cso->normalized_coords,
505 .mip_filter = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR,
506 .mag_filter = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR,
507 .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
508 .max_lod = FIXED_16(cso->max_lod, false),
509 };
510
511 /* If necessary, we disable mipmapping in the sampler descriptor by
512 * clamping the LOD as tight as possible (from 0 to epsilon,
513 * essentially -- remember these are fixed point numbers, so
514 * epsilon=1/256) */
515
516 if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
517 hw->max_lod = hw->min_lod + 1;
518 }
519
520 static void
521 panfrost_make_stencil_state(const struct pipe_stencil_state *in,
522 void *out)
523 {
524 pan_pack(out, STENCIL, cfg) {
525 cfg.mask = in->valuemask;
526 cfg.compare_function = panfrost_translate_compare_func(in->func);
527 cfg.stencil_fail = panfrost_translate_stencil_op(in->fail_op);
528 cfg.depth_fail = panfrost_translate_stencil_op(in->zfail_op);
529 cfg.depth_pass = panfrost_translate_stencil_op(in->zpass_op);
530 }
531 }
532
533 static void
534 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
535 struct mali_shader_meta *fragmeta)
536 {
537 if (!ctx->rasterizer) {
538 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
539 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
540 fragmeta->depth_units = 0.0f;
541 fragmeta->depth_factor = 0.0f;
542 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
543 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
544 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, true);
545 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, true);
546 return;
547 }
548
549 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
550
551 bool msaa = rast->multisample;
552
553 /* TODO: Sample size */
554 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
555 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
556
557 struct panfrost_shader_state *fs;
558 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
559
560 /* EXT_shader_framebuffer_fetch requires the shader to be run
561 * per-sample when outputs are read. */
562 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
563 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
564
565 fragmeta->depth_units = rast->offset_units * 2.0f;
566 fragmeta->depth_factor = rast->offset_scale;
567
568 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
569
570 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
571 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
572
573 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
574 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
575 }
576
577 static void
578 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
579 struct mali_shader_meta *fragmeta)
580 {
581 const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
582 int zfunc = PIPE_FUNC_ALWAYS;
583
584 if (!zsa) {
585 struct pipe_stencil_state default_stencil = {
586 .enabled = 0,
587 .func = PIPE_FUNC_ALWAYS,
588 .fail_op = PIPE_STENCIL_OP_KEEP,
589 .zfail_op = PIPE_STENCIL_OP_KEEP,
590 .zpass_op = PIPE_STENCIL_OP_KEEP,
591 .writemask = 0xFF,
592 .valuemask = 0xFF
593 };
594
595 panfrost_make_stencil_state(&default_stencil,
596 &fragmeta->stencil_front);
597 fragmeta->stencil_mask_front = default_stencil.writemask;
598 fragmeta->stencil_back = fragmeta->stencil_front;
599 fragmeta->stencil_mask_back = default_stencil.writemask;
600 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
601 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
602 } else {
603 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
604 zsa->stencil[0].enabled);
605 panfrost_make_stencil_state(&zsa->stencil[0],
606 &fragmeta->stencil_front);
607 fragmeta->stencil_mask_front = zsa->stencil[0].writemask;
608
609 /* The bottom 8 bits of the stencil state are the stencil ref; the ref
610 * is no more than 8 bits. Be extra careful. */
611 fragmeta->stencil_front.opaque[0] |= ctx->stencil_ref.ref_value[0];
612
613 /* If back-stencil is not enabled, use the front values */
614
615 if (zsa->stencil[1].enabled) {
616 panfrost_make_stencil_state(&zsa->stencil[1],
617 &fragmeta->stencil_back);
618 fragmeta->stencil_mask_back = zsa->stencil[1].writemask;
619 fragmeta->stencil_back.opaque[0] |= ctx->stencil_ref.ref_value[1];
620 } else {
621 fragmeta->stencil_back = fragmeta->stencil_front;
622 fragmeta->stencil_mask_back = fragmeta->stencil_mask_front;
623 }
624
625 if (zsa->depth.enabled)
626 zfunc = zsa->depth.func;
627
628 /* Depth state (TODO: Refactor) */
629
630 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
631 zsa->depth.writemask);
632 }
633
634 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
635 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
636 }
637
638 static bool
639 panfrost_fs_required(
640 struct panfrost_shader_state *fs,
641 struct panfrost_blend_final *blend,
642 unsigned rt_count)
643 {
644 /* If we generally have side effects */
645 if (fs->fs_sidefx)
646 return true;
647
648 /* If colour is written we need to execute */
649 for (unsigned i = 0; i < rt_count; ++i) {
650 if (!blend[i].no_colour)
651 return true;
652 }
653
654 /* If depth is written and not implied we need to execute.
655 * TODO: Predicate on Z/S writes being enabled */
656 return (fs->writes_depth || fs->writes_stencil);
657 }
658
659 static void
660 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
661 struct mali_shader_meta *fragmeta,
662 void *rts)
663 {
664 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
665 const struct panfrost_device *dev = pan_device(ctx->base.screen);
666 struct panfrost_shader_state *fs;
667 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
668
669 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
670 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
671 !ctx->blend->base.dither);
672
673 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
674 ctx->blend->base.alpha_to_coverage);
675
676 /* Get blending setup */
677 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
678
679 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
680 unsigned shader_offset = 0;
681 struct panfrost_bo *shader_bo = NULL;
682
683 for (unsigned c = 0; c < rt_count; ++c)
684 blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
685 &shader_offset);
686
687 /* Disable shader execution if we can */
688 if (dev->quirks & MIDGARD_SHADERLESS
689 && !panfrost_fs_required(fs, blend, rt_count)) {
690 fragmeta->shader = 0;
691 fragmeta->attribute_count = 0;
692 fragmeta->varying_count = 0;
693 fragmeta->texture_count = 0;
694 fragmeta->sampler_count = 0;
695
696 /* This feature is not known to work on Bifrost */
697 fragmeta->midgard1.work_count = 1;
698 fragmeta->midgard1.uniform_count = 0;
699 fragmeta->midgard1.uniform_buffer_count = 0;
700 }
701
702 /* If there is a blend shader, work registers are shared. We impose 8
703 * work registers as a limit for blend shaders. Should be lower XXX */
704
705 if (!(dev->quirks & IS_BIFROST)) {
706 for (unsigned c = 0; c < rt_count; ++c) {
707 if (blend[c].is_shader) {
708 fragmeta->midgard1.work_count =
709 MAX2(fragmeta->midgard1.work_count, 8);
710 }
711 }
712 }
713
714 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
715 * copied to the appended blend_meta (by convention), but this is the
716 * field actually read by the hardware. (Or maybe both are read...?).
717 * Specify the last RTi with a blend shader. */
718
719 fragmeta->blend.shader = 0;
720
721 for (signed rt = (rt_count - 1); rt >= 0; --rt) {
722 if (!blend[rt].is_shader)
723 continue;
724
725 fragmeta->blend.shader = blend[rt].shader.gpu |
726 blend[rt].shader.first_tag;
727 break;
728 }
729
730 if (dev->quirks & MIDGARD_SFBD) {
731 /* On single render target (SFBD) platforms, the blend
732 * information lives inside the shader meta itself. We additionally
733 * need to signal CAN_DISCARD for nontrivial blend modes (so
734 * we're able to read back the destination buffer) */
735
736 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
737 blend[0].is_shader);
738
739 if (!blend[0].is_shader) {
740 fragmeta->blend.equation = *blend[0].equation.equation;
741 fragmeta->blend.constant = blend[0].equation.constant;
742 }
743
744 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
745 !blend[0].no_blending || fs->can_discard);
746
747 batch->draws |= PIPE_CLEAR_COLOR0;
748 return;
749 }
750
751 if (dev->quirks & IS_BIFROST) {
752 bool no_blend = true;
753
754 for (unsigned i = 0; i < rt_count; ++i)
755 no_blend &= (blend[i].no_blending | blend[i].no_colour);
756
757 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
758 !fs->can_discard && !fs->writes_depth && no_blend);
759 }
760
761 /* Additional blend descriptor tacked on for jobs using MFBD */
762
763 for (unsigned i = 0; i < rt_count; ++i) {
764 unsigned flags = 0;
765
766 if (ctx->pipe_framebuffer.nr_cbufs > i && !blend[i].no_colour) {
767 flags = 0x200;
768 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
769
770 bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
771 (ctx->pipe_framebuffer.cbufs[i]) &&
772 util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
773
774 SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
775 SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
776 SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
777 SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
778 }
779
780 if (dev->quirks & IS_BIFROST) {
781 struct bifrost_blend_rt *brts = rts;
782
783 brts[i].flags = flags;
784
785 if (blend[i].is_shader) {
786 /* The blend shader's address needs to share
787 * the same top 32 bits as the fragment shader's.
788 * TODO: Ensure that's always the case.
789 */
790 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
791 (fs->bo->gpu & (0xffffffffull << 32)));
792 brts[i].shader = blend[i].shader.gpu;
793 brts[i].unk2 = 0x0;
794 } else if (ctx->pipe_framebuffer.nr_cbufs > i) {
795 enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
796 const struct util_format_description *format_desc;
797 format_desc = util_format_description(format);
798
799 brts[i].equation = *blend[i].equation.equation;
800
801 /* TODO: this is a bit more complicated */
802 brts[i].constant = blend[i].equation.constant;
803
804 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
805
806 /* 0x19 disables blending and forces REPLACE
807 * mode (equivalent to rgb_mode = alpha_mode =
808 * 0x122, colour mask = 0xF). 0x1a allows
809 * blending. */
810 brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
811
812 brts[i].shader_type = fs->blend_types[i];
813 } else {
814 /* Dummy attachment for depth-only */
815 brts[i].unk2 = 0x3;
816 brts[i].shader_type = fs->blend_types[i];
817 }
818 } else {
819 struct midgard_blend_rt *mrts = rts;
820 mrts[i].flags = flags;
821
822 if (blend[i].is_shader) {
823 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
824 } else {
825 mrts[i].blend.equation = *blend[i].equation.equation;
826 mrts[i].blend.constant = blend[i].equation.constant;
827 }
828 }
829 }
830 }
831
832 static void
833 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
834 struct mali_shader_meta *fragmeta,
835 void *rts)
836 {
837 const struct panfrost_device *dev = pan_device(ctx->base.screen);
838 struct panfrost_shader_state *fs;
839
840 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
841
842 bool msaa = ctx->rasterizer && ctx->rasterizer->base.multisample;
843 fragmeta->coverage_mask = (msaa ? ctx->sample_mask : ~0) & 0xF;
844
845 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
846 fragmeta->unknown2_4 = 0x4e0;
847
848 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
849 * is required (independent of 32-bit/64-bit descriptors), or why it's
850 * not used on later GPU revisions. Otherwise, all shader jobs fault on
851 * these earlier chips (perhaps this is a chicken bit of some kind).
852 * More investigation is needed. */
853
854 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
855
856 if (dev->quirks & IS_BIFROST) {
857 /* TODO */
858 } else {
859 /* Depending on whether it's legal in the given shader, we try to
860 * enable early-z testing. TODO: respect e-z force */
861
862 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
863 !fs->can_discard && !fs->writes_global &&
864 !fs->writes_depth && !fs->writes_stencil &&
865 !ctx->blend->base.alpha_to_coverage);
866
867 /* Add the writes Z/S flags if needed. */
868 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
869 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
870
871 /* Any time texturing is used, derivatives are implicitly calculated,
872 * so we need to enable helper invocations */
873
874 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
875 fs->helper_invocations);
876
877 /* If discard is enabled, which bit we set to convey this
878 * depends on whether depth/stencil is used for the draw.
879 * Just one of depth OR stencil is enough to trigger this. */
880
881 const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
882 bool zs_enabled = fs->writes_depth || fs->writes_stencil;
883
884 if (zsa) {
885 zs_enabled |= (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS);
886 zs_enabled |= zsa->stencil[0].enabled;
887 }
888
889 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
890 fs->outputs_read || (!zs_enabled && fs->can_discard));
891 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
892 }
893
894 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
895 panfrost_frag_meta_zsa_update(ctx, fragmeta);
896 panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
897 }
898
899 void
900 panfrost_emit_shader_meta(struct panfrost_batch *batch,
901 enum pipe_shader_type st,
902 struct mali_vertex_tiler_postfix *postfix)
903 {
904 struct panfrost_context *ctx = batch->ctx;
905 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
906
907 if (!ss) {
908 postfix->shader = 0;
909 return;
910 }
911
912 struct mali_shader_meta meta;
913
914 panfrost_shader_meta_init(ctx, st, &meta);
915
916 /* Add the shader BO to the batch. */
917 panfrost_batch_add_bo(batch, ss->bo,
918 PAN_BO_ACCESS_PRIVATE |
919 PAN_BO_ACCESS_READ |
920 panfrost_bo_access_for_stage(st));
921
922 mali_ptr shader_ptr;
923
924 if (st == PIPE_SHADER_FRAGMENT) {
925 struct panfrost_device *dev = pan_device(ctx->base.screen);
926 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
927 size_t desc_size = sizeof(meta);
928 void *rts = NULL;
929 struct panfrost_transfer xfer;
930 unsigned rt_size;
931
932 if (dev->quirks & MIDGARD_SFBD)
933 rt_size = 0;
934 else if (dev->quirks & IS_BIFROST)
935 rt_size = sizeof(struct bifrost_blend_rt);
936 else
937 rt_size = sizeof(struct midgard_blend_rt);
938
939 desc_size += rt_size * rt_count;
940
941 if (rt_size)
942 rts = rzalloc_size(ctx, rt_size * rt_count);
943
944 panfrost_frag_shader_meta_init(ctx, &meta, rts);
945
946 xfer = panfrost_pool_alloc(&batch->pool, desc_size);
947
948 memcpy(xfer.cpu, &meta, sizeof(meta));
949 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
950
951 if (rt_size)
952 ralloc_free(rts);
953
954 shader_ptr = xfer.gpu;
955 } else {
956 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
957 sizeof(meta));
958 }
959
960 postfix->shader = shader_ptr;
961 }
962
963 void
964 panfrost_emit_viewport(struct panfrost_batch *batch,
965 struct mali_vertex_tiler_postfix *tiler_postfix)
966 {
967 struct panfrost_context *ctx = batch->ctx;
968 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
969 const struct pipe_scissor_state *ss = &ctx->scissor;
970 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
971 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
972
973 /* Derive min/max from translate/scale. Note since |x| >= 0 by
974 * definition, we have that -|x| <= |x| hence translate - |scale| <=
975 * translate + |scale|, so the ordering is correct here. */
976 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
977 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
978 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
979 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
980 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
981 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
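/* Illustrative example: a full-screen viewport of width 800 has
 * translate[0] = 400 and scale[0] = 400, giving vp_minx = 0 and
 * vp_maxx = 800. */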
982
983 /* Scissor to the intersection of viewport and to the scissor, clamped
984 * to the framebuffer */
985
986 unsigned minx = MIN2(fb->width, vp_minx);
987 unsigned maxx = MIN2(fb->width, vp_maxx);
988 unsigned miny = MIN2(fb->height, vp_miny);
989 unsigned maxy = MIN2(fb->height, vp_maxy);
990
991 if (ss && rast && rast->scissor) {
992 minx = MAX2(ss->minx, minx);
993 miny = MAX2(ss->miny, miny);
994 maxx = MIN2(ss->maxx, maxx);
995 maxy = MIN2(ss->maxy, maxy);
996 }
997
998 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
999
1000 pan_pack(T.cpu, VIEWPORT, cfg) {
1001 cfg.scissor_minimum_x = minx;
1002 cfg.scissor_minimum_y = miny;
1003 cfg.scissor_maximum_x = maxx - 1;
1004 cfg.scissor_maximum_y = maxy - 1;
1005
1006 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
1007 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
1008 }
1009
1010 tiler_postfix->viewport = T.gpu;
1011 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
1012 }
1013
1014 static mali_ptr
1015 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
1016 enum pipe_shader_type st,
1017 struct panfrost_constant_buffer *buf,
1018 unsigned index)
1019 {
1020 struct pipe_constant_buffer *cb = &buf->cb[index];
1021 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1022
1023 if (rsrc) {
1024 panfrost_batch_add_bo(batch, rsrc->bo,
1025 PAN_BO_ACCESS_SHARED |
1026 PAN_BO_ACCESS_READ |
1027 panfrost_bo_access_for_stage(st));
1028
1029 /* Alignment guaranteed by
1030 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
1031 return rsrc->bo->gpu + cb->buffer_offset;
1032 } else if (cb->user_buffer) {
1033 return panfrost_pool_upload(&batch->pool,
1034 cb->user_buffer +
1035 cb->buffer_offset,
1036 cb->buffer_size);
1037 } else {
1038 unreachable("No constant buffer");
1039 }
1040 }
1041
1042 struct sysval_uniform {
1043 union {
1044 float f[4];
1045 int32_t i[4];
1046 uint32_t u[4];
1047 uint64_t du[2];
1048 };
1049 };
1050
1051 static void
1052 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
1053 struct sysval_uniform *uniform)
1054 {
1055 struct panfrost_context *ctx = batch->ctx;
1056 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
1057
1058 uniform->f[0] = vp->scale[0];
1059 uniform->f[1] = vp->scale[1];
1060 uniform->f[2] = vp->scale[2];
1061 }
1062
1063 static void
1064 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
1065 struct sysval_uniform *uniform)
1066 {
1067 struct panfrost_context *ctx = batch->ctx;
1068 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
1069
1070 uniform->f[0] = vp->translate[0];
1071 uniform->f[1] = vp->translate[1];
1072 uniform->f[2] = vp->translate[2];
1073 }
1074
1075 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
1076 enum pipe_shader_type st,
1077 unsigned int sysvalid,
1078 struct sysval_uniform *uniform)
1079 {
1080 struct panfrost_context *ctx = batch->ctx;
1081 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
1082 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
1083 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
1084 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
1085
1086 assert(dim);
1087 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
1088
1089 if (dim > 1)
1090 uniform->i[1] = u_minify(tex->texture->height0,
1091 tex->u.tex.first_level);
1092
1093 if (dim > 2)
1094 uniform->i[2] = u_minify(tex->texture->depth0,
1095 tex->u.tex.first_level);
1096
1097 if (is_array)
1098 uniform->i[dim] = tex->texture->array_size;
1099 }
1100
1101 static void
1102 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
1103 enum pipe_shader_type st,
1104 unsigned ssbo_id,
1105 struct sysval_uniform *uniform)
1106 {
1107 struct panfrost_context *ctx = batch->ctx;
1108
1109 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
1110 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
1111
1112 /* Compute address */
1113 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
1114
1115 panfrost_batch_add_bo(batch, bo,
1116 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
1117 panfrost_bo_access_for_stage(st));
1118
1119 /* Upload address and size as sysval */
1120 uniform->du[0] = bo->gpu + sb.buffer_offset;
1121 uniform->u[2] = sb.buffer_size;
1122 }
1123
1124 static void
1125 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
1126 enum pipe_shader_type st,
1127 unsigned samp_idx,
1128 struct sysval_uniform *uniform)
1129 {
1130 struct panfrost_context *ctx = batch->ctx;
1131 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1132
1133 uniform->f[0] = sampl->min_lod;
1134 uniform->f[1] = sampl->max_lod;
1135 uniform->f[2] = sampl->lod_bias;
1136
1137 /* Even without any errata, Midgard represents "no mipmapping" as
1138 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1139 * panfrost_create_sampler_state which also explains our choice of
1140 * epsilon value (again to keep behaviour consistent) */
1141
1142 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1143 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1144 }
1145
1146 static void
1147 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1148 struct sysval_uniform *uniform)
1149 {
1150 struct panfrost_context *ctx = batch->ctx;
1151
1152 uniform->u[0] = ctx->compute_grid->grid[0];
1153 uniform->u[1] = ctx->compute_grid->grid[1];
1154 uniform->u[2] = ctx->compute_grid->grid[2];
1155 }
1156
1157 static void
1158 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1159 struct panfrost_shader_state *ss,
1160 enum pipe_shader_type st)
1161 {
1162 struct sysval_uniform *uniforms = (void *)buf;
1163
1164 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1165 int sysval = ss->sysval[i];
1166
1167 switch (PAN_SYSVAL_TYPE(sysval)) {
1168 case PAN_SYSVAL_VIEWPORT_SCALE:
1169 panfrost_upload_viewport_scale_sysval(batch,
1170 &uniforms[i]);
1171 break;
1172 case PAN_SYSVAL_VIEWPORT_OFFSET:
1173 panfrost_upload_viewport_offset_sysval(batch,
1174 &uniforms[i]);
1175 break;
1176 case PAN_SYSVAL_TEXTURE_SIZE:
1177 panfrost_upload_txs_sysval(batch, st,
1178 PAN_SYSVAL_ID(sysval),
1179 &uniforms[i]);
1180 break;
1181 case PAN_SYSVAL_SSBO:
1182 panfrost_upload_ssbo_sysval(batch, st,
1183 PAN_SYSVAL_ID(sysval),
1184 &uniforms[i]);
1185 break;
1186 case PAN_SYSVAL_NUM_WORK_GROUPS:
1187 panfrost_upload_num_work_groups_sysval(batch,
1188 &uniforms[i]);
1189 break;
1190 case PAN_SYSVAL_SAMPLER:
1191 panfrost_upload_sampler_sysval(batch, st,
1192 PAN_SYSVAL_ID(sysval),
1193 &uniforms[i]);
1194 break;
1195 default:
1196 assert(0);
1197 }
1198 }
1199 }
1200
1201 static const void *
1202 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1203 unsigned index)
1204 {
1205 struct pipe_constant_buffer *cb = &buf->cb[index];
1206 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1207
1208 if (rsrc)
1209 return rsrc->bo->cpu;
1210 else if (cb->user_buffer)
1211 return cb->user_buffer;
1212 else
1213 unreachable("No constant buffer");
1214 }
1215
1216 void
1217 panfrost_emit_const_buf(struct panfrost_batch *batch,
1218 enum pipe_shader_type stage,
1219 struct mali_vertex_tiler_postfix *postfix)
1220 {
1221 struct panfrost_context *ctx = batch->ctx;
1222 struct panfrost_shader_variants *all = ctx->shader[stage];
1223
1224 if (!all)
1225 return;
1226
1227 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1228
1229 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1230
1231 /* Uniforms are implicitly UBO #0 */
1232 bool has_uniforms = buf->enabled_mask & (1 << 0);
1233
1234 /* Allocate room for the sysval and the uniforms */
1235 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1236 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1237 size_t size = sys_size + uniform_size;
1238 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1239 size);
1240
1241 /* Upload sysvals requested by the shader */
1242 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1243
1244 /* Upload uniforms */
1245 if (has_uniforms && uniform_size) {
1246 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1247 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1248 }
1249
1250 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1251 * uploaded */
1252
1253 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1254 assert(ubo_count >= 1);
1255
1256 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1257 struct panfrost_transfer ubos = panfrost_pool_alloc(&batch->pool, sz);
1258 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1259
1260 /* Upload uniforms as a UBO */
1261
1262 if (ss->uniform_count) {
1263 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1264 cfg.entries = ss->uniform_count;
1265 cfg.pointer = transfer.gpu;
1266 }
1267 } else {
1268 *ubo_ptr = 0;
1269 }
1270
1271 /* The rest are honest-to-goodness UBOs */
1272
1273 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1274 size_t usz = buf->cb[ubo].buffer_size;
1275 bool enabled = buf->enabled_mask & (1 << ubo);
1276 bool empty = usz == 0;
1277
1278 if (!enabled || empty) {
1279 ubo_ptr[ubo] = 0;
1280 continue;
1281 }
1282
1283 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1284 cfg.entries = DIV_ROUND_UP(usz, 16);
1285 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1286 stage, buf, ubo);
1287 }
1288 }
1289
1290 postfix->uniforms = transfer.gpu;
1291 postfix->uniform_buffers = ubos.gpu;
1292
1293 buf->dirty_mask = 0;
1294 }
1295
1296 void
1297 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1298 const struct pipe_grid_info *info,
1299 struct midgard_payload_vertex_tiler *vtp)
1300 {
1301 struct panfrost_context *ctx = batch->ctx;
1302 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1303 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1304 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1305 128));
1306 unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
1307 info->grid[2] * 4;
1308 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1309 shared_size,
1310 1);
1311
1312 struct mali_shared_memory shared = {
1313 .shared_memory = bo->gpu,
1314 .shared_workgroup_count =
1315 util_logbase2_ceil(info->grid[0]) +
1316 util_logbase2_ceil(info->grid[1]) +
1317 util_logbase2_ceil(info->grid[2]),
1318 .shared_unk1 = 0x2,
1319 .shared_shift = util_logbase2(single_size) - 1
1320 };
1321
1322 vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
1323 sizeof(shared));
1324 }
1325
1326 static mali_ptr
1327 panfrost_get_tex_desc(struct panfrost_batch *batch,
1328 enum pipe_shader_type st,
1329 struct panfrost_sampler_view *view)
1330 {
1331 if (!view)
1332 return (mali_ptr) 0;
1333
1334 struct pipe_sampler_view *pview = &view->base;
1335 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1336
1337 /* Add the BO to the job so it's retained until the job is done. */
1338
1339 panfrost_batch_add_bo(batch, rsrc->bo,
1340 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1341 panfrost_bo_access_for_stage(st));
1342
1343 panfrost_batch_add_bo(batch, view->bo,
1344 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1345 panfrost_bo_access_for_stage(st));
1346
1347 return view->bo->gpu;
1348 }
1349
1350 static void
1351 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1352 struct pipe_context *pctx)
1353 {
1354 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1355 if (view->texture_bo != rsrc->bo->gpu ||
1356 view->modifier != rsrc->modifier) {
1357 panfrost_bo_unreference(view->bo);
1358 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1359 }
1360 }
1361
1362 void
1363 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1364 enum pipe_shader_type stage,
1365 struct mali_vertex_tiler_postfix *postfix)
1366 {
1367 struct panfrost_context *ctx = batch->ctx;
1368 struct panfrost_device *device = pan_device(ctx->base.screen);
1369
1370 if (!ctx->sampler_view_count[stage])
1371 return;
1372
1373 if (device->quirks & IS_BIFROST) {
1374 struct bifrost_texture_descriptor *descriptors;
1375
1376 descriptors = malloc(sizeof(struct bifrost_texture_descriptor) *
1377 ctx->sampler_view_count[stage]);
1378
1379 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1380 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1381 struct pipe_sampler_view *pview = &view->base;
1382 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1383 panfrost_update_sampler_view(view, &ctx->base);
1384
1385 /* Add the BOs to the job so they are retained until the job is done. */
1386
1387 panfrost_batch_add_bo(batch, rsrc->bo,
1388 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1389 panfrost_bo_access_for_stage(stage));
1390
1391 panfrost_batch_add_bo(batch, view->bo,
1392 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1393 panfrost_bo_access_for_stage(stage));
1394
1395 memcpy(&descriptors[i], view->bifrost_descriptor, sizeof(*view->bifrost_descriptor));
1396 }
1397
1398 postfix->textures = panfrost_pool_upload(&batch->pool,
1399 descriptors,
1400 sizeof(struct bifrost_texture_descriptor) *
1401 ctx->sampler_view_count[stage]);
1402
1403 free(descriptors);
1404 } else {
1405 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1406
1407 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1408 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1409
1410 panfrost_update_sampler_view(view, &ctx->base);
1411
1412 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1413 }
1414
1415 postfix->textures = panfrost_pool_upload(&batch->pool,
1416 trampolines,
1417 sizeof(uint64_t) *
1418 ctx->sampler_view_count[stage]);
1419 }
1420 }
1421
1422 void
1423 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1424 enum pipe_shader_type stage,
1425 struct mali_vertex_tiler_postfix *postfix)
1426 {
1427 struct panfrost_context *ctx = batch->ctx;
1428 struct panfrost_device *device = pan_device(ctx->base.screen);
1429
1430 if (!ctx->sampler_count[stage])
1431 return;
1432
1433 if (device->quirks & IS_BIFROST) {
1434 size_t desc_size = sizeof(struct bifrost_sampler_descriptor);
1435 size_t transfer_size = desc_size * ctx->sampler_count[stage];
1436 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1437 transfer_size);
1438 struct bifrost_sampler_descriptor *desc = (struct bifrost_sampler_descriptor *)transfer.cpu;
1439
1440 for (int i = 0; i < ctx->sampler_count[stage]; ++i)
1441 desc[i] = ctx->samplers[stage][i]->bifrost_hw;
1442
1443 postfix->sampler_descriptor = transfer.gpu;
1444 } else {
1445 size_t desc_size = sizeof(struct mali_sampler_descriptor);
1446 size_t transfer_size = desc_size * ctx->sampler_count[stage];
1447 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1448 transfer_size);
1449 struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *)transfer.cpu;
1450
1451 for (int i = 0; i < ctx->sampler_count[stage]; ++i)
1452 desc[i] = ctx->samplers[stage][i]->midgard_hw;
1453
1454 postfix->sampler_descriptor = transfer.gpu;
1455 }
1456 }
1457
1458 void
1459 panfrost_emit_vertex_attr_meta(struct panfrost_batch *batch,
1460 struct mali_vertex_tiler_postfix *vertex_postfix)
1461 {
1462 struct panfrost_context *ctx = batch->ctx;
1463
1464 if (!ctx->vertex)
1465 return;
1466
1467 struct panfrost_vertex_state *so = ctx->vertex;
1468
1469 panfrost_vertex_state_upd_attr_offs(ctx, vertex_postfix);
1470 vertex_postfix->attribute_meta = panfrost_pool_upload(&batch->pool, so->hw,
1471 sizeof(*so->hw) *
1472 PAN_MAX_ATTRIBUTE);
1473 }
1474
1475 void
1476 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1477 struct mali_vertex_tiler_postfix *vertex_postfix)
1478 {
1479 struct panfrost_context *ctx = batch->ctx;
1480 struct panfrost_vertex_state *so = ctx->vertex;
1481
1482 /* Staged mali_attr, and index into them. i =/= k, depending on the
1483 * vertex buffer mask and instancing. Twice as much room is allocated,
1484 * for a worst case of NPOT_DIVIDEs which take up an extra slot */
1485 union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
1486 unsigned k = 0;
1487
1488 for (unsigned i = 0; i < so->num_elements; ++i) {
1489 /* We map a mali_attr to be 1:1 with the mali_attr_meta, which
1490 * means duplicating some vertex buffers (who cares? aside from
1491 * maybe some caching implications but I somehow doubt that
1492 * matters) */
1493
1494 struct pipe_vertex_element *elem = &so->pipe[i];
1495 unsigned vbi = elem->vertex_buffer_index;
1496
1497 /* The exception to 1:1 mapping is that we can have multiple
1498 * entries (NPOT divisors), so we fix up anyway */
1499
1500 so->hw[i].index = k;
1501
1502 if (!(ctx->vb_mask & (1 << vbi)))
1503 continue;
1504
1505 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1506 struct panfrost_resource *rsrc;
1507
1508 rsrc = pan_resource(buf->buffer.resource);
1509 if (!rsrc)
1510 continue;
1511
1512 /* Align to 64 bytes by masking off the lower bits. This
1513 * will be adjusted back when we fixup the src_offset in
1514 * mali_attr_meta */
1515
1516 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1517 mali_ptr addr = raw_addr & ~63;
1518 unsigned chopped_addr = raw_addr - addr;
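/* Illustrative example: if raw_addr ends in ...74, then addr ends in
 * ...40 and chopped_addr = 0x34; the 0x34 is added back to the size
 * below and later folded into src_offset in the attribute metadata. */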
1519
1520 /* Add a dependency of the batch on the vertex buffer */
1521 panfrost_batch_add_bo(batch, rsrc->bo,
1522 PAN_BO_ACCESS_SHARED |
1523 PAN_BO_ACCESS_READ |
1524 PAN_BO_ACCESS_VERTEX_TILER);
1525
1526 /* Set common fields */
1527 attrs[k].elements = addr;
1528 attrs[k].stride = buf->stride;
1529
1530 /* Since we advanced the base pointer, we shrink the buffer
1531 * size */
1532 attrs[k].size = rsrc->base.width0 - buf->buffer_offset;
1533
1534 /* We need to add the extra size we masked off (for
1535 * correctness) so the data doesn't get clamped away */
1536 attrs[k].size += chopped_addr;
1537
1538 /* For non-instancing make sure we initialize */
1539 attrs[k].shift = attrs[k].extra_flags = 0;
1540
1541 /* Instancing uses a dramatically different code path than
1542 * linear, so dispatch for the actual emission now that the
1543 * common code is finished */
1544
1545 unsigned divisor = elem->instance_divisor;
1546
1547 if (divisor && ctx->instance_count == 1) {
1548 /* Silly corner case where there's a divisor (= 1) but
1549 * there's no legitimate instancing. We want *every*
1550 * attribute to be the same, so set stride to zero so
1551 * we don't go anywhere. */
1552
1553 attrs[k].size = attrs[k].stride + chopped_addr;
1554 attrs[k].stride = 0;
1555 attrs[k++].elements |= MALI_ATTR_LINEAR;
1556 } else if (ctx->instance_count <= 1) {
1557 /* Normal, non-instanced attributes */
1558 attrs[k++].elements |= MALI_ATTR_LINEAR;
1559 } else {
1560 unsigned instance_shift = vertex_postfix->instance_shift;
1561 unsigned instance_odd = vertex_postfix->instance_odd;
1562
1563 k += panfrost_vertex_instanced(ctx->padded_count,
1564 instance_shift,
1565 instance_odd,
1566 divisor, &attrs[k]);
1567 }
1568 }
1569
1570 /* Add special gl_VertexID/gl_InstanceID buffers */
1571
1572 panfrost_vertex_id(ctx->padded_count, &attrs[k]);
1573 so->hw[PAN_VERTEX_ID].index = k++;
1574 panfrost_instance_id(ctx->padded_count, &attrs[k]);
1575 so->hw[PAN_INSTANCE_ID].index = k++;
1576
1577 /* Upload whatever we emitted and go */
1578
1579 vertex_postfix->attributes = panfrost_pool_upload(&batch->pool, attrs,
1580 k * sizeof(*attrs));
1581 }
1582
1583 static mali_ptr
1584 panfrost_emit_varyings(struct panfrost_batch *batch, union mali_attr *slot,
1585 unsigned stride, unsigned count)
1586 {
1587 /* Fill out the descriptor */
1588 slot->stride = stride;
1589 slot->size = stride * count;
1590 slot->shift = slot->extra_flags = 0;
1591
1592 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1593 slot->size);
1594
1595 slot->elements = transfer.gpu | MALI_ATTR_LINEAR;
1596
1597 return transfer.gpu;
1598 }
1599
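/* Descriptive note (assumes gallium stream-output strides/offsets are in
 * dwords, hence the * 4): streamout buffers are bound at 64-byte aligned
 * addresses, so this returns the leftover byte offset that must later be
 * applied via src_offset. */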
1600 static unsigned
1601 panfrost_streamout_offset(unsigned stride, unsigned offset,
1602 struct pipe_stream_output_target *target)
1603 {
1604 return (target->buffer_offset + (offset * stride * 4)) & 63;
1605 }
1606
1607 static void
1608 panfrost_emit_streamout(struct panfrost_batch *batch, union mali_attr *slot,
1609 unsigned stride, unsigned offset, unsigned count,
1610 struct pipe_stream_output_target *target)
1611 {
1612 /* Fill out the descriptor */
1613 slot->stride = stride * 4;
1614 slot->shift = slot->extra_flags = 0;
1615
1616 unsigned max_size = target->buffer_size;
1617 unsigned expected_size = slot->stride * count;
1618
1619 /* Grab the BO and bind it to the batch */
1620 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1621
1622 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1623 * the perspective of the TILER and FRAGMENT.
1624 */
1625 panfrost_batch_add_bo(batch, bo,
1626 PAN_BO_ACCESS_SHARED |
1627 PAN_BO_ACCESS_RW |
1628 PAN_BO_ACCESS_VERTEX_TILER |
1629 PAN_BO_ACCESS_FRAGMENT);
1630
1631 /* We will have an offset applied to get alignment */
1632 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * slot->stride);
1633 slot->elements = (addr & ~63) | MALI_ATTR_LINEAR;
1634 slot->size = MIN2(max_size, expected_size) + (addr & 63);
1635 }
1636
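/* Descriptive note: the sprite-coord mask uses one bit per generic texcoord
 * (bits 0-7 for TEX0-TEX7) plus bit 8 for gl_PointCoord (PNTC). */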
1637 static bool
1638 has_point_coord(unsigned mask, gl_varying_slot loc)
1639 {
1640 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1641 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1642 else if (loc == VARYING_SLOT_PNTC)
1643 return (mask & (1 << 8));
1644 else
1645 return false;
1646 }
1647
1648 /* Helpers for manipulating stream out information so we can pack varyings
1649 * accordingly. Compute the src_offset for a given captured varying */
1650
1651 static struct pipe_stream_output *
1652 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1653 {
1654 for (unsigned i = 0; i < info->num_outputs; ++i) {
1655 if (info->output[i].register_index == loc)
1656 return &info->output[i];
1657 }
1658
1659 unreachable("Varying not captured");
1660 }
1661
1662 static unsigned
1663 pan_varying_size(enum mali_format fmt)
1664 {
1665 unsigned type = MALI_EXTRACT_TYPE(fmt);
1666 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1667 unsigned bits = MALI_EXTRACT_BITS(fmt);
1668 unsigned bpc = 0;
1669
1670 if (bits == MALI_CHANNEL_FLOAT) {
1671 /* No doubles */
1672 bool fp16 = (type == MALI_FORMAT_SINT);
1673 assert(fp16 || (type == MALI_FORMAT_UNORM));
1674
1675 bpc = fp16 ? 2 : 4;
1676 } else {
1677 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1678
1679 /* See the enums */
1680 bits = 1 << bits;
1681 assert(bits >= 8);
1682 bpc = bits / 8;
1683 }
1684
1685 return bpc * chan;
1686 }
1687
1688 /* Indices for named (non-XFB) varyings that are present. These are packed
1689 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1690 * PAN_VARY_*). This has the nice property that you can lookup the buffer index
1691 * of a given special field given a shift S by:
1692 *
1693 * idx = popcount(P & ((1 << S) - 1))
1694 *
1695 * That is... look at all of the varyings that come earlier and count them; that
1696 * count is the new (zero-based) index. Likewise, the total number of special
1697 * buffers required is simply popcount(P)
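 *
 * Worked example (illustrative): with P = 0b1011 (GENERAL, POSITION and
 * PNTCOORD present), the buffer index for PNTCOORD (S = 3) is
 * popcount(0b1011 & 0b0111) = 2, and popcount(P) = 3 buffers are needed.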
1698 */
1699
1700 enum pan_special_varying {
1701 PAN_VARY_GENERAL = 0,
1702 PAN_VARY_POSITION = 1,
1703 PAN_VARY_PSIZ = 2,
1704 PAN_VARY_PNTCOORD = 3,
1705 PAN_VARY_FACE = 4,
1706 PAN_VARY_FRAGCOORD = 5,
1707
1708 /* Keep last */
1709 PAN_VARY_MAX,
1710 };
1711
1712 /* Given a varying, figure out which index it corresponds to */
1713
1714 static inline unsigned
1715 pan_varying_index(unsigned present, enum pan_special_varying v)
1716 {
1717 unsigned mask = (1 << v) - 1;
1718 return util_bitcount(present & mask);
1719 }
1720
1721 /* Get the base offset for XFB buffers, which by convention come after
1722 * everything else. Wrapper function for semantic reasons; by construction this
1723 * is just popcount. */
1724
1725 static inline unsigned
1726 pan_xfb_base(unsigned present)
1727 {
1728 return util_bitcount(present);
1729 }
1730
1731 /* Computes the present mask for varyings so we can start emitting varying records */
1732
1733 static inline unsigned
1734 pan_varying_present(
1735 struct panfrost_shader_state *vs,
1736 struct panfrost_shader_state *fs,
1737 unsigned quirks)
1738 {
1739 /* At the moment we always emit general and position buffers. Not
1740 * strictly necessary but usually harmless */
1741
1742 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1743
1744 /* Enable special buffers by the shader info */
1745
1746 if (vs->writes_point_size)
1747 present |= (1 << PAN_VARY_PSIZ);
1748
1749 if (fs->reads_point_coord)
1750 present |= (1 << PAN_VARY_PNTCOORD);
1751
1752 if (fs->reads_face)
1753 present |= (1 << PAN_VARY_FACE);
1754
1755 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1756 present |= (1 << PAN_VARY_FRAGCOORD);
1757
1758 /* Also, if we have a point sprite, we need a point coord buffer */
1759
1760 for (unsigned i = 0; i < fs->varying_count; i++) {
1761 gl_varying_slot loc = fs->varyings_loc[i];
1762
1763 if (has_point_coord(fs->point_sprite_mask, loc))
1764 present |= (1 << PAN_VARY_PNTCOORD);
1765 }
1766
1767 return present;
1768 }
1769
1770 /* Emitters for varying records */
1771
1772 static struct mali_attr_meta
1773 pan_emit_vary(unsigned present, enum pan_special_varying buf,
1774 unsigned quirks, enum mali_format format,
1775 unsigned offset)
1776 {
1777 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1778
1779 struct mali_attr_meta meta = {
1780 .index = pan_varying_index(present, buf),
1781 .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
1782 .swizzle = quirks & HAS_SWIZZLES ?
1783 panfrost_get_default_swizzle(nr_channels) :
1784 panfrost_bifrost_swizzle(nr_channels),
1785 .format = format,
1786 .src_offset = offset
1787 };
1788
1789 return meta;
1790 }
1791
1792 /* General varying with no counterpart in the other stage; the value is discarded */
1793
1794 static struct mali_attr_meta
1795 pan_emit_vary_only(unsigned present, unsigned quirks)
1796 {
1797 return pan_emit_vary(present, 0, quirks, MALI_VARYING_DISCARD, 0);
1798 }
1799
1800 /* Special records */
1801
1802 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1803 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1804 [PAN_VARY_PSIZ] = MALI_R16F,
1805 [PAN_VARY_PNTCOORD] = MALI_R16F,
1806 [PAN_VARY_FACE] = MALI_R32I,
1807 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1808 };
1809
1810 static struct mali_attr_meta
1811 pan_emit_vary_special(unsigned present, enum pan_special_varying buf,
1812 unsigned quirks)
1813 {
1814 assert(buf < PAN_VARY_MAX);
1815 return pan_emit_vary(present, buf, quirks, pan_varying_formats[buf], 0);
1816 }
1817
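/* Pick the format used when capturing a varying to a transform feedback
 * buffer: the channel count follows the capture, but precision is always
 * widened to 32 bits (e.g. an fp16 varying captured with 3 components
 * becomes three 32-bit floats) */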
1818 static enum mali_format
1819 pan_xfb_format(enum mali_format format, unsigned nr)
1820 {
1821 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1822 return MALI_R32F | MALI_NR_CHANNELS(nr);
1823 else
1824 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1825 }
1826
1827 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1828 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1829 * value. */
1830
1831 static struct mali_attr_meta
1832 pan_emit_vary_xfb(unsigned present,
1833 unsigned max_xfb,
1834 unsigned *streamout_offsets,
1835 unsigned quirks,
1836 enum mali_format format,
1837 struct pipe_stream_output o)
1838 {
1839 /* Construct an XFB record for the captured varying */
1840 struct mali_attr_meta meta = {
1841 /* XFB buffers come after everything else */
1842 .index = pan_xfb_base(present) + o.output_buffer,
1843
1844 /* As usual unknown bit */
1845 .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
1846
1847 /* Override swizzle with number of channels */
1848 .swizzle = quirks & HAS_SWIZZLES ?
1849 panfrost_get_default_swizzle(o.num_components) :
1850 panfrost_bifrost_swizzle(o.num_components),
1851
1852 /* Override number of channels and precision to highp */
1853 .format = pan_xfb_format(format, o.num_components),
1854
1855 /* Combine the output's dword offset with the buffer's streamout offset */
1856 .src_offset = (o.dst_offset * 4) /* dwords */
1857 + streamout_offsets[o.output_buffer]
1858 };
1859
1860 return meta;
1861 }
1862
1863 /* Determine if we should capture a varying for XFB. This requires actually
1864 * having a buffer for it. If we don't capture it, we'll fall back to a general
1865 * varying path (linked or unlinked, possibly discarding the write) */
1866
1867 static bool
1868 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1869 unsigned loc, unsigned max_xfb)
1870 {
1871 if (!(xfb->so_mask & (1ll << loc)))
1872 return false;
1873
1874 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1875 return o->output_buffer < max_xfb;
1876 }
1877
1878 /* Higher-level wrapper around all of the above, classifying a varying into one
1879 * of the above types */
1880
1881 static struct mali_attr_meta
1882 panfrost_emit_varying(
1883 struct panfrost_shader_state *stage,
1884 struct panfrost_shader_state *other,
1885 struct panfrost_shader_state *xfb,
1886 unsigned present,
1887 unsigned max_xfb,
1888 unsigned *streamout_offsets,
1889 unsigned quirks,
1890 unsigned *gen_offsets,
1891 enum mali_format *gen_formats,
1892 unsigned *gen_stride,
1893 unsigned idx,
1894 bool should_alloc,
1895 bool is_fragment)
1896 {
1897 gl_varying_slot loc = stage->varyings_loc[idx];
1898 enum mali_format format = stage->varyings[idx];
1899
1900 /* Override format to match linkage */
1901 if (!should_alloc && gen_formats[idx])
1902 format = gen_formats[idx];
1903
1904 if (has_point_coord(stage->point_sprite_mask, loc)) {
1905 return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
1906 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1907 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1908 return pan_emit_vary_xfb(present, max_xfb, streamout_offsets, quirks, format, *o);
1909 } else if (loc == VARYING_SLOT_POS) {
1910 if (is_fragment)
1911 return pan_emit_vary_special(present, PAN_VARY_FRAGCOORD, quirks);
1912 else
1913 return pan_emit_vary_special(present, PAN_VARY_POSITION, quirks);
1914 } else if (loc == VARYING_SLOT_PSIZ) {
1915 return pan_emit_vary_special(present, PAN_VARY_PSIZ, quirks);
1916 } else if (loc == VARYING_SLOT_PNTC) {
1917 return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
1918 } else if (loc == VARYING_SLOT_FACE) {
1919 return pan_emit_vary_special(present, PAN_VARY_FACE, quirks);
1920 }
1921
1922 /* We've exhausted special cases, so it's otherwise a general varying. Check if we're linked */
1923 signed other_idx = -1;
1924
1925 for (unsigned j = 0; j < other->varying_count; ++j) {
1926 if (other->varyings_loc[j] == loc) {
1927 other_idx = j;
1928 break;
1929 }
1930 }
1931
1932 if (other_idx < 0)
1933 return pan_emit_vary_only(present, quirks);
1934
1935 unsigned offset = gen_offsets[other_idx];
1936
1937 if (should_alloc) {
1938 /* We're linked, so allocate space via a watermark allocation */
1939 enum mali_format alt = other->varyings[other_idx];
1940
1941 /* Do interpolation at minimum precision */
1942 unsigned size_main = pan_varying_size(format);
1943 unsigned size_alt = pan_varying_size(alt);
1944 unsigned size = MIN2(size_main, size_alt);
1945
1946 /* If a varying is marked for XFB but not actually captured, we
1947 * should match the format to the format that would otherwise
1948 * be used for XFB, since dEQP checks for invariance here. It's
1949 * unclear if this is required by the spec. */
1950
1951 if (xfb->so_mask & (1ull << loc)) {
1952 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1953 format = pan_xfb_format(format, o->num_components);
1954 size = pan_varying_size(format);
1955 } else if (size == size_alt) {
1956 format = alt;
1957 }
1958
1959 gen_offsets[idx] = *gen_stride;
1960 gen_formats[other_idx] = format;
1961 offset = *gen_stride;
1962 *gen_stride += size;
1963 }
1964
1965 return pan_emit_vary(present, PAN_VARY_GENERAL,
1966 quirks, format, offset);
1967 }
1968
1969 static void
1970 pan_emit_special_input(union mali_attr *varyings,
1971 unsigned present,
1972 enum pan_special_varying v,
1973 mali_ptr addr)
1974 {
1975 if (present & (1 << v)) {
1976 /* Ensure we write exactly once for performance and with fields
1977 * zeroed appropriately to avoid flakes */
1978
1979 union mali_attr s = {
1980 .elements = addr
1981 };
1982
1983 varyings[pan_varying_index(present, v)] = s;
1984 }
1985 }
1986
1987 void
1988 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1989 unsigned vertex_count,
1990 struct mali_vertex_tiler_postfix *vertex_postfix,
1991 struct mali_vertex_tiler_postfix *tiler_postfix,
1992 union midgard_primitive_size *primitive_size)
1993 {
1994 /* Load the shaders */
1995 struct panfrost_context *ctx = batch->ctx;
1996 struct panfrost_device *dev = pan_device(ctx->base.screen);
1997 struct panfrost_shader_state *vs, *fs;
1998 size_t vs_size, fs_size;
1999
2000 /* Allocate the varying descriptor */
2001
2002 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
2003 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
2004 vs_size = sizeof(struct mali_attr_meta) * vs->varying_count;
2005 fs_size = sizeof(struct mali_attr_meta) * fs->varying_count;
2006
2007 struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
2008 vs_size +
2009 fs_size);
2010
2011 struct pipe_stream_output_info *so = &vs->stream_output;
2012 unsigned present = pan_varying_present(vs, fs, dev->quirks);
2013
2014 /* Check if this varying is linked by us. This is the case for
2015 * general-purpose, non-captured varyings. If it is, link it. If it's
2016 * not, use the provided stream out information to determine the
2017 * offset, since it was already linked for us. */
2018
2019 unsigned gen_offsets[32];
2020 enum mali_format gen_formats[32];
2021 memset(gen_offsets, 0, sizeof(gen_offsets));
2022 memset(gen_formats, 0, sizeof(gen_formats));
2023
2024 unsigned gen_stride = 0;
2025 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
2026 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
2027
2028 unsigned streamout_offsets[32];
2029
2030 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2031 streamout_offsets[i] = panfrost_streamout_offset(
2032 so->stride[i],
2033 ctx->streamout.offsets[i],
2034 ctx->streamout.targets[i]);
2035 }
2036
2037 struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
2038 struct mali_attr_meta *ofs = ovs + vs->varying_count;
2039
2040 for (unsigned i = 0; i < vs->varying_count; i++) {
2041 ovs[i] = panfrost_emit_varying(vs, fs, vs, present,
2042 ctx->streamout.num_targets, streamout_offsets,
2043 dev->quirks,
2044 gen_offsets, gen_formats, &gen_stride, i, true, false);
2045 }
2046
2047 for (unsigned i = 0; i < fs->varying_count; i++) {
2048 ofs[i] = panfrost_emit_varying(fs, vs, vs, present,
2049 ctx->streamout.num_targets, streamout_offsets,
2050 dev->quirks,
2051 gen_offsets, gen_formats, &gen_stride, i, false, true);
2052 }
2053
2054 unsigned xfb_base = pan_xfb_base(present);
2055 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
2056 sizeof(union mali_attr) * (xfb_base + ctx->streamout.num_targets));
2057 union mali_attr *varyings = (union mali_attr *) T.cpu;
2058
2059 /* Emit the stream out buffers */
2060
2061 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
2062 ctx->vertex_count);
2063
2064 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2065 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
2066 so->stride[i],
2067 ctx->streamout.offsets[i],
2068 out_count,
2069 ctx->streamout.targets[i]);
2070 }
2071
2072 panfrost_emit_varyings(batch,
2073 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2074 gen_stride, vertex_count);
2075
2076 /* fp32 vec4 gl_Position */
2077 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2078 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2079 sizeof(float) * 4, vertex_count);
2080
2081 if (present & (1 << PAN_VARY_PSIZ)) {
2082 primitive_size->pointer = panfrost_emit_varyings(batch,
2083 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2084 2, vertex_count);
2085 }
2086
2087 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_VARYING_POINT_COORD);
2088 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_VARYING_FRONT_FACING);
2089 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_VARYING_FRAG_COORD);
2090
2091 vertex_postfix->varyings = T.gpu;
2092 tiler_postfix->varyings = T.gpu;
2093
2094 vertex_postfix->varying_meta = trans.gpu;
2095 tiler_postfix->varying_meta = trans.gpu + vs_size;
2096 }
2097
2098 void
2099 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2100 struct mali_vertex_tiler_prefix *vertex_prefix,
2101 struct mali_vertex_tiler_postfix *vertex_postfix,
2102 struct mali_vertex_tiler_prefix *tiler_prefix,
2103 struct mali_vertex_tiler_postfix *tiler_postfix,
2104 union midgard_primitive_size *primitive_size)
2105 {
2106 struct panfrost_context *ctx = batch->ctx;
2107 struct panfrost_device *device = pan_device(ctx->base.screen);
2108 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2109 struct bifrost_payload_vertex bifrost_vertex = {0,};
2110 struct bifrost_payload_tiler bifrost_tiler = {0,};
2111 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2112 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2113 void *vp, *tp;
2114 size_t vp_size, tp_size;
2115
2116 if (device->quirks & IS_BIFROST) {
2117 bifrost_vertex.prefix = *vertex_prefix;
2118 bifrost_vertex.postfix = *vertex_postfix;
2119 vp = &bifrost_vertex;
2120 vp_size = sizeof(bifrost_vertex);
2121
2122 bifrost_tiler.prefix = *tiler_prefix;
2123 bifrost_tiler.tiler.primitive_size = *primitive_size;
2124 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2125 bifrost_tiler.postfix = *tiler_postfix;
2126 tp = &bifrost_tiler;
2127 tp_size = sizeof(bifrost_tiler);
2128 } else {
2129 midgard_vertex.prefix = *vertex_prefix;
2130 midgard_vertex.postfix = *vertex_postfix;
2131 vp = &midgard_vertex;
2132 vp_size = sizeof(midgard_vertex);
2133
2134 midgard_tiler.prefix = *tiler_prefix;
2135 midgard_tiler.postfix = *tiler_postfix;
2136 midgard_tiler.primitive_size = *primitive_size;
2137 tp = &midgard_tiler;
2138 tp_size = sizeof(midgard_tiler);
2139 }
2140
2141 if (wallpapering) {
2142 /* Inject in reverse order, with "predicted" job indices.
2143 * THIS IS A HACK XXX */
2144 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2145 batch->scoreboard.job_index + 2, tp, tp_size, true);
2146 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2147 vp, vp_size, true);
2148 return;
2149 }
2150
2151 /* If rasterizer discard is enabled, only submit the vertex job */
2152
2153 bool rasterizer_discard = ctx->rasterizer &&
2154 ctx->rasterizer->base.rasterizer_discard;
2155
2156 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2157 vp, vp_size, false);
2158
2159 if (rasterizer_discard)
2160 return;
2161
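/* Otherwise submit the tiler job too, passing the vertex job's index as its
 * dependency so tiling runs after vertex shading */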
2162 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2163 false);
2164 }
2165
2166 /* TODO: stop hardcoding this */
2167 mali_ptr
2168 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2169 {
2170 uint16_t locations[] = {
2171 128, 128,
2172 0, 256,
2173 0, 256,
2174 0, 256,
2175 0, 256,
2176 0, 256,
2177 0, 256,
2178 0, 256,
2179 0, 256,
2180 0, 256,
2181 0, 256,
2182 0, 256,
2183 0, 256,
2184 0, 256,
2185 0, 256,
2186 0, 256,
2187 0, 256,
2188 0, 256,
2189 0, 256,
2190 0, 256,
2191 0, 256,
2192 0, 256,
2193 0, 256,
2194 0, 256,
2195 0, 256,
2196 0, 256,
2197 0, 256,
2198 0, 256,
2199 0, 256,
2200 0, 256,
2201 0, 256,
2202 0, 256,
2203 128, 128,
2204 0, 0,
2205 0, 0,
2206 0, 0,
2207 0, 0,
2208 0, 0,
2209 0, 0,
2210 0, 0,
2211 0, 0,
2212 0, 0,
2213 0, 0,
2214 0, 0,
2215 0, 0,
2216 0, 0,
2217 0, 0,
2218 0, 0,
2219 };
2220
2221 return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
2222 }