panfrost: XMLify UBOs
[mesa.git] / src/gallium/drivers/panfrost/pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 unsigned shift = panfrost_get_stack_shift(batch->stack_size);
62 struct mali_shared_memory shared = {
63 .stack_shift = shift,
64 .scratchpad = panfrost_batch_get_scratchpad(batch, shift, dev->thread_tls_alloc, dev->core_count)->gpu,
65 .shared_workgroup_count = ~0,
66 };
67 postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
68 }
69
70 static void
71 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
72 struct mali_vertex_tiler_postfix *postfix)
73 {
74 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
75 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
76 }
77
78 static void
79 panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_prefix *prefix,
81 struct mali_vertex_tiler_postfix *postfix)
82 {
83 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
84
85 postfix->gl_enables |= 0x7;
86 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
87 rasterizer && rasterizer->base.front_ccw);
88 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
89 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
90 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
91 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
92 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
93 rasterizer && rasterizer->base.flatshade_first);
94 }
95
96 void
97 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
98 struct mali_vertex_tiler_prefix *prefix,
99 union midgard_primitive_size *primitive_size)
100 {
101 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
102
103 if (!panfrost_writes_point_size(ctx)) {
104 bool points = prefix->draw_mode == MALI_DRAW_MODE_POINTS;
105 float val = 0.0f;
106
107 if (rasterizer)
108 val = points ?
109 rasterizer->base.point_size :
110 rasterizer->base.line_width;
111
112 primitive_size->constant = val;
113 }
114 }
115
116 static void
117 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
118 struct mali_vertex_tiler_postfix *postfix)
119 {
120 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
121 if (ctx->occlusion_query) {
122 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
123 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
124 PAN_BO_ACCESS_SHARED |
125 PAN_BO_ACCESS_RW |
126 PAN_BO_ACCESS_FRAGMENT);
127 } else {
128 postfix->occlusion_counter = 0;
129 }
130 }
131
132 void
133 panfrost_vt_init(struct panfrost_context *ctx,
134 enum pipe_shader_type stage,
135 struct mali_vertex_tiler_prefix *prefix,
136 struct mali_vertex_tiler_postfix *postfix)
137 {
138 struct panfrost_device *device = pan_device(ctx->base.screen);
139
140 if (!ctx->shader[stage])
141 return;
142
143 memset(prefix, 0, sizeof(*prefix));
144 memset(postfix, 0, sizeof(*postfix));
145
146 if (device->quirks & IS_BIFROST) {
147 postfix->gl_enables = 0x2;
148 panfrost_vt_emit_shared_memory(ctx, postfix);
149 } else {
150 postfix->gl_enables = 0x6;
151 panfrost_vt_attach_framebuffer(ctx, postfix);
152 }
153
154 if (stage == PIPE_SHADER_FRAGMENT) {
155 panfrost_vt_update_occlusion_query(ctx, postfix);
156 panfrost_vt_update_rasterizer(ctx, prefix, postfix);
157 }
158 }
159
160 static unsigned
161 panfrost_translate_index_size(unsigned size)
162 {
163 switch (size) {
164 case 1:
165 return MALI_DRAW_INDEXED_UINT8;
166
167 case 2:
168 return MALI_DRAW_INDEXED_UINT16;
169
170 case 4:
171 return MALI_DRAW_INDEXED_UINT32;
172
173 default:
174 unreachable("Invalid index size");
175 }
176 }
177
178 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
179 * good for the duration of the draw (transient), though it could last longer. Also get
180 * the bounds on the index buffer for the range accessed by the draw. We do
181 * these operations together because there are natural optimizations which
182 * require them to be together. */
183
184 static mali_ptr
185 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
186 const struct pipe_draw_info *info,
187 unsigned *min_index, unsigned *max_index)
188 {
189 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
190 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
191 off_t offset = info->start * info->index_size;
192 bool needs_indices = true;
193 mali_ptr out = 0;
194
195 if (info->max_index != ~0u) {
196 *min_index = info->min_index;
197 *max_index = info->max_index;
198 needs_indices = false;
199 }
200
201 if (!info->has_user_indices) {
202 /* Only resources can be directly mapped */
203 panfrost_batch_add_bo(batch, rsrc->bo,
204 PAN_BO_ACCESS_SHARED |
205 PAN_BO_ACCESS_READ |
206 PAN_BO_ACCESS_VERTEX_TILER);
207 out = rsrc->bo->gpu + offset;
208
209 /* Check the cache */
210 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
211 info->start,
212 info->count,
213 min_index,
214 max_index);
215 } else {
216 /* Otherwise, we need to upload to transient memory */
217 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
218 out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
219 info->count *
220 info->index_size);
221 }
222
223 if (needs_indices) {
224 /* Fallback */
225 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
226
227 if (!info->has_user_indices)
228 panfrost_minmax_cache_add(rsrc->index_cache,
229 info->start, info->count,
230 *min_index, *max_index);
231 }
232
233 return out;
234 }
235
236 void
237 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
238 const struct pipe_draw_info *info,
239 enum mali_draw_mode draw_mode,
240 struct mali_vertex_tiler_postfix *vertex_postfix,
241 struct mali_vertex_tiler_prefix *tiler_prefix,
242 struct mali_vertex_tiler_postfix *tiler_postfix,
243 unsigned *vertex_count,
244 unsigned *padded_count)
245 {
246 tiler_prefix->draw_mode = draw_mode;
247
248 unsigned draw_flags = 0;
249
250 if (panfrost_writes_point_size(ctx))
251 draw_flags |= MALI_DRAW_VARYING_SIZE;
252
253 if (info->primitive_restart)
254 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
255
256 /* These don't make much sense */
257
258 draw_flags |= 0x3000;
259
260 if (info->index_size) {
261 unsigned min_index = 0, max_index = 0;
262
263 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
264 info,
265 &min_index,
266 &max_index);
267
268 /* Use the corresponding values */
269 *vertex_count = max_index - min_index + 1;
270 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
271 tiler_prefix->offset_bias_correction = -min_index;
272 tiler_prefix->index_count = MALI_POSITIVE(info->count);
273 draw_flags |= panfrost_translate_index_size(info->index_size);
274 } else {
275 tiler_prefix->indices = 0;
276 *vertex_count = ctx->vertex_count;
277 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
278 tiler_prefix->offset_bias_correction = 0;
279 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
280 }
281
282 tiler_prefix->unknown_draw = draw_flags;
283
284 /* Encode the padded vertex count */
285
286 if (info->instance_count > 1) {
287 *padded_count = panfrost_padded_vertex_count(*vertex_count);
288
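/* Decompose ctx->padded_count as (2k + 1) << shift: ctz finds the lowest
 * set bit, so padded >> shift is odd. For example, a padded count of
 * 96 = 3 << 5 yields shift = 5 and k = 1. */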
289 unsigned shift = __builtin_ctz(ctx->padded_count);
290 unsigned k = ctx->padded_count >> (shift + 1);
291
292 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
293 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
294 } else {
295 *padded_count = *vertex_count;
296
297 /* Reset instancing state */
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
300 }
301 }
302
303 static void
304 panfrost_shader_meta_init(struct panfrost_context *ctx,
305 enum pipe_shader_type st,
306 struct mali_shader_meta *meta)
307 {
308 const struct panfrost_device *dev = pan_device(ctx->base.screen);
309 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
310
311 memset(meta, 0, sizeof(*meta));
312 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
313 meta->attribute_count = ss->attribute_count;
314 meta->varying_count = ss->varying_count;
315 meta->texture_count = ctx->sampler_view_count[st];
316 meta->sampler_count = ctx->sampler_count[st];
317
318 if (dev->quirks & IS_BIFROST) {
319 if (st == PIPE_SHADER_VERTEX)
320 meta->bifrost1.unk1 = 0x800000;
321 else {
322 /* First clause ATEST |= 0x4000000.
323 * Less than 32 regs |= 0x200 */
324 meta->bifrost1.unk1 = 0x950020;
325 }
326
327 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
328 if (st == PIPE_SHADER_VERTEX)
329 meta->bifrost2.preload_regs = 0xC0;
330 else {
331 meta->bifrost2.preload_regs = 0x1;
332 SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
333 }
334
335 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
336 ss->uniform_cutoff);
337 } else {
338 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
339 ss->uniform_cutoff);
340 meta->midgard1.work_count = ss->work_reg_count;
341
342 /* TODO: This is not conformant on ES3 */
343 meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
344
345 meta->midgard1.flags_lo = 0x20;
346 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
347
348 SET_BIT(meta->midgard1.flags_hi, MALI_WRITES_GLOBAL, ss->writes_global);
349 }
350 }
351
352 static unsigned
353 panfrost_translate_compare_func(enum pipe_compare_func in)
354 {
355 switch (in) {
356 case PIPE_FUNC_NEVER:
357 return MALI_FUNC_NEVER;
358
359 case PIPE_FUNC_LESS:
360 return MALI_FUNC_LESS;
361
362 case PIPE_FUNC_EQUAL:
363 return MALI_FUNC_EQUAL;
364
365 case PIPE_FUNC_LEQUAL:
366 return MALI_FUNC_LEQUAL;
367
368 case PIPE_FUNC_GREATER:
369 return MALI_FUNC_GREATER;
370
371 case PIPE_FUNC_NOTEQUAL:
372 return MALI_FUNC_NOT_EQUAL;
373
374 case PIPE_FUNC_GEQUAL:
375 return MALI_FUNC_GEQUAL;
376
377 case PIPE_FUNC_ALWAYS:
378 return MALI_FUNC_ALWAYS;
379
380 default:
381 unreachable("Invalid func");
382 }
383 }
384
385 static unsigned
386 panfrost_translate_stencil_op(enum pipe_stencil_op in)
387 {
388 switch (in) {
389 case PIPE_STENCIL_OP_KEEP:
390 return MALI_STENCIL_OP_KEEP;
391
392 case PIPE_STENCIL_OP_ZERO:
393 return MALI_STENCIL_OP_ZERO;
394
395 case PIPE_STENCIL_OP_REPLACE:
396 return MALI_STENCIL_OP_REPLACE;
397
398 case PIPE_STENCIL_OP_INCR:
399 return MALI_STENCIL_OP_INCR_SAT;
400
401 case PIPE_STENCIL_OP_DECR:
402 return MALI_STENCIL_OP_DECR_SAT;
403
404 case PIPE_STENCIL_OP_INCR_WRAP:
405 return MALI_STENCIL_OP_INCR_WRAP;
406
407 case PIPE_STENCIL_OP_DECR_WRAP:
408 return MALI_STENCIL_OP_DECR_WRAP;
409
410 case PIPE_STENCIL_OP_INVERT:
411 return MALI_STENCIL_OP_INVERT;
412
413 default:
414 unreachable("Invalid stencil op");
415 }
416 }
417
418 static unsigned
419 translate_tex_wrap(enum pipe_tex_wrap w)
420 {
421 switch (w) {
422 case PIPE_TEX_WRAP_REPEAT:
423 return MALI_WRAP_MODE_REPEAT;
424
425 case PIPE_TEX_WRAP_CLAMP:
426 return MALI_WRAP_MODE_CLAMP;
427
428 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
429 return MALI_WRAP_MODE_CLAMP_TO_EDGE;
430
431 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
432 return MALI_WRAP_MODE_CLAMP_TO_BORDER;
433
434 case PIPE_TEX_WRAP_MIRROR_REPEAT:
435 return MALI_WRAP_MODE_MIRRORED_REPEAT;
436
437 case PIPE_TEX_WRAP_MIRROR_CLAMP:
438 return MALI_WRAP_MODE_MIRRORED_CLAMP;
439
440 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
441 return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
442
443 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
444 return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
445
446 default:
447 unreachable("Invalid wrap");
448 }
449 }
450
451 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
452 struct mali_sampler_descriptor *hw)
453 {
454 unsigned func = panfrost_translate_compare_func(cso->compare_func);
455 bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
456 bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
457 bool mip_linear = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;
458 unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
459 unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
460 unsigned mip_filter = mip_linear ?
461 (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
462 unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;
463
464 *hw = (struct mali_sampler_descriptor) {
465 .filter_mode = min_filter | mag_filter | mip_filter |
466 normalized,
467 .wrap_s = translate_tex_wrap(cso->wrap_s),
468 .wrap_t = translate_tex_wrap(cso->wrap_t),
469 .wrap_r = translate_tex_wrap(cso->wrap_r),
470 .compare_func = cso->compare_mode ?
471 panfrost_flip_compare_func(func) :
472 MALI_FUNC_NEVER,
473 .border_color = {
474 cso->border_color.f[0],
475 cso->border_color.f[1],
476 cso->border_color.f[2],
477 cso->border_color.f[3]
478 },
479 .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
480 .max_lod = FIXED_16(cso->max_lod, false),
481 .lod_bias = FIXED_16(cso->lod_bias, true), /* can be negative */
482 .seamless_cube_map = cso->seamless_cube_map,
483 };
484
485 /* If necessary, we disable mipmapping in the sampler descriptor by
486 * clamping the LOD as tightly as possible (from 0 to epsilon,
487 * essentially -- remember these are fixed point numbers, so
488 * epsilon=1/256) */
489
490 if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
491 hw->max_lod = hw->min_lod + 1;
492 }
493
494 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
495 struct bifrost_sampler_descriptor *hw)
496 {
497 *hw = (struct bifrost_sampler_descriptor) {
498 .unk1 = 0x1,
499 .wrap_s = translate_tex_wrap(cso->wrap_s),
500 .wrap_t = translate_tex_wrap(cso->wrap_t),
501 .wrap_r = translate_tex_wrap(cso->wrap_r),
502 .unk8 = 0x8,
503 .min_filter = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST,
504 .norm_coords = cso->normalized_coords,
505 .mip_filter = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR,
506 .mag_filter = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR,
507 .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
508 .max_lod = FIXED_16(cso->max_lod, false),
509 };
510
511 /* If necessary, we disable mipmapping in the sampler descriptor by
512 * clamping the LOD as tightly as possible (from 0 to epsilon,
513 * essentially -- remember these are fixed point numbers, so
514 * epsilon=1/256) */
515
516 if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
517 hw->max_lod = hw->min_lod + 1;
518 }
519
520 static void
521 panfrost_make_stencil_state(const struct pipe_stencil_state *in,
522 struct mali_stencil_test *out)
523 {
524 out->ref = 0; /* Gallium gets it from elsewhere */
525
526 out->mask = in->valuemask;
527 out->func = panfrost_translate_compare_func(in->func);
528 out->sfail = panfrost_translate_stencil_op(in->fail_op);
529 out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
530 out->dppass = panfrost_translate_stencil_op(in->zpass_op);
531 }
532
533 static void
534 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
535 struct mali_shader_meta *fragmeta)
536 {
537 if (!ctx->rasterizer) {
538 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
539 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
540 fragmeta->depth_units = 0.0f;
541 fragmeta->depth_factor = 0.0f;
542 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
543 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
544 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, true);
545 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, true);
546 return;
547 }
548
549 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
550
551 bool msaa = rast->multisample;
552
553 /* TODO: Sample size */
554 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
555 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
556
557 struct panfrost_shader_state *fs;
558 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
559
560 /* EXT_shader_framebuffer_fetch requires the shader to be run
561 * per-sample when outputs are read. */
562 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
563 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
564
565 fragmeta->depth_units = rast->offset_units * 2.0f;
566 fragmeta->depth_factor = rast->offset_scale;
567
568 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
569
570 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
571 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
572
573 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
574 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
575 }
576
577 static void
578 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
579 struct mali_shader_meta *fragmeta)
580 {
581 const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
582 int zfunc = PIPE_FUNC_ALWAYS;
583
584 if (!zsa) {
585 struct pipe_stencil_state default_stencil = {
586 .enabled = 0,
587 .func = PIPE_FUNC_ALWAYS,
588 .fail_op = PIPE_STENCIL_OP_KEEP,
589 .zfail_op = PIPE_STENCIL_OP_KEEP,
590 .zpass_op = PIPE_STENCIL_OP_KEEP,
591 .writemask = 0xFF,
592 .valuemask = 0xFF
593 };
594
595 panfrost_make_stencil_state(&default_stencil,
596 &fragmeta->stencil_front);
597 fragmeta->stencil_mask_front = default_stencil.writemask;
598 fragmeta->stencil_back = fragmeta->stencil_front;
599 fragmeta->stencil_mask_back = default_stencil.writemask;
600 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
601 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
602 } else {
603 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
604 zsa->stencil[0].enabled);
605 panfrost_make_stencil_state(&zsa->stencil[0],
606 &fragmeta->stencil_front);
607 fragmeta->stencil_mask_front = zsa->stencil[0].writemask;
608 fragmeta->stencil_front.ref = ctx->stencil_ref.ref_value[0];
609
610 /* If back-stencil is not enabled, use the front values */
611
612 if (zsa->stencil[1].enabled) {
613 panfrost_make_stencil_state(&zsa->stencil[1],
614 &fragmeta->stencil_back);
615 fragmeta->stencil_mask_back = zsa->stencil[1].writemask;
616 fragmeta->stencil_back.ref = ctx->stencil_ref.ref_value[1];
617 } else {
618 fragmeta->stencil_back = fragmeta->stencil_front;
619 fragmeta->stencil_mask_back = fragmeta->stencil_mask_front;
620 fragmeta->stencil_back.ref = fragmeta->stencil_front.ref;
621 }
622
623 if (zsa->depth.enabled)
624 zfunc = zsa->depth.func;
625
626 /* Depth state (TODO: Refactor) */
627
628 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
629 zsa->depth.writemask);
630 }
631
632 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
633 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
634 }
635
636 static bool
637 panfrost_fs_required(
638 struct panfrost_shader_state *fs,
639 struct panfrost_blend_final *blend,
640 unsigned rt_count)
641 {
642 /* If we generally have side effects */
643 if (fs->fs_sidefx)
644 return true;
645
646 /* If colour is written we need to execute */
647 for (unsigned i = 0; i < rt_count; ++i) {
648 if (!blend[i].no_colour)
649 return true;
650 }
651
652 /* If depth is written and not implied we need to execute.
653 * TODO: Predicate on Z/S writes being enabled */
654 return (fs->writes_depth || fs->writes_stencil);
655 }
656
657 static void
658 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
659 struct mali_shader_meta *fragmeta,
660 void *rts)
661 {
662 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
663 const struct panfrost_device *dev = pan_device(ctx->base.screen);
664 struct panfrost_shader_state *fs;
665 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
666
667 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
668 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
669 !ctx->blend->base.dither);
670
671 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
672 ctx->blend->base.alpha_to_coverage);
673
674 /* Get blending setup */
675 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
676
677 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
678 unsigned shader_offset = 0;
679 struct panfrost_bo *shader_bo = NULL;
680
681 for (unsigned c = 0; c < rt_count; ++c)
682 blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
683 &shader_offset);
684
685 /* Disable shader execution if we can */
686 if (dev->quirks & MIDGARD_SHADERLESS
687 && !panfrost_fs_required(fs, blend, rt_count)) {
688 fragmeta->shader = 0;
689 fragmeta->attribute_count = 0;
690 fragmeta->varying_count = 0;
691 fragmeta->texture_count = 0;
692 fragmeta->sampler_count = 0;
693
694 /* This feature is not known to work on Bifrost */
695 fragmeta->midgard1.work_count = 1;
696 fragmeta->midgard1.uniform_count = 0;
697 fragmeta->midgard1.uniform_buffer_count = 0;
698 }
699
700 /* If there is a blend shader, work registers are shared. We impose 8
701 * work registers as a limit for blend shaders. XXX: this should be lower */
702
703 if (!(dev->quirks & IS_BIFROST)) {
704 for (unsigned c = 0; c < rt_count; ++c) {
705 if (blend[c].is_shader) {
706 fragmeta->midgard1.work_count =
707 MAX2(fragmeta->midgard1.work_count, 8);
708 }
709 }
710 }
711
712 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
713 * copied to the appended blend_meta (by convention), but this is the
714 * field actually read by the hardware (or maybe both are read?).
715 * Specify the last RTi with a blend shader. */
716
717 fragmeta->blend.shader = 0;
718
719 for (signed rt = (rt_count - 1); rt >= 0; --rt) {
720 if (!blend[rt].is_shader)
721 continue;
722
723 fragmeta->blend.shader = blend[rt].shader.gpu |
724 blend[rt].shader.first_tag;
725 break;
726 }
727
728 if (dev->quirks & MIDGARD_SFBD) {
729 /* On single render target (SFBD) platforms, the blend
730 * information is inside the shader meta itself. We additionally
731 * need to signal CAN_DISCARD for nontrivial blend modes (so
732 * we're able to read back the destination buffer) */
733
734 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
735 blend[0].is_shader);
736
737 if (!blend[0].is_shader) {
738 fragmeta->blend.equation = *blend[0].equation.equation;
739 fragmeta->blend.constant = blend[0].equation.constant;
740 }
741
742 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
743 !blend[0].no_blending || fs->can_discard);
744
745 batch->draws |= PIPE_CLEAR_COLOR0;
746 return;
747 }
748
749 if (dev->quirks & IS_BIFROST) {
750 bool no_blend = true;
751
752 for (unsigned i = 0; i < rt_count; ++i)
753 no_blend &= (blend[i].no_blending | blend[i].no_colour);
754
755 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
756 !fs->can_discard && !fs->writes_depth && no_blend);
757 }
758
759 /* Additional blend descriptor tacked on for jobs using MFBD */
760
761 for (unsigned i = 0; i < rt_count; ++i) {
762 unsigned flags = 0;
763
764 if (ctx->pipe_framebuffer.nr_cbufs > i && !blend[i].no_colour) {
765 flags = 0x200;
766 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
767
768 bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
769 (ctx->pipe_framebuffer.cbufs[i]) &&
770 util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
771
772 SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
773 SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
774 SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
775 SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
776 }
777
778 if (dev->quirks & IS_BIFROST) {
779 struct bifrost_blend_rt *brts = rts;
780
781 brts[i].flags = flags;
782
783 if (blend[i].is_shader) {
784 /* The blend shader's address needs to have
785 * the same top 32 bits as the fragment shader's.
786 * TODO: Ensure that's always the case.
787 */
788 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
789 (fs->bo->gpu & (0xffffffffull << 32)));
790 brts[i].shader = blend[i].shader.gpu;
791 brts[i].unk2 = 0x0;
792 } else if (ctx->pipe_framebuffer.nr_cbufs > i) {
793 enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
794 const struct util_format_description *format_desc;
795 format_desc = util_format_description(format);
796
797 brts[i].equation = *blend[i].equation.equation;
798
799 /* TODO: this is a bit more complicated */
800 brts[i].constant = blend[i].equation.constant;
801
802 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
803
804 /* 0x19 disables blending and forces REPLACE
805 * mode (equivalent to rgb_mode = alpha_mode =
806 * 0x122, colour mask = 0xF). 0x1a allows
807 * blending. */
808 brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
809
810 brts[i].shader_type = fs->blend_types[i];
811 } else {
812 /* Dummy attachment for depth-only */
813 brts[i].unk2 = 0x3;
814 brts[i].shader_type = fs->blend_types[i];
815 }
816 } else {
817 struct midgard_blend_rt *mrts = rts;
818 mrts[i].flags = flags;
819
820 if (blend[i].is_shader) {
821 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
822 } else {
823 mrts[i].blend.equation = *blend[i].equation.equation;
824 mrts[i].blend.constant = blend[i].equation.constant;
825 }
826 }
827 }
828 }
829
830 static void
831 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
832 struct mali_shader_meta *fragmeta,
833 void *rts)
834 {
835 const struct panfrost_device *dev = pan_device(ctx->base.screen);
836 struct panfrost_shader_state *fs;
837
838 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
839
840 bool msaa = ctx->rasterizer && ctx->rasterizer->base.multisample;
841 fragmeta->coverage_mask = (msaa ? ctx->sample_mask : ~0) & 0xF;
842
843 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
844 fragmeta->unknown2_4 = 0x4e0;
845
846 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
847 * is required (independent of 32-bit/64-bit descriptors), or why it's
848 * not used on later GPU revisions. Otherwise, all shader jobs fault on
849 * these earlier chips (perhaps this is a chicken bit of some kind).
850 * More investigation is needed. */
851
852 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
853
854 if (dev->quirks & IS_BIFROST) {
855 /* TODO */
856 } else {
857 /* Depending on whether it's legal in the given shader, we try to
858 * enable early-z testing. TODO: respect e-z force */
859
860 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
861 !fs->can_discard && !fs->writes_global &&
862 !fs->writes_depth && !fs->writes_stencil &&
863 !ctx->blend->base.alpha_to_coverage);
864
865 /* Add the writes Z/S flags if needed. */
866 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
867 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
868
869 /* Any time texturing is used, derivatives are implicitly calculated,
870 * so we need to enable helper invocations */
871
872 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
873 fs->helper_invocations);
874
875 /* If discard is enabled, which bit we set to convey this
876 * depends on whether depth/stencil is used for the draw or not.
877 * Just one of depth OR stencil is enough to trigger this. */
878
879 const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
880 bool zs_enabled = fs->writes_depth || fs->writes_stencil;
881
882 if (zsa) {
883 zs_enabled |= (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS);
884 zs_enabled |= zsa->stencil[0].enabled;
885 }
886
887 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
888 fs->outputs_read || (!zs_enabled && fs->can_discard));
889 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
890 }
891
892 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
893 panfrost_frag_meta_zsa_update(ctx, fragmeta);
894 panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
895 }
896
897 void
898 panfrost_emit_shader_meta(struct panfrost_batch *batch,
899 enum pipe_shader_type st,
900 struct mali_vertex_tiler_postfix *postfix)
901 {
902 struct panfrost_context *ctx = batch->ctx;
903 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
904
905 if (!ss) {
906 postfix->shader = 0;
907 return;
908 }
909
910 struct mali_shader_meta meta;
911
912 panfrost_shader_meta_init(ctx, st, &meta);
913
914 /* Add the shader BO to the batch. */
915 panfrost_batch_add_bo(batch, ss->bo,
916 PAN_BO_ACCESS_PRIVATE |
917 PAN_BO_ACCESS_READ |
918 panfrost_bo_access_for_stage(st));
919
920 mali_ptr shader_ptr;
921
922 if (st == PIPE_SHADER_FRAGMENT) {
923 struct panfrost_device *dev = pan_device(ctx->base.screen);
924 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
925 size_t desc_size = sizeof(meta);
926 void *rts = NULL;
927 struct panfrost_transfer xfer;
928 unsigned rt_size;
929
930 if (dev->quirks & MIDGARD_SFBD)
931 rt_size = 0;
932 else if (dev->quirks & IS_BIFROST)
933 rt_size = sizeof(struct bifrost_blend_rt);
934 else
935 rt_size = sizeof(struct midgard_blend_rt);
936
937 desc_size += rt_size * rt_count;
938
939 if (rt_size)
940 rts = rzalloc_size(ctx, rt_size * rt_count);
941
942 panfrost_frag_shader_meta_init(ctx, &meta, rts);
943
944 xfer = panfrost_pool_alloc(&batch->pool, desc_size);
945
946 memcpy(xfer.cpu, &meta, sizeof(meta));
947 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
948
949 if (rt_size)
950 ralloc_free(rts);
951
952 shader_ptr = xfer.gpu;
953 } else {
954 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
955 sizeof(meta));
956 }
957
958 postfix->shader = shader_ptr;
959 }
960
961 void
962 panfrost_emit_viewport(struct panfrost_batch *batch,
963 struct mali_vertex_tiler_postfix *tiler_postfix)
964 {
965 struct panfrost_context *ctx = batch->ctx;
966 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
967 const struct pipe_scissor_state *ss = &ctx->scissor;
968 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
969 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
970
971 /* Derive min/max from translate/scale. Note since |x| >= 0 by
972 * definition, we have that -|x| <= |x| hence translate - |scale| <=
973 * translate + |scale|, so the ordering is correct here. */
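/* For example, translate[0] = 128 and scale[0] = 128 give an X span of
 * [0, 256]; a negative scale (flipped viewport) gives the same span. */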
974 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
975 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
976 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
977 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
978 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
979 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
980
981 /* Scissor to the intersection of the viewport and the scissor, clamped
982 * to the framebuffer */
983
984 unsigned minx = MIN2(fb->width, vp_minx);
985 unsigned maxx = MIN2(fb->width, vp_maxx);
986 unsigned miny = MIN2(fb->height, vp_miny);
987 unsigned maxy = MIN2(fb->height, vp_maxy);
988
989 if (ss && rast && rast->scissor) {
990 minx = MAX2(ss->minx, minx);
991 miny = MAX2(ss->miny, miny);
992 maxx = MIN2(ss->maxx, maxx);
993 maxy = MIN2(ss->maxy, maxy);
994 }
995
996 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
997
998 pan_pack(T.cpu, VIEWPORT, cfg) {
999 cfg.scissor_minimum_x = minx;
1000 cfg.scissor_minimum_y = miny;
1001 cfg.scissor_maximum_x = maxx - 1;
1002 cfg.scissor_maximum_y = maxy - 1;
1003
1004 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
1005 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
1006 }
1007
1008 tiler_postfix->viewport = T.gpu;
1009 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
1010 }
1011
1012 static mali_ptr
1013 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
1014 enum pipe_shader_type st,
1015 struct panfrost_constant_buffer *buf,
1016 unsigned index)
1017 {
1018 struct pipe_constant_buffer *cb = &buf->cb[index];
1019 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1020
1021 if (rsrc) {
1022 panfrost_batch_add_bo(batch, rsrc->bo,
1023 PAN_BO_ACCESS_SHARED |
1024 PAN_BO_ACCESS_READ |
1025 panfrost_bo_access_for_stage(st));
1026
1027 /* Alignment guaranteed by
1028 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
1029 return rsrc->bo->gpu + cb->buffer_offset;
1030 } else if (cb->user_buffer) {
1031 return panfrost_pool_upload(&batch->pool,
1032 cb->user_buffer +
1033 cb->buffer_offset,
1034 cb->buffer_size);
1035 } else {
1036 unreachable("No constant buffer");
1037 }
1038 }
1039
1040 struct sysval_uniform {
1041 union {
1042 float f[4];
1043 int32_t i[4];
1044 uint32_t u[4];
1045 uint64_t du[2];
1046 };
1047 };
1048
1049 static void
1050 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
1051 struct sysval_uniform *uniform)
1052 {
1053 struct panfrost_context *ctx = batch->ctx;
1054 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
1055
1056 uniform->f[0] = vp->scale[0];
1057 uniform->f[1] = vp->scale[1];
1058 uniform->f[2] = vp->scale[2];
1059 }
1060
1061 static void
1062 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
1063 struct sysval_uniform *uniform)
1064 {
1065 struct panfrost_context *ctx = batch->ctx;
1066 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
1067
1068 uniform->f[0] = vp->translate[0];
1069 uniform->f[1] = vp->translate[1];
1070 uniform->f[2] = vp->translate[2];
1071 }
1072
1073 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
1074 enum pipe_shader_type st,
1075 unsigned int sysvalid,
1076 struct sysval_uniform *uniform)
1077 {
1078 struct panfrost_context *ctx = batch->ctx;
1079 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
1080 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
1081 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
1082 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
1083
1084 assert(dim);
1085 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
1086
1087 if (dim > 1)
1088 uniform->i[1] = u_minify(tex->texture->height0,
1089 tex->u.tex.first_level);
1090
1091 if (dim > 2)
1092 uniform->i[2] = u_minify(tex->texture->depth0,
1093 tex->u.tex.first_level);
1094
1095 if (is_array)
1096 uniform->i[dim] = tex->texture->array_size;
1097 }
1098
1099 static void
1100 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
1101 enum pipe_shader_type st,
1102 unsigned ssbo_id,
1103 struct sysval_uniform *uniform)
1104 {
1105 struct panfrost_context *ctx = batch->ctx;
1106
1107 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
1108 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
1109
1110 /* Compute address */
1111 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
1112
1113 panfrost_batch_add_bo(batch, bo,
1114 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
1115 panfrost_bo_access_for_stage(st));
1116
1117 /* Upload address and size as sysval */
1118 uniform->du[0] = bo->gpu + sb.buffer_offset;
1119 uniform->u[2] = sb.buffer_size;
1120 }
1121
1122 static void
1123 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
1124 enum pipe_shader_type st,
1125 unsigned samp_idx,
1126 struct sysval_uniform *uniform)
1127 {
1128 struct panfrost_context *ctx = batch->ctx;
1129 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1130
1131 uniform->f[0] = sampl->min_lod;
1132 uniform->f[1] = sampl->max_lod;
1133 uniform->f[2] = sampl->lod_bias;
1134
1135 /* Even without any errata, Midgard represents "no mipmapping" as
1136 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1137 * panfrost_create_sampler_state which also explains our choice of
1138 * epsilon value (again to keep behaviour consistent) */
1139
1140 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1141 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1142 }
1143
1144 static void
1145 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1146 struct sysval_uniform *uniform)
1147 {
1148 struct panfrost_context *ctx = batch->ctx;
1149
1150 uniform->u[0] = ctx->compute_grid->grid[0];
1151 uniform->u[1] = ctx->compute_grid->grid[1];
1152 uniform->u[2] = ctx->compute_grid->grid[2];
1153 }
1154
1155 static void
1156 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1157 struct panfrost_shader_state *ss,
1158 enum pipe_shader_type st)
1159 {
1160 struct sysval_uniform *uniforms = (void *)buf;
1161
1162 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1163 int sysval = ss->sysval[i];
1164
1165 switch (PAN_SYSVAL_TYPE(sysval)) {
1166 case PAN_SYSVAL_VIEWPORT_SCALE:
1167 panfrost_upload_viewport_scale_sysval(batch,
1168 &uniforms[i]);
1169 break;
1170 case PAN_SYSVAL_VIEWPORT_OFFSET:
1171 panfrost_upload_viewport_offset_sysval(batch,
1172 &uniforms[i]);
1173 break;
1174 case PAN_SYSVAL_TEXTURE_SIZE:
1175 panfrost_upload_txs_sysval(batch, st,
1176 PAN_SYSVAL_ID(sysval),
1177 &uniforms[i]);
1178 break;
1179 case PAN_SYSVAL_SSBO:
1180 panfrost_upload_ssbo_sysval(batch, st,
1181 PAN_SYSVAL_ID(sysval),
1182 &uniforms[i]);
1183 break;
1184 case PAN_SYSVAL_NUM_WORK_GROUPS:
1185 panfrost_upload_num_work_groups_sysval(batch,
1186 &uniforms[i]);
1187 break;
1188 case PAN_SYSVAL_SAMPLER:
1189 panfrost_upload_sampler_sysval(batch, st,
1190 PAN_SYSVAL_ID(sysval),
1191 &uniforms[i]);
1192 break;
1193 default:
1194 assert(0);
1195 }
1196 }
1197 }
1198
1199 static const void *
1200 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1201 unsigned index)
1202 {
1203 struct pipe_constant_buffer *cb = &buf->cb[index];
1204 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1205
1206 if (rsrc)
1207 return rsrc->bo->cpu;
1208 else if (cb->user_buffer)
1209 return cb->user_buffer;
1210 else
1211 unreachable("No constant buffer");
1212 }
1213
1214 void
1215 panfrost_emit_const_buf(struct panfrost_batch *batch,
1216 enum pipe_shader_type stage,
1217 struct mali_vertex_tiler_postfix *postfix)
1218 {
1219 struct panfrost_context *ctx = batch->ctx;
1220 struct panfrost_shader_variants *all = ctx->shader[stage];
1221
1222 if (!all)
1223 return;
1224
1225 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1226
1227 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1228
1229 /* Uniforms are implicitly UBO #0 */
1230 bool has_uniforms = buf->enabled_mask & (1 << 0);
1231
1232 /* Allocate room for the sysval and the uniforms */
1233 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1234 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1235 size_t size = sys_size + uniform_size;
1236 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1237 size);
1238
1239 /* Upload sysvals requested by the shader */
1240 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1241
1242 /* Upload uniforms */
1243 if (has_uniforms && uniform_size) {
1244 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1245 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1246 }
1247
1248 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1249 * uploaded */
1250
1251 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1252 assert(ubo_count >= 1);
1253
1254 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1255 struct panfrost_transfer ubos = panfrost_pool_alloc(&batch->pool, sz);
1256 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1257
1258 /* Upload uniforms as a UBO */
1259
1260 if (ss->uniform_count) {
1261 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1262 cfg.entries = ss->uniform_count;
1263 cfg.pointer = transfer.gpu;
1264 }
1265 } else {
1266 *ubo_ptr = 0;
1267 }
1268
1269 /* The rest are honest-to-goodness UBOs */
1270
1271 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1272 size_t usz = buf->cb[ubo].buffer_size;
1273 bool enabled = buf->enabled_mask & (1 << ubo);
1274 bool empty = usz == 0;
1275
1276 if (!enabled || empty) {
1277 ubo_ptr[ubo] = 0;
1278 continue;
1279 }
1280
1281 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1282 cfg.entries = DIV_ROUND_UP(usz, 16);
1283 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1284 stage, buf, ubo);
1285 }
1286 }
1287
1288 postfix->uniforms = transfer.gpu;
1289 postfix->uniform_buffers = ubos.gpu;
1290
1291 buf->dirty_mask = 0;
1292 }
1293
1294 void
1295 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1296 const struct pipe_grid_info *info,
1297 struct midgard_payload_vertex_tiler *vtp)
1298 {
1299 struct panfrost_context *ctx = batch->ctx;
1300 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1301 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1302 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1303 128));
1304 unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
1305 info->grid[2] * 4;
1306 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1307 shared_size,
1308 1);
1309
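/* For example, an 8x8x1 grid with ss->shared_size <= 128 gives
 * single_size = 128, shared_size = 128 * 8 * 8 * 1 * 4 = 32768,
 * a workgroup count field of 3 + 3 + 0 = 6 and shared_shift = 6. */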
1310 struct mali_shared_memory shared = {
1311 .shared_memory = bo->gpu,
1312 .shared_workgroup_count =
1313 util_logbase2_ceil(info->grid[0]) +
1314 util_logbase2_ceil(info->grid[1]) +
1315 util_logbase2_ceil(info->grid[2]),
1316 .shared_unk1 = 0x2,
1317 .shared_shift = util_logbase2(single_size) - 1
1318 };
1319
1320 vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
1321 sizeof(shared));
1322 }
1323
1324 static mali_ptr
1325 panfrost_get_tex_desc(struct panfrost_batch *batch,
1326 enum pipe_shader_type st,
1327 struct panfrost_sampler_view *view)
1328 {
1329 if (!view)
1330 return (mali_ptr) 0;
1331
1332 struct pipe_sampler_view *pview = &view->base;
1333 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1334
1335 /* Add the BO to the job so it's retained until the job is done. */
1336
1337 panfrost_batch_add_bo(batch, rsrc->bo,
1338 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1339 panfrost_bo_access_for_stage(st));
1340
1341 panfrost_batch_add_bo(batch, view->bo,
1342 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1343 panfrost_bo_access_for_stage(st));
1344
1345 return view->bo->gpu;
1346 }
1347
1348 static void
1349 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1350 struct pipe_context *pctx)
1351 {
1352 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1353 if (view->texture_bo != rsrc->bo->gpu ||
1354 view->modifier != rsrc->modifier) {
1355 panfrost_bo_unreference(view->bo);
1356 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1357 }
1358 }
1359
1360 void
1361 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1362 enum pipe_shader_type stage,
1363 struct mali_vertex_tiler_postfix *postfix)
1364 {
1365 struct panfrost_context *ctx = batch->ctx;
1366 struct panfrost_device *device = pan_device(ctx->base.screen);
1367
1368 if (!ctx->sampler_view_count[stage])
1369 return;
1370
1371 if (device->quirks & IS_BIFROST) {
1372 struct bifrost_texture_descriptor *descriptors;
1373
1374 descriptors = malloc(sizeof(struct bifrost_texture_descriptor) *
1375 ctx->sampler_view_count[stage]);
1376
1377 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1378 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1379 struct pipe_sampler_view *pview = &view->base;
1380 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1381 panfrost_update_sampler_view(view, &ctx->base);
1382
1383 /* Add the BOs to the job so they are retained until the job is done. */
1384
1385 panfrost_batch_add_bo(batch, rsrc->bo,
1386 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1387 panfrost_bo_access_for_stage(stage));
1388
1389 panfrost_batch_add_bo(batch, view->bo,
1390 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1391 panfrost_bo_access_for_stage(stage));
1392
1393 memcpy(&descriptors[i], view->bifrost_descriptor, sizeof(*view->bifrost_descriptor));
1394 }
1395
1396 postfix->textures = panfrost_pool_upload(&batch->pool,
1397 descriptors,
1398 sizeof(struct bifrost_texture_descriptor) *
1399 ctx->sampler_view_count[stage]);
1400
1401 free(descriptors);
1402 } else {
1403 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1404
1405 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1406 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1407
1408 panfrost_update_sampler_view(view, &ctx->base);
1409
1410 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1411 }
1412
1413 postfix->textures = panfrost_pool_upload(&batch->pool,
1414 trampolines,
1415 sizeof(uint64_t) *
1416 ctx->sampler_view_count[stage]);
1417 }
1418 }
1419
1420 void
1421 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1422 enum pipe_shader_type stage,
1423 struct mali_vertex_tiler_postfix *postfix)
1424 {
1425 struct panfrost_context *ctx = batch->ctx;
1426 struct panfrost_device *device = pan_device(ctx->base.screen);
1427
1428 if (!ctx->sampler_count[stage])
1429 return;
1430
1431 if (device->quirks & IS_BIFROST) {
1432 size_t desc_size = sizeof(struct bifrost_sampler_descriptor);
1433 size_t transfer_size = desc_size * ctx->sampler_count[stage];
1434 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1435 transfer_size);
1436 struct bifrost_sampler_descriptor *desc = (struct bifrost_sampler_descriptor *)transfer.cpu;
1437
1438 for (int i = 0; i < ctx->sampler_count[stage]; ++i)
1439 desc[i] = ctx->samplers[stage][i]->bifrost_hw;
1440
1441 postfix->sampler_descriptor = transfer.gpu;
1442 } else {
1443 size_t desc_size = sizeof(struct mali_sampler_descriptor);
1444 size_t transfer_size = desc_size * ctx->sampler_count[stage];
1445 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1446 transfer_size);
1447 struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *)transfer.cpu;
1448
1449 for (int i = 0; i < ctx->sampler_count[stage]; ++i)
1450 desc[i] = ctx->samplers[stage][i]->midgard_hw;
1451
1452 postfix->sampler_descriptor = transfer.gpu;
1453 }
1454 }
1455
1456 void
1457 panfrost_emit_vertex_attr_meta(struct panfrost_batch *batch,
1458 struct mali_vertex_tiler_postfix *vertex_postfix)
1459 {
1460 struct panfrost_context *ctx = batch->ctx;
1461
1462 if (!ctx->vertex)
1463 return;
1464
1465 struct panfrost_vertex_state *so = ctx->vertex;
1466
1467 panfrost_vertex_state_upd_attr_offs(ctx, vertex_postfix);
1468 vertex_postfix->attribute_meta = panfrost_pool_upload(&batch->pool, so->hw,
1469 sizeof(*so->hw) *
1470 PAN_MAX_ATTRIBUTE);
1471 }
1472
1473 void
1474 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1475 struct mali_vertex_tiler_postfix *vertex_postfix)
1476 {
1477 struct panfrost_context *ctx = batch->ctx;
1478 struct panfrost_vertex_state *so = ctx->vertex;
1479
1480 /* Staged mali_attr, and index into them. i =/= k, depending on the
1481 * vertex buffer mask and instancing. Twice as much room is allocated,
1482 * for a worst case of NPOT_DIVIDEs, which take up an extra slot */
1483 union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
1484 unsigned k = 0;
1485
1486 for (unsigned i = 0; i < so->num_elements; ++i) {
1487 /* We map a mali_attr to be 1:1 with the mali_attr_meta, which
1488 * means duplicating some vertex buffers (who cares? aside from
1489 * maybe some caching implications but I somehow doubt that
1490 * matters) */
1491
1492 struct pipe_vertex_element *elem = &so->pipe[i];
1493 unsigned vbi = elem->vertex_buffer_index;
1494
1495 /* The exception to 1:1 mapping is that we can have multiple
1496 * entries (NPOT divisors), so we fix up anyway */
1497
1498 so->hw[i].index = k;
1499
1500 if (!(ctx->vb_mask & (1 << vbi)))
1501 continue;
1502
1503 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1504 struct panfrost_resource *rsrc;
1505
1506 rsrc = pan_resource(buf->buffer.resource);
1507 if (!rsrc)
1508 continue;
1509
1510 /* Align to 64 bytes by masking off the lower bits. This
1511 * will be adjusted back when we fixup the src_offset in
1512 * mali_attr_meta */
1513
1514 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1515 mali_ptr addr = raw_addr & ~63;
1516 unsigned chopped_addr = raw_addr - addr;
1517
1518 /* Add a dependency of the batch on the vertex buffer */
1519 panfrost_batch_add_bo(batch, rsrc->bo,
1520 PAN_BO_ACCESS_SHARED |
1521 PAN_BO_ACCESS_READ |
1522 PAN_BO_ACCESS_VERTEX_TILER);
1523
1524 /* Set common fields */
1525 attrs[k].elements = addr;
1526 attrs[k].stride = buf->stride;
1527
1528 /* Since we advanced the base pointer, we shrink the buffer
1529 * size */
1530 attrs[k].size = rsrc->base.width0 - buf->buffer_offset;
1531
1532 /* We need to add the extra size we masked off (for
1533 * correctness) so the data doesn't get clamped away */
1534 attrs[k].size += chopped_addr;
1535
1536 /* For non-instancing make sure we initialize */
1537 attrs[k].shift = attrs[k].extra_flags = 0;
1538
1539 /* Instancing uses a dramatically different code path than
1540 * linear, so dispatch for the actual emission now that the
1541 * common code is finished */
1542
1543 unsigned divisor = elem->instance_divisor;
1544
1545 if (divisor && ctx->instance_count == 1) {
1546 /* Silly corner case where there's a divisor(=1) but
1547 * there's no legitimate instancing. So we want *every*
1548 * attribute to be the same. So set stride to zero so
1549 * we don't go anywhere. */
1550
1551 attrs[k].size = attrs[k].stride + chopped_addr;
1552 attrs[k].stride = 0;
1553 attrs[k++].elements |= MALI_ATTR_LINEAR;
1554 } else if (ctx->instance_count <= 1) {
1555 /* Normal, non-instanced attributes */
1556 attrs[k++].elements |= MALI_ATTR_LINEAR;
1557 } else {
1558 unsigned instance_shift = vertex_postfix->instance_shift;
1559 unsigned instance_odd = vertex_postfix->instance_odd;
1560
1561 k += panfrost_vertex_instanced(ctx->padded_count,
1562 instance_shift,
1563 instance_odd,
1564 divisor, &attrs[k]);
1565 }
1566 }
1567
1568 /* Add special gl_VertexID/gl_InstanceID buffers */
1569
1570 panfrost_vertex_id(ctx->padded_count, &attrs[k]);
1571 so->hw[PAN_VERTEX_ID].index = k++;
1572 panfrost_instance_id(ctx->padded_count, &attrs[k]);
1573 so->hw[PAN_INSTANCE_ID].index = k++;
1574
1575 /* Upload whatever we emitted and go */
1576
1577 vertex_postfix->attributes = panfrost_pool_upload(&batch->pool, attrs,
1578 k * sizeof(*attrs));
1579 }
1580
1581 static mali_ptr
1582 panfrost_emit_varyings(struct panfrost_batch *batch, union mali_attr *slot,
1583 unsigned stride, unsigned count)
1584 {
1585 /* Fill out the descriptor */
1586 slot->stride = stride;
1587 slot->size = stride * count;
1588 slot->shift = slot->extra_flags = 0;
1589
1590 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1591 slot->size);
1592
1593 slot->elements = transfer.gpu | MALI_ATTR_LINEAR;
1594
1595 return transfer.gpu;
1596 }
1597
1598 static unsigned
1599 panfrost_streamout_offset(unsigned stride, unsigned offset,
1600 struct pipe_stream_output_target *target)
1601 {
1602 return (target->buffer_offset + (offset * stride * 4)) & 63;
1603 }
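/* The low bits computed here correspond to the alignment applied by
 * panfrost_emit_streamout below, which rounds the target address down to
 * 64 bytes and adds the remainder (addr & 63) back to the slot size. */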
1604
1605 static void
1606 panfrost_emit_streamout(struct panfrost_batch *batch, union mali_attr *slot,
1607 unsigned stride, unsigned offset, unsigned count,
1608 struct pipe_stream_output_target *target)
1609 {
1610 /* Fill out the descriptor */
1611 slot->stride = stride * 4;
1612 slot->shift = slot->extra_flags = 0;
1613
1614 unsigned max_size = target->buffer_size;
1615 unsigned expected_size = slot->stride * count;
1616
1617 /* Grab the BO and bind it to the batch */
1618 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1619
1620 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1621 * the perspective of the TILER and FRAGMENT.
1622 */
1623 panfrost_batch_add_bo(batch, bo,
1624 PAN_BO_ACCESS_SHARED |
1625 PAN_BO_ACCESS_RW |
1626 PAN_BO_ACCESS_VERTEX_TILER |
1627 PAN_BO_ACCESS_FRAGMENT);
1628
1629 /* We will have an offset applied to get alignment */
1630 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * slot->stride);
1631 slot->elements = (addr & ~63) | MALI_ATTR_LINEAR;
1632 slot->size = MIN2(max_size, expected_size) + (addr & 63);
1633 }
1634
1635 static bool
1636 has_point_coord(unsigned mask, gl_varying_slot loc)
1637 {
1638 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1639 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1640 else if (loc == VARYING_SLOT_PNTC)
1641 return (mask & (1 << 8));
1642 else
1643 return false;
1644 }
1645
1646 /* Helpers for manipulating stream out information so we can pack varyings
1647 * accordingly. Compute the src_offset for a given captured varying */
1648
1649 static struct pipe_stream_output *
1650 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1651 {
1652 for (unsigned i = 0; i < info->num_outputs; ++i) {
1653 if (info->output[i].register_index == loc)
1654 return &info->output[i];
1655 }
1656
1657 unreachable("Varying not captured");
1658 }
1659
1660 static unsigned
1661 pan_varying_size(enum mali_format fmt)
1662 {
1663 unsigned type = MALI_EXTRACT_TYPE(fmt);
1664 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1665 unsigned bits = MALI_EXTRACT_BITS(fmt);
1666 unsigned bpc = 0;
1667
1668 if (bits == MALI_CHANNEL_FLOAT) {
1669 /* No doubles */
1670 bool fp16 = (type == MALI_FORMAT_SINT);
1671 assert(fp16 || (type == MALI_FORMAT_UNORM));
1672
1673 bpc = fp16 ? 2 : 4;
1674 } else {
1675 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1676
1677 /* See the enums */
1678 bits = 1 << bits;
1679 assert(bits >= 8);
1680 bpc = bits / 8;
1681 }
1682
1683 return bpc * chan;
1684 }
1685
1686 /* Indices for named (non-XFB) varyings that are present. These are packed
1687 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1688 * PAN_VARY_*). This has the nice property that you can lookup the buffer index
1689 * of a given special field given a shift S by:
1690 *
1691 * idx = popcount(P & ((1 << S) - 1))
1692 *
1693 * That is... look at all of the varyings that come earlier and count them;
1694 * that count is the index (zero-based). Likewise, the total number of special
1695 * buffers required is simply popcount(P)
1696 */
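/* For example, present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION) |
 * (1 << PAN_VARY_PNTCOORD) = 0b1011 puts the point coord buffer at index
 * popcount(0b1011 & 0b0111) = 2, with popcount(0b1011) = 3 buffers total. */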
1697
1698 enum pan_special_varying {
1699 PAN_VARY_GENERAL = 0,
1700 PAN_VARY_POSITION = 1,
1701 PAN_VARY_PSIZ = 2,
1702 PAN_VARY_PNTCOORD = 3,
1703 PAN_VARY_FACE = 4,
1704 PAN_VARY_FRAGCOORD = 5,
1705
1706 /* Keep last */
1707 PAN_VARY_MAX,
1708 };
1709
1710 /* Given a varying, figure out which index it corresponds to */
1711
1712 static inline unsigned
1713 pan_varying_index(unsigned present, enum pan_special_varying v)
1714 {
1715 unsigned mask = (1 << v) - 1;
1716 return util_bitcount(present & mask);
1717 }
1718
1719 /* Get the base offset for XFB buffers, which by convention come after
1720 * everything else. Wrapper function for semantic reasons; by construction this
1721 * is just popcount. */
1722
1723 static inline unsigned
1724 pan_xfb_base(unsigned present)
1725 {
1726 return util_bitcount(present);
1727 }
1728
1729 /* Computes the present mask for varyings so we can start emitting varying records */
1730
1731 static inline unsigned
1732 pan_varying_present(
1733 struct panfrost_shader_state *vs,
1734 struct panfrost_shader_state *fs,
1735 unsigned quirks)
1736 {
1737 /* At the moment we always emit general and position buffers. Not
1738 * strictly necessary but usually harmless */
1739
1740 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1741
1742 /* Enable special buffers by the shader info */
1743
1744 if (vs->writes_point_size)
1745 present |= (1 << PAN_VARY_PSIZ);
1746
1747 if (fs->reads_point_coord)
1748 present |= (1 << PAN_VARY_PNTCOORD);
1749
1750 if (fs->reads_face)
1751 present |= (1 << PAN_VARY_FACE);
1752
1753 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1754 present |= (1 << PAN_VARY_FRAGCOORD);
1755
1756 /* Also, if we have a point sprite, we need a point coord buffer */
1757
1758 for (unsigned i = 0; i < fs->varying_count; i++) {
1759 gl_varying_slot loc = fs->varyings_loc[i];
1760
1761 if (has_point_coord(fs->point_sprite_mask, loc))
1762 present |= (1 << PAN_VARY_PNTCOORD);
1763 }
1764
1765 return present;
1766 }
1767
1768 /* Emitters for varying records */
1769
1770 static struct mali_attr_meta
1771 pan_emit_vary(unsigned present, enum pan_special_varying buf,
1772 unsigned quirks, enum mali_format format,
1773 unsigned offset)
1774 {
1775 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1776
1777 struct mali_attr_meta meta = {
1778 .index = pan_varying_index(present, buf),
1779 .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
1780 .swizzle = quirks & HAS_SWIZZLES ?
1781 panfrost_get_default_swizzle(nr_channels) :
1782 panfrost_bifrost_swizzle(nr_channels),
1783 .format = format,
1784 .src_offset = offset
1785 };
1786
1787 return meta;
1788 }
1789
1790 /* General varying that is unused */
1791
1792 static struct mali_attr_meta
1793 pan_emit_vary_only(unsigned present, unsigned quirks)
1794 {
1795 return pan_emit_vary(present, 0, quirks, MALI_VARYING_DISCARD, 0);
1796 }
1797
1798 /* Special records */
1799
1800 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1801 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1802 [PAN_VARY_PSIZ] = MALI_R16F,
1803 [PAN_VARY_PNTCOORD] = MALI_R16F,
1804 [PAN_VARY_FACE] = MALI_R32I,
1805 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1806 };
1807
1808 static struct mali_attr_meta
1809 pan_emit_vary_special(unsigned present, enum pan_special_varying buf,
1810 unsigned quirks)
1811 {
1812 assert(buf < PAN_VARY_MAX);
1813 return pan_emit_vary(present, buf, quirks, pan_varying_formats[buf], 0);
1814 }
1815
1816 static enum mali_format
1817 pan_xfb_format(enum mali_format format, unsigned nr)
1818 {
1819 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1820 return MALI_R32F | MALI_NR_CHANNELS(nr);
1821 else
1822 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1823 }
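
/* Illustrative example for pan_xfb_format: an fp16 varying captured with
 * three components becomes MALI_R32F | MALI_NR_CHANNELS(3), i.e. it is
 * widened to 32-bit float with the component count taken from the stream
 * output, while non-float formats keep their type but are forced to 32 bits
 * per channel via MALI_CHANNEL_32. */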
1824
1825 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1826 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1827 * value. */
1828
1829 static struct mali_attr_meta
1830 pan_emit_vary_xfb(unsigned present,
1831 unsigned max_xfb,
1832 unsigned *streamout_offsets,
1833 unsigned quirks,
1834 enum mali_format format,
1835 struct pipe_stream_output o)
1836 {
1837 /* Otherwise construct a record for it */
1838 struct mali_attr_meta meta = {
1839 /* XFB buffers come after everything else */
1840 .index = pan_xfb_base(present) + o.output_buffer,
1841
1842 /* The usual unknown bit */
1843 .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
1844
1845 /* Override swizzle with number of channels */
1846 .swizzle = quirks & HAS_SWIZZLES ?
1847 panfrost_get_default_swizzle(o.num_components) :
1848 panfrost_bifrost_swizzle(o.num_components),
1849
1850 /* Override number of channels and precision to highp */
1851 .format = pan_xfb_format(format, o.num_components),
1852
1853 /* Apply given offsets together */
1854 .src_offset = (o.dst_offset * 4) /* dwords */
1855 + streamout_offsets[o.output_buffer]
1856 };
1857
1858 return meta;
1859 }
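
/* Worked example for the record above (illustrative numbers): with two
 * special buffers present (pan_xfb_base(present) == 2), a varying captured
 * into output_buffer 1 at dst_offset 3 on top of a 256-byte streamout offset
 * gets .index = 2 + 1 = 3 and .src_offset = 3 * 4 + 256 = 268, since
 * dst_offset counts dwords. */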
1860
1861 /* Determine if we should capture a varying for XFB. This requires actually
1862  * having a buffer for it. If we don't capture it, we fall back to a general
1863 * varying path (linked or unlinked, possibly discarding the write) */
1864
1865 static bool
1866 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1867 unsigned loc, unsigned max_xfb)
1868 {
1869 if (!(xfb->so_mask & (1ll << loc)))
1870 return false;
1871
1872 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1873 return o->output_buffer < max_xfb;
1874 }
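
/* For instance (illustrative), a varying whose so_mask bit is set but whose
 * stream output targets output_buffer 2 when only two targets are bound
 * (max_xfb == 2) is not considered captured, and panfrost_emit_varying below
 * routes it through the general varying path instead. */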
1875
1876 /* Higher-level wrapper around all of the above, classifying a varying into one
1877 * of the above types */
1878
1879 static struct mali_attr_meta
1880 panfrost_emit_varying(
1881 struct panfrost_shader_state *stage,
1882 struct panfrost_shader_state *other,
1883 struct panfrost_shader_state *xfb,
1884 unsigned present,
1885 unsigned max_xfb,
1886 unsigned *streamout_offsets,
1887 unsigned quirks,
1888 unsigned *gen_offsets,
1889 enum mali_format *gen_formats,
1890 unsigned *gen_stride,
1891 unsigned idx,
1892 bool should_alloc,
1893 bool is_fragment)
1894 {
1895 gl_varying_slot loc = stage->varyings_loc[idx];
1896 enum mali_format format = stage->varyings[idx];
1897
1898 /* Override format to match linkage */
1899 if (!should_alloc && gen_formats[idx])
1900 format = gen_formats[idx];
1901
1902 if (has_point_coord(stage->point_sprite_mask, loc)) {
1903 return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
1904 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1905 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1906 return pan_emit_vary_xfb(present, max_xfb, streamout_offsets, quirks, format, *o);
1907 } else if (loc == VARYING_SLOT_POS) {
1908 if (is_fragment)
1909 return pan_emit_vary_special(present, PAN_VARY_FRAGCOORD, quirks);
1910 else
1911 return pan_emit_vary_special(present, PAN_VARY_POSITION, quirks);
1912 } else if (loc == VARYING_SLOT_PSIZ) {
1913 return pan_emit_vary_special(present, PAN_VARY_PSIZ, quirks);
1914 } else if (loc == VARYING_SLOT_PNTC) {
1915 return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
1916 } else if (loc == VARYING_SLOT_FACE) {
1917 return pan_emit_vary_special(present, PAN_VARY_FACE, quirks);
1918 }
1919
1920 /* We've exhausted special cases, so it's otherwise a general varying. Check if we're linked */
1921 signed other_idx = -1;
1922
1923 for (unsigned j = 0; j < other->varying_count; ++j) {
1924 if (other->varyings_loc[j] == loc) {
1925 other_idx = j;
1926 break;
1927 }
1928 }
1929
1930 if (other_idx < 0)
1931 return pan_emit_vary_only(present, quirks);
1932
1933 unsigned offset = gen_offsets[other_idx];
1934
1935 if (should_alloc) {
1936 /* We're linked, so allocate a space via a watermark allocation */
1937 enum mali_format alt = other->varyings[other_idx];
1938
1939 /* Do interpolation at minimum precision */
1940 unsigned size_main = pan_varying_size(format);
1941 unsigned size_alt = pan_varying_size(alt);
1942 unsigned size = MIN2(size_main, size_alt);
1943
1944 /* If a varying is marked for XFB but not actually captured, we
1945 * should match the format to the format that would otherwise
1946 * be used for XFB, since dEQP checks for invariance here. It's
1947 * unclear if this is required by the spec. */
1948
1949 if (xfb->so_mask & (1ull << loc)) {
1950 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1951 format = pan_xfb_format(format, o->num_components);
1952 size = pan_varying_size(format);
1953 } else if (size == size_alt) {
1954 format = alt;
1955 }
1956
1957 gen_offsets[idx] = *gen_stride;
1958 gen_formats[other_idx] = format;
1959 offset = *gen_stride;
1960 *gen_stride += size;
1961 }
1962
1963 return pan_emit_vary(present, PAN_VARY_GENERAL,
1964 quirks, format, offset);
1965 }
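
/* Sketch of the watermark allocation above (illustrative sizes): during the
 * vertex pass (should_alloc), linking a 16-byte vec4 and then a 4-byte float
 * records offsets 0 and 16 and leaves *gen_stride at 20, which later becomes
 * the stride of the general varying buffer; the fragment pass reuses those
 * offsets and the negotiated formats via gen_offsets/gen_formats instead of
 * allocating again. */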
1966
1967 static void
1968 pan_emit_special_input(union mali_attr *varyings,
1969 unsigned present,
1970 enum pan_special_varying v,
1971 mali_ptr addr)
1972 {
1973 if (present & (1 << v)) {
1974 /* Ensure we write exactly once for performance and with fields
1975 * zeroed appropriately to avoid flakes */
1976
1977 union mali_attr s = {
1978 .elements = addr
1979 };
1980
1981 varyings[pan_varying_index(present, v)] = s;
1982 }
1983 }
1984
1985 void
1986 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1987 unsigned vertex_count,
1988 struct mali_vertex_tiler_postfix *vertex_postfix,
1989 struct mali_vertex_tiler_postfix *tiler_postfix,
1990 union midgard_primitive_size *primitive_size)
1991 {
1992 /* Load the shaders */
1993 struct panfrost_context *ctx = batch->ctx;
1994 struct panfrost_device *dev = pan_device(ctx->base.screen);
1995 struct panfrost_shader_state *vs, *fs;
1996 size_t vs_size, fs_size;
1997
1998 /* Allocate the varying descriptor */
1999
2000 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
2001 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
2002 vs_size = sizeof(struct mali_attr_meta) * vs->varying_count;
2003 fs_size = sizeof(struct mali_attr_meta) * fs->varying_count;
2004
2005 struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
2006 vs_size +
2007 fs_size);
2008
2009 struct pipe_stream_output_info *so = &vs->stream_output;
2010 unsigned present = pan_varying_present(vs, fs, dev->quirks);
2011
2012 /* Check if this varying is linked by us. This is the case for
2013 * general-purpose, non-captured varyings. If it is, link it. If it's
2014 * not, use the provided stream out information to determine the
2015 * offset, since it was already linked for us. */
2016
2017 unsigned gen_offsets[32];
2018 enum mali_format gen_formats[32];
2019 memset(gen_offsets, 0, sizeof(gen_offsets));
2020 memset(gen_formats, 0, sizeof(gen_formats));
2021
2022 unsigned gen_stride = 0;
2023 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
2024 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
2025
2026 unsigned streamout_offsets[32];
2027
2028 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2029 streamout_offsets[i] = panfrost_streamout_offset(
2030 so->stride[i],
2031 ctx->streamout.offsets[i],
2032 ctx->streamout.targets[i]);
2033 }
2034
2035 struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
2036 struct mali_attr_meta *ofs = ovs + vs->varying_count;
2037
2038 for (unsigned i = 0; i < vs->varying_count; i++) {
2039 ovs[i] = panfrost_emit_varying(vs, fs, vs, present,
2040 ctx->streamout.num_targets, streamout_offsets,
2041 dev->quirks,
2042 gen_offsets, gen_formats, &gen_stride, i, true, false);
2043 }
2044
2045 for (unsigned i = 0; i < fs->varying_count; i++) {
2046 ofs[i] = panfrost_emit_varying(fs, vs, vs, present,
2047 ctx->streamout.num_targets, streamout_offsets,
2048 dev->quirks,
2049 gen_offsets, gen_formats, &gen_stride, i, false, true);
2050 }
2051
2052 unsigned xfb_base = pan_xfb_base(present);
2053 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
2054 sizeof(union mali_attr) * (xfb_base + ctx->streamout.num_targets));
2055 union mali_attr *varyings = (union mali_attr *) T.cpu;
2056
2057 /* Emit the stream out buffers */
2058
2059 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
2060 ctx->vertex_count);
2061
2062 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2063 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
2064 so->stride[i],
2065 ctx->streamout.offsets[i],
2066 out_count,
2067 ctx->streamout.targets[i]);
2068 }
2069
2070 panfrost_emit_varyings(batch,
2071 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2072 gen_stride, vertex_count);
2073
2074 /* fp32 vec4 gl_Position */
2075 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2076 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2077 sizeof(float) * 4, vertex_count);
2078
2079 if (present & (1 << PAN_VARY_PSIZ)) {
2080 primitive_size->pointer = panfrost_emit_varyings(batch,
2081 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2082 2, vertex_count);
2083 }
2084
2085 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_VARYING_POINT_COORD);
2086 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_VARYING_FRONT_FACING);
2087 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_VARYING_FRAG_COORD);
2088
2089 vertex_postfix->varyings = T.gpu;
2090 tiler_postfix->varyings = T.gpu;
2091
2092 vertex_postfix->varying_meta = trans.gpu;
2093 tiler_postfix->varying_meta = trans.gpu + vs_size;
2094 }
2095
2096 void
2097 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2098 struct mali_vertex_tiler_prefix *vertex_prefix,
2099 struct mali_vertex_tiler_postfix *vertex_postfix,
2100 struct mali_vertex_tiler_prefix *tiler_prefix,
2101 struct mali_vertex_tiler_postfix *tiler_postfix,
2102 union midgard_primitive_size *primitive_size)
2103 {
2104 struct panfrost_context *ctx = batch->ctx;
2105 struct panfrost_device *device = pan_device(ctx->base.screen);
2106 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2107 struct bifrost_payload_vertex bifrost_vertex = {0,};
2108 struct bifrost_payload_tiler bifrost_tiler = {0,};
2109 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2110 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2111 void *vp, *tp;
2112 size_t vp_size, tp_size;
2113
2114 if (device->quirks & IS_BIFROST) {
2115 bifrost_vertex.prefix = *vertex_prefix;
2116 bifrost_vertex.postfix = *vertex_postfix;
2117 vp = &bifrost_vertex;
2118 vp_size = sizeof(bifrost_vertex);
2119
2120 bifrost_tiler.prefix = *tiler_prefix;
2121 bifrost_tiler.tiler.primitive_size = *primitive_size;
2122 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2123 bifrost_tiler.postfix = *tiler_postfix;
2124 tp = &bifrost_tiler;
2125 tp_size = sizeof(bifrost_tiler);
2126 } else {
2127 midgard_vertex.prefix = *vertex_prefix;
2128 midgard_vertex.postfix = *vertex_postfix;
2129 vp = &midgard_vertex;
2130 vp_size = sizeof(midgard_vertex);
2131
2132 midgard_tiler.prefix = *tiler_prefix;
2133 midgard_tiler.postfix = *tiler_postfix;
2134 midgard_tiler.primitive_size = *primitive_size;
2135 tp = &midgard_tiler;
2136 tp_size = sizeof(midgard_tiler);
2137 }
2138
2139 if (wallpapering) {
2140 /* Inject in reverse order, with "predicted" job indices.
2141 * THIS IS A HACK XXX */
2142 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2143 batch->scoreboard.job_index + 2, tp, tp_size, true);
2144 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2145 vp, vp_size, true);
2146 return;
2147 }
2148
2149 /* If rasterizer discard is enabled, only submit the vertex job */
2150
2151 bool rasterizer_discard = ctx->rasterizer &&
2152 ctx->rasterizer->base.rasterizer_discard;
2153
2154 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2155 vp, vp_size, false);
2156
2157 if (rasterizer_discard)
2158 return;
2159
2160 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2161 false);
2162 }
2163
2164 /* TODO: stop hardcoding this */
2165 mali_ptr
2166 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2167 {
2168 uint16_t locations[] = {
2169 128, 128,
2170 0, 256,
2171 0, 256,
2172 0, 256,
2173 0, 256,
2174 0, 256,
2175 0, 256,
2176 0, 256,
2177 0, 256,
2178 0, 256,
2179 0, 256,
2180 0, 256,
2181 0, 256,
2182 0, 256,
2183 0, 256,
2184 0, 256,
2185 0, 256,
2186 0, 256,
2187 0, 256,
2188 0, 256,
2189 0, 256,
2190 0, 256,
2191 0, 256,
2192 0, 256,
2193 0, 256,
2194 0, 256,
2195 0, 256,
2196 0, 256,
2197 0, 256,
2198 0, 256,
2199 0, 256,
2200 0, 256,
2201 128, 128,
2202 0, 0,
2203 0, 0,
2204 0, 0,
2205 0, 0,
2206 0, 0,
2207 0, 0,
2208 0, 0,
2209 0, 0,
2210 0, 0,
2211 0, 0,
2212 0, 0,
2213 0, 0,
2214 0, 0,
2215 0, 0,
2216 0, 0,
2217 };
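
/* The table holds 48 coordinate pairs, i.e. 96 uint16_t values, so the
 * upload below covers exactly sizeof(locations) bytes. */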
2218
2219 return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
2220 }