3d0a57aff24c0a560d8f70eb8e97c65ba0d051c1
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 unsigned shift = panfrost_get_stack_shift(batch->stack_size);
62 struct mali_shared_memory shared = {
63 .stack_shift = shift,
64 .scratchpad = panfrost_batch_get_scratchpad(batch, shift, dev->thread_tls_alloc, dev->core_count)->gpu,
65 .shared_workgroup_count = ~0,
66 };
67 postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
68 }
69
70 static void
71 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
72 struct mali_vertex_tiler_postfix *postfix)
73 {
74 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
75 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
76 }
77
78 static void
79 panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_prefix *prefix,
81 struct mali_vertex_tiler_postfix *postfix)
82 {
83 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
84
85 postfix->gl_enables |= 0x7;
86 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
87 rasterizer && rasterizer->base.front_ccw);
88 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
89 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
90 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
91 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
92 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
93 rasterizer && rasterizer->base.flatshade_first);
94 }
95
96 void
97 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
98 struct mali_vertex_tiler_prefix *prefix,
99 union midgard_primitive_size *primitive_size)
100 {
101 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
102
103 if (!panfrost_writes_point_size(ctx)) {
104 bool points = prefix->draw_mode == MALI_DRAW_MODE_POINTS;
105 float val = 0.0f;
106
107 if (rasterizer)
108 val = points ?
109 rasterizer->base.point_size :
110 rasterizer->base.line_width;
111
112 primitive_size->constant = val;
113 }
114 }
115
116 static void
117 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
118 struct mali_vertex_tiler_postfix *postfix)
119 {
120 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
121 if (ctx->occlusion_query) {
122 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
123 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
124 PAN_BO_ACCESS_SHARED |
125 PAN_BO_ACCESS_RW |
126 PAN_BO_ACCESS_FRAGMENT);
127 } else {
128 postfix->occlusion_counter = 0;
129 }
130 }
131
132 void
133 panfrost_vt_init(struct panfrost_context *ctx,
134 enum pipe_shader_type stage,
135 struct mali_vertex_tiler_prefix *prefix,
136 struct mali_vertex_tiler_postfix *postfix)
137 {
138 struct panfrost_device *device = pan_device(ctx->base.screen);
139
140 if (!ctx->shader[stage])
141 return;
142
143 memset(prefix, 0, sizeof(*prefix));
144 memset(postfix, 0, sizeof(*postfix));
145
146 if (device->quirks & IS_BIFROST) {
147 postfix->gl_enables = 0x2;
148 panfrost_vt_emit_shared_memory(ctx, postfix);
149 } else {
150 postfix->gl_enables = 0x6;
151 panfrost_vt_attach_framebuffer(ctx, postfix);
152 }
153
154 if (stage == PIPE_SHADER_FRAGMENT) {
155 panfrost_vt_update_occlusion_query(ctx, postfix);
156 panfrost_vt_update_rasterizer(ctx, prefix, postfix);
157 }
158 }
159
160 static unsigned
161 panfrost_translate_index_size(unsigned size)
162 {
163 switch (size) {
164 case 1:
165 return MALI_DRAW_INDEXED_UINT8;
166
167 case 2:
168 return MALI_DRAW_INDEXED_UINT16;
169
170 case 4:
171 return MALI_DRAW_INDEXED_UINT32;
172
173 default:
174 unreachable("Invalid index size");
175 }
176 }
177
178 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
179 * good for the duration of the draw (transient), could last longer. Also get
180 * the bounds on the index buffer for the range accessed by the draw. We do
181 * these operations together because there are natural optimizations which
182 * require them to be together. */
183
184 static mali_ptr
185 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
186 const struct pipe_draw_info *info,
187 unsigned *min_index, unsigned *max_index)
188 {
189 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
190 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
191 off_t offset = info->start * info->index_size;
192 bool needs_indices = true;
193 mali_ptr out = 0;
194
195 if (info->max_index != ~0u) {
196 *min_index = info->min_index;
197 *max_index = info->max_index;
198 needs_indices = false;
199 }
200
201 if (!info->has_user_indices) {
202 /* Only resources can be directly mapped */
203 panfrost_batch_add_bo(batch, rsrc->bo,
204 PAN_BO_ACCESS_SHARED |
205 PAN_BO_ACCESS_READ |
206 PAN_BO_ACCESS_VERTEX_TILER);
207 out = rsrc->bo->gpu + offset;
208
209 /* Check the cache */
210 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
211 info->start,
212 info->count,
213 min_index,
214 max_index);
215 } else {
216 /* Otherwise, we need to upload to transient memory */
217 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
218 out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
219 info->count *
220 info->index_size);
221 }
222
223 if (needs_indices) {
224 /* Fallback */
225 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
226
227 if (!info->has_user_indices)
228 panfrost_minmax_cache_add(rsrc->index_cache,
229 info->start, info->count,
230 *min_index, *max_index);
231 }
232
233 return out;
234 }
235
236 void
237 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
238 const struct pipe_draw_info *info,
239 enum mali_draw_mode draw_mode,
240 struct mali_vertex_tiler_postfix *vertex_postfix,
241 struct mali_vertex_tiler_prefix *tiler_prefix,
242 struct mali_vertex_tiler_postfix *tiler_postfix,
243 unsigned *vertex_count,
244 unsigned *padded_count)
245 {
246 tiler_prefix->draw_mode = draw_mode;
247
248 unsigned draw_flags = 0;
249
250 if (panfrost_writes_point_size(ctx))
251 draw_flags |= MALI_DRAW_VARYING_SIZE;
252
253 if (info->primitive_restart)
254 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
255
256 /* These don't make much sense */
257
258 draw_flags |= 0x3000;
259
260 if (info->index_size) {
261 unsigned min_index = 0, max_index = 0;
262
263 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
264 info,
265 &min_index,
266 &max_index);
267
268 /* Use the corresponding values */
269 *vertex_count = max_index - min_index + 1;
270 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
271 tiler_prefix->offset_bias_correction = -min_index;
272 tiler_prefix->index_count = MALI_POSITIVE(info->count);
273 draw_flags |= panfrost_translate_index_size(info->index_size);
274 } else {
275 tiler_prefix->indices = 0;
276 *vertex_count = ctx->vertex_count;
277 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
278 tiler_prefix->offset_bias_correction = 0;
279 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
280 }
281
282 tiler_prefix->unknown_draw = draw_flags;
283
284 /* Encode the padded vertex count */
285
286 if (info->instance_count > 1) {
287 *padded_count = panfrost_padded_vertex_count(*vertex_count);
288
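/* shift is the number of trailing zeros of the padded count, so the count
 * decomposes exactly as (2 * k + 1) << shift: an odd factor times a power
 * of two. That (shift, k) pair is what lands in instance_shift and
 * instance_odd below. */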
289 unsigned shift = __builtin_ctz(ctx->padded_count);
290 unsigned k = ctx->padded_count >> (shift + 1);
291
292 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
293 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
294 } else {
295 *padded_count = *vertex_count;
296
297 /* Reset instancing state */
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
300 }
301 }
302
303 static void
304 panfrost_shader_meta_init(struct panfrost_context *ctx,
305 enum pipe_shader_type st,
306 struct mali_shader_meta *meta)
307 {
308 const struct panfrost_device *dev = pan_device(ctx->base.screen);
309 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
310
311 memset(meta, 0, sizeof(*meta));
312 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
313 meta->attribute_count = ss->attribute_count;
314 meta->varying_count = ss->varying_count;
315 meta->texture_count = ctx->sampler_view_count[st];
316 meta->sampler_count = ctx->sampler_count[st];
317
318 if (dev->quirks & IS_BIFROST) {
319 if (st == PIPE_SHADER_VERTEX)
320 meta->bifrost1.unk1 = 0x800000;
321 else {
322 /* First clause ATEST |= 0x4000000.
323 * Less than 32 regs |= 0x200 */
324 meta->bifrost1.unk1 = 0x950020;
325 }
326
327 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
328 if (st == PIPE_SHADER_VERTEX)
329 meta->bifrost2.preload_regs = 0xC0;
330 else {
331 meta->bifrost2.preload_regs = 0x1;
332 SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
333 }
334
335 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
336 ss->uniform_cutoff);
337 } else {
338 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
339 ss->uniform_cutoff);
340 meta->midgard1.work_count = ss->work_reg_count;
341
342 /* TODO: This is not conformant on ES3 */
343 meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
344
345 meta->midgard1.flags_lo = 0x20;
346 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
347
348 SET_BIT(meta->midgard1.flags_hi, MALI_WRITES_GLOBAL, ss->writes_global);
349 }
350 }
351
352 static unsigned
353 translate_tex_wrap(enum pipe_tex_wrap w)
354 {
355 switch (w) {
356 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
357 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
358 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
359 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
360 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
361 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
362 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
363 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
364 default: unreachable("Invalid wrap");
365 }
366 }
367
368 /* The hardware compares in the wrong order, so we have to flip before
369 * encoding. Yes, really. */
370
371 static enum mali_func
372 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
373 {
374 if (!cso->compare_mode)
375 return MALI_FUNC_NEVER;
376
377 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
378 return panfrost_flip_compare_func(f);
379 }
380
381 static enum mali_mipmap_mode
382 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
383 {
384 switch (f) {
385 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
386 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
387 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
388 default: unreachable("Invalid");
389 }
390 }
391
392 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
393 struct mali_midgard_sampler_packed *hw)
394 {
395 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
396 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
397 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
398 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
399 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
400 cfg.normalized_coordinates = cso->normalized_coords;
401
402 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
403
404 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
405
406 /* If necessary, we disable mipmapping in the sampler descriptor by
407 * clamping the LOD as tight as possible (from 0 to epsilon,
408 * essentially -- remember these are fixed point numbers, so
409 * epsilon=1/256) */
410
411 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
412 cfg.minimum_lod + 1 :
413 FIXED_16(cso->max_lod, false);
414
415 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
416 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
417 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
418
419 cfg.compare_function = panfrost_sampler_compare_func(cso);
420 cfg.seamless_cube_map = cso->seamless_cube_map;
421
422 cfg.border_color_r = cso->border_color.f[0];
423 cfg.border_color_g = cso->border_color.f[1];
424 cfg.border_color_b = cso->border_color.f[2];
425 cfg.border_color_a = cso->border_color.f[3];
426 }
427 }
428
429 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
430 uint32_t *_hw)
431 {
432 struct bifrost_sampler_descriptor *hw = (struct bifrost_sampler_descriptor *) _hw;
433 *hw = (struct bifrost_sampler_descriptor) {
434 .unk1 = 0x1,
435 .wrap_s = translate_tex_wrap(cso->wrap_s),
436 .wrap_t = translate_tex_wrap(cso->wrap_t),
437 .wrap_r = translate_tex_wrap(cso->wrap_r),
438 .unk8 = 0x8,
439 .min_filter = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST,
440 .norm_coords = cso->normalized_coords,
441 .mip_filter = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR,
442 .mag_filter = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR,
443 .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
444 .max_lod = FIXED_16(cso->max_lod, false),
445 };
446
447 /* If necessary, we disable mipmapping in the sampler descriptor by
448 * clamping the LOD as tight as possible (from 0 to epsilon,
449 * essentially -- remember these are fixed point numbers, so
450 * epsilon=1/256) */
451
452 if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
453 hw->max_lod = hw->min_lod + 1;
454 }
455
456 static void
457 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
458 struct mali_shader_meta *fragmeta)
459 {
460 if (!ctx->rasterizer) {
461 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
462 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
463 fragmeta->depth_units = 0.0f;
464 fragmeta->depth_factor = 0.0f;
465 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
466 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
467 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, true);
468 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, true);
469 return;
470 }
471
472 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
473
474 bool msaa = rast->multisample;
475
476 /* TODO: Sample size */
477 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
478 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
479
480 struct panfrost_shader_state *fs;
481 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
482
483 /* EXT_shader_framebuffer_fetch requires the shader to be run
484 * per-sample when outputs are read. */
485 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
486 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
487
488 fragmeta->depth_units = rast->offset_units * 2.0f;
489 fragmeta->depth_factor = rast->offset_scale;
490
491 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
492
493 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
494 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
495
496 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
497 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
498 }
499
500 static void
501 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
502 struct mali_shader_meta *fragmeta)
503 {
504 const struct panfrost_zsa_state *so = ctx->depth_stencil;
505 int zfunc = PIPE_FUNC_ALWAYS;
506
507 if (!so) {
508 /* If stenciling is disabled, the state is irrelevant */
509 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
510 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
511 } else {
512 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
513 so->base.stencil[0].enabled);
514
515 fragmeta->stencil_mask_front = so->stencil_mask_front;
516 fragmeta->stencil_mask_back = so->stencil_mask_back;
517
518 /* Bottom bits for stencil ref, exactly one word */
519 fragmeta->stencil_front.opaque[0] = so->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
520
521 /* If back-stencil is not enabled, use the front values */
522
523 if (so->base.stencil[1].enabled)
524 fragmeta->stencil_back.opaque[0] = so->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
525 else
526 fragmeta->stencil_back = fragmeta->stencil_front;
527
528 if (so->base.depth.enabled)
529 zfunc = so->base.depth.func;
530
531 /* Depth state (TODO: Refactor) */
532
533 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
534 so->base.depth.writemask);
535 }
536
537 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
538 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
539 }
540
541 static bool
542 panfrost_fs_required(
543 struct panfrost_shader_state *fs,
544 struct panfrost_blend_final *blend,
545 unsigned rt_count)
546 {
547 /* If we generally have side effects */
548 if (fs->fs_sidefx)
549 return true;
550
551 /* If colour is written we need to execute */
552 for (unsigned i = 0; i < rt_count; ++i) {
553 if (!blend[i].no_colour)
554 return true;
555 }
556
557 /* If depth is written and not implied we need to execute.
558 * TODO: Predicate on Z/S writes being enabled */
559 return (fs->writes_depth || fs->writes_stencil);
560 }
561
562 static void
563 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
564 struct mali_shader_meta *fragmeta,
565 void *rts)
566 {
567 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
568 const struct panfrost_device *dev = pan_device(ctx->base.screen);
569 struct panfrost_shader_state *fs;
570 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
571
572 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
573 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
574 !ctx->blend->base.dither);
575
576 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
577 ctx->blend->base.alpha_to_coverage);
578
579 /* Get blending setup */
580 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
581
582 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
583 unsigned shader_offset = 0;
584 struct panfrost_bo *shader_bo = NULL;
585
586 for (unsigned c = 0; c < rt_count; ++c)
587 blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
588 &shader_offset);
589
590 /* Disable shader execution if we can */
591 if (dev->quirks & MIDGARD_SHADERLESS
592 && !panfrost_fs_required(fs, blend, rt_count)) {
593 fragmeta->shader = 0;
594 fragmeta->attribute_count = 0;
595 fragmeta->varying_count = 0;
596 fragmeta->texture_count = 0;
597 fragmeta->sampler_count = 0;
598
599 /* This feature is not known to work on Bifrost */
600 fragmeta->midgard1.work_count = 1;
601 fragmeta->midgard1.uniform_count = 0;
602 fragmeta->midgard1.uniform_buffer_count = 0;
603 }
604
605 /* If there is a blend shader, work registers are shared. We impose 8
606 * work registers as a limit for blend shaders. Should be lower XXX */
607
608 if (!(dev->quirks & IS_BIFROST)) {
609 for (unsigned c = 0; c < rt_count; ++c) {
610 if (blend[c].is_shader) {
611 fragmeta->midgard1.work_count =
612 MAX2(fragmeta->midgard1.work_count, 8);
613 }
614 }
615 }
616
617 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
618 * copied to the blend_meta appended (by convention), but this is the
619 * field actually read by the hardware. (Or maybe both are read...?).
620 * Specify the last RTi with a blend shader. */
621
622 fragmeta->blend.shader = 0;
623
624 for (signed rt = (rt_count - 1); rt >= 0; --rt) {
625 if (!blend[rt].is_shader)
626 continue;
627
628 fragmeta->blend.shader = blend[rt].shader.gpu |
629 blend[rt].shader.first_tag;
630 break;
631 }
632
633 if (dev->quirks & MIDGARD_SFBD) {
634 /* On single render target (SFBD) platforms, the blend
635 * information is inside the shader meta itself. We additionally
636 * need to signal CAN_DISCARD for nontrivial blend modes (so
637 * we're able to read back the destination buffer) */
638
639 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
640 blend[0].is_shader);
641
642 if (!blend[0].is_shader) {
643 fragmeta->blend.equation = *blend[0].equation.equation;
644 fragmeta->blend.constant = blend[0].equation.constant;
645 }
646
647 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
648 !blend[0].no_blending || fs->can_discard);
649
650 batch->draws |= PIPE_CLEAR_COLOR0;
651 return;
652 }
653
654 if (dev->quirks & IS_BIFROST) {
655 bool no_blend = true;
656
657 for (unsigned i = 0; i < rt_count; ++i)
658 no_blend &= (blend[i].no_blending | blend[i].no_colour);
659
660 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
661 !fs->can_discard && !fs->writes_depth && no_blend);
662 }
663
664 /* Additional blend descriptor tacked on for jobs using MFBD */
665
666 for (unsigned i = 0; i < rt_count; ++i) {
667 unsigned flags = 0;
668
669 if (ctx->pipe_framebuffer.nr_cbufs > i && !blend[i].no_colour) {
670 flags = 0x200;
671 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
672
673 bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
674 (ctx->pipe_framebuffer.cbufs[i]) &&
675 util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
676
677 SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
678 SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
679 SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
680 SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
681 }
682
683 if (dev->quirks & IS_BIFROST) {
684 struct bifrost_blend_rt *brts = rts;
685
686 brts[i].flags = flags;
687
688 if (blend[i].is_shader) {
689 /* The blend shader's address needs to be at
690 * the same top 32 bit as the fragment shader.
691 * TODO: Ensure that's always the case.
692 */
693 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
694 (fs->bo->gpu & (0xffffffffull << 32)));
695 brts[i].shader = blend[i].shader.gpu;
696 brts[i].unk2 = 0x0;
697 } else if (ctx->pipe_framebuffer.nr_cbufs > i) {
698 enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
699 const struct util_format_description *format_desc;
700 format_desc = util_format_description(format);
701
702 brts[i].equation = *blend[i].equation.equation;
703
704 /* TODO: this is a bit more complicated */
705 brts[i].constant = blend[i].equation.constant;
706
707 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
708
709 /* 0x19 disables blending and forces REPLACE
710 * mode (equivalent to rgb_mode = alpha_mode =
711 * 0x122, colour mask = 0xF). 0x1a allows
712 * blending. */
713 brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
714
715 brts[i].shader_type = fs->blend_types[i];
716 } else {
717 /* Dummy attachment for depth-only */
718 brts[i].unk2 = 0x3;
719 brts[i].shader_type = fs->blend_types[i];
720 }
721 } else {
722 struct midgard_blend_rt *mrts = rts;
723 mrts[i].flags = flags;
724
725 if (blend[i].is_shader) {
726 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
727 } else {
728 mrts[i].blend.equation = *blend[i].equation.equation;
729 mrts[i].blend.constant = blend[i].equation.constant;
730 }
731 }
732 }
733 }
734
735 static void
736 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
737 struct mali_shader_meta *fragmeta,
738 void *rts)
739 {
740 const struct panfrost_device *dev = pan_device(ctx->base.screen);
741 struct panfrost_shader_state *fs;
742
743 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
744
745 bool msaa = ctx->rasterizer && ctx->rasterizer->base.multisample;
746 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
747
748 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
749 fragmeta->unknown2_4 = 0x4e0;
750
751 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
752 * is required (independent of 32-bit/64-bit descriptors), or why it's
753 * not used on later GPU revisions. Otherwise, all shader jobs fault on
754 * these earlier chips (perhaps this is a chicken bit of some kind).
755 * More investigation is needed. */
756
757 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
758
759 if (dev->quirks & IS_BIFROST) {
760 /* TODO */
761 } else {
762 /* Depending on whether it's legal in the given shader, we try to
763 * enable early-z testing. TODO: respect e-z force */
764
765 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
766 !fs->can_discard && !fs->writes_global &&
767 !fs->writes_depth && !fs->writes_stencil &&
768 !ctx->blend->base.alpha_to_coverage);
769
770 /* Add the writes Z/S flags if needed. */
771 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
772 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
773
774 /* Any time texturing is used, derivatives are implicitly calculated,
775 * so we need to enable helper invocations */
776
777 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
778 fs->helper_invocations);
779
780 /* If discard is enabled, which bit we set to convey this
781 * depends on if depth/stencil is used for the draw or not.
782 * Just one of depth OR stencil is enough to trigger this. */
783
784 const struct pipe_depth_stencil_alpha_state *zsa = &ctx->depth_stencil->base;
785 bool zs_enabled = fs->writes_depth || fs->writes_stencil;
786
787 if (zsa) {
788 zs_enabled |= (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS);
789 zs_enabled |= zsa->stencil[0].enabled;
790 }
791
792 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
793 fs->outputs_read || (!zs_enabled && fs->can_discard));
794 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
795 }
796
797 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
798 panfrost_frag_meta_zsa_update(ctx, fragmeta);
799 panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
800 }
801
802 void
803 panfrost_emit_shader_meta(struct panfrost_batch *batch,
804 enum pipe_shader_type st,
805 struct mali_vertex_tiler_postfix *postfix)
806 {
807 struct panfrost_context *ctx = batch->ctx;
808 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
809
810 if (!ss) {
811 postfix->shader = 0;
812 return;
813 }
814
815 struct mali_shader_meta meta;
816
817 panfrost_shader_meta_init(ctx, st, &meta);
818
819 /* Add the shader BO to the batch. */
820 panfrost_batch_add_bo(batch, ss->bo,
821 PAN_BO_ACCESS_PRIVATE |
822 PAN_BO_ACCESS_READ |
823 panfrost_bo_access_for_stage(st));
824
825 mali_ptr shader_ptr;
826
827 if (st == PIPE_SHADER_FRAGMENT) {
828 struct panfrost_device *dev = pan_device(ctx->base.screen);
829 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
830 size_t desc_size = sizeof(meta);
831 void *rts = NULL;
832 struct panfrost_transfer xfer;
833 unsigned rt_size;
834
835 if (dev->quirks & MIDGARD_SFBD)
836 rt_size = 0;
837 else if (dev->quirks & IS_BIFROST)
838 rt_size = sizeof(struct bifrost_blend_rt);
839 else
840 rt_size = sizeof(struct midgard_blend_rt);
841
842 desc_size += rt_size * rt_count;
843
844 if (rt_size)
845 rts = rzalloc_size(ctx, rt_size * rt_count);
846
847 panfrost_frag_shader_meta_init(ctx, &meta, rts);
848
849 xfer = panfrost_pool_alloc(&batch->pool, desc_size);
850
851 memcpy(xfer.cpu, &meta, sizeof(meta));
852 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
853
854 if (rt_size)
855 ralloc_free(rts);
856
857 shader_ptr = xfer.gpu;
858 } else {
859 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
860 sizeof(meta));
861 }
862
863 postfix->shader = shader_ptr;
864 }
865
866 void
867 panfrost_emit_viewport(struct panfrost_batch *batch,
868 struct mali_vertex_tiler_postfix *tiler_postfix)
869 {
870 struct panfrost_context *ctx = batch->ctx;
871 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
872 const struct pipe_scissor_state *ss = &ctx->scissor;
873 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
874 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
875
876 /* Derive min/max from translate/scale. Note since |x| >= 0 by
877 * definition, we have that -|x| <= |x| hence translate - |scale| <=
878 * translate + |scale|, so the ordering is correct here. */
879 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
880 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
881 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
882 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
883 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
884 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
885
886 /* Scissor to the intersection of the viewport and the scissor rectangle,
887 * clamped to the framebuffer */
888
889 unsigned minx = MIN2(fb->width, vp_minx);
890 unsigned maxx = MIN2(fb->width, vp_maxx);
891 unsigned miny = MIN2(fb->height, vp_miny);
892 unsigned maxy = MIN2(fb->height, vp_maxy);
893
894 if (ss && rast && rast->scissor) {
895 minx = MAX2(ss->minx, minx);
896 miny = MAX2(ss->miny, miny);
897 maxx = MIN2(ss->maxx, maxx);
898 maxy = MIN2(ss->maxy, maxy);
899 }
900
901 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
902
903 pan_pack(T.cpu, VIEWPORT, cfg) {
904 cfg.scissor_minimum_x = minx;
905 cfg.scissor_minimum_y = miny;
906 cfg.scissor_maximum_x = maxx - 1;
907 cfg.scissor_maximum_y = maxy - 1;
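/* The - 1 above suggests the hardware scissor maximum is inclusive; the
 * minimum values are passed through unchanged. */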
908
909 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
910 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
911 }
912
913 tiler_postfix->viewport = T.gpu;
914 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
915 }
916
917 static mali_ptr
918 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
919 enum pipe_shader_type st,
920 struct panfrost_constant_buffer *buf,
921 unsigned index)
922 {
923 struct pipe_constant_buffer *cb = &buf->cb[index];
924 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
925
926 if (rsrc) {
927 panfrost_batch_add_bo(batch, rsrc->bo,
928 PAN_BO_ACCESS_SHARED |
929 PAN_BO_ACCESS_READ |
930 panfrost_bo_access_for_stage(st));
931
932 /* Alignment guaranteed by
933 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
934 return rsrc->bo->gpu + cb->buffer_offset;
935 } else if (cb->user_buffer) {
936 return panfrost_pool_upload(&batch->pool,
937 cb->user_buffer +
938 cb->buffer_offset,
939 cb->buffer_size);
940 } else {
941 unreachable("No constant buffer");
942 }
943 }
944
945 struct sysval_uniform {
946 union {
947 float f[4];
948 int32_t i[4];
949 uint32_t u[4];
950 uint64_t du[2];
951 };
952 };
953
954 static void
955 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
956 struct sysval_uniform *uniform)
957 {
958 struct panfrost_context *ctx = batch->ctx;
959 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
960
961 uniform->f[0] = vp->scale[0];
962 uniform->f[1] = vp->scale[1];
963 uniform->f[2] = vp->scale[2];
964 }
965
966 static void
967 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
968 struct sysval_uniform *uniform)
969 {
970 struct panfrost_context *ctx = batch->ctx;
971 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
972
973 uniform->f[0] = vp->translate[0];
974 uniform->f[1] = vp->translate[1];
975 uniform->f[2] = vp->translate[2];
976 }
977
978 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
979 enum pipe_shader_type st,
980 unsigned int sysvalid,
981 struct sysval_uniform *uniform)
982 {
983 struct panfrost_context *ctx = batch->ctx;
984 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
985 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
986 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
987 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
988
989 assert(dim);
990 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
991
992 if (dim > 1)
993 uniform->i[1] = u_minify(tex->texture->height0,
994 tex->u.tex.first_level);
995
996 if (dim > 2)
997 uniform->i[2] = u_minify(tex->texture->depth0,
998 tex->u.tex.first_level);
999
1000 if (is_array)
1001 uniform->i[dim] = tex->texture->array_size;
1002 }
1003
1004 static void
1005 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
1006 enum pipe_shader_type st,
1007 unsigned ssbo_id,
1008 struct sysval_uniform *uniform)
1009 {
1010 struct panfrost_context *ctx = batch->ctx;
1011
1012 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
1013 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
1014
1015 /* Compute address */
1016 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
1017
1018 panfrost_batch_add_bo(batch, bo,
1019 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
1020 panfrost_bo_access_for_stage(st));
1021
1022 /* Upload address and size as sysval */
1023 uniform->du[0] = bo->gpu + sb.buffer_offset;
1024 uniform->u[2] = sb.buffer_size;
1025 }
1026
1027 static void
1028 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
1029 enum pipe_shader_type st,
1030 unsigned samp_idx,
1031 struct sysval_uniform *uniform)
1032 {
1033 struct panfrost_context *ctx = batch->ctx;
1034 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1035
1036 uniform->f[0] = sampl->min_lod;
1037 uniform->f[1] = sampl->max_lod;
1038 uniform->f[2] = sampl->lod_bias;
1039
1040 /* Even without any errata, Midgard represents "no mipmapping" as
1041 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1042 * panfrost_create_sampler_state which also explains our choice of
1043 * epsilon value (again to keep behaviour consistent) */
1044
1045 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1046 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1047 }
1048
1049 static void
1050 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1051 struct sysval_uniform *uniform)
1052 {
1053 struct panfrost_context *ctx = batch->ctx;
1054
1055 uniform->u[0] = ctx->compute_grid->grid[0];
1056 uniform->u[1] = ctx->compute_grid->grid[1];
1057 uniform->u[2] = ctx->compute_grid->grid[2];
1058 }
1059
1060 static void
1061 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1062 struct panfrost_shader_state *ss,
1063 enum pipe_shader_type st)
1064 {
1065 struct sysval_uniform *uniforms = (void *)buf;
1066
1067 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1068 int sysval = ss->sysval[i];
1069
1070 switch (PAN_SYSVAL_TYPE(sysval)) {
1071 case PAN_SYSVAL_VIEWPORT_SCALE:
1072 panfrost_upload_viewport_scale_sysval(batch,
1073 &uniforms[i]);
1074 break;
1075 case PAN_SYSVAL_VIEWPORT_OFFSET:
1076 panfrost_upload_viewport_offset_sysval(batch,
1077 &uniforms[i]);
1078 break;
1079 case PAN_SYSVAL_TEXTURE_SIZE:
1080 panfrost_upload_txs_sysval(batch, st,
1081 PAN_SYSVAL_ID(sysval),
1082 &uniforms[i]);
1083 break;
1084 case PAN_SYSVAL_SSBO:
1085 panfrost_upload_ssbo_sysval(batch, st,
1086 PAN_SYSVAL_ID(sysval),
1087 &uniforms[i]);
1088 break;
1089 case PAN_SYSVAL_NUM_WORK_GROUPS:
1090 panfrost_upload_num_work_groups_sysval(batch,
1091 &uniforms[i]);
1092 break;
1093 case PAN_SYSVAL_SAMPLER:
1094 panfrost_upload_sampler_sysval(batch, st,
1095 PAN_SYSVAL_ID(sysval),
1096 &uniforms[i]);
1097 break;
1098 default:
1099 assert(0);
1100 }
1101 }
1102 }
1103
1104 static const void *
1105 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1106 unsigned index)
1107 {
1108 struct pipe_constant_buffer *cb = &buf->cb[index];
1109 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1110
1111 if (rsrc)
1112 return rsrc->bo->cpu;
1113 else if (cb->user_buffer)
1114 return cb->user_buffer;
1115 else
1116 unreachable("No constant buffer");
1117 }
1118
1119 void
1120 panfrost_emit_const_buf(struct panfrost_batch *batch,
1121 enum pipe_shader_type stage,
1122 struct mali_vertex_tiler_postfix *postfix)
1123 {
1124 struct panfrost_context *ctx = batch->ctx;
1125 struct panfrost_shader_variants *all = ctx->shader[stage];
1126
1127 if (!all)
1128 return;
1129
1130 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1131
1132 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1133
1134 /* Uniforms are implicitly UBO #0 */
1135 bool has_uniforms = buf->enabled_mask & (1 << 0);
1136
1137 /* Allocate room for the sysval and the uniforms */
1138 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1139 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1140 size_t size = sys_size + uniform_size;
1141 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1142 size);
1143
1144 /* Upload sysvals requested by the shader */
1145 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1146
1147 /* Upload uniforms */
1148 if (has_uniforms && uniform_size) {
1149 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1150 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1151 }
1152
1153 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1154 * uploaded */
1155
1156 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1157 assert(ubo_count >= 1);
1158
1159 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1160 struct panfrost_transfer ubos = panfrost_pool_alloc(&batch->pool, sz);
1161 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1162
1163 /* Upload uniforms as a UBO */
1164
1165 if (ss->uniform_count) {
1166 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1167 cfg.entries = ss->uniform_count;
1168 cfg.pointer = transfer.gpu;
1169 }
1170 } else {
1171 *ubo_ptr = 0;
1172 }
1173
1174 /* The rest are honest-to-goodness UBOs */
1175
1176 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1177 size_t usz = buf->cb[ubo].buffer_size;
1178 bool enabled = buf->enabled_mask & (1 << ubo);
1179 bool empty = usz == 0;
1180
1181 if (!enabled || empty) {
1182 ubo_ptr[ubo] = 0;
1183 continue;
1184 }
1185
1186 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1187 cfg.entries = DIV_ROUND_UP(usz, 16);
1188 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1189 stage, buf, ubo);
1190 }
1191 }
1192
1193 postfix->uniforms = transfer.gpu;
1194 postfix->uniform_buffers = ubos.gpu;
1195
1196 buf->dirty_mask = 0;
1197 }
1198
1199 void
1200 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1201 const struct pipe_grid_info *info,
1202 struct midgard_payload_vertex_tiler *vtp)
1203 {
1204 struct panfrost_context *ctx = batch->ctx;
1205 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1206 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1207 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1208 128));
1209 unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
1210 info->grid[2] * 4;
1211 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1212 shared_size,
1213 1);
1214
1215 struct mali_shared_memory shared = {
1216 .shared_memory = bo->gpu,
1217 .shared_workgroup_count =
1218 util_logbase2_ceil(info->grid[0]) +
1219 util_logbase2_ceil(info->grid[1]) +
1220 util_logbase2_ceil(info->grid[2]),
1221 .shared_unk1 = 0x2,
1222 .shared_shift = util_logbase2(single_size) - 1
1223 };
1224
1225 vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
1226 sizeof(shared));
1227 }
1228
1229 static mali_ptr
1230 panfrost_get_tex_desc(struct panfrost_batch *batch,
1231 enum pipe_shader_type st,
1232 struct panfrost_sampler_view *view)
1233 {
1234 if (!view)
1235 return (mali_ptr) 0;
1236
1237 struct pipe_sampler_view *pview = &view->base;
1238 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1239
1240 /* Add the BO to the job so it's retained until the job is done. */
1241
1242 panfrost_batch_add_bo(batch, rsrc->bo,
1243 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1244 panfrost_bo_access_for_stage(st));
1245
1246 panfrost_batch_add_bo(batch, view->bo,
1247 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1248 panfrost_bo_access_for_stage(st));
1249
1250 return view->bo->gpu;
1251 }
1252
1253 static void
1254 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1255 struct pipe_context *pctx)
1256 {
1257 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1258 if (view->texture_bo != rsrc->bo->gpu ||
1259 view->modifier != rsrc->modifier) {
1260 panfrost_bo_unreference(view->bo);
1261 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1262 }
1263 }
1264
1265 void
1266 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1267 enum pipe_shader_type stage,
1268 struct mali_vertex_tiler_postfix *postfix)
1269 {
1270 struct panfrost_context *ctx = batch->ctx;
1271 struct panfrost_device *device = pan_device(ctx->base.screen);
1272
1273 if (!ctx->sampler_view_count[stage])
1274 return;
1275
1276 if (device->quirks & IS_BIFROST) {
1277 struct bifrost_texture_descriptor *descriptors;
1278
1279 descriptors = malloc(sizeof(struct bifrost_texture_descriptor) *
1280 ctx->sampler_view_count[stage]);
1281
1282 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1283 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1284 struct pipe_sampler_view *pview = &view->base;
1285 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1286 panfrost_update_sampler_view(view, &ctx->base);
1287
1288 /* Add the BOs to the job so they are retained until the job is done. */
1289
1290 panfrost_batch_add_bo(batch, rsrc->bo,
1291 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1292 panfrost_bo_access_for_stage(stage));
1293
1294 panfrost_batch_add_bo(batch, view->bo,
1295 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1296 panfrost_bo_access_for_stage(stage));
1297
1298 memcpy(&descriptors[i], view->bifrost_descriptor, sizeof(*view->bifrost_descriptor));
1299 }
1300
1301 postfix->textures = panfrost_pool_upload(&batch->pool,
1302 descriptors,
1303 sizeof(struct bifrost_texture_descriptor) *
1304 ctx->sampler_view_count[stage]);
1305
1306 free(descriptors);
1307 } else {
1308 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1309
1310 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1311 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1312
1313 panfrost_update_sampler_view(view, &ctx->base);
1314
1315 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1316 }
1317
1318 postfix->textures = panfrost_pool_upload(&batch->pool,
1319 trampolines,
1320 sizeof(uint64_t) *
1321 ctx->sampler_view_count[stage]);
1322 }
1323 }
1324
1325 void
1326 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1327 enum pipe_shader_type stage,
1328 struct mali_vertex_tiler_postfix *postfix)
1329 {
1330 struct panfrost_context *ctx = batch->ctx;
1331
1332 if (!ctx->sampler_count[stage])
1333 return;
1334
1335 size_t desc_size = sizeof(struct bifrost_sampler_descriptor);
1336 assert(sizeof(struct bifrost_sampler_descriptor) == MALI_MIDGARD_SAMPLER_LENGTH);
1337
1338 size_t sz = desc_size * ctx->sampler_count[stage];
1339 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, sz);
1340 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1341
1342 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1343 out[i] = ctx->samplers[stage][i]->hw;
1344
1345 postfix->sampler_descriptor = T.gpu;
1346 }
1347
1348 void
1349 panfrost_emit_vertex_attr_meta(struct panfrost_batch *batch,
1350 struct mali_vertex_tiler_postfix *vertex_postfix)
1351 {
1352 struct panfrost_context *ctx = batch->ctx;
1353
1354 if (!ctx->vertex)
1355 return;
1356
1357 struct panfrost_vertex_state *so = ctx->vertex;
1358
1359 panfrost_vertex_state_upd_attr_offs(ctx, vertex_postfix);
1360 vertex_postfix->attribute_meta = panfrost_pool_upload(&batch->pool, so->hw,
1361 sizeof(*so->hw) *
1362 PAN_MAX_ATTRIBUTE);
1363 }
1364
1365 void
1366 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1367 struct mali_vertex_tiler_postfix *vertex_postfix)
1368 {
1369 struct panfrost_context *ctx = batch->ctx;
1370 struct panfrost_vertex_state *so = ctx->vertex;
1371
1372 /* Staged mali_attr, and index into them. i =/= k, depending on the
1373 * vertex buffer mask and instancing. Twice as much room is allocated,
1374 * for a worst case of NPOT_DIVIDEs, which take up an extra slot */
1375 union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
1376 unsigned k = 0;
1377
1378 for (unsigned i = 0; i < so->num_elements; ++i) {
1379 /* We map a mali_attr to be 1:1 with the mali_attr_meta, which
1380 * means duplicating some vertex buffers (who cares? aside from
1381 * maybe some caching implications but I somehow doubt that
1382 * matters) */
1383
1384 struct pipe_vertex_element *elem = &so->pipe[i];
1385 unsigned vbi = elem->vertex_buffer_index;
1386
1387 /* The exception to 1:1 mapping is that we can have multiple
1388 * entries (NPOT divisors), so we fixup anyways */
1389
1390 so->hw[i].index = k;
1391
1392 if (!(ctx->vb_mask & (1 << vbi)))
1393 continue;
1394
1395 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1396 struct panfrost_resource *rsrc;
1397
1398 rsrc = pan_resource(buf->buffer.resource);
1399 if (!rsrc)
1400 continue;
1401
1402 /* Align to 64 bytes by masking off the lower bits. This
1403 * will be adjusted back when we fixup the src_offset in
1404 * mali_attr_meta */
1405
1406 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1407 mali_ptr addr = raw_addr & ~63;
1408 unsigned chopped_addr = raw_addr - addr;
1409
1410 /* Add a dependency of the batch on the vertex buffer */
1411 panfrost_batch_add_bo(batch, rsrc->bo,
1412 PAN_BO_ACCESS_SHARED |
1413 PAN_BO_ACCESS_READ |
1414 PAN_BO_ACCESS_VERTEX_TILER);
1415
1416 /* Set common fields */
1417 attrs[k].elements = addr;
1418 attrs[k].stride = buf->stride;
1419
1420 /* Since we advanced the base pointer, we shrink the buffer
1421 * size */
1422 attrs[k].size = rsrc->base.width0 - buf->buffer_offset;
1423
1424 /* We need to add the extra size we masked off (for
1425 * correctness) so the data doesn't get clamped away */
1426 attrs[k].size += chopped_addr;
1427
1428 /* For non-instancing make sure we initialize */
1429 attrs[k].shift = attrs[k].extra_flags = 0;
1430
1431 /* Instancing uses a dramatically different code path than
1432 * linear, so dispatch for the actual emission now that the
1433 * common code is finished */
1434
1435 unsigned divisor = elem->instance_divisor;
1436
1437 if (divisor && ctx->instance_count == 1) {
1438 /* Silly corner case where there's a divisor(=1) but
1439 * there's no legitimate instancing. So we want *every*
1440 * attribute to be the same. So set stride to zero so
1441 * we don't go anywhere. */
1442
1443 attrs[k].size = attrs[k].stride + chopped_addr;
1444 attrs[k].stride = 0;
1445 attrs[k++].elements |= MALI_ATTR_LINEAR;
1446 } else if (ctx->instance_count <= 1) {
1447 /* Normal, non-instanced attributes */
1448 attrs[k++].elements |= MALI_ATTR_LINEAR;
1449 } else {
1450 unsigned instance_shift = vertex_postfix->instance_shift;
1451 unsigned instance_odd = vertex_postfix->instance_odd;
1452
1453 k += panfrost_vertex_instanced(ctx->padded_count,
1454 instance_shift,
1455 instance_odd,
1456 divisor, &attrs[k]);
1457 }
1458 }
1459
1460 /* Add special gl_VertexID/gl_InstanceID buffers */
1461
1462 panfrost_vertex_id(ctx->padded_count, &attrs[k]);
1463 so->hw[PAN_VERTEX_ID].index = k++;
1464 panfrost_instance_id(ctx->padded_count, &attrs[k]);
1465 so->hw[PAN_INSTANCE_ID].index = k++;
1466
1467 /* Upload whatever we emitted and go */
1468
1469 vertex_postfix->attributes = panfrost_pool_upload(&batch->pool, attrs,
1470 k * sizeof(*attrs));
1471 }
1472
1473 static mali_ptr
1474 panfrost_emit_varyings(struct panfrost_batch *batch, union mali_attr *slot,
1475 unsigned stride, unsigned count)
1476 {
1477 /* Fill out the descriptor */
1478 slot->stride = stride;
1479 slot->size = stride * count;
1480 slot->shift = slot->extra_flags = 0;
1481
1482 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1483 slot->size);
1484
1485 slot->elements = transfer.gpu | MALI_ATTR_LINEAR;
1486
1487 return transfer.gpu;
1488 }
1489
1490 static unsigned
1491 panfrost_streamout_offset(unsigned stride, unsigned offset,
1492 struct pipe_stream_output_target *target)
1493 {
1494 return (target->buffer_offset + (offset * stride * 4)) & 63;
1495 }
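/* Note the & 63 here pairs with the & ~63 in panfrost_emit_streamout below:
 * the buffer pointer is rounded down to 64 bytes and the remainder is
 * reapplied through the varying record's src_offset, so both computations
 * must agree on the same byte offset. */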
1496
1497 static void
1498 panfrost_emit_streamout(struct panfrost_batch *batch, union mali_attr *slot,
1499 unsigned stride, unsigned offset, unsigned count,
1500 struct pipe_stream_output_target *target)
1501 {
1502 /* Fill out the descriptor */
1503 slot->stride = stride * 4;
1504 slot->shift = slot->extra_flags = 0;
1505
1506 unsigned max_size = target->buffer_size;
1507 unsigned expected_size = slot->stride * count;
1508
1509 /* Grab the BO and bind it to the batch */
1510 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1511
1512 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1513 * the perspective of the TILER and FRAGMENT.
1514 */
1515 panfrost_batch_add_bo(batch, bo,
1516 PAN_BO_ACCESS_SHARED |
1517 PAN_BO_ACCESS_RW |
1518 PAN_BO_ACCESS_VERTEX_TILER |
1519 PAN_BO_ACCESS_FRAGMENT);
1520
1521 /* We will have an offset applied to get alignment */
1522 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * slot->stride);
1523 slot->elements = (addr & ~63) | MALI_ATTR_LINEAR;
1524 slot->size = MIN2(max_size, expected_size) + (addr & 63);
1525 }
1526
1527 static bool
1528 has_point_coord(unsigned mask, gl_varying_slot loc)
1529 {
1530 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1531 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1532 else if (loc == VARYING_SLOT_PNTC)
1533 return (mask & (1 << 8));
1534 else
1535 return false;
1536 }
1537
1538 /* Helpers for manipulating stream out information so we can pack varyings
1539 * accordingly. Compute the src_offset for a given captured varying */
1540
1541 static struct pipe_stream_output *
1542 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1543 {
1544 for (unsigned i = 0; i < info->num_outputs; ++i) {
1545 if (info->output[i].register_index == loc)
1546 return &info->output[i];
1547 }
1548
1549 unreachable("Varying not captured");
1550 }
1551
1552 static unsigned
1553 pan_varying_size(enum mali_format fmt)
1554 {
1555 unsigned type = MALI_EXTRACT_TYPE(fmt);
1556 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1557 unsigned bits = MALI_EXTRACT_BITS(fmt);
1558 unsigned bpc = 0;
1559
1560 if (bits == MALI_CHANNEL_FLOAT) {
1561 /* No doubles */
1562 bool fp16 = (type == MALI_FORMAT_SINT);
1563 assert(fp16 || (type == MALI_FORMAT_UNORM));
1564
1565 bpc = fp16 ? 2 : 4;
1566 } else {
1567 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1568
1569 /* See the enums */
1570 bits = 1 << bits;
1571 assert(bits >= 8);
1572 bpc = bits / 8;
1573 }
1574
1575 return bpc * chan;
1576 }
1577
1578 /* Indices for named (non-XFB) varyings that are present. These are packed
1579 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1580 * PAN_VARY_*). This has the nice property that you can lookup the buffer index
1581 * of a given special field given a shift S by:
1582 *
1583 * idx = popcount(P & ((1 << S) - 1))
1584 *
1585 * That is, look at all of the varyings that come earlier and count them;
1586 * that count is the buffer index. Likewise, the total number of special
1587 * buffers required is simply popcount(P)
1588 */
1589
1590 enum pan_special_varying {
1591 PAN_VARY_GENERAL = 0,
1592 PAN_VARY_POSITION = 1,
1593 PAN_VARY_PSIZ = 2,
1594 PAN_VARY_PNTCOORD = 3,
1595 PAN_VARY_FACE = 4,
1596 PAN_VARY_FRAGCOORD = 5,
1597
1598 /* Keep last */
1599 PAN_VARY_MAX,
1600 };
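/* Worked example of the indexing rule above: with only GENERAL, POSITION and
 * PSIZ present, P = 0b111, so PSIZ (shift S = PAN_VARY_PSIZ = 2) lives at
 * buffer index popcount(0b111 & ((1 << 2) - 1)) = 2, and any XFB buffers
 * start at pan_xfb_base(P) = popcount(P) = 3. */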
1601
1602 /* Given a varying, figure out which index it corresponds to */
1603
1604 static inline unsigned
1605 pan_varying_index(unsigned present, enum pan_special_varying v)
1606 {
1607 unsigned mask = (1 << v) - 1;
1608 return util_bitcount(present & mask);
1609 }
1610
1611 /* Get the base offset for XFB buffers, which by convention come after
1612 * everything else. Wrapper function for semantic reasons; by construction this
1613 * is just popcount. */
1614
1615 static inline unsigned
1616 pan_xfb_base(unsigned present)
1617 {
1618 return util_bitcount(present);
1619 }
1620
1621 /* Computes the present mask for varyings so we can start emitting varying records */
1622
1623 static inline unsigned
1624 pan_varying_present(
1625 struct panfrost_shader_state *vs,
1626 struct panfrost_shader_state *fs,
1627 unsigned quirks)
1628 {
1629 /* At the moment we always emit general and position buffers. Not
1630 * strictly necessary but usually harmless */
1631
1632 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1633
1634 /* Enable special buffers by the shader info */
1635
1636 if (vs->writes_point_size)
1637 present |= (1 << PAN_VARY_PSIZ);
1638
1639 if (fs->reads_point_coord)
1640 present |= (1 << PAN_VARY_PNTCOORD);
1641
1642 if (fs->reads_face)
1643 present |= (1 << PAN_VARY_FACE);
1644
1645 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1646 present |= (1 << PAN_VARY_FRAGCOORD);
1647
1648 /* Also, if we have a point sprite, we need a point coord buffer */
1649
1650 for (unsigned i = 0; i < fs->varying_count; i++) {
1651 gl_varying_slot loc = fs->varyings_loc[i];
1652
1653 if (has_point_coord(fs->point_sprite_mask, loc))
1654 present |= (1 << PAN_VARY_PNTCOORD);
1655 }
1656
1657 return present;
1658 }
1659
1660 /* Emitters for varying records */
1661
1662 static struct mali_attr_meta
1663 pan_emit_vary(unsigned present, enum pan_special_varying buf,
1664 unsigned quirks, enum mali_format format,
1665 unsigned offset)
1666 {
1667 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1668
1669 struct mali_attr_meta meta = {
1670 .index = pan_varying_index(present, buf),
1671 .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
1672 .swizzle = quirks & HAS_SWIZZLES ?
1673 panfrost_get_default_swizzle(nr_channels) :
1674 panfrost_bifrost_swizzle(nr_channels),
1675 .format = format,
1676 .src_offset = offset
1677 };
1678
1679 return meta;
1680 }
1681
1682 /* General varying that is unused */
1683
1684 static struct mali_attr_meta
1685 pan_emit_vary_only(unsigned present, unsigned quirks)
1686 {
1687 return pan_emit_vary(present, 0, quirks, MALI_VARYING_DISCARD, 0);
1688 }
1689
1690 /* Special records */
1691
1692 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1693 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1694 [PAN_VARY_PSIZ] = MALI_R16F,
1695 [PAN_VARY_PNTCOORD] = MALI_R16F,
1696 [PAN_VARY_FACE] = MALI_R32I,
1697 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1698 };
1699
1700 static struct mali_attr_meta
1701 pan_emit_vary_special(unsigned present, enum pan_special_varying buf,
1702 unsigned quirks)
1703 {
1704 assert(buf < PAN_VARY_MAX);
1705 return pan_emit_vary(present, buf, quirks, pan_varying_formats[buf], 0);
1706 }
1707
1708 static enum mali_format
1709 pan_xfb_format(enum mali_format format, unsigned nr)
1710 {
1711 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1712 return MALI_R32F | MALI_NR_CHANNELS(nr);
1713 else
1714 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1715 }
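/* For example (illustrative): a varying whose bits field decodes to
 * MALI_CHANNEL_FLOAT and is captured with o.num_components = 3 becomes
 * MALI_R32F | MALI_NR_CHANNELS(3); non-float formats keep their base type
 * but are likewise widened so the capture is 32 bits per component. */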
1716
1717 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1718 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1719 * value. */
1720
1721 static struct mali_attr_meta
1722 pan_emit_vary_xfb(unsigned present,
1723 unsigned max_xfb,
1724 unsigned *streamout_offsets,
1725 unsigned quirks,
1726 enum mali_format format,
1727 struct pipe_stream_output o)
1728 {
1729 /* Construct a record for the captured varying */
1730 struct mali_attr_meta meta = {
1731 /* XFB buffers come after everything else */
1732 .index = pan_xfb_base(present) + o.output_buffer,
1733
1734 /* The usual unknown bit */
1735 .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
1736
1737 /* Override swizzle with number of channels */
1738 .swizzle = quirks & HAS_SWIZZLES ?
1739 panfrost_get_default_swizzle(o.num_components) :
1740 panfrost_bifrost_swizzle(o.num_components),
1741
1742 /* Override number of channels and precision to highp */
1743 .format = pan_xfb_format(format, o.num_components),
1744
1745 /* Combine the destination offset (given in dwords) with the streamout buffer offset */
1746 .src_offset = (o.dst_offset * 4) /* dwords */
1747 + streamout_offsets[o.output_buffer]
1748 };
1749
1750 return meta;
1751 }
1752
1753 /* Determine if we should capture a varying for XFB. This requires actually
1754  * having a buffer for it. If we don't capture it, we'll fall back to a general
1755 * varying path (linked or unlinked, possibly discarding the write) */
1756
1757 static bool
1758 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1759 unsigned loc, unsigned max_xfb)
1760 {
1761 if (!(xfb->so_mask & (1ll << loc)))
1762 return false;
1763
1764 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1765 return o->output_buffer < max_xfb;
1766 }
1767
1768 /* Higher-level wrapper around all of the above, classifying a varying as one
1769  * of the types above */
1770
1771 static struct mali_attr_meta
1772 panfrost_emit_varying(
1773 struct panfrost_shader_state *stage,
1774 struct panfrost_shader_state *other,
1775 struct panfrost_shader_state *xfb,
1776 unsigned present,
1777 unsigned max_xfb,
1778 unsigned *streamout_offsets,
1779 unsigned quirks,
1780 unsigned *gen_offsets,
1781 enum mali_format *gen_formats,
1782 unsigned *gen_stride,
1783 unsigned idx,
1784 bool should_alloc,
1785 bool is_fragment)
1786 {
1787 gl_varying_slot loc = stage->varyings_loc[idx];
1788 enum mali_format format = stage->varyings[idx];
1789
1790 /* Override format to match linkage */
1791 if (!should_alloc && gen_formats[idx])
1792 format = gen_formats[idx];
1793
1794 if (has_point_coord(stage->point_sprite_mask, loc)) {
1795 return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
1796 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1797 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1798 return pan_emit_vary_xfb(present, max_xfb, streamout_offsets, quirks, format, *o);
1799 } else if (loc == VARYING_SLOT_POS) {
1800 if (is_fragment)
1801 return pan_emit_vary_special(present, PAN_VARY_FRAGCOORD, quirks);
1802 else
1803 return pan_emit_vary_special(present, PAN_VARY_POSITION, quirks);
1804 } else if (loc == VARYING_SLOT_PSIZ) {
1805 return pan_emit_vary_special(present, PAN_VARY_PSIZ, quirks);
1806 } else if (loc == VARYING_SLOT_PNTC) {
1807 return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
1808 } else if (loc == VARYING_SLOT_FACE) {
1809 return pan_emit_vary_special(present, PAN_VARY_FACE, quirks);
1810 }
1811
1812 /* We've exhausted the special cases, so this is a general varying. Check if we're linked */
1813 signed other_idx = -1;
1814
1815 for (unsigned j = 0; j < other->varying_count; ++j) {
1816 if (other->varyings_loc[j] == loc) {
1817 other_idx = j;
1818 break;
1819 }
1820 }
1821
1822 if (other_idx < 0)
1823 return pan_emit_vary_only(present, quirks);
1824
1825 unsigned offset = gen_offsets[other_idx];
1826
1827 if (should_alloc) {
1828 /* We're linked, so allocate space via a watermark allocation */
1829 enum mali_format alt = other->varyings[other_idx];
1830
1831 /* Do interpolation at minimum precision */
1832 unsigned size_main = pan_varying_size(format);
1833 unsigned size_alt = pan_varying_size(alt);
1834 unsigned size = MIN2(size_main, size_alt);
1835
1836 /* If a varying is marked for XFB but not actually captured, we
1837 * should match the format to the format that would otherwise
1838 * be used for XFB, since dEQP checks for invariance here. It's
1839 * unclear if this is required by the spec. */
1840
1841 if (xfb->so_mask & (1ull << loc)) {
1842 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1843 format = pan_xfb_format(format, o->num_components);
1844 size = pan_varying_size(format);
1845 } else if (size == size_alt) {
1846 format = alt;
1847 }
1848
1849 gen_offsets[idx] = *gen_stride;
1850 gen_formats[other_idx] = format;
1851 offset = *gen_stride;
1852 *gen_stride += size;
1853 }
1854
1855 return pan_emit_vary(present, PAN_VARY_GENERAL,
1856 quirks, format, offset);
1857 }
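/* Linking example (illustrative, sizes assumed): suppose one stage declares a
 * varying as an fp32 vec4 (16 bytes) while the other declares it as an fp16
 * vec4 (8 bytes). The watermark allocation above reserves MIN2(16, 8) = 8
 * bytes at the current gen_stride, records that offset so the other stage
 * reuses it, and advances gen_stride by 8, so both stages agree on a layout
 * interpolated at the lower precision. */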
1858
1859 static void
1860 pan_emit_special_input(union mali_attr *varyings,
1861 unsigned present,
1862 enum pan_special_varying v,
1863 mali_ptr addr)
1864 {
1865 if (present & (1 << v)) {
1866 /* Ensure we write exactly once for performance and with fields
1867 * zeroed appropriately to avoid flakes */
1868
1869 union mali_attr s = {
1870 .elements = addr
1871 };
1872
1873 varyings[pan_varying_index(present, v)] = s;
1874 }
1875 }
1876
1877 void
1878 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1879 unsigned vertex_count,
1880 struct mali_vertex_tiler_postfix *vertex_postfix,
1881 struct mali_vertex_tiler_postfix *tiler_postfix,
1882 union midgard_primitive_size *primitive_size)
1883 {
1884 /* Load the shaders */
1885 struct panfrost_context *ctx = batch->ctx;
1886 struct panfrost_device *dev = pan_device(ctx->base.screen);
1887 struct panfrost_shader_state *vs, *fs;
1888 size_t vs_size, fs_size;
1889
1890 /* Allocate the varying descriptor */
1891
1892 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1893 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1894 vs_size = sizeof(struct mali_attr_meta) * vs->varying_count;
1895 fs_size = sizeof(struct mali_attr_meta) * fs->varying_count;
1896
1897 struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
1898 vs_size +
1899 fs_size);
1900
1901 struct pipe_stream_output_info *so = &vs->stream_output;
1902 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1903
1904 /* Check if this varying is linked by us. This is the case for
1905 * general-purpose, non-captured varyings. If it is, link it. If it's
1906 * not, use the provided stream out information to determine the
1907 * offset, since it was already linked for us. */
1908
1909 unsigned gen_offsets[32];
1910 enum mali_format gen_formats[32];
1911 memset(gen_offsets, 0, sizeof(gen_offsets));
1912 memset(gen_formats, 0, sizeof(gen_formats));
1913
1914 unsigned gen_stride = 0;
1915 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1916 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1917
1918 unsigned streamout_offsets[32];
1919
1920 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1921 streamout_offsets[i] = panfrost_streamout_offset(
1922 so->stride[i],
1923 ctx->streamout.offsets[i],
1924 ctx->streamout.targets[i]);
1925 }
1926
1927 struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
1928 struct mali_attr_meta *ofs = ovs + vs->varying_count;
1929
1930 for (unsigned i = 0; i < vs->varying_count; i++) {
1931 ovs[i] = panfrost_emit_varying(vs, fs, vs, present,
1932 ctx->streamout.num_targets, streamout_offsets,
1933 dev->quirks,
1934 gen_offsets, gen_formats, &gen_stride, i, true, false);
1935 }
1936
1937 for (unsigned i = 0; i < fs->varying_count; i++) {
1938 ofs[i] = panfrost_emit_varying(fs, vs, vs, present,
1939 ctx->streamout.num_targets, streamout_offsets,
1940 dev->quirks,
1941 gen_offsets, gen_formats, &gen_stride, i, false, true);
1942 }
1943
1944 unsigned xfb_base = pan_xfb_base(present);
1945 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
1946 sizeof(union mali_attr) * (xfb_base + ctx->streamout.num_targets));
1947 union mali_attr *varyings = (union mali_attr *) T.cpu;
1948
1949 /* Emit the stream out buffers */
1950
1951 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
1952 ctx->vertex_count);
1953
1954 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1955 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
1956 so->stride[i],
1957 ctx->streamout.offsets[i],
1958 out_count,
1959 ctx->streamout.targets[i]);
1960 }
1961
1962 panfrost_emit_varyings(batch,
1963 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
1964 gen_stride, vertex_count);
1965
1966 /* fp32 vec4 gl_Position */
1967 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
1968 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
1969 sizeof(float) * 4, vertex_count);
1970
1971 if (present & (1 << PAN_VARY_PSIZ)) {
1972 primitive_size->pointer = panfrost_emit_varyings(batch,
1973 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
1974 2, vertex_count);
1975 }
1976
1977 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_VARYING_POINT_COORD);
1978 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_VARYING_FRONT_FACING);
1979 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_VARYING_FRAG_COORD);
1980
1981 vertex_postfix->varyings = T.gpu;
1982 tiler_postfix->varyings = T.gpu;
1983
1984 vertex_postfix->varying_meta = trans.gpu;
1985 tiler_postfix->varying_meta = trans.gpu + vs_size;
1986 }
1987
1988 void
1989 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
1990 struct mali_vertex_tiler_prefix *vertex_prefix,
1991 struct mali_vertex_tiler_postfix *vertex_postfix,
1992 struct mali_vertex_tiler_prefix *tiler_prefix,
1993 struct mali_vertex_tiler_postfix *tiler_postfix,
1994 union midgard_primitive_size *primitive_size)
1995 {
1996 struct panfrost_context *ctx = batch->ctx;
1997 struct panfrost_device *device = pan_device(ctx->base.screen);
1998 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
1999 struct bifrost_payload_vertex bifrost_vertex = {0,};
2000 struct bifrost_payload_tiler bifrost_tiler = {0,};
2001 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2002 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2003 void *vp, *tp;
2004 size_t vp_size, tp_size;
2005
2006 if (device->quirks & IS_BIFROST) {
2007 bifrost_vertex.prefix = *vertex_prefix;
2008 bifrost_vertex.postfix = *vertex_postfix;
2009 vp = &bifrost_vertex;
2010 vp_size = sizeof(bifrost_vertex);
2011
2012 bifrost_tiler.prefix = *tiler_prefix;
2013 bifrost_tiler.tiler.primitive_size = *primitive_size;
2014 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2015 bifrost_tiler.postfix = *tiler_postfix;
2016 tp = &bifrost_tiler;
2017 tp_size = sizeof(bifrost_tiler);
2018 } else {
2019 midgard_vertex.prefix = *vertex_prefix;
2020 midgard_vertex.postfix = *vertex_postfix;
2021 vp = &midgard_vertex;
2022 vp_size = sizeof(midgard_vertex);
2023
2024 midgard_tiler.prefix = *tiler_prefix;
2025 midgard_tiler.postfix = *tiler_postfix;
2026 midgard_tiler.primitive_size = *primitive_size;
2027 tp = &midgard_tiler;
2028 tp_size = sizeof(midgard_tiler);
2029 }
2030
2031 if (wallpapering) {
2032 /* Inject in reverse order, with "predicted" job indices.
2033 * THIS IS A HACK XXX */
2034 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2035 batch->scoreboard.job_index + 2, tp, tp_size, true);
2036 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2037 vp, vp_size, true);
2038 return;
2039 }
2040
2041 /* If rasterizer discard is enabled, only submit the vertex job */
2042
2043 bool rasterizer_discard = ctx->rasterizer &&
2044 ctx->rasterizer->base.rasterizer_discard;
2045
2046 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2047 vp, vp_size, false);
2048
2049 if (rasterizer_discard)
2050 return;
2051
2052 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2053 false);
2054 }
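/* Note on scoreboarding in the non-wallpaper path above: panfrost_new_job()
 * returns the index of the freshly added vertex job, and that index is passed
 * as the dependency of the tiler job, so the tiler cannot start before its
 * varyings have been shaded. */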
2055
2056 /* TODO: stop hardcoding this */
2057 mali_ptr
2058 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2059 {
2060 uint16_t locations[] = {
2061 128, 128,
2062 0, 256,
2063 0, 256,
2064 0, 256,
2065 0, 256,
2066 0, 256,
2067 0, 256,
2068 0, 256,
2069 0, 256,
2070 0, 256,
2071 0, 256,
2072 0, 256,
2073 0, 256,
2074 0, 256,
2075 0, 256,
2076 0, 256,
2077 0, 256,
2078 0, 256,
2079 0, 256,
2080 0, 256,
2081 0, 256,
2082 0, 256,
2083 0, 256,
2084 0, 256,
2085 0, 256,
2086 0, 256,
2087 0, 256,
2088 0, 256,
2089 0, 256,
2090 0, 256,
2091 0, 256,
2092 0, 256,
2093 128, 128,
2094 0, 0,
2095 0, 0,
2096 0, 0,
2097 0, 0,
2098 0, 0,
2099 0, 0,
2100 0, 0,
2101 0, 0,
2102 0, 0,
2103 0, 0,
2104 0, 0,
2105 0, 0,
2106 0, 0,
2107 0, 0,
2108 0, 0,
2109 };
2110
2111 return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
2112 }