panfrost: Inline panfrost_vertex_instanced
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 unsigned shift = panfrost_get_stack_shift(batch->stack_size);
62 struct mali_shared_memory shared = {
63 .stack_shift = shift,
64 .scratchpad = panfrost_batch_get_scratchpad(batch, shift, dev->thread_tls_alloc, dev->core_count)->gpu,
65 .shared_workgroup_count = ~0,
66 };
67 postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
68 }
69
70 static void
71 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
72 struct mali_vertex_tiler_postfix *postfix)
73 {
74 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
75 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
76 }
77
78 static void
79 panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_prefix *prefix,
81 struct mali_vertex_tiler_postfix *postfix)
82 {
83 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
84
85 postfix->gl_enables |= 0x7;
86 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
87 rasterizer && rasterizer->base.front_ccw);
88 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
89 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
90 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
91 rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
92 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
93 rasterizer && rasterizer->base.flatshade_first);
94 }
95
96 void
97 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
98 struct mali_vertex_tiler_prefix *prefix,
99 union midgard_primitive_size *primitive_size)
100 {
101 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
102
103 if (!panfrost_writes_point_size(ctx)) {
104 bool points = prefix->draw_mode == MALI_DRAW_MODE_POINTS;
105 float val = 0.0f;
106
107 if (rasterizer)
108 val = points ?
109 rasterizer->base.point_size :
110 rasterizer->base.line_width;
111
112 primitive_size->constant = val;
113 }
114 }
115
116 static void
117 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
118 struct mali_vertex_tiler_postfix *postfix)
119 {
120 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
121 if (ctx->occlusion_query) {
122 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
123 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
124 PAN_BO_ACCESS_SHARED |
125 PAN_BO_ACCESS_RW |
126 PAN_BO_ACCESS_FRAGMENT);
127 } else {
128 postfix->occlusion_counter = 0;
129 }
130 }
131
132 void
133 panfrost_vt_init(struct panfrost_context *ctx,
134 enum pipe_shader_type stage,
135 struct mali_vertex_tiler_prefix *prefix,
136 struct mali_vertex_tiler_postfix *postfix)
137 {
138 struct panfrost_device *device = pan_device(ctx->base.screen);
139
140 if (!ctx->shader[stage])
141 return;
142
143 memset(prefix, 0, sizeof(*prefix));
144 memset(postfix, 0, sizeof(*postfix));
145
146 if (device->quirks & IS_BIFROST) {
147 postfix->gl_enables = 0x2;
148 panfrost_vt_emit_shared_memory(ctx, postfix);
149 } else {
150 postfix->gl_enables = 0x6;
151 panfrost_vt_attach_framebuffer(ctx, postfix);
152 }
153
154 if (stage == PIPE_SHADER_FRAGMENT) {
155 panfrost_vt_update_occlusion_query(ctx, postfix);
156 panfrost_vt_update_rasterizer(ctx, prefix, postfix);
157 }
158 }
159
160 static unsigned
161 panfrost_translate_index_size(unsigned size)
162 {
163 switch (size) {
164 case 1:
165 return MALI_DRAW_INDEXED_UINT8;
166
167 case 2:
168 return MALI_DRAW_INDEXED_UINT16;
169
170 case 4:
171 return MALI_DRAW_INDEXED_UINT32;
172
173 default:
174 unreachable("Invalid index size");
175 }
176 }
177
178 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
179 * good for the duration of the draw (transient), though it could last longer. Also get
180 * the bounds on the index buffer for the range accessed by the draw. We do
181 * these operations together because there are natural optimizations which
182 * require them to be together. */
183
184 static mali_ptr
185 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
186 const struct pipe_draw_info *info,
187 unsigned *min_index, unsigned *max_index)
188 {
189 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
190 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
191 off_t offset = info->start * info->index_size;
192 bool needs_indices = true;
193 mali_ptr out = 0;
194
195 if (info->max_index != ~0u) {
196 *min_index = info->min_index;
197 *max_index = info->max_index;
198 needs_indices = false;
199 }
200
201 if (!info->has_user_indices) {
202 /* Only resources can be directly mapped */
203 panfrost_batch_add_bo(batch, rsrc->bo,
204 PAN_BO_ACCESS_SHARED |
205 PAN_BO_ACCESS_READ |
206 PAN_BO_ACCESS_VERTEX_TILER);
207 out = rsrc->bo->gpu + offset;
208
209 /* Check the cache */
210 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
211 info->start,
212 info->count,
213 min_index,
214 max_index);
215 } else {
216 /* Otherwise, we need to upload to transient memory */
217 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
218 out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
219 info->count *
220 info->index_size);
221 }
222
223 if (needs_indices) {
224 /* Fallback */
225 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
226
227 if (!info->has_user_indices)
228 panfrost_minmax_cache_add(rsrc->index_cache,
229 info->start, info->count,
230 *min_index, *max_index);
231 }
232
233 return out;
234 }
235
236 void
237 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
238 const struct pipe_draw_info *info,
239 enum mali_draw_mode draw_mode,
240 struct mali_vertex_tiler_postfix *vertex_postfix,
241 struct mali_vertex_tiler_prefix *tiler_prefix,
242 struct mali_vertex_tiler_postfix *tiler_postfix,
243 unsigned *vertex_count,
244 unsigned *padded_count)
245 {
246 tiler_prefix->draw_mode = draw_mode;
247
248 unsigned draw_flags = 0;
249
250 if (panfrost_writes_point_size(ctx))
251 draw_flags |= MALI_DRAW_VARYING_SIZE;
252
253 if (info->primitive_restart)
254 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
255
256 /* These don't make much sense */
257
258 draw_flags |= 0x3000;
259
260 if (info->index_size) {
261 unsigned min_index = 0, max_index = 0;
262
263 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
264 info,
265 &min_index,
266 &max_index);
267
268 /* Use the corresponding values */
269 *vertex_count = max_index - min_index + 1;
270 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
271 tiler_prefix->offset_bias_correction = -min_index;
272 tiler_prefix->index_count = MALI_POSITIVE(info->count);
273 draw_flags |= panfrost_translate_index_size(info->index_size);
274 } else {
275 tiler_prefix->indices = 0;
276 *vertex_count = ctx->vertex_count;
277 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
278 tiler_prefix->offset_bias_correction = 0;
279 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
280 }
281
282 tiler_prefix->unknown_draw = draw_flags;
283
284 /* Encode the padded vertex count */
285
286 if (info->instance_count > 1) {
287 *padded_count = panfrost_padded_vertex_count(*vertex_count);
288
289 unsigned shift = __builtin_ctz(ctx->padded_count);
290 unsigned k = ctx->padded_count >> (shift + 1);
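/* Worked example with hypothetical values (illustrative only): a padded
 * count of 12 = 0b1100 gives shift = ctz(12) = 2 and k = 12 >> 3 = 1, so
 * the count decomposes as (2 * k + 1) << shift = 3 << 2 = 12. */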
291
292 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
293 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
294 } else {
295 *padded_count = *vertex_count;
296
297 /* Reset instancing state */
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
300 }
301 }
302
303 static void
304 panfrost_shader_meta_init(struct panfrost_context *ctx,
305 enum pipe_shader_type st,
306 struct mali_shader_meta *meta)
307 {
308 const struct panfrost_device *dev = pan_device(ctx->base.screen);
309 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
310
311 memset(meta, 0, sizeof(*meta));
312 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
313 meta->attribute_count = ss->attribute_count;
314 meta->varying_count = ss->varying_count;
315 meta->texture_count = ctx->sampler_view_count[st];
316 meta->sampler_count = ctx->sampler_count[st];
317
318 if (dev->quirks & IS_BIFROST) {
319 if (st == PIPE_SHADER_VERTEX)
320 meta->bifrost1.unk1 = 0x800000;
321 else {
322 /* First clause ATEST |= 0x4000000.
323 * Less than 32 regs |= 0x200 */
324 meta->bifrost1.unk1 = 0x950020;
325 }
326
327 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
328 if (st == PIPE_SHADER_VERTEX)
329 meta->bifrost2.preload_regs = 0xC0;
330 else {
331 meta->bifrost2.preload_regs = 0x1;
332 SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
333 }
334
335 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
336 ss->uniform_cutoff);
337 } else {
338 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
339 ss->uniform_cutoff);
340 meta->midgard1.work_count = ss->work_reg_count;
341
342 /* TODO: This is not conformant on ES3 */
343 meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
344
345 meta->midgard1.flags_lo = 0x20;
346 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
347
348 SET_BIT(meta->midgard1.flags_hi, MALI_WRITES_GLOBAL, ss->writes_global);
349 }
350 }
351
352 static unsigned
353 translate_tex_wrap(enum pipe_tex_wrap w)
354 {
355 switch (w) {
356 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
357 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
358 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
359 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
360 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
361 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
362 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
363 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
364 default: unreachable("Invalid wrap");
365 }
366 }
367
368 /* The hardware compares in the wrong order, so we have to flip before
369 * encoding. Yes, really. */
370
371 static enum mali_func
372 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
373 {
374 if (!cso->compare_mode)
375 return MALI_FUNC_NEVER;
376
377 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
378 return panfrost_flip_compare_func(f);
379 }
380
381 static enum mali_mipmap_mode
382 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
383 {
384 switch (f) {
385 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
386 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
387 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
388 default: unreachable("Invalid");
389 }
390 }
391
392 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
393 struct mali_midgard_sampler_packed *hw)
394 {
395 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
396 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
397 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
398 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
399 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
400 cfg.normalized_coordinates = cso->normalized_coords;
401
402 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
403
404 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
405
406 /* If necessary, we disable mipmapping in the sampler descriptor by
407 * clamping the LOD as tight as possible (from 0 to epsilon,
408 * essentially -- remember these are fixed point numbers, so
409 * epsilon=1/256) */
410
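/* Illustrative example (assuming the 1/256 fixed-point step noted above):
 * with min_mip_filter == NONE and cso->min_lod = 2.0, maximum_lod ends up
 * as minimum_lod + 1, i.e. roughly 2.0 + 1/256, pinning sampling to a
 * single mip level. */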
411 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
412 cfg.minimum_lod + 1 :
413 FIXED_16(cso->max_lod, false);
414
415 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
416 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
417 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
418
419 cfg.compare_function = panfrost_sampler_compare_func(cso);
420 cfg.seamless_cube_map = cso->seamless_cube_map;
421
422 cfg.border_color_r = cso->border_color.f[0];
423 cfg.border_color_g = cso->border_color.f[1];
424 cfg.border_color_b = cso->border_color.f[2];
425 cfg.border_color_a = cso->border_color.f[3];
426 }
427 }
428
429 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
430 struct mali_bifrost_sampler_packed *hw)
431 {
432 pan_pack(hw, BIFROST_SAMPLER, cfg) {
433 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
434 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
435 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
436 cfg.normalized_coordinates = cso->normalized_coords;
437
438 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
439 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
440 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
441
442 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
443 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
444 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
445
446 cfg.compare_function = panfrost_sampler_compare_func(cso);
447 cfg.seamless_cube_map = cso->seamless_cube_map;
448 }
449 }
450
451 static void
452 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
453 struct mali_shader_meta *fragmeta)
454 {
455 if (!ctx->rasterizer) {
456 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
457 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
458 fragmeta->depth_units = 0.0f;
459 fragmeta->depth_factor = 0.0f;
460 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
461 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
462 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, true);
463 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, true);
464 return;
465 }
466
467 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
468
469 bool msaa = rast->multisample;
470
471 /* TODO: Sample size */
472 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
473 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
474
475 struct panfrost_shader_state *fs;
476 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
477
478 /* EXT_shader_framebuffer_fetch requires the shader to be run
479 * per-sample when outputs are read. */
480 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
481 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
482
483 fragmeta->depth_units = rast->offset_units * 2.0f;
484 fragmeta->depth_factor = rast->offset_scale;
485
486 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
487
488 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
489 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
490
491 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
492 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
493 }
494
495 static void
496 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
497 struct mali_shader_meta *fragmeta)
498 {
499 const struct panfrost_zsa_state *so = ctx->depth_stencil;
500 int zfunc = PIPE_FUNC_ALWAYS;
501
502 if (!so) {
503 /* If stenciling is disabled, the state is irrelevant */
504 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
505 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
506 } else {
507 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
508 so->base.stencil[0].enabled);
509
510 fragmeta->stencil_mask_front = so->stencil_mask_front;
511 fragmeta->stencil_mask_back = so->stencil_mask_back;
512
513 /* Bottom bits for stencil ref, exactly one word */
514 fragmeta->stencil_front.opaque[0] = so->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
515
516 /* If back-stencil is not enabled, use the front values */
517
518 if (so->base.stencil[1].enabled)
519 fragmeta->stencil_back.opaque[0] = so->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
520 else
521 fragmeta->stencil_back = fragmeta->stencil_front;
522
523 if (so->base.depth.enabled)
524 zfunc = so->base.depth.func;
525
526 /* Depth state (TODO: Refactor) */
527
528 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
529 so->base.depth.writemask);
530 }
531
532 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
533 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
534 }
535
536 static bool
537 panfrost_fs_required(
538 struct panfrost_shader_state *fs,
539 struct panfrost_blend_final *blend,
540 unsigned rt_count)
541 {
542 /* If we generally have side effects */
543 if (fs->fs_sidefx)
544 return true;
545
546 /* If colour is written we need to execute */
547 for (unsigned i = 0; i < rt_count; ++i) {
548 if (!blend[i].no_colour)
549 return true;
550 }
551
552 /* If depth is written and not implied we need to execute.
553 * TODO: Predicate on Z/S writes being enabled */
554 return (fs->writes_depth || fs->writes_stencil);
555 }
556
557 static void
558 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
559 struct mali_shader_meta *fragmeta,
560 void *rts)
561 {
562 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
563 const struct panfrost_device *dev = pan_device(ctx->base.screen);
564 struct panfrost_shader_state *fs;
565 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
566
567 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
568 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
569 !ctx->blend->base.dither);
570
571 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
572 ctx->blend->base.alpha_to_coverage);
573
574 /* Get blending setup */
575 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
576
577 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
578 unsigned shader_offset = 0;
579 struct panfrost_bo *shader_bo = NULL;
580
581 for (unsigned c = 0; c < rt_count; ++c)
582 blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
583 &shader_offset);
584
585 /* Disable shader execution if we can */
586 if (dev->quirks & MIDGARD_SHADERLESS
587 && !panfrost_fs_required(fs, blend, rt_count)) {
588 fragmeta->shader = 0;
589 fragmeta->attribute_count = 0;
590 fragmeta->varying_count = 0;
591 fragmeta->texture_count = 0;
592 fragmeta->sampler_count = 0;
593
594 /* This feature is not known to work on Bifrost */
595 fragmeta->midgard1.work_count = 1;
596 fragmeta->midgard1.uniform_count = 0;
597 fragmeta->midgard1.uniform_buffer_count = 0;
598 }
599
600 /* If there is a blend shader, work registers are shared. We impose 8
601 * work registers as a limit for blend shaders. Should be lower XXX */
602
603 if (!(dev->quirks & IS_BIFROST)) {
604 for (unsigned c = 0; c < rt_count; ++c) {
605 if (blend[c].is_shader) {
606 fragmeta->midgard1.work_count =
607 MAX2(fragmeta->midgard1.work_count, 8);
608 }
609 }
610 }
611
612 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
613 * copied to the blend_meta appended (by convention), but this is the
614 * field actually read by the hardware. (Or maybe both are read...?).
615 * Specify the last RTi with a blend shader. */
616
617 fragmeta->blend.shader = 0;
618
619 for (signed rt = (rt_count - 1); rt >= 0; --rt) {
620 if (!blend[rt].is_shader)
621 continue;
622
623 fragmeta->blend.shader = blend[rt].shader.gpu |
624 blend[rt].shader.first_tag;
625 break;
626 }
627
628 if (dev->quirks & MIDGARD_SFBD) {
629 /* On platforms with only a single render target (SFBD), the blend
630 * information is inside the shader meta itself. We additionally
631 * need to signal CAN_DISCARD for nontrivial blend modes (so
632 * we're able to read back the destination buffer) */
633
634 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
635 blend[0].is_shader);
636
637 if (!blend[0].is_shader) {
638 fragmeta->blend.equation = *blend[0].equation.equation;
639 fragmeta->blend.constant = blend[0].equation.constant;
640 }
641
642 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
643 !blend[0].no_blending || fs->can_discard);
644
645 batch->draws |= PIPE_CLEAR_COLOR0;
646 return;
647 }
648
649 if (dev->quirks & IS_BIFROST) {
650 bool no_blend = true;
651
652 for (unsigned i = 0; i < rt_count; ++i)
653 no_blend &= (blend[i].no_blending | blend[i].no_colour);
654
655 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
656 !fs->can_discard && !fs->writes_depth && no_blend);
657 }
658
659 /* Additional blend descriptor tacked on for jobs using MFBD */
660
661 for (unsigned i = 0; i < rt_count; ++i) {
662 unsigned flags = 0;
663
664 if (ctx->pipe_framebuffer.nr_cbufs > i && !blend[i].no_colour) {
665 flags = 0x200;
666 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
667
668 bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
669 (ctx->pipe_framebuffer.cbufs[i]) &&
670 util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
671
672 SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
673 SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
674 SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
675 SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
676 }
677
678 if (dev->quirks & IS_BIFROST) {
679 struct bifrost_blend_rt *brts = rts;
680
681 brts[i].flags = flags;
682
683 if (blend[i].is_shader) {
684 /* The blend shader's address needs to share
685 * the same top 32 bits as the fragment shader's.
686 * TODO: Ensure that's always the case.
687 */
688 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
689 (fs->bo->gpu & (0xffffffffull << 32)));
690 brts[i].shader = blend[i].shader.gpu;
691 brts[i].unk2 = 0x0;
692 } else if (ctx->pipe_framebuffer.nr_cbufs > i) {
693 enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
694 const struct util_format_description *format_desc;
695 format_desc = util_format_description(format);
696
697 brts[i].equation = *blend[i].equation.equation;
698
699 /* TODO: this is a bit more complicated */
700 brts[i].constant = blend[i].equation.constant;
701
702 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
703
704 /* 0x19 disables blending and forces REPLACE
705 * mode (equivalent to rgb_mode = alpha_mode =
706 * x122, colour mask = 0xF). 0x1a allows
707 * blending. */
708 brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
709
710 brts[i].shader_type = fs->blend_types[i];
711 } else {
712 /* Dummy attachment for depth-only */
713 brts[i].unk2 = 0x3;
714 brts[i].shader_type = fs->blend_types[i];
715 }
716 } else {
717 struct midgard_blend_rt *mrts = rts;
718 mrts[i].flags = flags;
719
720 if (blend[i].is_shader) {
721 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
722 } else {
723 mrts[i].blend.equation = *blend[i].equation.equation;
724 mrts[i].blend.constant = blend[i].equation.constant;
725 }
726 }
727 }
728 }
729
730 static void
731 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
732 struct mali_shader_meta *fragmeta,
733 void *rts)
734 {
735 const struct panfrost_device *dev = pan_device(ctx->base.screen);
736 struct panfrost_shader_state *fs;
737
738 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
739
740 bool msaa = ctx->rasterizer && ctx->rasterizer->base.multisample;
741 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
742
743 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
744 fragmeta->unknown2_4 = 0x4e0;
745
746 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
747 * is required (independent of 32-bit/64-bit descriptors), or why it's
748 * not used on later GPU revisions. Otherwise, all shader jobs fault on
749 * these earlier chips (perhaps this is a chicken bit of some kind).
750 * More investigation is needed. */
751
752 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
753
754 if (dev->quirks & IS_BIFROST) {
755 /* TODO */
756 } else {
757 /* If it is legal to do so in the given shader, we try to
758 * enable early-z testing. TODO: respect e-z force */
759
760 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
761 !fs->can_discard && !fs->writes_global &&
762 !fs->writes_depth && !fs->writes_stencil &&
763 !ctx->blend->base.alpha_to_coverage);
764
765 /* Add the writes Z/S flags if needed. */
766 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
767 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
768
769 /* Any time texturing is used, derivatives are implicitly calculated,
770 * so we need to enable helper invocations */
771
772 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
773 fs->helper_invocations);
774
775 /* If discard is enabled, which bit we set to convey this
776 * depends on whether depth/stencil is used for the draw.
777 * Just one of depth OR stencil is enough to trigger this. */
778
779 const struct pipe_depth_stencil_alpha_state *zsa = &ctx->depth_stencil->base;
780 bool zs_enabled = fs->writes_depth || fs->writes_stencil;
781
782 if (zsa) {
783 zs_enabled |= (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS);
784 zs_enabled |= zsa->stencil[0].enabled;
785 }
786
787 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
788 fs->outputs_read || (!zs_enabled && fs->can_discard));
789 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
790 }
791
792 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
793 panfrost_frag_meta_zsa_update(ctx, fragmeta);
794 panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
795 }
796
797 void
798 panfrost_emit_shader_meta(struct panfrost_batch *batch,
799 enum pipe_shader_type st,
800 struct mali_vertex_tiler_postfix *postfix)
801 {
802 struct panfrost_context *ctx = batch->ctx;
803 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
804
805 if (!ss) {
806 postfix->shader = 0;
807 return;
808 }
809
810 struct mali_shader_meta meta;
811
812 panfrost_shader_meta_init(ctx, st, &meta);
813
814 /* Add the shader BO to the batch. */
815 panfrost_batch_add_bo(batch, ss->bo,
816 PAN_BO_ACCESS_PRIVATE |
817 PAN_BO_ACCESS_READ |
818 panfrost_bo_access_for_stage(st));
819
820 mali_ptr shader_ptr;
821
822 if (st == PIPE_SHADER_FRAGMENT) {
823 struct panfrost_device *dev = pan_device(ctx->base.screen);
824 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
825 size_t desc_size = sizeof(meta);
826 void *rts = NULL;
827 struct panfrost_transfer xfer;
828 unsigned rt_size;
829
830 if (dev->quirks & MIDGARD_SFBD)
831 rt_size = 0;
832 else if (dev->quirks & IS_BIFROST)
833 rt_size = sizeof(struct bifrost_blend_rt);
834 else
835 rt_size = sizeof(struct midgard_blend_rt);
836
837 desc_size += rt_size * rt_count;
838
839 if (rt_size)
840 rts = rzalloc_size(ctx, rt_size * rt_count);
841
842 panfrost_frag_shader_meta_init(ctx, &meta, rts);
843
844 xfer = panfrost_pool_alloc(&batch->pool, desc_size);
845
846 memcpy(xfer.cpu, &meta, sizeof(meta));
847 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
848
849 if (rt_size)
850 ralloc_free(rts);
851
852 shader_ptr = xfer.gpu;
853 } else {
854 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
855 sizeof(meta));
856 }
857
858 postfix->shader = shader_ptr;
859 }
860
861 void
862 panfrost_emit_viewport(struct panfrost_batch *batch,
863 struct mali_vertex_tiler_postfix *tiler_postfix)
864 {
865 struct panfrost_context *ctx = batch->ctx;
866 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
867 const struct pipe_scissor_state *ss = &ctx->scissor;
868 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
869 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
870
871 /* Derive min/max from translate/scale. Note since |x| >= 0 by
872 * definition, we have that -|x| <= |x| hence translate - |scale| <=
873 * translate + |scale|, so the ordering is correct here. */
874 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
875 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
876 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
877 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
878 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
879 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
880
881 /* Scissor to the intersection of the viewport and the scissor, clamped
882 * to the framebuffer */
883
884 unsigned minx = MIN2(fb->width, vp_minx);
885 unsigned maxx = MIN2(fb->width, vp_maxx);
886 unsigned miny = MIN2(fb->height, vp_miny);
887 unsigned maxy = MIN2(fb->height, vp_maxy);
888
889 if (ss && rast && rast->scissor) {
890 minx = MAX2(ss->minx, minx);
891 miny = MAX2(ss->miny, miny);
892 maxx = MIN2(ss->maxx, maxx);
893 maxy = MIN2(ss->maxy, maxy);
894 }
895
896 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
897
898 pan_pack(T.cpu, VIEWPORT, cfg) {
899 cfg.scissor_minimum_x = minx;
900 cfg.scissor_minimum_y = miny;
901 cfg.scissor_maximum_x = maxx - 1;
902 cfg.scissor_maximum_y = maxy - 1;
903
904 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
905 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
906 }
907
908 tiler_postfix->viewport = T.gpu;
909 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
910 }
911
912 static mali_ptr
913 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
914 enum pipe_shader_type st,
915 struct panfrost_constant_buffer *buf,
916 unsigned index)
917 {
918 struct pipe_constant_buffer *cb = &buf->cb[index];
919 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
920
921 if (rsrc) {
922 panfrost_batch_add_bo(batch, rsrc->bo,
923 PAN_BO_ACCESS_SHARED |
924 PAN_BO_ACCESS_READ |
925 panfrost_bo_access_for_stage(st));
926
927 /* Alignment guaranteed by
928 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
929 return rsrc->bo->gpu + cb->buffer_offset;
930 } else if (cb->user_buffer) {
931 return panfrost_pool_upload(&batch->pool,
932 cb->user_buffer +
933 cb->buffer_offset,
934 cb->buffer_size);
935 } else {
936 unreachable("No constant buffer");
937 }
938 }
939
940 struct sysval_uniform {
941 union {
942 float f[4];
943 int32_t i[4];
944 uint32_t u[4];
945 uint64_t du[2];
946 };
947 };
948
949 static void
950 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
951 struct sysval_uniform *uniform)
952 {
953 struct panfrost_context *ctx = batch->ctx;
954 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
955
956 uniform->f[0] = vp->scale[0];
957 uniform->f[1] = vp->scale[1];
958 uniform->f[2] = vp->scale[2];
959 }
960
961 static void
962 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
963 struct sysval_uniform *uniform)
964 {
965 struct panfrost_context *ctx = batch->ctx;
966 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
967
968 uniform->f[0] = vp->translate[0];
969 uniform->f[1] = vp->translate[1];
970 uniform->f[2] = vp->translate[2];
971 }
972
973 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
974 enum pipe_shader_type st,
975 unsigned int sysvalid,
976 struct sysval_uniform *uniform)
977 {
978 struct panfrost_context *ctx = batch->ctx;
979 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
980 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
981 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
982 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
983
984 assert(dim);
985 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
986
987 if (dim > 1)
988 uniform->i[1] = u_minify(tex->texture->height0,
989 tex->u.tex.first_level);
990
991 if (dim > 2)
992 uniform->i[2] = u_minify(tex->texture->depth0,
993 tex->u.tex.first_level);
994
995 if (is_array)
996 uniform->i[dim] = tex->texture->array_size;
997 }
998
999 static void
1000 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
1001 enum pipe_shader_type st,
1002 unsigned ssbo_id,
1003 struct sysval_uniform *uniform)
1004 {
1005 struct panfrost_context *ctx = batch->ctx;
1006
1007 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
1008 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
1009
1010 /* Compute address */
1011 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
1012
1013 panfrost_batch_add_bo(batch, bo,
1014 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
1015 panfrost_bo_access_for_stage(st));
1016
1017 /* Upload address and size as sysval */
1018 uniform->du[0] = bo->gpu + sb.buffer_offset;
1019 uniform->u[2] = sb.buffer_size;
1020 }
1021
1022 static void
1023 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
1024 enum pipe_shader_type st,
1025 unsigned samp_idx,
1026 struct sysval_uniform *uniform)
1027 {
1028 struct panfrost_context *ctx = batch->ctx;
1029 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1030
1031 uniform->f[0] = sampl->min_lod;
1032 uniform->f[1] = sampl->max_lod;
1033 uniform->f[2] = sampl->lod_bias;
1034
1035 /* Even without any errata, Midgard represents "no mipmapping" as
1036 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1037 * panfrost_create_sampler_state which also explains our choice of
1038 * epsilon value (again to keep behaviour consistent) */
1039
1040 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1041 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1042 }
1043
1044 static void
1045 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1046 struct sysval_uniform *uniform)
1047 {
1048 struct panfrost_context *ctx = batch->ctx;
1049
1050 uniform->u[0] = ctx->compute_grid->grid[0];
1051 uniform->u[1] = ctx->compute_grid->grid[1];
1052 uniform->u[2] = ctx->compute_grid->grid[2];
1053 }
1054
1055 static void
1056 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1057 struct panfrost_shader_state *ss,
1058 enum pipe_shader_type st)
1059 {
1060 struct sysval_uniform *uniforms = (void *)buf;
1061
1062 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1063 int sysval = ss->sysval[i];
1064
1065 switch (PAN_SYSVAL_TYPE(sysval)) {
1066 case PAN_SYSVAL_VIEWPORT_SCALE:
1067 panfrost_upload_viewport_scale_sysval(batch,
1068 &uniforms[i]);
1069 break;
1070 case PAN_SYSVAL_VIEWPORT_OFFSET:
1071 panfrost_upload_viewport_offset_sysval(batch,
1072 &uniforms[i]);
1073 break;
1074 case PAN_SYSVAL_TEXTURE_SIZE:
1075 panfrost_upload_txs_sysval(batch, st,
1076 PAN_SYSVAL_ID(sysval),
1077 &uniforms[i]);
1078 break;
1079 case PAN_SYSVAL_SSBO:
1080 panfrost_upload_ssbo_sysval(batch, st,
1081 PAN_SYSVAL_ID(sysval),
1082 &uniforms[i]);
1083 break;
1084 case PAN_SYSVAL_NUM_WORK_GROUPS:
1085 panfrost_upload_num_work_groups_sysval(batch,
1086 &uniforms[i]);
1087 break;
1088 case PAN_SYSVAL_SAMPLER:
1089 panfrost_upload_sampler_sysval(batch, st,
1090 PAN_SYSVAL_ID(sysval),
1091 &uniforms[i]);
1092 break;
1093 default:
1094 assert(0);
1095 }
1096 }
1097 }
1098
1099 static const void *
1100 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1101 unsigned index)
1102 {
1103 struct pipe_constant_buffer *cb = &buf->cb[index];
1104 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1105
1106 if (rsrc)
1107 return rsrc->bo->cpu;
1108 else if (cb->user_buffer)
1109 return cb->user_buffer;
1110 else
1111 unreachable("No constant buffer");
1112 }
1113
1114 void
1115 panfrost_emit_const_buf(struct panfrost_batch *batch,
1116 enum pipe_shader_type stage,
1117 struct mali_vertex_tiler_postfix *postfix)
1118 {
1119 struct panfrost_context *ctx = batch->ctx;
1120 struct panfrost_shader_variants *all = ctx->shader[stage];
1121
1122 if (!all)
1123 return;
1124
1125 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1126
1127 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1128
1129 /* Uniforms are implicitly UBO #0 */
1130 bool has_uniforms = buf->enabled_mask & (1 << 0);
1131
1132 /* Allocate room for the sysval and the uniforms */
1133 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1134 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1135 size_t size = sys_size + uniform_size;
1136 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1137 size);
1138
1139 /* Upload sysvals requested by the shader */
1140 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1141
1142 /* Upload uniforms */
1143 if (has_uniforms && uniform_size) {
1144 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1145 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1146 }
1147
1148 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1149 * uploaded */
1150
1151 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1152 assert(ubo_count >= 1);
1153
1154 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1155 struct panfrost_transfer ubos = panfrost_pool_alloc(&batch->pool, sz);
1156 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1157
1158 /* Upload uniforms as a UBO */
1159
1160 if (ss->uniform_count) {
1161 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1162 cfg.entries = ss->uniform_count;
1163 cfg.pointer = transfer.gpu;
1164 }
1165 } else {
1166 *ubo_ptr = 0;
1167 }
1168
1169 /* The rest are honest-to-goodness UBOs */
1170
1171 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1172 size_t usz = buf->cb[ubo].buffer_size;
1173 bool enabled = buf->enabled_mask & (1 << ubo);
1174 bool empty = usz == 0;
1175
1176 if (!enabled || empty) {
1177 ubo_ptr[ubo] = 0;
1178 continue;
1179 }
1180
1181 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1182 cfg.entries = DIV_ROUND_UP(usz, 16);
1183 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1184 stage, buf, ubo);
1185 }
1186 }
1187
1188 postfix->uniforms = transfer.gpu;
1189 postfix->uniform_buffers = ubos.gpu;
1190
1191 buf->dirty_mask = 0;
1192 }
1193
1194 void
1195 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1196 const struct pipe_grid_info *info,
1197 struct midgard_payload_vertex_tiler *vtp)
1198 {
1199 struct panfrost_context *ctx = batch->ctx;
1200 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1201 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1202 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1203 128));
1204 unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
1205 info->grid[2] * 4;
1206 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1207 shared_size,
1208 1);
1209
1210 struct mali_shared_memory shared = {
1211 .shared_memory = bo->gpu,
1212 .shared_workgroup_count =
1213 util_logbase2_ceil(info->grid[0]) +
1214 util_logbase2_ceil(info->grid[1]) +
1215 util_logbase2_ceil(info->grid[2]),
1216 .shared_unk1 = 0x2,
1217 .shared_shift = util_logbase2(single_size) - 1
1218 };
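/* Worked example with a hypothetical grid (illustrative only): info->grid =
 * {4, 4, 2} and ss->shared_size <= 128 give single_size = 128, shared_size =
 * 128 * 4 * 4 * 2 * 4 = 16384, shared_workgroup_count = 2 + 2 + 1 = 5 and
 * shared_shift = log2(128) - 1 = 6. */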
1219
1220 vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
1221 sizeof(shared));
1222 }
1223
1224 static mali_ptr
1225 panfrost_get_tex_desc(struct panfrost_batch *batch,
1226 enum pipe_shader_type st,
1227 struct panfrost_sampler_view *view)
1228 {
1229 if (!view)
1230 return (mali_ptr) 0;
1231
1232 struct pipe_sampler_view *pview = &view->base;
1233 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1234
1235 /* Add the BO to the job so it's retained until the job is done. */
1236
1237 panfrost_batch_add_bo(batch, rsrc->bo,
1238 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1239 panfrost_bo_access_for_stage(st));
1240
1241 panfrost_batch_add_bo(batch, view->bo,
1242 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1243 panfrost_bo_access_for_stage(st));
1244
1245 return view->bo->gpu;
1246 }
1247
1248 static void
1249 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1250 struct pipe_context *pctx)
1251 {
1252 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1253 if (view->texture_bo != rsrc->bo->gpu ||
1254 view->modifier != rsrc->modifier) {
1255 panfrost_bo_unreference(view->bo);
1256 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1257 }
1258 }
1259
1260 void
1261 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1262 enum pipe_shader_type stage,
1263 struct mali_vertex_tiler_postfix *postfix)
1264 {
1265 struct panfrost_context *ctx = batch->ctx;
1266 struct panfrost_device *device = pan_device(ctx->base.screen);
1267
1268 if (!ctx->sampler_view_count[stage])
1269 return;
1270
1271 if (device->quirks & IS_BIFROST) {
1272 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
1273 MALI_BIFROST_TEXTURE_LENGTH *
1274 ctx->sampler_view_count[stage]);
1275
1276 struct mali_bifrost_texture_packed *out =
1277 (struct mali_bifrost_texture_packed *) T.cpu;
1278
1279 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1280 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1281 struct pipe_sampler_view *pview = &view->base;
1282 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1283
1284 panfrost_update_sampler_view(view, &ctx->base);
1285 out[i] = view->bifrost_descriptor;
1286
1287 /* Add the BOs to the job so they are retained until the job is done. */
1288
1289 panfrost_batch_add_bo(batch, rsrc->bo,
1290 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1291 panfrost_bo_access_for_stage(stage));
1292
1293 panfrost_batch_add_bo(batch, view->bo,
1294 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1295 panfrost_bo_access_for_stage(stage));
1296 }
1297
1298 postfix->textures = T.gpu;
1299 } else {
1300 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1301
1302 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1303 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1304
1305 panfrost_update_sampler_view(view, &ctx->base);
1306
1307 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1308 }
1309
1310 postfix->textures = panfrost_pool_upload(&batch->pool,
1311 trampolines,
1312 sizeof(uint64_t) *
1313 ctx->sampler_view_count[stage]);
1314 }
1315 }
1316
1317 void
1318 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1319 enum pipe_shader_type stage,
1320 struct mali_vertex_tiler_postfix *postfix)
1321 {
1322 struct panfrost_context *ctx = batch->ctx;
1323
1324 if (!ctx->sampler_count[stage])
1325 return;
1326
1327 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1328 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1329
1330 size_t sz = desc_size * ctx->sampler_count[stage];
1331 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, sz);
1332 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1333
1334 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1335 out[i] = ctx->samplers[stage][i]->hw;
1336
1337 postfix->sampler_descriptor = T.gpu;
1338 }
1339
1340 void
1341 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1342 struct mali_vertex_tiler_postfix *vertex_postfix)
1343 {
1344 struct panfrost_context *ctx = batch->ctx;
1345 struct panfrost_vertex_state *so = ctx->vertex;
1346
1347 unsigned instance_shift = vertex_postfix->instance_shift;
1348 unsigned instance_odd = vertex_postfix->instance_odd;
1349
1350 /* Staged mali_attr, and index into them. i =/= k, depending on the
1351 * vertex buffer mask and instancing. Twice as much room is allocated,
1352 * for a worst case of NPOT_DIVIDEs, which take up an extra slot */
1353 union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
1354 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1355 unsigned k = 0;
1356
1357 for (unsigned i = 0; i < so->num_elements; ++i) {
1358 /* We map a mali_attr to be 1:1 with the mali_attr_meta, which
1359 * means duplicating some vertex buffers (who cares? aside from
1360 * maybe some caching implications but I somehow doubt that
1361 * matters) */
1362
1363 struct pipe_vertex_element *elem = &so->pipe[i];
1364 unsigned vbi = elem->vertex_buffer_index;
1365 attrib_to_buffer[i] = k;
1366
1367 if (!(ctx->vb_mask & (1 << vbi)))
1368 continue;
1369
1370 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1371 struct panfrost_resource *rsrc;
1372
1373 rsrc = pan_resource(buf->buffer.resource);
1374 if (!rsrc)
1375 continue;
1376
1377 /* Align to 64 bytes by masking off the lower bits. This
1378 * will be adjusted back when we fix up the src_offset in
1379 * mali_attr_meta */
1380
1381 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1382 mali_ptr addr = raw_addr & ~63;
1383 unsigned chopped_addr = raw_addr - addr;
1384
1385 /* Add a dependency of the batch on the vertex buffer */
1386 panfrost_batch_add_bo(batch, rsrc->bo,
1387 PAN_BO_ACCESS_SHARED |
1388 PAN_BO_ACCESS_READ |
1389 PAN_BO_ACCESS_VERTEX_TILER);
1390
1391 /* Set common fields */
1392 attrs[k].elements = addr;
1393 attrs[k].stride = buf->stride;
1394
1395 /* Since we advanced the base pointer, we shrink the buffer
1396 * size */
1397 attrs[k].size = rsrc->base.width0 - buf->buffer_offset;
1398
1399 /* We need to add the extra size we masked off (for
1400 * correctness) so the data doesn't get clamped away */
1401 attrs[k].size += chopped_addr;
1402
1403 /* For non-instancing make sure we initialize */
1404 attrs[k].shift = attrs[k].extra_flags = 0;
1405
1406 /* Instancing uses a dramatically different code path than
1407 * linear, so dispatch for the actual emission now that the
1408 * common code is finished */
1409
1410 unsigned divisor = elem->instance_divisor;
1411
1412 /* Depending on whether there is an instance divisor, packing varies.
1413 * When there is a divisor, the hardware-level divisor is actually the
1414 * product of the instance divisor and the padded count */
1415
1416 unsigned hw_divisor = ctx->padded_count * divisor;
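/* Illustrative numbers (not from any particular draw): in an instanced
 * draw with ctx->padded_count = 8 and divisor = 3, hw_divisor = 24, which
 * is not a power of two, so the NPOT_DIVIDE (magic divisor) path below is
 * taken; divisor = 2 would give hw_divisor = 16 and the simpler
 * POT_DIVIDE shift path instead. */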
1417
1418 if (divisor && ctx->instance_count == 1) {
1419 /* Silly corner case where there's a divisor(=1) but
1420 * there's no legitimate instancing. So we want *every*
1421 * attribute to be the same. So set stride to zero so
1422 * we don't go anywhere. */
1423
1424 attrs[k].size = attrs[k].stride + chopped_addr;
1425 attrs[k].stride = 0;
1426 attrs[k++].elements |= MALI_ATTR_LINEAR;
1427 } else if (ctx->instance_count <= 1) {
1428 /* Normal, non-instanced attributes */
1429 attrs[k++].elements |= MALI_ATTR_LINEAR;
1430 } else if (divisor == 0) {
1431 /* Per-vertex attributes use the MODULO mode. */
1432 attrs[k].elements |= MALI_ATTR_MODULO;
1433 attrs[k].shift = instance_shift;
1434 attrs[k++].extra_flags = instance_odd;
1435 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1436 /* If there is a divisor but the hardware divisor works out to
1437 * a power of two (not terribly exceptional), we can use an
1438 * easy path (just shifting) */
1439
1440 attrs[k].elements |= MALI_ATTR_POT_DIVIDE;
1441 attrs[k++].shift = __builtin_ctz(hw_divisor);
1442 } else {
1443 unsigned shift = 0, extra_flags = 0;
1444
1445 unsigned magic_divisor =
1446 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
1447
1448 /* Upload to two different slots */
1449
1450 attrs[k].elements |= MALI_ATTR_NPOT_DIVIDE;
1451 attrs[k].shift = shift;
1452 attrs[k++].extra_flags = extra_flags;
1453
1454 attrs[k].unk = 0x20;
1455 attrs[k].zero = 0;
1456 attrs[k].magic_divisor = magic_divisor;
1457 attrs[k++].divisor = divisor;
1458 }
1459 }
1460
1461 /* Add special gl_VertexID/gl_InstanceID buffers */
1462
1463 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
1464 MALI_ATTRIBUTE_LENGTH * (PAN_INSTANCE_ID + 1));
1465
1466 struct mali_attribute_packed *out =
1467 (struct mali_attribute_packed *) T.cpu;
1468
1469 panfrost_vertex_id(ctx->padded_count, &attrs[k]);
1470
1471 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1472 cfg.buffer_index = k++;
1473 cfg.format = so->formats[PAN_VERTEX_ID];
1474 }
1475
1476 panfrost_instance_id(ctx->padded_count, &attrs[k]);
1477
1478 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1479 cfg.buffer_index = k++;
1480 cfg.format = so->formats[PAN_INSTANCE_ID];
1481 }
1482
1483 /* Attribute addresses require 64-byte alignment, so let:
1484 *
1485 * base' = base & ~63 = base - (base & 63)
1486 * offset' = offset + (base & 63)
1487 *
1488 * Since base' + offset' = base + offset, these are equivalent
1489 * addressing modes and now base is 64 aligned.
1490 */
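/* Numeric example (illustrative): base = 0x1234 gives base & 63 = 0x34, so
 * base' = 0x1200 and offset' = offset + 0x34; base' + offset' still equals
 * base + offset, but the base pointer is now 64-byte aligned. */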
1491
1492 unsigned start = vertex_postfix->offset_start;
1493
1494 for (unsigned i = 0; i < so->num_elements; ++i) {
1495 unsigned vbi = so->pipe[i].vertex_buffer_index;
1496 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1497
1498 /* Adjust by the masked off bits of the offset. Make sure we
1499 * read src_offset from so->hw (which is not GPU visible)
1500 * rather than target (which is) due to caching effects */
1501
1502 unsigned src_offset = so->pipe[i].src_offset;
1503
1504 /* BOs aligned to 4k so guaranteed aligned to 64 */
1505 src_offset += (buf->buffer_offset & 63);
1506
1507 /* Also, somewhat obscurely, per-instance data needs to be
1508 * offset in response to a delayed start in an indexed draw */
1509
1510 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1511 src_offset -= buf->stride * start;
1512
1513 pan_pack(out + i, ATTRIBUTE, cfg) {
1514 cfg.buffer_index = attrib_to_buffer[i];
1515 cfg.format = so->formats[i];
1516 cfg.offset = src_offset;
1517 }
1518 }
1519
1520
1521 vertex_postfix->attributes = panfrost_pool_upload(&batch->pool, attrs,
1522 k * sizeof(*attrs));
1523
1524 vertex_postfix->attribute_meta = T.gpu;
1525 }
1526
1527 static mali_ptr
1528 panfrost_emit_varyings(struct panfrost_batch *batch, union mali_attr *slot,
1529 unsigned stride, unsigned count)
1530 {
1531 /* Fill out the descriptor */
1532 slot->stride = stride;
1533 slot->size = stride * count;
1534 slot->shift = slot->extra_flags = 0;
1535
1536 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1537 slot->size);
1538
1539 slot->elements = transfer.gpu | MALI_ATTR_LINEAR;
1540
1541 return transfer.gpu;
1542 }
1543
1544 static unsigned
1545 panfrost_streamout_offset(unsigned stride, unsigned offset,
1546 struct pipe_stream_output_target *target)
1547 {
1548 return (target->buffer_offset + (offset * stride * 4)) & 63;
1549 }
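/* Illustrative example: target->buffer_offset = 0x104, stride = 4 words and
 * offset = 3 vertices give (0x104 + 3 * 4 * 4) & 63 = 0x134 & 63 = 0x34; the
 * 64-byte-aligned base is emitted by panfrost_emit_streamout() below, and
 * this residue is later added into the varying record's src_offset. */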
1550
1551 static void
1552 panfrost_emit_streamout(struct panfrost_batch *batch, union mali_attr *slot,
1553 unsigned stride, unsigned offset, unsigned count,
1554 struct pipe_stream_output_target *target)
1555 {
1556 /* Fill out the descriptor */
1557 slot->stride = stride * 4;
1558 slot->shift = slot->extra_flags = 0;
1559
1560 unsigned max_size = target->buffer_size;
1561 unsigned expected_size = slot->stride * count;
1562
1563 /* Grab the BO and bind it to the batch */
1564 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1565
1566 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1567 * the perspective of the TILER and FRAGMENT.
1568 */
1569 panfrost_batch_add_bo(batch, bo,
1570 PAN_BO_ACCESS_SHARED |
1571 PAN_BO_ACCESS_RW |
1572 PAN_BO_ACCESS_VERTEX_TILER |
1573 PAN_BO_ACCESS_FRAGMENT);
1574
1575 /* We will have an offset applied to get alignment */
1576 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * slot->stride);
1577 slot->elements = (addr & ~63) | MALI_ATTR_LINEAR;
1578 slot->size = MIN2(max_size, expected_size) + (addr & 63);
1579 }
1580
1581 static bool
1582 has_point_coord(unsigned mask, gl_varying_slot loc)
1583 {
1584 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1585 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1586 else if (loc == VARYING_SLOT_PNTC)
1587 return (mask & (1 << 8));
1588 else
1589 return false;
1590 }
1591
1592 /* Helpers for manipulating stream out information so we can pack varyings
1593 * accordingly. Compute the src_offset for a given captured varying */
1594
1595 static struct pipe_stream_output *
1596 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1597 {
1598 for (unsigned i = 0; i < info->num_outputs; ++i) {
1599 if (info->output[i].register_index == loc)
1600 return &info->output[i];
1601 }
1602
1603 unreachable("Varying not captured");
1604 }
1605
1606 static unsigned
1607 pan_varying_size(enum mali_format fmt)
1608 {
1609 unsigned type = MALI_EXTRACT_TYPE(fmt);
1610 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1611 unsigned bits = MALI_EXTRACT_BITS(fmt);
1612 unsigned bpc = 0;
1613
1614 if (bits == MALI_CHANNEL_FLOAT) {
1615 /* No doubles */
1616 bool fp16 = (type == MALI_FORMAT_SINT);
1617 assert(fp16 || (type == MALI_FORMAT_UNORM));
1618
1619 bpc = fp16 ? 2 : 4;
1620 } else {
1621 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1622
1623 /* See the enums */
1624 bits = 1 << bits;
1625 assert(bits >= 8);
1626 bpc = bits / 8;
1627 }
1628
1629 return bpc * chan;
1630 }
1631
1632 /* Indices for named (non-XFB) varyings that are present. These are packed
1633 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1634 * PAN_VARY_*). This has the nice property that you can lookup the buffer index
1635 * of a given special field given a shift S by:
1636 *
1637 * idx = popcount(P & ((1 << S) - 1))
1638 *
1639 * That is... look at all of the varyings that come earlier and count them; that
1640 * count is the index of the new one. Likewise, the total number of special
1641 * buffers required is simply popcount(P)
1642 */
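/* Illustrative example: if P = (1 << PAN_VARY_GENERAL) |
 * (1 << PAN_VARY_POSITION) | (1 << PAN_VARY_PSIZ) = 0b111, then the PSIZ
 * buffer index is popcount(0b111 & ((1 << PAN_VARY_PSIZ) - 1)) =
 * popcount(0b011) = 2, and XFB buffers start at popcount(P) = 3. */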
1643
1644 enum pan_special_varying {
1645 PAN_VARY_GENERAL = 0,
1646 PAN_VARY_POSITION = 1,
1647 PAN_VARY_PSIZ = 2,
1648 PAN_VARY_PNTCOORD = 3,
1649 PAN_VARY_FACE = 4,
1650 PAN_VARY_FRAGCOORD = 5,
1651
1652 /* Keep last */
1653 PAN_VARY_MAX,
1654 };
1655
1656 /* Given a varying, figure out which index it corresponds to */
1657
1658 static inline unsigned
1659 pan_varying_index(unsigned present, enum pan_special_varying v)
1660 {
1661 unsigned mask = (1 << v) - 1;
1662 return util_bitcount(present & mask);
1663 }
1664
1665 /* Get the base offset for XFB buffers, which by convention come after
1666 * everything else. Wrapper function for semantic reasons; by construction this
1667 * is just popcount. */
1668
1669 static inline unsigned
1670 pan_xfb_base(unsigned present)
1671 {
1672 return util_bitcount(present);
1673 }
1674
1675 /* Computes the present mask for varyings so we can start emitting varying records */
1676
1677 static inline unsigned
1678 pan_varying_present(
1679 struct panfrost_shader_state *vs,
1680 struct panfrost_shader_state *fs,
1681 unsigned quirks)
1682 {
1683 /* At the moment we always emit general and position buffers. Not
1684 * strictly necessary but usually harmless */
1685
1686 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1687
1688 /* Enable special buffers by the shader info */
1689
1690 if (vs->writes_point_size)
1691 present |= (1 << PAN_VARY_PSIZ);
1692
1693 if (fs->reads_point_coord)
1694 present |= (1 << PAN_VARY_PNTCOORD);
1695
1696 if (fs->reads_face)
1697 present |= (1 << PAN_VARY_FACE);
1698
1699 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1700 present |= (1 << PAN_VARY_FRAGCOORD);
1701
1702 /* Also, if we have a point sprite, we need a point coord buffer */
1703
1704 for (unsigned i = 0; i < fs->varying_count; i++) {
1705 gl_varying_slot loc = fs->varyings_loc[i];
1706
1707 if (has_point_coord(fs->point_sprite_mask, loc))
1708 present |= (1 << PAN_VARY_PNTCOORD);
1709 }
1710
1711 return present;
1712 }
1713
1714 /* Emitters for varying records */
1715
1716 static struct mali_attr_meta
1717 pan_emit_vary(unsigned present, enum pan_special_varying buf,
1718 unsigned quirks, enum mali_format format,
1719 unsigned offset)
1720 {
1721 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1722 unsigned swizzle = quirks & HAS_SWIZZLES ?
1723 panfrost_get_default_swizzle(nr_channels) :
1724 panfrost_bifrost_swizzle(nr_channels);
1725
1726 struct mali_attr_meta meta = {
1727 .index = pan_varying_index(present, buf),
1728 .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
1729 .format = (format << 12) | swizzle,
1730 .src_offset = offset
1731 };
1732
1733 return meta;
1734 }
1735
1736 /* Emit a record for a general varying that goes unused, i.e. whose value is discarded */
1737
1738 static struct mali_attr_meta
1739 pan_emit_vary_only(unsigned present, unsigned quirks)
1740 {
1741 return pan_emit_vary(present, 0, quirks, MALI_VARYING_DISCARD, 0);
1742 }
1743
1744 /* Special records */
1745
1746 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1747 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1748 [PAN_VARY_PSIZ] = MALI_R16F,
1749 [PAN_VARY_PNTCOORD] = MALI_R16F,
1750 [PAN_VARY_FACE] = MALI_R32I,
1751 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1752 };
1753
1754 static struct mali_attr_meta
1755 pan_emit_vary_special(unsigned present, enum pan_special_varying buf,
1756 unsigned quirks)
1757 {
1758 assert(buf < PAN_VARY_MAX);
1759 return pan_emit_vary(present, buf, quirks, pan_varying_formats[buf], 0);
1760 }
1761
1762 static enum mali_format
1763 pan_xfb_format(enum mali_format format, unsigned nr)
1764 {
1765 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1766 return MALI_R32F | MALI_NR_CHANNELS(nr);
1767 else
1768 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1769 }
1770
1771 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1772 * a bitfield) 32-bit, smaller than a 64-bit pointer, so we may as well pass it
1773 * by value. */
1774
1775 static struct mali_attr_meta
1776 pan_emit_vary_xfb(unsigned present,
1777 unsigned max_xfb,
1778 unsigned *streamout_offsets,
1779 unsigned quirks,
1780 enum mali_format format,
1781 struct pipe_stream_output o)
1782 {
1783 unsigned swizzle = quirks & HAS_SWIZZLES ?
1784 panfrost_get_default_swizzle(o.num_components) :
1785 panfrost_bifrost_swizzle(o.num_components);
1786
1787 /* Construct a record for the captured varying */
1788 struct mali_attr_meta meta = {
1789 /* XFB buffers come after everything else */
1790 .index = pan_xfb_base(present) + o.output_buffer,
1791
1792 /* The usual unknown bit */
1793 .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
1794
1795 /* Override number of channels and precision to highp */
1796 .format = (pan_xfb_format(format, o.num_components) << 12) | swizzle,
1797
1798 /* Combine the output's offset within the vertex (dwords to bytes) with the running offset of its buffer */
1799 .src_offset = (o.dst_offset * 4) /* dwords */
1800 + streamout_offsets[o.output_buffer]
1801 };
1802
1803 return meta;
1804 }
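/* Hypothetical example of the offset math above: an output captured into
 * buffer 1 at dst_offset 3 (dwords), with streamout_offsets[1] == 64 bytes
 * already consumed, gets src_offset = 3 * 4 + 64 = 76 bytes. */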
1805
1806 /* Determine if we should capture a varying for XFB. This requires actually
1807 * having a buffer for it. If we don't capture it, we'll fall back to a general
1808 * varying path (linked or unlinked, possibly discarding the write) */
1809
1810 static bool
1811 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1812 unsigned loc, unsigned max_xfb)
1813 {
1814 if (!(xfb->so_mask & (1ll << loc)))
1815 return false;
1816
1817 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1818 return o->output_buffer < max_xfb;
1819 }
1820
1821 /* Higher-level wrapper around all of the above, classifying a varying into one
1822 * of the cases handled above */
1823
1824 static struct mali_attr_meta
1825 panfrost_emit_varying(
1826 struct panfrost_shader_state *stage,
1827 struct panfrost_shader_state *other,
1828 struct panfrost_shader_state *xfb,
1829 unsigned present,
1830 unsigned max_xfb,
1831 unsigned *streamout_offsets,
1832 unsigned quirks,
1833 unsigned *gen_offsets,
1834 enum mali_format *gen_formats,
1835 unsigned *gen_stride,
1836 unsigned idx,
1837 bool should_alloc,
1838 bool is_fragment)
1839 {
1840 gl_varying_slot loc = stage->varyings_loc[idx];
1841 enum mali_format format = stage->varyings[idx];
1842
1843 /* Override format to match linkage */
1844 if (!should_alloc && gen_formats[idx])
1845 format = gen_formats[idx];
1846
1847 if (has_point_coord(stage->point_sprite_mask, loc)) {
1848 return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
1849 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1850 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1851 return pan_emit_vary_xfb(present, max_xfb, streamout_offsets, quirks, format, *o);
1852 } else if (loc == VARYING_SLOT_POS) {
1853 if (is_fragment)
1854 return pan_emit_vary_special(present, PAN_VARY_FRAGCOORD, quirks);
1855 else
1856 return pan_emit_vary_special(present, PAN_VARY_POSITION, quirks);
1857 } else if (loc == VARYING_SLOT_PSIZ) {
1858 return pan_emit_vary_special(present, PAN_VARY_PSIZ, quirks);
1859 } else if (loc == VARYING_SLOT_PNTC) {
1860 return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
1861 } else if (loc == VARYING_SLOT_FACE) {
1862 return pan_emit_vary_special(present, PAN_VARY_FACE, quirks);
1863 }
1864
1865 /* We've exhausted the special cases, so this is a general varying. Check if we're linked */
1866 signed other_idx = -1;
1867
1868 for (unsigned j = 0; j < other->varying_count; ++j) {
1869 if (other->varyings_loc[j] == loc) {
1870 other_idx = j;
1871 break;
1872 }
1873 }
1874
1875 if (other_idx < 0)
1876 return pan_emit_vary_only(present, quirks);
1877
1878 unsigned offset = gen_offsets[other_idx];
1879
1880 if (should_alloc) {
1881 /* We're linked, so allocate space via a watermark allocation */
1882 enum mali_format alt = other->varyings[other_idx];
1883
1884 /* Do interpolation at minimum precision */
1885 unsigned size_main = pan_varying_size(format);
1886 unsigned size_alt = pan_varying_size(alt);
1887 unsigned size = MIN2(size_main, size_alt);
1888
1889 /* If a varying is marked for XFB but not actually captured, we
1890 * should match the format to the format that would otherwise
1891 * be used for XFB, since dEQP checks for invariance here. It's
1892 * unclear if this is required by the spec. */
1893
1894 if (xfb->so_mask & (1ull << loc)) {
1895 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1896 format = pan_xfb_format(format, o->num_components);
1897 size = pan_varying_size(format);
1898 } else if (size == size_alt) {
1899 format = alt;
1900 }
1901
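/* Record where this varying lands in the shared general buffer and advance
 * the running stride (the watermark), so subsequent varyings pack after it */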
1902 gen_offsets[idx] = *gen_stride;
1903 gen_formats[other_idx] = format;
1904 offset = *gen_stride;
1905 *gen_stride += size;
1906 }
1907
1908 return pan_emit_vary(present, PAN_VARY_GENERAL,
1909 quirks, format, offset);
1910 }
1911
1912 static void
1913 pan_emit_special_input(union mali_attr *varyings,
1914 unsigned present,
1915 enum pan_special_varying v,
1916 mali_ptr addr)
1917 {
1918 if (present & (1 << v)) {
1919 /* Write the record exactly once, with the remaining fields
1920 * zeroed, both for performance and to avoid flakes from stale data */
1921
1922 union mali_attr s = {
1923 .elements = addr
1924 };
1925
1926 varyings[pan_varying_index(present, v)] = s;
1927 }
1928 }
1929
1930 void
1931 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1932 unsigned vertex_count,
1933 struct mali_vertex_tiler_postfix *vertex_postfix,
1934 struct mali_vertex_tiler_postfix *tiler_postfix,
1935 union midgard_primitive_size *primitive_size)
1936 {
1937 /* Load the shaders */
1938 struct panfrost_context *ctx = batch->ctx;
1939 struct panfrost_device *dev = pan_device(ctx->base.screen);
1940 struct panfrost_shader_state *vs, *fs;
1941 size_t vs_size, fs_size;
1942
1943 /* Allocate the varying descriptor */
1944
1945 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1946 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1947 vs_size = sizeof(struct mali_attr_meta) * vs->varying_count;
1948 fs_size = sizeof(struct mali_attr_meta) * fs->varying_count;
1949
1950 struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
1951 vs_size +
1952 fs_size);
1953
1954 struct pipe_stream_output_info *so = &vs->stream_output;
1955 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1956
1957 /* Check if this varying is linked by us. This is the case for
1958 * general-purpose, non-captured varyings. If it is, link it. If it's
1959 * not, use the provided stream out information to determine the
1960 * offset, since it was already linked for us. */
1961
1962 unsigned gen_offsets[32];
1963 enum mali_format gen_formats[32];
1964 memset(gen_offsets, 0, sizeof(gen_offsets));
1965 memset(gen_formats, 0, sizeof(gen_formats));
1966
1967 unsigned gen_stride = 0;
1968 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1969 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1970
1971 unsigned streamout_offsets[32];
1972
1973 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1974 streamout_offsets[i] = panfrost_streamout_offset(
1975 so->stride[i],
1976 ctx->streamout.offsets[i],
1977 ctx->streamout.targets[i]);
1978 }
1979
1980 struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
1981 struct mali_attr_meta *ofs = ovs + vs->varying_count;
1982
1983 for (unsigned i = 0; i < vs->varying_count; i++) {
1984 ovs[i] = panfrost_emit_varying(vs, fs, vs, present,
1985 ctx->streamout.num_targets, streamout_offsets,
1986 dev->quirks,
1987 gen_offsets, gen_formats, &gen_stride, i, true, false);
1988 }
1989
1990 for (unsigned i = 0; i < fs->varying_count; i++) {
1991 ofs[i] = panfrost_emit_varying(fs, vs, vs, present,
1992 ctx->streamout.num_targets, streamout_offsets,
1993 dev->quirks,
1994 gen_offsets, gen_formats, &gen_stride, i, false, true);
1995 }
1996
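/* One buffer record per present varying buffer, plus one per bound
 * stream-out target; by convention the stream-out records come last,
 * starting at index xfb_base */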
1997 unsigned xfb_base = pan_xfb_base(present);
1998 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
1999 sizeof(union mali_attr) * (xfb_base + ctx->streamout.num_targets));
2000 union mali_attr *varyings = (union mali_attr *) T.cpu;
2001
2002 /* Emit the stream out buffers */
2003
2004 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
2005 ctx->vertex_count);
2006
2007 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2008 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
2009 so->stride[i],
2010 ctx->streamout.offsets[i],
2011 out_count,
2012 ctx->streamout.targets[i]);
2013 }
2014
2015 panfrost_emit_varyings(batch,
2016 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2017 gen_stride, vertex_count);
2018
2019 /* fp32 vec4 gl_Position */
2020 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2021 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2022 sizeof(float) * 4, vertex_count);
2023
2024 if (present & (1 << PAN_VARY_PSIZ)) {
2025 primitive_size->pointer = panfrost_emit_varyings(batch,
2026 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2027 2, vertex_count);
2028 }
2029
2030 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_VARYING_POINT_COORD);
2031 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_VARYING_FRONT_FACING);
2032 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_VARYING_FRAG_COORD);
2033
2034 vertex_postfix->varyings = T.gpu;
2035 tiler_postfix->varyings = T.gpu;
2036
2037 vertex_postfix->varying_meta = trans.gpu;
2038 tiler_postfix->varying_meta = trans.gpu + vs_size;
2039 }
2040
2041 void
2042 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2043 struct mali_vertex_tiler_prefix *vertex_prefix,
2044 struct mali_vertex_tiler_postfix *vertex_postfix,
2045 struct mali_vertex_tiler_prefix *tiler_prefix,
2046 struct mali_vertex_tiler_postfix *tiler_postfix,
2047 union midgard_primitive_size *primitive_size)
2048 {
2049 struct panfrost_context *ctx = batch->ctx;
2050 struct panfrost_device *device = pan_device(ctx->base.screen);
2051 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2052 struct bifrost_payload_vertex bifrost_vertex = {0,};
2053 struct bifrost_payload_tiler bifrost_tiler = {0,};
2054 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2055 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2056 void *vp, *tp;
2057 size_t vp_size, tp_size;
2058
2059 if (device->quirks & IS_BIFROST) {
2060 bifrost_vertex.prefix = *vertex_prefix;
2061 bifrost_vertex.postfix = *vertex_postfix;
2062 vp = &bifrost_vertex;
2063 vp_size = sizeof(bifrost_vertex);
2064
2065 bifrost_tiler.prefix = *tiler_prefix;
2066 bifrost_tiler.tiler.primitive_size = *primitive_size;
2067 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2068 bifrost_tiler.postfix = *tiler_postfix;
2069 tp = &bifrost_tiler;
2070 tp_size = sizeof(bifrost_tiler);
2071 } else {
2072 midgard_vertex.prefix = *vertex_prefix;
2073 midgard_vertex.postfix = *vertex_postfix;
2074 vp = &midgard_vertex;
2075 vp_size = sizeof(midgard_vertex);
2076
2077 midgard_tiler.prefix = *tiler_prefix;
2078 midgard_tiler.postfix = *tiler_postfix;
2079 midgard_tiler.primitive_size = *primitive_size;
2080 tp = &midgard_tiler;
2081 tp_size = sizeof(midgard_tiler);
2082 }
2083
2084 if (wallpapering) {
2085 /* Inject in reverse order, with "predicted" job indices.
2086 * THIS IS A HACK XXX */
2087 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2088 batch->scoreboard.job_index + 2, tp, tp_size, true);
2089 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2090 vp, vp_size, true);
2091 return;
2092 }
2093
2094 /* If rasterizer discard is enabled, only submit the vertex job */
2095
2096 bool rasterizer_discard = ctx->rasterizer &&
2097 ctx->rasterizer->base.rasterizer_discard;
2098
2099 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2100 vp, vp_size, false);
2101
2102 if (rasterizer_discard)
2103 return;
2104
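/* Chain the tiler job after the vertex job: the vertex job index obtained
 * above is passed as the tiler job's dependency */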
2105 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2106 false);
2107 }
2108
2109 /* TODO: stop hardcoding this */
2110 mali_ptr
2111 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2112 {
2113 uint16_t locations[] = {
2114 128, 128,
2115 0, 256,
2116 0, 256,
2117 0, 256,
2118 0, 256,
2119 0, 256,
2120 0, 256,
2121 0, 256,
2122 0, 256,
2123 0, 256,
2124 0, 256,
2125 0, 256,
2126 0, 256,
2127 0, 256,
2128 0, 256,
2129 0, 256,
2130 0, 256,
2131 0, 256,
2132 0, 256,
2133 0, 256,
2134 0, 256,
2135 0, 256,
2136 0, 256,
2137 0, 256,
2138 0, 256,
2139 0, 256,
2140 0, 256,
2141 0, 256,
2142 0, 256,
2143 0, 256,
2144 0, 256,
2145 0, 256,
2146 128, 128,
2147 0, 0,
2148 0, 0,
2149 0, 0,
2150 0, 0,
2151 0, 0,
2152 0, 0,
2153 0, 0,
2154 0, 0,
2155 0, 0,
2156 0, 0,
2157 0, 0,
2158 0, 0,
2159 0, 0,
2160 0, 0,
2161 0, 0,
2162 };
2163
2164 return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
2165 }