panfrost: Avoid minimum stack allocations
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 struct mali_shared_memory shared = {
62 .shared_workgroup_count = ~0,
63 };
64
65 if (batch->stack_size) {
66 struct panfrost_bo *stack =
67 panfrost_batch_get_scratchpad(batch, batch->stack_size,
68 dev->thread_tls_alloc,
69 dev->core_count);
70
71 shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
72 shared.scratchpad = stack->gpu;
73 }
74
75 postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
76 }
77
78 static void
79 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_postfix *postfix)
81 {
82 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
83 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
84 }
85
86 static void
87 panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
88 struct mali_vertex_tiler_prefix *prefix,
89 struct mali_vertex_tiler_postfix *postfix)
90 {
91 postfix->gl_enables |= 0x7;
92 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
93 rasterizer->base.front_ccw);
94 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
95 (rasterizer->base.cull_face & PIPE_FACE_FRONT));
96 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
97 (rasterizer->base.cull_face & PIPE_FACE_BACK));
98 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
99 rasterizer->base.flatshade_first);
100 }
101
102 void
103 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
104 struct mali_vertex_tiler_prefix *prefix,
105 union midgard_primitive_size *primitive_size)
106 {
107 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
108
109 if (!panfrost_writes_point_size(ctx)) {
110 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
111 rasterizer->base.point_size :
112 rasterizer->base.line_width;
113
114 primitive_size->constant = val;
115 }
116 }
117
118 static void
119 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
120 struct mali_vertex_tiler_postfix *postfix)
121 {
122 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
123 if (ctx->occlusion_query) {
124 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
125 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
126 PAN_BO_ACCESS_SHARED |
127 PAN_BO_ACCESS_RW |
128 PAN_BO_ACCESS_FRAGMENT);
129 } else {
130 postfix->occlusion_counter = 0;
131 }
132 }
133
134 void
135 panfrost_vt_init(struct panfrost_context *ctx,
136 enum pipe_shader_type stage,
137 struct mali_vertex_tiler_prefix *prefix,
138 struct mali_vertex_tiler_postfix *postfix)
139 {
140 struct panfrost_device *device = pan_device(ctx->base.screen);
141
142 if (!ctx->shader[stage])
143 return;
144
145 memset(prefix, 0, sizeof(*prefix));
146 memset(postfix, 0, sizeof(*postfix));
147
148 if (device->quirks & IS_BIFROST) {
149 postfix->gl_enables = 0x2;
150 panfrost_vt_emit_shared_memory(ctx, postfix);
151 } else {
152 postfix->gl_enables = 0x6;
153 panfrost_vt_attach_framebuffer(ctx, postfix);
154 }
155
156 if (stage == PIPE_SHADER_FRAGMENT) {
157 panfrost_vt_update_occlusion_query(ctx, postfix);
158 panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
159 }
160 }
161
162 static unsigned
163 panfrost_translate_index_size(unsigned size)
164 {
165 switch (size) {
166 case 1:
167 return MALI_DRAW_INDEXED_UINT8;
168
169 case 2:
170 return MALI_DRAW_INDEXED_UINT16;
171
172 case 4:
173 return MALI_DRAW_INDEXED_UINT32;
174
175 default:
176 unreachable("Invalid index size");
177 }
178 }
179
 180 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
 181  * valid for the duration of the draw (transient), though it could last longer. Also get
182 * the bounds on the index buffer for the range accessed by the draw. We do
183 * these operations together because there are natural optimizations which
184 * require them to be together. */
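/* (One such optimization: for resource-backed index buffers we can consult a
 * per-resource min/max cache while we are already adding the BO to the batch,
 * rather than rescanning the indices on every draw.) */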
185
186 static mali_ptr
187 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
188 const struct pipe_draw_info *info,
189 unsigned *min_index, unsigned *max_index)
190 {
191 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
192 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
193 off_t offset = info->start * info->index_size;
194 bool needs_indices = true;
195 mali_ptr out = 0;
196
197 if (info->max_index != ~0u) {
198 *min_index = info->min_index;
199 *max_index = info->max_index;
200 needs_indices = false;
201 }
202
203 if (!info->has_user_indices) {
204 /* Only resources can be directly mapped */
205 panfrost_batch_add_bo(batch, rsrc->bo,
206 PAN_BO_ACCESS_SHARED |
207 PAN_BO_ACCESS_READ |
208 PAN_BO_ACCESS_VERTEX_TILER);
209 out = rsrc->bo->gpu + offset;
210
211 /* Check the cache */
212 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
213 info->start,
214 info->count,
215 min_index,
216 max_index);
217 } else {
218 /* Otherwise, we need to upload to transient memory */
219 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
220 out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
221 info->count *
222 info->index_size);
223 }
224
225 if (needs_indices) {
226 /* Fallback */
227 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
228
229 if (!info->has_user_indices)
230 panfrost_minmax_cache_add(rsrc->index_cache,
231 info->start, info->count,
232 *min_index, *max_index);
233 }
234
235 return out;
236 }
237
238 void
239 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
240 const struct pipe_draw_info *info,
241 enum mali_draw_mode draw_mode,
242 struct mali_vertex_tiler_postfix *vertex_postfix,
243 struct mali_vertex_tiler_prefix *tiler_prefix,
244 struct mali_vertex_tiler_postfix *tiler_postfix,
245 unsigned *vertex_count,
246 unsigned *padded_count)
247 {
248 tiler_prefix->draw_mode = draw_mode;
249
250 unsigned draw_flags = 0;
251
252 if (panfrost_writes_point_size(ctx))
253 draw_flags |= MALI_DRAW_VARYING_SIZE;
254
255 if (info->primitive_restart)
256 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
257
 258         /* These don't make much sense */
259
260 draw_flags |= 0x3000;
261
262 if (info->index_size) {
263 unsigned min_index = 0, max_index = 0;
264
265 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
266 info,
267 &min_index,
268 &max_index);
269
270 /* Use the corresponding values */
271 *vertex_count = max_index - min_index + 1;
272 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
273 tiler_prefix->offset_bias_correction = -min_index;
274 tiler_prefix->index_count = MALI_POSITIVE(info->count);
275 draw_flags |= panfrost_translate_index_size(info->index_size);
276 } else {
277 tiler_prefix->indices = 0;
278 *vertex_count = ctx->vertex_count;
279 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
280 tiler_prefix->offset_bias_correction = 0;
281 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
282 }
283
284 tiler_prefix->unknown_draw = draw_flags;
285
286 /* Encode the padded vertex count */
287
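        /* Decompose padded_count = (2 * odd + 1) << shift; the shift and the
         * odd factor are what instance_shift/instance_odd encode below. */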
288 if (info->instance_count > 1) {
289 *padded_count = panfrost_padded_vertex_count(*vertex_count);
290
291 unsigned shift = __builtin_ctz(ctx->padded_count);
292 unsigned k = ctx->padded_count >> (shift + 1);
293
294 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
295 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
296 } else {
297 *padded_count = *vertex_count;
298
299 /* Reset instancing state */
300 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
301 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
302 }
303 }
304
305 static void
306 panfrost_shader_meta_init(struct panfrost_context *ctx,
307 enum pipe_shader_type st,
308 struct mali_shader_meta *meta)
309 {
310 const struct panfrost_device *dev = pan_device(ctx->base.screen);
311 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
312
313 memset(meta, 0, sizeof(*meta));
314 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
315 meta->attribute_count = ss->attribute_count;
316 meta->varying_count = ss->varying_count;
317 meta->texture_count = ctx->sampler_view_count[st];
318 meta->sampler_count = ctx->sampler_count[st];
319
320 if (dev->quirks & IS_BIFROST) {
321 if (st == PIPE_SHADER_VERTEX)
322 meta->bifrost1.unk1 = 0x800000;
323 else {
324 /* First clause ATEST |= 0x4000000.
325 * Less than 32 regs |= 0x200 */
326 meta->bifrost1.unk1 = 0x950020;
327 }
328
329 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
330 if (st == PIPE_SHADER_VERTEX)
331 meta->bifrost2.preload_regs = 0xC0;
332 else {
333 meta->bifrost2.preload_regs = 0x1;
334 SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
335 }
336
337 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
338 ss->uniform_cutoff);
339 } else {
340 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
341 ss->uniform_cutoff);
342 meta->midgard1.work_count = ss->work_reg_count;
343
344 /* TODO: This is not conformant on ES3 */
345 meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
346
347 meta->midgard1.flags_lo = 0x20;
348 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
349
350 SET_BIT(meta->midgard1.flags_lo, MALI_WRITES_GLOBAL, ss->writes_global);
351 }
352 }
353
354 static unsigned
355 translate_tex_wrap(enum pipe_tex_wrap w)
356 {
357 switch (w) {
358 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
359 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
360 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
361 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
362 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
363 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
364 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
365 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
366 default: unreachable("Invalid wrap");
367 }
368 }
369
 370 /* The hardware compares in the wrong order, so we have to flip before
371 * encoding. Yes, really. */
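/* For example, assuming panfrost_flip_compare_func swaps the ordered
 * comparisons, PIPE_FUNC_LESS ends up encoded as MALI_FUNC_GREATER (and vice
 * versa), while EQUAL, NOTEQUAL, ALWAYS and NEVER are unaffected. */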
372
373 static enum mali_func
374 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
375 {
376 if (!cso->compare_mode)
377 return MALI_FUNC_NEVER;
378
379 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
380 return panfrost_flip_compare_func(f);
381 }
382
383 static enum mali_mipmap_mode
384 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
385 {
386 switch (f) {
387 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
388 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
389 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
390 default: unreachable("Invalid");
391 }
392 }
393
394 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
395 struct mali_midgard_sampler_packed *hw)
396 {
397 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
398 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
399 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
400 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
401 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
402 cfg.normalized_coordinates = cso->normalized_coords;
403
404 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
405
406 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
407
408 /* If necessary, we disable mipmapping in the sampler descriptor by
 409          * clamping the LOD as tightly as possible (from 0 to epsilon,
410 * essentially -- remember these are fixed point numbers, so
411 * epsilon=1/256) */
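                /* Concretely, with min_lod = 0 and FIXED_16 presumably packing
                 * 8.8 fixed point, minimum_lod packs to 0 and maximum_lod to 1,
                 * clamping the LOD to [0, 1/256]. */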
412
413 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
414 cfg.minimum_lod + 1 :
415 FIXED_16(cso->max_lod, false);
416
417 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
418 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
419 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
420
421 cfg.compare_function = panfrost_sampler_compare_func(cso);
422 cfg.seamless_cube_map = cso->seamless_cube_map;
423
424 cfg.border_color_r = cso->border_color.f[0];
425 cfg.border_color_g = cso->border_color.f[1];
426 cfg.border_color_b = cso->border_color.f[2];
427 cfg.border_color_a = cso->border_color.f[3];
428 }
429 }
430
431 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
432 struct mali_bifrost_sampler_packed *hw)
433 {
434 pan_pack(hw, BIFROST_SAMPLER, cfg) {
435 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
436 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
437 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
438 cfg.normalized_coordinates = cso->normalized_coords;
439
440 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
441 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
442 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
443
444 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
445 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
446 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
447
448 cfg.compare_function = panfrost_sampler_compare_func(cso);
449 cfg.seamless_cube_map = cso->seamless_cube_map;
450 }
451 }
452
453 static void
454 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
455 struct mali_shader_meta *fragmeta)
456 {
457 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
458
459 bool msaa = rast->multisample;
460
461 /* TODO: Sample size */
462 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
463 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
464
465 struct panfrost_shader_state *fs;
466 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
467
468 /* EXT_shader_framebuffer_fetch requires the shader to be run
469 * per-sample when outputs are read. */
470 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
471 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
472
473 fragmeta->depth_units = rast->offset_units * 2.0f;
474 fragmeta->depth_factor = rast->offset_scale;
475
 476         /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
477
478 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
479 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
480
481 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
482 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
483 }
484
485 static void
486 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
487 struct mali_shader_meta *fragmeta)
488 {
489 const struct panfrost_zsa_state *so = ctx->depth_stencil;
490
491 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
492 so->base.stencil[0].enabled);
493
494 fragmeta->stencil_mask_front = so->stencil_mask_front;
495 fragmeta->stencil_mask_back = so->stencil_mask_back;
496
497 /* Bottom bits for stencil ref, exactly one word */
498 fragmeta->stencil_front.opaque[0] = so->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
499
500 /* If back-stencil is not enabled, use the front values */
501
502 if (so->base.stencil[1].enabled)
503 fragmeta->stencil_back.opaque[0] = so->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
504 else
505 fragmeta->stencil_back = fragmeta->stencil_front;
506
507 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
508 so->base.depth.writemask);
509
510 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
511 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(
512 so->base.depth.enabled ? so->base.depth.func : PIPE_FUNC_ALWAYS));
513 }
514
515 static bool
516 panfrost_fs_required(
517 struct panfrost_shader_state *fs,
518 struct panfrost_blend_final *blend,
519 unsigned rt_count)
520 {
521 /* If we generally have side effects */
522 if (fs->fs_sidefx)
523 return true;
524
525 /* If colour is written we need to execute */
526 for (unsigned i = 0; i < rt_count; ++i) {
527 if (!blend[i].no_colour)
528 return true;
529 }
530
531 /* If depth is written and not implied we need to execute.
532 * TODO: Predicate on Z/S writes being enabled */
533 return (fs->writes_depth || fs->writes_stencil);
534 }
535
536 static void
537 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
538 struct mali_shader_meta *fragmeta,
539 void *rts)
540 {
541 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
542 const struct panfrost_device *dev = pan_device(ctx->base.screen);
543 struct panfrost_shader_state *fs;
544 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
545
546 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
547 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
548 !ctx->blend->base.dither);
549
550 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
551 ctx->blend->base.alpha_to_coverage);
552
553 /* Get blending setup */
554 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
555
556 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
557 unsigned shader_offset = 0;
558 struct panfrost_bo *shader_bo = NULL;
559
560 for (unsigned c = 0; c < rt_count; ++c)
561 blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
562 &shader_offset);
563
564 /* Disable shader execution if we can */
565 if (dev->quirks & MIDGARD_SHADERLESS
566 && !panfrost_fs_required(fs, blend, rt_count)) {
567 fragmeta->shader = 0;
568 fragmeta->attribute_count = 0;
569 fragmeta->varying_count = 0;
570 fragmeta->texture_count = 0;
571 fragmeta->sampler_count = 0;
572
573 /* This feature is not known to work on Bifrost */
574 fragmeta->midgard1.work_count = 1;
575 fragmeta->midgard1.uniform_count = 0;
576 fragmeta->midgard1.uniform_buffer_count = 0;
577 }
578
579 /* If there is a blend shader, work registers are shared. We impose 8
580 * work registers as a limit for blend shaders. Should be lower XXX */
581
582 if (!(dev->quirks & IS_BIFROST)) {
583 for (unsigned c = 0; c < rt_count; ++c) {
584 if (blend[c].is_shader) {
585 fragmeta->midgard1.work_count =
586 MAX2(fragmeta->midgard1.work_count, 8);
587 }
588 }
589 }
590
591 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
592 * copied to the blend_meta appended (by convention), but this is the
593 * field actually read by the hardware. (Or maybe both are read...?).
594 * Specify the last RTi with a blend shader. */
595
596 fragmeta->blend.shader = 0;
597
598 for (signed rt = (rt_count - 1); rt >= 0; --rt) {
599 if (!blend[rt].is_shader)
600 continue;
601
602 fragmeta->blend.shader = blend[rt].shader.gpu |
603 blend[rt].shader.first_tag;
604 break;
605 }
606
607 if (dev->quirks & MIDGARD_SFBD) {
 608                 /* On platforms with only a single render target, the blend
609 * information is inside the shader meta itself. We additionally
610 * need to signal CAN_DISCARD for nontrivial blend modes (so
611 * we're able to read back the destination buffer) */
612
613 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
614 blend[0].is_shader);
615
616 if (!blend[0].is_shader) {
617 fragmeta->blend.equation = *blend[0].equation.equation;
618 fragmeta->blend.constant = blend[0].equation.constant;
619 }
620
621 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
622 !blend[0].no_blending || fs->can_discard);
623
624 batch->draws |= PIPE_CLEAR_COLOR0;
625 return;
626 }
627
628 if (dev->quirks & IS_BIFROST) {
629 bool no_blend = true;
630
631 for (unsigned i = 0; i < rt_count; ++i)
632 no_blend &= (blend[i].no_blending | blend[i].no_colour);
633
634 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
635 !fs->can_discard && !fs->writes_depth && no_blend);
636 }
637
638 /* Additional blend descriptor tacked on for jobs using MFBD */
639
640 for (unsigned i = 0; i < rt_count; ++i) {
641 unsigned flags = 0;
642
643 if (ctx->pipe_framebuffer.nr_cbufs > i && !blend[i].no_colour) {
644 flags = 0x200;
645 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
646
647 bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
648 (ctx->pipe_framebuffer.cbufs[i]) &&
649 util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
650
651 SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
652 SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
653 SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
654 SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
655 }
656
657 if (dev->quirks & IS_BIFROST) {
658 struct bifrost_blend_rt *brts = rts;
659
660 brts[i].flags = flags;
661
662 if (blend[i].is_shader) {
 663                                 /* The blend shader's address needs to have
 664                                  * the same top 32 bits as the fragment shader's.
665 * TODO: Ensure that's always the case.
666 */
667 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
668 (fs->bo->gpu & (0xffffffffull << 32)));
669 brts[i].shader = blend[i].shader.gpu;
670 brts[i].unk2 = 0x0;
671 } else if (ctx->pipe_framebuffer.nr_cbufs > i) {
672 enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
673 const struct util_format_description *format_desc;
674 format_desc = util_format_description(format);
675
676 brts[i].equation = *blend[i].equation.equation;
677
678 /* TODO: this is a bit more complicated */
679 brts[i].constant = blend[i].equation.constant;
680
681 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
682
683 /* 0x19 disables blending and forces REPLACE
684 * mode (equivalent to rgb_mode = alpha_mode =
685 * x122, colour mask = 0xF). 0x1a allows
686 * blending. */
687 brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
688
689 brts[i].shader_type = fs->blend_types[i];
690 } else {
691 /* Dummy attachment for depth-only */
692 brts[i].unk2 = 0x3;
693 brts[i].shader_type = fs->blend_types[i];
694 }
695 } else {
696 struct midgard_blend_rt *mrts = rts;
697 mrts[i].flags = flags;
698
699 if (blend[i].is_shader) {
700 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
701 } else {
702 mrts[i].blend.equation = *blend[i].equation.equation;
703 mrts[i].blend.constant = blend[i].equation.constant;
704 }
705 }
706 }
707 }
708
709 static void
710 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
711 struct mali_shader_meta *fragmeta,
712 void *rts)
713 {
714 const struct panfrost_device *dev = pan_device(ctx->base.screen);
715 struct panfrost_shader_state *fs;
716
717 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
718
719 bool msaa = ctx->rasterizer->base.multisample;
720 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
721
722 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
723 fragmeta->unknown2_4 = 0x4e0;
724
 725         /* unknown2_4 has the 0x10 bit set on T6XX and T720. We don't know why this
726 * is required (independent of 32-bit/64-bit descriptors), or why it's
727 * not used on later GPU revisions. Otherwise, all shader jobs fault on
728 * these earlier chips (perhaps this is a chicken bit of some kind).
729 * More investigation is needed. */
730
731 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
732
733 if (dev->quirks & IS_BIFROST) {
734 /* TODO */
735 } else {
 736                 /* Depending on whether it's legal in the given shader, we try to
737 * enable early-z testing. TODO: respect e-z force */
738
739 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
740 !fs->can_discard && !fs->writes_global &&
741 !fs->writes_depth && !fs->writes_stencil &&
742 !ctx->blend->base.alpha_to_coverage);
743
744 /* Add the writes Z/S flags if needed. */
745 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
746 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
747
748 /* Any time texturing is used, derivatives are implicitly calculated,
749 * so we need to enable helper invocations */
750
751 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
752 fs->helper_invocations);
753
754 /* If discard is enabled, which bit we set to convey this
755 * depends on if depth/stencil is used for the draw or not.
756 * Just one of depth OR stencil is enough to trigger this. */
757
758 const struct pipe_depth_stencil_alpha_state *zsa = &ctx->depth_stencil->base;
759 bool zs_enabled =
760 fs->writes_depth || fs->writes_stencil ||
761 (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS) ||
762 zsa->stencil[0].enabled;
763
764 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
765 fs->outputs_read || (!zs_enabled && fs->can_discard));
766 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
767 }
768
769 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
770 panfrost_frag_meta_zsa_update(ctx, fragmeta);
771 panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
772 }
773
774 void
775 panfrost_emit_shader_meta(struct panfrost_batch *batch,
776 enum pipe_shader_type st,
777 struct mali_vertex_tiler_postfix *postfix)
778 {
779 struct panfrost_context *ctx = batch->ctx;
780 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
781
782 if (!ss) {
783 postfix->shader = 0;
784 return;
785 }
786
787 struct mali_shader_meta meta;
788
789 panfrost_shader_meta_init(ctx, st, &meta);
790
791 /* Add the shader BO to the batch. */
792 panfrost_batch_add_bo(batch, ss->bo,
793 PAN_BO_ACCESS_PRIVATE |
794 PAN_BO_ACCESS_READ |
795 panfrost_bo_access_for_stage(st));
796
797 mali_ptr shader_ptr;
798
799 if (st == PIPE_SHADER_FRAGMENT) {
800 struct panfrost_device *dev = pan_device(ctx->base.screen);
801 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
802 size_t desc_size = sizeof(meta);
803 void *rts = NULL;
804 struct panfrost_transfer xfer;
805 unsigned rt_size;
806
807 if (dev->quirks & MIDGARD_SFBD)
808 rt_size = 0;
809 else if (dev->quirks & IS_BIFROST)
810 rt_size = sizeof(struct bifrost_blend_rt);
811 else
812 rt_size = sizeof(struct midgard_blend_rt);
813
814 desc_size += rt_size * rt_count;
815
816 if (rt_size)
817 rts = rzalloc_size(ctx, rt_size * rt_count);
818
819 panfrost_frag_shader_meta_init(ctx, &meta, rts);
820
821 xfer = panfrost_pool_alloc(&batch->pool, desc_size);
822
823 memcpy(xfer.cpu, &meta, sizeof(meta));
824 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
825
826 if (rt_size)
827 ralloc_free(rts);
828
829 shader_ptr = xfer.gpu;
830 } else {
831 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
832 sizeof(meta));
833 }
834
835 postfix->shader = shader_ptr;
836 }
837
838 void
839 panfrost_emit_viewport(struct panfrost_batch *batch,
840 struct mali_vertex_tiler_postfix *tiler_postfix)
841 {
842 struct panfrost_context *ctx = batch->ctx;
843 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
844 const struct pipe_scissor_state *ss = &ctx->scissor;
845 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
846 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
847
848 /* Derive min/max from translate/scale. Note since |x| >= 0 by
849 * definition, we have that -|x| <= |x| hence translate - |scale| <=
850 * translate + |scale|, so the ordering is correct here. */
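        /* e.g. translate[1] = 400, scale[1] = -400 (a flipped-Y viewport)
         * still yields vp_miny = 0 and vp_maxy = 800. */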
851 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
852 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
853 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
854 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
855 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
856 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
857
858 /* Scissor to the intersection of viewport and to the scissor, clamped
859 * to the framebuffer */
860
861 unsigned minx = MIN2(fb->width, vp_minx);
862 unsigned maxx = MIN2(fb->width, vp_maxx);
863 unsigned miny = MIN2(fb->height, vp_miny);
864 unsigned maxy = MIN2(fb->height, vp_maxy);
865
866 if (ss && rast->scissor) {
867 minx = MAX2(ss->minx, minx);
868 miny = MAX2(ss->miny, miny);
869 maxx = MIN2(ss->maxx, maxx);
870 maxy = MIN2(ss->maxy, maxy);
871 }
872
873 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
874
875 pan_pack(T.cpu, VIEWPORT, cfg) {
876 cfg.scissor_minimum_x = minx;
877 cfg.scissor_minimum_y = miny;
878 cfg.scissor_maximum_x = maxx - 1;
879 cfg.scissor_maximum_y = maxy - 1;
880
881 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
882 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
883 }
884
885 tiler_postfix->viewport = T.gpu;
886 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
887 }
888
889 static mali_ptr
890 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
891 enum pipe_shader_type st,
892 struct panfrost_constant_buffer *buf,
893 unsigned index)
894 {
895 struct pipe_constant_buffer *cb = &buf->cb[index];
896 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
897
898 if (rsrc) {
899 panfrost_batch_add_bo(batch, rsrc->bo,
900 PAN_BO_ACCESS_SHARED |
901 PAN_BO_ACCESS_READ |
902 panfrost_bo_access_for_stage(st));
903
 904                 /* Alignment guaranteed by
905 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
906 return rsrc->bo->gpu + cb->buffer_offset;
907 } else if (cb->user_buffer) {
908 return panfrost_pool_upload(&batch->pool,
909 cb->user_buffer +
910 cb->buffer_offset,
911 cb->buffer_size);
912 } else {
913 unreachable("No constant buffer");
914 }
915 }
916
917 struct sysval_uniform {
918 union {
919 float f[4];
920 int32_t i[4];
921 uint32_t u[4];
922 uint64_t du[2];
923 };
924 };
925
926 static void
927 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
928 struct sysval_uniform *uniform)
929 {
930 struct panfrost_context *ctx = batch->ctx;
931 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
932
933 uniform->f[0] = vp->scale[0];
934 uniform->f[1] = vp->scale[1];
935 uniform->f[2] = vp->scale[2];
936 }
937
938 static void
939 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
940 struct sysval_uniform *uniform)
941 {
942 struct panfrost_context *ctx = batch->ctx;
943 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
944
945 uniform->f[0] = vp->translate[0];
946 uniform->f[1] = vp->translate[1];
947 uniform->f[2] = vp->translate[2];
948 }
949
950 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
951 enum pipe_shader_type st,
952 unsigned int sysvalid,
953 struct sysval_uniform *uniform)
954 {
955 struct panfrost_context *ctx = batch->ctx;
956 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
957 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
958 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
959 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
960
961 assert(dim);
962 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
963
964 if (dim > 1)
965 uniform->i[1] = u_minify(tex->texture->height0,
966 tex->u.tex.first_level);
967
968 if (dim > 2)
969 uniform->i[2] = u_minify(tex->texture->depth0,
970 tex->u.tex.first_level);
971
972 if (is_array)
973 uniform->i[dim] = tex->texture->array_size;
974 }
975
976 static void
977 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
978 enum pipe_shader_type st,
979 unsigned ssbo_id,
980 struct sysval_uniform *uniform)
981 {
982 struct panfrost_context *ctx = batch->ctx;
983
984 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
985 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
986
987 /* Compute address */
988 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
989
990 panfrost_batch_add_bo(batch, bo,
991 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
992 panfrost_bo_access_for_stage(st));
993
994 /* Upload address and size as sysval */
995 uniform->du[0] = bo->gpu + sb.buffer_offset;
996 uniform->u[2] = sb.buffer_size;
997 }
998
999 static void
1000 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
1001 enum pipe_shader_type st,
1002 unsigned samp_idx,
1003 struct sysval_uniform *uniform)
1004 {
1005 struct panfrost_context *ctx = batch->ctx;
1006 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1007
1008 uniform->f[0] = sampl->min_lod;
1009 uniform->f[1] = sampl->max_lod;
1010 uniform->f[2] = sampl->lod_bias;
1011
1012 /* Even without any errata, Midgard represents "no mipmapping" as
1013 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1014 * panfrost_create_sampler_state which also explains our choice of
1015 * epsilon value (again to keep behaviour consistent) */
1016
1017 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1018 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1019 }
1020
1021 static void
1022 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1023 struct sysval_uniform *uniform)
1024 {
1025 struct panfrost_context *ctx = batch->ctx;
1026
1027 uniform->u[0] = ctx->compute_grid->grid[0];
1028 uniform->u[1] = ctx->compute_grid->grid[1];
1029 uniform->u[2] = ctx->compute_grid->grid[2];
1030 }
1031
1032 static void
1033 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1034 struct panfrost_shader_state *ss,
1035 enum pipe_shader_type st)
1036 {
1037 struct sysval_uniform *uniforms = (void *)buf;
1038
1039 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1040 int sysval = ss->sysval[i];
1041
1042 switch (PAN_SYSVAL_TYPE(sysval)) {
1043 case PAN_SYSVAL_VIEWPORT_SCALE:
1044 panfrost_upload_viewport_scale_sysval(batch,
1045 &uniforms[i]);
1046 break;
1047 case PAN_SYSVAL_VIEWPORT_OFFSET:
1048 panfrost_upload_viewport_offset_sysval(batch,
1049 &uniforms[i]);
1050 break;
1051 case PAN_SYSVAL_TEXTURE_SIZE:
1052 panfrost_upload_txs_sysval(batch, st,
1053 PAN_SYSVAL_ID(sysval),
1054 &uniforms[i]);
1055 break;
1056 case PAN_SYSVAL_SSBO:
1057 panfrost_upload_ssbo_sysval(batch, st,
1058 PAN_SYSVAL_ID(sysval),
1059 &uniforms[i]);
1060 break;
1061 case PAN_SYSVAL_NUM_WORK_GROUPS:
1062 panfrost_upload_num_work_groups_sysval(batch,
1063 &uniforms[i]);
1064 break;
1065 case PAN_SYSVAL_SAMPLER:
1066 panfrost_upload_sampler_sysval(batch, st,
1067 PAN_SYSVAL_ID(sysval),
1068 &uniforms[i]);
1069 break;
1070 default:
1071 assert(0);
1072 }
1073 }
1074 }
1075
1076 static const void *
1077 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1078 unsigned index)
1079 {
1080 struct pipe_constant_buffer *cb = &buf->cb[index];
1081 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1082
1083 if (rsrc)
1084 return rsrc->bo->cpu;
1085 else if (cb->user_buffer)
1086 return cb->user_buffer;
1087 else
1088 unreachable("No constant buffer");
1089 }
1090
1091 void
1092 panfrost_emit_const_buf(struct panfrost_batch *batch,
1093 enum pipe_shader_type stage,
1094 struct mali_vertex_tiler_postfix *postfix)
1095 {
1096 struct panfrost_context *ctx = batch->ctx;
1097 struct panfrost_shader_variants *all = ctx->shader[stage];
1098
1099 if (!all)
1100 return;
1101
1102 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1103
1104 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1105
1106 /* Uniforms are implicitly UBO #0 */
1107 bool has_uniforms = buf->enabled_mask & (1 << 0);
1108
1109 /* Allocate room for the sysval and the uniforms */
1110 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1111 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1112 size_t size = sys_size + uniform_size;
1113 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1114 size);
1115
1116 /* Upload sysvals requested by the shader */
1117 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1118
1119 /* Upload uniforms */
1120 if (has_uniforms && uniform_size) {
1121 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1122 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1123 }
1124
1125 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1126 * uploaded */
1127
1128 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1129 assert(ubo_count >= 1);
1130
1131 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1132 struct panfrost_transfer ubos = panfrost_pool_alloc(&batch->pool, sz);
1133 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1134
1135 /* Upload uniforms as a UBO */
1136
1137 if (ss->uniform_count) {
1138 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1139 cfg.entries = ss->uniform_count;
1140 cfg.pointer = transfer.gpu;
1141 }
1142 } else {
1143 *ubo_ptr = 0;
1144 }
1145
1146 /* The rest are honest-to-goodness UBOs */
1147
1148 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1149 size_t usz = buf->cb[ubo].buffer_size;
1150 bool enabled = buf->enabled_mask & (1 << ubo);
1151 bool empty = usz == 0;
1152
1153 if (!enabled || empty) {
1154 ubo_ptr[ubo] = 0;
1155 continue;
1156 }
1157
1158 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1159 cfg.entries = DIV_ROUND_UP(usz, 16);
1160 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1161 stage, buf, ubo);
1162 }
1163 }
1164
1165 postfix->uniforms = transfer.gpu;
1166 postfix->uniform_buffers = ubos.gpu;
1167
1168 buf->dirty_mask = 0;
1169 }
1170
1171 void
1172 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1173 const struct pipe_grid_info *info,
1174 struct midgard_payload_vertex_tiler *vtp)
1175 {
1176 struct panfrost_context *ctx = batch->ctx;
1177 struct panfrost_device *dev = pan_device(ctx->base.screen);
1178 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1179 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1180 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1181 128));
1182
1183 unsigned log2_instances =
1184 util_logbase2_ceil(info->grid[0]) +
1185 util_logbase2_ceil(info->grid[1]) +
1186 util_logbase2_ceil(info->grid[2]);
1187
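        /* 2^log2_instances is a power-of-two upper bound on the number of
         * workgroups in the grid, so the allocation below covers that bound,
         * per core, at the rounded-up per-workgroup shared size. */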
1188 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
1189 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1190 shared_size,
1191 1);
1192
1193 struct mali_shared_memory shared = {
1194 .shared_memory = bo->gpu,
1195 .shared_workgroup_count = log2_instances,
1196 .shared_shift = util_logbase2(single_size) + 1
1197 };
1198
1199 vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
1200 sizeof(shared));
1201 }
1202
1203 static mali_ptr
1204 panfrost_get_tex_desc(struct panfrost_batch *batch,
1205 enum pipe_shader_type st,
1206 struct panfrost_sampler_view *view)
1207 {
1208 if (!view)
1209 return (mali_ptr) 0;
1210
1211 struct pipe_sampler_view *pview = &view->base;
1212 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1213
1214 /* Add the BO to the job so it's retained until the job is done. */
1215
1216 panfrost_batch_add_bo(batch, rsrc->bo,
1217 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1218 panfrost_bo_access_for_stage(st));
1219
1220 panfrost_batch_add_bo(batch, view->bo,
1221 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1222 panfrost_bo_access_for_stage(st));
1223
1224 return view->bo->gpu;
1225 }
1226
1227 static void
1228 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1229 struct pipe_context *pctx)
1230 {
1231 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1232 if (view->texture_bo != rsrc->bo->gpu ||
1233 view->modifier != rsrc->modifier) {
1234 panfrost_bo_unreference(view->bo);
1235 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1236 }
1237 }
1238
1239 void
1240 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1241 enum pipe_shader_type stage,
1242 struct mali_vertex_tiler_postfix *postfix)
1243 {
1244 struct panfrost_context *ctx = batch->ctx;
1245 struct panfrost_device *device = pan_device(ctx->base.screen);
1246
1247 if (!ctx->sampler_view_count[stage])
1248 return;
1249
1250 if (device->quirks & IS_BIFROST) {
1251 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
1252 MALI_BIFROST_TEXTURE_LENGTH *
1253 ctx->sampler_view_count[stage]);
1254
1255 struct mali_bifrost_texture_packed *out =
1256 (struct mali_bifrost_texture_packed *) T.cpu;
1257
1258 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1259 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1260 struct pipe_sampler_view *pview = &view->base;
1261 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1262
1263 panfrost_update_sampler_view(view, &ctx->base);
1264 out[i] = view->bifrost_descriptor;
1265
1266 /* Add the BOs to the job so they are retained until the job is done. */
1267
1268 panfrost_batch_add_bo(batch, rsrc->bo,
1269 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1270 panfrost_bo_access_for_stage(stage));
1271
1272 panfrost_batch_add_bo(batch, view->bo,
1273 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1274 panfrost_bo_access_for_stage(stage));
1275 }
1276
1277 postfix->textures = T.gpu;
1278 } else {
1279 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1280
1281 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1282 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1283
1284 panfrost_update_sampler_view(view, &ctx->base);
1285
1286 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1287 }
1288
1289 postfix->textures = panfrost_pool_upload(&batch->pool,
1290 trampolines,
1291 sizeof(uint64_t) *
1292 ctx->sampler_view_count[stage]);
1293 }
1294 }
1295
1296 void
1297 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1298 enum pipe_shader_type stage,
1299 struct mali_vertex_tiler_postfix *postfix)
1300 {
1301 struct panfrost_context *ctx = batch->ctx;
1302
1303 if (!ctx->sampler_count[stage])
1304 return;
1305
1306 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1307 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1308
1309 size_t sz = desc_size * ctx->sampler_count[stage];
1310 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, sz);
1311 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1312
1313 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1314 out[i] = ctx->samplers[stage][i]->hw;
1315
1316 postfix->sampler_descriptor = T.gpu;
1317 }
1318
1319 void
1320 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1321 struct mali_vertex_tiler_postfix *vertex_postfix)
1322 {
1323 struct panfrost_context *ctx = batch->ctx;
1324 struct panfrost_vertex_state *so = ctx->vertex;
1325
1326 unsigned instance_shift = vertex_postfix->instance_shift;
1327 unsigned instance_odd = vertex_postfix->instance_odd;
1328
1329 /* Worst case: everything is NPOT */
1330
1331 struct panfrost_transfer S = panfrost_pool_alloc(&batch->pool,
1332 MALI_ATTRIBUTE_LENGTH * PIPE_MAX_ATTRIBS * 2);
1333
1334 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
1335 MALI_ATTRIBUTE_LENGTH * (PAN_INSTANCE_ID + 1));
1336
1337 struct mali_attribute_buffer_packed *bufs =
1338 (struct mali_attribute_buffer_packed *) S.cpu;
1339
1340 struct mali_attribute_packed *out =
1341 (struct mali_attribute_packed *) T.cpu;
1342
1343 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1344 unsigned k = 0;
1345
1346 for (unsigned i = 0; i < so->num_elements; ++i) {
1347 /* We map buffers 1:1 with the attributes, which
1348 * means duplicating some vertex buffers (who cares? aside from
1349 * maybe some caching implications but I somehow doubt that
1350 * matters) */
1351
1352 struct pipe_vertex_element *elem = &so->pipe[i];
1353 unsigned vbi = elem->vertex_buffer_index;
1354 attrib_to_buffer[i] = k;
1355
1356 if (!(ctx->vb_mask & (1 << vbi)))
1357 continue;
1358
1359 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1360 struct panfrost_resource *rsrc;
1361
1362 rsrc = pan_resource(buf->buffer.resource);
1363 if (!rsrc)
1364 continue;
1365
1366 /* Add a dependency of the batch on the vertex buffer */
1367 panfrost_batch_add_bo(batch, rsrc->bo,
1368 PAN_BO_ACCESS_SHARED |
1369 PAN_BO_ACCESS_READ |
1370 PAN_BO_ACCESS_VERTEX_TILER);
1371
1372 /* Mask off lower bits, see offset fixup below */
1373 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1374 mali_ptr addr = raw_addr & ~63;
1375
1376 /* Since we advanced the base pointer, we shrink the buffer
1377 * size, but add the offset we subtracted */
1378 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1379 - buf->buffer_offset;
1380
1381 /* When there is a divisor, the hardware-level divisor is
1382 * the product of the instance divisor and the padded count */
1383 unsigned divisor = elem->instance_divisor;
1384 unsigned hw_divisor = ctx->padded_count * divisor;
1385 unsigned stride = buf->stride;
1386
1387 /* If there's a divisor(=1) but no instancing, we want every
1388 * attribute to be the same */
1389
1390 if (divisor && ctx->instance_count == 1)
1391 stride = 0;
1392
1393 if (!divisor || ctx->instance_count <= 1) {
1394 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1395 if (ctx->instance_count > 1)
1396 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1397
1398 cfg.pointer = addr;
1399 cfg.stride = stride;
1400 cfg.size = size;
1401 cfg.divisor_r = instance_shift;
1402 cfg.divisor_p = instance_odd;
1403 }
1404 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1405 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1406 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1407 cfg.pointer = addr;
1408 cfg.stride = stride;
1409 cfg.size = size;
1410 cfg.divisor_r = __builtin_ctz(hw_divisor);
1411 }
1412
1413 } else {
1414 unsigned shift = 0, extra_flags = 0;
1415
1416 unsigned magic_divisor =
1417 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
1418
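                        /* Presumably the NPOT path replaces the per-vertex
                         * division with a multiply by a precomputed "magic"
                         * reciprocal plus a shift; the continuation record
                         * below carries the magic numerator and the original
                         * divisor for the hardware. */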
1419 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1420 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1421 cfg.pointer = addr;
1422 cfg.stride = stride;
1423 cfg.size = size;
1424
1425 cfg.divisor_r = shift;
1426 cfg.divisor_e = extra_flags;
1427 }
1428
1429 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1430 cfg.divisor_numerator = magic_divisor;
1431 cfg.divisor = divisor;
1432 }
1433
1434 ++k;
1435 }
1436
1437 ++k;
1438 }
1439
1440 /* Add special gl_VertexID/gl_InstanceID buffers */
1441
1442 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1443
1444 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1445 cfg.buffer_index = k++;
1446 cfg.format = so->formats[PAN_VERTEX_ID];
1447 }
1448
1449 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1450
1451 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1452 cfg.buffer_index = k++;
1453 cfg.format = so->formats[PAN_INSTANCE_ID];
1454 }
1455
1456 /* Attribute addresses require 64-byte alignment, so let:
1457 *
1458 * base' = base & ~63 = base - (base & 63)
1459 * offset' = offset + (base & 63)
1460 *
1461 * Since base' + offset' = base + offset, these are equivalent
 1462          * addressing modes and now base' is 64-byte aligned.
1463 */
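        /* e.g. base = 0x1234 gives base' = 0x1200, and the 0x34 remainder is
         * folded into each attribute's src_offset in the loop below. */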
1464
1465 unsigned start = vertex_postfix->offset_start;
1466
1467 for (unsigned i = 0; i < so->num_elements; ++i) {
1468 unsigned vbi = so->pipe[i].vertex_buffer_index;
1469 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1470
1471 /* Adjust by the masked off bits of the offset. Make sure we
1472 * read src_offset from so->hw (which is not GPU visible)
1473 * rather than target (which is) due to caching effects */
1474
1475 unsigned src_offset = so->pipe[i].src_offset;
1476
1477 /* BOs aligned to 4k so guaranteed aligned to 64 */
1478 src_offset += (buf->buffer_offset & 63);
1479
 1480                 /* Also, somewhat obscurely, per-instance data needs to be
1481 * offset in response to a delayed start in an indexed draw */
1482
1483 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1484 src_offset -= buf->stride * start;
1485
1486 pan_pack(out + i, ATTRIBUTE, cfg) {
1487 cfg.buffer_index = attrib_to_buffer[i];
1488 cfg.format = so->formats[i];
1489 cfg.offset = src_offset;
1490 }
1491 }
1492
1493 vertex_postfix->attributes = S.gpu;
1494 vertex_postfix->attribute_meta = T.gpu;
1495 }
1496
1497 static mali_ptr
1498 panfrost_emit_varyings(struct panfrost_batch *batch,
1499 struct mali_attribute_buffer_packed *slot,
1500 unsigned stride, unsigned count)
1501 {
1502 unsigned size = stride * count;
1503 mali_ptr ptr = panfrost_pool_alloc(&batch->invisible_pool, size).gpu;
1504
1505 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1506 cfg.stride = stride;
1507 cfg.size = size;
1508 cfg.pointer = ptr;
1509 }
1510
1511 return ptr;
1512 }
1513
1514 static unsigned
1515 panfrost_streamout_offset(unsigned stride, unsigned offset,
1516 struct pipe_stream_output_target *target)
1517 {
1518 return (target->buffer_offset + (offset * stride * 4)) & 63;
1519 }
1520
1521 static void
1522 panfrost_emit_streamout(struct panfrost_batch *batch,
1523 struct mali_attribute_buffer_packed *slot,
1524 unsigned stride_words, unsigned offset, unsigned count,
1525 struct pipe_stream_output_target *target)
1526 {
1527 unsigned stride = stride_words * 4;
1528 unsigned max_size = target->buffer_size;
1529 unsigned expected_size = stride * count;
1530
1531 /* Grab the BO and bind it to the batch */
1532 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1533
1534 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1535 * the perspective of the TILER and FRAGMENT.
1536 */
1537 panfrost_batch_add_bo(batch, bo,
1538 PAN_BO_ACCESS_SHARED |
1539 PAN_BO_ACCESS_RW |
1540 PAN_BO_ACCESS_VERTEX_TILER |
1541 PAN_BO_ACCESS_FRAGMENT);
1542
1543 /* We will have an offset applied to get alignment */
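        /* (That offset is addr & 63, which panfrost_streamout_offset reports
         * so it can be applied as the XFB attribute's offset; here we round
         * the pointer down and grow the size by the same amount.) */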
1544 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
1545
1546 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1547 cfg.pointer = (addr & ~63);
1548 cfg.stride = stride;
1549 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1550 }
1551 }
1552
1553 static bool
1554 has_point_coord(unsigned mask, gl_varying_slot loc)
1555 {
1556 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1557 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1558 else if (loc == VARYING_SLOT_PNTC)
1559 return (mask & (1 << 8));
1560 else
1561 return false;
1562 }
1563
1564 /* Helpers for manipulating stream out information so we can pack varyings
1565 * accordingly. Compute the src_offset for a given captured varying */
1566
1567 static struct pipe_stream_output *
1568 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1569 {
1570 for (unsigned i = 0; i < info->num_outputs; ++i) {
1571 if (info->output[i].register_index == loc)
1572 return &info->output[i];
1573 }
1574
1575 unreachable("Varying not captured");
1576 }
1577
1578 static unsigned
1579 pan_varying_size(enum mali_format fmt)
1580 {
1581 unsigned type = MALI_EXTRACT_TYPE(fmt);
1582 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1583 unsigned bits = MALI_EXTRACT_BITS(fmt);
1584 unsigned bpc = 0;
1585
1586 if (bits == MALI_CHANNEL_FLOAT) {
1587 /* No doubles */
1588 bool fp16 = (type == MALI_FORMAT_SINT);
1589 assert(fp16 || (type == MALI_FORMAT_UNORM));
1590
1591 bpc = fp16 ? 2 : 4;
1592 } else {
1593 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1594
1595 /* See the enums */
1596 bits = 1 << bits;
1597 assert(bits >= 8);
1598 bpc = bits / 8;
1599 }
1600
1601 return bpc * chan;
1602 }
1603
1604 /* Indices for named (non-XFB) varyings that are present. These are packed
1605 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
 1606  * PAN_VARY_*). This has the nice property that you can look up the buffer index
1607 * of a given special field given a shift S by:
1608 *
1609 * idx = popcount(P & ((1 << S) - 1))
1610 *
 1611  * That is, count all of the varyings that come earlier; that count is the
 1612  * buffer index of this one. Likewise, the total number of special
1613 * buffers required is simply popcount(P)
1614 */
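/* Worked example: if only GENERAL, POSITION and PSIZ are present, P = 0b111,
 * so pan_varying_index(P, PAN_VARY_PSIZ) = popcount(0b111 & 0b011) = 2 and
 * pan_xfb_base(P) = popcount(0b111) = 3. */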
1615
1616 enum pan_special_varying {
1617 PAN_VARY_GENERAL = 0,
1618 PAN_VARY_POSITION = 1,
1619 PAN_VARY_PSIZ = 2,
1620 PAN_VARY_PNTCOORD = 3,
1621 PAN_VARY_FACE = 4,
1622 PAN_VARY_FRAGCOORD = 5,
1623
1624 /* Keep last */
1625 PAN_VARY_MAX,
1626 };
1627
 1628 /* Given a varying, figure out which index it corresponds to */
1629
1630 static inline unsigned
1631 pan_varying_index(unsigned present, enum pan_special_varying v)
1632 {
1633 unsigned mask = (1 << v) - 1;
1634 return util_bitcount(present & mask);
1635 }
1636
1637 /* Get the base offset for XFB buffers, which by convention come after
1638 * everything else. Wrapper function for semantic reasons; by construction this
1639 * is just popcount. */
1640
1641 static inline unsigned
1642 pan_xfb_base(unsigned present)
1643 {
1644 return util_bitcount(present);
1645 }
1646
1647 /* Computes the present mask for varyings so we can start emitting varying records */
1648
1649 static inline unsigned
1650 pan_varying_present(
1651 struct panfrost_shader_state *vs,
1652 struct panfrost_shader_state *fs,
1653 unsigned quirks)
1654 {
1655 /* At the moment we always emit general and position buffers. Not
1656 * strictly necessary but usually harmless */
1657
1658 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1659
1660 /* Enable special buffers by the shader info */
1661
1662 if (vs->writes_point_size)
1663 present |= (1 << PAN_VARY_PSIZ);
1664
1665 if (fs->reads_point_coord)
1666 present |= (1 << PAN_VARY_PNTCOORD);
1667
1668 if (fs->reads_face)
1669 present |= (1 << PAN_VARY_FACE);
1670
1671 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1672 present |= (1 << PAN_VARY_FRAGCOORD);
1673
1674 /* Also, if we have a point sprite, we need a point coord buffer */
1675
1676 for (unsigned i = 0; i < fs->varying_count; i++) {
1677 gl_varying_slot loc = fs->varyings_loc[i];
1678
1679 if (has_point_coord(fs->point_sprite_mask, loc))
1680 present |= (1 << PAN_VARY_PNTCOORD);
1681 }
1682
1683 return present;
1684 }
1685
1686 /* Emitters for varying records */
1687
1688 static void
1689 pan_emit_vary(struct mali_attribute_packed *out,
1690 unsigned present, enum pan_special_varying buf,
1691 unsigned quirks, enum mali_format format,
1692 unsigned offset)
1693 {
1694 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1695 unsigned swizzle = quirks & HAS_SWIZZLES ?
1696 panfrost_get_default_swizzle(nr_channels) :
1697 panfrost_bifrost_swizzle(nr_channels);
1698
1699 pan_pack(out, ATTRIBUTE, cfg) {
1700 cfg.buffer_index = pan_varying_index(present, buf);
1701 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1702 cfg.format = (format << 12) | swizzle;
1703 cfg.offset = offset;
1704 }
1705 }
1706
1707 /* General varying that is unused */
1708
1709 static void
1710 pan_emit_vary_only(struct mali_attribute_packed *out,
1711 unsigned present, unsigned quirks)
1712 {
1713 pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
1714 }
1715
1716 /* Special records */
1717
1718 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1719 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1720 [PAN_VARY_PSIZ] = MALI_R16F,
1721 [PAN_VARY_PNTCOORD] = MALI_R16F,
1722 [PAN_VARY_FACE] = MALI_R32I,
1723 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1724 };
1725
1726 static void
1727 pan_emit_vary_special(struct mali_attribute_packed *out,
1728 unsigned present, enum pan_special_varying buf,
1729 unsigned quirks)
1730 {
1731 assert(buf < PAN_VARY_MAX);
1732 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1733 }
1734
1735 static enum mali_format
1736 pan_xfb_format(enum mali_format format, unsigned nr)
1737 {
1738 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1739 return MALI_R32F | MALI_NR_CHANNELS(nr);
1740 else
1741 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1742 }
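
/* In other words (a descriptive note, going only by the macro names): a
 * varying whose bits field is MALI_CHANNEL_FLOAT is captured as 32-bit float
 * with the captured component count, while anything else keeps its channel
 * type and is widened to 32 bits per channel. */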
1743
1744 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1745 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1746 * value. */
1747
1748 static void
1749 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1750 unsigned present,
1751 unsigned max_xfb,
1752 unsigned *streamout_offsets,
1753 unsigned quirks,
1754 enum mali_format format,
1755 struct pipe_stream_output o)
1756 {
1757 unsigned swizzle = quirks & HAS_SWIZZLES ?
1758 panfrost_get_default_swizzle(o.num_components) :
1759 panfrost_bifrost_swizzle(o.num_components);
1760
1761 pan_pack(out, ATTRIBUTE, cfg) {
1762 /* XFB buffers come after everything else */
1763 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1764 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1765
1766 /* Override number of channels and precision to highp */
1767 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1768
1769 /* Combine the per-output offset (dwords, converted to bytes) with the buffer offset */
1770 cfg.offset = (o.dst_offset * 4) /* dwords */
1771 + streamout_offsets[o.output_buffer];
1772 }
1773 }
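
/* Illustrative example with assumed values: an output with o.output_buffer
 * == 0 and o.dst_offset == 2, where streamout_offsets[0] is 64 bytes and
 * four varying buffers are present, gets buffer_index = pan_xfb_base(present)
 * + 0 = 4 and offset = 2 * 4 + 64 = 72 bytes. */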
1774
1775 /* Determine if we should capture a varying for XFB. This requires actually
1776 * having a buffer for it. If we don't capture it, we'll fall back to a general
1777 * varying path (linked or unlinked, possibly discarding the write) */
1778
1779 static bool
1780 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1781 unsigned loc, unsigned max_xfb)
1782 {
1783 if (!(xfb->so_mask & (1ll << loc)))
1784 return false;
1785
1786 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1787 return o->output_buffer < max_xfb;
1788 }
1789
1790 static void
1791 pan_emit_general_varying(struct mali_attribute_packed *out,
1792 struct panfrost_shader_state *other,
1793 struct panfrost_shader_state *xfb,
1794 gl_varying_slot loc,
1795 enum mali_format format,
1796 unsigned present,
1797 unsigned quirks,
1798 unsigned *gen_offsets,
1799 enum mali_format *gen_formats,
1800 unsigned *gen_stride,
1801 unsigned idx,
1802 bool should_alloc)
1803 {
1804 /* Check if we're linked */
1805 signed other_idx = -1;
1806
1807 for (unsigned j = 0; j < other->varying_count; ++j) {
1808 if (other->varyings_loc[j] == loc) {
1809 other_idx = j;
1810 break;
1811 }
1812 }
1813
1814 if (other_idx < 0) {
1815 pan_emit_vary_only(out, present, quirks);
1816 return;
1817 }
1818
1819 unsigned offset = gen_offsets[other_idx];
1820
1821 if (should_alloc) {
1822 /* We're linked, so allocate space via a watermark allocation */
1823 enum mali_format alt = other->varyings[other_idx];
1824
1825 /* Do interpolation at minimum precision */
1826 unsigned size_main = pan_varying_size(format);
1827 unsigned size_alt = pan_varying_size(alt);
1828 unsigned size = MIN2(size_main, size_alt);
1829
1830 /* If a varying is marked for XFB but not actually captured, we
1831 * should match the format to the format that would otherwise
1832 * be used for XFB, since dEQP checks for invariance here. It's
1833 * unclear if this is required by the spec. */
1834
1835 if (xfb->so_mask & (1ull << loc)) {
1836 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1837 format = pan_xfb_format(format, o->num_components);
1838 size = pan_varying_size(format);
1839 } else if (size == size_alt) {
1840 format = alt;
1841 }
1842
1843 gen_offsets[idx] = *gen_stride;
1844 gen_formats[other_idx] = format;
1845 offset = *gen_stride;
1846 *gen_stride += size;
1847 }
1848
1849 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1850 }
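
/* Worked example of the watermark allocation above (with assumed formats):
 * if the vertex shader declares a varying as 4 x 32-bit float while the
 * linked fragment shader reads it as 4 x 16-bit float, size_main is 16 and
 * size_alt is 8, so we interpolate at 8 bytes: the record takes the fragment
 * shader's format, offset is the current *gen_stride, and *gen_stride
 * advances by 8 -- unless the slot is also marked for XFB, in which case the
 * XFB format (and its size) wins. */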
1851
1852 /* Higher-level wrapper that classifies a varying as one of the cases above
1853 * and dispatches to the corresponding emitter */
1854
1855 static void
1856 panfrost_emit_varying(
1857 struct mali_attribute_packed *out,
1858 struct panfrost_shader_state *stage,
1859 struct panfrost_shader_state *other,
1860 struct panfrost_shader_state *xfb,
1861 unsigned present,
1862 unsigned max_xfb,
1863 unsigned *streamout_offsets,
1864 unsigned quirks,
1865 unsigned *gen_offsets,
1866 enum mali_format *gen_formats,
1867 unsigned *gen_stride,
1868 unsigned idx,
1869 bool should_alloc,
1870 bool is_fragment)
1871 {
1872 gl_varying_slot loc = stage->varyings_loc[idx];
1873 enum mali_format format = stage->varyings[idx];
1874
1875 /* Override format to match linkage */
1876 if (!should_alloc && gen_formats[idx])
1877 format = gen_formats[idx];
1878
1879 if (has_point_coord(stage->point_sprite_mask, loc)) {
1880 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1881 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1882 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1883 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1884 } else if (loc == VARYING_SLOT_POS) {
1885 if (is_fragment)
1886 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1887 else
1888 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1889 } else if (loc == VARYING_SLOT_PSIZ) {
1890 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1891 } else if (loc == VARYING_SLOT_PNTC) {
1892 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1893 } else if (loc == VARYING_SLOT_FACE) {
1894 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1895 } else {
1896 pan_emit_general_varying(out, other, xfb, loc, format, present,
1897 quirks, gen_offsets, gen_formats, gen_stride,
1898 idx, should_alloc);
1899 }
1900 }
1901
1902 static void
1903 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1904 unsigned present,
1905 enum pan_special_varying v,
1906 unsigned special)
1907 {
1908 if (present & (1 << v)) {
1909 unsigned idx = pan_varying_index(present, v);
1910
1911 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1912 cfg.special = special;
1913 cfg.type = 0;
1914 }
1915 }
1916 }
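
/* Note (an interpretation based on the packing alone): special inputs like
 * the point coordinate or front-facing bit get no memory allocation; the
 * buffer slot at pan_varying_index(present, v) just carries the "special"
 * selector, presumably leaving the hardware to synthesize the values. */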
1917
1918 void
1919 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1920 unsigned vertex_count,
1921 struct mali_vertex_tiler_postfix *vertex_postfix,
1922 struct mali_vertex_tiler_postfix *tiler_postfix,
1923 union midgard_primitive_size *primitive_size)
1924 {
1925 /* Load the shaders */
1926 struct panfrost_context *ctx = batch->ctx;
1927 struct panfrost_device *dev = pan_device(ctx->base.screen);
1928 struct panfrost_shader_state *vs, *fs;
1929 size_t vs_size, fs_size;
1930
1931 /* Allocate the varying descriptor */
1932
1933 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1934 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1935 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1936 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1937
1938 struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
1939 vs_size +
1940 fs_size);
1941
1942 struct pipe_stream_output_info *so = &vs->stream_output;
1943 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1944
1945 /* Check if this varying is linked by us. This is the case for
1946 * general-purpose, non-captured varyings. If it is, link it. If it's
1947 * not, use the provided stream out information to determine the
1948 * offset, since it was already linked for us. */
1949
1950 unsigned gen_offsets[32];
1951 enum mali_format gen_formats[32];
1952 memset(gen_offsets, 0, sizeof(gen_offsets));
1953 memset(gen_formats, 0, sizeof(gen_formats));
1954
1955 unsigned gen_stride = 0;
1956 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1957 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1958
1959 unsigned streamout_offsets[32];
1960
1961 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1962 streamout_offsets[i] = panfrost_streamout_offset(
1963 so->stride[i],
1964 ctx->streamout.offsets[i],
1965 ctx->streamout.targets[i]);
1966 }
1967
1968 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1969 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1970
1971 for (unsigned i = 0; i < vs->varying_count; i++) {
1972 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1973 ctx->streamout.num_targets, streamout_offsets,
1974 dev->quirks,
1975 gen_offsets, gen_formats, &gen_stride, i, true, false);
1976 }
1977
1978 for (unsigned i = 0; i < fs->varying_count; i++) {
1979 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
1980 ctx->streamout.num_targets, streamout_offsets,
1981 dev->quirks,
1982 gen_offsets, gen_formats, &gen_stride, i, false, true);
1983 }
1984
1985 unsigned xfb_base = pan_xfb_base(present);
1986 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
1987 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets));
1988 struct mali_attribute_buffer_packed *varyings =
1989 (struct mali_attribute_buffer_packed *) T.cpu;
1990
1991 /* Emit the stream out buffers */
1992
1993 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
1994 ctx->vertex_count);
1995
1996 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1997 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
1998 so->stride[i],
1999 ctx->streamout.offsets[i],
2000 out_count,
2001 ctx->streamout.targets[i]);
2002 }
2003
2004 panfrost_emit_varyings(batch,
2005 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2006 gen_stride, vertex_count);
2007
2008 /* fp32 vec4 gl_Position */
2009 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2010 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2011 sizeof(float) * 4, vertex_count);
2012
2013 if (present & (1 << PAN_VARY_PSIZ)) {
2014 primitive_size->pointer = panfrost_emit_varyings(batch,
2015 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2016 2, vertex_count);
2017 }
2018
2019 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
2020 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
2021 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
2022
2023 vertex_postfix->varyings = T.gpu;
2024 tiler_postfix->varyings = T.gpu;
2025
2026 vertex_postfix->varying_meta = trans.gpu;
2027 tiler_postfix->varying_meta = trans.gpu + vs_size;
2028 }
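
/* Summary of the layout produced above (a descriptive sketch, not extra
 * state): the attribute buffers at T.gpu are ordered by present bit --
 * general, position, then whichever of psiz / pntcoord / face / fragcoord
 * are enabled -- followed by one buffer per active streamout target. The
 * attribute records at trans.gpu hold the vertex shader's varyings first and
 * the fragment shader's right after, which is why the tiler's varying_meta
 * starts vs_size bytes in. */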
2029
2030 void
2031 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2032 struct mali_vertex_tiler_prefix *vertex_prefix,
2033 struct mali_vertex_tiler_postfix *vertex_postfix,
2034 struct mali_vertex_tiler_prefix *tiler_prefix,
2035 struct mali_vertex_tiler_postfix *tiler_postfix,
2036 union midgard_primitive_size *primitive_size)
2037 {
2038 struct panfrost_context *ctx = batch->ctx;
2039 struct panfrost_device *device = pan_device(ctx->base.screen);
2040 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2041 struct bifrost_payload_vertex bifrost_vertex = {0,};
2042 struct bifrost_payload_tiler bifrost_tiler = {0,};
2043 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2044 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2045 void *vp, *tp;
2046 size_t vp_size, tp_size;
2047
2048 if (device->quirks & IS_BIFROST) {
2049 bifrost_vertex.prefix = *vertex_prefix;
2050 bifrost_vertex.postfix = *vertex_postfix;
2051 vp = &bifrost_vertex;
2052 vp_size = sizeof(bifrost_vertex);
2053
2054 bifrost_tiler.prefix = *tiler_prefix;
2055 bifrost_tiler.tiler.primitive_size = *primitive_size;
2056 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2057 bifrost_tiler.postfix = *tiler_postfix;
2058 tp = &bifrost_tiler;
2059 tp_size = sizeof(bifrost_tiler);
2060 } else {
2061 midgard_vertex.prefix = *vertex_prefix;
2062 midgard_vertex.postfix = *vertex_postfix;
2063 vp = &midgard_vertex;
2064 vp_size = sizeof(midgard_vertex);
2065
2066 midgard_tiler.prefix = *tiler_prefix;
2067 midgard_tiler.postfix = *tiler_postfix;
2068 midgard_tiler.primitive_size = *primitive_size;
2069 tp = &midgard_tiler;
2070 tp_size = sizeof(midgard_tiler);
2071 }
2072
2073 if (wallpapering) {
2074 /* Inject in reverse order, with "predicted" job indices.
2075 * THIS IS A HACK XXX */
2076 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2077 batch->scoreboard.job_index + 2, tp, tp_size, true);
2078 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2079 vp, vp_size, true);
2080 return;
2081 }
2082
2083 /* If rasterizer discard is enabled, only submit the vertex job */
2084
2085 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2086 vp, vp_size, false);
2087
2088 if (ctx->rasterizer->base.rasterizer_discard)
2089 return;
2090
2091 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2092 false);
2093 }
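
/* In the common (non-wallpaper) path above, the vertex job is submitted
 * first and its returned job index is passed as the dependency of the tiler
 * job, so tiling waits for vertex shading; with rasterizer discard enabled
 * the tiler job is never queued at all. */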
2094
2095 /* TODO: stop hardcoding this */
2096 mali_ptr
2097 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2098 {
2099 uint16_t locations[] = {
2100 128, 128,
2101 0, 256,
2102 0, 256,
2103 0, 256,
2104 0, 256,
2105 0, 256,
2106 0, 256,
2107 0, 256,
2108 0, 256,
2109 0, 256,
2110 0, 256,
2111 0, 256,
2112 0, 256,
2113 0, 256,
2114 0, 256,
2115 0, 256,
2116 0, 256,
2117 0, 256,
2118 0, 256,
2119 0, 256,
2120 0, 256,
2121 0, 256,
2122 0, 256,
2123 0, 256,
2124 0, 256,
2125 0, 256,
2126 0, 256,
2127 0, 256,
2128 0, 256,
2129 0, 256,
2130 0, 256,
2131 0, 256,
2132 128, 128,
2133 0, 0,
2134 0, 0,
2135 0, 0,
2136 0, 0,
2137 0, 0,
2138 0, 0,
2139 0, 0,
2140 0, 0,
2141 0, 0,
2142 0, 0,
2143 0, 0,
2144 0, 0,
2145 0, 0,
2146 0, 0,
2147 0, 0,
2148 };
2149
2150 return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
2151 }