90ff477ec6625899434482653dd6c7b9b51b864b
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 unsigned shift = panfrost_get_stack_shift(batch->stack_size);
62 struct mali_shared_memory shared = {
63 .stack_shift = shift,
64 .scratchpad = panfrost_batch_get_scratchpad(batch, shift, dev->thread_tls_alloc, dev->core_count)->gpu,
65 .shared_workgroup_count = ~0,
66 };
67 postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
68 }
69
70 static void
71 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
72 struct mali_vertex_tiler_postfix *postfix)
73 {
74 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
75 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
76 }
77
78 static void
79 panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
80 struct mali_vertex_tiler_prefix *prefix,
81 struct mali_vertex_tiler_postfix *postfix)
82 {
83 postfix->gl_enables |= 0x7;
84 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
85 rasterizer->base.front_ccw);
86 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
87 (rasterizer->base.cull_face & PIPE_FACE_FRONT));
88 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
89 (rasterizer->base.cull_face & PIPE_FACE_BACK));
90 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
91 rasterizer->base.flatshade_first);
92 }
93
94 void
95 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
96 struct mali_vertex_tiler_prefix *prefix,
97 union midgard_primitive_size *primitive_size)
98 {
99 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
100
101 if (!panfrost_writes_point_size(ctx)) {
102 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
103 rasterizer->base.point_size :
104 rasterizer->base.line_width;
105
106 primitive_size->constant = val;
107 }
108 }
109
110 static void
111 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
112 struct mali_vertex_tiler_postfix *postfix)
113 {
114 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
115 if (ctx->occlusion_query) {
116 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
117 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
118 PAN_BO_ACCESS_SHARED |
119 PAN_BO_ACCESS_RW |
120 PAN_BO_ACCESS_FRAGMENT);
121 } else {
122 postfix->occlusion_counter = 0;
123 }
124 }
125
126 void
127 panfrost_vt_init(struct panfrost_context *ctx,
128 enum pipe_shader_type stage,
129 struct mali_vertex_tiler_prefix *prefix,
130 struct mali_vertex_tiler_postfix *postfix)
131 {
132 struct panfrost_device *device = pan_device(ctx->base.screen);
133
134 if (!ctx->shader[stage])
135 return;
136
137 memset(prefix, 0, sizeof(*prefix));
138 memset(postfix, 0, sizeof(*postfix));
139
140 if (device->quirks & IS_BIFROST) {
141 postfix->gl_enables = 0x2;
142 panfrost_vt_emit_shared_memory(ctx, postfix);
143 } else {
144 postfix->gl_enables = 0x6;
145 panfrost_vt_attach_framebuffer(ctx, postfix);
146 }
147
148 if (stage == PIPE_SHADER_FRAGMENT) {
149 panfrost_vt_update_occlusion_query(ctx, postfix);
150 panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
151 }
152 }
153
154 static unsigned
155 panfrost_translate_index_size(unsigned size)
156 {
157 switch (size) {
158 case 1:
159 return MALI_DRAW_INDEXED_UINT8;
160
161 case 2:
162 return MALI_DRAW_INDEXED_UINT16;
163
164 case 4:
165 return MALI_DRAW_INDEXED_UINT32;
166
167 default:
168 unreachable("Invalid index size");
169 }
170 }
171
172 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
173 * good for the duration of the draw (transient), though it could last longer. Also gets
174 * the bounds on the index buffer for the range accessed by the draw. We do
175 * these operations together because there are natural optimizations which
176 * require them to be together. */
177
178 static mali_ptr
179 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
180 const struct pipe_draw_info *info,
181 unsigned *min_index, unsigned *max_index)
182 {
183 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
184 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
185 off_t offset = info->start * info->index_size;
186 bool needs_indices = true;
187 mali_ptr out = 0;
188
189 if (info->max_index != ~0u) {
190 *min_index = info->min_index;
191 *max_index = info->max_index;
192 needs_indices = false;
193 }
194
195 if (!info->has_user_indices) {
196 /* Only resources can be directly mapped */
197 panfrost_batch_add_bo(batch, rsrc->bo,
198 PAN_BO_ACCESS_SHARED |
199 PAN_BO_ACCESS_READ |
200 PAN_BO_ACCESS_VERTEX_TILER);
201 out = rsrc->bo->gpu + offset;
202
203 /* Check the cache */
204 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
205 info->start,
206 info->count,
207 min_index,
208 max_index);
209 } else {
210 /* Otherwise, we need to upload to transient memory */
211 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
212 out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
213 info->count *
214 info->index_size);
215 }
216
217 if (needs_indices) {
218 /* Fallback */
219 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
220
221 if (!info->has_user_indices)
222 panfrost_minmax_cache_add(rsrc->index_cache,
223 info->start, info->count,
224 *min_index, *max_index);
225 }
226
227 return out;
228 }
229
230 void
231 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
232 const struct pipe_draw_info *info,
233 enum mali_draw_mode draw_mode,
234 struct mali_vertex_tiler_postfix *vertex_postfix,
235 struct mali_vertex_tiler_prefix *tiler_prefix,
236 struct mali_vertex_tiler_postfix *tiler_postfix,
237 unsigned *vertex_count,
238 unsigned *padded_count)
239 {
240 tiler_prefix->draw_mode = draw_mode;
241
242 unsigned draw_flags = 0;
243
244 if (panfrost_writes_point_size(ctx))
245 draw_flags |= MALI_DRAW_VARYING_SIZE;
246
247 if (info->primitive_restart)
248 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
249
250 /* These don't make much sense */
251
252 draw_flags |= 0x3000;
253
254 if (info->index_size) {
255 unsigned min_index = 0, max_index = 0;
256
257 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
258 info,
259 &min_index,
260 &max_index);
261
262 /* Use the corresponding values */
263 *vertex_count = max_index - min_index + 1;
264 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
265 tiler_prefix->offset_bias_correction = -min_index;
266 tiler_prefix->index_count = MALI_POSITIVE(info->count);
267 draw_flags |= panfrost_translate_index_size(info->index_size);
268 } else {
269 tiler_prefix->indices = 0;
270 *vertex_count = ctx->vertex_count;
271 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
272 tiler_prefix->offset_bias_correction = 0;
273 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
274 }
275
276 tiler_prefix->unknown_draw = draw_flags;
277
278 /* Encode the padded vertex count */
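/* The arithmetic below factors the padded count as an odd number times a
* power of two, padded = (2k + 1) << shift, passing shift as instance_shift
* and k as instance_odd. Worked example (illustrative values): a padded count
* of 12 = 3 << 2 gives shift = ctz(12) = 2 and k = 12 >> (shift + 1) = 1,
* so the odd factor is 2k + 1 = 3. */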
279
280 if (info->instance_count > 1) {
281 *padded_count = panfrost_padded_vertex_count(*vertex_count);
282
283 unsigned shift = __builtin_ctz(ctx->padded_count);
284 unsigned k = ctx->padded_count >> (shift + 1);
285
286 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
287 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
288 } else {
289 *padded_count = *vertex_count;
290
291 /* Reset instancing state */
292 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
293 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
294 }
295 }
296
297 static void
298 panfrost_shader_meta_init(struct panfrost_context *ctx,
299 enum pipe_shader_type st,
300 struct mali_shader_meta *meta)
301 {
302 const struct panfrost_device *dev = pan_device(ctx->base.screen);
303 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
304
305 memset(meta, 0, sizeof(*meta));
306 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
307 meta->attribute_count = ss->attribute_count;
308 meta->varying_count = ss->varying_count;
309 meta->texture_count = ctx->sampler_view_count[st];
310 meta->sampler_count = ctx->sampler_count[st];
311
312 if (dev->quirks & IS_BIFROST) {
313 if (st == PIPE_SHADER_VERTEX)
314 meta->bifrost1.unk1 = 0x800000;
315 else {
316 /* First clause ATEST |= 0x4000000.
317 * Less than 32 regs |= 0x200 */
318 meta->bifrost1.unk1 = 0x950020;
319 }
320
321 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
322 if (st == PIPE_SHADER_VERTEX)
323 meta->bifrost2.preload_regs = 0xC0;
324 else {
325 meta->bifrost2.preload_regs = 0x1;
326 SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
327 }
328
329 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
330 ss->uniform_cutoff);
331 } else {
332 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
333 ss->uniform_cutoff);
334 meta->midgard1.work_count = ss->work_reg_count;
335
336 /* TODO: This is not conformant on ES3 */
337 meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
338
339 meta->midgard1.flags_lo = 0x20;
340 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
341
342 SET_BIT(meta->midgard1.flags_lo, MALI_WRITES_GLOBAL, ss->writes_global);
343 }
344 }
345
346 static unsigned
347 translate_tex_wrap(enum pipe_tex_wrap w)
348 {
349 switch (w) {
350 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
351 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
352 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
353 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
354 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
355 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
356 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
357 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
358 default: unreachable("Invalid wrap");
359 }
360 }
361
362 /* The hardware compares in the wrong order, so we have to flip before
363 * encoding. Yes, really. */
364
365 static enum mali_func
366 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
367 {
368 if (!cso->compare_mode)
369 return MALI_FUNC_NEVER;
370
371 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
372 return panfrost_flip_compare_func(f);
373 }
374
375 static enum mali_mipmap_mode
376 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
377 {
378 switch (f) {
379 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
380 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
381 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
382 default: unreachable("Invalid");
383 }
384 }
385
386 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
387 struct mali_midgard_sampler_packed *hw)
388 {
389 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
390 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
391 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
392 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
393 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
394 cfg.normalized_coordinates = cso->normalized_coords;
395
396 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
397
398 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
399
400 /* If necessary, we disable mipmapping in the sampler descriptor by
401 * clamping the LOD as tight as possible (from 0 to epsilon,
402 * essentially -- remember these are fixed point numbers, so
403 * epsilon=1/256) */
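/* Worked example (illustrative): with min_lod = 0, minimum_lod packs to 0
* in 16-bit fixed point with an 8-bit fraction, and maximum_lod becomes
* 0 + 1, i.e. 1/256 -- so only the base level can ever be selected. */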
404
405 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
406 cfg.minimum_lod + 1 :
407 FIXED_16(cso->max_lod, false);
408
409 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
410 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
411 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
412
413 cfg.compare_function = panfrost_sampler_compare_func(cso);
414 cfg.seamless_cube_map = cso->seamless_cube_map;
415
416 cfg.border_color_r = cso->border_color.f[0];
417 cfg.border_color_g = cso->border_color.f[1];
418 cfg.border_color_b = cso->border_color.f[2];
419 cfg.border_color_a = cso->border_color.f[3];
420 }
421 }
422
423 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
424 struct mali_bifrost_sampler_packed *hw)
425 {
426 pan_pack(hw, BIFROST_SAMPLER, cfg) {
427 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
428 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
429 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
430 cfg.normalized_coordinates = cso->normalized_coords;
431
432 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
433 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
434 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
435
436 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
437 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
438 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
439
440 cfg.compare_function = panfrost_sampler_compare_func(cso);
441 cfg.seamless_cube_map = cso->seamless_cube_map;
442 }
443 }
444
445 static void
446 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
447 struct mali_shader_meta *fragmeta)
448 {
449 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
450
451 bool msaa = rast->multisample;
452
453 /* TODO: Sample size */
454 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
455 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
456
457 struct panfrost_shader_state *fs;
458 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
459
460 /* EXT_shader_framebuffer_fetch requires the shader to be run
461 * per-sample when outputs are read. */
462 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
463 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
464
465 fragmeta->depth_units = rast->offset_units * 2.0f;
466 fragmeta->depth_factor = rast->offset_scale;
467
468 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
469
470 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
471 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
472
473 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
474 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
475 }
476
477 static void
478 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
479 struct mali_shader_meta *fragmeta)
480 {
481 const struct panfrost_zsa_state *so = ctx->depth_stencil;
482
483 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
484 so->base.stencil[0].enabled);
485
486 fragmeta->stencil_mask_front = so->stencil_mask_front;
487 fragmeta->stencil_mask_back = so->stencil_mask_back;
488
489 /* Bottom bits for stencil ref, exactly one word */
490 fragmeta->stencil_front.opaque[0] = so->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
491
492 /* If back-stencil is not enabled, use the front values */
493
494 if (so->base.stencil[1].enabled)
495 fragmeta->stencil_back.opaque[0] = so->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
496 else
497 fragmeta->stencil_back = fragmeta->stencil_front;
498
499 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
500 so->base.depth.writemask);
501
502 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
503 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(
504 so->base.depth.enabled ? so->base.depth.func : PIPE_FUNC_ALWAYS));
505 }
506
507 static bool
508 panfrost_fs_required(
509 struct panfrost_shader_state *fs,
510 struct panfrost_blend_final *blend,
511 unsigned rt_count)
512 {
513 /* If we generally have side effects */
514 if (fs->fs_sidefx)
515 return true;
516
517 /* If colour is written we need to execute */
518 for (unsigned i = 0; i < rt_count; ++i) {
519 if (!blend[i].no_colour)
520 return true;
521 }
522
523 /* If depth is written and not implied we need to execute.
524 * TODO: Predicate on Z/S writes being enabled */
525 return (fs->writes_depth || fs->writes_stencil);
526 }
527
528 static void
529 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
530 struct mali_shader_meta *fragmeta,
531 void *rts)
532 {
533 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
534 const struct panfrost_device *dev = pan_device(ctx->base.screen);
535 struct panfrost_shader_state *fs;
536 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
537
538 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
539 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
540 !ctx->blend->base.dither);
541
542 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
543 ctx->blend->base.alpha_to_coverage);
544
545 /* Get blending setup */
546 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
547
548 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
549 unsigned shader_offset = 0;
550 struct panfrost_bo *shader_bo = NULL;
551
552 for (unsigned c = 0; c < rt_count; ++c)
553 blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
554 &shader_offset);
555
556 /* Disable shader execution if we can */
557 if (dev->quirks & MIDGARD_SHADERLESS
558 && !panfrost_fs_required(fs, blend, rt_count)) {
559 fragmeta->shader = 0;
560 fragmeta->attribute_count = 0;
561 fragmeta->varying_count = 0;
562 fragmeta->texture_count = 0;
563 fragmeta->sampler_count = 0;
564
565 /* This feature is not known to work on Bifrost */
566 fragmeta->midgard1.work_count = 1;
567 fragmeta->midgard1.uniform_count = 0;
568 fragmeta->midgard1.uniform_buffer_count = 0;
569 }
570
571 /* If there is a blend shader, work registers are shared. We impose 8
572 * work registers as a limit for blend shaders. Should be lower XXX */
573
574 if (!(dev->quirks & IS_BIFROST)) {
575 for (unsigned c = 0; c < rt_count; ++c) {
576 if (blend[c].is_shader) {
577 fragmeta->midgard1.work_count =
578 MAX2(fragmeta->midgard1.work_count, 8);
579 }
580 }
581 }
582
583 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
584 * copied to the blend_meta appended (by convention), but this is the
585 * field actually read by the hardware. (Or maybe both are read...?).
586 * Specify the last RTi with a blend shader. */
587
588 fragmeta->blend.shader = 0;
589
590 for (signed rt = (rt_count - 1); rt >= 0; --rt) {
591 if (!blend[rt].is_shader)
592 continue;
593
594 fragmeta->blend.shader = blend[rt].shader.gpu |
595 blend[rt].shader.first_tag;
596 break;
597 }
598
599 if (dev->quirks & MIDGARD_SFBD) {
600 /* On platforms with only a single render target (SFBD), the blend
601 * information is inside the shader meta itself. We additionally
602 * need to signal CAN_DISCARD for nontrivial blend modes (so
603 * we're able to read back the destination buffer) */
604
605 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
606 blend[0].is_shader);
607
608 if (!blend[0].is_shader) {
609 fragmeta->blend.equation = *blend[0].equation.equation;
610 fragmeta->blend.constant = blend[0].equation.constant;
611 }
612
613 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
614 !blend[0].no_blending || fs->can_discard);
615
616 batch->draws |= PIPE_CLEAR_COLOR0;
617 return;
618 }
619
620 if (dev->quirks & IS_BIFROST) {
621 bool no_blend = true;
622
623 for (unsigned i = 0; i < rt_count; ++i)
624 no_blend &= (blend[i].no_blending | blend[i].no_colour);
625
626 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
627 !fs->can_discard && !fs->writes_depth && no_blend);
628 }
629
630 /* Additional blend descriptor tacked on for jobs using MFBD */
631
632 for (unsigned i = 0; i < rt_count; ++i) {
633 unsigned flags = 0;
634
635 if (ctx->pipe_framebuffer.nr_cbufs > i && !blend[i].no_colour) {
636 flags = 0x200;
637 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
638
639 bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
640 (ctx->pipe_framebuffer.cbufs[i]) &&
641 util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
642
643 SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
644 SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
645 SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
646 SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
647 }
648
649 if (dev->quirks & IS_BIFROST) {
650 struct bifrost_blend_rt *brts = rts;
651
652 brts[i].flags = flags;
653
654 if (blend[i].is_shader) {
655 /* The blend shader's address needs to be at
656 * the same top 32 bit as the fragment shader.
657 * TODO: Ensure that's always the case.
658 */
659 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
660 (fs->bo->gpu & (0xffffffffull << 32)));
661 brts[i].shader = blend[i].shader.gpu;
662 brts[i].unk2 = 0x0;
663 } else if (ctx->pipe_framebuffer.nr_cbufs > i) {
664 enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
665 const struct util_format_description *format_desc;
666 format_desc = util_format_description(format);
667
668 brts[i].equation = *blend[i].equation.equation;
669
670 /* TODO: this is a bit more complicated */
671 brts[i].constant = blend[i].equation.constant;
672
673 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
674
675 /* 0x19 disables blending and forces REPLACE
676 * mode (equivalent to rgb_mode = alpha_mode =
677 * x122, colour mask = 0xF). 0x1a allows
678 * blending. */
679 brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
680
681 brts[i].shader_type = fs->blend_types[i];
682 } else {
683 /* Dummy attachment for depth-only */
684 brts[i].unk2 = 0x3;
685 brts[i].shader_type = fs->blend_types[i];
686 }
687 } else {
688 struct midgard_blend_rt *mrts = rts;
689 mrts[i].flags = flags;
690
691 if (blend[i].is_shader) {
692 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
693 } else {
694 mrts[i].blend.equation = *blend[i].equation.equation;
695 mrts[i].blend.constant = blend[i].equation.constant;
696 }
697 }
698 }
699 }
700
701 static void
702 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
703 struct mali_shader_meta *fragmeta,
704 void *rts)
705 {
706 const struct panfrost_device *dev = pan_device(ctx->base.screen);
707 struct panfrost_shader_state *fs;
708
709 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
710
711 bool msaa = ctx->rasterizer->base.multisample;
712 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
713
714 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
715 fragmeta->unknown2_4 = 0x4e0;
716
717 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
718 * is required (independent of 32-bit/64-bit descriptors), or why it's
719 * not used on later GPU revisions. Otherwise, all shader jobs fault on
720 * these earlier chips (perhaps this is a chicken bit of some kind).
721 * More investigation is needed. */
722
723 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
724
725 if (dev->quirks & IS_BIFROST) {
726 /* TODO */
727 } else {
728 /* Depending on whether it's legal in the given shader, we try to
729 * enable early-z testing. TODO: respect e-z force */
730
731 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
732 !fs->can_discard && !fs->writes_global &&
733 !fs->writes_depth && !fs->writes_stencil &&
734 !ctx->blend->base.alpha_to_coverage);
735
736 /* Add the writes Z/S flags if needed. */
737 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
738 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
739
740 /* Any time texturing is used, derivatives are implicitly calculated,
741 * so we need to enable helper invocations */
742
743 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
744 fs->helper_invocations);
745
746 /* If discard is enabled, which bit we set to convey this
747 * depends on if depth/stencil is used for the draw or not.
748 * Just one of depth OR stencil is enough to trigger this. */
749
750 const struct pipe_depth_stencil_alpha_state *zsa = &ctx->depth_stencil->base;
751 bool zs_enabled =
752 fs->writes_depth || fs->writes_stencil ||
753 (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS) ||
754 zsa->stencil[0].enabled;
755
756 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
757 fs->outputs_read || (!zs_enabled && fs->can_discard));
758 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
759 }
760
761 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
762 panfrost_frag_meta_zsa_update(ctx, fragmeta);
763 panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
764 }
765
766 void
767 panfrost_emit_shader_meta(struct panfrost_batch *batch,
768 enum pipe_shader_type st,
769 struct mali_vertex_tiler_postfix *postfix)
770 {
771 struct panfrost_context *ctx = batch->ctx;
772 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
773
774 if (!ss) {
775 postfix->shader = 0;
776 return;
777 }
778
779 struct mali_shader_meta meta;
780
781 panfrost_shader_meta_init(ctx, st, &meta);
782
783 /* Add the shader BO to the batch. */
784 panfrost_batch_add_bo(batch, ss->bo,
785 PAN_BO_ACCESS_PRIVATE |
786 PAN_BO_ACCESS_READ |
787 panfrost_bo_access_for_stage(st));
788
789 mali_ptr shader_ptr;
790
791 if (st == PIPE_SHADER_FRAGMENT) {
792 struct panfrost_device *dev = pan_device(ctx->base.screen);
793 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
794 size_t desc_size = sizeof(meta);
795 void *rts = NULL;
796 struct panfrost_transfer xfer;
797 unsigned rt_size;
798
799 if (dev->quirks & MIDGARD_SFBD)
800 rt_size = 0;
801 else if (dev->quirks & IS_BIFROST)
802 rt_size = sizeof(struct bifrost_blend_rt);
803 else
804 rt_size = sizeof(struct midgard_blend_rt);
805
806 desc_size += rt_size * rt_count;
807
808 if (rt_size)
809 rts = rzalloc_size(ctx, rt_size * rt_count);
810
811 panfrost_frag_shader_meta_init(ctx, &meta, rts);
812
813 xfer = panfrost_pool_alloc(&batch->pool, desc_size);
814
815 memcpy(xfer.cpu, &meta, sizeof(meta));
816 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
817
818 if (rt_size)
819 ralloc_free(rts);
820
821 shader_ptr = xfer.gpu;
822 } else {
823 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
824 sizeof(meta));
825 }
826
827 postfix->shader = shader_ptr;
828 }
829
830 void
831 panfrost_emit_viewport(struct panfrost_batch *batch,
832 struct mali_vertex_tiler_postfix *tiler_postfix)
833 {
834 struct panfrost_context *ctx = batch->ctx;
835 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
836 const struct pipe_scissor_state *ss = &ctx->scissor;
837 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
838 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
839
840 /* Derive min/max from translate/scale. Note since |x| >= 0 by
841 * definition, we have that -|x| <= |x| hence translate - |scale| <=
842 * translate + |scale|, so the ordering is correct here. */
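/* Worked example (illustrative values): an 800x600 viewport with a flipped Y
* axis has translate = (400, 300) and scale = (400, -300); taking |scale|
* yields vp_minx = 0, vp_maxx = 800, vp_miny = 0, vp_maxy = 600 regardless of
* the sign of the scale. */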
843 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
844 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
845 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
846 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
847 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
848 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
849
850 /* Scissor to the intersection of the viewport and the scissor, clamped
851 * to the framebuffer */
852
853 unsigned minx = MIN2(fb->width, vp_minx);
854 unsigned maxx = MIN2(fb->width, vp_maxx);
855 unsigned miny = MIN2(fb->height, vp_miny);
856 unsigned maxy = MIN2(fb->height, vp_maxy);
857
858 if (ss && rast->scissor) {
859 minx = MAX2(ss->minx, minx);
860 miny = MAX2(ss->miny, miny);
861 maxx = MIN2(ss->maxx, maxx);
862 maxy = MIN2(ss->maxy, maxy);
863 }
864
865 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
866
867 pan_pack(T.cpu, VIEWPORT, cfg) {
868 cfg.scissor_minimum_x = minx;
869 cfg.scissor_minimum_y = miny;
870 cfg.scissor_maximum_x = maxx - 1;
871 cfg.scissor_maximum_y = maxy - 1;
872
873 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
874 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
875 }
876
877 tiler_postfix->viewport = T.gpu;
878 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
879 }
880
881 static mali_ptr
882 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
883 enum pipe_shader_type st,
884 struct panfrost_constant_buffer *buf,
885 unsigned index)
886 {
887 struct pipe_constant_buffer *cb = &buf->cb[index];
888 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
889
890 if (rsrc) {
891 panfrost_batch_add_bo(batch, rsrc->bo,
892 PAN_BO_ACCESS_SHARED |
893 PAN_BO_ACCESS_READ |
894 panfrost_bo_access_for_stage(st));
895
896 /* Alignment guaranteed by
897 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
898 return rsrc->bo->gpu + cb->buffer_offset;
899 } else if (cb->user_buffer) {
900 return panfrost_pool_upload(&batch->pool,
901 cb->user_buffer +
902 cb->buffer_offset,
903 cb->buffer_size);
904 } else {
905 unreachable("No constant buffer");
906 }
907 }
908
909 struct sysval_uniform {
910 union {
911 float f[4];
912 int32_t i[4];
913 uint32_t u[4];
914 uint64_t du[2];
915 };
916 };
917
918 static void
919 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
920 struct sysval_uniform *uniform)
921 {
922 struct panfrost_context *ctx = batch->ctx;
923 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
924
925 uniform->f[0] = vp->scale[0];
926 uniform->f[1] = vp->scale[1];
927 uniform->f[2] = vp->scale[2];
928 }
929
930 static void
931 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
932 struct sysval_uniform *uniform)
933 {
934 struct panfrost_context *ctx = batch->ctx;
935 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
936
937 uniform->f[0] = vp->translate[0];
938 uniform->f[1] = vp->translate[1];
939 uniform->f[2] = vp->translate[2];
940 }
941
942 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
943 enum pipe_shader_type st,
944 unsigned int sysvalid,
945 struct sysval_uniform *uniform)
946 {
947 struct panfrost_context *ctx = batch->ctx;
948 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
949 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
950 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
951 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
952
953 assert(dim);
954 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
955
956 if (dim > 1)
957 uniform->i[1] = u_minify(tex->texture->height0,
958 tex->u.tex.first_level);
959
960 if (dim > 2)
961 uniform->i[2] = u_minify(tex->texture->depth0,
962 tex->u.tex.first_level);
963
964 if (is_array)
965 uniform->i[dim] = tex->texture->array_size;
966 }
967
968 static void
969 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
970 enum pipe_shader_type st,
971 unsigned ssbo_id,
972 struct sysval_uniform *uniform)
973 {
974 struct panfrost_context *ctx = batch->ctx;
975
976 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
977 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
978
979 /* Compute address */
980 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
981
982 panfrost_batch_add_bo(batch, bo,
983 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
984 panfrost_bo_access_for_stage(st));
985
986 /* Upload address and size as sysval */
987 uniform->du[0] = bo->gpu + sb.buffer_offset;
988 uniform->u[2] = sb.buffer_size;
989 }
990
991 static void
992 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
993 enum pipe_shader_type st,
994 unsigned samp_idx,
995 struct sysval_uniform *uniform)
996 {
997 struct panfrost_context *ctx = batch->ctx;
998 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
999
1000 uniform->f[0] = sampl->min_lod;
1001 uniform->f[1] = sampl->max_lod;
1002 uniform->f[2] = sampl->lod_bias;
1003
1004 /* Even without any errata, Midgard represents "no mipmapping" as
1005 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1006 * panfrost_create_sampler_state which also explains our choice of
1007 * epsilon value (again to keep behaviour consistent) */
1008
1009 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1010 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1011 }
1012
1013 static void
1014 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1015 struct sysval_uniform *uniform)
1016 {
1017 struct panfrost_context *ctx = batch->ctx;
1018
1019 uniform->u[0] = ctx->compute_grid->grid[0];
1020 uniform->u[1] = ctx->compute_grid->grid[1];
1021 uniform->u[2] = ctx->compute_grid->grid[2];
1022 }
1023
1024 static void
1025 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1026 struct panfrost_shader_state *ss,
1027 enum pipe_shader_type st)
1028 {
1029 struct sysval_uniform *uniforms = (void *)buf;
1030
1031 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1032 int sysval = ss->sysval[i];
1033
1034 switch (PAN_SYSVAL_TYPE(sysval)) {
1035 case PAN_SYSVAL_VIEWPORT_SCALE:
1036 panfrost_upload_viewport_scale_sysval(batch,
1037 &uniforms[i]);
1038 break;
1039 case PAN_SYSVAL_VIEWPORT_OFFSET:
1040 panfrost_upload_viewport_offset_sysval(batch,
1041 &uniforms[i]);
1042 break;
1043 case PAN_SYSVAL_TEXTURE_SIZE:
1044 panfrost_upload_txs_sysval(batch, st,
1045 PAN_SYSVAL_ID(sysval),
1046 &uniforms[i]);
1047 break;
1048 case PAN_SYSVAL_SSBO:
1049 panfrost_upload_ssbo_sysval(batch, st,
1050 PAN_SYSVAL_ID(sysval),
1051 &uniforms[i]);
1052 break;
1053 case PAN_SYSVAL_NUM_WORK_GROUPS:
1054 panfrost_upload_num_work_groups_sysval(batch,
1055 &uniforms[i]);
1056 break;
1057 case PAN_SYSVAL_SAMPLER:
1058 panfrost_upload_sampler_sysval(batch, st,
1059 PAN_SYSVAL_ID(sysval),
1060 &uniforms[i]);
1061 break;
1062 default:
1063 assert(0);
1064 }
1065 }
1066 }
1067
1068 static const void *
1069 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1070 unsigned index)
1071 {
1072 struct pipe_constant_buffer *cb = &buf->cb[index];
1073 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1074
1075 if (rsrc)
1076 return rsrc->bo->cpu;
1077 else if (cb->user_buffer)
1078 return cb->user_buffer;
1079 else
1080 unreachable("No constant buffer");
1081 }
1082
1083 void
1084 panfrost_emit_const_buf(struct panfrost_batch *batch,
1085 enum pipe_shader_type stage,
1086 struct mali_vertex_tiler_postfix *postfix)
1087 {
1088 struct panfrost_context *ctx = batch->ctx;
1089 struct panfrost_shader_variants *all = ctx->shader[stage];
1090
1091 if (!all)
1092 return;
1093
1094 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1095
1096 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1097
1098 /* Uniforms are implicitly UBO #0 */
1099 bool has_uniforms = buf->enabled_mask & (1 << 0);
1100
1101 /* Allocate room for the sysval and the uniforms */
1102 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1103 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1104 size_t size = sys_size + uniform_size;
1105 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1106 size);
1107
1108 /* Upload sysvals requested by the shader */
1109 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1110
1111 /* Upload uniforms */
1112 if (has_uniforms && uniform_size) {
1113 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1114 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1115 }
1116
1117 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1118 * uploaded */
1119
1120 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1121 assert(ubo_count >= 1);
1122
1123 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1124 struct panfrost_transfer ubos = panfrost_pool_alloc(&batch->pool, sz);
1125 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1126
1127 /* Upload uniforms as a UBO */
1128
1129 if (ss->uniform_count) {
1130 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1131 cfg.entries = ss->uniform_count;
1132 cfg.pointer = transfer.gpu;
1133 }
1134 } else {
1135 *ubo_ptr = 0;
1136 }
1137
1138 /* The rest are honest-to-goodness UBOs */
1139
1140 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1141 size_t usz = buf->cb[ubo].buffer_size;
1142 bool enabled = buf->enabled_mask & (1 << ubo);
1143 bool empty = usz == 0;
1144
1145 if (!enabled || empty) {
1146 ubo_ptr[ubo] = 0;
1147 continue;
1148 }
1149
1150 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1151 cfg.entries = DIV_ROUND_UP(usz, 16);
1152 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1153 stage, buf, ubo);
1154 }
1155 }
1156
1157 postfix->uniforms = transfer.gpu;
1158 postfix->uniform_buffers = ubos.gpu;
1159
1160 buf->dirty_mask = 0;
1161 }
1162
1163 void
1164 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1165 const struct pipe_grid_info *info,
1166 struct midgard_payload_vertex_tiler *vtp)
1167 {
1168 struct panfrost_context *ctx = batch->ctx;
1169 struct panfrost_device *dev = pan_device(ctx->base.screen);
1170 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1171 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1172 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1173 128));
1174
1175 unsigned log2_instances =
1176 util_logbase2_ceil(info->grid[0]) +
1177 util_logbase2_ceil(info->grid[1]) +
1178 util_logbase2_ceil(info->grid[2]);
1179
1180 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
1181 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1182 shared_size,
1183 1);
1184
1185 struct mali_shared_memory shared = {
1186 .shared_memory = bo->gpu,
1187 .shared_workgroup_count = log2_instances,
1188 .shared_shift = util_logbase2(single_size) + 1
1189 };
1190
1191 vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
1192 sizeof(shared));
1193 }
1194
1195 static mali_ptr
1196 panfrost_get_tex_desc(struct panfrost_batch *batch,
1197 enum pipe_shader_type st,
1198 struct panfrost_sampler_view *view)
1199 {
1200 if (!view)
1201 return (mali_ptr) 0;
1202
1203 struct pipe_sampler_view *pview = &view->base;
1204 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1205
1206 /* Add the BO to the job so it's retained until the job is done. */
1207
1208 panfrost_batch_add_bo(batch, rsrc->bo,
1209 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1210 panfrost_bo_access_for_stage(st));
1211
1212 panfrost_batch_add_bo(batch, view->bo,
1213 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1214 panfrost_bo_access_for_stage(st));
1215
1216 return view->bo->gpu;
1217 }
1218
1219 static void
1220 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1221 struct pipe_context *pctx)
1222 {
1223 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1224 if (view->texture_bo != rsrc->bo->gpu ||
1225 view->modifier != rsrc->modifier) {
1226 panfrost_bo_unreference(view->bo);
1227 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1228 }
1229 }
1230
1231 void
1232 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1233 enum pipe_shader_type stage,
1234 struct mali_vertex_tiler_postfix *postfix)
1235 {
1236 struct panfrost_context *ctx = batch->ctx;
1237 struct panfrost_device *device = pan_device(ctx->base.screen);
1238
1239 if (!ctx->sampler_view_count[stage])
1240 return;
1241
1242 if (device->quirks & IS_BIFROST) {
1243 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
1244 MALI_BIFROST_TEXTURE_LENGTH *
1245 ctx->sampler_view_count[stage]);
1246
1247 struct mali_bifrost_texture_packed *out =
1248 (struct mali_bifrost_texture_packed *) T.cpu;
1249
1250 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1251 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1252 struct pipe_sampler_view *pview = &view->base;
1253 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1254
1255 panfrost_update_sampler_view(view, &ctx->base);
1256 out[i] = view->bifrost_descriptor;
1257
1258 /* Add the BOs to the job so they are retained until the job is done. */
1259
1260 panfrost_batch_add_bo(batch, rsrc->bo,
1261 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1262 panfrost_bo_access_for_stage(stage));
1263
1264 panfrost_batch_add_bo(batch, view->bo,
1265 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1266 panfrost_bo_access_for_stage(stage));
1267 }
1268
1269 postfix->textures = T.gpu;
1270 } else {
1271 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1272
1273 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1274 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1275
1276 panfrost_update_sampler_view(view, &ctx->base);
1277
1278 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1279 }
1280
1281 postfix->textures = panfrost_pool_upload(&batch->pool,
1282 trampolines,
1283 sizeof(uint64_t) *
1284 ctx->sampler_view_count[stage]);
1285 }
1286 }
1287
1288 void
1289 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1290 enum pipe_shader_type stage,
1291 struct mali_vertex_tiler_postfix *postfix)
1292 {
1293 struct panfrost_context *ctx = batch->ctx;
1294
1295 if (!ctx->sampler_count[stage])
1296 return;
1297
1298 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1299 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1300
1301 size_t sz = desc_size * ctx->sampler_count[stage];
1302 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, sz);
1303 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1304
1305 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1306 out[i] = ctx->samplers[stage][i]->hw;
1307
1308 postfix->sampler_descriptor = T.gpu;
1309 }
1310
1311 void
1312 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1313 struct mali_vertex_tiler_postfix *vertex_postfix)
1314 {
1315 struct panfrost_context *ctx = batch->ctx;
1316 struct panfrost_vertex_state *so = ctx->vertex;
1317
1318 unsigned instance_shift = vertex_postfix->instance_shift;
1319 unsigned instance_odd = vertex_postfix->instance_odd;
1320
1321 /* Worst case: everything is NPOT */
1322
1323 struct panfrost_transfer S = panfrost_pool_alloc(&batch->pool,
1324 MALI_ATTRIBUTE_LENGTH * PIPE_MAX_ATTRIBS * 2);
1325
1326 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
1327 MALI_ATTRIBUTE_LENGTH * (PAN_INSTANCE_ID + 1));
1328
1329 struct mali_attribute_buffer_packed *bufs =
1330 (struct mali_attribute_buffer_packed *) S.cpu;
1331
1332 struct mali_attribute_packed *out =
1333 (struct mali_attribute_packed *) T.cpu;
1334
1335 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1336 unsigned k = 0;
1337
1338 for (unsigned i = 0; i < so->num_elements; ++i) {
1339 /* We map buffers 1:1 with the attributes, which
1340 * means duplicating some vertex buffers (who cares? aside from
1341 * maybe some caching implications but I somehow doubt that
1342 * matters) */
1343
1344 struct pipe_vertex_element *elem = &so->pipe[i];
1345 unsigned vbi = elem->vertex_buffer_index;
1346 attrib_to_buffer[i] = k;
1347
1348 if (!(ctx->vb_mask & (1 << vbi)))
1349 continue;
1350
1351 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1352 struct panfrost_resource *rsrc;
1353
1354 rsrc = pan_resource(buf->buffer.resource);
1355 if (!rsrc)
1356 continue;
1357
1358 /* Add a dependency of the batch on the vertex buffer */
1359 panfrost_batch_add_bo(batch, rsrc->bo,
1360 PAN_BO_ACCESS_SHARED |
1361 PAN_BO_ACCESS_READ |
1362 PAN_BO_ACCESS_VERTEX_TILER);
1363
1364 /* Mask off lower bits, see offset fixup below */
1365 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1366 mali_ptr addr = raw_addr & ~63;
1367
1368 /* Since we advanced the base pointer, we shrink the buffer
1369 * size, but add the offset we subtracted */
1370 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1371 - buf->buffer_offset;
1372
1373 /* When there is a divisor, the hardware-level divisor is
1374 * the product of the instance divisor and the padded count */
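/* For instance (illustrative values): an instance divisor of 2 with a padded
* count of 12 gives hw_divisor = 24, which is not a power of two, so the
* NPOT path below encodes it with a magic divisor. */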
1375 unsigned divisor = elem->instance_divisor;
1376 unsigned hw_divisor = ctx->padded_count * divisor;
1377 unsigned stride = buf->stride;
1378
1379 /* If there's a divisor(=1) but no instancing, we want every
1380 * attribute to be the same */
1381
1382 if (divisor && ctx->instance_count == 1)
1383 stride = 0;
1384
1385 if (!divisor || ctx->instance_count <= 1) {
1386 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1387 if (ctx->instance_count > 1)
1388 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1389
1390 cfg.pointer = addr;
1391 cfg.stride = stride;
1392 cfg.size = size;
1393 cfg.divisor_r = instance_shift;
1394 cfg.divisor_p = instance_odd;
1395 }
1396 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1397 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1398 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1399 cfg.pointer = addr;
1400 cfg.stride = stride;
1401 cfg.size = size;
1402 cfg.divisor_r = __builtin_ctz(hw_divisor);
1403 }
1404
1405 } else {
1406 unsigned shift = 0, extra_flags = 0;
1407
1408 unsigned magic_divisor =
1409 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
1410
1411 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1412 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1413 cfg.pointer = addr;
1414 cfg.stride = stride;
1415 cfg.size = size;
1416
1417 cfg.divisor_r = shift;
1418 cfg.divisor_e = extra_flags;
1419 }
1420
1421 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1422 cfg.divisor_numerator = magic_divisor;
1423 cfg.divisor = divisor;
1424 }
1425
1426 ++k;
1427 }
1428
1429 ++k;
1430 }
1431
1432 /* Add special gl_VertexID/gl_InstanceID buffers */
1433
1434 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1435
1436 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1437 cfg.buffer_index = k++;
1438 cfg.format = so->formats[PAN_VERTEX_ID];
1439 }
1440
1441 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1442
1443 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1444 cfg.buffer_index = k++;
1445 cfg.format = so->formats[PAN_INSTANCE_ID];
1446 }
1447
1448 /* Attribute addresses require 64-byte alignment, so let:
1449 *
1450 * base' = base & ~63 = base - (base & 63)
1451 * offset' = offset + (base & 63)
1452 *
1453 * Since base' + offset' = base + offset, these are equivalent
1454 * addressing modes and now base is 64 aligned.
1455 */
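/* Worked example (illustrative values): base = 0x10000043, offset = 4. Then
* base' = 0x10000043 & ~63 = 0x10000040 and offset' = 4 + (0x43 & 63) = 7,
* so base' + offset' = 0x10000047 = base + offset as required. */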
1456
1457 unsigned start = vertex_postfix->offset_start;
1458
1459 for (unsigned i = 0; i < so->num_elements; ++i) {
1460 unsigned vbi = so->pipe[i].vertex_buffer_index;
1461 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1462
1463 /* Adjust by the masked off bits of the offset. Make sure we
1464 * read src_offset from so->hw (which is not GPU visible)
1465 * rather than target (which is) due to caching effects */
1466
1467 unsigned src_offset = so->pipe[i].src_offset;
1468
1469 /* BOs aligned to 4k so guaranteed aligned to 64 */
1470 src_offset += (buf->buffer_offset & 63);
1471
1472 /* Also, somewhat obscurely, per-instance data needs to be
1473 * offset in response to a delayed start in an indexed draw */
1474
1475 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1476 src_offset -= buf->stride * start;
1477
1478 pan_pack(out + i, ATTRIBUTE, cfg) {
1479 cfg.buffer_index = attrib_to_buffer[i];
1480 cfg.format = so->formats[i];
1481 cfg.offset = src_offset;
1482 }
1483 }
1484
1485 vertex_postfix->attributes = S.gpu;
1486 vertex_postfix->attribute_meta = T.gpu;
1487 }
1488
1489 static mali_ptr
1490 panfrost_emit_varyings(struct panfrost_batch *batch,
1491 struct mali_attribute_buffer_packed *slot,
1492 unsigned stride, unsigned count)
1493 {
1494 unsigned size = stride * count;
1495 mali_ptr ptr = panfrost_pool_alloc(&batch->pool, size).gpu;
1496
1497 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1498 cfg.stride = stride;
1499 cfg.size = size;
1500 cfg.pointer = ptr;
1501 }
1502
1503 return ptr;
1504 }
1505
1506 static unsigned
1507 panfrost_streamout_offset(unsigned stride, unsigned offset,
1508 struct pipe_stream_output_target *target)
1509 {
1510 return (target->buffer_offset + (offset * stride * 4)) & 63;
1511 }
1512
1513 static void
1514 panfrost_emit_streamout(struct panfrost_batch *batch,
1515 struct mali_attribute_buffer_packed *slot,
1516 unsigned stride_words, unsigned offset, unsigned count,
1517 struct pipe_stream_output_target *target)
1518 {
1519 unsigned stride = stride_words * 4;
1520 unsigned max_size = target->buffer_size;
1521 unsigned expected_size = stride * count;
1522
1523 /* Grab the BO and bind it to the batch */
1524 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1525
1526 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1527 * the perspective of the TILER and FRAGMENT.
1528 */
1529 panfrost_batch_add_bo(batch, bo,
1530 PAN_BO_ACCESS_SHARED |
1531 PAN_BO_ACCESS_RW |
1532 PAN_BO_ACCESS_VERTEX_TILER |
1533 PAN_BO_ACCESS_FRAGMENT);
1534
1535 /* We will have an offset applied to get alignment */
1536 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
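/* Worked example (illustrative values): with buffer_offset = 100, offset = 2
* and a 16-byte stride, addr lands 132 bytes into the BO; below, the pointer
* is rounded down to the 64-byte boundary at +128 (addr & ~63) and the
* remaining 4 bytes (addr & 63) are added back to the size, matching the
* residue panfrost_streamout_offset() computes for the same inputs. */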
1537
1538 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1539 cfg.pointer = (addr & ~63);
1540 cfg.stride = stride;
1541 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1542 }
1543 }
1544
1545 static bool
1546 has_point_coord(unsigned mask, gl_varying_slot loc)
1547 {
1548 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1549 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1550 else if (loc == VARYING_SLOT_PNTC)
1551 return (mask & (1 << 8));
1552 else
1553 return false;
1554 }
1555
1556 /* Helpers for manipulating stream out information so we can pack varyings
1557 * accordingly. Compute the src_offset for a given captured varying */
1558
1559 static struct pipe_stream_output *
1560 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1561 {
1562 for (unsigned i = 0; i < info->num_outputs; ++i) {
1563 if (info->output[i].register_index == loc)
1564 return &info->output[i];
1565 }
1566
1567 unreachable("Varying not captured");
1568 }
1569
1570 static unsigned
1571 pan_varying_size(enum mali_format fmt)
1572 {
1573 unsigned type = MALI_EXTRACT_TYPE(fmt);
1574 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1575 unsigned bits = MALI_EXTRACT_BITS(fmt);
1576 unsigned bpc = 0;
1577
1578 if (bits == MALI_CHANNEL_FLOAT) {
1579 /* No doubles */
1580 bool fp16 = (type == MALI_FORMAT_SINT);
1581 assert(fp16 || (type == MALI_FORMAT_UNORM));
1582
1583 bpc = fp16 ? 2 : 4;
1584 } else {
1585 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1586
1587 /* See the enums */
1588 bits = 1 << bits;
1589 assert(bits >= 8);
1590 bpc = bits / 8;
1591 }
1592
1593 return bpc * chan;
1594 }
1595
1596 /* Indices for named (non-XFB) varyings that are present. These are packed
1597 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1598 * PAN_VARY_*). This has the nice property that you can lookup the buffer index
1599 * of a given special field given a shift S by:
1600 *
1601 * idx = popcount(P & ((1 << S) - 1))
1602 *
1603 * That is... look at all of the varyings that come earlier and count them; that
1604 * count is this varying's index (indices are zero-based). Likewise, the total number of special
1605 * buffers required is simply popcount(P)
1606 */
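/* Worked example (illustrative): P = 0b1011 (general, position and point
* coord present). The point coord record (S = 3) gets buffer index
* popcount(0b1011 & 0b0111) = 2, and XFB buffers start at index
* popcount(0b1011) = 3. */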
1607
1608 enum pan_special_varying {
1609 PAN_VARY_GENERAL = 0,
1610 PAN_VARY_POSITION = 1,
1611 PAN_VARY_PSIZ = 2,
1612 PAN_VARY_PNTCOORD = 3,
1613 PAN_VARY_FACE = 4,
1614 PAN_VARY_FRAGCOORD = 5,
1615
1616 /* Keep last */
1617 PAN_VARY_MAX,
1618 };
1619
1620 /* Given a varying, figure out which index it corresponds to */
1621
1622 static inline unsigned
1623 pan_varying_index(unsigned present, enum pan_special_varying v)
1624 {
1625 unsigned mask = (1 << v) - 1;
1626 return util_bitcount(present & mask);
1627 }
1628
1629 /* Get the base offset for XFB buffers, which by convention come after
1630 * everything else. Wrapper function for semantic reasons; by construction this
1631 * is just popcount. */
1632
1633 static inline unsigned
1634 pan_xfb_base(unsigned present)
1635 {
1636 return util_bitcount(present);
1637 }
1638
1639 /* Computes the present mask for varyings so we can start emitting varying records */
1640
1641 static inline unsigned
1642 pan_varying_present(
1643 struct panfrost_shader_state *vs,
1644 struct panfrost_shader_state *fs,
1645 unsigned quirks)
1646 {
1647 /* At the moment we always emit general and position buffers. Not
1648 * strictly necessary but usually harmless */
1649
1650 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1651
1652 /* Enable special buffers by the shader info */
1653
1654 if (vs->writes_point_size)
1655 present |= (1 << PAN_VARY_PSIZ);
1656
1657 if (fs->reads_point_coord)
1658 present |= (1 << PAN_VARY_PNTCOORD);
1659
1660 if (fs->reads_face)
1661 present |= (1 << PAN_VARY_FACE);
1662
1663 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1664 present |= (1 << PAN_VARY_FRAGCOORD);
1665
1666 /* Also, if we have a point sprite, we need a point coord buffer */
1667
1668 for (unsigned i = 0; i < fs->varying_count; i++) {
1669 gl_varying_slot loc = fs->varyings_loc[i];
1670
1671 if (has_point_coord(fs->point_sprite_mask, loc))
1672 present |= (1 << PAN_VARY_PNTCOORD);
1673 }
1674
1675 return present;
1676 }
1677
1678 /* Emitters for varying records */
1679
1680 static void
1681 pan_emit_vary(struct mali_attribute_packed *out,
1682 unsigned present, enum pan_special_varying buf,
1683 unsigned quirks, enum mali_format format,
1684 unsigned offset)
1685 {
1686 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1687 unsigned swizzle = quirks & HAS_SWIZZLES ?
1688 panfrost_get_default_swizzle(nr_channels) :
1689 panfrost_bifrost_swizzle(nr_channels);
1690
1691 pan_pack(out, ATTRIBUTE, cfg) {
1692 cfg.buffer_index = pan_varying_index(present, buf);
1693 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1694 cfg.format = (format << 12) | swizzle;
1695 cfg.offset = offset;
1696 }
1697 }
1698
1699 /* General varying that is unused */
1700
1701 static void
1702 pan_emit_vary_only(struct mali_attribute_packed *out,
1703 unsigned present, unsigned quirks)
1704 {
1705 pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
1706 }
1707
1708 /* Special records */
1709
1710 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1711 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1712 [PAN_VARY_PSIZ] = MALI_R16F,
1713 [PAN_VARY_PNTCOORD] = MALI_R16F,
1714 [PAN_VARY_FACE] = MALI_R32I,
1715 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1716 };
1717
1718 static void
1719 pan_emit_vary_special(struct mali_attribute_packed *out,
1720 unsigned present, enum pan_special_varying buf,
1721 unsigned quirks)
1722 {
1723 assert(buf < PAN_VARY_MAX);
1724 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1725 }
1726
1727 static enum mali_format
1728 pan_xfb_format(enum mali_format format, unsigned nr)
1729 {
1730 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1731 return MALI_R32F | MALI_NR_CHANNELS(nr);
1732 else
1733 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1734 }
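/* Illustration: a format whose channel size is MALI_CHANNEL_FLOAT captured
 * with nr = 3 becomes MALI_R32F | MALI_NR_CHANNELS(3); other formats keep
 * their base type via MALI_EXTRACT_TYPE() but are widened to 32-bit channels,
 * matching the highp override noted below. */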
1735
1736 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1737 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1738 * value. */
1739
1740 static void
1741 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1742 unsigned present,
1743 unsigned max_xfb,
1744 unsigned *streamout_offsets,
1745 unsigned quirks,
1746 enum mali_format format,
1747 struct pipe_stream_output o)
1748 {
1749 unsigned swizzle = quirks & HAS_SWIZZLES ?
1750 panfrost_get_default_swizzle(o.num_components) :
1751 panfrost_bifrost_swizzle(o.num_components);
1752
1753 pan_pack(out, ATTRIBUTE, cfg) {
1754 /* XFB buffers come after everything else */
1755 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1756 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1757
1758 /* Override number of channels and precision to highp */
1759 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1760
1761 /* Add the record's destination offset (in dwords) to the streamout buffer offset */
1762 cfg.offset = (o.dst_offset * 4) /* dwords */
1763 + streamout_offsets[o.output_buffer];
1764 }
1765 }
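/* Offset illustration (hypothetical numbers): a stream output with
 * o.dst_offset = 2 dwords, writing to a target whose streamout_offsets[] entry
 * is 16 bytes, ends up with cfg.offset = 2 * 4 + 16 = 24 bytes. */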
1766
1767 /* Determine if we should capture a varying for XFB. This requires actually
1768 * having a buffer for it. If we don't capture it, we fall back to a general
1769 * varying path (linked or unlinked, possibly discarding the write) */
1770
1771 static bool
1772 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1773 unsigned loc, unsigned max_xfb)
1774 {
1775 if (!(xfb->so_mask & (1ll << loc)))
1776 return false;
1777
1778 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1779 return o->output_buffer < max_xfb;
1780 }
1781
1782 static void
1783 pan_emit_general_varying(struct mali_attribute_packed *out,
1784 struct panfrost_shader_state *other,
1785 struct panfrost_shader_state *xfb,
1786 gl_varying_slot loc,
1787 enum mali_format format,
1788 unsigned present,
1789 unsigned quirks,
1790 unsigned *gen_offsets,
1791 enum mali_format *gen_formats,
1792 unsigned *gen_stride,
1793 unsigned idx,
1794 bool should_alloc)
1795 {
1796 /* Check if we're linked */
1797 signed other_idx = -1;
1798
1799 for (unsigned j = 0; j < other->varying_count; ++j) {
1800 if (other->varyings_loc[j] == loc) {
1801 other_idx = j;
1802 break;
1803 }
1804 }
1805
1806 if (other_idx < 0) {
1807 pan_emit_vary_only(out, present, quirks);
1808 return;
1809 }
1810
1811 unsigned offset = gen_offsets[other_idx];
1812
1813 if (should_alloc) {
1814 /* We're linked, so allocate space via a watermark allocation */
1815 enum mali_format alt = other->varyings[other_idx];
1816
1817 /* Do interpolation at minimum precision */
1818 unsigned size_main = pan_varying_size(format);
1819 unsigned size_alt = pan_varying_size(alt);
1820 unsigned size = MIN2(size_main, size_alt);
1821
1822 /* If a varying is marked for XFB but not actually captured, we
1823 * should match the format to the format that would otherwise
1824 * be used for XFB, since dEQP checks for invariance here. It's
1825 * unclear if this is required by the spec. */
1826
1827 if (xfb->so_mask & (1ull << loc)) {
1828 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1829 format = pan_xfb_format(format, o->num_components);
1830 size = pan_varying_size(format);
1831 } else if (size == size_alt) {
1832 format = alt;
1833 }
1834
1835 gen_offsets[idx] = *gen_stride;
1836 gen_formats[other_idx] = format;
1837 offset = *gen_stride;
1838 *gen_stride += size;
1839 }
1840
1841 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1842 }
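/* Watermark illustration (hypothetical sizes): with *gen_stride starting at 0,
 * a first linked varying of size 16 is placed at offset 0 and bumps
 * *gen_stride to 16; a second of size 8 then lands at offset 16, leaving
 * *gen_stride = 24, which is later used as the per-vertex stride when the
 * general varying buffer is allocated. */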
1843
1844 /* Higher-level wrapper around the emitters above, classifying a varying into
1845 * one of the cases they handle */
1846
1847 static void
1848 panfrost_emit_varying(
1849 struct mali_attribute_packed *out,
1850 struct panfrost_shader_state *stage,
1851 struct panfrost_shader_state *other,
1852 struct panfrost_shader_state *xfb,
1853 unsigned present,
1854 unsigned max_xfb,
1855 unsigned *streamout_offsets,
1856 unsigned quirks,
1857 unsigned *gen_offsets,
1858 enum mali_format *gen_formats,
1859 unsigned *gen_stride,
1860 unsigned idx,
1861 bool should_alloc,
1862 bool is_fragment)
1863 {
1864 gl_varying_slot loc = stage->varyings_loc[idx];
1865 enum mali_format format = stage->varyings[idx];
1866
1867 /* Override format to match linkage */
1868 if (!should_alloc && gen_formats[idx])
1869 format = gen_formats[idx];
1870
1871 if (has_point_coord(stage->point_sprite_mask, loc)) {
1872 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1873 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1874 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1875 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1876 } else if (loc == VARYING_SLOT_POS) {
1877 if (is_fragment)
1878 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1879 else
1880 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1881 } else if (loc == VARYING_SLOT_PSIZ) {
1882 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1883 } else if (loc == VARYING_SLOT_PNTC) {
1884 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1885 } else if (loc == VARYING_SLOT_FACE) {
1886 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1887 } else {
1888 pan_emit_general_varying(out, other, xfb, loc, format, present,
1889 quirks, gen_offsets, gen_formats, gen_stride,
1890 idx, should_alloc);
1891 }
1892 }
1893
1894 static void
1895 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1896 unsigned present,
1897 enum pan_special_varying v,
1898 unsigned special)
1899 {
1900 if (present & (1 << v)) {
1901 unsigned idx = pan_varying_index(present, v);
1902
1903 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1904 cfg.special = special;
1905 cfg.type = 0;
1906 }
1907 }
1908 }
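/* For instance, if PAN_VARY_PNTCOORD is in the present mask, this writes an
 * attribute buffer record with cfg.special = MALI_ATTRIBUTE_SPECIAL_POINT_COORD
 * at index pan_varying_index(present, PAN_VARY_PNTCOORD); special varyings
 * that are absent from the mask get no record at all. */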
1909
1910 void
1911 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1912 unsigned vertex_count,
1913 struct mali_vertex_tiler_postfix *vertex_postfix,
1914 struct mali_vertex_tiler_postfix *tiler_postfix,
1915 union midgard_primitive_size *primitive_size)
1916 {
1917 /* Load the shaders */
1918 struct panfrost_context *ctx = batch->ctx;
1919 struct panfrost_device *dev = pan_device(ctx->base.screen);
1920 struct panfrost_shader_state *vs, *fs;
1921 size_t vs_size, fs_size;
1922
1923 /* Allocate the varying descriptor */
1924
1925 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1926 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1927 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1928 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1929
1930 struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
1931 vs_size +
1932 fs_size);
1933
1934 struct pipe_stream_output_info *so = &vs->stream_output;
1935 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1936
1937 /* Check if this varying is linked by us. This is the case for
1938 * general-purpose, non-captured varyings. If it is, link it. If it's
1939 * not, use the provided stream out information to determine the
1940 * offset, since it was already linked for us. */
1941
1942 unsigned gen_offsets[32];
1943 enum mali_format gen_formats[32];
1944 memset(gen_offsets, 0, sizeof(gen_offsets));
1945 memset(gen_formats, 0, sizeof(gen_formats));
1946
1947 unsigned gen_stride = 0;
1948 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1949 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1950
1951 unsigned streamout_offsets[32];
1952
1953 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1954 streamout_offsets[i] = panfrost_streamout_offset(
1955 so->stride[i],
1956 ctx->streamout.offsets[i],
1957 ctx->streamout.targets[i]);
1958 }
1959
1960 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1961 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1962
1963 for (unsigned i = 0; i < vs->varying_count; i++) {
1964 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1965 ctx->streamout.num_targets, streamout_offsets,
1966 dev->quirks,
1967 gen_offsets, gen_formats, &gen_stride, i, true, false);
1968 }
1969
1970 for (unsigned i = 0; i < fs->varying_count; i++) {
1971 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
1972 ctx->streamout.num_targets, streamout_offsets,
1973 dev->quirks,
1974 gen_offsets, gen_formats, &gen_stride, i, false, true);
1975 }
1976
1977 unsigned xfb_base = pan_xfb_base(present);
1978 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
1979 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets));
1980 struct mali_attribute_buffer_packed *varyings =
1981 (struct mali_attribute_buffer_packed *) T.cpu;
1982
1983 /* Emit the stream out buffers */
1984
1985 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
1986 ctx->vertex_count);
1987
1988 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1989 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
1990 so->stride[i],
1991 ctx->streamout.offsets[i],
1992 out_count,
1993 ctx->streamout.targets[i]);
1994 }
1995
1996 panfrost_emit_varyings(batch,
1997 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
1998 gen_stride, vertex_count);
1999
2000 /* fp32 vec4 gl_Position */
2001 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2002 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2003 sizeof(float) * 4, vertex_count);
2004
2005 if (present & (1 << PAN_VARY_PSIZ)) {
2006 primitive_size->pointer = panfrost_emit_varyings(batch,
2007 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2008 2, vertex_count);
2009 }
2010
2011 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
2012 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
2013 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
2014
2015 vertex_postfix->varyings = T.gpu;
2016 tiler_postfix->varyings = T.gpu;
2017
2018 vertex_postfix->varying_meta = trans.gpu;
2019 tiler_postfix->varying_meta = trans.gpu + vs_size;
2020 }
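/* Resulting layout, summarised: attribute buffers 0 .. popcount(present) - 1
 * hold the general/special varying buffers in pan_special_varying order
 * (absent ones are skipped), followed by one buffer per streamout target
 * starting at pan_xfb_base(present). The attribute records at trans.gpu are
 * the vertex shader's varyings followed by the fragment shader's, which is
 * why tiler_postfix->varying_meta is offset by vs_size. */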
2021
2022 void
2023 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2024 struct mali_vertex_tiler_prefix *vertex_prefix,
2025 struct mali_vertex_tiler_postfix *vertex_postfix,
2026 struct mali_vertex_tiler_prefix *tiler_prefix,
2027 struct mali_vertex_tiler_postfix *tiler_postfix,
2028 union midgard_primitive_size *primitive_size)
2029 {
2030 struct panfrost_context *ctx = batch->ctx;
2031 struct panfrost_device *device = pan_device(ctx->base.screen);
2032 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2033 struct bifrost_payload_vertex bifrost_vertex = {0,};
2034 struct bifrost_payload_tiler bifrost_tiler = {0,};
2035 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2036 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2037 void *vp, *tp;
2038 size_t vp_size, tp_size;
2039
2040 if (device->quirks & IS_BIFROST) {
2041 bifrost_vertex.prefix = *vertex_prefix;
2042 bifrost_vertex.postfix = *vertex_postfix;
2043 vp = &bifrost_vertex;
2044 vp_size = sizeof(bifrost_vertex);
2045
2046 bifrost_tiler.prefix = *tiler_prefix;
2047 bifrost_tiler.tiler.primitive_size = *primitive_size;
2048 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2049 bifrost_tiler.postfix = *tiler_postfix;
2050 tp = &bifrost_tiler;
2051 tp_size = sizeof(bifrost_tiler);
2052 } else {
2053 midgard_vertex.prefix = *vertex_prefix;
2054 midgard_vertex.postfix = *vertex_postfix;
2055 vp = &midgard_vertex;
2056 vp_size = sizeof(midgard_vertex);
2057
2058 midgard_tiler.prefix = *tiler_prefix;
2059 midgard_tiler.postfix = *tiler_postfix;
2060 midgard_tiler.primitive_size = *primitive_size;
2061 tp = &midgard_tiler;
2062 tp_size = sizeof(midgard_tiler);
2063 }
2064
2065 if (wallpapering) {
2066 /* Inject in reverse order, with "predicted" job indices.
2067 * THIS IS A HACK XXX */
2068 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2069 batch->scoreboard.job_index + 2, tp, tp_size, true);
2070 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2071 vp, vp_size, true);
2072 return;
2073 }
2074
2075 /* If rasterizer discard is enabled, only submit the vertex job */
2076
2077 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2078 vp, vp_size, false);
2079
2080 if (ctx->rasterizer->base.rasterizer_discard)
2081 return;
2082
2083 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2084 false);
2085 }
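/* Note on the chaining above: panfrost_new_job returns the vertex job's index,
 * which is passed as the tiler job's dependency so tiling only starts once
 * vertex shading has finished; under rasterizer discard only the vertex job is
 * submitted, and the wallpaper path instead injects the pair in reverse order
 * with hand-predicted indices. */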
2086
2087 /* TODO: stop hardcoding this */
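/* 96 uint16_t values (48 x/y pairs) are uploaded below; the (128, 128)
 * entries presumably encode the pixel centre in 1/256-pixel units, but the
 * table is not yet derived from the framebuffer's sample count. */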
2088 mali_ptr
2089 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2090 {
2091 uint16_t locations[] = {
2092 128, 128,
2093 0, 256,
2094 0, 256,
2095 0, 256,
2096 0, 256,
2097 0, 256,
2098 0, 256,
2099 0, 256,
2100 0, 256,
2101 0, 256,
2102 0, 256,
2103 0, 256,
2104 0, 256,
2105 0, 256,
2106 0, 256,
2107 0, 256,
2108 0, 256,
2109 0, 256,
2110 0, 256,
2111 0, 256,
2112 0, 256,
2113 0, 256,
2114 0, 256,
2115 0, 256,
2116 0, 256,
2117 0, 256,
2118 0, 256,
2119 0, 256,
2120 0, 256,
2121 0, 256,
2122 0, 256,
2123 0, 256,
2124 128, 128,
2125 0, 0,
2126 0, 0,
2127 0, 0,
2128 0, 0,
2129 0, 0,
2130 0, 0,
2131 0, 0,
2132 0, 0,
2133 0, 0,
2134 0, 0,
2135 0, 0,
2136 0, 0,
2137 0, 0,
2138 0, 0,
2139 0, 0,
2140 };
2141
2142 return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
2143 }