panfrost: Explicitly handle nr_cbufs=0 case
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 struct mali_shared_memory shared = {
62 .shared_workgroup_count = ~0,
63 };
64
65 if (batch->stack_size) {
66 struct panfrost_bo *stack =
67 panfrost_batch_get_scratchpad(batch, batch->stack_size,
68 dev->thread_tls_alloc,
69 dev->core_count);
70
71 shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
72 shared.scratchpad = stack->gpu;
73 }
74
75 postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
76 }
77
78 static void
79 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_postfix *postfix)
81 {
82 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
83 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
84 }
85
86 static void
87 panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
88 struct mali_vertex_tiler_prefix *prefix,
89 struct mali_vertex_tiler_postfix *postfix)
90 {
91 postfix->gl_enables |= 0x7;
92 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
93 rasterizer->base.front_ccw);
94 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
95 (rasterizer->base.cull_face & PIPE_FACE_FRONT));
96 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
97 (rasterizer->base.cull_face & PIPE_FACE_BACK));
98 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
99 rasterizer->base.flatshade_first);
100 }
101
102 void
103 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
104 struct mali_vertex_tiler_prefix *prefix,
105 union midgard_primitive_size *primitive_size)
106 {
107 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
108
109 if (!panfrost_writes_point_size(ctx)) {
110 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
111 rasterizer->base.point_size :
112 rasterizer->base.line_width;
113
114 primitive_size->constant = val;
115 }
116 }
117
118 static void
119 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
120 struct mali_vertex_tiler_postfix *postfix)
121 {
122 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
123 if (ctx->occlusion_query) {
124 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
125 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
126 PAN_BO_ACCESS_SHARED |
127 PAN_BO_ACCESS_RW |
128 PAN_BO_ACCESS_FRAGMENT);
129 } else {
130 postfix->occlusion_counter = 0;
131 }
132 }
133
134 void
135 panfrost_vt_init(struct panfrost_context *ctx,
136 enum pipe_shader_type stage,
137 struct mali_vertex_tiler_prefix *prefix,
138 struct mali_vertex_tiler_postfix *postfix)
139 {
140 struct panfrost_device *device = pan_device(ctx->base.screen);
141
142 if (!ctx->shader[stage])
143 return;
144
145 memset(prefix, 0, sizeof(*prefix));
146 memset(postfix, 0, sizeof(*postfix));
147
148 if (device->quirks & IS_BIFROST) {
149 postfix->gl_enables = 0x2;
150 panfrost_vt_emit_shared_memory(ctx, postfix);
151 } else {
152 postfix->gl_enables = 0x6;
153 panfrost_vt_attach_framebuffer(ctx, postfix);
154 }
155
156 if (stage == PIPE_SHADER_FRAGMENT) {
157 panfrost_vt_update_occlusion_query(ctx, postfix);
158 panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
159 }
160 }
161
162 static unsigned
163 panfrost_translate_index_size(unsigned size)
164 {
165 switch (size) {
166 case 1:
167 return MALI_DRAW_INDEXED_UINT8;
168
169 case 2:
170 return MALI_DRAW_INDEXED_UINT16;
171
172 case 4:
173 return MALI_DRAW_INDEXED_UINT32;
174
175 default:
176 unreachable("Invalid index size");
177 }
178 }
179
180 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
181 * good for the duration of the draw (transient), though it may last longer. Also get
182 * the bounds on the index buffer for the range accessed by the draw. We do
183 * these operations together because there are natural optimizations which
184 * require them to be together. */
185
186 static mali_ptr
187 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
188 const struct pipe_draw_info *info,
189 unsigned *min_index, unsigned *max_index)
190 {
191 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
192 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
193 off_t offset = info->start * info->index_size;
194 bool needs_indices = true;
195 mali_ptr out = 0;
196
197 if (info->max_index != ~0u) {
198 *min_index = info->min_index;
199 *max_index = info->max_index;
200 needs_indices = false;
201 }
202
203 if (!info->has_user_indices) {
204 /* Only resources can be directly mapped */
205 panfrost_batch_add_bo(batch, rsrc->bo,
206 PAN_BO_ACCESS_SHARED |
207 PAN_BO_ACCESS_READ |
208 PAN_BO_ACCESS_VERTEX_TILER);
209 out = rsrc->bo->gpu + offset;
210
211 /* Check the cache */
212 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
213 info->start,
214 info->count,
215 min_index,
216 max_index);
217 } else {
218 /* Otherwise, we need to upload to transient memory */
219 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
220 out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
221 info->count *
222 info->index_size);
223 }
224
225 if (needs_indices) {
226 /* Fallback */
227 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
228
229 if (!info->has_user_indices)
230 panfrost_minmax_cache_add(rsrc->index_cache,
231 info->start, info->count,
232 *min_index, *max_index);
233 }
234
235 return out;
236 }
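/* Note: the bounds computed above feed panfrost_vt_set_draw_info() below, where
 * *vertex_count = max_index - min_index + 1 and offset_bias_correction is set to
 * -min_index, so only the range of vertices actually referenced by the indices
 * needs to be processed. */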
237
238 void
239 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
240 const struct pipe_draw_info *info,
241 enum mali_draw_mode draw_mode,
242 struct mali_vertex_tiler_postfix *vertex_postfix,
243 struct mali_vertex_tiler_prefix *tiler_prefix,
244 struct mali_vertex_tiler_postfix *tiler_postfix,
245 unsigned *vertex_count,
246 unsigned *padded_count)
247 {
248 tiler_prefix->draw_mode = draw_mode;
249
250 unsigned draw_flags = 0;
251
252 if (panfrost_writes_point_size(ctx))
253 draw_flags |= MALI_DRAW_VARYING_SIZE;
254
255 if (info->primitive_restart)
256 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
257
258 /* These don't make much sense */
259
260 draw_flags |= 0x3000;
261
262 if (info->index_size) {
263 unsigned min_index = 0, max_index = 0;
264
265 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
266 info,
267 &min_index,
268 &max_index);
269
270 /* Use the corresponding values */
271 *vertex_count = max_index - min_index + 1;
272 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
273 tiler_prefix->offset_bias_correction = -min_index;
274 tiler_prefix->index_count = MALI_POSITIVE(info->count);
275 draw_flags |= panfrost_translate_index_size(info->index_size);
276 } else {
277 tiler_prefix->indices = 0;
278 *vertex_count = ctx->vertex_count;
279 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
280 tiler_prefix->offset_bias_correction = 0;
281 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
282 }
283
284 tiler_prefix->unknown_draw = draw_flags;
285
286 /* Encode the padded vertex count */
287
288 if (info->instance_count > 1) {
289 *padded_count = panfrost_padded_vertex_count(*vertex_count);
290
291 unsigned shift = __builtin_ctz(ctx->padded_count);
292 unsigned k = ctx->padded_count >> (shift + 1);
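/* Illustration: instance_shift/instance_odd encode padded_count as
 * (2k + 1) << shift. E.g. ctx->padded_count = 12 gives shift = ctz(12) = 2
 * and k = 12 >> 3 = 1, since 12 = (2*1 + 1) << 2. */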
293
294 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
295 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
296 } else {
297 *padded_count = *vertex_count;
298
299 /* Reset instancing state */
300 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
301 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
302 }
303 }
304
305 static void
306 panfrost_shader_meta_init(struct panfrost_context *ctx,
307 enum pipe_shader_type st,
308 struct mali_shader_meta *meta)
309 {
310 const struct panfrost_device *dev = pan_device(ctx->base.screen);
311 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
312
313 memset(meta, 0, sizeof(*meta));
314 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
315 meta->attribute_count = ss->attribute_count;
316 meta->varying_count = ss->varying_count;
317 meta->texture_count = ctx->sampler_view_count[st];
318 meta->sampler_count = ctx->sampler_count[st];
319
320 if (dev->quirks & IS_BIFROST) {
321 if (st == PIPE_SHADER_VERTEX)
322 meta->bifrost1.unk1 = 0x800000;
323 else {
324 /* First clause ATEST |= 0x4000000.
325 * Less than 32 regs |= 0x200 */
326 meta->bifrost1.unk1 = 0x950020;
327 }
328
329 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
330 if (st == PIPE_SHADER_VERTEX)
331 meta->bifrost2.preload_regs = 0xC0;
332 else {
333 meta->bifrost2.preload_regs = 0x1;
334 SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
335 }
336
337 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
338 ss->uniform_cutoff);
339 } else {
340 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
341 ss->uniform_cutoff);
342 meta->midgard1.work_count = ss->work_reg_count;
343
344 /* TODO: This is not conformant on ES3 */
345 meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
346
347 meta->midgard1.flags_lo = 0x20;
348 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
349
350 SET_BIT(meta->midgard1.flags_lo, MALI_WRITES_GLOBAL, ss->writes_global);
351 }
352 }
353
354 static unsigned
355 translate_tex_wrap(enum pipe_tex_wrap w)
356 {
357 switch (w) {
358 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
359 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
360 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
361 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
362 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
363 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
364 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
365 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
366 default: unreachable("Invalid wrap");
367 }
368 }
369
370 /* The hardware compares in the wrong order, so we have to flip before
371 * encoding. Yes, really. */
372
373 static enum mali_func
374 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
375 {
376 if (!cso->compare_mode)
377 return MALI_FUNC_NEVER;
378
379 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
380 return panfrost_flip_compare_func(f);
381 }
382
383 static enum mali_mipmap_mode
384 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
385 {
386 switch (f) {
387 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
388 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
389 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
390 default: unreachable("Invalid");
391 }
392 }
393
394 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
395 struct mali_midgard_sampler_packed *hw)
396 {
397 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
398 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
399 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
400 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
401 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
402 cfg.normalized_coordinates = cso->normalized_coords;
403
404 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
405
406 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
407
408 /* If necessary, we disable mipmapping in the sampler descriptor by
409 * clamping the LOD as tight as possible (from 0 to epsilon,
410 * essentially -- remember these are fixed point numbers, so
411 * epsilon=1/256) */
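/* Worked example, assuming 8 fractional bits (per the epsilon = 1/256 above):
 * min_lod = 2.0 packs to minimum_lod = 512, so maximum_lod becomes 513, i.e. a
 * maximum LOD of roughly 2.0039 -- effectively a single mip level. */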
412
413 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
414 cfg.minimum_lod + 1 :
415 FIXED_16(cso->max_lod, false);
416
417 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
418 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
419 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
420
421 cfg.compare_function = panfrost_sampler_compare_func(cso);
422 cfg.seamless_cube_map = cso->seamless_cube_map;
423
424 cfg.border_color_r = cso->border_color.f[0];
425 cfg.border_color_g = cso->border_color.f[1];
426 cfg.border_color_b = cso->border_color.f[2];
427 cfg.border_color_a = cso->border_color.f[3];
428 }
429 }
430
431 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
432 struct mali_bifrost_sampler_packed *hw)
433 {
434 pan_pack(hw, BIFROST_SAMPLER, cfg) {
435 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
436 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
437 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
438 cfg.normalized_coordinates = cso->normalized_coords;
439
440 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
441 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
442 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
443
444 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
445 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
446 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
447
448 cfg.compare_function = panfrost_sampler_compare_func(cso);
449 cfg.seamless_cube_map = cso->seamless_cube_map;
450 }
451 }
452
453 static void
454 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
455 struct mali_shader_meta *fragmeta)
456 {
457 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
458
459 bool msaa = rast->multisample;
460
461 /* TODO: Sample size */
462 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
463 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
464
465 struct panfrost_shader_state *fs;
466 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
467
468 /* EXT_shader_framebuffer_fetch requires the shader to be run
469 * per-sample when outputs are read. */
470 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
471 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
472
473 fragmeta->depth_units = rast->offset_units * 2.0f;
474 fragmeta->depth_factor = rast->offset_scale;
475
476 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
477
478 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
479 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
480
481 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
482 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
483 }
484
485 static void
486 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
487 struct mali_shader_meta *fragmeta)
488 {
489 const struct panfrost_zsa_state *so = ctx->depth_stencil;
490
491 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
492 so->base.stencil[0].enabled);
493
494 fragmeta->stencil_mask_front = so->stencil_mask_front;
495 fragmeta->stencil_mask_back = so->stencil_mask_back;
496
497 /* Bottom bits for stencil ref, exactly one word */
498 fragmeta->stencil_front.opaque[0] = so->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
499
500 /* If back-stencil is not enabled, use the front values */
501
502 if (so->base.stencil[1].enabled)
503 fragmeta->stencil_back.opaque[0] = so->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
504 else
505 fragmeta->stencil_back = fragmeta->stencil_front;
506
507 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
508 so->base.depth.writemask);
509
510 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
511 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(
512 so->base.depth.enabled ? so->base.depth.func : PIPE_FUNC_ALWAYS));
513 }
514
515 static bool
516 panfrost_fs_required(
517 struct panfrost_shader_state *fs,
518 struct panfrost_blend_final *blend,
519 unsigned rt_count)
520 {
521 /* If we generally have side effects */
522 if (fs->fs_sidefx)
523 return true;
524
525 /* If colour is written we need to execute */
526 for (unsigned i = 0; i < rt_count; ++i) {
527 if (!blend[i].no_colour)
528 return true;
529 }
530
531 /* If depth is written and not implied we need to execute.
532 * TODO: Predicate on Z/S writes being enabled */
533 return (fs->writes_depth || fs->writes_stencil);
534 }
535
536 static void
537 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
538 struct mali_shader_meta *fragmeta,
539 void *rts)
540 {
541 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
542 const struct panfrost_device *dev = pan_device(ctx->base.screen);
543 struct panfrost_shader_state *fs;
544 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
545
546 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
547 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
548 !ctx->blend->base.dither);
549
550 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
551 ctx->blend->base.alpha_to_coverage);
552
553 /* Get blending setup */
554 unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
555
556 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
557
558 for (unsigned c = 0; c < rt_count; ++c)
559 blend[c] = panfrost_get_blend_for_context(ctx, c);
560
561 /* Disable shader execution if we can */
562 if (dev->quirks & MIDGARD_SHADERLESS
563 && !panfrost_fs_required(fs, blend, rt_count)) {
564 fragmeta->shader = 0;
565 fragmeta->attribute_count = 0;
566 fragmeta->varying_count = 0;
567 fragmeta->texture_count = 0;
568 fragmeta->sampler_count = 0;
569
570 /* This feature is not known to work on Bifrost */
571 fragmeta->midgard1.work_count = 1;
572 fragmeta->midgard1.uniform_count = 0;
573 fragmeta->midgard1.uniform_buffer_count = 0;
574 }
575
576 /* If there is a blend shader, work registers are shared. We impose 8
577 * work registers as a limit for blend shaders. Should be lower XXX */
578
579 if (!(dev->quirks & IS_BIFROST)) {
580 for (unsigned c = 0; c < rt_count; ++c) {
581 if (blend[c].is_shader) {
582 fragmeta->midgard1.work_count =
583 MAX2(fragmeta->midgard1.work_count, 8);
584 }
585 }
586 }
587
588 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
589 * copied to the blend_meta appended (by convention), but this is the
590 * field actually read by the hardware. (Or maybe both are read...?).
591 * Specify the last RTi with a blend shader. */
592
593 fragmeta->blend.shader = 0;
594
595 for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
596 if (!blend[rt].is_shader)
597 continue;
598
599 fragmeta->blend.shader = blend[rt].shader.gpu |
600 blend[rt].shader.first_tag;
601 break;
602 }
603
604 if (dev->quirks & MIDGARD_SFBD) {
605 /* On platforms with only a single render target, the blend
606 * information is inside the shader meta itself. We additionally
607 * need to signal CAN_DISCARD for nontrivial blend modes (so
608 * we're able to read back the destination buffer) */
609
610 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
611 blend[0].is_shader);
612
613 if (!blend[0].is_shader) {
614 fragmeta->blend.equation = *blend[0].equation.equation;
615 fragmeta->blend.constant = blend[0].equation.constant;
616 }
617
618 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
619 !blend[0].no_blending || fs->can_discard);
620
621 batch->draws |= PIPE_CLEAR_COLOR0;
622 return;
623 }
624
625 if (dev->quirks & IS_BIFROST) {
626 bool no_blend = true;
627
628 for (unsigned i = 0; i < rt_count; ++i)
629 no_blend &= (blend[i].no_blending | blend[i].no_colour);
630
631 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
632 !fs->can_discard && !fs->writes_depth && no_blend);
633 }
634
635 /* Additional blend descriptor tacked on for jobs using MFBD */
636
637 struct bifrost_blend_rt *brts = rts;
638 struct midgard_blend_rt *mrts = rts;
639
640 /* Disable blending for depth-only on Bifrost */
641
642 if (rt_count == 0 && dev->quirks & IS_BIFROST)
643 brts[0].unk2 = 0x3;
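/* This is the explicit nr_cbufs = 0 handling named in the commit title: with no
 * colour buffers bound, the per-RT loop below never executes, but brts[0] is
 * still valid storage (the emit path allocates descriptors for
 * MAX2(nr_cbufs, 1) render targets), so it is initialised here. */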
644
645 for (unsigned i = 0; i < rt_count; ++i) {
646 unsigned flags = 0;
647
648 if (!blend[i].no_colour) {
649 flags = 0x200;
650 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
651
652 bool is_srgb = util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
653
654 SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
655 SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
656 SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
657 SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
658 }
659
660 if (dev->quirks & IS_BIFROST) {
661 brts[i].flags = flags;
662
663 if (blend[i].is_shader) {
664 /* The blend shader's address needs to be at
665 * the same top 32 bit as the fragment shader.
666 * TODO: Ensure that's always the case.
667 */
668 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
669 (fs->bo->gpu & (0xffffffffull << 32)));
670 brts[i].shader = blend[i].shader.gpu;
671 brts[i].unk2 = 0x0;
672 } else {
673 enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
674 const struct util_format_description *format_desc;
675 format_desc = util_format_description(format);
676
677 brts[i].equation = *blend[i].equation.equation;
678
679 /* TODO: this is a bit more complicated */
680 brts[i].constant = blend[i].equation.constant;
681
682 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
683
684 /* 0x19 disables blending and forces REPLACE
685 * mode (equivalent to rgb_mode = alpha_mode =
686 * 0x122, colour mask = 0xF). 0x1a allows
687 * blending. */
688 brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
689
690 brts[i].shader_type = fs->blend_types[i];
691 }
692 } else {
693 mrts[i].flags = flags;
694
695 if (blend[i].is_shader) {
696 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
697 } else {
698 mrts[i].blend.equation = *blend[i].equation.equation;
699 mrts[i].blend.constant = blend[i].equation.constant;
700 }
701 }
702 }
703 }
704
705 static void
706 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
707 struct mali_shader_meta *fragmeta,
708 void *rts)
709 {
710 const struct panfrost_device *dev = pan_device(ctx->base.screen);
711 struct panfrost_shader_state *fs;
712
713 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
714
715 bool msaa = ctx->rasterizer->base.multisample;
716 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
717
718 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
719 fragmeta->unknown2_4 = 0x4e0;
720
721 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
722 * is required (independent of 32-bit/64-bit descriptors), or why it's
723 * not used on later GPU revisions. Otherwise, all shader jobs fault on
724 * these earlier chips (perhaps this is a chicken bit of some kind).
725 * More investigation is needed. */
726
727 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
728
729 if (dev->quirks & IS_BIFROST) {
730 /* TODO */
731 } else {
732 /* Depending on whether it's legal to do so in the given shader, we try to
733 * enable early-z testing. TODO: respect e-z force */
734
735 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
736 !fs->can_discard && !fs->writes_global &&
737 !fs->writes_depth && !fs->writes_stencil &&
738 !ctx->blend->base.alpha_to_coverage);
739
740 /* Add the writes Z/S flags if needed. */
741 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
742 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
743
744 /* Any time texturing is used, derivatives are implicitly calculated,
745 * so we need to enable helper invocations */
746
747 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
748 fs->helper_invocations);
749
750 /* If discard is enabled, which bit we set to convey this
751 * depends on if depth/stencil is used for the draw or not.
752 * Just one of depth OR stencil is enough to trigger this. */
753
754 const struct pipe_depth_stencil_alpha_state *zsa = &ctx->depth_stencil->base;
755 bool zs_enabled =
756 fs->writes_depth || fs->writes_stencil ||
757 (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS) ||
758 zsa->stencil[0].enabled;
759
760 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
761 fs->outputs_read || (!zs_enabled && fs->can_discard));
762 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
763 }
764
765 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
766 panfrost_frag_meta_zsa_update(ctx, fragmeta);
767 panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
768 }
769
770 void
771 panfrost_emit_shader_meta(struct panfrost_batch *batch,
772 enum pipe_shader_type st,
773 struct mali_vertex_tiler_postfix *postfix)
774 {
775 struct panfrost_context *ctx = batch->ctx;
776 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
777
778 if (!ss) {
779 postfix->shader = 0;
780 return;
781 }
782
783 struct mali_shader_meta meta;
784
785 panfrost_shader_meta_init(ctx, st, &meta);
786
787 /* Add the shader BO to the batch. */
788 panfrost_batch_add_bo(batch, ss->bo,
789 PAN_BO_ACCESS_PRIVATE |
790 PAN_BO_ACCESS_READ |
791 panfrost_bo_access_for_stage(st));
792
793 mali_ptr shader_ptr;
794
795 if (st == PIPE_SHADER_FRAGMENT) {
796 struct panfrost_device *dev = pan_device(ctx->base.screen);
797 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
798 size_t desc_size = sizeof(meta);
799 void *rts = NULL;
800 struct panfrost_transfer xfer;
801 unsigned rt_size;
802
803 if (dev->quirks & MIDGARD_SFBD)
804 rt_size = 0;
805 else if (dev->quirks & IS_BIFROST)
806 rt_size = sizeof(struct bifrost_blend_rt);
807 else
808 rt_size = sizeof(struct midgard_blend_rt);
809
810 desc_size += rt_size * rt_count;
811
812 if (rt_size)
813 rts = rzalloc_size(ctx, rt_size * rt_count);
814
815 panfrost_frag_shader_meta_init(ctx, &meta, rts);
816
817 xfer = panfrost_pool_alloc(&batch->pool, desc_size);
818
819 memcpy(xfer.cpu, &meta, sizeof(meta));
820 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
821
822 if (rt_size)
823 ralloc_free(rts);
824
825 shader_ptr = xfer.gpu;
826 } else {
827 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
828 sizeof(meta));
829 }
830
831 postfix->shader = shader_ptr;
832 }
833
834 void
835 panfrost_emit_viewport(struct panfrost_batch *batch,
836 struct mali_vertex_tiler_postfix *tiler_postfix)
837 {
838 struct panfrost_context *ctx = batch->ctx;
839 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
840 const struct pipe_scissor_state *ss = &ctx->scissor;
841 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
842 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
843
844 /* Derive min/max from translate/scale. Note since |x| >= 0 by
845 * definition, we have that -|x| <= |x| hence translate - |scale| <=
846 * translate + |scale|, so the ordering is correct here. */
847 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
848 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
849 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
850 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
851 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
852 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
853
854 /* Scissor to the intersection of viewport and to the scissor, clamped
855 * to the framebuffer */
856
857 unsigned minx = MIN2(fb->width, vp_minx);
858 unsigned maxx = MIN2(fb->width, vp_maxx);
859 unsigned miny = MIN2(fb->height, vp_miny);
860 unsigned maxy = MIN2(fb->height, vp_maxy);
861
862 if (ss && rast->scissor) {
863 minx = MAX2(ss->minx, minx);
864 miny = MAX2(ss->miny, miny);
865 maxx = MIN2(ss->maxx, maxx);
866 maxy = MIN2(ss->maxy, maxy);
867 }
868
869 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
870
871 pan_pack(T.cpu, VIEWPORT, cfg) {
872 cfg.scissor_minimum_x = minx;
873 cfg.scissor_minimum_y = miny;
874 cfg.scissor_maximum_x = maxx - 1;
875 cfg.scissor_maximum_y = maxy - 1;
876
877 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
878 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
879 }
880
881 tiler_postfix->viewport = T.gpu;
882 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
883 }
884
885 static mali_ptr
886 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
887 enum pipe_shader_type st,
888 struct panfrost_constant_buffer *buf,
889 unsigned index)
890 {
891 struct pipe_constant_buffer *cb = &buf->cb[index];
892 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
893
894 if (rsrc) {
895 panfrost_batch_add_bo(batch, rsrc->bo,
896 PAN_BO_ACCESS_SHARED |
897 PAN_BO_ACCESS_READ |
898 panfrost_bo_access_for_stage(st));
899
900 /* Alignment guaranteed by
901 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
902 return rsrc->bo->gpu + cb->buffer_offset;
903 } else if (cb->user_buffer) {
904 return panfrost_pool_upload(&batch->pool,
905 cb->user_buffer +
906 cb->buffer_offset,
907 cb->buffer_size);
908 } else {
909 unreachable("No constant buffer");
910 }
911 }
912
913 struct sysval_uniform {
914 union {
915 float f[4];
916 int32_t i[4];
917 uint32_t u[4];
918 uint64_t du[2];
919 };
920 };
921
922 static void
923 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
924 struct sysval_uniform *uniform)
925 {
926 struct panfrost_context *ctx = batch->ctx;
927 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
928
929 uniform->f[0] = vp->scale[0];
930 uniform->f[1] = vp->scale[1];
931 uniform->f[2] = vp->scale[2];
932 }
933
934 static void
935 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
936 struct sysval_uniform *uniform)
937 {
938 struct panfrost_context *ctx = batch->ctx;
939 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
940
941 uniform->f[0] = vp->translate[0];
942 uniform->f[1] = vp->translate[1];
943 uniform->f[2] = vp->translate[2];
944 }
945
946 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
947 enum pipe_shader_type st,
948 unsigned int sysvalid,
949 struct sysval_uniform *uniform)
950 {
951 struct panfrost_context *ctx = batch->ctx;
952 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
953 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
954 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
955 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
956
957 assert(dim);
958 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
959
960 if (dim > 1)
961 uniform->i[1] = u_minify(tex->texture->height0,
962 tex->u.tex.first_level);
963
964 if (dim > 2)
965 uniform->i[2] = u_minify(tex->texture->depth0,
966 tex->u.tex.first_level);
967
968 if (is_array)
969 uniform->i[dim] = tex->texture->array_size;
970 }
971
972 static void
973 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
974 enum pipe_shader_type st,
975 unsigned ssbo_id,
976 struct sysval_uniform *uniform)
977 {
978 struct panfrost_context *ctx = batch->ctx;
979
980 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
981 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
982
983 /* Compute address */
984 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
985
986 panfrost_batch_add_bo(batch, bo,
987 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
988 panfrost_bo_access_for_stage(st));
989
990 /* Upload address and size as sysval */
991 uniform->du[0] = bo->gpu + sb.buffer_offset;
992 uniform->u[2] = sb.buffer_size;
993 }
994
995 static void
996 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
997 enum pipe_shader_type st,
998 unsigned samp_idx,
999 struct sysval_uniform *uniform)
1000 {
1001 struct panfrost_context *ctx = batch->ctx;
1002 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1003
1004 uniform->f[0] = sampl->min_lod;
1005 uniform->f[1] = sampl->max_lod;
1006 uniform->f[2] = sampl->lod_bias;
1007
1008 /* Even without any errata, Midgard represents "no mipmapping" as
1009 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1010 * panfrost_create_sampler_state which also explains our choice of
1011 * epsilon value (again to keep behaviour consistent) */
1012
1013 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1014 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1015 }
1016
1017 static void
1018 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1019 struct sysval_uniform *uniform)
1020 {
1021 struct panfrost_context *ctx = batch->ctx;
1022
1023 uniform->u[0] = ctx->compute_grid->grid[0];
1024 uniform->u[1] = ctx->compute_grid->grid[1];
1025 uniform->u[2] = ctx->compute_grid->grid[2];
1026 }
1027
1028 static void
1029 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1030 struct panfrost_shader_state *ss,
1031 enum pipe_shader_type st)
1032 {
1033 struct sysval_uniform *uniforms = (void *)buf;
1034
1035 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1036 int sysval = ss->sysval[i];
1037
1038 switch (PAN_SYSVAL_TYPE(sysval)) {
1039 case PAN_SYSVAL_VIEWPORT_SCALE:
1040 panfrost_upload_viewport_scale_sysval(batch,
1041 &uniforms[i]);
1042 break;
1043 case PAN_SYSVAL_VIEWPORT_OFFSET:
1044 panfrost_upload_viewport_offset_sysval(batch,
1045 &uniforms[i]);
1046 break;
1047 case PAN_SYSVAL_TEXTURE_SIZE:
1048 panfrost_upload_txs_sysval(batch, st,
1049 PAN_SYSVAL_ID(sysval),
1050 &uniforms[i]);
1051 break;
1052 case PAN_SYSVAL_SSBO:
1053 panfrost_upload_ssbo_sysval(batch, st,
1054 PAN_SYSVAL_ID(sysval),
1055 &uniforms[i]);
1056 break;
1057 case PAN_SYSVAL_NUM_WORK_GROUPS:
1058 panfrost_upload_num_work_groups_sysval(batch,
1059 &uniforms[i]);
1060 break;
1061 case PAN_SYSVAL_SAMPLER:
1062 panfrost_upload_sampler_sysval(batch, st,
1063 PAN_SYSVAL_ID(sysval),
1064 &uniforms[i]);
1065 break;
1066 default:
1067 assert(0);
1068 }
1069 }
1070 }
1071
1072 static const void *
1073 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1074 unsigned index)
1075 {
1076 struct pipe_constant_buffer *cb = &buf->cb[index];
1077 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1078
1079 if (rsrc)
1080 return rsrc->bo->cpu;
1081 else if (cb->user_buffer)
1082 return cb->user_buffer;
1083 else
1084 unreachable("No constant buffer");
1085 }
1086
1087 void
1088 panfrost_emit_const_buf(struct panfrost_batch *batch,
1089 enum pipe_shader_type stage,
1090 struct mali_vertex_tiler_postfix *postfix)
1091 {
1092 struct panfrost_context *ctx = batch->ctx;
1093 struct panfrost_shader_variants *all = ctx->shader[stage];
1094
1095 if (!all)
1096 return;
1097
1098 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1099
1100 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1101
1102 /* Uniforms are implicitly UBO #0 */
1103 bool has_uniforms = buf->enabled_mask & (1 << 0);
1104
1105 /* Allocate room for the sysval and the uniforms */
1106 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1107 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1108 size_t size = sys_size + uniform_size;
1109 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
1110 size);
1111
1112 /* Upload sysvals requested by the shader */
1113 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1114
1115 /* Upload uniforms */
1116 if (has_uniforms && uniform_size) {
1117 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1118 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1119 }
1120
1121 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1122 * uploaded */
1123
1124 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1125 assert(ubo_count >= 1);
1126
1127 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1128 struct panfrost_transfer ubos = panfrost_pool_alloc(&batch->pool, sz);
1129 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1130
1131 /* Upload uniforms as a UBO */
1132
1133 if (ss->uniform_count) {
1134 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1135 cfg.entries = ss->uniform_count;
1136 cfg.pointer = transfer.gpu;
1137 }
1138 } else {
1139 *ubo_ptr = 0;
1140 }
1141
1142 /* The rest are honest-to-goodness UBOs */
1143
1144 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1145 size_t usz = buf->cb[ubo].buffer_size;
1146 bool enabled = buf->enabled_mask & (1 << ubo);
1147 bool empty = usz == 0;
1148
1149 if (!enabled || empty) {
1150 ubo_ptr[ubo] = 0;
1151 continue;
1152 }
1153
1154 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1155 cfg.entries = DIV_ROUND_UP(usz, 16);
1156 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1157 stage, buf, ubo);
1158 }
1159 }
1160
1161 postfix->uniforms = transfer.gpu;
1162 postfix->uniform_buffers = ubos.gpu;
1163
1164 buf->dirty_mask = 0;
1165 }
1166
1167 void
1168 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1169 const struct pipe_grid_info *info,
1170 struct midgard_payload_vertex_tiler *vtp)
1171 {
1172 struct panfrost_context *ctx = batch->ctx;
1173 struct panfrost_device *dev = pan_device(ctx->base.screen);
1174 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1175 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1176 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1177 128));
1178
1179 unsigned log2_instances =
1180 util_logbase2_ceil(info->grid[0]) +
1181 util_logbase2_ceil(info->grid[1]) +
1182 util_logbase2_ceil(info->grid[2]);
1183
1184 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
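/* Worked example: ss->shared_size = 512 and grid = { 10, 10, 1 } gives
 * single_size = 512, log2_instances = 4 + 4 + 0 = 8, so shared_size =
 * 512 * 256 * core_count bytes of scratch for the batch. */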
1185 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1186 shared_size,
1187 1);
1188
1189 struct mali_shared_memory shared = {
1190 .shared_memory = bo->gpu,
1191 .shared_workgroup_count = log2_instances,
1192 .shared_shift = util_logbase2(single_size) + 1
1193 };
1194
1195 vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
1196 sizeof(shared));
1197 }
1198
1199 static mali_ptr
1200 panfrost_get_tex_desc(struct panfrost_batch *batch,
1201 enum pipe_shader_type st,
1202 struct panfrost_sampler_view *view)
1203 {
1204 if (!view)
1205 return (mali_ptr) 0;
1206
1207 struct pipe_sampler_view *pview = &view->base;
1208 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1209
1210 /* Add the BO to the job so it's retained until the job is done. */
1211
1212 panfrost_batch_add_bo(batch, rsrc->bo,
1213 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1214 panfrost_bo_access_for_stage(st));
1215
1216 panfrost_batch_add_bo(batch, view->bo,
1217 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1218 panfrost_bo_access_for_stage(st));
1219
1220 return view->bo->gpu;
1221 }
1222
1223 static void
1224 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1225 struct pipe_context *pctx)
1226 {
1227 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1228 if (view->texture_bo != rsrc->bo->gpu ||
1229 view->modifier != rsrc->modifier) {
1230 panfrost_bo_unreference(view->bo);
1231 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1232 }
1233 }
1234
1235 void
1236 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1237 enum pipe_shader_type stage,
1238 struct mali_vertex_tiler_postfix *postfix)
1239 {
1240 struct panfrost_context *ctx = batch->ctx;
1241 struct panfrost_device *device = pan_device(ctx->base.screen);
1242
1243 if (!ctx->sampler_view_count[stage])
1244 return;
1245
1246 if (device->quirks & IS_BIFROST) {
1247 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
1248 MALI_BIFROST_TEXTURE_LENGTH *
1249 ctx->sampler_view_count[stage]);
1250
1251 struct mali_bifrost_texture_packed *out =
1252 (struct mali_bifrost_texture_packed *) T.cpu;
1253
1254 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1255 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1256 struct pipe_sampler_view *pview = &view->base;
1257 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1258
1259 panfrost_update_sampler_view(view, &ctx->base);
1260 out[i] = view->bifrost_descriptor;
1261
1262 /* Add the BOs to the job so they are retained until the job is done. */
1263
1264 panfrost_batch_add_bo(batch, rsrc->bo,
1265 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1266 panfrost_bo_access_for_stage(stage));
1267
1268 panfrost_batch_add_bo(batch, view->bo,
1269 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1270 panfrost_bo_access_for_stage(stage));
1271 }
1272
1273 postfix->textures = T.gpu;
1274 } else {
1275 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1276
1277 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1278 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1279
1280 panfrost_update_sampler_view(view, &ctx->base);
1281
1282 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1283 }
1284
1285 postfix->textures = panfrost_pool_upload(&batch->pool,
1286 trampolines,
1287 sizeof(uint64_t) *
1288 ctx->sampler_view_count[stage]);
1289 }
1290 }
1291
1292 void
1293 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1294 enum pipe_shader_type stage,
1295 struct mali_vertex_tiler_postfix *postfix)
1296 {
1297 struct panfrost_context *ctx = batch->ctx;
1298
1299 if (!ctx->sampler_count[stage])
1300 return;
1301
1302 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1303 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1304
1305 size_t sz = desc_size * ctx->sampler_count[stage];
1306 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, sz);
1307 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1308
1309 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1310 out[i] = ctx->samplers[stage][i]->hw;
1311
1312 postfix->sampler_descriptor = T.gpu;
1313 }
1314
1315 void
1316 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1317 struct mali_vertex_tiler_postfix *vertex_postfix)
1318 {
1319 struct panfrost_context *ctx = batch->ctx;
1320 struct panfrost_vertex_state *so = ctx->vertex;
1321
1322 unsigned instance_shift = vertex_postfix->instance_shift;
1323 unsigned instance_odd = vertex_postfix->instance_odd;
1324
1325 /* Worst case: everything is NPOT */
1326
1327 struct panfrost_transfer S = panfrost_pool_alloc(&batch->pool,
1328 MALI_ATTRIBUTE_LENGTH * PIPE_MAX_ATTRIBS * 2);
1329
1330 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
1331 MALI_ATTRIBUTE_LENGTH * (PAN_INSTANCE_ID + 1));
1332
1333 struct mali_attribute_buffer_packed *bufs =
1334 (struct mali_attribute_buffer_packed *) S.cpu;
1335
1336 struct mali_attribute_packed *out =
1337 (struct mali_attribute_packed *) T.cpu;
1338
1339 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1340 unsigned k = 0;
1341
1342 for (unsigned i = 0; i < so->num_elements; ++i) {
1343 /* We map buffers 1:1 with the attributes, which
1344 * means duplicating some vertex buffers (who cares? aside from
1345 * maybe some caching implications but I somehow doubt that
1346 * matters) */
1347
1348 struct pipe_vertex_element *elem = &so->pipe[i];
1349 unsigned vbi = elem->vertex_buffer_index;
1350 attrib_to_buffer[i] = k;
1351
1352 if (!(ctx->vb_mask & (1 << vbi)))
1353 continue;
1354
1355 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1356 struct panfrost_resource *rsrc;
1357
1358 rsrc = pan_resource(buf->buffer.resource);
1359 if (!rsrc)
1360 continue;
1361
1362 /* Add a dependency of the batch on the vertex buffer */
1363 panfrost_batch_add_bo(batch, rsrc->bo,
1364 PAN_BO_ACCESS_SHARED |
1365 PAN_BO_ACCESS_READ |
1366 PAN_BO_ACCESS_VERTEX_TILER);
1367
1368 /* Mask off lower bits, see offset fixup below */
1369 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1370 mali_ptr addr = raw_addr & ~63;
1371
1372 /* Since we advanced the base pointer, we shrink the buffer
1373 * size, but add the offset we subtracted */
1374 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1375 - buf->buffer_offset;
1376
1377 /* When there is a divisor, the hardware-level divisor is
1378 * the product of the instance divisor and the padded count */
1379 unsigned divisor = elem->instance_divisor;
1380 unsigned hw_divisor = ctx->padded_count * divisor;
1381 unsigned stride = buf->stride;
1382
1383 /* If there's a divisor(=1) but no instancing, we want every
1384 * attribute to be the same */
1385
1386 if (divisor && ctx->instance_count == 1)
1387 stride = 0;
1388
1389 if (!divisor || ctx->instance_count <= 1) {
1390 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1391 if (ctx->instance_count > 1)
1392 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1393
1394 cfg.pointer = addr;
1395 cfg.stride = stride;
1396 cfg.size = size;
1397 cfg.divisor_r = instance_shift;
1398 cfg.divisor_p = instance_odd;
1399 }
1400 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1401 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1402 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1403 cfg.pointer = addr;
1404 cfg.stride = stride;
1405 cfg.size = size;
1406 cfg.divisor_r = __builtin_ctz(hw_divisor);
1407 }
1408
1409 } else {
1410 unsigned shift = 0, extra_flags = 0;
1411
1412 unsigned magic_divisor =
1413 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
1414
1415 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1416 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1417 cfg.pointer = addr;
1418 cfg.stride = stride;
1419 cfg.size = size;
1420
1421 cfg.divisor_r = shift;
1422 cfg.divisor_e = extra_flags;
1423 }
1424
1425 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1426 cfg.divisor_numerator = magic_divisor;
1427 cfg.divisor = divisor;
1428 }
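/* The NPOT path presumably uses the standard division-by-invariant-integer
 * trick: rather than a true divide by hw_divisor, the hardware multiplies by
 * the precomputed magic numerator and shifts (the divisor_r/divisor_e fields),
 * which is why panfrost_compute_magic_divisor() returns both a magic constant
 * and a shift/flags pair. */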
1429
1430 ++k;
1431 }
1432
1433 ++k;
1434 }
1435
1436 /* Add special gl_VertexID/gl_InstanceID buffers */
1437
1438 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1439
1440 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1441 cfg.buffer_index = k++;
1442 cfg.format = so->formats[PAN_VERTEX_ID];
1443 }
1444
1445 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1446
1447 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1448 cfg.buffer_index = k++;
1449 cfg.format = so->formats[PAN_INSTANCE_ID];
1450 }
1451
1452 /* Attribute addresses require 64-byte alignment, so let:
1453 *
1454 * base' = base & ~63 = base - (base & 63)
1455 * offset' = offset + (base & 63)
1456 *
1457 * Since base' + offset' = base + offset, these are equivalent
1458 * addressing modes and now base is 64 aligned.
1459 */
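/* Worked example (hypothetical numbers): base = 0x10047, offset = 8 gives
 * base' = 0x10040 and offset' = 8 + 7 = 15; base' + offset' = 0x1004F =
 * base + offset, and base' is now 64-byte aligned. */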
1460
1461 unsigned start = vertex_postfix->offset_start;
1462
1463 for (unsigned i = 0; i < so->num_elements; ++i) {
1464 unsigned vbi = so->pipe[i].vertex_buffer_index;
1465 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1466
1467 /* Adjust by the masked off bits of the offset. Make sure we
1468 * read src_offset from so->hw (which is not GPU visible)
1469 * rather than target (which is) due to caching effects */
1470
1471 unsigned src_offset = so->pipe[i].src_offset;
1472
1473 /* BOs aligned to 4k so guaranteed aligned to 64 */
1474 src_offset += (buf->buffer_offset & 63);
1475
1476 /* Also, somewhat obscurely per-instance data needs to be
1477 * offset in response to a delayed start in an indexed draw */
1478
1479 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1480 src_offset -= buf->stride * start;
1481
1482 pan_pack(out + i, ATTRIBUTE, cfg) {
1483 cfg.buffer_index = attrib_to_buffer[i];
1484 cfg.format = so->formats[i];
1485 cfg.offset = src_offset;
1486 }
1487 }
1488
1489 vertex_postfix->attributes = S.gpu;
1490 vertex_postfix->attribute_meta = T.gpu;
1491 }
1492
1493 static mali_ptr
1494 panfrost_emit_varyings(struct panfrost_batch *batch,
1495 struct mali_attribute_buffer_packed *slot,
1496 unsigned stride, unsigned count)
1497 {
1498 unsigned size = stride * count;
1499 mali_ptr ptr = panfrost_pool_alloc(&batch->invisible_pool, size).gpu;
1500
1501 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1502 cfg.stride = stride;
1503 cfg.size = size;
1504 cfg.pointer = ptr;
1505 }
1506
1507 return ptr;
1508 }
1509
1510 static unsigned
1511 panfrost_streamout_offset(unsigned stride, unsigned offset,
1512 struct pipe_stream_output_target *target)
1513 {
1514 return (target->buffer_offset + (offset * stride * 4)) & 63;
1515 }
1516
1517 static void
1518 panfrost_emit_streamout(struct panfrost_batch *batch,
1519 struct mali_attribute_buffer_packed *slot,
1520 unsigned stride_words, unsigned offset, unsigned count,
1521 struct pipe_stream_output_target *target)
1522 {
1523 unsigned stride = stride_words * 4;
1524 unsigned max_size = target->buffer_size;
1525 unsigned expected_size = stride * count;
1526
1527 /* Grab the BO and bind it to the batch */
1528 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1529
1530 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1531 * the perspective of the TILER and FRAGMENT.
1532 */
1533 panfrost_batch_add_bo(batch, bo,
1534 PAN_BO_ACCESS_SHARED |
1535 PAN_BO_ACCESS_RW |
1536 PAN_BO_ACCESS_VERTEX_TILER |
1537 PAN_BO_ACCESS_FRAGMENT);
1538
1539 /* We will have an offset applied to get alignment */
1540 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
1541
1542 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1543 cfg.pointer = (addr & ~63);
1544 cfg.stride = stride;
1545 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1546 }
1547 }
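/* The low bits masked off the pointer above are not lost: panfrost_streamout_offset()
 * is meant to return the matching "& 63" residue (BOs are 4k aligned), which is later
 * applied as the per-record offset via streamout_offsets[] in pan_emit_vary_xfb, so
 * accesses land at the intended address despite the 64-byte-aligned base. */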
1548
1549 static bool
1550 has_point_coord(unsigned mask, gl_varying_slot loc)
1551 {
1552 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1553 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1554 else if (loc == VARYING_SLOT_PNTC)
1555 return (mask & (1 << 8));
1556 else
1557 return false;
1558 }
1559
1560 /* Helpers for manipulating stream out information so we can pack varyings
1561 * accordingly. Compute the src_offset for a given captured varying */
1562
1563 static struct pipe_stream_output *
1564 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1565 {
1566 for (unsigned i = 0; i < info->num_outputs; ++i) {
1567 if (info->output[i].register_index == loc)
1568 return &info->output[i];
1569 }
1570
1571 unreachable("Varying not captured");
1572 }
1573
1574 static unsigned
1575 pan_varying_size(enum mali_format fmt)
1576 {
1577 unsigned type = MALI_EXTRACT_TYPE(fmt);
1578 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1579 unsigned bits = MALI_EXTRACT_BITS(fmt);
1580 unsigned bpc = 0;
1581
1582 if (bits == MALI_CHANNEL_FLOAT) {
1583 /* No doubles */
1584 bool fp16 = (type == MALI_FORMAT_SINT);
1585 assert(fp16 || (type == MALI_FORMAT_UNORM));
1586
1587 bpc = fp16 ? 2 : 4;
1588 } else {
1589 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1590
1591 /* See the enums */
1592 bits = 1 << bits;
1593 assert(bits >= 8);
1594 bpc = bits / 8;
1595 }
1596
1597 return bpc * chan;
1598 }
1599
1600 /* Indices for named (non-XFB) varyings that are present. These are packed
1601 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1602 * PAN_VARY_*). This has the nice property that you can look up the buffer index
1603 * of a given special field given a shift S by:
1604 *
1605 * idx = popcount(P & ((1 << S) - 1))
1606 *
1607 * That is, count all of the varyings that come earlier in P; that count is
1608 * the buffer index. Likewise, the total number of special
1609 * buffers required is simply popcount(P)
1610 */
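/* Worked example: present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION) |
 * (1 << PAN_VARY_PSIZ) = 0b111. The buffer index of PAN_VARY_PSIZ (S = 2) is
 * popcount(0b111 & 0b011) = 2, and pan_xfb_base(present) = popcount(0b111) = 3,
 * so XFB buffers start at index 3. */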
1611
1612 enum pan_special_varying {
1613 PAN_VARY_GENERAL = 0,
1614 PAN_VARY_POSITION = 1,
1615 PAN_VARY_PSIZ = 2,
1616 PAN_VARY_PNTCOORD = 3,
1617 PAN_VARY_FACE = 4,
1618 PAN_VARY_FRAGCOORD = 5,
1619
1620 /* Keep last */
1621 PAN_VARY_MAX,
1622 };
1623
1624 /* Given a varying, figure out which index it corresponds to */
1625
1626 static inline unsigned
1627 pan_varying_index(unsigned present, enum pan_special_varying v)
1628 {
1629 unsigned mask = (1 << v) - 1;
1630 return util_bitcount(present & mask);
1631 }
1632
1633 /* Get the base offset for XFB buffers, which by convention come after
1634 * everything else. Wrapper function for semantic reasons; by construction this
1635 * is just popcount. */
1636
1637 static inline unsigned
1638 pan_xfb_base(unsigned present)
1639 {
1640 return util_bitcount(present);
1641 }
1642
1643 /* Computes the present mask for varyings so we can start emitting varying records */
1644
1645 static inline unsigned
1646 pan_varying_present(
1647 struct panfrost_shader_state *vs,
1648 struct panfrost_shader_state *fs,
1649 unsigned quirks)
1650 {
1651 /* At the moment we always emit general and position buffers. Not
1652 * strictly necessary but usually harmless */
1653
1654 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1655
1656 /* Enable special buffers by the shader info */
1657
1658 if (vs->writes_point_size)
1659 present |= (1 << PAN_VARY_PSIZ);
1660
1661 if (fs->reads_point_coord)
1662 present |= (1 << PAN_VARY_PNTCOORD);
1663
1664 if (fs->reads_face)
1665 present |= (1 << PAN_VARY_FACE);
1666
1667 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1668 present |= (1 << PAN_VARY_FRAGCOORD);
1669
1670 /* Also, if we have a point sprite, we need a point coord buffer */
1671
1672 for (unsigned i = 0; i < fs->varying_count; i++) {
1673 gl_varying_slot loc = fs->varyings_loc[i];
1674
1675 if (has_point_coord(fs->point_sprite_mask, loc))
1676 present |= (1 << PAN_VARY_PNTCOORD);
1677 }
1678
1679 return present;
1680 }
1681
1682 /* Emitters for varying records */
1683
1684 static void
1685 pan_emit_vary(struct mali_attribute_packed *out,
1686 unsigned present, enum pan_special_varying buf,
1687 unsigned quirks, enum mali_format format,
1688 unsigned offset)
1689 {
1690 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1691 unsigned swizzle = quirks & HAS_SWIZZLES ?
1692 panfrost_get_default_swizzle(nr_channels) :
1693 panfrost_bifrost_swizzle(nr_channels);
1694
1695 pan_pack(out, ATTRIBUTE, cfg) {
1696 cfg.buffer_index = pan_varying_index(present, buf);
1697 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1698 cfg.format = (format << 12) | swizzle;
1699 cfg.offset = offset;
1700 }
1701 }
1702
1703 /* General varying that is unused */
1704
1705 static void
1706 pan_emit_vary_only(struct mali_attribute_packed *out,
1707 unsigned present, unsigned quirks)
1708 {
1709 pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
1710 }
1711
1712 /* Special records */
1713
1714 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1715 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1716 [PAN_VARY_PSIZ] = MALI_R16F,
1717 [PAN_VARY_PNTCOORD] = MALI_R16F,
1718 [PAN_VARY_FACE] = MALI_R32I,
1719 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1720 };
1721
1722 static void
1723 pan_emit_vary_special(struct mali_attribute_packed *out,
1724 unsigned present, enum pan_special_varying buf,
1725 unsigned quirks)
1726 {
1727 assert(buf < PAN_VARY_MAX);
1728 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1729 }
1730
1731 static enum mali_format
1732 pan_xfb_format(enum mali_format format, unsigned nr)
1733 {
1734 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1735 return MALI_R32F | MALI_NR_CHANNELS(nr);
1736 else
1737 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1738 }
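/* Example (illustrative): capturing two components of an fp32 vec4 varying
 * yields MALI_R32F | MALI_NR_CHANNELS(2), i.e. a two-channel 32-bit float
 * record, so the captured width follows the stream output declaration rather
 * than the varying's own channel count. */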
1739
1740 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1741 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1742 * value. */
1743
1744 static void
1745 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1746 unsigned present,
1747 unsigned max_xfb,
1748 unsigned *streamout_offsets,
1749 unsigned quirks,
1750 enum mali_format format,
1751 struct pipe_stream_output o)
1752 {
1753 unsigned swizzle = quirks & HAS_SWIZZLES ?
1754 panfrost_get_default_swizzle(o.num_components) :
1755 panfrost_bifrost_swizzle(o.num_components);
1756
1757 pan_pack(out, ATTRIBUTE, cfg) {
1758 /* XFB buffers come after everything else */
1759 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1760 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1761
1762 /* Override number of channels and precision to highp */
1763 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1764
1765 /* Apply given offsets together */
1766 cfg.offset = (o.dst_offset * 4) /* dwords */
1767 + streamout_offsets[o.output_buffer];
1768 }
1769 }
1770
1771 /* Determine if we should capture a varying for XFB. This requires actually
1772 * having a buffer for it. If we don't capture it, we'll fall back to a general
1773 * varying path (linked or unlinked, possibly discarding the write) */
1774
1775 static bool
1776 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1777 unsigned loc, unsigned max_xfb)
1778 {
1779 if (!(xfb->so_mask & (1ll << loc)))
1780 return false;
1781
1782 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1783 return o->output_buffer < max_xfb;
1784 }
1785
1786 static void
1787 pan_emit_general_varying(struct mali_attribute_packed *out,
1788 struct panfrost_shader_state *other,
1789 struct panfrost_shader_state *xfb,
1790 gl_varying_slot loc,
1791 enum mali_format format,
1792 unsigned present,
1793 unsigned quirks,
1794 unsigned *gen_offsets,
1795 enum mali_format *gen_formats,
1796 unsigned *gen_stride,
1797 unsigned idx,
1798 bool should_alloc)
1799 {
1800 /* Check if we're linked */
1801 signed other_idx = -1;
1802
1803 for (unsigned j = 0; j < other->varying_count; ++j) {
1804 if (other->varyings_loc[j] == loc) {
1805 other_idx = j;
1806 break;
1807 }
1808 }
1809
1810 if (other_idx < 0) {
1811 pan_emit_vary_only(out, present, quirks);
1812 return;
1813 }
1814
1815 unsigned offset = gen_offsets[other_idx];
1816
1817 if (should_alloc) {
1818 /* We're linked, so allocate space via a watermark allocation */
1819 enum mali_format alt = other->varyings[other_idx];
1820
1821 /* Do interpolation at minimum precision */
1822 unsigned size_main = pan_varying_size(format);
1823 unsigned size_alt = pan_varying_size(alt);
1824 unsigned size = MIN2(size_main, size_alt);
1825
1826 /* If a varying is marked for XFB but not actually captured, we
1827 * should match the format to the format that would otherwise
1828 * be used for XFB, since dEQP checks for invariance here. It's
1829 * unclear if this is required by the spec. */
1830
1831 if (xfb->so_mask & (1ull << loc)) {
1832 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1833 format = pan_xfb_format(format, o->num_components);
1834 size = pan_varying_size(format);
1835 } else if (size == size_alt) {
1836 format = alt;
1837 }
1838
1839 gen_offsets[idx] = *gen_stride;
1840 gen_formats[other_idx] = format;
1841 offset = *gen_stride;
1842 *gen_stride += size;
1843 }
1844
1845 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1846 }
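/* Illustrative walk-through, assuming pan_varying_size reports sizes in
 * bytes: if the first linked varying interpolates as an fp32 vec4 (16 bytes)
 * and the second as an fp16 vec2 (4 bytes), the first is placed at offset 0,
 * the second at offset 16, and *gen_stride finishes at 20, which later
 * becomes the stride of the general varying buffer. */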
1847
1848 /* Higher-level wrapper around all of the above, classifying a varying into one
1849 * of the above types */
1850
1851 static void
1852 panfrost_emit_varying(
1853 struct mali_attribute_packed *out,
1854 struct panfrost_shader_state *stage,
1855 struct panfrost_shader_state *other,
1856 struct panfrost_shader_state *xfb,
1857 unsigned present,
1858 unsigned max_xfb,
1859 unsigned *streamout_offsets,
1860 unsigned quirks,
1861 unsigned *gen_offsets,
1862 enum mali_format *gen_formats,
1863 unsigned *gen_stride,
1864 unsigned idx,
1865 bool should_alloc,
1866 bool is_fragment)
1867 {
1868 gl_varying_slot loc = stage->varyings_loc[idx];
1869 enum mali_format format = stage->varyings[idx];
1870
1871 /* Override format to match linkage */
1872 if (!should_alloc && gen_formats[idx])
1873 format = gen_formats[idx];
1874
1875 if (has_point_coord(stage->point_sprite_mask, loc)) {
1876 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1877 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1878 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1879 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1880 } else if (loc == VARYING_SLOT_POS) {
1881 if (is_fragment)
1882 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1883 else
1884 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1885 } else if (loc == VARYING_SLOT_PSIZ) {
1886 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1887 } else if (loc == VARYING_SLOT_PNTC) {
1888 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1889 } else if (loc == VARYING_SLOT_FACE) {
1890 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1891 } else {
1892 pan_emit_general_varying(out, other, xfb, loc, format, present,
1893 quirks, gen_offsets, gen_formats, gen_stride,
1894 idx, should_alloc);
1895 }
1896 }
1897
1898 static void
1899 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1900 unsigned present,
1901 enum pan_special_varying v,
1902 unsigned special)
1903 {
1904 if (present & (1 << v)) {
1905 unsigned idx = pan_varying_index(present, v);
1906
1907 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1908 cfg.special = special;
1909 cfg.type = 0;
1910 }
1911 }
1912 }
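/* Note these records carry no GPU pointer: a present special varying is only
 * tagged with the corresponding "special" enum (point coord, front facing,
 * frag coord), with the values presumably supplied by the hardware, so no
 * backing storage is allocated for it here. */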
1913
1914 void
1915 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1916 unsigned vertex_count,
1917 struct mali_vertex_tiler_postfix *vertex_postfix,
1918 struct mali_vertex_tiler_postfix *tiler_postfix,
1919 union midgard_primitive_size *primitive_size)
1920 {
1921 /* Load the shaders */
1922 struct panfrost_context *ctx = batch->ctx;
1923 struct panfrost_device *dev = pan_device(ctx->base.screen);
1924 struct panfrost_shader_state *vs, *fs;
1925 size_t vs_size, fs_size;
1926
1927 /* Allocate the varying descriptor */
1928
1929 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1930 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1931 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1932 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1933
1934 struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
1935 vs_size +
1936 fs_size);
1937
1938 struct pipe_stream_output_info *so = &vs->stream_output;
1939 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1940
1941 /* Check if this varying is linked by us. This is the case for
1942 * general-purpose, non-captured varyings. If it is, link it. If it's
1943 * not, use the provided stream out information to determine the
1944 * offset, since it was already linked for us. */
1945
1946 unsigned gen_offsets[32];
1947 enum mali_format gen_formats[32];
1948 memset(gen_offsets, 0, sizeof(gen_offsets));
1949 memset(gen_formats, 0, sizeof(gen_formats));
1950
1951 unsigned gen_stride = 0;
1952 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1953 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1954
1955 unsigned streamout_offsets[32];
1956
1957 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1958 streamout_offsets[i] = panfrost_streamout_offset(
1959 so->stride[i],
1960 ctx->streamout.offsets[i],
1961 ctx->streamout.targets[i]);
1962 }
1963
1964 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1965 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1966
1967 for (unsigned i = 0; i < vs->varying_count; i++) {
1968 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1969 ctx->streamout.num_targets, streamout_offsets,
1970 dev->quirks,
1971 gen_offsets, gen_formats, &gen_stride, i, true, false);
1972 }
1973
1974 for (unsigned i = 0; i < fs->varying_count; i++) {
1975 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
1976 ctx->streamout.num_targets, streamout_offsets,
1977 dev->quirks,
1978 gen_offsets, gen_formats, &gen_stride, i, false, true);
1979 }
1980
1981 unsigned xfb_base = pan_xfb_base(present);
1982 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
1983 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets));
1984 struct mali_attribute_buffer_packed *varyings =
1985 (struct mali_attribute_buffer_packed *) T.cpu;
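/* These attribute buffers are laid out in present-mask order (general,
 * position, then any enabled specials), with one extra buffer per active
 * stream-out target appended starting at xfb_base. */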
1986
1987 /* Emit the stream out buffers */
1988
1989 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
1990 ctx->vertex_count);
1991
1992 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1993 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
1994 so->stride[i],
1995 ctx->streamout.offsets[i],
1996 out_count,
1997 ctx->streamout.targets[i]);
1998 }
1999
2000 panfrost_emit_varyings(batch,
2001 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2002 gen_stride, vertex_count);
2003
2004 /* fp32 vec4 gl_Position */
2005 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2006 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2007 sizeof(float) * 4, vertex_count);
2008
2009 if (present & (1 << PAN_VARY_PSIZ)) {
2010 primitive_size->pointer = panfrost_emit_varyings(batch,
2011 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2012 2, vertex_count);
2013 }
2014
2015 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
2016 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
2017 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
2018
2019 vertex_postfix->varyings = T.gpu;
2020 tiler_postfix->varyings = T.gpu;
2021
2022 vertex_postfix->varying_meta = trans.gpu;
2023 tiler_postfix->varying_meta = trans.gpu + vs_size;
2024 }
2025
2026 void
2027 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2028 struct mali_vertex_tiler_prefix *vertex_prefix,
2029 struct mali_vertex_tiler_postfix *vertex_postfix,
2030 struct mali_vertex_tiler_prefix *tiler_prefix,
2031 struct mali_vertex_tiler_postfix *tiler_postfix,
2032 union midgard_primitive_size *primitive_size)
2033 {
2034 struct panfrost_context *ctx = batch->ctx;
2035 struct panfrost_device *device = pan_device(ctx->base.screen);
2036 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2037 struct bifrost_payload_vertex bifrost_vertex = {0,};
2038 struct bifrost_payload_tiler bifrost_tiler = {0,};
2039 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2040 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2041 void *vp, *tp;
2042 size_t vp_size, tp_size;
2043
2044 if (device->quirks & IS_BIFROST) {
2045 bifrost_vertex.prefix = *vertex_prefix;
2046 bifrost_vertex.postfix = *vertex_postfix;
2047 vp = &bifrost_vertex;
2048 vp_size = sizeof(bifrost_vertex);
2049
2050 bifrost_tiler.prefix = *tiler_prefix;
2051 bifrost_tiler.tiler.primitive_size = *primitive_size;
2052 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2053 bifrost_tiler.postfix = *tiler_postfix;
2054 tp = &bifrost_tiler;
2055 tp_size = sizeof(bifrost_tiler);
2056 } else {
2057 midgard_vertex.prefix = *vertex_prefix;
2058 midgard_vertex.postfix = *vertex_postfix;
2059 vp = &midgard_vertex;
2060 vp_size = sizeof(midgard_vertex);
2061
2062 midgard_tiler.prefix = *tiler_prefix;
2063 midgard_tiler.postfix = *tiler_postfix;
2064 midgard_tiler.primitive_size = *primitive_size;
2065 tp = &midgard_tiler;
2066 tp_size = sizeof(midgard_tiler);
2067 }
2068
2069 if (wallpapering) {
2070 /* Inject in reverse order, with "predicted" job indices.
2071 * THIS IS A HACK XXX */
2072 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2073 batch->scoreboard.job_index + 2, tp, tp_size, true);
2074 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2075 vp, vp_size, true);
2076 return;
2077 }
2078
2079 /* If rasterizer discard is enabled, only submit the vertex job */
2080
2081 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2082 vp, vp_size, false);
2083
2084 if (ctx->rasterizer->base.rasterizer_discard)
2085 return;
2086
2087 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2088 false);
2089 }
2090
2091 /* TODO: stop hardcoding this */
2092 mali_ptr
2093 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2094 {
2095 uint16_t locations[] = {
2096 128, 128,
2097 0, 256,
2098 0, 256,
2099 0, 256,
2100 0, 256,
2101 0, 256,
2102 0, 256,
2103 0, 256,
2104 0, 256,
2105 0, 256,
2106 0, 256,
2107 0, 256,
2108 0, 256,
2109 0, 256,
2110 0, 256,
2111 0, 256,
2112 0, 256,
2113 0, 256,
2114 0, 256,
2115 0, 256,
2116 0, 256,
2117 0, 256,
2118 0, 256,
2119 0, 256,
2120 0, 256,
2121 0, 256,
2122 0, 256,
2123 0, 256,
2124 0, 256,
2125 0, 256,
2126 0, 256,
2127 0, 256,
2128 128, 128,
2129 0, 0,
2130 0, 0,
2131 0, 0,
2132 0, 0,
2133 0, 0,
2134 0, 0,
2135 0, 0,
2136 0, 0,
2137 0, 0,
2138 0, 0,
2139 0, 0,
2140 0, 0,
2141 0, 0,
2142 0, 0,
2143 0, 0,
2144 };
2145
2146 return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
2147 }