panfrost: Separate shader/blend descriptor emits
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 struct mali_shared_memory shared = {
62 .shared_workgroup_count = ~0,
63 };
64
65 if (batch->stack_size) {
66 struct panfrost_bo *stack =
67 panfrost_batch_get_scratchpad(batch, batch->stack_size,
68 dev->thread_tls_alloc,
69 dev->core_count);
70
71 shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
72 shared.scratchpad = stack->gpu;
73 }
74
75 postfix->shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared, sizeof(shared), 64);
76 }
77
78 static void
79 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_postfix *postfix)
81 {
82 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
83 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
84 }
85
86 static void
87 panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
88 struct mali_vertex_tiler_prefix *prefix,
89 struct mali_vertex_tiler_postfix *postfix)
90 {
91 postfix->gl_enables |= 0x7;
92 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
93 rasterizer->base.front_ccw);
94 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
95 (rasterizer->base.cull_face & PIPE_FACE_FRONT));
96 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
97 (rasterizer->base.cull_face & PIPE_FACE_BACK));
98 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
99 rasterizer->base.flatshade_first);
100 }
101
102 void
103 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
104 struct mali_vertex_tiler_prefix *prefix,
105 union midgard_primitive_size *primitive_size)
106 {
107 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
108
109 if (!panfrost_writes_point_size(ctx)) {
110 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
111 rasterizer->base.point_size :
112 rasterizer->base.line_width;
113
114 primitive_size->constant = val;
115 }
116 }
117
118 static void
119 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
120 struct mali_vertex_tiler_postfix *postfix)
121 {
122 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
123 if (ctx->occlusion_query) {
124 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
125 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
126 PAN_BO_ACCESS_SHARED |
127 PAN_BO_ACCESS_RW |
128 PAN_BO_ACCESS_FRAGMENT);
129 } else {
130 postfix->occlusion_counter = 0;
131 }
132 }
133
134 void
135 panfrost_vt_init(struct panfrost_context *ctx,
136 enum pipe_shader_type stage,
137 struct mali_vertex_tiler_prefix *prefix,
138 struct mali_vertex_tiler_postfix *postfix)
139 {
140 struct panfrost_device *device = pan_device(ctx->base.screen);
141
142 if (!ctx->shader[stage])
143 return;
144
145 memset(prefix, 0, sizeof(*prefix));
146 memset(postfix, 0, sizeof(*postfix));
147
148 if (device->quirks & IS_BIFROST) {
149 postfix->gl_enables = 0x2;
150 panfrost_vt_emit_shared_memory(ctx, postfix);
151 } else {
152 postfix->gl_enables = 0x6;
153 panfrost_vt_attach_framebuffer(ctx, postfix);
154 }
155
156 if (stage == PIPE_SHADER_FRAGMENT) {
157 panfrost_vt_update_occlusion_query(ctx, postfix);
158 panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
159 }
160 }
161
162 static unsigned
163 panfrost_translate_index_size(unsigned size)
164 {
165 switch (size) {
166 case 1:
167 return MALI_DRAW_INDEXED_UINT8;
168
169 case 2:
170 return MALI_DRAW_INDEXED_UINT16;
171
172 case 4:
173 return MALI_DRAW_INDEXED_UINT32;
174
175 default:
176 unreachable("Invalid index size");
177 }
178 }
179
180 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
181 * good for the duration of the draw (transient), though it could last longer. Also get
182 * the bounds on the index buffer for the range accessed by the draw. We do
183 * these operations together because there are natural optimizations which
184 * require them to be together. */
185
186 static mali_ptr
187 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
188 const struct pipe_draw_info *info,
189 unsigned *min_index, unsigned *max_index)
190 {
191 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
192 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
193 off_t offset = info->start * info->index_size;
194 bool needs_indices = true;
195 mali_ptr out = 0;
196
197 if (info->max_index != ~0u) {
198 *min_index = info->min_index;
199 *max_index = info->max_index;
200 needs_indices = false;
201 }
202
203 if (!info->has_user_indices) {
204 /* Only resources can be directly mapped */
205 panfrost_batch_add_bo(batch, rsrc->bo,
206 PAN_BO_ACCESS_SHARED |
207 PAN_BO_ACCESS_READ |
208 PAN_BO_ACCESS_VERTEX_TILER);
209 out = rsrc->bo->gpu + offset;
210
211 /* Check the cache */
212 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
213 info->start,
214 info->count,
215 min_index,
216 max_index);
217 } else {
218 /* Otherwise, we need to upload to transient memory */
219 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
220 struct panfrost_transfer T =
221 panfrost_pool_alloc_aligned(&batch->pool,
222 info->count * info->index_size,
223 info->index_size);
224
225 memcpy(T.cpu, ibuf8 + offset, info->count * info->index_size);
226 out = T.gpu;
227 }
228
229 if (needs_indices) {
230 /* Fallback */
231 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
232
233 if (!info->has_user_indices)
234 panfrost_minmax_cache_add(rsrc->index_cache,
235 info->start, info->count,
236 *min_index, *max_index);
237 }
238
239 return out;
240 }
241
242 void
243 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
244 const struct pipe_draw_info *info,
245 enum mali_draw_mode draw_mode,
246 struct mali_vertex_tiler_postfix *vertex_postfix,
247 struct mali_vertex_tiler_prefix *tiler_prefix,
248 struct mali_vertex_tiler_postfix *tiler_postfix,
249 unsigned *vertex_count,
250 unsigned *padded_count)
251 {
252 tiler_prefix->draw_mode = draw_mode;
253
254 unsigned draw_flags = 0;
255
256 if (panfrost_writes_point_size(ctx))
257 draw_flags |= MALI_DRAW_VARYING_SIZE;
258
259 if (info->primitive_restart)
260 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
261
262 /* These don't make much sense */
263
264 draw_flags |= 0x3000;
265
266 if (info->index_size) {
267 unsigned min_index = 0, max_index = 0;
268
269 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
270 info,
271 &min_index,
272 &max_index);
273
274 /* Use the corresponding values */
275 *vertex_count = max_index - min_index + 1;
276 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
277 tiler_prefix->offset_bias_correction = -min_index;
278 tiler_prefix->index_count = MALI_POSITIVE(info->count);
279 draw_flags |= panfrost_translate_index_size(info->index_size);
280 } else {
281 tiler_prefix->indices = 0;
282 *vertex_count = ctx->vertex_count;
283 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
284 tiler_prefix->offset_bias_correction = 0;
285 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
286 }
287
288 tiler_prefix->unknown_draw = draw_flags;
289
290 /* Encode the padded vertex count */
291
292 if (info->instance_count > 1) {
293 *padded_count = panfrost_padded_vertex_count(*vertex_count);
294
295 unsigned shift = __builtin_ctz(ctx->padded_count);
296 unsigned k = ctx->padded_count >> (shift + 1);
297
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
300 } else {
301 *padded_count = *vertex_count;
302
303 /* Reset instancing state */
304 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
305 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
306 }
307 }
308
309 static void
310 panfrost_shader_meta_init(struct panfrost_context *ctx,
311 enum pipe_shader_type st,
312 struct mali_shader_meta *meta)
313 {
314 const struct panfrost_device *dev = pan_device(ctx->base.screen);
315 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
316
317 memset(meta, 0, sizeof(*meta));
318 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
319 meta->attribute_count = ss->attribute_count;
320 meta->varying_count = ss->varying_count;
321 meta->texture_count = ctx->sampler_view_count[st];
322 meta->sampler_count = ctx->sampler_count[st];
323
324 if (dev->quirks & IS_BIFROST) {
325 if (st == PIPE_SHADER_VERTEX)
326 meta->bifrost1.unk1 = 0x800000;
327 else {
328 /* First clause ATEST |= 0x4000000.
329 * Less than 32 regs |= 0x200 */
330 meta->bifrost1.unk1 = 0x950020;
331 }
332
333 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
334 if (st == PIPE_SHADER_VERTEX)
335 meta->bifrost2.preload_regs = 0xC0;
336 else {
337 meta->bifrost2.preload_regs = 0x1;
338 SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
339 }
340
341 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
342 ss->uniform_cutoff);
343 } else {
344 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
345 ss->uniform_cutoff);
346 meta->midgard1.work_count = ss->work_reg_count;
347
348 /* TODO: This is not conformant on ES3 */
349 meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
350
351 meta->midgard1.flags_lo = 0x20;
352 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
353
354 SET_BIT(meta->midgard1.flags_lo, MALI_WRITES_GLOBAL, ss->writes_global);
355 }
356 }
357
358 static unsigned
359 translate_tex_wrap(enum pipe_tex_wrap w)
360 {
361 switch (w) {
362 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
363 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
364 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
365 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
366 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
367 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
368 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
369 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
370 default: unreachable("Invalid wrap");
371 }
372 }
373
374 /* The hardware compares in the wrong order, so we have to flip before
375 * encoding. Yes, really. */
376
377 static enum mali_func
378 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
379 {
380 if (!cso->compare_mode)
381 return MALI_FUNC_NEVER;
382
383 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
384 return panfrost_flip_compare_func(f);
385 }
386
387 static enum mali_mipmap_mode
388 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
389 {
390 switch (f) {
391 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
392 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
393 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
394 default: unreachable("Invalid");
395 }
396 }
397
398 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
399 struct mali_midgard_sampler_packed *hw)
400 {
401 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
402 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
403 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
404 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
405 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
406 cfg.normalized_coordinates = cso->normalized_coords;
407
408 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
409
410 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
411
412 /* If necessary, we disable mipmapping in the sampler descriptor by
413 * clamping the LOD as tightly as possible (from 0 to epsilon,
414 * essentially -- remember these are fixed point numbers, so
415 * epsilon=1/256) */
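/* With that encoding, the "minimum_lod + 1" below raises the clamp by a
 * single fixed-point step, i.e. the 1/256 epsilon mentioned above. */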
416
417 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
418 cfg.minimum_lod + 1 :
419 FIXED_16(cso->max_lod, false);
420
421 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
422 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
423 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
424
425 cfg.compare_function = panfrost_sampler_compare_func(cso);
426 cfg.seamless_cube_map = cso->seamless_cube_map;
427
428 cfg.border_color_r = cso->border_color.f[0];
429 cfg.border_color_g = cso->border_color.f[1];
430 cfg.border_color_b = cso->border_color.f[2];
431 cfg.border_color_a = cso->border_color.f[3];
432 }
433 }
434
435 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
436 struct mali_bifrost_sampler_packed *hw)
437 {
438 pan_pack(hw, BIFROST_SAMPLER, cfg) {
439 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
440 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
441 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
442 cfg.normalized_coordinates = cso->normalized_coords;
443
444 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
445 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
446 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
447
448 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
449 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
450 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
451
452 cfg.compare_function = panfrost_sampler_compare_func(cso);
453 cfg.seamless_cube_map = cso->seamless_cube_map;
454 }
455 }
456
457 static void
458 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
459 struct mali_shader_meta *fragmeta)
460 {
461 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
462
463 bool msaa = rast->multisample;
464
465 /* TODO: Sample size */
466 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
467 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
468
469 struct panfrost_shader_state *fs;
470 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
471
472 /* EXT_shader_framebuffer_fetch requires the shader to be run
473 * per-sample when outputs are read. */
474 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
475 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
476
477 fragmeta->depth_units = rast->offset_units * 2.0f;
478 fragmeta->depth_factor = rast->offset_scale;
479
480 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
481
482 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
483 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
484
485 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
486 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
487 }
488
489 static void
490 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
491 struct mali_shader_meta *fragmeta)
492 {
493 const struct panfrost_zsa_state *so = ctx->depth_stencil;
494
495 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
496 so->base.stencil[0].enabled);
497
498 fragmeta->stencil_mask_front = so->stencil_mask_front;
499 fragmeta->stencil_mask_back = so->stencil_mask_back;
500
501 /* Bottom bits for stencil ref, exactly one word */
502 fragmeta->stencil_front.opaque[0] = so->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
503
504 /* If back-stencil is not enabled, use the front values */
505
506 if (so->base.stencil[1].enabled)
507 fragmeta->stencil_back.opaque[0] = so->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
508 else
509 fragmeta->stencil_back = fragmeta->stencil_front;
510
511 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
512 so->base.depth.writemask);
513
514 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
515 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(
516 so->base.depth.enabled ? so->base.depth.func : PIPE_FUNC_ALWAYS));
517 }
518
519 static bool
520 panfrost_fs_required(
521 struct panfrost_shader_state *fs,
522 struct panfrost_blend_final *blend,
523 unsigned rt_count)
524 {
525 /* If we generally have side effects */
526 if (fs->fs_sidefx)
527 return true;
528
529 /* If colour is written we need to execute */
530 for (unsigned i = 0; i < rt_count; ++i) {
531 if (!blend[i].no_colour)
532 return true;
533 }
534
535 /* If depth is written and not implied we need to execute.
536 * TODO: Predicate on Z/S writes being enabled */
537 return (fs->writes_depth || fs->writes_stencil);
538 }
539
540 static void
541 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
542 struct mali_shader_meta *fragmeta,
543 struct panfrost_blend_final *blend)
544 {
545 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
546 const struct panfrost_device *dev = pan_device(ctx->base.screen);
547 struct panfrost_shader_state *fs;
548 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
549
550 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
551 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
552 !ctx->blend->base.dither);
553
554 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
555 ctx->blend->base.alpha_to_coverage);
556
557 /* Get blending setup */
558 unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
559
560 /* Disable shader execution if we can */
561 if (dev->quirks & MIDGARD_SHADERLESS
562 && !panfrost_fs_required(fs, blend, rt_count)) {
563 fragmeta->shader = 0;
564 fragmeta->attribute_count = 0;
565 fragmeta->varying_count = 0;
566 fragmeta->texture_count = 0;
567 fragmeta->sampler_count = 0;
568
569 /* This feature is not known to work on Bifrost */
570 fragmeta->midgard1.work_count = 1;
571 fragmeta->midgard1.uniform_count = 0;
572 fragmeta->midgard1.uniform_buffer_count = 0;
573 }
574
575 /* If there is a blend shader, work registers are shared. We impose 8
576 * work registers as a limit for blend shaders. Should be lower XXX */
577
578 if (!(dev->quirks & IS_BIFROST)) {
579 for (unsigned c = 0; c < rt_count; ++c) {
580 if (blend[c].is_shader) {
581 fragmeta->midgard1.work_count =
582 MAX2(fragmeta->midgard1.work_count, 8);
583 }
584 }
585 }
586
587 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
588 * copied to the blend_meta appended (by convention), but this is the
589 * field actually read by the hardware. (Or maybe both are read...?).
590 * Specify the last RTi with a blend shader. */
591
592 fragmeta->blend.shader = 0;
593
594 for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
595 if (!blend[rt].is_shader)
596 continue;
597
598 fragmeta->blend.shader = blend[rt].shader.gpu |
599 blend[rt].shader.first_tag;
600 break;
601 }
602
603 if (dev->quirks & MIDGARD_SFBD) {
604 /* On platforms with only a single render target (SFBD), the blend
605 * information is inside the shader meta itself. We additionally
606 * need to signal CAN_DISCARD for nontrivial blend modes (so
607 * we're able to read back the destination buffer) */
608
609 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
610 blend[0].is_shader);
611
612 if (!blend[0].is_shader) {
613 fragmeta->blend.equation = *blend[0].equation.equation;
614 fragmeta->blend.constant = blend[0].equation.constant;
615 }
616
617 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
618 !blend[0].no_blending || fs->can_discard);
619
620 batch->draws |= PIPE_CLEAR_COLOR0;
621 return;
622 }
623
624 if (dev->quirks & IS_BIFROST) {
625 bool no_blend = true;
626
627 for (unsigned i = 0; i < rt_count; ++i)
628 no_blend &= (blend[i].no_blending | blend[i].no_colour);
629
630 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
631 !fs->can_discard && !fs->writes_depth && no_blend);
632 }
633 }
634
635 static void
636 panfrost_emit_blend(struct panfrost_batch *batch, void *rts,
637 struct panfrost_blend_final *blend)
638 {
639 const struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
640 struct panfrost_shader_state *fs = panfrost_get_shader_state(batch->ctx, PIPE_SHADER_FRAGMENT);
641 unsigned rt_count = batch->key.nr_cbufs;
642
643 struct bifrost_blend_rt *brts = rts;
644 struct midgard_blend_rt *mrts = rts;
645
646 /* Disable blending for depth-only on Bifrost */
647
648 if (rt_count == 0 && dev->quirks & IS_BIFROST)
649 brts[0].unk2 = 0x3;
650
651 for (unsigned i = 0; i < rt_count; ++i) {
652 unsigned flags = 0;
653
654 if (!blend[i].no_colour) {
655 flags = 0x200;
656 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
657
658 bool is_srgb = util_format_is_srgb(batch->key.cbufs[i]->format);
659
660 SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
661 SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
662 SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
663 SET_BIT(flags, MALI_BLEND_NO_DITHER, !batch->ctx->blend->base.dither);
664 }
665
666 if (dev->quirks & IS_BIFROST) {
667 brts[i].flags = flags;
668
669 if (blend[i].is_shader) {
670 /* The blend shader's address needs to have
671 * the same top 32 bits as the fragment shader's.
672 * TODO: Ensure that's always the case.
673 */
674 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
675 (fs->bo->gpu & (0xffffffffull << 32)));
676 brts[i].shader = blend[i].shader.gpu;
677 brts[i].unk2 = 0x0;
678 } else {
679 enum pipe_format format = batch->key.cbufs[i]->format;
680 const struct util_format_description *format_desc;
681 format_desc = util_format_description(format);
682
683 brts[i].equation = *blend[i].equation.equation;
684
685 /* TODO: this is a bit more complicated */
686 brts[i].constant = blend[i].equation.constant;
687
688 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
689
690 /* 0x19 disables blending and forces REPLACE
691 * mode (equivalent to rgb_mode = alpha_mode =
692 * 0x122, colour mask = 0xF). 0x1a allows
693 * blending. */
694 brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
695
696 brts[i].shader_type = fs->blend_types[i];
697 }
698 } else {
699 mrts[i].flags = flags;
700
701 if (blend[i].is_shader) {
702 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
703 } else {
704 mrts[i].blend.equation = *blend[i].equation.equation;
705 mrts[i].blend.constant = blend[i].equation.constant;
706 }
707 }
708 }
709 }
710
711 static void
712 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
713 struct mali_shader_meta *fragmeta,
714 struct panfrost_blend_final *blend)
715 {
716 const struct panfrost_device *dev = pan_device(ctx->base.screen);
717 struct panfrost_shader_state *fs;
718
719 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
720
721 bool msaa = ctx->rasterizer->base.multisample;
722 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
723
724 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
725 fragmeta->unknown2_4 = 0x4e0;
726
727 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
728 * is required (independent of 32-bit/64-bit descriptors), or why it's
729 * not used on later GPU revisions. Otherwise, all shader jobs fault on
730 * these earlier chips (perhaps this is a chicken bit of some kind).
731 * More investigation is needed. */
732
733 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
734
735 if (dev->quirks & IS_BIFROST) {
736 /* TODO */
737 } else {
738 /* Depending on whether it's legal in the given shader, we try to
739 * enable early-z testing. TODO: respect e-z force */
740
741 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
742 !fs->can_discard && !fs->writes_global &&
743 !fs->writes_depth && !fs->writes_stencil &&
744 !ctx->blend->base.alpha_to_coverage);
745
746 /* Add the writes Z/S flags if needed. */
747 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
748 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
749
750 /* Any time texturing is used, derivatives are implicitly calculated,
751 * so we need to enable helper invocations */
752
753 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
754 fs->helper_invocations);
755
756 /* If discard is enabled, which bit we set to convey this
757 * depends on whether depth/stencil is used for the draw or not.
758 * Just one of depth OR stencil is enough to trigger this. */
759
760 const struct pipe_depth_stencil_alpha_state *zsa = &ctx->depth_stencil->base;
761 bool zs_enabled =
762 fs->writes_depth || fs->writes_stencil ||
763 (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS) ||
764 zsa->stencil[0].enabled;
765
766 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
767 fs->outputs_read || (!zs_enabled && fs->can_discard));
768 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
769 }
770
771 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
772 panfrost_frag_meta_zsa_update(ctx, fragmeta);
773 panfrost_frag_meta_blend_update(ctx, fragmeta, blend);
774 }
775
776 void
777 panfrost_emit_shader_meta(struct panfrost_batch *batch,
778 enum pipe_shader_type st,
779 struct mali_vertex_tiler_postfix *postfix)
780 {
781 struct panfrost_context *ctx = batch->ctx;
782 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
783
784 if (!ss) {
785 postfix->shader = 0;
786 return;
787 }
788
789 struct mali_shader_meta meta;
790
791 panfrost_shader_meta_init(ctx, st, &meta);
792
793 /* Add the shader BO to the batch. */
794 panfrost_batch_add_bo(batch, ss->bo,
795 PAN_BO_ACCESS_PRIVATE |
796 PAN_BO_ACCESS_READ |
797 panfrost_bo_access_for_stage(st));
798
799 mali_ptr shader_ptr;
800
801 if (st == PIPE_SHADER_FRAGMENT) {
802 struct panfrost_device *dev = pan_device(ctx->base.screen);
803 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
804 size_t desc_size = sizeof(meta);
805 void *rts = NULL;
806 struct panfrost_transfer xfer;
807 unsigned rt_size;
808
809 if (dev->quirks & MIDGARD_SFBD)
810 rt_size = 0;
811 else if (dev->quirks & IS_BIFROST)
812 rt_size = sizeof(struct bifrost_blend_rt);
813 else
814 rt_size = sizeof(struct midgard_blend_rt);
815
816 desc_size += rt_size * rt_count;
817
818 if (rt_size)
819 rts = rzalloc_size(ctx, rt_size * rt_count);
820
821 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
822
823 for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
824 blend[c] = panfrost_get_blend_for_context(ctx, c);
825
826 panfrost_frag_shader_meta_init(ctx, &meta, blend);
827
828 if (!(dev->quirks & MIDGARD_SFBD))
829 panfrost_emit_blend(batch, rts, blend);
830
831 xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, sizeof(meta));
832
833 memcpy(xfer.cpu, &meta, sizeof(meta));
834 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
835
836 if (rt_size)
837 ralloc_free(rts);
838
839 shader_ptr = xfer.gpu;
840 } else {
841 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
842 sizeof(meta));
843 }
844
845 postfix->shader = shader_ptr;
846 }
847
848 void
849 panfrost_emit_viewport(struct panfrost_batch *batch,
850 struct mali_vertex_tiler_postfix *tiler_postfix)
851 {
852 struct panfrost_context *ctx = batch->ctx;
853 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
854 const struct pipe_scissor_state *ss = &ctx->scissor;
855 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
856 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
857
858 /* Derive min/max from translate/scale. Note since |x| >= 0 by
859 * definition, we have that -|x| <= |x| hence translate - |scale| <=
860 * translate + |scale|, so the ordering is correct here. */
861 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
862 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
863 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
864 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
865 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
866 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
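/* E.g. a viewport spanning x from 0 to 800 has translate[0] = 400 and
 * scale[0] = 400, so vp_minx = 0 and vp_maxx = 800; negative scales
 * (flipped viewports) give the same bounds thanks to fabsf(). */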
867
868 /* Scissor to the intersection of the viewport and the scissor, clamped
869 * to the framebuffer */
870
871 unsigned minx = MIN2(fb->width, vp_minx);
872 unsigned maxx = MIN2(fb->width, vp_maxx);
873 unsigned miny = MIN2(fb->height, vp_miny);
874 unsigned maxy = MIN2(fb->height, vp_maxy);
875
876 if (ss && rast->scissor) {
877 minx = MAX2(ss->minx, minx);
878 miny = MAX2(ss->miny, miny);
879 maxx = MIN2(ss->maxx, maxx);
880 maxy = MIN2(ss->maxy, maxy);
881 }
882
883 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
884
885 pan_pack(T.cpu, VIEWPORT, cfg) {
886 cfg.scissor_minimum_x = minx;
887 cfg.scissor_minimum_y = miny;
888 cfg.scissor_maximum_x = maxx - 1;
889 cfg.scissor_maximum_y = maxy - 1;
890
891 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
892 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
893 }
894
895 tiler_postfix->viewport = T.gpu;
896 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
897 }
898
899 static mali_ptr
900 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
901 enum pipe_shader_type st,
902 struct panfrost_constant_buffer *buf,
903 unsigned index)
904 {
905 struct pipe_constant_buffer *cb = &buf->cb[index];
906 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
907
908 if (rsrc) {
909 panfrost_batch_add_bo(batch, rsrc->bo,
910 PAN_BO_ACCESS_SHARED |
911 PAN_BO_ACCESS_READ |
912 panfrost_bo_access_for_stage(st));
913
914 /* Alignment guaranteed by
915 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
916 return rsrc->bo->gpu + cb->buffer_offset;
917 } else if (cb->user_buffer) {
918 return panfrost_pool_upload_aligned(&batch->pool,
919 cb->user_buffer +
920 cb->buffer_offset,
921 cb->buffer_size, 16);
922 } else {
923 unreachable("No constant buffer");
924 }
925 }
926
927 struct sysval_uniform {
928 union {
929 float f[4];
930 int32_t i[4];
931 uint32_t u[4];
932 uint64_t du[2];
933 };
934 };
935
936 static void
937 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
938 struct sysval_uniform *uniform)
939 {
940 struct panfrost_context *ctx = batch->ctx;
941 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
942
943 uniform->f[0] = vp->scale[0];
944 uniform->f[1] = vp->scale[1];
945 uniform->f[2] = vp->scale[2];
946 }
947
948 static void
949 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
950 struct sysval_uniform *uniform)
951 {
952 struct panfrost_context *ctx = batch->ctx;
953 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
954
955 uniform->f[0] = vp->translate[0];
956 uniform->f[1] = vp->translate[1];
957 uniform->f[2] = vp->translate[2];
958 }
959
960 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
961 enum pipe_shader_type st,
962 unsigned int sysvalid,
963 struct sysval_uniform *uniform)
964 {
965 struct panfrost_context *ctx = batch->ctx;
966 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
967 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
968 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
969 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
970
971 assert(dim);
972 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
973
974 if (dim > 1)
975 uniform->i[1] = u_minify(tex->texture->height0,
976 tex->u.tex.first_level);
977
978 if (dim > 2)
979 uniform->i[2] = u_minify(tex->texture->depth0,
980 tex->u.tex.first_level);
981
982 if (is_array)
983 uniform->i[dim] = tex->texture->array_size;
984 }
985
986 static void
987 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
988 enum pipe_shader_type st,
989 unsigned ssbo_id,
990 struct sysval_uniform *uniform)
991 {
992 struct panfrost_context *ctx = batch->ctx;
993
994 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
995 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
996
997 /* Compute address */
998 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
999
1000 panfrost_batch_add_bo(batch, bo,
1001 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
1002 panfrost_bo_access_for_stage(st));
1003
1004 /* Upload address and size as sysval */
1005 uniform->du[0] = bo->gpu + sb.buffer_offset;
1006 uniform->u[2] = sb.buffer_size;
1007 }
1008
1009 static void
1010 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
1011 enum pipe_shader_type st,
1012 unsigned samp_idx,
1013 struct sysval_uniform *uniform)
1014 {
1015 struct panfrost_context *ctx = batch->ctx;
1016 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1017
1018 uniform->f[0] = sampl->min_lod;
1019 uniform->f[1] = sampl->max_lod;
1020 uniform->f[2] = sampl->lod_bias;
1021
1022 /* Even without any errata, Midgard represents "no mipmapping" as
1023 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1024 * panfrost_create_sampler_state which also explains our choice of
1025 * epsilon value (again to keep behaviour consistent) */
1026
1027 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1028 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1029 }
1030
1031 static void
1032 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1033 struct sysval_uniform *uniform)
1034 {
1035 struct panfrost_context *ctx = batch->ctx;
1036
1037 uniform->u[0] = ctx->compute_grid->grid[0];
1038 uniform->u[1] = ctx->compute_grid->grid[1];
1039 uniform->u[2] = ctx->compute_grid->grid[2];
1040 }
1041
1042 static void
1043 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1044 struct panfrost_shader_state *ss,
1045 enum pipe_shader_type st)
1046 {
1047 struct sysval_uniform *uniforms = (void *)buf;
1048
1049 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1050 int sysval = ss->sysval[i];
1051
1052 switch (PAN_SYSVAL_TYPE(sysval)) {
1053 case PAN_SYSVAL_VIEWPORT_SCALE:
1054 panfrost_upload_viewport_scale_sysval(batch,
1055 &uniforms[i]);
1056 break;
1057 case PAN_SYSVAL_VIEWPORT_OFFSET:
1058 panfrost_upload_viewport_offset_sysval(batch,
1059 &uniforms[i]);
1060 break;
1061 case PAN_SYSVAL_TEXTURE_SIZE:
1062 panfrost_upload_txs_sysval(batch, st,
1063 PAN_SYSVAL_ID(sysval),
1064 &uniforms[i]);
1065 break;
1066 case PAN_SYSVAL_SSBO:
1067 panfrost_upload_ssbo_sysval(batch, st,
1068 PAN_SYSVAL_ID(sysval),
1069 &uniforms[i]);
1070 break;
1071 case PAN_SYSVAL_NUM_WORK_GROUPS:
1072 panfrost_upload_num_work_groups_sysval(batch,
1073 &uniforms[i]);
1074 break;
1075 case PAN_SYSVAL_SAMPLER:
1076 panfrost_upload_sampler_sysval(batch, st,
1077 PAN_SYSVAL_ID(sysval),
1078 &uniforms[i]);
1079 break;
1080 default:
1081 assert(0);
1082 }
1083 }
1084 }
1085
1086 static const void *
1087 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1088 unsigned index)
1089 {
1090 struct pipe_constant_buffer *cb = &buf->cb[index];
1091 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1092
1093 if (rsrc)
1094 return rsrc->bo->cpu;
1095 else if (cb->user_buffer)
1096 return cb->user_buffer;
1097 else
1098 unreachable("No constant buffer");
1099 }
1100
1101 void
1102 panfrost_emit_const_buf(struct panfrost_batch *batch,
1103 enum pipe_shader_type stage,
1104 struct mali_vertex_tiler_postfix *postfix)
1105 {
1106 struct panfrost_context *ctx = batch->ctx;
1107 struct panfrost_shader_variants *all = ctx->shader[stage];
1108
1109 if (!all)
1110 return;
1111
1112 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1113
1114 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1115
1116 /* Uniforms are implicitly UBO #0 */
1117 bool has_uniforms = buf->enabled_mask & (1 << 0);
1118
1119 /* Allocate room for the sysval and the uniforms */
1120 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1121 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1122 size_t size = sys_size + uniform_size;
1123 struct panfrost_transfer transfer =
1124 panfrost_pool_alloc_aligned(&batch->pool, size, 16);
1125
1126 /* Upload sysvals requested by the shader */
1127 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1128
1129 /* Upload uniforms */
1130 if (has_uniforms && uniform_size) {
1131 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1132 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1133 }
1134
1135 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1136 * uploaded */
1137
1138 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1139 assert(ubo_count >= 1);
1140
1141 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1142 struct panfrost_transfer ubos =
1143 panfrost_pool_alloc_aligned(&batch->pool, sz,
1144 MALI_UNIFORM_BUFFER_LENGTH);
1145
1146 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1147
1148 /* Upload uniforms as a UBO */
1149
1150 if (ss->uniform_count) {
1151 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1152 cfg.entries = ss->uniform_count;
1153 cfg.pointer = transfer.gpu;
1154 }
1155 } else {
1156 *ubo_ptr = 0;
1157 }
1158
1159 /* The rest are honest-to-goodness UBOs */
1160
1161 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1162 size_t usz = buf->cb[ubo].buffer_size;
1163 bool enabled = buf->enabled_mask & (1 << ubo);
1164 bool empty = usz == 0;
1165
1166 if (!enabled || empty) {
1167 ubo_ptr[ubo] = 0;
1168 continue;
1169 }
1170
1171 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1172 cfg.entries = DIV_ROUND_UP(usz, 16);
1173 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1174 stage, buf, ubo);
1175 }
1176 }
1177
1178 postfix->uniforms = transfer.gpu;
1179 postfix->uniform_buffers = ubos.gpu;
1180
1181 buf->dirty_mask = 0;
1182 }
1183
1184 void
1185 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1186 const struct pipe_grid_info *info,
1187 struct midgard_payload_vertex_tiler *vtp)
1188 {
1189 struct panfrost_context *ctx = batch->ctx;
1190 struct panfrost_device *dev = pan_device(ctx->base.screen);
1191 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1192 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1193 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1194 128));
1195
1196 unsigned log2_instances =
1197 util_logbase2_ceil(info->grid[0]) +
1198 util_logbase2_ceil(info->grid[1]) +
1199 util_logbase2_ceil(info->grid[2]);
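/* Sketch of the sizing math: for info->grid = { 3, 4, 1 } the terms above
 * are 2 + 2 + 0, so log2_instances = 4 and we reserve room for
 * 1 << 4 = 16 instances per core, a power-of-two over-allocation that
 * covers the 12 workgroups in the grid. */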
1200
1201 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
1202 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1203 shared_size,
1204 1);
1205
1206 struct mali_shared_memory shared = {
1207 .shared_memory = bo->gpu,
1208 .shared_workgroup_count = log2_instances,
1209 .shared_shift = util_logbase2(single_size) + 1
1210 };
1211
1212 vtp->postfix.shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared,
1213 sizeof(shared), 64);
1214 }
1215
1216 static mali_ptr
1217 panfrost_get_tex_desc(struct panfrost_batch *batch,
1218 enum pipe_shader_type st,
1219 struct panfrost_sampler_view *view)
1220 {
1221 if (!view)
1222 return (mali_ptr) 0;
1223
1224 struct pipe_sampler_view *pview = &view->base;
1225 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1226
1227 /* Add the BO to the job so it's retained until the job is done. */
1228
1229 panfrost_batch_add_bo(batch, rsrc->bo,
1230 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1231 panfrost_bo_access_for_stage(st));
1232
1233 panfrost_batch_add_bo(batch, view->bo,
1234 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1235 panfrost_bo_access_for_stage(st));
1236
1237 return view->bo->gpu;
1238 }
1239
1240 static void
1241 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1242 struct pipe_context *pctx)
1243 {
1244 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1245 if (view->texture_bo != rsrc->bo->gpu ||
1246 view->modifier != rsrc->modifier) {
1247 panfrost_bo_unreference(view->bo);
1248 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1249 }
1250 }
1251
1252 void
1253 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1254 enum pipe_shader_type stage,
1255 struct mali_vertex_tiler_postfix *postfix)
1256 {
1257 struct panfrost_context *ctx = batch->ctx;
1258 struct panfrost_device *device = pan_device(ctx->base.screen);
1259
1260 if (!ctx->sampler_view_count[stage])
1261 return;
1262
1263 if (device->quirks & IS_BIFROST) {
1264 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1265 MALI_BIFROST_TEXTURE_LENGTH *
1266 ctx->sampler_view_count[stage],
1267 MALI_BIFROST_TEXTURE_LENGTH);
1268
1269 struct mali_bifrost_texture_packed *out =
1270 (struct mali_bifrost_texture_packed *) T.cpu;
1271
1272 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1273 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1274 struct pipe_sampler_view *pview = &view->base;
1275 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1276
1277 panfrost_update_sampler_view(view, &ctx->base);
1278 out[i] = view->bifrost_descriptor;
1279
1280 /* Add the BOs to the job so they are retained until the job is done. */
1281
1282 panfrost_batch_add_bo(batch, rsrc->bo,
1283 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1284 panfrost_bo_access_for_stage(stage));
1285
1286 panfrost_batch_add_bo(batch, view->bo,
1287 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1288 panfrost_bo_access_for_stage(stage));
1289 }
1290
1291 postfix->textures = T.gpu;
1292 } else {
1293 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1294
1295 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1296 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1297
1298 panfrost_update_sampler_view(view, &ctx->base);
1299
1300 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1301 }
1302
1303 postfix->textures = panfrost_pool_upload_aligned(&batch->pool,
1304 trampolines,
1305 sizeof(uint64_t) *
1306 ctx->sampler_view_count[stage],
1307 sizeof(uint64_t));
1308 }
1309 }
1310
1311 void
1312 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1313 enum pipe_shader_type stage,
1314 struct mali_vertex_tiler_postfix *postfix)
1315 {
1316 struct panfrost_context *ctx = batch->ctx;
1317
1318 if (!ctx->sampler_count[stage])
1319 return;
1320
1321 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1322 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1323
1324 size_t sz = desc_size * ctx->sampler_count[stage];
1325 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool, sz, desc_size);
1326 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1327
1328 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1329 out[i] = ctx->samplers[stage][i]->hw;
1330
1331 postfix->sampler_descriptor = T.gpu;
1332 }
1333
1334 void
1335 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1336 struct mali_vertex_tiler_postfix *vertex_postfix)
1337 {
1338 struct panfrost_context *ctx = batch->ctx;
1339 struct panfrost_vertex_state *so = ctx->vertex;
1340 struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1341
1342 unsigned instance_shift = vertex_postfix->instance_shift;
1343 unsigned instance_odd = vertex_postfix->instance_odd;
1344
1345 /* Worst case: everything is NPOT, which is only possible if instancing
1346 * is enabled. Otherwise a single record is guaranteed */
1347 bool could_npot = instance_shift || instance_odd;
1348
1349 struct panfrost_transfer S = panfrost_pool_alloc_aligned(&batch->pool,
1350 MALI_ATTRIBUTE_BUFFER_LENGTH * vs->attribute_count *
1351 (could_npot ? 2 : 1),
1352 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1353
1354 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1355 MALI_ATTRIBUTE_LENGTH * vs->attribute_count,
1356 MALI_ATTRIBUTE_LENGTH);
1357
1358 struct mali_attribute_buffer_packed *bufs =
1359 (struct mali_attribute_buffer_packed *) S.cpu;
1360
1361 struct mali_attribute_packed *out =
1362 (struct mali_attribute_packed *) T.cpu;
1363
1364 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1365 unsigned k = 0;
1366
1367 for (unsigned i = 0; i < so->num_elements; ++i) {
1368 /* We map buffers 1:1 with the attributes, which
1369 * means duplicating some vertex buffers (who cares? aside from
1370 * maybe some caching implications but I somehow doubt that
1371 * matters) */
1372
1373 struct pipe_vertex_element *elem = &so->pipe[i];
1374 unsigned vbi = elem->vertex_buffer_index;
1375 attrib_to_buffer[i] = k;
1376
1377 if (!(ctx->vb_mask & (1 << vbi)))
1378 continue;
1379
1380 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1381 struct panfrost_resource *rsrc;
1382
1383 rsrc = pan_resource(buf->buffer.resource);
1384 if (!rsrc)
1385 continue;
1386
1387 /* Add a dependency of the batch on the vertex buffer */
1388 panfrost_batch_add_bo(batch, rsrc->bo,
1389 PAN_BO_ACCESS_SHARED |
1390 PAN_BO_ACCESS_READ |
1391 PAN_BO_ACCESS_VERTEX_TILER);
1392
1393 /* Mask off lower bits, see offset fixup below */
1394 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1395 mali_ptr addr = raw_addr & ~63;
1396
1397 /* Since we advanced the base pointer, we shrink the buffer
1398 * size, but add the offset we subtracted */
1399 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1400 - buf->buffer_offset;
1401
1402 /* When there is a divisor, the hardware-level divisor is
1403 * the product of the instance divisor and the padded count */
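/* E.g. instance_divisor = 2 with padded_count = 8 gives an effective
 * divisor of 16; as a power of two that takes the POT path below with
 * divisor_r = ctz(16) = 4. */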
1404 unsigned divisor = elem->instance_divisor;
1405 unsigned hw_divisor = ctx->padded_count * divisor;
1406 unsigned stride = buf->stride;
1407
1408 /* If there's a divisor(=1) but no instancing, we want every
1409 * attribute to be the same */
1410
1411 if (divisor && ctx->instance_count == 1)
1412 stride = 0;
1413
1414 if (!divisor || ctx->instance_count <= 1) {
1415 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1416 if (ctx->instance_count > 1)
1417 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1418
1419 cfg.pointer = addr;
1420 cfg.stride = stride;
1421 cfg.size = size;
1422 cfg.divisor_r = instance_shift;
1423 cfg.divisor_p = instance_odd;
1424 }
1425 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1426 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1427 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1428 cfg.pointer = addr;
1429 cfg.stride = stride;
1430 cfg.size = size;
1431 cfg.divisor_r = __builtin_ctz(hw_divisor);
1432 }
1433
1434 } else {
1435 unsigned shift = 0, extra_flags = 0;
1436
1437 unsigned magic_divisor =
1438 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
1439
1440 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1441 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1442 cfg.pointer = addr;
1443 cfg.stride = stride;
1444 cfg.size = size;
1445
1446 cfg.divisor_r = shift;
1447 cfg.divisor_e = extra_flags;
1448 }
1449
1450 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1451 cfg.divisor_numerator = magic_divisor;
1452 cfg.divisor = divisor;
1453 }
1454
1455 ++k;
1456 }
1457
1458 ++k;
1459 }
1460
1461 /* Add special gl_VertexID/gl_InstanceID buffers */
1462
1463 if (unlikely(vs->attribute_count >= PAN_VERTEX_ID)) {
1464 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1465
1466 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1467 cfg.buffer_index = k++;
1468 cfg.format = so->formats[PAN_VERTEX_ID];
1469 }
1470
1471 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1472
1473 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1474 cfg.buffer_index = k++;
1475 cfg.format = so->formats[PAN_INSTANCE_ID];
1476 }
1477 }
1478
1479 /* Attribute addresses require 64-byte alignment, so let:
1480 *
1481 * base' = base & ~63 = base - (base & 63)
1482 * offset' = offset + (base & 63)
1483 *
1484 * Since base' + offset' = base + offset, these are equivalent
1485 * addressing modes and now base is 64 aligned.
1486 */
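/* Concretely, a base of 0x10007 becomes base' = 0x10000 and the
 * remaining 7 bytes are folded into each attribute's src_offset below. */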
1487
1488 unsigned start = vertex_postfix->offset_start;
1489
1490 for (unsigned i = 0; i < so->num_elements; ++i) {
1491 unsigned vbi = so->pipe[i].vertex_buffer_index;
1492 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1493
1494 /* Adjust by the masked off bits of the offset. Make sure we
1495 * read src_offset from so->hw (which is not GPU visible)
1496 * rather than target (which is) due to caching effects */
1497
1498 unsigned src_offset = so->pipe[i].src_offset;
1499
1500 /* BOs aligned to 4k so guaranteed aligned to 64 */
1501 src_offset += (buf->buffer_offset & 63);
1502
1503 /* Also, somewhat obscurely, per-instance data needs to be
1504 * offset in response to a delayed start in an indexed draw */
1505
1506 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1507 src_offset -= buf->stride * start;
1508
1509 pan_pack(out + i, ATTRIBUTE, cfg) {
1510 cfg.buffer_index = attrib_to_buffer[i];
1511 cfg.format = so->formats[i];
1512 cfg.offset = src_offset;
1513 }
1514 }
1515
1516 vertex_postfix->attributes = S.gpu;
1517 vertex_postfix->attribute_meta = T.gpu;
1518 }
1519
1520 static mali_ptr
1521 panfrost_emit_varyings(struct panfrost_batch *batch,
1522 struct mali_attribute_buffer_packed *slot,
1523 unsigned stride, unsigned count)
1524 {
1525 unsigned size = stride * count;
1526 mali_ptr ptr = panfrost_pool_alloc_aligned(&batch->invisible_pool, size, 64).gpu;
1527
1528 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1529 cfg.stride = stride;
1530 cfg.size = size;
1531 cfg.pointer = ptr;
1532 }
1533
1534 return ptr;
1535 }
1536
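/* Returns the misalignment (modulo 64 bytes) of a streamout target's write
 * address. E.g. a buffer_offset of 0x104 at offset 0 yields 4: the pointer
 * emitted below is rounded down to a 64-byte boundary and this remainder
 * is added back into the varying record's offset, keeping the addresses
 * equivalent. */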
1537 static unsigned
1538 panfrost_streamout_offset(unsigned stride, unsigned offset,
1539 struct pipe_stream_output_target *target)
1540 {
1541 return (target->buffer_offset + (offset * stride * 4)) & 63;
1542 }
1543
1544 static void
1545 panfrost_emit_streamout(struct panfrost_batch *batch,
1546 struct mali_attribute_buffer_packed *slot,
1547 unsigned stride_words, unsigned offset, unsigned count,
1548 struct pipe_stream_output_target *target)
1549 {
1550 unsigned stride = stride_words * 4;
1551 unsigned max_size = target->buffer_size;
1552 unsigned expected_size = stride * count;
1553
1554 /* Grab the BO and bind it to the batch */
1555 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1556
1557 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1558 * the perspective of the TILER and FRAGMENT.
1559 */
1560 panfrost_batch_add_bo(batch, bo,
1561 PAN_BO_ACCESS_SHARED |
1562 PAN_BO_ACCESS_RW |
1563 PAN_BO_ACCESS_VERTEX_TILER |
1564 PAN_BO_ACCESS_FRAGMENT);
1565
1566 /* We will have an offset applied to get alignment */
1567 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
1568
1569 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1570 cfg.pointer = (addr & ~63);
1571 cfg.stride = stride;
1572 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1573 }
1574 }
1575
1576 static bool
1577 has_point_coord(unsigned mask, gl_varying_slot loc)
1578 {
1579 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1580 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1581 else if (loc == VARYING_SLOT_PNTC)
1582 return (mask & (1 << 8));
1583 else
1584 return false;
1585 }
1586
1587 /* Helpers for manipulating stream out information so we can pack varyings
1588 * accordingly. Compute the src_offset for a given captured varying */
1589
1590 static struct pipe_stream_output *
1591 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1592 {
1593 for (unsigned i = 0; i < info->num_outputs; ++i) {
1594 if (info->output[i].register_index == loc)
1595 return &info->output[i];
1596 }
1597
1598 unreachable("Varying not captured");
1599 }
1600
1601 static unsigned
1602 pan_varying_size(enum mali_format fmt)
1603 {
1604 unsigned type = MALI_EXTRACT_TYPE(fmt);
1605 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1606 unsigned bits = MALI_EXTRACT_BITS(fmt);
1607 unsigned bpc = 0;
1608
1609 if (bits == MALI_CHANNEL_FLOAT) {
1610 /* No doubles */
1611 bool fp16 = (type == MALI_FORMAT_SINT);
1612 assert(fp16 || (type == MALI_FORMAT_UNORM));
1613
1614 bpc = fp16 ? 2 : 4;
1615 } else {
1616 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1617
1618 /* See the enums */
1619 bits = 1 << bits;
1620 assert(bits >= 8);
1621 bpc = bits / 8;
1622 }
1623
1624 return bpc * chan;
1625 }
1626
1627 /* Indices for named (non-XFB) varyings that are present. These are packed
1628 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1629 * PAN_VARY_*). This has the nice property that you can lookup the buffer index
1630 * of a given special field given a shift S by:
1631 *
1632 * idx = popcount(P & ((1 << S) - 1))
1633 *
1634 * That is, count the varyings that come earlier; since indices start at zero,
1635 * that count is the index of this one. Likewise, the total number of special
1636 * buffers required is simply popcount(P)
1637 */
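/* For example, with the general, position and point coord buffers present,
 * P = 0b1011; the point coord buffer (S = PAN_VARY_PNTCOORD = 3) is then
 * found at index popcount(0b1011 & 0b0111) = 2. */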
1638
1639 enum pan_special_varying {
1640 PAN_VARY_GENERAL = 0,
1641 PAN_VARY_POSITION = 1,
1642 PAN_VARY_PSIZ = 2,
1643 PAN_VARY_PNTCOORD = 3,
1644 PAN_VARY_FACE = 4,
1645 PAN_VARY_FRAGCOORD = 5,
1646
1647 /* Keep last */
1648 PAN_VARY_MAX,
1649 };
1650
1651 /* Given a varying, figure out which index it corresponds to */
1652
1653 static inline unsigned
1654 pan_varying_index(unsigned present, enum pan_special_varying v)
1655 {
1656 unsigned mask = (1 << v) - 1;
1657 return util_bitcount(present & mask);
1658 }
1659
1660 /* Get the base offset for XFB buffers, which by convention come after
1661 * everything else. Wrapper function for semantic reasons; by construction this
1662 * is just popcount. */
1663
1664 static inline unsigned
1665 pan_xfb_base(unsigned present)
1666 {
1667 return util_bitcount(present);
1668 }
1669
1670 /* Computes the present mask for varyings so we can start emitting varying records */
1671
1672 static inline unsigned
1673 pan_varying_present(
1674 struct panfrost_shader_state *vs,
1675 struct panfrost_shader_state *fs,
1676 unsigned quirks)
1677 {
1678 /* At the moment we always emit general and position buffers. Not
1679 * strictly necessary but usually harmless */
1680
1681 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1682
1683 /* Enable special buffers by the shader info */
1684
1685 if (vs->writes_point_size)
1686 present |= (1 << PAN_VARY_PSIZ);
1687
1688 if (fs->reads_point_coord)
1689 present |= (1 << PAN_VARY_PNTCOORD);
1690
1691 if (fs->reads_face)
1692 present |= (1 << PAN_VARY_FACE);
1693
1694 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1695 present |= (1 << PAN_VARY_FRAGCOORD);
1696
1697 /* Also, if we have a point sprite, we need a point coord buffer */
1698
1699 for (unsigned i = 0; i < fs->varying_count; i++) {
1700 gl_varying_slot loc = fs->varyings_loc[i];
1701
1702 if (has_point_coord(fs->point_sprite_mask, loc))
1703 present |= (1 << PAN_VARY_PNTCOORD);
1704 }
1705
1706 return present;
1707 }
1708
1709 /* Emitters for varying records */
1710
1711 static void
1712 pan_emit_vary(struct mali_attribute_packed *out,
1713 unsigned present, enum pan_special_varying buf,
1714 unsigned quirks, enum mali_format format,
1715 unsigned offset)
1716 {
1717 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1718 unsigned swizzle = quirks & HAS_SWIZZLES ?
1719 panfrost_get_default_swizzle(nr_channels) :
1720 panfrost_bifrost_swizzle(nr_channels);
1721
1722 pan_pack(out, ATTRIBUTE, cfg) {
1723 cfg.buffer_index = pan_varying_index(present, buf);
1724 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1725 cfg.format = (format << 12) | swizzle;
1726 cfg.offset = offset;
1727 }
1728 }
1729
1730 /* General varying that is unused */
1731
1732 static void
1733 pan_emit_vary_only(struct mali_attribute_packed *out,
1734 unsigned present, unsigned quirks)
1735 {
1736 pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
1737 }
1738
1739 /* Special records */
1740
1741 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1742 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1743 [PAN_VARY_PSIZ] = MALI_R16F,
1744 [PAN_VARY_PNTCOORD] = MALI_R16F,
1745 [PAN_VARY_FACE] = MALI_R32I,
1746 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1747 };
1748
1749 static void
1750 pan_emit_vary_special(struct mali_attribute_packed *out,
1751 unsigned present, enum pan_special_varying buf,
1752 unsigned quirks)
1753 {
1754 assert(buf < PAN_VARY_MAX);
1755 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1756 }
1757
1758 static enum mali_format
1759 pan_xfb_format(enum mali_format format, unsigned nr)
1760 {
1761 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1762 return MALI_R32F | MALI_NR_CHANNELS(nr);
1763 else
1764 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1765 }
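/* For example (illustrative): a half-float vec2 that gets captured is widened
 * to two 32-bit float components, while integer varyings keep their base type
 * but are likewise promoted to 32 bits per channel, with the channel count
 * taken from the stream output rather than the original format. */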
1766
1767 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1768 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1769 * value. */
1770
1771 static void
1772 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1773 unsigned present,
1774 unsigned max_xfb,
1775 unsigned *streamout_offsets,
1776 unsigned quirks,
1777 enum mali_format format,
1778 struct pipe_stream_output o)
1779 {
1780 unsigned swizzle = quirks & HAS_SWIZZLES ?
1781 panfrost_get_default_swizzle(o.num_components) :
1782 panfrost_bifrost_swizzle(o.num_components);
1783
1784 pan_pack(out, ATTRIBUTE, cfg) {
1785 /* XFB buffers come after everything else */
1786 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1787 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1788
1789 /* Override number of channels and precision to highp */
1790 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1791
1792 /* Combine the output's dword offset with the per-buffer streamout offset */
1793 cfg.offset = (o.dst_offset * 4) /* dwords */
1794 + streamout_offsets[o.output_buffer];
1795 }
1796 }
1797
1798 /* Determine if we should capture a varying for XFB. This requires actually
1799 * having a buffer for it. If we don't capture it, we'll fall back to a general
1800 * varying path (linked or unlinked, possibly discarding the write) */
1801
1802 static bool
1803 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1804 unsigned loc, unsigned max_xfb)
1805 {
1806 if (!(xfb->so_mask & (1ll << loc)))
1807 return false;
1808
1809 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1810 return o->output_buffer < max_xfb;
1811 }
1812
1813 static void
1814 pan_emit_general_varying(struct mali_attribute_packed *out,
1815 struct panfrost_shader_state *other,
1816 struct panfrost_shader_state *xfb,
1817 gl_varying_slot loc,
1818 enum mali_format format,
1819 unsigned present,
1820 unsigned quirks,
1821 unsigned *gen_offsets,
1822 enum mali_format *gen_formats,
1823 unsigned *gen_stride,
1824 unsigned idx,
1825 bool should_alloc)
1826 {
1827 /* Check if we're linked */
1828 signed other_idx = -1;
1829
1830 for (unsigned j = 0; j < other->varying_count; ++j) {
1831 if (other->varyings_loc[j] == loc) {
1832 other_idx = j;
1833 break;
1834 }
1835 }
1836
1837 if (other_idx < 0) {
1838 pan_emit_vary_only(out, present, quirks);
1839 return;
1840 }
1841
1842 unsigned offset = gen_offsets[other_idx];
1843
1844 if (should_alloc) {
1845 /* We're linked, so allocate space for it via a watermark allocation */
1846 enum mali_format alt = other->varyings[other_idx];
1847
1848 /* Do interpolation at minimum precision */
1849 unsigned size_main = pan_varying_size(format);
1850 unsigned size_alt = pan_varying_size(alt);
1851 unsigned size = MIN2(size_main, size_alt);
1852
1853 /* If a varying is marked for XFB but not actually captured, we
1854 * should match the format to the format that would otherwise
1855 * be used for XFB, since dEQP checks for invariance here. It's
1856 * unclear if this is required by the spec. */
1857
1858 if (xfb->so_mask & (1ull << loc)) {
1859 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1860 format = pan_xfb_format(format, o->num_components);
1861 size = pan_varying_size(format);
1862 } else if (size == size_alt) {
1863 format = alt;
1864 }
1865
1866 gen_offsets[idx] = *gen_stride;
1867 gen_formats[other_idx] = format;
1868 offset = *gen_stride;
1869 *gen_stride += size;
1870 }
1871
1872 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1873 }
1874
1875 /* Higher-level wrapper around the emitters above, classifying a varying into
1876 * one of the cases they handle */
1877
1878 static void
1879 panfrost_emit_varying(
1880 struct mali_attribute_packed *out,
1881 struct panfrost_shader_state *stage,
1882 struct panfrost_shader_state *other,
1883 struct panfrost_shader_state *xfb,
1884 unsigned present,
1885 unsigned max_xfb,
1886 unsigned *streamout_offsets,
1887 unsigned quirks,
1888 unsigned *gen_offsets,
1889 enum mali_format *gen_formats,
1890 unsigned *gen_stride,
1891 unsigned idx,
1892 bool should_alloc,
1893 bool is_fragment)
1894 {
1895 gl_varying_slot loc = stage->varyings_loc[idx];
1896 enum mali_format format = stage->varyings[idx];
1897
1898 /* Override format to match linkage */
1899 if (!should_alloc && gen_formats[idx])
1900 format = gen_formats[idx];
1901
1902 if (has_point_coord(stage->point_sprite_mask, loc)) {
1903 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1904 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1905 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1906 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1907 } else if (loc == VARYING_SLOT_POS) {
1908 if (is_fragment)
1909 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1910 else
1911 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1912 } else if (loc == VARYING_SLOT_PSIZ) {
1913 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1914 } else if (loc == VARYING_SLOT_PNTC) {
1915 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1916 } else if (loc == VARYING_SLOT_FACE) {
1917 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1918 } else {
1919 pan_emit_general_varying(out, other, xfb, loc, format, present,
1920 quirks, gen_offsets, gen_formats, gen_stride,
1921 idx, should_alloc);
1922 }
1923 }
1924
1925 static void
1926 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1927 unsigned present,
1928 enum pan_special_varying v,
1929 unsigned special)
1930 {
1931 if (present & (1 << v)) {
1932 unsigned idx = pan_varying_index(present, v);
1933
1934 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1935 cfg.special = special;
1936 cfg.type = 0;
1937 }
1938 }
1939 }
1940
1941 void
1942 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1943 unsigned vertex_count,
1944 struct mali_vertex_tiler_postfix *vertex_postfix,
1945 struct mali_vertex_tiler_postfix *tiler_postfix,
1946 union midgard_primitive_size *primitive_size)
1947 {
1948 /* Load the shaders */
1949 struct panfrost_context *ctx = batch->ctx;
1950 struct panfrost_device *dev = pan_device(ctx->base.screen);
1951 struct panfrost_shader_state *vs, *fs;
1952 size_t vs_size, fs_size;
1953
1954 /* Allocate the varying descriptor */
1955
1956 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1957 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1958 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1959 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1960
1961 struct panfrost_transfer trans = panfrost_pool_alloc_aligned(
1962 &batch->pool, vs_size + fs_size, MALI_ATTRIBUTE_LENGTH);
1963
1964 struct pipe_stream_output_info *so = &vs->stream_output;
1965 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1966
1967 /* Check if this varying is linked by us. This is the case for
1968 * general-purpose, non-captured varyings. If it is, link it. If it's
1969 * not, use the provided stream out information to determine the
1970 * offset, since it was already linked for us. */
1971
1972 unsigned gen_offsets[32];
1973 enum mali_format gen_formats[32];
1974 memset(gen_offsets, 0, sizeof(gen_offsets));
1975 memset(gen_formats, 0, sizeof(gen_formats));
1976
1977 unsigned gen_stride = 0;
1978 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1979 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1980
1981 unsigned streamout_offsets[32];
1982
1983 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1984 streamout_offsets[i] = panfrost_streamout_offset(
1985 so->stride[i],
1986 ctx->streamout.offsets[i],
1987 ctx->streamout.targets[i]);
1988 }
1989
1990 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1991 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1992
1993 for (unsigned i = 0; i < vs->varying_count; i++) {
1994 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1995 ctx->streamout.num_targets, streamout_offsets,
1996 dev->quirks,
1997 gen_offsets, gen_formats, &gen_stride, i, true, false);
1998 }
1999
2000 for (unsigned i = 0; i < fs->varying_count; i++) {
2001 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
2002 ctx->streamout.num_targets, streamout_offsets,
2003 dev->quirks,
2004 gen_offsets, gen_formats, &gen_stride, i, false, true);
2005 }
2006
2007 unsigned xfb_base = pan_xfb_base(present);
2008 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
2009 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets),
2010 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
2011 struct mali_attribute_buffer_packed *varyings =
2012 (struct mali_attribute_buffer_packed *) T.cpu;
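
/* Buffer layout note (derived from the indexing above): the general and
 * special varying buffers come first, at the indices given by
 * pan_varying_index(), and the per-target streamout buffers follow starting
 * at xfb_base. */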
2013
2014 /* Emit the stream out buffers */
2015
2016 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
2017 ctx->vertex_count);
2018
2019 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2020 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
2021 so->stride[i],
2022 ctx->streamout.offsets[i],
2023 out_count,
2024 ctx->streamout.targets[i]);
2025 }
2026
2027 panfrost_emit_varyings(batch,
2028 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2029 gen_stride, vertex_count);
2030
2031 /* fp32 vec4 gl_Position */
2032 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2033 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2034 sizeof(float) * 4, vertex_count);
2035
2036 if (present & (1 << PAN_VARY_PSIZ)) {
2037 primitive_size->pointer = panfrost_emit_varyings(batch,
2038 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2039 2, vertex_count);
2040 }
2041
2042 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
2043 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
2044 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
2045
2046 vertex_postfix->varyings = T.gpu;
2047 tiler_postfix->varyings = T.gpu;
2048
2049 vertex_postfix->varying_meta = trans.gpu;
2050 tiler_postfix->varying_meta = trans.gpu + vs_size;
2051 }
2052
2053 void
2054 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2055 struct mali_vertex_tiler_prefix *vertex_prefix,
2056 struct mali_vertex_tiler_postfix *vertex_postfix,
2057 struct mali_vertex_tiler_prefix *tiler_prefix,
2058 struct mali_vertex_tiler_postfix *tiler_postfix,
2059 union midgard_primitive_size *primitive_size)
2060 {
2061 struct panfrost_context *ctx = batch->ctx;
2062 struct panfrost_device *device = pan_device(ctx->base.screen);
2063 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2064 struct bifrost_payload_vertex bifrost_vertex = {0,};
2065 struct bifrost_payload_tiler bifrost_tiler = {0,};
2066 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2067 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2068 void *vp, *tp;
2069 size_t vp_size, tp_size;
2070
2071 if (device->quirks & IS_BIFROST) {
2072 bifrost_vertex.prefix = *vertex_prefix;
2073 bifrost_vertex.postfix = *vertex_postfix;
2074 vp = &bifrost_vertex;
2075 vp_size = sizeof(bifrost_vertex);
2076
2077 bifrost_tiler.prefix = *tiler_prefix;
2078 bifrost_tiler.tiler.primitive_size = *primitive_size;
2079 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2080 bifrost_tiler.postfix = *tiler_postfix;
2081 tp = &bifrost_tiler;
2082 tp_size = sizeof(bifrost_tiler);
2083 } else {
2084 midgard_vertex.prefix = *vertex_prefix;
2085 midgard_vertex.postfix = *vertex_postfix;
2086 vp = &midgard_vertex;
2087 vp_size = sizeof(midgard_vertex);
2088
2089 midgard_tiler.prefix = *tiler_prefix;
2090 midgard_tiler.postfix = *tiler_postfix;
2091 midgard_tiler.primitive_size = *primitive_size;
2092 tp = &midgard_tiler;
2093 tp_size = sizeof(midgard_tiler);
2094 }
2095
2096 if (wallpapering) {
2097 /* Inject in reverse order, with "predicted" job indices.
2098 * THIS IS A HACK XXX */
2099 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2100 batch->scoreboard.job_index + 2, tp, tp_size, true);
2101 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2102 vp, vp_size, true);
2103 return;
2104 }
2105
2106 /* If rasterizer discard is enabled, only submit the vertex job */
2107
2108 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2109 vp, vp_size, false);
2110
2111 if (ctx->rasterizer->base.rasterizer_discard)
2112 return;
2113
2114 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2115 false);
2116 }
2117
2118 /* TODO: stop hardcoding this */
2119 mali_ptr
2120 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2121 {
2122 uint16_t locations[] = {
2123 128, 128,
2124 0, 256,
2125 0, 256,
2126 0, 256,
2127 0, 256,
2128 0, 256,
2129 0, 256,
2130 0, 256,
2131 0, 256,
2132 0, 256,
2133 0, 256,
2134 0, 256,
2135 0, 256,
2136 0, 256,
2137 0, 256,
2138 0, 256,
2139 0, 256,
2140 0, 256,
2141 0, 256,
2142 0, 256,
2143 0, 256,
2144 0, 256,
2145 0, 256,
2146 0, 256,
2147 0, 256,
2148 0, 256,
2149 0, 256,
2150 0, 256,
2151 0, 256,
2152 0, 256,
2153 0, 256,
2154 0, 256,
2155 128, 128,
2156 0, 0,
2157 0, 0,
2158 0, 0,
2159 0, 0,
2160 0, 0,
2161 0, 0,
2162 0, 0,
2163 0, 0,
2164 0, 0,
2165 0, 0,
2166 0, 0,
2167 0, 0,
2168 0, 0,
2169 0, 0,
2170 0, 0,
2171 };
2172
2173 return panfrost_pool_upload_aligned(&batch->pool, locations, 96 * sizeof(uint16_t), 64);
2174 }