panfrost: Use pack for shaderless
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 struct mali_shared_memory shared = {
62 .shared_workgroup_count = ~0,
63 };
64
65 if (batch->stack_size) {
66 struct panfrost_bo *stack =
67 panfrost_batch_get_scratchpad(batch, batch->stack_size,
68 dev->thread_tls_alloc,
69 dev->core_count);
70
71 shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
72 shared.scratchpad = stack->gpu;
73 }
74
75 postfix->shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared, sizeof(shared), 64);
76 }
77
78 static void
79 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_postfix *postfix)
81 {
82 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
83 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
84 }
85
86 static void
87 panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
88 struct mali_vertex_tiler_prefix *prefix,
89 struct mali_vertex_tiler_postfix *postfix)
90 {
91 postfix->gl_enables |= 0x7;
92 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
93 rasterizer->base.front_ccw);
94 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
95 (rasterizer->base.cull_face & PIPE_FACE_FRONT));
96 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
97 (rasterizer->base.cull_face & PIPE_FACE_BACK));
98 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
99 rasterizer->base.flatshade_first);
100 }
101
102 void
103 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
104 struct mali_vertex_tiler_prefix *prefix,
105 union midgard_primitive_size *primitive_size)
106 {
107 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
108
109 if (!panfrost_writes_point_size(ctx)) {
110 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
111 rasterizer->base.point_size :
112 rasterizer->base.line_width;
113
114 primitive_size->constant = val;
115 }
116 }
117
118 static void
119 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
120 struct mali_vertex_tiler_postfix *postfix)
121 {
122 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
123 if (ctx->occlusion_query) {
124 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
125 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
126 PAN_BO_ACCESS_SHARED |
127 PAN_BO_ACCESS_RW |
128 PAN_BO_ACCESS_FRAGMENT);
129 } else {
130 postfix->occlusion_counter = 0;
131 }
132 }
133
134 void
135 panfrost_vt_init(struct panfrost_context *ctx,
136 enum pipe_shader_type stage,
137 struct mali_vertex_tiler_prefix *prefix,
138 struct mali_vertex_tiler_postfix *postfix)
139 {
140 struct panfrost_device *device = pan_device(ctx->base.screen);
141
142 if (!ctx->shader[stage])
143 return;
144
145 memset(prefix, 0, sizeof(*prefix));
146 memset(postfix, 0, sizeof(*postfix));
147
148 if (device->quirks & IS_BIFROST) {
149 postfix->gl_enables = 0x2;
150 panfrost_vt_emit_shared_memory(ctx, postfix);
151 } else {
152 postfix->gl_enables = 0x6;
153 panfrost_vt_attach_framebuffer(ctx, postfix);
154 }
155
156 if (stage == PIPE_SHADER_FRAGMENT) {
157 panfrost_vt_update_occlusion_query(ctx, postfix);
158 panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
159 }
160 }
161
162 static unsigned
163 panfrost_translate_index_size(unsigned size)
164 {
165 switch (size) {
166 case 1:
167 return MALI_DRAW_INDEXED_UINT8;
168
169 case 2:
170 return MALI_DRAW_INDEXED_UINT16;
171
172 case 4:
173 return MALI_DRAW_INDEXED_UINT32;
174
175 default:
176 unreachable("Invalid index size");
177 }
178 }
179
180 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
181  * good for the duration of the draw (transient), though it may last longer. Also get
182 * the bounds on the index buffer for the range accessed by the draw. We do
183 * these operations together because there are natural optimizations which
184 * require them to be together. */
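/* Notably, the min/max scan over resource-backed index buffers is cached
 * per resource (panfrost_minmax_cache below), so repeated draws over the
 * same index range avoid rescanning the indices. */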
185
186 static mali_ptr
187 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
188 const struct pipe_draw_info *info,
189 unsigned *min_index, unsigned *max_index)
190 {
191 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
192 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
193 off_t offset = info->start * info->index_size;
194 bool needs_indices = true;
195 mali_ptr out = 0;
196
197 if (info->max_index != ~0u) {
198 *min_index = info->min_index;
199 *max_index = info->max_index;
200 needs_indices = false;
201 }
202
203 if (!info->has_user_indices) {
204 /* Only resources can be directly mapped */
205 panfrost_batch_add_bo(batch, rsrc->bo,
206 PAN_BO_ACCESS_SHARED |
207 PAN_BO_ACCESS_READ |
208 PAN_BO_ACCESS_VERTEX_TILER);
209 out = rsrc->bo->gpu + offset;
210
211 /* Check the cache */
212 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
213 info->start,
214 info->count,
215 min_index,
216 max_index);
217 } else {
218 /* Otherwise, we need to upload to transient memory */
219 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
220 struct panfrost_transfer T =
221 panfrost_pool_alloc_aligned(&batch->pool,
222 info->count * info->index_size,
223 info->index_size);
224
225 memcpy(T.cpu, ibuf8 + offset, info->count * info->index_size);
226 out = T.gpu;
227 }
228
229 if (needs_indices) {
230 /* Fallback */
231 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
232
233 if (!info->has_user_indices)
234 panfrost_minmax_cache_add(rsrc->index_cache,
235 info->start, info->count,
236 *min_index, *max_index);
237 }
238
239 return out;
240 }
241
242 void
243 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
244 const struct pipe_draw_info *info,
245 enum mali_draw_mode draw_mode,
246 struct mali_vertex_tiler_postfix *vertex_postfix,
247 struct mali_vertex_tiler_prefix *tiler_prefix,
248 struct mali_vertex_tiler_postfix *tiler_postfix,
249 unsigned *vertex_count,
250 unsigned *padded_count)
251 {
252 tiler_prefix->draw_mode = draw_mode;
253
254 unsigned draw_flags = 0;
255
256 if (panfrost_writes_point_size(ctx))
257 draw_flags |= MALI_DRAW_VARYING_SIZE;
258
259 if (info->primitive_restart)
260 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
261
262 /* These don't make much sense */
263
264 draw_flags |= 0x3000;
265
266 if (info->index_size) {
267 unsigned min_index = 0, max_index = 0;
268
269 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
270 info,
271 &min_index,
272 &max_index);
273
274 /* Use the corresponding values */
275 *vertex_count = max_index - min_index + 1;
276 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
277 tiler_prefix->offset_bias_correction = -min_index;
278 tiler_prefix->index_count = MALI_POSITIVE(info->count);
279 draw_flags |= panfrost_translate_index_size(info->index_size);
280 } else {
281 tiler_prefix->indices = 0;
282 *vertex_count = ctx->vertex_count;
283 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
284 tiler_prefix->offset_bias_correction = 0;
285 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
286 }
287
288 tiler_prefix->unknown_draw = draw_flags;
289
290 /* Encode the padded vertex count */
291
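/* The shift/odd pair computed below appears to encode the padded count as
 * (2 * instance_odd + 1) << instance_shift: e.g. a padded count of 24 gives
 * shift = ctz(24) = 3 and odd = 24 >> 4 = 1, and (2 * 1 + 1) << 3 = 24.
 * This relies on padded counts always being of the form (2k + 1) << n. */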
292 if (info->instance_count > 1) {
293 *padded_count = panfrost_padded_vertex_count(*vertex_count);
294
295 unsigned shift = __builtin_ctz(ctx->padded_count);
296 unsigned k = ctx->padded_count >> (shift + 1);
297
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
300 } else {
301 *padded_count = *vertex_count;
302
303 /* Reset instancing state */
304 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
305 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
306 }
307 }
308
309 static void
310 panfrost_emit_compute_shader(struct panfrost_context *ctx,
311 enum pipe_shader_type st,
312 struct mali_shader_meta *meta)
313 {
314 const struct panfrost_device *dev = pan_device(ctx->base.screen);
315 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
316
317 memset(meta, 0, sizeof(*meta));
318 meta->shader = ss->shader;
319 meta->attribute_count = ss->attribute_count;
320 meta->varying_count = ss->varying_count;
321 meta->texture_count = ctx->sampler_view_count[st];
322 meta->sampler_count = ctx->sampler_count[st];
323
324 if (dev->quirks & IS_BIFROST) {
325 meta->bifrost1.unk1 = 0x800000;
326 meta->bifrost2.preload_regs = 0xC0;
327 meta->bifrost2.uniform_count = ss->uniform_count;
328 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
329 } else {
330 struct mali_midgard_properties_packed prop;
331
332 pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
333 cfg.uniform_buffer_count = panfrost_ubo_count(ctx, st);
334 cfg.uniform_count = ss->uniform_count;
335 cfg.work_register_count = ss->work_reg_count;
336 cfg.writes_globals = ss->writes_global;
337 cfg.suppress_inf_nan = true; /* XXX */
338 }
339
340 memcpy(&meta->midgard1, &prop, sizeof(prop));
341 }
342 }
343
344 static unsigned
345 translate_tex_wrap(enum pipe_tex_wrap w)
346 {
347 switch (w) {
348 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
349 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
350 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
351 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
352 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
353 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
354 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
355 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
356 default: unreachable("Invalid wrap");
357 }
358 }
359
360 /* The hardware compares in the wrong order, so we have to flip before
361 * encoding. Yes, really. */
362
363 static enum mali_func
364 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
365 {
366 if (!cso->compare_mode)
367 return MALI_FUNC_NEVER;
368
369 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
370 return panfrost_flip_compare_func(f);
371 }
372
373 static enum mali_mipmap_mode
374 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
375 {
376 switch (f) {
377 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
378 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
379 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
380 default: unreachable("Invalid");
381 }
382 }
383
384 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
385 struct mali_midgard_sampler_packed *hw)
386 {
387 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
388 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
389 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
390 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
391 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
392 cfg.normalized_coordinates = cso->normalized_coords;
393
394 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
395
396 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
397
398 /* If necessary, we disable mipmapping in the sampler descriptor by
399  * clamping the LOD as tightly as possible (from 0 to epsilon,
400 * essentially -- remember these are fixed point numbers, so
401 * epsilon=1/256) */
402
403 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
404 cfg.minimum_lod + 1 :
405 FIXED_16(cso->max_lod, false);
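/* The "+ 1" above is one unit of the fixed-point LOD encoding, i.e. the
 * 1/256 epsilon noted in the comment, giving an effective LOD range of
 * [minimum_lod, minimum_lod + 1/256]. */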
406
407 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
408 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
409 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
410
411 cfg.compare_function = panfrost_sampler_compare_func(cso);
412 cfg.seamless_cube_map = cso->seamless_cube_map;
413
414 cfg.border_color_r = cso->border_color.f[0];
415 cfg.border_color_g = cso->border_color.f[1];
416 cfg.border_color_b = cso->border_color.f[2];
417 cfg.border_color_a = cso->border_color.f[3];
418 }
419 }
420
421 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
422 struct mali_bifrost_sampler_packed *hw)
423 {
424 pan_pack(hw, BIFROST_SAMPLER, cfg) {
425 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
426 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
427 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
428 cfg.normalized_coordinates = cso->normalized_coords;
429
430 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
431 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
432 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
433
434 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
435 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
436 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
437
438 cfg.compare_function = panfrost_sampler_compare_func(cso);
439 cfg.seamless_cube_map = cso->seamless_cube_map;
440 }
441 }
442
443 static bool
444 panfrost_fs_required(
445 struct panfrost_shader_state *fs,
446 struct panfrost_blend_final *blend,
447 unsigned rt_count)
448 {
449 /* If we generally have side effects */
450 if (fs->fs_sidefx)
451 return true;
452
453 /* If colour is written we need to execute */
454 for (unsigned i = 0; i < rt_count; ++i) {
455 if (!blend[i].no_colour)
456 return true;
457 }
458
459 /* If depth is written and not implied we need to execute.
460 * TODO: Predicate on Z/S writes being enabled */
461 return (fs->writes_depth || fs->writes_stencil);
462 }
463
464 static void
465 panfrost_emit_blend(struct panfrost_batch *batch, void *rts,
466 struct panfrost_blend_final *blend)
467 {
468 const struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
469 struct panfrost_shader_state *fs = panfrost_get_shader_state(batch->ctx, PIPE_SHADER_FRAGMENT);
470 unsigned rt_count = batch->key.nr_cbufs;
471
472 struct bifrost_blend_rt *brts = rts;
473 struct midgard_blend_rt *mrts = rts;
474
475 /* Disable blending for depth-only on Bifrost */
476
477 if (rt_count == 0 && dev->quirks & IS_BIFROST)
478 brts[0].unk2 = 0x3;
479
480 for (unsigned i = 0; i < rt_count; ++i) {
481 unsigned flags = 0;
482
483 pan_pack(&flags, BLEND_FLAGS, cfg) {
484 if (blend[i].no_colour) {
485 cfg.enable = false;
486 break;
487 }
488
489 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
490
491 cfg.srgb = util_format_is_srgb(batch->key.cbufs[i]->format);
492 cfg.load_destination = blend[i].load_dest;
493 cfg.dither_disable = !batch->ctx->blend->base.dither;
494
495 if (!(dev->quirks & IS_BIFROST))
496 cfg.midgard_blend_shader = blend[i].is_shader;
497 }
498
499 if (dev->quirks & IS_BIFROST) {
500 brts[i].flags = flags;
501
502 if (blend[i].is_shader) {
503 /* The blend shader's address needs to be at
504 * the same top 32 bit as the fragment shader.
505 * TODO: Ensure that's always the case.
506 */
507 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
508 (fs->bo->gpu & (0xffffffffull << 32)));
509 brts[i].shader = blend[i].shader.gpu;
510 brts[i].unk2 = 0x0;
511 } else {
512 enum pipe_format format = batch->key.cbufs[i]->format;
513 const struct util_format_description *format_desc;
514 format_desc = util_format_description(format);
515
516 brts[i].equation = blend[i].equation.equation;
517
518 /* TODO: this is a bit more complicated */
519 brts[i].constant = blend[i].equation.constant;
520
521 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
522
523 /* 0x19 disables blending and forces REPLACE
524 * mode (equivalent to rgb_mode = alpha_mode =
525 * x122, colour mask = 0xF). 0x1a allows
526 * blending. */
527 brts[i].unk2 = blend[i].opaque ? 0x19 : 0x1a;
528
529 brts[i].shader_type = fs->blend_types[i];
530 }
531 } else {
532 memcpy(&mrts[i].flags, &flags, sizeof(flags));
533
534 if (blend[i].is_shader) {
535 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
536 } else {
537 mrts[i].blend.equation = blend[i].equation.equation;
538 mrts[i].blend.constant = blend[i].equation.constant;
539 }
540 }
541 }
542 }
543
544 static void
545 panfrost_emit_frag_shader(struct panfrost_context *ctx,
546 struct mali_shader_meta *fragmeta,
547 struct panfrost_blend_final *blend)
548 {
549 const struct panfrost_device *dev = pan_device(ctx->base.screen);
550 struct panfrost_shader_state *fs;
551
552 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
553
554 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
555 const struct panfrost_zsa_state *zsa = ctx->depth_stencil;
556
557 memset(fragmeta, 0, sizeof(*fragmeta));
558
559 fragmeta->shader = fs->shader;
560 fragmeta->attribute_count = fs->attribute_count;
561 fragmeta->varying_count = fs->varying_count;
562 fragmeta->texture_count = ctx->sampler_view_count[PIPE_SHADER_FRAGMENT];
563 fragmeta->sampler_count = ctx->sampler_count[PIPE_SHADER_FRAGMENT];
564
565 if (dev->quirks & IS_BIFROST) {
566 /* First clause ATEST |= 0x4000000.
567 * Less than 32 regs |= 0x200 */
568 fragmeta->bifrost1.unk1 = 0x950020;
569
570 fragmeta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, PIPE_SHADER_FRAGMENT);
571 fragmeta->bifrost2.preload_regs = 0x1;
572 SET_BIT(fragmeta->bifrost2.preload_regs, 0x10, fs->reads_frag_coord);
573
574 fragmeta->bifrost2.uniform_count = fs->uniform_count;
575 } else {
576 struct mali_midgard_properties_packed prop;
577
578 /* Reasons to disable early-Z from a shader perspective */
579 bool late_z = fs->can_discard || fs->writes_global ||
580 fs->writes_depth || fs->writes_stencil;
581
582 /* Reasons to disable early-Z from a CSO perspective */
583 bool alpha_to_coverage = ctx->blend->base.alpha_to_coverage;
584
585 /* If either depth or stencil is enabled, discard matters */
586 bool zs_enabled =
587 (zsa->base.depth.enabled && zsa->base.depth.func != PIPE_FUNC_ALWAYS) ||
588 zsa->base.stencil[0].enabled;
589
590 pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
591 cfg.uniform_buffer_count = panfrost_ubo_count(ctx, PIPE_SHADER_FRAGMENT);
592 cfg.uniform_count = fs->uniform_count;
593 cfg.work_register_count = fs->work_reg_count;
594 cfg.writes_globals = fs->writes_global;
595 cfg.suppress_inf_nan = true; /* XXX */
596
597 cfg.stencil_from_shader = fs->writes_stencil;
598 cfg.helper_invocation_enable = fs->helper_invocations;
599 cfg.depth_source = fs->writes_depth ?
600 MALI_DEPTH_SOURCE_SHADER :
601 MALI_DEPTH_SOURCE_FIXED_FUNCTION;
602
603 /* Depend on other state */
604 cfg.early_z_enable = !(late_z || alpha_to_coverage);
605 cfg.reads_tilebuffer = fs->outputs_read || (!zs_enabled && fs->can_discard);
606 cfg.reads_depth_stencil = zs_enabled && fs->can_discard;
607 }
608
609 memcpy(&fragmeta->midgard1, &prop, sizeof(prop));
610 }
611
612 bool msaa = rast->multisample;
613 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
614
615 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
616 fragmeta->unknown2_4 = 0x4e0;
617
618 /* TODO: Sample size */
619 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
620 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
621
622 /* EXT_shader_framebuffer_fetch requires the shader to be run
623 * per-sample when outputs are read. */
624 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
625 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
626
627 fragmeta->depth_units = rast->offset_units * 2.0f;
628 fragmeta->depth_factor = rast->offset_scale;
629
630 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
631
632 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
633 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
634
635 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
636 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
637
638 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
639 zsa->base.stencil[0].enabled);
640
641 fragmeta->stencil_mask_front = zsa->stencil_mask_front;
642 fragmeta->stencil_mask_back = zsa->stencil_mask_back;
643
644 /* Bottom bits for stencil ref, exactly one word */
645 fragmeta->stencil_front.opaque[0] = zsa->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
646
647 /* If back-stencil is not enabled, use the front values */
648
649 if (zsa->base.stencil[1].enabled)
650 fragmeta->stencil_back.opaque[0] = zsa->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
651 else
652 fragmeta->stencil_back = fragmeta->stencil_front;
653
654 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
655 zsa->base.depth.writemask);
656
657 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
658 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(
659 zsa->base.depth.enabled ? zsa->base.depth.func : PIPE_FUNC_ALWAYS));
660
661 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
662 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
663 !ctx->blend->base.dither);
664
665 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
666
667 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
668 ctx->blend->base.alpha_to_coverage);
669
670 /* Get blending setup */
671 unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
672
673 /* Disable shader execution if we can */
674 if (dev->quirks & MIDGARD_SHADERLESS
675 && !panfrost_fs_required(fs, blend, rt_count)) {
676 fragmeta->shader = 0x1;
677 fragmeta->attribute_count = 0;
678 fragmeta->varying_count = 0;
679 fragmeta->texture_count = 0;
680 fragmeta->sampler_count = 0;
681
682 /* This feature is not known to work on Bifrost */
683 struct mali_midgard_properties_packed prop;
684
685 pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
686 cfg.work_register_count = 1;
687 cfg.depth_source = MALI_DEPTH_SOURCE_FIXED_FUNCTION;
688 cfg.early_z_enable = true;
689 }
690
691 memcpy(&fragmeta->midgard1, &prop, sizeof(prop));
692 }
693
694 /* If there is a blend shader, work registers are shared. We impose 8
695 * work registers as a limit for blend shaders. Should be lower XXX */
696
697 if (!(dev->quirks & IS_BIFROST)) {
698 for (unsigned c = 0; c < rt_count; ++c) {
699 if (blend[c].is_shader) {
700 fragmeta->midgard1.work_count =
701 MAX2(fragmeta->midgard1.work_count, 8);
702 }
703 }
704 }
705
706 if (dev->quirks & MIDGARD_SFBD) {
707 /* On platforms with only a single render target (SFBD), the blend
708 * information is inside the shader meta itself. We additionally
709 * need to signal CAN_DISCARD for nontrivial blend modes (so
710 * we're able to read back the destination buffer) */
711
712 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
713 blend[0].is_shader);
714
715 if (blend[0].is_shader) {
716 fragmeta->blend.shader = blend[0].shader.gpu |
717 blend[0].shader.first_tag;
718 } else {
719 fragmeta->blend.equation = blend[0].equation.equation;
720 fragmeta->blend.constant = blend[0].equation.constant;
721 }
722
723 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
724 blend[0].load_dest);
725 } else if (!(dev->quirks & IS_BIFROST)) {
726 /* Bug where MRT-capable hw apparently reads the last blend
727 * shader from here instead of the usual location? */
728
729 for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
730 if (!blend[rt].is_shader)
731 continue;
732
733 fragmeta->blend.shader = blend[rt].shader.gpu |
734 blend[rt].shader.first_tag;
735 break;
736 }
737 }
738
739 if (dev->quirks & IS_BIFROST) {
740 bool no_blend = true;
741
742 for (unsigned i = 0; i < rt_count; ++i)
743 no_blend &= (!blend[i].load_dest | blend[i].no_colour);
744
745 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
746 !fs->can_discard && !fs->writes_depth && no_blend);
747 }
748 }
749
750 void
751 panfrost_emit_shader_meta(struct panfrost_batch *batch,
752 enum pipe_shader_type st,
753 struct mali_vertex_tiler_postfix *postfix)
754 {
755 struct panfrost_context *ctx = batch->ctx;
756 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
757
758 if (!ss) {
759 postfix->shader = 0;
760 return;
761 }
762
763 struct mali_shader_meta meta;
764
765 /* Add the shader BO to the batch. */
766 panfrost_batch_add_bo(batch, ss->bo,
767 PAN_BO_ACCESS_PRIVATE |
768 PAN_BO_ACCESS_READ |
769 panfrost_bo_access_for_stage(st));
770
771 mali_ptr shader_ptr;
772
773 if (st == PIPE_SHADER_FRAGMENT) {
774 struct panfrost_device *dev = pan_device(ctx->base.screen);
775 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
776 size_t desc_size = sizeof(meta);
777 void *rts = NULL;
778 struct panfrost_transfer xfer;
779 unsigned rt_size;
780
781 if (dev->quirks & MIDGARD_SFBD)
782 rt_size = 0;
783 else if (dev->quirks & IS_BIFROST)
784 rt_size = sizeof(struct bifrost_blend_rt);
785 else
786 rt_size = sizeof(struct midgard_blend_rt);
787
788 desc_size += rt_size * rt_count;
789
790 if (rt_size)
791 rts = rzalloc_size(ctx, rt_size * rt_count);
792
793 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
794
795 for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
796 blend[c] = panfrost_get_blend_for_context(ctx, c);
797
798 panfrost_emit_frag_shader(ctx, &meta, blend);
799
800 if (!(dev->quirks & MIDGARD_SFBD))
801 panfrost_emit_blend(batch, rts, blend);
802 else
803 batch->draws |= PIPE_CLEAR_COLOR0;
804
805 xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, sizeof(meta));
806
807 memcpy(xfer.cpu, &meta, sizeof(meta));
808 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
809
810 if (rt_size)
811 ralloc_free(rts);
812
813 shader_ptr = xfer.gpu;
814 } else {
815 panfrost_emit_compute_shader(ctx, st, &meta);
816
817 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
818 sizeof(meta));
819 }
820
821 postfix->shader = shader_ptr;
822 }
823
824 void
825 panfrost_emit_viewport(struct panfrost_batch *batch,
826 struct mali_vertex_tiler_postfix *tiler_postfix)
827 {
828 struct panfrost_context *ctx = batch->ctx;
829 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
830 const struct pipe_scissor_state *ss = &ctx->scissor;
831 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
832 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
833
834 /* Derive min/max from translate/scale. Note since |x| >= 0 by
835 * definition, we have that -|x| <= |x| hence translate - |scale| <=
836 * translate + |scale|, so the ordering is correct here. */
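/* For example, a 640x480 viewport anchored at the origin has
 * translate = (320, 240, ...) and scale = (320, 240, ...), giving
 * vp_minx = 0, vp_maxx = 640, vp_miny = 0 and vp_maxy = 480. */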
837 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
838 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
839 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
840 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
841 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
842 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
843
844 /* Scissor to the intersection of viewport and to the scissor, clamped
845 * to the framebuffer */
846
847 unsigned minx = MIN2(fb->width, vp_minx);
848 unsigned maxx = MIN2(fb->width, vp_maxx);
849 unsigned miny = MIN2(fb->height, vp_miny);
850 unsigned maxy = MIN2(fb->height, vp_maxy);
851
852 if (ss && rast->scissor) {
853 minx = MAX2(ss->minx, minx);
854 miny = MAX2(ss->miny, miny);
855 maxx = MIN2(ss->maxx, maxx);
856 maxy = MIN2(ss->maxy, maxy);
857 }
858
859 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
860
861 pan_pack(T.cpu, VIEWPORT, cfg) {
862 cfg.scissor_minimum_x = minx;
863 cfg.scissor_minimum_y = miny;
864 cfg.scissor_maximum_x = maxx - 1;
865 cfg.scissor_maximum_y = maxy - 1;
866
867 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
868 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
869 }
870
871 tiler_postfix->viewport = T.gpu;
872 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
873 }
874
875 static mali_ptr
876 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
877 enum pipe_shader_type st,
878 struct panfrost_constant_buffer *buf,
879 unsigned index)
880 {
881 struct pipe_constant_buffer *cb = &buf->cb[index];
882 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
883
884 if (rsrc) {
885 panfrost_batch_add_bo(batch, rsrc->bo,
886 PAN_BO_ACCESS_SHARED |
887 PAN_BO_ACCESS_READ |
888 panfrost_bo_access_for_stage(st));
889
890 /* Alignment guaranteed by
891 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
892 return rsrc->bo->gpu + cb->buffer_offset;
893 } else if (cb->user_buffer) {
894 return panfrost_pool_upload_aligned(&batch->pool,
895 cb->user_buffer +
896 cb->buffer_offset,
897 cb->buffer_size, 16);
898 } else {
899 unreachable("No constant buffer");
900 }
901 }
902
903 struct sysval_uniform {
904 union {
905 float f[4];
906 int32_t i[4];
907 uint32_t u[4];
908 uint64_t du[2];
909 };
910 };
911
912 static void
913 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
914 struct sysval_uniform *uniform)
915 {
916 struct panfrost_context *ctx = batch->ctx;
917 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
918
919 uniform->f[0] = vp->scale[0];
920 uniform->f[1] = vp->scale[1];
921 uniform->f[2] = vp->scale[2];
922 }
923
924 static void
925 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
926 struct sysval_uniform *uniform)
927 {
928 struct panfrost_context *ctx = batch->ctx;
929 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
930
931 uniform->f[0] = vp->translate[0];
932 uniform->f[1] = vp->translate[1];
933 uniform->f[2] = vp->translate[2];
934 }
935
936 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
937 enum pipe_shader_type st,
938 unsigned int sysvalid,
939 struct sysval_uniform *uniform)
940 {
941 struct panfrost_context *ctx = batch->ctx;
942 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
943 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
944 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
945 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
946
947 assert(dim);
948 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
949
950 if (dim > 1)
951 uniform->i[1] = u_minify(tex->texture->height0,
952 tex->u.tex.first_level);
953
954 if (dim > 2)
955 uniform->i[2] = u_minify(tex->texture->depth0,
956 tex->u.tex.first_level);
957
958 if (is_array)
959 uniform->i[dim] = tex->texture->array_size;
960 }
961
962 static void
963 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
964 enum pipe_shader_type st,
965 unsigned ssbo_id,
966 struct sysval_uniform *uniform)
967 {
968 struct panfrost_context *ctx = batch->ctx;
969
970 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
971 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
972
973 /* Compute address */
974 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
975
976 panfrost_batch_add_bo(batch, bo,
977 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
978 panfrost_bo_access_for_stage(st));
979
980 /* Upload address and size as sysval */
981 uniform->du[0] = bo->gpu + sb.buffer_offset;
982 uniform->u[2] = sb.buffer_size;
983 }
984
985 static void
986 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
987 enum pipe_shader_type st,
988 unsigned samp_idx,
989 struct sysval_uniform *uniform)
990 {
991 struct panfrost_context *ctx = batch->ctx;
992 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
993
994 uniform->f[0] = sampl->min_lod;
995 uniform->f[1] = sampl->max_lod;
996 uniform->f[2] = sampl->lod_bias;
997
998 /* Even without any errata, Midgard represents "no mipmapping" as
999 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1000 * panfrost_create_sampler_state which also explains our choice of
1001 * epsilon value (again to keep behaviour consistent) */
1002
1003 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1004 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1005 }
1006
1007 static void
1008 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1009 struct sysval_uniform *uniform)
1010 {
1011 struct panfrost_context *ctx = batch->ctx;
1012
1013 uniform->u[0] = ctx->compute_grid->grid[0];
1014 uniform->u[1] = ctx->compute_grid->grid[1];
1015 uniform->u[2] = ctx->compute_grid->grid[2];
1016 }
1017
1018 static void
1019 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1020 struct panfrost_shader_state *ss,
1021 enum pipe_shader_type st)
1022 {
1023 struct sysval_uniform *uniforms = (void *)buf;
1024
1025 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1026 int sysval = ss->sysval[i];
1027
1028 switch (PAN_SYSVAL_TYPE(sysval)) {
1029 case PAN_SYSVAL_VIEWPORT_SCALE:
1030 panfrost_upload_viewport_scale_sysval(batch,
1031 &uniforms[i]);
1032 break;
1033 case PAN_SYSVAL_VIEWPORT_OFFSET:
1034 panfrost_upload_viewport_offset_sysval(batch,
1035 &uniforms[i]);
1036 break;
1037 case PAN_SYSVAL_TEXTURE_SIZE:
1038 panfrost_upload_txs_sysval(batch, st,
1039 PAN_SYSVAL_ID(sysval),
1040 &uniforms[i]);
1041 break;
1042 case PAN_SYSVAL_SSBO:
1043 panfrost_upload_ssbo_sysval(batch, st,
1044 PAN_SYSVAL_ID(sysval),
1045 &uniforms[i]);
1046 break;
1047 case PAN_SYSVAL_NUM_WORK_GROUPS:
1048 panfrost_upload_num_work_groups_sysval(batch,
1049 &uniforms[i]);
1050 break;
1051 case PAN_SYSVAL_SAMPLER:
1052 panfrost_upload_sampler_sysval(batch, st,
1053 PAN_SYSVAL_ID(sysval),
1054 &uniforms[i]);
1055 break;
1056 default:
1057 assert(0);
1058 }
1059 }
1060 }
1061
1062 static const void *
1063 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1064 unsigned index)
1065 {
1066 struct pipe_constant_buffer *cb = &buf->cb[index];
1067 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1068
1069 if (rsrc)
1070 return rsrc->bo->cpu;
1071 else if (cb->user_buffer)
1072 return cb->user_buffer;
1073 else
1074 unreachable("No constant buffer");
1075 }
1076
1077 void
1078 panfrost_emit_const_buf(struct panfrost_batch *batch,
1079 enum pipe_shader_type stage,
1080 struct mali_vertex_tiler_postfix *postfix)
1081 {
1082 struct panfrost_context *ctx = batch->ctx;
1083 struct panfrost_shader_variants *all = ctx->shader[stage];
1084
1085 if (!all)
1086 return;
1087
1088 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1089
1090 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1091
1092 /* Uniforms are implicitly UBO #0 */
1093 bool has_uniforms = buf->enabled_mask & (1 << 0);
1094
1095 /* Allocate room for the sysval and the uniforms */
1096 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1097 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1098 size_t size = sys_size + uniform_size;
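/* The buffer allocated below is laid out as [sysvals | user uniforms] and
 * is later exposed to the shader as UBO #0, so the sysvals effectively
 * occupy the first sys_size bytes of the uniform block. */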
1099 struct panfrost_transfer transfer =
1100 panfrost_pool_alloc_aligned(&batch->pool, size, 16);
1101
1102 /* Upload sysvals requested by the shader */
1103 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1104
1105 /* Upload uniforms */
1106 if (has_uniforms && uniform_size) {
1107 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1108 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1109 }
1110
1111 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1112 * uploaded */
1113
1114 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1115 assert(ubo_count >= 1);
1116
1117 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1118 struct panfrost_transfer ubos =
1119 panfrost_pool_alloc_aligned(&batch->pool, sz,
1120 MALI_UNIFORM_BUFFER_LENGTH);
1121
1122 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1123
1124 /* Upload uniforms as a UBO */
1125
1126 if (size) {
1127 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1128 cfg.entries = DIV_ROUND_UP(size, 16);
1129 cfg.pointer = transfer.gpu;
1130 }
1131 } else {
1132 *ubo_ptr = 0;
1133 }
1134
1135 /* The rest are honest-to-goodness UBOs */
1136
1137 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1138 size_t usz = buf->cb[ubo].buffer_size;
1139 bool enabled = buf->enabled_mask & (1 << ubo);
1140 bool empty = usz == 0;
1141
1142 if (!enabled || empty) {
1143 ubo_ptr[ubo] = 0;
1144 continue;
1145 }
1146
1147 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1148 cfg.entries = DIV_ROUND_UP(usz, 16);
1149 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1150 stage, buf, ubo);
1151 }
1152 }
1153
1154 postfix->uniforms = transfer.gpu;
1155 postfix->uniform_buffers = ubos.gpu;
1156
1157 buf->dirty_mask = 0;
1158 }
1159
1160 void
1161 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1162 const struct pipe_grid_info *info,
1163 struct midgard_payload_vertex_tiler *vtp)
1164 {
1165 struct panfrost_context *ctx = batch->ctx;
1166 struct panfrost_device *dev = pan_device(ctx->base.screen);
1167 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1168 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1169 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1170 128));
1171
1172 unsigned log2_instances =
1173 util_logbase2_ceil(info->grid[0]) +
1174 util_logbase2_ceil(info->grid[1]) +
1175 util_logbase2_ceil(info->grid[2]);
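/* log2_instances over-approximates the workgroup count with a power of two:
 * e.g. a 3x5x2 grid gives 2 + 3 + 1 = 6, i.e. room for 64 workgroups' worth
 * of shared memory per core once multiplied out below. */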
1176
1177 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
1178 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1179 shared_size,
1180 1);
1181
1182 struct mali_shared_memory shared = {
1183 .shared_memory = bo->gpu,
1184 .shared_workgroup_count = log2_instances,
1185 .shared_shift = util_logbase2(single_size) + 1
1186 };
1187
1188 vtp->postfix.shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared,
1189 sizeof(shared), 64);
1190 }
1191
1192 static mali_ptr
1193 panfrost_get_tex_desc(struct panfrost_batch *batch,
1194 enum pipe_shader_type st,
1195 struct panfrost_sampler_view *view)
1196 {
1197 if (!view)
1198 return (mali_ptr) 0;
1199
1200 struct pipe_sampler_view *pview = &view->base;
1201 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1202
1203 /* Add the BO to the job so it's retained until the job is done. */
1204
1205 panfrost_batch_add_bo(batch, rsrc->bo,
1206 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1207 panfrost_bo_access_for_stage(st));
1208
1209 panfrost_batch_add_bo(batch, view->bo,
1210 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1211 panfrost_bo_access_for_stage(st));
1212
1213 return view->bo->gpu;
1214 }
1215
1216 static void
1217 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1218 struct pipe_context *pctx)
1219 {
1220 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1221 if (view->texture_bo != rsrc->bo->gpu ||
1222 view->modifier != rsrc->modifier) {
1223 panfrost_bo_unreference(view->bo);
1224 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1225 }
1226 }
1227
1228 void
1229 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1230 enum pipe_shader_type stage,
1231 struct mali_vertex_tiler_postfix *postfix)
1232 {
1233 struct panfrost_context *ctx = batch->ctx;
1234 struct panfrost_device *device = pan_device(ctx->base.screen);
1235
1236 if (!ctx->sampler_view_count[stage])
1237 return;
1238
1239 if (device->quirks & IS_BIFROST) {
1240 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1241 MALI_BIFROST_TEXTURE_LENGTH *
1242 ctx->sampler_view_count[stage],
1243 MALI_BIFROST_TEXTURE_LENGTH);
1244
1245 struct mali_bifrost_texture_packed *out =
1246 (struct mali_bifrost_texture_packed *) T.cpu;
1247
1248 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1249 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1250 struct pipe_sampler_view *pview = &view->base;
1251 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1252
1253 panfrost_update_sampler_view(view, &ctx->base);
1254 out[i] = view->bifrost_descriptor;
1255
1256 /* Add the BOs to the job so they are retained until the job is done. */
1257
1258 panfrost_batch_add_bo(batch, rsrc->bo,
1259 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1260 panfrost_bo_access_for_stage(stage));
1261
1262 panfrost_batch_add_bo(batch, view->bo,
1263 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1264 panfrost_bo_access_for_stage(stage));
1265 }
1266
1267 postfix->textures = T.gpu;
1268 } else {
1269 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1270
1271 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1272 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1273
1274 panfrost_update_sampler_view(view, &ctx->base);
1275
1276 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1277 }
1278
1279 postfix->textures = panfrost_pool_upload_aligned(&batch->pool,
1280 trampolines,
1281 sizeof(uint64_t) *
1282 ctx->sampler_view_count[stage],
1283 sizeof(uint64_t));
1284 }
1285 }
1286
1287 void
1288 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1289 enum pipe_shader_type stage,
1290 struct mali_vertex_tiler_postfix *postfix)
1291 {
1292 struct panfrost_context *ctx = batch->ctx;
1293
1294 if (!ctx->sampler_count[stage])
1295 return;
1296
1297 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1298 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1299
1300 size_t sz = desc_size * ctx->sampler_count[stage];
1301 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool, sz, desc_size);
1302 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1303
1304 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1305 out[i] = ctx->samplers[stage][i]->hw;
1306
1307 postfix->sampler_descriptor = T.gpu;
1308 }
1309
1310 void
1311 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1312 struct mali_vertex_tiler_postfix *vertex_postfix)
1313 {
1314 struct panfrost_context *ctx = batch->ctx;
1315 struct panfrost_vertex_state *so = ctx->vertex;
1316 struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1317
1318 unsigned instance_shift = vertex_postfix->instance_shift;
1319 unsigned instance_odd = vertex_postfix->instance_odd;
1320
1321 /* Worst case: everything is NPOT, which is only possible if instancing
1322  * is enabled. Otherwise a single record is guaranteed */
1323 bool could_npot = instance_shift || instance_odd;
1324
1325 struct panfrost_transfer S = panfrost_pool_alloc_aligned(&batch->pool,
1326 MALI_ATTRIBUTE_BUFFER_LENGTH * vs->attribute_count *
1327 (could_npot ? 2 : 1),
1328 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1329
1330 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1331 MALI_ATTRIBUTE_LENGTH * vs->attribute_count,
1332 MALI_ATTRIBUTE_LENGTH);
1333
1334 struct mali_attribute_buffer_packed *bufs =
1335 (struct mali_attribute_buffer_packed *) S.cpu;
1336
1337 struct mali_attribute_packed *out =
1338 (struct mali_attribute_packed *) T.cpu;
1339
1340 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1341 unsigned k = 0;
1342
1343 for (unsigned i = 0; i < so->num_elements; ++i) {
1344 /* We map buffers 1:1 with the attributes, which
1345 * means duplicating some vertex buffers (who cares? aside from
1346 * maybe some caching implications but I somehow doubt that
1347 * matters) */
1348
1349 struct pipe_vertex_element *elem = &so->pipe[i];
1350 unsigned vbi = elem->vertex_buffer_index;
1351 attrib_to_buffer[i] = k;
1352
1353 if (!(ctx->vb_mask & (1 << vbi)))
1354 continue;
1355
1356 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1357 struct panfrost_resource *rsrc;
1358
1359 rsrc = pan_resource(buf->buffer.resource);
1360 if (!rsrc)
1361 continue;
1362
1363 /* Add a dependency of the batch on the vertex buffer */
1364 panfrost_batch_add_bo(batch, rsrc->bo,
1365 PAN_BO_ACCESS_SHARED |
1366 PAN_BO_ACCESS_READ |
1367 PAN_BO_ACCESS_VERTEX_TILER);
1368
1369 /* Mask off lower bits, see offset fixup below */
1370 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1371 mali_ptr addr = raw_addr & ~63;
1372
1373 /* Since we advanced the base pointer, we shrink the buffer
1374 * size, but add the offset we subtracted */
1375 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1376 - buf->buffer_offset;
1377
1378 /* When there is a divisor, the hardware-level divisor is
1379 * the product of the instance divisor and the padded count */
1380 unsigned divisor = elem->instance_divisor;
1381 unsigned hw_divisor = ctx->padded_count * divisor;
1382 unsigned stride = buf->stride;
1383
1384 /* If there's a divisor(=1) but no instancing, we want every
1385 * attribute to be the same */
1386
1387 if (divisor && ctx->instance_count == 1)
1388 stride = 0;
1389
1390 if (!divisor || ctx->instance_count <= 1) {
1391 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1392 if (ctx->instance_count > 1)
1393 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1394
1395 cfg.pointer = addr;
1396 cfg.stride = stride;
1397 cfg.size = size;
1398 cfg.divisor_r = instance_shift;
1399 cfg.divisor_p = instance_odd;
1400 }
1401 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1402 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1403 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1404 cfg.pointer = addr;
1405 cfg.stride = stride;
1406 cfg.size = size;
1407 cfg.divisor_r = __builtin_ctz(hw_divisor);
1408 }
1409
1410 } else {
1411 unsigned shift = 0, extra_flags = 0;
1412
1413 unsigned magic_divisor =
1414 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
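/* The NPOT path appears to divide by the hardware-level divisor using the
 * usual multiply-high-and-shift trick (magic_divisor plus shift, with
 * extra_flags covering the rounding fixup); the exact fixed-point scheme is
 * defined by panfrost_compute_magic_divisor. */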
1415
1416 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1417 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1418 cfg.pointer = addr;
1419 cfg.stride = stride;
1420 cfg.size = size;
1421
1422 cfg.divisor_r = shift;
1423 cfg.divisor_e = extra_flags;
1424 }
1425
1426 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1427 cfg.divisor_numerator = magic_divisor;
1428 cfg.divisor = divisor;
1429 }
1430
1431 ++k;
1432 }
1433
1434 ++k;
1435 }
1436
1437 /* Add special gl_VertexID/gl_InstanceID buffers */
1438
1439 if (unlikely(vs->attribute_count >= PAN_VERTEX_ID)) {
1440 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1441
1442 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1443 cfg.buffer_index = k++;
1444 cfg.format = so->formats[PAN_VERTEX_ID];
1445 }
1446
1447 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1448
1449 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1450 cfg.buffer_index = k++;
1451 cfg.format = so->formats[PAN_INSTANCE_ID];
1452 }
1453 }
1454
1455 /* Attribute addresses require 64-byte alignment, so let:
1456 *
1457 * base' = base & ~63 = base - (base & 63)
1458 * offset' = offset + (base & 63)
1459 *
1460 * Since base' + offset' = base + offset, these are equivalent
1461 * addressing modes and now base is 64 aligned.
1462 */
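/* For example, a buffer at GPU address 0x10001070 with src_offset 4 becomes
 * base' = 0x10001040 and offset' = 4 + 0x30 = 52: the sum is unchanged, but
 * base' now satisfies the 64-byte alignment requirement. */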
1463
1464 unsigned start = vertex_postfix->offset_start;
1465
1466 for (unsigned i = 0; i < so->num_elements; ++i) {
1467 unsigned vbi = so->pipe[i].vertex_buffer_index;
1468 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1469
1470 /* Adjust by the masked off bits of the offset. Make sure we
1471 * read src_offset from so->hw (which is not GPU visible)
1472 * rather than target (which is) due to caching effects */
1473
1474 unsigned src_offset = so->pipe[i].src_offset;
1475
1476 /* BOs aligned to 4k so guaranteed aligned to 64 */
1477 src_offset += (buf->buffer_offset & 63);
1478
1479 /* Also, somewhat obscurely, per-instance data needs to be
1480 * offset in response to a delayed start in an indexed draw */
1481
1482 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1483 src_offset -= buf->stride * start;
1484
1485 pan_pack(out + i, ATTRIBUTE, cfg) {
1486 cfg.buffer_index = attrib_to_buffer[i];
1487 cfg.format = so->formats[i];
1488 cfg.offset = src_offset;
1489 }
1490 }
1491
1492 vertex_postfix->attributes = S.gpu;
1493 vertex_postfix->attribute_meta = T.gpu;
1494 }
1495
1496 static mali_ptr
1497 panfrost_emit_varyings(struct panfrost_batch *batch,
1498 struct mali_attribute_buffer_packed *slot,
1499 unsigned stride, unsigned count)
1500 {
1501 unsigned size = stride * count;
1502 mali_ptr ptr = panfrost_pool_alloc_aligned(&batch->invisible_pool, size, 64).gpu;
1503
1504 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1505 cfg.stride = stride;
1506 cfg.size = size;
1507 cfg.pointer = ptr;
1508 }
1509
1510 return ptr;
1511 }
1512
1513 static unsigned
1514 panfrost_streamout_offset(unsigned stride, unsigned offset,
1515 struct pipe_stream_output_target *target)
1516 {
1517 return (target->buffer_offset + (offset * stride * 4)) & 63;
1518 }
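/* This is the low 6 bits of the write address: panfrost_emit_streamout below
 * rounds the buffer pointer down to 64 bytes, so the remainder must be folded
 * back into the varying record's offset by the caller. */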
1519
1520 static void
1521 panfrost_emit_streamout(struct panfrost_batch *batch,
1522 struct mali_attribute_buffer_packed *slot,
1523 unsigned stride_words, unsigned offset, unsigned count,
1524 struct pipe_stream_output_target *target)
1525 {
1526 unsigned stride = stride_words * 4;
1527 unsigned max_size = target->buffer_size;
1528 unsigned expected_size = stride * count;
1529
1530 /* Grab the BO and bind it to the batch */
1531 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1532
1533 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1534 * the perspective of the TILER and FRAGMENT.
1535 */
1536 panfrost_batch_add_bo(batch, bo,
1537 PAN_BO_ACCESS_SHARED |
1538 PAN_BO_ACCESS_RW |
1539 PAN_BO_ACCESS_VERTEX_TILER |
1540 PAN_BO_ACCESS_FRAGMENT);
1541
1542 /* We will have an offset applied to get alignment */
1543 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
1544
1545 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1546 cfg.pointer = (addr & ~63);
1547 cfg.stride = stride;
1548 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1549 }
1550 }
1551
1552 static bool
1553 has_point_coord(unsigned mask, gl_varying_slot loc)
1554 {
1555 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1556 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1557 else if (loc == VARYING_SLOT_PNTC)
1558 return (mask & (1 << 8));
1559 else
1560 return false;
1561 }
1562
1563 /* Helpers for manipulating stream out information so we can pack varyings
1564 * accordingly. Compute the src_offset for a given captured varying */
1565
1566 static struct pipe_stream_output *
1567 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1568 {
1569 for (unsigned i = 0; i < info->num_outputs; ++i) {
1570 if (info->output[i].register_index == loc)
1571 return &info->output[i];
1572 }
1573
1574 unreachable("Varying not captured");
1575 }
1576
1577 static unsigned
1578 pan_varying_size(enum mali_format fmt)
1579 {
1580 unsigned type = MALI_EXTRACT_TYPE(fmt);
1581 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1582 unsigned bits = MALI_EXTRACT_BITS(fmt);
1583 unsigned bpc = 0;
1584
1585 if (bits == MALI_CHANNEL_FLOAT) {
1586 /* No doubles */
1587 bool fp16 = (type == MALI_FORMAT_SINT);
1588 assert(fp16 || (type == MALI_FORMAT_UNORM));
1589
1590 bpc = fp16 ? 2 : 4;
1591 } else {
1592 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1593
1594 /* See the enums */
1595 bits = 1 << bits;
1596 assert(bits >= 8);
1597 bpc = bits / 8;
1598 }
1599
1600 return bpc * chan;
1601 }
1602
1603 /* Indices for named (non-XFB) varyings that are present. These are packed
1604 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1605 * PAN_VARY_*). This has the nice property that you can lookup the buffer index
1606 * of a given special field given a shift S by:
1607 *
1608 * idx = popcount(P & ((1 << S) - 1))
1609 *
1610  * That is, count all of the varyings that come earlier; that count is the
1611  * index of this one. Likewise, the total number of special
1612 * buffers required is simply popcount(P)
1613 */
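/* For example, with GENERAL, POSITION and PSIZ present, P = 0b111; looking up
 * PSIZ (shift 2) gives popcount(0b111 & 0b011) = 2, and popcount(P) = 3
 * special buffers are required in total. */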
1614
1615 enum pan_special_varying {
1616 PAN_VARY_GENERAL = 0,
1617 PAN_VARY_POSITION = 1,
1618 PAN_VARY_PSIZ = 2,
1619 PAN_VARY_PNTCOORD = 3,
1620 PAN_VARY_FACE = 4,
1621 PAN_VARY_FRAGCOORD = 5,
1622
1623 /* Keep last */
1624 PAN_VARY_MAX,
1625 };
1626
1627 /* Given a varying, figure out which index it corresponds to */
1628
1629 static inline unsigned
1630 pan_varying_index(unsigned present, enum pan_special_varying v)
1631 {
1632 unsigned mask = (1 << v) - 1;
1633 return util_bitcount(present & mask);
1634 }
1635
1636 /* Get the base offset for XFB buffers, which by convention come after
1637 * everything else. Wrapper function for semantic reasons; by construction this
1638 * is just popcount. */
1639
1640 static inline unsigned
1641 pan_xfb_base(unsigned present)
1642 {
1643 return util_bitcount(present);
1644 }
1645
1646 /* Computes the present mask for varyings so we can start emitting varying records */
1647
1648 static inline unsigned
1649 pan_varying_present(
1650 struct panfrost_shader_state *vs,
1651 struct panfrost_shader_state *fs,
1652 unsigned quirks)
1653 {
1654 /* At the moment we always emit general and position buffers. Not
1655 * strictly necessary but usually harmless */
1656
1657 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1658
1659 /* Enable special buffers by the shader info */
1660
1661 if (vs->writes_point_size)
1662 present |= (1 << PAN_VARY_PSIZ);
1663
1664 if (fs->reads_point_coord)
1665 present |= (1 << PAN_VARY_PNTCOORD);
1666
1667 if (fs->reads_face)
1668 present |= (1 << PAN_VARY_FACE);
1669
1670 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1671 present |= (1 << PAN_VARY_FRAGCOORD);
1672
1673 /* Also, if we have a point sprite, we need a point coord buffer */
1674
1675 for (unsigned i = 0; i < fs->varying_count; i++) {
1676 gl_varying_slot loc = fs->varyings_loc[i];
1677
1678 if (has_point_coord(fs->point_sprite_mask, loc))
1679 present |= (1 << PAN_VARY_PNTCOORD);
1680 }
1681
1682 return present;
1683 }
1684
1685 /* Emitters for varying records */
1686
1687 static void
1688 pan_emit_vary(struct mali_attribute_packed *out,
1689 unsigned present, enum pan_special_varying buf,
1690 unsigned quirks, enum mali_format format,
1691 unsigned offset)
1692 {
1693 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1694 unsigned swizzle = quirks & HAS_SWIZZLES ?
1695 panfrost_get_default_swizzle(nr_channels) :
1696 panfrost_bifrost_swizzle(nr_channels);
1697
1698 pan_pack(out, ATTRIBUTE, cfg) {
1699 cfg.buffer_index = pan_varying_index(present, buf);
1700 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1701 cfg.format = (format << 12) | swizzle;
1702 cfg.offset = offset;
1703 }
1704 }
1705
1706 /* General varying that is unused */
1707
1708 static void
1709 pan_emit_vary_only(struct mali_attribute_packed *out,
1710 unsigned present, unsigned quirks)
1711 {
1712 pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
1713 }
1714
1715 /* Special records */
1716
1717 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1718 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1719 [PAN_VARY_PSIZ] = MALI_R16F,
1720 [PAN_VARY_PNTCOORD] = MALI_R16F,
1721 [PAN_VARY_FACE] = MALI_R32I,
1722 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1723 };
1724
1725 static void
1726 pan_emit_vary_special(struct mali_attribute_packed *out,
1727 unsigned present, enum pan_special_varying buf,
1728 unsigned quirks)
1729 {
1730 assert(buf < PAN_VARY_MAX);
1731 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1732 }
1733
1734 static enum mali_format
1735 pan_xfb_format(enum mali_format format, unsigned nr)
1736 {
1737 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1738 return MALI_R32F | MALI_NR_CHANNELS(nr);
1739 else
1740 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1741 }
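
/* In other words, a captured varying is always widened to 32 bits per
 * channel, with the channel count taken from the stream output declaration:
 * float data becomes 32-bit float and other types keep their base type with
 * the 32-bit channel size forced. */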
1742
1743 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1744 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1745 * value. */
1746
1747 static void
1748 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1749 unsigned present,
1750 unsigned max_xfb,
1751 unsigned *streamout_offsets,
1752 unsigned quirks,
1753 enum mali_format format,
1754 struct pipe_stream_output o)
1755 {
1756 unsigned swizzle = quirks & HAS_SWIZZLES ?
1757 panfrost_get_default_swizzle(o.num_components) :
1758 panfrost_bifrost_swizzle(o.num_components);
1759
1760 pan_pack(out, ATTRIBUTE, cfg) {
1761 /* XFB buffers come after everything else */
1762 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1763 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1764
1765 /* Override number of channels and precision to highp */
1766 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1767
1768 /* Apply the two given offsets together */
1769 cfg.offset = (o.dst_offset * 4) /* dwords */
1770 + streamout_offsets[o.output_buffer];
1771 }
1772 }
1773
1774 /* Determine if we should capture a varying for XFB. This requires actually
1775 * having a buffer for it. If we don't capture it, we'll fall back to a general
1776 * varying path (linked or unlinked, possibly discarding the write) */
1777
1778 static bool
1779 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1780 unsigned loc, unsigned max_xfb)
1781 {
1782 if (!(xfb->so_mask & (1ll << loc)))
1783 return false;
1784
1785 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1786 return o->output_buffer < max_xfb;
1787 }
1788
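/* Emit a general-purpose (non-special, non-captured) varying. Shared between
 * the two passes over the varyings: on the allocating pass (should_alloc,
 * i.e. the vertex shader), a linked varying is assigned an offset from the
 * running *gen_stride watermark and the chosen offset/format are recorded in
 * gen_offsets/gen_formats; on the fragment pass, those recorded values are
 * read back so both records agree on the slot and format. Unlinked varyings
 * get a discard record instead. */
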
1789 static void
1790 pan_emit_general_varying(struct mali_attribute_packed *out,
1791 struct panfrost_shader_state *other,
1792 struct panfrost_shader_state *xfb,
1793 gl_varying_slot loc,
1794 enum mali_format format,
1795 unsigned present,
1796 unsigned quirks,
1797 unsigned *gen_offsets,
1798 enum mali_format *gen_formats,
1799 unsigned *gen_stride,
1800 unsigned idx,
1801 bool should_alloc)
1802 {
1803 /* Check if we're linked */
1804 signed other_idx = -1;
1805
1806 for (unsigned j = 0; j < other->varying_count; ++j) {
1807 if (other->varyings_loc[j] == loc) {
1808 other_idx = j;
1809 break;
1810 }
1811 }
1812
1813 if (other_idx < 0) {
1814 pan_emit_vary_only(out, present, quirks);
1815 return;
1816 }
1817
1818 unsigned offset = gen_offsets[other_idx];
1819
1820 if (should_alloc) {
1821 /* We're linked, so allocate a slot via a watermark allocation */
1822 enum mali_format alt = other->varyings[other_idx];
1823
1824 /* Do interpolation at minimum precision */
1825 unsigned size_main = pan_varying_size(format);
1826 unsigned size_alt = pan_varying_size(alt);
1827 unsigned size = MIN2(size_main, size_alt);
1828
1829 /* If a varying is marked for XFB but not actually captured, we
1830 * should match the format to the format that would otherwise
1831 * be used for XFB, since dEQP checks for invariance here. It's
1832 * unclear if this is required by the spec. */
1833
1834 if (xfb->so_mask & (1ull << loc)) {
1835 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1836 format = pan_xfb_format(format, o->num_components);
1837 size = pan_varying_size(format);
1838 } else if (size == size_alt) {
1839 format = alt;
1840 }
1841
1842 gen_offsets[idx] = *gen_stride;
1843 gen_formats[other_idx] = format;
1844 offset = *gen_stride;
1845 *gen_stride += size;
1846 }
1847
1848 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1849 }
1850
1851 /* Higher-level wrapper around all of the above, classifying a varying as one
1852 * of the special types above or a general varying and emitting its record */
1853
1854 static void
1855 panfrost_emit_varying(
1856 struct mali_attribute_packed *out,
1857 struct panfrost_shader_state *stage,
1858 struct panfrost_shader_state *other,
1859 struct panfrost_shader_state *xfb,
1860 unsigned present,
1861 unsigned max_xfb,
1862 unsigned *streamout_offsets,
1863 unsigned quirks,
1864 unsigned *gen_offsets,
1865 enum mali_format *gen_formats,
1866 unsigned *gen_stride,
1867 unsigned idx,
1868 bool should_alloc,
1869 bool is_fragment)
1870 {
1871 gl_varying_slot loc = stage->varyings_loc[idx];
1872 enum mali_format format = stage->varyings[idx];
1873
1874 /* Override format to match linkage */
1875 if (!should_alloc && gen_formats[idx])
1876 format = gen_formats[idx];
1877
1878 if (has_point_coord(stage->point_sprite_mask, loc)) {
1879 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1880 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1881 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1882 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1883 } else if (loc == VARYING_SLOT_POS) {
1884 if (is_fragment)
1885 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1886 else
1887 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1888 } else if (loc == VARYING_SLOT_PSIZ) {
1889 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1890 } else if (loc == VARYING_SLOT_PNTC) {
1891 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1892 } else if (loc == VARYING_SLOT_FACE) {
1893 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1894 } else {
1895 pan_emit_general_varying(out, other, xfb, loc, format, present,
1896 quirks, gen_offsets, gen_formats, gen_stride,
1897 idx, should_alloc);
1898 }
1899 }
1900
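/* Emit the attribute buffer record for a special input (point coord, front
 * facing, fragment coord) if it is present. Rather than pointing at memory,
 * the record is tagged with the given special source, presumably so the
 * hardware supplies the value itself. */
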
1901 static void
1902 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1903 unsigned present,
1904 enum pan_special_varying v,
1905 unsigned special)
1906 {
1907 if (present & (1 << v)) {
1908 unsigned idx = pan_varying_index(present, v);
1909
1910 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1911 cfg.special = special;
1912 cfg.type = 0;
1913 }
1914 }
1915 }
1916
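/* Ties the above together: emits one ATTRIBUTE record per vertex shader
 * varying followed by one per fragment shader varying, then one
 * ATTRIBUTE_BUFFER record per present buffer (general, position, ...)
 * followed by one per active streamout target, and points the vertex/tiler
 * postfixes at the resulting GPU allocations. */
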
1917 void
1918 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1919 unsigned vertex_count,
1920 struct mali_vertex_tiler_postfix *vertex_postfix,
1921 struct mali_vertex_tiler_postfix *tiler_postfix,
1922 union midgard_primitive_size *primitive_size)
1923 {
1924 /* Load the shaders */
1925 struct panfrost_context *ctx = batch->ctx;
1926 struct panfrost_device *dev = pan_device(ctx->base.screen);
1927 struct panfrost_shader_state *vs, *fs;
1928 size_t vs_size, fs_size;
1929
1930 /* Allocate the varying descriptor */
1931
1932 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1933 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1934 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1935 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1936
1937 struct panfrost_transfer trans = panfrost_pool_alloc_aligned(
1938 &batch->pool, vs_size + fs_size, MALI_ATTRIBUTE_LENGTH);
1939
1940 struct pipe_stream_output_info *so = &vs->stream_output;
1941 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1942
1943 /* Check if this varying is linked by us. This is the case for
1944 * general-purpose, non-captured varyings. If it is, link it. If it's
1945 * not, use the provided stream out information to determine the
1946 * offset, since it was already linked for us. */
1947
1948 unsigned gen_offsets[32];
1949 enum mali_format gen_formats[32];
1950 memset(gen_offsets, 0, sizeof(gen_offsets));
1951 memset(gen_formats, 0, sizeof(gen_formats));
1952
1953 unsigned gen_stride = 0;
1954 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1955 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1956
1957 unsigned streamout_offsets[32];
1958
1959 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1960 streamout_offsets[i] = panfrost_streamout_offset(
1961 so->stride[i],
1962 ctx->streamout.offsets[i],
1963 ctx->streamout.targets[i]);
1964 }
1965
1966 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1967 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1968
1969 for (unsigned i = 0; i < vs->varying_count; i++) {
1970 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1971 ctx->streamout.num_targets, streamout_offsets,
1972 dev->quirks,
1973 gen_offsets, gen_formats, &gen_stride, i, true, false);
1974 }
1975
1976 for (unsigned i = 0; i < fs->varying_count; i++) {
1977 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
1978 ctx->streamout.num_targets, streamout_offsets,
1979 dev->quirks,
1980 gen_offsets, gen_formats, &gen_stride, i, false, true);
1981 }
1982
1983 unsigned xfb_base = pan_xfb_base(present);
1984 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1985 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets),
1986 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1987 struct mali_attribute_buffer_packed *varyings =
1988 (struct mali_attribute_buffer_packed *) T.cpu;
1989
1990 /* Emit the stream out buffers */
1991
1992 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
1993 ctx->vertex_count);
1994
1995 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1996 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
1997 so->stride[i],
1998 ctx->streamout.offsets[i],
1999 out_count,
2000 ctx->streamout.targets[i]);
2001 }
2002
2003 panfrost_emit_varyings(batch,
2004 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2005 gen_stride, vertex_count);
2006
2007 /* fp32 vec4 gl_Position */
2008 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2009 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2010 sizeof(float) * 4, vertex_count);
2011
2012 if (present & (1 << PAN_VARY_PSIZ)) {
2013 primitive_size->pointer = panfrost_emit_varyings(batch,
2014 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2015 2, vertex_count);
2016 }
2017
2018 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
2019 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
2020 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
2021
2022 vertex_postfix->varyings = T.gpu;
2023 tiler_postfix->varyings = T.gpu;
2024
2025 vertex_postfix->varying_meta = trans.gpu;
2026 tiler_postfix->varying_meta = trans.gpu + vs_size;
2027 }
2028
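/* Packs the accumulated prefix/postfix state into the Bifrost or Midgard
 * payload layout and enqueues the draw: a vertex job, then a tiler job
 * depending on it, unless rasterizer discard skips the tiler job or we are
 * wallpapering (in which case both jobs are injected with predicted indices,
 * as noted below). */
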
2029 void
2030 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2031 struct mali_vertex_tiler_prefix *vertex_prefix,
2032 struct mali_vertex_tiler_postfix *vertex_postfix,
2033 struct mali_vertex_tiler_prefix *tiler_prefix,
2034 struct mali_vertex_tiler_postfix *tiler_postfix,
2035 union midgard_primitive_size *primitive_size)
2036 {
2037 struct panfrost_context *ctx = batch->ctx;
2038 struct panfrost_device *device = pan_device(ctx->base.screen);
2039 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2040 struct bifrost_payload_vertex bifrost_vertex = {0,};
2041 struct bifrost_payload_tiler bifrost_tiler = {0,};
2042 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2043 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2044 void *vp, *tp;
2045 size_t vp_size, tp_size;
2046
2047 if (device->quirks & IS_BIFROST) {
2048 bifrost_vertex.prefix = *vertex_prefix;
2049 bifrost_vertex.postfix = *vertex_postfix;
2050 vp = &bifrost_vertex;
2051 vp_size = sizeof(bifrost_vertex);
2052
2053 bifrost_tiler.prefix = *tiler_prefix;
2054 bifrost_tiler.tiler.primitive_size = *primitive_size;
2055 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2056 bifrost_tiler.postfix = *tiler_postfix;
2057 tp = &bifrost_tiler;
2058 tp_size = sizeof(bifrost_tiler);
2059 } else {
2060 midgard_vertex.prefix = *vertex_prefix;
2061 midgard_vertex.postfix = *vertex_postfix;
2062 vp = &midgard_vertex;
2063 vp_size = sizeof(midgard_vertex);
2064
2065 midgard_tiler.prefix = *tiler_prefix;
2066 midgard_tiler.postfix = *tiler_postfix;
2067 midgard_tiler.primitive_size = *primitive_size;
2068 tp = &midgard_tiler;
2069 tp_size = sizeof(midgard_tiler);
2070 }
2071
2072 if (wallpapering) {
2073 /* Inject in reverse order, with "predicted" job indices.
2074 * THIS IS A HACK XXX */
2075 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2076 batch->scoreboard.job_index + 2, tp, tp_size, true);
2077 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2078 vp, vp_size, true);
2079 return;
2080 }
2081
2082 /* If rasterizer discard is enabled, only submit the vertex job */
2083
2084 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2085 vp, vp_size, false);
2086
2087 if (ctx->rasterizer->base.rasterizer_discard)
2088 return;
2089
2090 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2091 false);
2092 }
2093
2094 /* TODO: stop hardcoding this */
2095 mali_ptr
2096 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2097 {
2098 uint16_t locations[] = {
2099 128, 128,
2100 0, 256,
2101 0, 256,
2102 0, 256,
2103 0, 256,
2104 0, 256,
2105 0, 256,
2106 0, 256,
2107 0, 256,
2108 0, 256,
2109 0, 256,
2110 0, 256,
2111 0, 256,
2112 0, 256,
2113 0, 256,
2114 0, 256,
2115 0, 256,
2116 0, 256,
2117 0, 256,
2118 0, 256,
2119 0, 256,
2120 0, 256,
2121 0, 256,
2122 0, 256,
2123 0, 256,
2124 0, 256,
2125 0, 256,
2126 0, 256,
2127 0, 256,
2128 0, 256,
2129 0, 256,
2130 0, 256,
2131 128, 128,
2132 0, 0,
2133 0, 0,
2134 0, 0,
2135 0, 0,
2136 0, 0,
2137 0, 0,
2138 0, 0,
2139 0, 0,
2140 0, 0,
2141 0, 0,
2142 0, 0,
2143 0, 0,
2144 0, 0,
2145 0, 0,
2146 0, 0,
2147 };
2148
2149 return panfrost_pool_upload_aligned(&batch->pool, locations, 96 * sizeof(uint16_t), 64);
2150 }