panfrost: XMLify Bifrost preload
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 struct mali_shared_memory shared = {
62 .shared_workgroup_count = ~0,
63 };
64
65 if (batch->stack_size) {
66 struct panfrost_bo *stack =
67 panfrost_batch_get_scratchpad(batch, batch->stack_size,
68 dev->thread_tls_alloc,
69 dev->core_count);
70
71 shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
72 shared.scratchpad = stack->gpu;
73 }
74
75 postfix->shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared, sizeof(shared), 64);
76 }
77
78 static void
79 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_postfix *postfix)
81 {
82 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
83 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
84 }
85
86 static void
87 panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
88 struct mali_vertex_tiler_prefix *prefix,
89 struct mali_vertex_tiler_postfix *postfix)
90 {
91 postfix->gl_enables |= 0x7;
92 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
93 rasterizer->base.front_ccw);
94 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
95 (rasterizer->base.cull_face & PIPE_FACE_FRONT));
96 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
97 (rasterizer->base.cull_face & PIPE_FACE_BACK));
98 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
99 rasterizer->base.flatshade_first);
100 }
101
102 void
103 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
104 struct mali_vertex_tiler_prefix *prefix,
105 union midgard_primitive_size *primitive_size)
106 {
107 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
108
109 if (!panfrost_writes_point_size(ctx)) {
110 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
111 rasterizer->base.point_size :
112 rasterizer->base.line_width;
113
114 primitive_size->constant = val;
115 }
116 }
117
118 static void
119 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
120 struct mali_vertex_tiler_postfix *postfix)
121 {
122 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
123 if (ctx->occlusion_query) {
124 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
125 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
126 PAN_BO_ACCESS_SHARED |
127 PAN_BO_ACCESS_RW |
128 PAN_BO_ACCESS_FRAGMENT);
129 } else {
130 postfix->occlusion_counter = 0;
131 }
132 }
133
134 void
135 panfrost_vt_init(struct panfrost_context *ctx,
136 enum pipe_shader_type stage,
137 struct mali_vertex_tiler_prefix *prefix,
138 struct mali_vertex_tiler_postfix *postfix)
139 {
140 struct panfrost_device *device = pan_device(ctx->base.screen);
141
142 if (!ctx->shader[stage])
143 return;
144
145 memset(prefix, 0, sizeof(*prefix));
146 memset(postfix, 0, sizeof(*postfix));
147
148 if (device->quirks & IS_BIFROST) {
149 postfix->gl_enables = 0x2;
150 panfrost_vt_emit_shared_memory(ctx, postfix);
151 } else {
152 postfix->gl_enables = 0x6;
153 panfrost_vt_attach_framebuffer(ctx, postfix);
154 }
155
156 if (stage == PIPE_SHADER_FRAGMENT) {
157 panfrost_vt_update_occlusion_query(ctx, postfix);
158 panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
159 }
160 }
161
162 static unsigned
163 panfrost_translate_index_size(unsigned size)
164 {
165 switch (size) {
166 case 1:
167 return MALI_DRAW_INDEXED_UINT8;
168
169 case 2:
170 return MALI_DRAW_INDEXED_UINT16;
171
172 case 4:
173 return MALI_DRAW_INDEXED_UINT32;
174
175 default:
176 unreachable("Invalid index size");
177 }
178 }
179
180 /* Gets a GPU address for the associated index buffer. Only gauranteed to be
181 * good for the duration of the draw (transient), could last longer. Also get
182 * the bounds on the index buffer for the range accessed by the draw. We do
183 * these operations together because there are natural optimizations which
184 * require them to be together. */
185
186 static mali_ptr
187 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
188 const struct pipe_draw_info *info,
189 unsigned *min_index, unsigned *max_index)
190 {
191 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
192 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
193 off_t offset = info->start * info->index_size;
194 bool needs_indices = true;
195 mali_ptr out = 0;
196
197 if (info->max_index != ~0u) {
198 *min_index = info->min_index;
199 *max_index = info->max_index;
200 needs_indices = false;
201 }
202
203 if (!info->has_user_indices) {
204 /* Only resources can be directly mapped */
205 panfrost_batch_add_bo(batch, rsrc->bo,
206 PAN_BO_ACCESS_SHARED |
207 PAN_BO_ACCESS_READ |
208 PAN_BO_ACCESS_VERTEX_TILER);
209 out = rsrc->bo->gpu + offset;
210
211 /* Check the cache */
212 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
213 info->start,
214 info->count,
215 min_index,
216 max_index);
217 } else {
218 /* Otherwise, we need to upload to transient memory */
219 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
220 struct panfrost_transfer T =
221 panfrost_pool_alloc_aligned(&batch->pool,
222 info->count * info->index_size,
223 info->index_size);
224
225 memcpy(T.cpu, ibuf8 + offset, info->count * info->index_size);
226 out = T.gpu;
227 }
228
229 if (needs_indices) {
230 /* Fallback */
231 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
232
233 if (!info->has_user_indices)
234 panfrost_minmax_cache_add(rsrc->index_cache,
235 info->start, info->count,
236 *min_index, *max_index);
237 }
238
239 return out;
240 }
241
242 void
243 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
244 const struct pipe_draw_info *info,
245 enum mali_draw_mode draw_mode,
246 struct mali_vertex_tiler_postfix *vertex_postfix,
247 struct mali_vertex_tiler_prefix *tiler_prefix,
248 struct mali_vertex_tiler_postfix *tiler_postfix,
249 unsigned *vertex_count,
250 unsigned *padded_count)
251 {
252 tiler_prefix->draw_mode = draw_mode;
253
254 unsigned draw_flags = 0;
255
256 if (panfrost_writes_point_size(ctx))
257 draw_flags |= MALI_DRAW_VARYING_SIZE;
258
259 if (info->primitive_restart)
260 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
261
262 /* These doesn't make much sense */
263
264 draw_flags |= 0x3000;
265
266 if (info->index_size) {
267 unsigned min_index = 0, max_index = 0;
268
269 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
270 info,
271 &min_index,
272 &max_index);
273
274 /* Use the corresponding values */
275 *vertex_count = max_index - min_index + 1;
276 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
277 tiler_prefix->offset_bias_correction = -min_index;
278 tiler_prefix->index_count = MALI_POSITIVE(info->count);
279 draw_flags |= panfrost_translate_index_size(info->index_size);
280 } else {
281 tiler_prefix->indices = 0;
282 *vertex_count = ctx->vertex_count;
283 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
284 tiler_prefix->offset_bias_correction = 0;
285 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
286 }
287
288 tiler_prefix->unknown_draw = draw_flags;
289
290 /* Encode the padded vertex count */
291
292 if (info->instance_count > 1) {
293 *padded_count = panfrost_padded_vertex_count(*vertex_count);
294
295 unsigned shift = __builtin_ctz(ctx->padded_count);
296 unsigned k = ctx->padded_count >> (shift + 1);
297
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
300 } else {
301 *padded_count = *vertex_count;
302
303 /* Reset instancing state */
304 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
305 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
306 }
307 }
308
309 static void
310 panfrost_emit_compute_shader(struct panfrost_context *ctx,
311 enum pipe_shader_type st,
312 struct mali_shader_meta *meta)
313 {
314 const struct panfrost_device *dev = pan_device(ctx->base.screen);
315 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
316
317 memset(meta, 0, sizeof(*meta));
318 meta->shader = ss->shader;
319 meta->attribute_count = ss->attribute_count;
320 meta->varying_count = ss->varying_count;
321 meta->texture_count = ctx->sampler_view_count[st];
322 meta->sampler_count = ctx->sampler_count[st];
323
324 if (dev->quirks & IS_BIFROST) {
325 struct mali_bifrost_properties_packed prop;
326 struct mali_preload_vertex_packed preload;
327
328 pan_pack(&prop, BIFROST_PROPERTIES, cfg) {
329 cfg.unknown = 0x800000; /* XXX */
330 cfg.uniform_buffer_count = panfrost_ubo_count(ctx, st);
331 }
332
333 /* TODO: True compute shaders */
334 pan_pack(&preload, PRELOAD_VERTEX, cfg) {
335 cfg.uniform_count = ss->uniform_count;
336 cfg.vertex_id = true;
337 cfg.instance_id = true;
338 }
339
340 memcpy(&meta->bifrost_props, &prop, sizeof(prop));
341 memcpy(&meta->bifrost_preload, &preload, sizeof(preload));
342 } else {
343 struct mali_midgard_properties_packed prop;
344
345 pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
346 cfg.uniform_buffer_count = panfrost_ubo_count(ctx, st);
347 cfg.uniform_count = ss->uniform_count;
348 cfg.work_register_count = ss->work_reg_count;
349 cfg.writes_globals = ss->writes_global;
350 cfg.suppress_inf_nan = true; /* XXX */
351 }
352
353 memcpy(&meta->midgard_props, &prop, sizeof(prop));
354 }
355 }
356
357 static unsigned
358 translate_tex_wrap(enum pipe_tex_wrap w)
359 {
360 switch (w) {
361 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
362 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
363 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
364 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
365 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
366 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
367 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
368 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
369 default: unreachable("Invalid wrap");
370 }
371 }
372
373 /* The hardware compares in the wrong order order, so we have to flip before
374 * encoding. Yes, really. */
375
376 static enum mali_func
377 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
378 {
379 if (!cso->compare_mode)
380 return MALI_FUNC_NEVER;
381
382 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
383 return panfrost_flip_compare_func(f);
384 }
385
386 static enum mali_mipmap_mode
387 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
388 {
389 switch (f) {
390 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
391 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
392 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
393 default: unreachable("Invalid");
394 }
395 }
396
397 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
398 struct mali_midgard_sampler_packed *hw)
399 {
400 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
401 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
402 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
403 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
404 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
405 cfg.normalized_coordinates = cso->normalized_coords;
406
407 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
408
409 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
410
411 /* If necessary, we disable mipmapping in the sampler descriptor by
412 * clamping the LOD as tight as possible (from 0 to epsilon,
413 * essentially -- remember these are fixed point numbers, so
414 * epsilon=1/256) */
415
416 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
417 cfg.minimum_lod + 1 :
418 FIXED_16(cso->max_lod, false);
419
420 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
421 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
422 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
423
424 cfg.compare_function = panfrost_sampler_compare_func(cso);
425 cfg.seamless_cube_map = cso->seamless_cube_map;
426
427 cfg.border_color_r = cso->border_color.f[0];
428 cfg.border_color_g = cso->border_color.f[1];
429 cfg.border_color_b = cso->border_color.f[2];
430 cfg.border_color_a = cso->border_color.f[3];
431 }
432 }
433
434 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
435 struct mali_bifrost_sampler_packed *hw)
436 {
437 pan_pack(hw, BIFROST_SAMPLER, cfg) {
438 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
439 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
440 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
441 cfg.normalized_coordinates = cso->normalized_coords;
442
443 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
444 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
445 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
446
447 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
448 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
449 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
450
451 cfg.compare_function = panfrost_sampler_compare_func(cso);
452 cfg.seamless_cube_map = cso->seamless_cube_map;
453 }
454 }
455
456 static bool
457 panfrost_fs_required(
458 struct panfrost_shader_state *fs,
459 struct panfrost_blend_final *blend,
460 unsigned rt_count)
461 {
462 /* If we generally have side effects */
463 if (fs->fs_sidefx)
464 return true;
465
466 /* If colour is written we need to execute */
467 for (unsigned i = 0; i < rt_count; ++i) {
468 if (!blend[i].no_colour)
469 return true;
470 }
471
472 /* If depth is written and not implied we need to execute.
473 * TODO: Predicate on Z/S writes being enabled */
474 return (fs->writes_depth || fs->writes_stencil);
475 }
476
477 static void
478 panfrost_emit_blend(struct panfrost_batch *batch, void *rts,
479 struct panfrost_blend_final *blend)
480 {
481 const struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
482 struct panfrost_shader_state *fs = panfrost_get_shader_state(batch->ctx, PIPE_SHADER_FRAGMENT);
483 unsigned rt_count = batch->key.nr_cbufs;
484
485 struct bifrost_blend_rt *brts = rts;
486 struct midgard_blend_rt *mrts = rts;
487
488 /* Disable blending for depth-only on Bifrost */
489
490 if (rt_count == 0 && dev->quirks & IS_BIFROST)
491 brts[0].unk2 = 0x3;
492
493 for (unsigned i = 0; i < rt_count; ++i) {
494 unsigned flags = 0;
495
496 pan_pack(&flags, BLEND_FLAGS, cfg) {
497 if (blend[i].no_colour) {
498 cfg.enable = false;
499 break;
500 }
501
502 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
503
504 cfg.srgb = util_format_is_srgb(batch->key.cbufs[i]->format);
505 cfg.load_destination = blend[i].load_dest;
506 cfg.dither_disable = !batch->ctx->blend->base.dither;
507
508 if (!(dev->quirks & IS_BIFROST))
509 cfg.midgard_blend_shader = blend[i].is_shader;
510 }
511
512 if (dev->quirks & IS_BIFROST) {
513 brts[i].flags = flags;
514
515 if (blend[i].is_shader) {
516 /* The blend shader's address needs to be at
517 * the same top 32 bit as the fragment shader.
518 * TODO: Ensure that's always the case.
519 */
520 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
521 (fs->bo->gpu & (0xffffffffull << 32)));
522 brts[i].shader = blend[i].shader.gpu;
523 brts[i].unk2 = 0x0;
524 } else {
525 enum pipe_format format = batch->key.cbufs[i]->format;
526 const struct util_format_description *format_desc;
527 format_desc = util_format_description(format);
528
529 brts[i].equation = blend[i].equation.equation;
530
531 /* TODO: this is a bit more complicated */
532 brts[i].constant = blend[i].equation.constant;
533
534 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
535
536 /* 0x19 disables blending and forces REPLACE
537 * mode (equivalent to rgb_mode = alpha_mode =
538 * x122, colour mask = 0xF). 0x1a allows
539 * blending. */
540 brts[i].unk2 = blend[i].opaque ? 0x19 : 0x1a;
541
542 brts[i].shader_type = fs->blend_types[i];
543 }
544 } else {
545 memcpy(&mrts[i].flags, &flags, sizeof(flags));
546
547 if (blend[i].is_shader) {
548 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
549 } else {
550 mrts[i].blend.equation = blend[i].equation.equation;
551 mrts[i].blend.constant = blend[i].equation.constant;
552 }
553 }
554 }
555 }
556
557 static void
558 panfrost_emit_frag_shader(struct panfrost_context *ctx,
559 struct mali_shader_meta *fragmeta,
560 struct panfrost_blend_final *blend)
561 {
562 const struct panfrost_device *dev = pan_device(ctx->base.screen);
563 struct panfrost_shader_state *fs;
564
565 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
566
567 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
568 const struct panfrost_zsa_state *zsa = ctx->depth_stencil;
569 unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
570
571 memset(fragmeta, 0, sizeof(*fragmeta));
572
573 fragmeta->shader = fs->shader;
574 fragmeta->attribute_count = fs->attribute_count;
575 fragmeta->varying_count = fs->varying_count;
576 fragmeta->texture_count = ctx->sampler_view_count[PIPE_SHADER_FRAGMENT];
577 fragmeta->sampler_count = ctx->sampler_count[PIPE_SHADER_FRAGMENT];
578
579 if (dev->quirks & IS_BIFROST) {
580 struct mali_bifrost_properties_packed prop;
581 struct mali_preload_fragment_packed preload;
582
583 bool no_blend = true;
584
585 for (unsigned i = 0; i < rt_count; ++i)
586 no_blend &= (!blend[i].load_dest | blend[i].no_colour);
587
588 pan_pack(&prop, BIFROST_PROPERTIES, cfg) {
589 cfg.unknown = 0x950020; /* XXX */
590 cfg.uniform_buffer_count = panfrost_ubo_count(ctx, PIPE_SHADER_FRAGMENT);
591 cfg.early_z_enable = !fs->can_discard && !fs->writes_depth && no_blend;
592 }
593
594 pan_pack(&preload, PRELOAD_FRAGMENT, cfg) {
595 cfg.uniform_count = fs->uniform_count;
596 cfg.fragment_position = fs->reads_frag_coord;
597 }
598
599 memcpy(&fragmeta->bifrost_props, &prop, sizeof(prop));
600 memcpy(&fragmeta->bifrost_preload, &preload, sizeof(preload));
601 } else {
602 struct mali_midgard_properties_packed prop;
603
604 /* Reasons to disable early-Z from a shader perspective */
605 bool late_z = fs->can_discard || fs->writes_global ||
606 fs->writes_depth || fs->writes_stencil;
607
608 /* Reasons to disable early-Z from a CSO perspective */
609 bool alpha_to_coverage = ctx->blend->base.alpha_to_coverage;
610
611 /* If either depth or stencil is enabled, discard matters */
612 bool zs_enabled =
613 (zsa->base.depth.enabled && zsa->base.depth.func != PIPE_FUNC_ALWAYS) ||
614 zsa->base.stencil[0].enabled;
615
616 bool has_blend_shader = false;
617
618 for (unsigned c = 0; c < rt_count; ++c)
619 has_blend_shader |= blend[c].is_shader;
620
621 pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
622 cfg.uniform_buffer_count = panfrost_ubo_count(ctx, PIPE_SHADER_FRAGMENT);
623 cfg.uniform_count = fs->uniform_count;
624 cfg.work_register_count = fs->work_reg_count;
625 cfg.writes_globals = fs->writes_global;
626 cfg.suppress_inf_nan = true; /* XXX */
627
628 /* TODO: Reduce this limit? */
629 if (has_blend_shader)
630 cfg.work_register_count = MAX2(cfg.work_register_count, 8);
631
632 cfg.stencil_from_shader = fs->writes_stencil;
633 cfg.helper_invocation_enable = fs->helper_invocations;
634 cfg.depth_source = fs->writes_depth ?
635 MALI_DEPTH_SOURCE_SHADER :
636 MALI_DEPTH_SOURCE_FIXED_FUNCTION;
637
638 /* Depend on other state */
639 cfg.early_z_enable = !(late_z || alpha_to_coverage);
640 cfg.reads_tilebuffer = fs->outputs_read || (!zs_enabled && fs->can_discard);
641 cfg.reads_depth_stencil = zs_enabled && fs->can_discard;
642 }
643
644 memcpy(&fragmeta->midgard_props, &prop, sizeof(prop));
645 }
646
647 bool msaa = rast->multisample;
648 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
649
650 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
651 fragmeta->unknown2_4 = 0x4e0;
652
653 /* TODO: Sample size */
654 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
655 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
656
657 /* EXT_shader_framebuffer_fetch requires the shader to be run
658 * per-sample when outputs are read. */
659 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
660 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
661
662 fragmeta->depth_units = rast->offset_units * 2.0f;
663 fragmeta->depth_factor = rast->offset_scale;
664
665 /* XXX: Which bit is which? Does this maybe allow offseting not-tri? */
666
667 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
668 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
669
670 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
671 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
672
673 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
674 zsa->base.stencil[0].enabled);
675
676 fragmeta->stencil_mask_front = zsa->stencil_mask_front;
677 fragmeta->stencil_mask_back = zsa->stencil_mask_back;
678
679 /* Bottom bits for stencil ref, exactly one word */
680 fragmeta->stencil_front.opaque[0] = zsa->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
681
682 /* If back-stencil is not enabled, use the front values */
683
684 if (zsa->base.stencil[1].enabled)
685 fragmeta->stencil_back.opaque[0] = zsa->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
686 else
687 fragmeta->stencil_back = fragmeta->stencil_front;
688
689 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
690 zsa->base.depth.writemask);
691
692 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
693 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(
694 zsa->base.depth.enabled ? zsa->base.depth.func : PIPE_FUNC_ALWAYS));
695
696 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
697 ctx->blend->base.alpha_to_coverage);
698
699 /* Disable shader execution if we can */
700 if (dev->quirks & MIDGARD_SHADERLESS
701 && !panfrost_fs_required(fs, blend, rt_count)) {
702 fragmeta->shader = 0x1;
703 fragmeta->attribute_count = 0;
704 fragmeta->varying_count = 0;
705 fragmeta->texture_count = 0;
706 fragmeta->sampler_count = 0;
707
708 /* This feature is not known to work on Bifrost */
709 struct mali_midgard_properties_packed prop;
710
711 pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
712 cfg.work_register_count = 1;
713 cfg.depth_source = MALI_DEPTH_SOURCE_FIXED_FUNCTION;
714 cfg.early_z_enable = true;
715 }
716
717 memcpy(&fragmeta->midgard_props, &prop, sizeof(prop));
718 }
719
720 if (dev->quirks & MIDGARD_SFBD) {
721 /* When only a single render target platform is used, the blend
722 * information is inside the shader meta itself. We additionally
723 * need to signal CAN_DISCARD for nontrivial blend modes (so
724 * we're able to read back the destination buffer) */
725
726 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
727 blend[0].is_shader);
728
729 if (blend[0].is_shader) {
730 fragmeta->blend.shader = blend[0].shader.gpu |
731 blend[0].shader.first_tag;
732 } else {
733 fragmeta->blend.equation = blend[0].equation.equation;
734 fragmeta->blend.constant = blend[0].equation.constant;
735 }
736
737 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
738 blend[0].load_dest);
739
740 fragmeta->unknown2_4 |= 0x10;
741 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER, !ctx->blend->base.dither);
742 } else if (!(dev->quirks & IS_BIFROST)) {
743 /* Bug where MRT-capable hw apparently reads the last blend
744 * shader from here instead of the usual location? */
745
746 for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
747 if (!blend[rt].is_shader)
748 continue;
749
750 fragmeta->blend.shader = blend[rt].shader.gpu |
751 blend[rt].shader.first_tag;
752 break;
753 }
754 }
755 }
756
757 void
758 panfrost_emit_shader_meta(struct panfrost_batch *batch,
759 enum pipe_shader_type st,
760 struct mali_vertex_tiler_postfix *postfix)
761 {
762 struct panfrost_context *ctx = batch->ctx;
763 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
764
765 if (!ss) {
766 postfix->shader = 0;
767 return;
768 }
769
770 struct mali_shader_meta meta;
771
772 /* Add the shader BO to the batch. */
773 panfrost_batch_add_bo(batch, ss->bo,
774 PAN_BO_ACCESS_PRIVATE |
775 PAN_BO_ACCESS_READ |
776 panfrost_bo_access_for_stage(st));
777
778 mali_ptr shader_ptr;
779
780 if (st == PIPE_SHADER_FRAGMENT) {
781 struct panfrost_device *dev = pan_device(ctx->base.screen);
782 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
783 size_t desc_size = sizeof(meta);
784 void *rts = NULL;
785 struct panfrost_transfer xfer;
786 unsigned rt_size;
787
788 if (dev->quirks & MIDGARD_SFBD)
789 rt_size = 0;
790 else if (dev->quirks & IS_BIFROST)
791 rt_size = sizeof(struct bifrost_blend_rt);
792 else
793 rt_size = sizeof(struct midgard_blend_rt);
794
795 desc_size += rt_size * rt_count;
796
797 if (rt_size)
798 rts = rzalloc_size(ctx, rt_size * rt_count);
799
800 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
801
802 for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
803 blend[c] = panfrost_get_blend_for_context(ctx, c);
804
805 panfrost_emit_frag_shader(ctx, &meta, blend);
806
807 if (!(dev->quirks & MIDGARD_SFBD))
808 panfrost_emit_blend(batch, rts, blend);
809 else
810 batch->draws |= PIPE_CLEAR_COLOR0;
811
812 xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, sizeof(meta));
813
814 memcpy(xfer.cpu, &meta, sizeof(meta));
815 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
816
817 if (rt_size)
818 ralloc_free(rts);
819
820 shader_ptr = xfer.gpu;
821 } else {
822 panfrost_emit_compute_shader(ctx, st, &meta);
823
824 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
825 sizeof(meta));
826 }
827
828 postfix->shader = shader_ptr;
829 }
830
831 void
832 panfrost_emit_viewport(struct panfrost_batch *batch,
833 struct mali_vertex_tiler_postfix *tiler_postfix)
834 {
835 struct panfrost_context *ctx = batch->ctx;
836 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
837 const struct pipe_scissor_state *ss = &ctx->scissor;
838 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
839 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
840
841 /* Derive min/max from translate/scale. Note since |x| >= 0 by
842 * definition, we have that -|x| <= |x| hence translate - |scale| <=
843 * translate + |scale|, so the ordering is correct here. */
844 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
845 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
846 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
847 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
848 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
849 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
850
851 /* Scissor to the intersection of viewport and to the scissor, clamped
852 * to the framebuffer */
853
854 unsigned minx = MIN2(fb->width, vp_minx);
855 unsigned maxx = MIN2(fb->width, vp_maxx);
856 unsigned miny = MIN2(fb->height, vp_miny);
857 unsigned maxy = MIN2(fb->height, vp_maxy);
858
859 if (ss && rast->scissor) {
860 minx = MAX2(ss->minx, minx);
861 miny = MAX2(ss->miny, miny);
862 maxx = MIN2(ss->maxx, maxx);
863 maxy = MIN2(ss->maxy, maxy);
864 }
865
866 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
867
868 pan_pack(T.cpu, VIEWPORT, cfg) {
869 cfg.scissor_minimum_x = minx;
870 cfg.scissor_minimum_y = miny;
871 cfg.scissor_maximum_x = maxx - 1;
872 cfg.scissor_maximum_y = maxy - 1;
873
874 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
875 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
876 }
877
878 tiler_postfix->viewport = T.gpu;
879 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
880 }
881
882 static mali_ptr
883 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
884 enum pipe_shader_type st,
885 struct panfrost_constant_buffer *buf,
886 unsigned index)
887 {
888 struct pipe_constant_buffer *cb = &buf->cb[index];
889 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
890
891 if (rsrc) {
892 panfrost_batch_add_bo(batch, rsrc->bo,
893 PAN_BO_ACCESS_SHARED |
894 PAN_BO_ACCESS_READ |
895 panfrost_bo_access_for_stage(st));
896
897 /* Alignment gauranteed by
898 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
899 return rsrc->bo->gpu + cb->buffer_offset;
900 } else if (cb->user_buffer) {
901 return panfrost_pool_upload_aligned(&batch->pool,
902 cb->user_buffer +
903 cb->buffer_offset,
904 cb->buffer_size, 16);
905 } else {
906 unreachable("No constant buffer");
907 }
908 }
909
910 struct sysval_uniform {
911 union {
912 float f[4];
913 int32_t i[4];
914 uint32_t u[4];
915 uint64_t du[2];
916 };
917 };
918
919 static void
920 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
921 struct sysval_uniform *uniform)
922 {
923 struct panfrost_context *ctx = batch->ctx;
924 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
925
926 uniform->f[0] = vp->scale[0];
927 uniform->f[1] = vp->scale[1];
928 uniform->f[2] = vp->scale[2];
929 }
930
931 static void
932 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
933 struct sysval_uniform *uniform)
934 {
935 struct panfrost_context *ctx = batch->ctx;
936 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
937
938 uniform->f[0] = vp->translate[0];
939 uniform->f[1] = vp->translate[1];
940 uniform->f[2] = vp->translate[2];
941 }
942
943 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
944 enum pipe_shader_type st,
945 unsigned int sysvalid,
946 struct sysval_uniform *uniform)
947 {
948 struct panfrost_context *ctx = batch->ctx;
949 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
950 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
951 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
952 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
953
954 assert(dim);
955 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
956
957 if (dim > 1)
958 uniform->i[1] = u_minify(tex->texture->height0,
959 tex->u.tex.first_level);
960
961 if (dim > 2)
962 uniform->i[2] = u_minify(tex->texture->depth0,
963 tex->u.tex.first_level);
964
965 if (is_array)
966 uniform->i[dim] = tex->texture->array_size;
967 }
968
969 static void
970 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
971 enum pipe_shader_type st,
972 unsigned ssbo_id,
973 struct sysval_uniform *uniform)
974 {
975 struct panfrost_context *ctx = batch->ctx;
976
977 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
978 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
979
980 /* Compute address */
981 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
982
983 panfrost_batch_add_bo(batch, bo,
984 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
985 panfrost_bo_access_for_stage(st));
986
987 /* Upload address and size as sysval */
988 uniform->du[0] = bo->gpu + sb.buffer_offset;
989 uniform->u[2] = sb.buffer_size;
990 }
991
992 static void
993 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
994 enum pipe_shader_type st,
995 unsigned samp_idx,
996 struct sysval_uniform *uniform)
997 {
998 struct panfrost_context *ctx = batch->ctx;
999 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1000
1001 uniform->f[0] = sampl->min_lod;
1002 uniform->f[1] = sampl->max_lod;
1003 uniform->f[2] = sampl->lod_bias;
1004
1005 /* Even without any errata, Midgard represents "no mipmapping" as
1006 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1007 * panfrost_create_sampler_state which also explains our choice of
1008 * epsilon value (again to keep behaviour consistent) */
1009
1010 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1011 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1012 }
1013
1014 static void
1015 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1016 struct sysval_uniform *uniform)
1017 {
1018 struct panfrost_context *ctx = batch->ctx;
1019
1020 uniform->u[0] = ctx->compute_grid->grid[0];
1021 uniform->u[1] = ctx->compute_grid->grid[1];
1022 uniform->u[2] = ctx->compute_grid->grid[2];
1023 }
1024
1025 static void
1026 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1027 struct panfrost_shader_state *ss,
1028 enum pipe_shader_type st)
1029 {
1030 struct sysval_uniform *uniforms = (void *)buf;
1031
1032 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1033 int sysval = ss->sysval[i];
1034
1035 switch (PAN_SYSVAL_TYPE(sysval)) {
1036 case PAN_SYSVAL_VIEWPORT_SCALE:
1037 panfrost_upload_viewport_scale_sysval(batch,
1038 &uniforms[i]);
1039 break;
1040 case PAN_SYSVAL_VIEWPORT_OFFSET:
1041 panfrost_upload_viewport_offset_sysval(batch,
1042 &uniforms[i]);
1043 break;
1044 case PAN_SYSVAL_TEXTURE_SIZE:
1045 panfrost_upload_txs_sysval(batch, st,
1046 PAN_SYSVAL_ID(sysval),
1047 &uniforms[i]);
1048 break;
1049 case PAN_SYSVAL_SSBO:
1050 panfrost_upload_ssbo_sysval(batch, st,
1051 PAN_SYSVAL_ID(sysval),
1052 &uniforms[i]);
1053 break;
1054 case PAN_SYSVAL_NUM_WORK_GROUPS:
1055 panfrost_upload_num_work_groups_sysval(batch,
1056 &uniforms[i]);
1057 break;
1058 case PAN_SYSVAL_SAMPLER:
1059 panfrost_upload_sampler_sysval(batch, st,
1060 PAN_SYSVAL_ID(sysval),
1061 &uniforms[i]);
1062 break;
1063 default:
1064 assert(0);
1065 }
1066 }
1067 }
1068
1069 static const void *
1070 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1071 unsigned index)
1072 {
1073 struct pipe_constant_buffer *cb = &buf->cb[index];
1074 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1075
1076 if (rsrc)
1077 return rsrc->bo->cpu;
1078 else if (cb->user_buffer)
1079 return cb->user_buffer;
1080 else
1081 unreachable("No constant buffer");
1082 }
1083
1084 void
1085 panfrost_emit_const_buf(struct panfrost_batch *batch,
1086 enum pipe_shader_type stage,
1087 struct mali_vertex_tiler_postfix *postfix)
1088 {
1089 struct panfrost_context *ctx = batch->ctx;
1090 struct panfrost_shader_variants *all = ctx->shader[stage];
1091
1092 if (!all)
1093 return;
1094
1095 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1096
1097 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1098
1099 /* Uniforms are implicitly UBO #0 */
1100 bool has_uniforms = buf->enabled_mask & (1 << 0);
1101
1102 /* Allocate room for the sysval and the uniforms */
1103 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1104 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1105 size_t size = sys_size + uniform_size;
1106 struct panfrost_transfer transfer =
1107 panfrost_pool_alloc_aligned(&batch->pool, size, 16);
1108
1109 /* Upload sysvals requested by the shader */
1110 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1111
1112 /* Upload uniforms */
1113 if (has_uniforms && uniform_size) {
1114 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1115 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1116 }
1117
1118 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1119 * uploaded */
1120
1121 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1122 assert(ubo_count >= 1);
1123
1124 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1125 struct panfrost_transfer ubos =
1126 panfrost_pool_alloc_aligned(&batch->pool, sz,
1127 MALI_UNIFORM_BUFFER_LENGTH);
1128
1129 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1130
1131 /* Upload uniforms as a UBO */
1132
1133 if (size) {
1134 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1135 cfg.entries = DIV_ROUND_UP(size, 16);
1136 cfg.pointer = transfer.gpu;
1137 }
1138 } else {
1139 *ubo_ptr = 0;
1140 }
1141
1142 /* The rest are honest-to-goodness UBOs */
1143
1144 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1145 size_t usz = buf->cb[ubo].buffer_size;
1146 bool enabled = buf->enabled_mask & (1 << ubo);
1147 bool empty = usz == 0;
1148
1149 if (!enabled || empty) {
1150 ubo_ptr[ubo] = 0;
1151 continue;
1152 }
1153
1154 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1155 cfg.entries = DIV_ROUND_UP(usz, 16);
1156 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1157 stage, buf, ubo);
1158 }
1159 }
1160
1161 postfix->uniforms = transfer.gpu;
1162 postfix->uniform_buffers = ubos.gpu;
1163
1164 buf->dirty_mask = 0;
1165 }
1166
1167 void
1168 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1169 const struct pipe_grid_info *info,
1170 struct midgard_payload_vertex_tiler *vtp)
1171 {
1172 struct panfrost_context *ctx = batch->ctx;
1173 struct panfrost_device *dev = pan_device(ctx->base.screen);
1174 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1175 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1176 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1177 128));
1178
1179 unsigned log2_instances =
1180 util_logbase2_ceil(info->grid[0]) +
1181 util_logbase2_ceil(info->grid[1]) +
1182 util_logbase2_ceil(info->grid[2]);
1183
1184 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
1185 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1186 shared_size,
1187 1);
1188
1189 struct mali_shared_memory shared = {
1190 .shared_memory = bo->gpu,
1191 .shared_workgroup_count = log2_instances,
1192 .shared_shift = util_logbase2(single_size) + 1
1193 };
1194
1195 vtp->postfix.shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared,
1196 sizeof(shared), 64);
1197 }
1198
1199 static mali_ptr
1200 panfrost_get_tex_desc(struct panfrost_batch *batch,
1201 enum pipe_shader_type st,
1202 struct panfrost_sampler_view *view)
1203 {
1204 if (!view)
1205 return (mali_ptr) 0;
1206
1207 struct pipe_sampler_view *pview = &view->base;
1208 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1209
1210 /* Add the BO to the job so it's retained until the job is done. */
1211
1212 panfrost_batch_add_bo(batch, rsrc->bo,
1213 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1214 panfrost_bo_access_for_stage(st));
1215
1216 panfrost_batch_add_bo(batch, view->bo,
1217 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1218 panfrost_bo_access_for_stage(st));
1219
1220 return view->bo->gpu;
1221 }
1222
1223 static void
1224 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1225 struct pipe_context *pctx)
1226 {
1227 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1228 if (view->texture_bo != rsrc->bo->gpu ||
1229 view->modifier != rsrc->modifier) {
1230 panfrost_bo_unreference(view->bo);
1231 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1232 }
1233 }
1234
1235 void
1236 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1237 enum pipe_shader_type stage,
1238 struct mali_vertex_tiler_postfix *postfix)
1239 {
1240 struct panfrost_context *ctx = batch->ctx;
1241 struct panfrost_device *device = pan_device(ctx->base.screen);
1242
1243 if (!ctx->sampler_view_count[stage])
1244 return;
1245
1246 if (device->quirks & IS_BIFROST) {
1247 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1248 MALI_BIFROST_TEXTURE_LENGTH *
1249 ctx->sampler_view_count[stage],
1250 MALI_BIFROST_TEXTURE_LENGTH);
1251
1252 struct mali_bifrost_texture_packed *out =
1253 (struct mali_bifrost_texture_packed *) T.cpu;
1254
1255 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1256 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1257 struct pipe_sampler_view *pview = &view->base;
1258 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1259
1260 panfrost_update_sampler_view(view, &ctx->base);
1261 out[i] = view->bifrost_descriptor;
1262
1263 /* Add the BOs to the job so they are retained until the job is done. */
1264
1265 panfrost_batch_add_bo(batch, rsrc->bo,
1266 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1267 panfrost_bo_access_for_stage(stage));
1268
1269 panfrost_batch_add_bo(batch, view->bo,
1270 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1271 panfrost_bo_access_for_stage(stage));
1272 }
1273
1274 postfix->textures = T.gpu;
1275 } else {
1276 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1277
1278 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1279 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1280
1281 panfrost_update_sampler_view(view, &ctx->base);
1282
1283 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1284 }
1285
1286 postfix->textures = panfrost_pool_upload_aligned(&batch->pool,
1287 trampolines,
1288 sizeof(uint64_t) *
1289 ctx->sampler_view_count[stage],
1290 sizeof(uint64_t));
1291 }
1292 }
1293
1294 void
1295 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1296 enum pipe_shader_type stage,
1297 struct mali_vertex_tiler_postfix *postfix)
1298 {
1299 struct panfrost_context *ctx = batch->ctx;
1300
1301 if (!ctx->sampler_count[stage])
1302 return;
1303
1304 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1305 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1306
1307 size_t sz = desc_size * ctx->sampler_count[stage];
1308 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool, sz, desc_size);
1309 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1310
1311 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1312 out[i] = ctx->samplers[stage][i]->hw;
1313
1314 postfix->sampler_descriptor = T.gpu;
1315 }
1316
1317 void
1318 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1319 struct mali_vertex_tiler_postfix *vertex_postfix)
1320 {
1321 struct panfrost_context *ctx = batch->ctx;
1322 struct panfrost_vertex_state *so = ctx->vertex;
1323 struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1324
1325 unsigned instance_shift = vertex_postfix->instance_shift;
1326 unsigned instance_odd = vertex_postfix->instance_odd;
1327
1328 /* Worst case: everything is NPOT, which is only possible if instancing
1329 * is enabled. Otherwise single record is gauranteed */
1330 bool could_npot = instance_shift || instance_odd;
1331
1332 struct panfrost_transfer S = panfrost_pool_alloc_aligned(&batch->pool,
1333 MALI_ATTRIBUTE_BUFFER_LENGTH * vs->attribute_count *
1334 (could_npot ? 2 : 1),
1335 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1336
1337 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1338 MALI_ATTRIBUTE_LENGTH * vs->attribute_count,
1339 MALI_ATTRIBUTE_LENGTH);
1340
1341 struct mali_attribute_buffer_packed *bufs =
1342 (struct mali_attribute_buffer_packed *) S.cpu;
1343
1344 struct mali_attribute_packed *out =
1345 (struct mali_attribute_packed *) T.cpu;
1346
1347 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1348 unsigned k = 0;
1349
1350 for (unsigned i = 0; i < so->num_elements; ++i) {
1351 /* We map buffers 1:1 with the attributes, which
1352 * means duplicating some vertex buffers (who cares? aside from
1353 * maybe some caching implications but I somehow doubt that
1354 * matters) */
1355
1356 struct pipe_vertex_element *elem = &so->pipe[i];
1357 unsigned vbi = elem->vertex_buffer_index;
1358 attrib_to_buffer[i] = k;
1359
1360 if (!(ctx->vb_mask & (1 << vbi)))
1361 continue;
1362
1363 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1364 struct panfrost_resource *rsrc;
1365
1366 rsrc = pan_resource(buf->buffer.resource);
1367 if (!rsrc)
1368 continue;
1369
1370 /* Add a dependency of the batch on the vertex buffer */
1371 panfrost_batch_add_bo(batch, rsrc->bo,
1372 PAN_BO_ACCESS_SHARED |
1373 PAN_BO_ACCESS_READ |
1374 PAN_BO_ACCESS_VERTEX_TILER);
1375
1376 /* Mask off lower bits, see offset fixup below */
1377 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1378 mali_ptr addr = raw_addr & ~63;
1379
1380 /* Since we advanced the base pointer, we shrink the buffer
1381 * size, but add the offset we subtracted */
1382 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1383 - buf->buffer_offset;
1384
1385 /* When there is a divisor, the hardware-level divisor is
1386 * the product of the instance divisor and the padded count */
1387 unsigned divisor = elem->instance_divisor;
1388 unsigned hw_divisor = ctx->padded_count * divisor;
1389 unsigned stride = buf->stride;
1390
1391 /* If there's a divisor(=1) but no instancing, we want every
1392 * attribute to be the same */
1393
1394 if (divisor && ctx->instance_count == 1)
1395 stride = 0;
1396
1397 if (!divisor || ctx->instance_count <= 1) {
1398 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1399 if (ctx->instance_count > 1)
1400 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1401
1402 cfg.pointer = addr;
1403 cfg.stride = stride;
1404 cfg.size = size;
1405 cfg.divisor_r = instance_shift;
1406 cfg.divisor_p = instance_odd;
1407 }
1408 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1409 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1410 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1411 cfg.pointer = addr;
1412 cfg.stride = stride;
1413 cfg.size = size;
1414 cfg.divisor_r = __builtin_ctz(hw_divisor);
1415 }
1416
1417 } else {
1418 unsigned shift = 0, extra_flags = 0;
1419
1420 unsigned magic_divisor =
1421 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
1422
1423 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1424 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1425 cfg.pointer = addr;
1426 cfg.stride = stride;
1427 cfg.size = size;
1428
1429 cfg.divisor_r = shift;
1430 cfg.divisor_e = extra_flags;
1431 }
1432
1433 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1434 cfg.divisor_numerator = magic_divisor;
1435 cfg.divisor = divisor;
1436 }
1437
1438 ++k;
1439 }
1440
1441 ++k;
1442 }
1443
1444 /* Add special gl_VertexID/gl_InstanceID buffers */
1445
1446 if (unlikely(vs->attribute_count >= PAN_VERTEX_ID)) {
1447 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1448
1449 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1450 cfg.buffer_index = k++;
1451 cfg.format = so->formats[PAN_VERTEX_ID];
1452 }
1453
1454 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1455
1456 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1457 cfg.buffer_index = k++;
1458 cfg.format = so->formats[PAN_INSTANCE_ID];
1459 }
1460 }
1461
1462 /* Attribute addresses require 64-byte alignment, so let:
1463 *
1464 * base' = base & ~63 = base - (base & 63)
1465 * offset' = offset + (base & 63)
1466 *
1467 * Since base' + offset' = base + offset, these are equivalent
1468 * addressing modes and now base is 64 aligned.
1469 */
1470
1471 unsigned start = vertex_postfix->offset_start;
1472
1473 for (unsigned i = 0; i < so->num_elements; ++i) {
1474 unsigned vbi = so->pipe[i].vertex_buffer_index;
1475 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1476
1477 /* Adjust by the masked off bits of the offset. Make sure we
1478 * read src_offset from so->hw (which is not GPU visible)
1479 * rather than target (which is) due to caching effects */
1480
1481 unsigned src_offset = so->pipe[i].src_offset;
1482
1483 /* BOs aligned to 4k so guaranteed aligned to 64 */
1484 src_offset += (buf->buffer_offset & 63);
1485
1486 /* Also, somewhat obscurely per-instance data needs to be
1487 * offset in response to a delayed start in an indexed draw */
1488
1489 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1490 src_offset -= buf->stride * start;
1491
1492 pan_pack(out + i, ATTRIBUTE, cfg) {
1493 cfg.buffer_index = attrib_to_buffer[i];
1494 cfg.format = so->formats[i];
1495 cfg.offset = src_offset;
1496 }
1497 }
1498
1499 vertex_postfix->attributes = S.gpu;
1500 vertex_postfix->attribute_meta = T.gpu;
1501 }
1502
1503 static mali_ptr
1504 panfrost_emit_varyings(struct panfrost_batch *batch,
1505 struct mali_attribute_buffer_packed *slot,
1506 unsigned stride, unsigned count)
1507 {
1508 unsigned size = stride * count;
1509 mali_ptr ptr = panfrost_pool_alloc_aligned(&batch->invisible_pool, size, 64).gpu;
1510
1511 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1512 cfg.stride = stride;
1513 cfg.size = size;
1514 cfg.pointer = ptr;
1515 }
1516
1517 return ptr;
1518 }
1519
1520 static unsigned
1521 panfrost_streamout_offset(unsigned stride, unsigned offset,
1522 struct pipe_stream_output_target *target)
1523 {
1524 return (target->buffer_offset + (offset * stride * 4)) & 63;
1525 }
1526
1527 static void
1528 panfrost_emit_streamout(struct panfrost_batch *batch,
1529 struct mali_attribute_buffer_packed *slot,
1530 unsigned stride_words, unsigned offset, unsigned count,
1531 struct pipe_stream_output_target *target)
1532 {
1533 unsigned stride = stride_words * 4;
1534 unsigned max_size = target->buffer_size;
1535 unsigned expected_size = stride * count;
1536
1537 /* Grab the BO and bind it to the batch */
1538 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1539
1540 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1541 * the perspective of the TILER and FRAGMENT.
1542 */
1543 panfrost_batch_add_bo(batch, bo,
1544 PAN_BO_ACCESS_SHARED |
1545 PAN_BO_ACCESS_RW |
1546 PAN_BO_ACCESS_VERTEX_TILER |
1547 PAN_BO_ACCESS_FRAGMENT);
1548
1549 /* We will have an offset applied to get alignment */
1550 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
1551
1552 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1553 cfg.pointer = (addr & ~63);
1554 cfg.stride = stride;
1555 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1556 }
1557 }
1558
1559 static bool
1560 has_point_coord(unsigned mask, gl_varying_slot loc)
1561 {
1562 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1563 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1564 else if (loc == VARYING_SLOT_PNTC)
1565 return (mask & (1 << 8));
1566 else
1567 return false;
1568 }
1569
1570 /* Helpers for manipulating stream out information so we can pack varyings
1571 * accordingly. Compute the src_offset for a given captured varying */
1572
1573 static struct pipe_stream_output *
1574 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1575 {
1576 for (unsigned i = 0; i < info->num_outputs; ++i) {
1577 if (info->output[i].register_index == loc)
1578 return &info->output[i];
1579 }
1580
1581 unreachable("Varying not captured");
1582 }
1583
1584 static unsigned
1585 pan_varying_size(enum mali_format fmt)
1586 {
1587 unsigned type = MALI_EXTRACT_TYPE(fmt);
1588 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1589 unsigned bits = MALI_EXTRACT_BITS(fmt);
1590 unsigned bpc = 0;
1591
1592 if (bits == MALI_CHANNEL_FLOAT) {
1593 /* No doubles */
1594 bool fp16 = (type == MALI_FORMAT_SINT);
1595 assert(fp16 || (type == MALI_FORMAT_UNORM));
1596
1597 bpc = fp16 ? 2 : 4;
1598 } else {
1599 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1600
1601 /* See the enums */
1602 bits = 1 << bits;
1603 assert(bits >= 8);
1604 bpc = bits / 8;
1605 }
1606
1607 return bpc * chan;
1608 }
1609
1610 /* Indices for named (non-XFB) varyings that are present. These are packed
1611 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1612 * PAN_VARY_*). This has the nice property that you can lookup the buffer index
1613 * of a given special field given a shift S by:
1614 *
1615 * idx = popcount(P & ((1 << S) - 1))
1616 *
1617 * That is... look at all of the varyings that come earlier and count them, the
1618 * count is the new index since plus one. Likewise, the total number of special
1619 * buffers required is simply popcount(P)
1620 */
1621
1622 enum pan_special_varying {
1623 PAN_VARY_GENERAL = 0,
1624 PAN_VARY_POSITION = 1,
1625 PAN_VARY_PSIZ = 2,
1626 PAN_VARY_PNTCOORD = 3,
1627 PAN_VARY_FACE = 4,
1628 PAN_VARY_FRAGCOORD = 5,
1629
1630 /* Keep last */
1631 PAN_VARY_MAX,
1632 };
1633
1634 /* Given a varying, figure out which index it correpsonds to */
1635
1636 static inline unsigned
1637 pan_varying_index(unsigned present, enum pan_special_varying v)
1638 {
1639 unsigned mask = (1 << v) - 1;
1640 return util_bitcount(present & mask);
1641 }
1642
1643 /* Get the base offset for XFB buffers, which by convention come after
1644 * everything else. Wrapper function for semantic reasons; by construction this
1645 * is just popcount. */
1646
1647 static inline unsigned
1648 pan_xfb_base(unsigned present)
1649 {
1650 return util_bitcount(present);
1651 }
1652
1653 /* Computes the present mask for varyings so we can start emitting varying records */
1654
1655 static inline unsigned
1656 pan_varying_present(
1657 struct panfrost_shader_state *vs,
1658 struct panfrost_shader_state *fs,
1659 unsigned quirks)
1660 {
1661 /* At the moment we always emit general and position buffers. Not
1662 * strictly necessary but usually harmless */
1663
1664 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1665
1666 /* Enable special buffers by the shader info */
1667
1668 if (vs->writes_point_size)
1669 present |= (1 << PAN_VARY_PSIZ);
1670
1671 if (fs->reads_point_coord)
1672 present |= (1 << PAN_VARY_PNTCOORD);
1673
1674 if (fs->reads_face)
1675 present |= (1 << PAN_VARY_FACE);
1676
1677 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1678 present |= (1 << PAN_VARY_FRAGCOORD);
1679
1680 /* Also, if we have a point sprite, we need a point coord buffer */
1681
1682 for (unsigned i = 0; i < fs->varying_count; i++) {
1683 gl_varying_slot loc = fs->varyings_loc[i];
1684
1685 if (has_point_coord(fs->point_sprite_mask, loc))
1686 present |= (1 << PAN_VARY_PNTCOORD);
1687 }
1688
1689 return present;
1690 }
1691
1692 /* Emitters for varying records */
1693
1694 static void
1695 pan_emit_vary(struct mali_attribute_packed *out,
1696 unsigned present, enum pan_special_varying buf,
1697 unsigned quirks, enum mali_format format,
1698 unsigned offset)
1699 {
1700 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1701 unsigned swizzle = quirks & HAS_SWIZZLES ?
1702 panfrost_get_default_swizzle(nr_channels) :
1703 panfrost_bifrost_swizzle(nr_channels);
1704
1705 pan_pack(out, ATTRIBUTE, cfg) {
1706 cfg.buffer_index = pan_varying_index(present, buf);
1707 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1708 cfg.format = (format << 12) | swizzle;
1709 cfg.offset = offset;
1710 }
1711 }
1712
1713 /* General varying that is unused */
1714
1715 static void
1716 pan_emit_vary_only(struct mali_attribute_packed *out,
1717 unsigned present, unsigned quirks)
1718 {
1719 pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
1720 }
1721
1722 /* Special records */
1723
1724 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1725 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1726 [PAN_VARY_PSIZ] = MALI_R16F,
1727 [PAN_VARY_PNTCOORD] = MALI_R16F,
1728 [PAN_VARY_FACE] = MALI_R32I,
1729 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1730 };
1731
1732 static void
1733 pan_emit_vary_special(struct mali_attribute_packed *out,
1734 unsigned present, enum pan_special_varying buf,
1735 unsigned quirks)
1736 {
1737 assert(buf < PAN_VARY_MAX);
1738 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1739 }
1740
1741 static enum mali_format
1742 pan_xfb_format(enum mali_format format, unsigned nr)
1743 {
1744 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1745 return MALI_R32F | MALI_NR_CHANNELS(nr);
1746 else
1747 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1748 }
1749
1750 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1751 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1752 * value. */
1753
1754 static void
1755 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1756 unsigned present,
1757 unsigned max_xfb,
1758 unsigned *streamout_offsets,
1759 unsigned quirks,
1760 enum mali_format format,
1761 struct pipe_stream_output o)
1762 {
1763 unsigned swizzle = quirks & HAS_SWIZZLES ?
1764 panfrost_get_default_swizzle(o.num_components) :
1765 panfrost_bifrost_swizzle(o.num_components);
1766
1767 pan_pack(out, ATTRIBUTE, cfg) {
1768 /* XFB buffers come after everything else */
1769 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1770 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1771
1772 /* Override number of channels and precision to highp */
1773 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1774
1775 /* Apply given offsets together */
1776 cfg.offset = (o.dst_offset * 4) /* dwords */
1777 + streamout_offsets[o.output_buffer];
1778 }
1779 }
1780
1781 /* Determine if we should capture a varying for XFB. This requires actually
1782 * having a buffer for it. If we don't capture it, we'll fallback to a general
1783 * varying path (linked or unlinked, possibly discarding the write) */
1784
1785 static bool
1786 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1787 unsigned loc, unsigned max_xfb)
1788 {
1789 if (!(xfb->so_mask & (1ll << loc)))
1790 return false;
1791
1792 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1793 return o->output_buffer < max_xfb;
1794 }
1795
1796 static void
1797 pan_emit_general_varying(struct mali_attribute_packed *out,
1798 struct panfrost_shader_state *other,
1799 struct panfrost_shader_state *xfb,
1800 gl_varying_slot loc,
1801 enum mali_format format,
1802 unsigned present,
1803 unsigned quirks,
1804 unsigned *gen_offsets,
1805 enum mali_format *gen_formats,
1806 unsigned *gen_stride,
1807 unsigned idx,
1808 bool should_alloc)
1809 {
1810 /* Check if we're linked */
1811 signed other_idx = -1;
1812
1813 for (unsigned j = 0; j < other->varying_count; ++j) {
1814 if (other->varyings_loc[j] == loc) {
1815 other_idx = j;
1816 break;
1817 }
1818 }
1819
1820 if (other_idx < 0) {
1821 pan_emit_vary_only(out, present, quirks);
1822 return;
1823 }
1824
1825 unsigned offset = gen_offsets[other_idx];
1826
1827 if (should_alloc) {
1828 /* We're linked, so allocate a space via a watermark allocation */
1829 enum mali_format alt = other->varyings[other_idx];
1830
1831 /* Do interpolation at minimum precision */
1832 unsigned size_main = pan_varying_size(format);
1833 unsigned size_alt = pan_varying_size(alt);
1834 unsigned size = MIN2(size_main, size_alt);
1835
1836 /* If a varying is marked for XFB but not actually captured, we
1837 * should match the format to the format that would otherwise
1838 * be used for XFB, since dEQP checks for invariance here. It's
1839 * unclear if this is required by the spec. */
1840
1841 if (xfb->so_mask & (1ull << loc)) {
1842 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1843 format = pan_xfb_format(format, o->num_components);
1844 size = pan_varying_size(format);
1845 } else if (size == size_alt) {
1846 format = alt;
1847 }
1848
1849 gen_offsets[idx] = *gen_stride;
1850 gen_formats[other_idx] = format;
1851 offset = *gen_stride;
1852 *gen_stride += size;
1853 }
1854
1855 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1856 }
1857
1858 /* Higher-level wrapper around all of the above, classifying a varying into one
1859 * of the above types */
1860
1861 static void
1862 panfrost_emit_varying(
1863 struct mali_attribute_packed *out,
1864 struct panfrost_shader_state *stage,
1865 struct panfrost_shader_state *other,
1866 struct panfrost_shader_state *xfb,
1867 unsigned present,
1868 unsigned max_xfb,
1869 unsigned *streamout_offsets,
1870 unsigned quirks,
1871 unsigned *gen_offsets,
1872 enum mali_format *gen_formats,
1873 unsigned *gen_stride,
1874 unsigned idx,
1875 bool should_alloc,
1876 bool is_fragment)
1877 {
1878 gl_varying_slot loc = stage->varyings_loc[idx];
1879 enum mali_format format = stage->varyings[idx];
1880
1881 /* Override format to match linkage */
1882 if (!should_alloc && gen_formats[idx])
1883 format = gen_formats[idx];
1884
1885 if (has_point_coord(stage->point_sprite_mask, loc)) {
1886 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1887 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1888 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1889 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1890 } else if (loc == VARYING_SLOT_POS) {
1891 if (is_fragment)
1892 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1893 else
1894 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1895 } else if (loc == VARYING_SLOT_PSIZ) {
1896 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1897 } else if (loc == VARYING_SLOT_PNTC) {
1898 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1899 } else if (loc == VARYING_SLOT_FACE) {
1900 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1901 } else {
1902 pan_emit_general_varying(out, other, xfb, loc, format, present,
1903 quirks, gen_offsets, gen_formats, gen_stride,
1904 idx, should_alloc);
1905 }
1906 }
1907
1908 static void
1909 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1910 unsigned present,
1911 enum pan_special_varying v,
1912 unsigned special)
1913 {
1914 if (present & (1 << v)) {
1915 unsigned idx = pan_varying_index(present, v);
1916
1917 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1918 cfg.special = special;
1919 cfg.type = 0;
1920 }
1921 }
1922 }
1923
1924 void
1925 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1926 unsigned vertex_count,
1927 struct mali_vertex_tiler_postfix *vertex_postfix,
1928 struct mali_vertex_tiler_postfix *tiler_postfix,
1929 union midgard_primitive_size *primitive_size)
1930 {
1931 /* Load the shaders */
1932 struct panfrost_context *ctx = batch->ctx;
1933 struct panfrost_device *dev = pan_device(ctx->base.screen);
1934 struct panfrost_shader_state *vs, *fs;
1935 size_t vs_size, fs_size;
1936
1937 /* Allocate the varying descriptor */
1938
1939 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1940 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1941 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1942 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1943
1944 struct panfrost_transfer trans = panfrost_pool_alloc_aligned(
1945 &batch->pool, vs_size + fs_size, MALI_ATTRIBUTE_LENGTH);
1946
1947 struct pipe_stream_output_info *so = &vs->stream_output;
1948 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1949
1950 /* Check if this varying is linked by us. This is the case for
1951 * general-purpose, non-captured varyings. If it is, link it. If it's
1952 * not, use the provided stream out information to determine the
1953 * offset, since it was already linked for us. */
1954
1955 unsigned gen_offsets[32];
1956 enum mali_format gen_formats[32];
1957 memset(gen_offsets, 0, sizeof(gen_offsets));
1958 memset(gen_formats, 0, sizeof(gen_formats));
1959
1960 unsigned gen_stride = 0;
1961 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1962 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1963
1964 unsigned streamout_offsets[32];
1965
1966 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1967 streamout_offsets[i] = panfrost_streamout_offset(
1968 so->stride[i],
1969 ctx->streamout.offsets[i],
1970 ctx->streamout.targets[i]);
1971 }
1972
1973 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1974 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1975
1976 for (unsigned i = 0; i < vs->varying_count; i++) {
1977 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1978 ctx->streamout.num_targets, streamout_offsets,
1979 dev->quirks,
1980 gen_offsets, gen_formats, &gen_stride, i, true, false);
1981 }
1982
1983 for (unsigned i = 0; i < fs->varying_count; i++) {
1984 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
1985 ctx->streamout.num_targets, streamout_offsets,
1986 dev->quirks,
1987 gen_offsets, gen_formats, &gen_stride, i, false, true);
1988 }
1989
1990 unsigned xfb_base = pan_xfb_base(present);
1991 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1992 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets),
1993 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1994 struct mali_attribute_buffer_packed *varyings =
1995 (struct mali_attribute_buffer_packed *) T.cpu;
1996
1997 /* Emit the stream out buffers */
1998
1999 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
2000 ctx->vertex_count);
2001
2002 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2003 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
2004 so->stride[i],
2005 ctx->streamout.offsets[i],
2006 out_count,
2007 ctx->streamout.targets[i]);
2008 }
2009
2010 panfrost_emit_varyings(batch,
2011 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2012 gen_stride, vertex_count);
2013
2014 /* fp32 vec4 gl_Position */
2015 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2016 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2017 sizeof(float) * 4, vertex_count);
2018
2019 if (present & (1 << PAN_VARY_PSIZ)) {
2020 primitive_size->pointer = panfrost_emit_varyings(batch,
2021 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2022 2, vertex_count);
2023 }
2024
2025 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
2026 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
2027 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
2028
2029 vertex_postfix->varyings = T.gpu;
2030 tiler_postfix->varyings = T.gpu;
2031
2032 vertex_postfix->varying_meta = trans.gpu;
2033 tiler_postfix->varying_meta = trans.gpu + vs_size;
2034 }
2035
2036 void
2037 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2038 struct mali_vertex_tiler_prefix *vertex_prefix,
2039 struct mali_vertex_tiler_postfix *vertex_postfix,
2040 struct mali_vertex_tiler_prefix *tiler_prefix,
2041 struct mali_vertex_tiler_postfix *tiler_postfix,
2042 union midgard_primitive_size *primitive_size)
2043 {
2044 struct panfrost_context *ctx = batch->ctx;
2045 struct panfrost_device *device = pan_device(ctx->base.screen);
2046 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2047 struct bifrost_payload_vertex bifrost_vertex = {0,};
2048 struct bifrost_payload_tiler bifrost_tiler = {0,};
2049 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2050 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2051 void *vp, *tp;
2052 size_t vp_size, tp_size;
2053
2054 if (device->quirks & IS_BIFROST) {
2055 bifrost_vertex.prefix = *vertex_prefix;
2056 bifrost_vertex.postfix = *vertex_postfix;
2057 vp = &bifrost_vertex;
2058 vp_size = sizeof(bifrost_vertex);
2059
2060 bifrost_tiler.prefix = *tiler_prefix;
2061 bifrost_tiler.tiler.primitive_size = *primitive_size;
2062 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2063 bifrost_tiler.postfix = *tiler_postfix;
2064 tp = &bifrost_tiler;
2065 tp_size = sizeof(bifrost_tiler);
2066 } else {
2067 midgard_vertex.prefix = *vertex_prefix;
2068 midgard_vertex.postfix = *vertex_postfix;
2069 vp = &midgard_vertex;
2070 vp_size = sizeof(midgard_vertex);
2071
2072 midgard_tiler.prefix = *tiler_prefix;
2073 midgard_tiler.postfix = *tiler_postfix;
2074 midgard_tiler.primitive_size = *primitive_size;
2075 tp = &midgard_tiler;
2076 tp_size = sizeof(midgard_tiler);
2077 }
2078
2079 if (wallpapering) {
2080 /* Inject in reverse order, with "predicted" job indices.
2081 * THIS IS A HACK XXX */
2082 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2083 batch->scoreboard.job_index + 2, tp, tp_size, true);
2084 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2085 vp, vp_size, true);
2086 return;
2087 }
2088
2089 /* If rasterizer discard is enable, only submit the vertex */
2090
2091 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2092 vp, vp_size, false);
2093
2094 if (ctx->rasterizer->base.rasterizer_discard)
2095 return;
2096
2097 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2098 false);
2099 }
2100
2101 /* TODO: stop hardcoding this */
2102 mali_ptr
2103 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2104 {
2105 uint16_t locations[] = {
2106 128, 128,
2107 0, 256,
2108 0, 256,
2109 0, 256,
2110 0, 256,
2111 0, 256,
2112 0, 256,
2113 0, 256,
2114 0, 256,
2115 0, 256,
2116 0, 256,
2117 0, 256,
2118 0, 256,
2119 0, 256,
2120 0, 256,
2121 0, 256,
2122 0, 256,
2123 0, 256,
2124 0, 256,
2125 0, 256,
2126 0, 256,
2127 0, 256,
2128 0, 256,
2129 0, 256,
2130 0, 256,
2131 0, 256,
2132 0, 256,
2133 0, 256,
2134 0, 256,
2135 0, 256,
2136 0, 256,
2137 0, 256,
2138 128, 128,
2139 0, 0,
2140 0, 0,
2141 0, 0,
2142 0, 0,
2143 0, 0,
2144 0, 0,
2145 0, 0,
2146 0, 0,
2147 0, 0,
2148 0, 0,
2149 0, 0,
2150 0, 0,
2151 0, 0,
2152 0, 0,
2153 0, 0,
2154 };
2155
2156 return panfrost_pool_upload_aligned(&batch->pool, locations, 96 * sizeof(uint16_t), 64);
2157 }