panfrost: Identify additional SFBD flags
src/gallium/drivers/panfrost/pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
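/* Emit the shared memory descriptor used on the Bifrost vertex/tiler path.
 * This mainly wires up thread-local storage: if the batch needs stack space
 * for register spilling, a scratchpad BO is allocated and pointed to here. */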
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 struct mali_shared_memory shared = {
62 .shared_workgroup_count = ~0,
63 };
64
65 if (batch->stack_size) {
66 struct panfrost_bo *stack =
67 panfrost_batch_get_scratchpad(batch, batch->stack_size,
68 dev->thread_tls_alloc,
69 dev->core_count);
70
71 shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
72 shared.scratchpad = stack->gpu;
73 }
74
75 postfix->shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared, sizeof(shared), 64);
76 }
77
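/* On the Midgard (non-Bifrost) path, the same postfix field carries a pointer
 * to the framebuffer descriptor rather than shared memory, so reserve the FBD
 * for this batch and record its address. */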
78 static void
79 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_postfix *postfix)
81 {
82 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
83 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
84 }
85
86 static void
87 panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
88 struct mali_vertex_tiler_prefix *prefix,
89 struct mali_vertex_tiler_postfix *postfix)
90 {
91 postfix->gl_enables |= 0x7;
92 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
93 rasterizer->base.front_ccw);
94 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
95 (rasterizer->base.cull_face & PIPE_FACE_FRONT));
96 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
97 (rasterizer->base.cull_face & PIPE_FACE_BACK));
98 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
99 rasterizer->base.flatshade_first);
100 }
101
102 void
103 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
104 struct mali_vertex_tiler_prefix *prefix,
105 union midgard_primitive_size *primitive_size)
106 {
107 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
108
109 if (!panfrost_writes_point_size(ctx)) {
110 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
111 rasterizer->base.point_size :
112 rasterizer->base.line_width;
113
114 primitive_size->constant = val;
115 }
116 }
117
118 static void
119 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
120 struct mali_vertex_tiler_postfix *postfix)
121 {
122 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
123 if (ctx->occlusion_query) {
124 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
125 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
126 PAN_BO_ACCESS_SHARED |
127 PAN_BO_ACCESS_RW |
128 PAN_BO_ACCESS_FRAGMENT);
129 } else {
130 postfix->occlusion_counter = 0;
131 }
132 }
133
134 void
135 panfrost_vt_init(struct panfrost_context *ctx,
136 enum pipe_shader_type stage,
137 struct mali_vertex_tiler_prefix *prefix,
138 struct mali_vertex_tiler_postfix *postfix)
139 {
140 struct panfrost_device *device = pan_device(ctx->base.screen);
141
142 if (!ctx->shader[stage])
143 return;
144
145 memset(prefix, 0, sizeof(*prefix));
146 memset(postfix, 0, sizeof(*postfix));
147
148 if (device->quirks & IS_BIFROST) {
149 postfix->gl_enables = 0x2;
150 panfrost_vt_emit_shared_memory(ctx, postfix);
151 } else {
152 postfix->gl_enables = 0x6;
153 panfrost_vt_attach_framebuffer(ctx, postfix);
154 }
155
156 if (stage == PIPE_SHADER_FRAGMENT) {
157 panfrost_vt_update_occlusion_query(ctx, postfix);
158 panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
159 }
160 }
161
162 static unsigned
163 panfrost_translate_index_size(unsigned size)
164 {
165 switch (size) {
166 case 1:
167 return MALI_DRAW_INDEXED_UINT8;
168
169 case 2:
170 return MALI_DRAW_INDEXED_UINT16;
171
172 case 4:
173 return MALI_DRAW_INDEXED_UINT32;
174
175 default:
176 unreachable("Invalid index size");
177 }
178 }
179
180 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
181 * good for the duration of the draw (transient), though it may last longer.
182 * Also get the bounds on the index buffer for the range accessed by the
183 * draw. We do these operations together because there are natural
184 * optimizations which require them to be together. */
185
186 static mali_ptr
187 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
188 const struct pipe_draw_info *info,
189 unsigned *min_index, unsigned *max_index)
190 {
191 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
192 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
193 off_t offset = info->start * info->index_size;
194 bool needs_indices = true;
195 mali_ptr out = 0;
196
197 if (info->max_index != ~0u) {
198 *min_index = info->min_index;
199 *max_index = info->max_index;
200 needs_indices = false;
201 }
202
203 if (!info->has_user_indices) {
204 /* Only resources can be directly mapped */
205 panfrost_batch_add_bo(batch, rsrc->bo,
206 PAN_BO_ACCESS_SHARED |
207 PAN_BO_ACCESS_READ |
208 PAN_BO_ACCESS_VERTEX_TILER);
209 out = rsrc->bo->gpu + offset;
210
211 /* Check the cache */
212 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
213 info->start,
214 info->count,
215 min_index,
216 max_index);
217 } else {
218 /* Otherwise, we need to upload to transient memory */
219 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
220 struct panfrost_transfer T =
221 panfrost_pool_alloc_aligned(&batch->pool,
222 info->count * info->index_size,
223 info->index_size);
224
225 memcpy(T.cpu, ibuf8 + offset, info->count * info->index_size);
226 out = T.gpu;
227 }
228
229 if (needs_indices) {
230 /* Fallback */
231 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
232
233 if (!info->has_user_indices)
234 panfrost_minmax_cache_add(rsrc->index_cache,
235 info->start, info->count,
236 *min_index, *max_index);
237 }
238
239 return out;
240 }
241
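/* Fill in the draw-related fields of the vertex/tiler payloads for a draw:
 * draw mode and flags, the index buffer (with its min/max bounds), the vertex
 * count, and the instancing divisor encoding. */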
242 void
243 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
244 const struct pipe_draw_info *info,
245 enum mali_draw_mode draw_mode,
246 struct mali_vertex_tiler_postfix *vertex_postfix,
247 struct mali_vertex_tiler_prefix *tiler_prefix,
248 struct mali_vertex_tiler_postfix *tiler_postfix,
249 unsigned *vertex_count,
250 unsigned *padded_count)
251 {
252 tiler_prefix->draw_mode = draw_mode;
253
254 unsigned draw_flags = 0;
255
256 if (panfrost_writes_point_size(ctx))
257 draw_flags |= MALI_DRAW_VARYING_SIZE;
258
259 if (info->primitive_restart)
260 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
261
262 /* These flags don't make much sense */
263
264 draw_flags |= 0x3000;
265
266 if (info->index_size) {
267 unsigned min_index = 0, max_index = 0;
268
269 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
270 info,
271 &min_index,
272 &max_index);
273
274 /* Use the corresponding values */
275 *vertex_count = max_index - min_index + 1;
276 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
277 tiler_prefix->offset_bias_correction = -min_index;
278 tiler_prefix->index_count = MALI_POSITIVE(info->count);
279 draw_flags |= panfrost_translate_index_size(info->index_size);
280 } else {
281 tiler_prefix->indices = 0;
282 *vertex_count = ctx->vertex_count;
283 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
284 tiler_prefix->offset_bias_correction = 0;
285 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
286 }
287
288 tiler_prefix->unknown_draw = draw_flags;
289
290 /* Encode the padded vertex count */
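/* Decompose the padded count as an odd number times a power of two
 * (padded = (2 * instance_odd + 1) << instance_shift); this is the form the
 * instancing hardware appears to expect, hence the trailing-zero count and
 * the remaining odd factor below. */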
291
292 if (info->instance_count > 1) {
293 *padded_count = panfrost_padded_vertex_count(*vertex_count);
294
295 unsigned shift = __builtin_ctz(ctx->padded_count);
296 unsigned k = ctx->padded_count >> (shift + 1);
297
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
300 } else {
301 *padded_count = *vertex_count;
302
303 /* Reset instancing state */
304 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
305 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
306 }
307 }
308
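/* Emit the shader descriptor for a vertex or compute job. Fragment shaders
 * take the more involved panfrost_emit_frag_shader() path below, since they
 * also carry blend and depth/stencil state. */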
309 static void
310 panfrost_emit_compute_shader(struct panfrost_context *ctx,
311 enum pipe_shader_type st,
312 struct mali_shader_meta *meta)
313 {
314 const struct panfrost_device *dev = pan_device(ctx->base.screen);
315 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
316
317 memset(meta, 0, sizeof(*meta));
318 meta->shader = ss->shader;
319 meta->attribute_count = ss->attribute_count;
320 meta->varying_count = ss->varying_count;
321 meta->texture_count = ctx->sampler_view_count[st];
322 meta->sampler_count = ctx->sampler_count[st];
323
324 if (dev->quirks & IS_BIFROST) {
325 struct mali_bifrost_properties_packed prop;
326 struct mali_preload_vertex_packed preload;
327
328 pan_pack(&prop, BIFROST_PROPERTIES, cfg) {
329 cfg.unknown = 0x800000; /* XXX */
330 cfg.uniform_buffer_count = panfrost_ubo_count(ctx, st);
331 }
332
333 /* TODO: True compute shaders */
334 pan_pack(&preload, PRELOAD_VERTEX, cfg) {
335 cfg.uniform_count = ss->uniform_count;
336 cfg.vertex_id = true;
337 cfg.instance_id = true;
338 }
339
340 memcpy(&meta->bifrost_props, &prop, sizeof(prop));
341 memcpy(&meta->bifrost_preload, &preload, sizeof(preload));
342 } else {
343 struct mali_midgard_properties_packed prop;
344
345 pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
346 cfg.uniform_buffer_count = panfrost_ubo_count(ctx, st);
347 cfg.uniform_count = ss->uniform_count;
348 cfg.work_register_count = ss->work_reg_count;
349 cfg.writes_globals = ss->writes_global;
350 cfg.suppress_inf_nan = true; /* XXX */
351 }
352
353 memcpy(&meta->midgard_props, &prop, sizeof(prop));
354 }
355 }
356
357 static unsigned
358 translate_tex_wrap(enum pipe_tex_wrap w)
359 {
360 switch (w) {
361 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
362 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
363 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
364 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
365 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
366 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
367 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
368 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
369 default: unreachable("Invalid wrap");
370 }
371 }
372
373 /* The hardware compares in the wrong order, so we have to flip before
374 * encoding. Yes, really. */
375
376 static enum mali_func
377 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
378 {
379 if (!cso->compare_mode)
380 return MALI_FUNC_NEVER;
381
382 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
383 return panfrost_flip_compare_func(f);
384 }
385
386 static enum mali_mipmap_mode
387 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
388 {
389 switch (f) {
390 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
391 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
392 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
393 default: unreachable("Invalid");
394 }
395 }
396
397 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
398 struct mali_midgard_sampler_packed *hw)
399 {
400 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
401 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
402 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
403 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
404 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
405 cfg.normalized_coordinates = cso->normalized_coords;
406
407 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
408
409 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
410
411 /* If necessary, we disable mipmapping in the sampler descriptor by
412 * clamping the LOD as tightly as possible (from 0 to epsilon,
413 * essentially -- remember these are fixed point numbers, so
414 * epsilon=1/256) */
415
416 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
417 cfg.minimum_lod + 1 :
418 FIXED_16(cso->max_lod, false);
419
420 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
421 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
422 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
423
424 cfg.compare_function = panfrost_sampler_compare_func(cso);
425 cfg.seamless_cube_map = cso->seamless_cube_map;
426
427 cfg.border_color_r = cso->border_color.f[0];
428 cfg.border_color_g = cso->border_color.f[1];
429 cfg.border_color_b = cso->border_color.f[2];
430 cfg.border_color_a = cso->border_color.f[3];
431 }
432 }
433
434 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
435 struct mali_bifrost_sampler_packed *hw)
436 {
437 pan_pack(hw, BIFROST_SAMPLER, cfg) {
438 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
439 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
440 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
441 cfg.normalized_coordinates = cso->normalized_coords;
442
443 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
444 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
445 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
446
447 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
448 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
449 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
450
451 cfg.compare_function = panfrost_sampler_compare_func(cso);
452 cfg.seamless_cube_map = cso->seamless_cube_map;
453 }
454 }
455
456 static bool
457 panfrost_fs_required(
458 struct panfrost_shader_state *fs,
459 struct panfrost_blend_final *blend,
460 unsigned rt_count)
461 {
462 /* If we generally have side effects */
463 if (fs->fs_sidefx)
464 return true;
465
466 /* If colour is written we need to execute */
467 for (unsigned i = 0; i < rt_count; ++i) {
468 if (!blend[i].no_colour)
469 return true;
470 }
471
472 /* If depth is written and not implied we need to execute.
473 * TODO: Predicate on Z/S writes being enabled */
474 return (fs->writes_depth || fs->writes_stencil);
475 }
476
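/* Emit the per-render-target blend descriptors that trail the fragment shader
 * descriptor on MRT-capable hardware. The layout differs between Midgard
 * (struct midgard_blend_rt) and Bifrost (struct bifrost_blend_rt), but each
 * record takes either a fixed-function blend equation or a blend shader
 * pointer. */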
477 static void
478 panfrost_emit_blend(struct panfrost_batch *batch, void *rts,
479 struct panfrost_blend_final *blend)
480 {
481 const struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
482 struct panfrost_shader_state *fs = panfrost_get_shader_state(batch->ctx, PIPE_SHADER_FRAGMENT);
483 unsigned rt_count = batch->key.nr_cbufs;
484
485 struct bifrost_blend_rt *brts = rts;
486 struct midgard_blend_rt *mrts = rts;
487
488 /* Disable blending for depth-only on Bifrost */
489
490 if (rt_count == 0 && dev->quirks & IS_BIFROST)
491 brts[0].unk2 = 0x3;
492
493 for (unsigned i = 0; i < rt_count; ++i) {
494 unsigned flags = 0;
495
496 pan_pack(&flags, BLEND_FLAGS, cfg) {
497 if (blend[i].no_colour) {
498 cfg.enable = false;
499 break;
500 }
501
502 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
503
504 cfg.srgb = util_format_is_srgb(batch->key.cbufs[i]->format);
505 cfg.load_destination = blend[i].load_dest;
506 cfg.dither_disable = !batch->ctx->blend->base.dither;
507
508 if (!(dev->quirks & IS_BIFROST))
509 cfg.midgard_blend_shader = blend[i].is_shader;
510 }
511
512 if (dev->quirks & IS_BIFROST) {
513 brts[i].flags = flags;
514
515 if (blend[i].is_shader) {
516 /* The blend shader's address needs to have
517 * the same top 32 bits as the fragment shader's.
518 * TODO: Ensure that's always the case.
519 */
520 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
521 (fs->bo->gpu & (0xffffffffull << 32)));
522 brts[i].shader = blend[i].shader.gpu;
523 brts[i].unk2 = 0x0;
524 } else {
525 enum pipe_format format = batch->key.cbufs[i]->format;
526 const struct util_format_description *format_desc;
527 format_desc = util_format_description(format);
528
529 brts[i].equation = blend[i].equation.equation;
530
531 /* TODO: this is a bit more complicated */
532 brts[i].constant = blend[i].equation.constant;
533
534 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
535
536 /* 0x19 disables blending and forces REPLACE
537 * mode (equivalent to rgb_mode = alpha_mode =
538 * x122, colour mask = 0xF). 0x1a allows
539 * blending. */
540 brts[i].unk2 = blend[i].opaque ? 0x19 : 0x1a;
541
542 brts[i].shader_type = fs->blend_types[i];
543 }
544 } else {
545 memcpy(&mrts[i].flags, &flags, sizeof(flags));
546
547 if (blend[i].is_shader) {
548 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
549 } else {
550 mrts[i].blend.equation = blend[i].equation.equation;
551 mrts[i].blend.constant = blend[i].equation.constant;
552 }
553 }
554 }
555 }
556
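/* Build the fragment shader descriptor (mali_shader_meta), which also packs
 * rasterizer, depth/stencil, multisample and (on SFBD) blend state alongside
 * the shader pointer. */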
557 static void
558 panfrost_emit_frag_shader(struct panfrost_context *ctx,
559 struct mali_shader_meta *fragmeta,
560 struct panfrost_blend_final *blend)
561 {
562 const struct panfrost_device *dev = pan_device(ctx->base.screen);
563 struct panfrost_shader_state *fs;
564
565 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
566
567 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
568 const struct panfrost_zsa_state *zsa = ctx->depth_stencil;
569 unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
570
571 memset(fragmeta, 0, sizeof(*fragmeta));
572
573 fragmeta->shader = fs->shader;
574 fragmeta->attribute_count = fs->attribute_count;
575 fragmeta->varying_count = fs->varying_count;
576 fragmeta->texture_count = ctx->sampler_view_count[PIPE_SHADER_FRAGMENT];
577 fragmeta->sampler_count = ctx->sampler_count[PIPE_SHADER_FRAGMENT];
578
579 if (dev->quirks & IS_BIFROST) {
580 struct mali_bifrost_properties_packed prop;
581 struct mali_preload_fragment_packed preload;
582
583 bool no_blend = true;
584
585 for (unsigned i = 0; i < rt_count; ++i)
586 no_blend &= (!blend[i].load_dest | blend[i].no_colour);
587
588 pan_pack(&prop, BIFROST_PROPERTIES, cfg) {
589 cfg.unknown = 0x950020; /* XXX */
590 cfg.uniform_buffer_count = panfrost_ubo_count(ctx, PIPE_SHADER_FRAGMENT);
591 cfg.early_z_enable = !fs->can_discard && !fs->writes_depth && no_blend;
592 }
593
594 pan_pack(&preload, PRELOAD_FRAGMENT, cfg) {
595 cfg.uniform_count = fs->uniform_count;
596 cfg.fragment_position = fs->reads_frag_coord;
597 }
598
599 memcpy(&fragmeta->bifrost_props, &prop, sizeof(prop));
600 memcpy(&fragmeta->bifrost_preload, &preload, sizeof(preload));
601 } else {
602 struct mali_midgard_properties_packed prop;
603
604 /* Reasons to disable early-Z from a shader perspective */
605 bool late_z = fs->can_discard || fs->writes_global ||
606 fs->writes_depth || fs->writes_stencil;
607
608 /* Reasons to disable early-Z from a CSO perspective */
609 bool alpha_to_coverage = ctx->blend->base.alpha_to_coverage;
610
611 /* If either depth or stencil is enabled, discard matters */
612 bool zs_enabled =
613 (zsa->base.depth.enabled && zsa->base.depth.func != PIPE_FUNC_ALWAYS) ||
614 zsa->base.stencil[0].enabled;
615
616 bool has_blend_shader = false;
617
618 for (unsigned c = 0; c < rt_count; ++c)
619 has_blend_shader |= blend[c].is_shader;
620
621 pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
622 cfg.uniform_buffer_count = panfrost_ubo_count(ctx, PIPE_SHADER_FRAGMENT);
623 cfg.uniform_count = fs->uniform_count;
624 cfg.work_register_count = fs->work_reg_count;
625 cfg.writes_globals = fs->writes_global;
626 cfg.suppress_inf_nan = true; /* XXX */
627
628 /* TODO: Reduce this limit? */
629 if (has_blend_shader)
630 cfg.work_register_count = MAX2(cfg.work_register_count, 8);
631
632 cfg.stencil_from_shader = fs->writes_stencil;
633 cfg.helper_invocation_enable = fs->helper_invocations;
634 cfg.depth_source = fs->writes_depth ?
635 MALI_DEPTH_SOURCE_SHADER :
636 MALI_DEPTH_SOURCE_FIXED_FUNCTION;
637
638 /* Depend on other state */
639 cfg.early_z_enable = !(late_z || alpha_to_coverage);
640 cfg.reads_tilebuffer = fs->outputs_read || (!zs_enabled && fs->can_discard);
641 cfg.reads_depth_stencil = zs_enabled && fs->can_discard;
642 }
643
644 memcpy(&fragmeta->midgard_props, &prop, sizeof(prop));
645 }
646
647 bool msaa = rast->multisample;
648 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
649
650 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
651 fragmeta->unknown2_4 = 0x4e0;
652
653 /* TODO: Sample size */
654 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
655 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
656
657 /* EXT_shader_framebuffer_fetch requires the shader to be run
658 * per-sample when outputs are read. */
659 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
660 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
661
662 fragmeta->depth_units = rast->offset_units * 2.0f;
663 fragmeta->depth_factor = rast->offset_scale;
664
665 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
666
667 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
668 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
669
670 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
671 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
672
673 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
674 zsa->base.stencil[0].enabled);
675
676 fragmeta->stencil_mask_front = zsa->stencil_mask_front;
677 fragmeta->stencil_mask_back = zsa->stencil_mask_back;
678
679 /* Bottom bits for stencil ref, exactly one word */
680 fragmeta->stencil_front.opaque[0] = zsa->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
681
682 /* If back-stencil is not enabled, use the front values */
683
684 if (zsa->base.stencil[1].enabled)
685 fragmeta->stencil_back.opaque[0] = zsa->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
686 else
687 fragmeta->stencil_back = fragmeta->stencil_front;
688
689 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
690 zsa->base.depth.writemask);
691
692 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
693 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(
694 zsa->base.depth.enabled ? zsa->base.depth.func : PIPE_FUNC_ALWAYS));
695
696 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
697 ctx->blend->base.alpha_to_coverage);
698
699 /* Disable shader execution if we can */
700 if (dev->quirks & MIDGARD_SHADERLESS
701 && !panfrost_fs_required(fs, blend, rt_count)) {
702 fragmeta->shader = 0x1;
703 fragmeta->attribute_count = 0;
704 fragmeta->varying_count = 0;
705 fragmeta->texture_count = 0;
706 fragmeta->sampler_count = 0;
707
708 /* This feature is not known to work on Bifrost */
709 struct mali_midgard_properties_packed prop;
710
711 pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
712 cfg.work_register_count = 1;
713 cfg.depth_source = MALI_DEPTH_SOURCE_FIXED_FUNCTION;
714 cfg.early_z_enable = true;
715 }
716
717 memcpy(&fragmeta->midgard_props, &prop, sizeof(prop));
718 }
719
720 if (dev->quirks & MIDGARD_SFBD) {
721 /* On single render target (SFBD) platforms, the blend
722 * information is inside the shader meta itself. We additionally
723 * need to signal CAN_DISCARD for nontrivial blend modes (so
724 * we're able to read back the destination buffer) */
725
726 if (blend[0].no_colour)
727 return;
728
729 fragmeta->unknown2_4 |= MALI_SFBD_ENABLE;
730
731 SET_BIT(fragmeta->unknown2_4, MALI_SFBD_SRGB,
732 util_format_is_srgb(ctx->pipe_framebuffer.cbufs[0]->format));
733
734 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
735 blend[0].is_shader);
736
737 if (blend[0].is_shader) {
738 fragmeta->blend.shader = blend[0].shader.gpu |
739 blend[0].shader.first_tag;
740 } else {
741 fragmeta->blend.equation = blend[0].equation.equation;
742 fragmeta->blend.constant = blend[0].equation.constant;
743 }
744
745 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
746 blend[0].load_dest);
747
748 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER, !ctx->blend->base.dither);
749 } else if (!(dev->quirks & IS_BIFROST)) {
750 /* Bug where MRT-capable hw apparently reads the last blend
751 * shader from here instead of the usual location? */
752
753 for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
754 if (!blend[rt].is_shader)
755 continue;
756
757 fragmeta->blend.shader = blend[rt].shader.gpu |
758 blend[rt].shader.first_tag;
759 break;
760 }
761 }
762 }
763
764 void
765 panfrost_emit_shader_meta(struct panfrost_batch *batch,
766 enum pipe_shader_type st,
767 struct mali_vertex_tiler_postfix *postfix)
768 {
769 struct panfrost_context *ctx = batch->ctx;
770 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
771
772 if (!ss) {
773 postfix->shader = 0;
774 return;
775 }
776
777 struct mali_shader_meta meta;
778
779 /* Add the shader BO to the batch. */
780 panfrost_batch_add_bo(batch, ss->bo,
781 PAN_BO_ACCESS_PRIVATE |
782 PAN_BO_ACCESS_READ |
783 panfrost_bo_access_for_stage(st));
784
785 mali_ptr shader_ptr;
786
787 if (st == PIPE_SHADER_FRAGMENT) {
788 struct panfrost_device *dev = pan_device(ctx->base.screen);
789 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
790 size_t desc_size = sizeof(meta);
791 void *rts = NULL;
792 struct panfrost_transfer xfer;
793 unsigned rt_size;
794
795 if (dev->quirks & MIDGARD_SFBD)
796 rt_size = 0;
797 else if (dev->quirks & IS_BIFROST)
798 rt_size = sizeof(struct bifrost_blend_rt);
799 else
800 rt_size = sizeof(struct midgard_blend_rt);
801
802 desc_size += rt_size * rt_count;
803
804 if (rt_size)
805 rts = rzalloc_size(ctx, rt_size * rt_count);
806
807 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
808
809 for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
810 blend[c] = panfrost_get_blend_for_context(ctx, c);
811
812 panfrost_emit_frag_shader(ctx, &meta, blend);
813
814 if (!(dev->quirks & MIDGARD_SFBD))
815 panfrost_emit_blend(batch, rts, blend);
816 else
817 batch->draws |= PIPE_CLEAR_COLOR0;
818
819 xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, sizeof(meta));
820
821 memcpy(xfer.cpu, &meta, sizeof(meta));
822 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
823
824 if (rt_size)
825 ralloc_free(rts);
826
827 shader_ptr = xfer.gpu;
828 } else {
829 panfrost_emit_compute_shader(ctx, st, &meta);
830
831 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
832 sizeof(meta));
833 }
834
835 postfix->shader = shader_ptr;
836 }
837
838 void
839 panfrost_emit_viewport(struct panfrost_batch *batch,
840 struct mali_vertex_tiler_postfix *tiler_postfix)
841 {
842 struct panfrost_context *ctx = batch->ctx;
843 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
844 const struct pipe_scissor_state *ss = &ctx->scissor;
845 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
846 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
847
848 /* Derive min/max from translate/scale. Note since |x| >= 0 by
849 * definition, we have that -|x| <= |x| hence translate - |scale| <=
850 * translate + |scale|, so the ordering is correct here. */
851 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
852 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
853 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
854 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
855 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
856 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
857
858 /* Scissor to the intersection of the viewport and the scissor, clamped
859 * to the framebuffer */
860
861 unsigned minx = MIN2(fb->width, vp_minx);
862 unsigned maxx = MIN2(fb->width, vp_maxx);
863 unsigned miny = MIN2(fb->height, vp_miny);
864 unsigned maxy = MIN2(fb->height, vp_maxy);
865
866 if (ss && rast->scissor) {
867 minx = MAX2(ss->minx, minx);
868 miny = MAX2(ss->miny, miny);
869 maxx = MIN2(ss->maxx, maxx);
870 maxy = MIN2(ss->maxy, maxy);
871 }
872
873 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
874
875 pan_pack(T.cpu, VIEWPORT, cfg) {
876 cfg.scissor_minimum_x = minx;
877 cfg.scissor_minimum_y = miny;
878 cfg.scissor_maximum_x = maxx - 1;
879 cfg.scissor_maximum_y = maxy - 1;
880
881 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
882 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
883 }
884
885 tiler_postfix->viewport = T.gpu;
886 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
887 }
888
889 static mali_ptr
890 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
891 enum pipe_shader_type st,
892 struct panfrost_constant_buffer *buf,
893 unsigned index)
894 {
895 struct pipe_constant_buffer *cb = &buf->cb[index];
896 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
897
898 if (rsrc) {
899 panfrost_batch_add_bo(batch, rsrc->bo,
900 PAN_BO_ACCESS_SHARED |
901 PAN_BO_ACCESS_READ |
902 panfrost_bo_access_for_stage(st));
903
904 /* Alignment guaranteed by
905 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
906 return rsrc->bo->gpu + cb->buffer_offset;
907 } else if (cb->user_buffer) {
908 return panfrost_pool_upload_aligned(&batch->pool,
909 cb->user_buffer +
910 cb->buffer_offset,
911 cb->buffer_size, 16);
912 } else {
913 unreachable("No constant buffer");
914 }
915 }
916
917 struct sysval_uniform {
918 union {
919 float f[4];
920 int32_t i[4];
921 uint32_t u[4];
922 uint64_t du[2];
923 };
924 };
925
926 static void
927 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
928 struct sysval_uniform *uniform)
929 {
930 struct panfrost_context *ctx = batch->ctx;
931 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
932
933 uniform->f[0] = vp->scale[0];
934 uniform->f[1] = vp->scale[1];
935 uniform->f[2] = vp->scale[2];
936 }
937
938 static void
939 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
940 struct sysval_uniform *uniform)
941 {
942 struct panfrost_context *ctx = batch->ctx;
943 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
944
945 uniform->f[0] = vp->translate[0];
946 uniform->f[1] = vp->translate[1];
947 uniform->f[2] = vp->translate[2];
948 }
949
950 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
951 enum pipe_shader_type st,
952 unsigned int sysvalid,
953 struct sysval_uniform *uniform)
954 {
955 struct panfrost_context *ctx = batch->ctx;
956 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
957 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
958 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
959 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
960
961 assert(dim);
962 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
963
964 if (dim > 1)
965 uniform->i[1] = u_minify(tex->texture->height0,
966 tex->u.tex.first_level);
967
968 if (dim > 2)
969 uniform->i[2] = u_minify(tex->texture->depth0,
970 tex->u.tex.first_level);
971
972 if (is_array)
973 uniform->i[dim] = tex->texture->array_size;
974 }
975
976 static void
977 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
978 enum pipe_shader_type st,
979 unsigned ssbo_id,
980 struct sysval_uniform *uniform)
981 {
982 struct panfrost_context *ctx = batch->ctx;
983
984 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
985 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
986
987 /* Compute address */
988 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
989
990 panfrost_batch_add_bo(batch, bo,
991 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
992 panfrost_bo_access_for_stage(st));
993
994 /* Upload address and size as sysval */
995 uniform->du[0] = bo->gpu + sb.buffer_offset;
996 uniform->u[2] = sb.buffer_size;
997 }
998
999 static void
1000 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
1001 enum pipe_shader_type st,
1002 unsigned samp_idx,
1003 struct sysval_uniform *uniform)
1004 {
1005 struct panfrost_context *ctx = batch->ctx;
1006 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1007
1008 uniform->f[0] = sampl->min_lod;
1009 uniform->f[1] = sampl->max_lod;
1010 uniform->f[2] = sampl->lod_bias;
1011
1012 /* Even without any errata, Midgard represents "no mipmapping" as
1013 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1014 * panfrost_create_sampler_state which also explains our choice of
1015 * epsilon value (again to keep behaviour consistent) */
1016
1017 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1018 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1019 }
1020
1021 static void
1022 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1023 struct sysval_uniform *uniform)
1024 {
1025 struct panfrost_context *ctx = batch->ctx;
1026
1027 uniform->u[0] = ctx->compute_grid->grid[0];
1028 uniform->u[1] = ctx->compute_grid->grid[1];
1029 uniform->u[2] = ctx->compute_grid->grid[2];
1030 }
1031
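/* Walk the system values requested by the compiled shader and write each one
 * into its uniform slot; every sysval occupies one vec4 (16 bytes) at the
 * start of the uniform area. */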
1032 static void
1033 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1034 struct panfrost_shader_state *ss,
1035 enum pipe_shader_type st)
1036 {
1037 struct sysval_uniform *uniforms = (void *)buf;
1038
1039 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1040 int sysval = ss->sysval[i];
1041
1042 switch (PAN_SYSVAL_TYPE(sysval)) {
1043 case PAN_SYSVAL_VIEWPORT_SCALE:
1044 panfrost_upload_viewport_scale_sysval(batch,
1045 &uniforms[i]);
1046 break;
1047 case PAN_SYSVAL_VIEWPORT_OFFSET:
1048 panfrost_upload_viewport_offset_sysval(batch,
1049 &uniforms[i]);
1050 break;
1051 case PAN_SYSVAL_TEXTURE_SIZE:
1052 panfrost_upload_txs_sysval(batch, st,
1053 PAN_SYSVAL_ID(sysval),
1054 &uniforms[i]);
1055 break;
1056 case PAN_SYSVAL_SSBO:
1057 panfrost_upload_ssbo_sysval(batch, st,
1058 PAN_SYSVAL_ID(sysval),
1059 &uniforms[i]);
1060 break;
1061 case PAN_SYSVAL_NUM_WORK_GROUPS:
1062 panfrost_upload_num_work_groups_sysval(batch,
1063 &uniforms[i]);
1064 break;
1065 case PAN_SYSVAL_SAMPLER:
1066 panfrost_upload_sampler_sysval(batch, st,
1067 PAN_SYSVAL_ID(sysval),
1068 &uniforms[i]);
1069 break;
1070 default:
1071 assert(0);
1072 }
1073 }
1074 }
1075
1076 static const void *
1077 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1078 unsigned index)
1079 {
1080 struct pipe_constant_buffer *cb = &buf->cb[index];
1081 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1082
1083 if (rsrc)
1084 return rsrc->bo->cpu;
1085 else if (cb->user_buffer)
1086 return cb->user_buffer;
1087 else
1088 unreachable("No constant buffer");
1089 }
1090
1091 void
1092 panfrost_emit_const_buf(struct panfrost_batch *batch,
1093 enum pipe_shader_type stage,
1094 struct mali_vertex_tiler_postfix *postfix)
1095 {
1096 struct panfrost_context *ctx = batch->ctx;
1097 struct panfrost_shader_variants *all = ctx->shader[stage];
1098
1099 if (!all)
1100 return;
1101
1102 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1103
1104 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1105
1106 /* Uniforms are implicitly UBO #0 */
1107 bool has_uniforms = buf->enabled_mask & (1 << 0);
1108
1109 /* Allocate room for the sysvals and the uniforms */
1110 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1111 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1112 size_t size = sys_size + uniform_size;
1113 struct panfrost_transfer transfer =
1114 panfrost_pool_alloc_aligned(&batch->pool, size, 16);
1115
1116 /* Upload sysvals requested by the shader */
1117 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1118
1119 /* Upload uniforms */
1120 if (has_uniforms && uniform_size) {
1121 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1122 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1123 }
1124
1125 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1126 * uploaded */
1127
1128 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1129 assert(ubo_count >= 1);
1130
1131 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1132 struct panfrost_transfer ubos =
1133 panfrost_pool_alloc_aligned(&batch->pool, sz,
1134 MALI_UNIFORM_BUFFER_LENGTH);
1135
1136 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1137
1138 /* Upload uniforms as a UBO */
1139
1140 if (size) {
1141 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1142 cfg.entries = DIV_ROUND_UP(size, 16);
1143 cfg.pointer = transfer.gpu;
1144 }
1145 } else {
1146 *ubo_ptr = 0;
1147 }
1148
1149 /* The rest are honest-to-goodness UBOs */
1150
1151 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1152 size_t usz = buf->cb[ubo].buffer_size;
1153 bool enabled = buf->enabled_mask & (1 << ubo);
1154 bool empty = usz == 0;
1155
1156 if (!enabled || empty) {
1157 ubo_ptr[ubo] = 0;
1158 continue;
1159 }
1160
1161 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1162 cfg.entries = DIV_ROUND_UP(usz, 16);
1163 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1164 stage, buf, ubo);
1165 }
1166 }
1167
1168 postfix->uniforms = transfer.gpu;
1169 postfix->uniform_buffers = ubos.gpu;
1170
1171 buf->dirty_mask = 0;
1172 }
1173
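/* Emit the shared memory descriptor for a compute job. The allocation is
 * sized for the worst case: the per-workgroup shared size (rounded up to a
 * power of two, at least 128 bytes), times a power-of-two bound on the number
 * of workgroups, times the core count. */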
1174 void
1175 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1176 const struct pipe_grid_info *info,
1177 struct midgard_payload_vertex_tiler *vtp)
1178 {
1179 struct panfrost_context *ctx = batch->ctx;
1180 struct panfrost_device *dev = pan_device(ctx->base.screen);
1181 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1182 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1183 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1184 128));
1185
1186 unsigned log2_instances =
1187 util_logbase2_ceil(info->grid[0]) +
1188 util_logbase2_ceil(info->grid[1]) +
1189 util_logbase2_ceil(info->grid[2]);
1190
1191 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
1192 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1193 shared_size,
1194 1);
1195
1196 struct mali_shared_memory shared = {
1197 .shared_memory = bo->gpu,
1198 .shared_workgroup_count = log2_instances,
1199 .shared_shift = util_logbase2(single_size) + 1
1200 };
1201
1202 vtp->postfix.shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared,
1203 sizeof(shared), 64);
1204 }
1205
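/* Return the GPU address of a texture descriptor on the Midgard path, adding
 * both the descriptor BO and the backing resource BO to the batch so they
 * stay alive until the job completes. */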
1206 static mali_ptr
1207 panfrost_get_tex_desc(struct panfrost_batch *batch,
1208 enum pipe_shader_type st,
1209 struct panfrost_sampler_view *view)
1210 {
1211 if (!view)
1212 return (mali_ptr) 0;
1213
1214 struct pipe_sampler_view *pview = &view->base;
1215 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1216
1217 /* Add the BO to the job so it's retained until the job is done. */
1218
1219 panfrost_batch_add_bo(batch, rsrc->bo,
1220 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1221 panfrost_bo_access_for_stage(st));
1222
1223 panfrost_batch_add_bo(batch, view->bo,
1224 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1225 panfrost_bo_access_for_stage(st));
1226
1227 return view->bo->gpu;
1228 }
1229
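/* Lazily rebuild a sampler view's texture descriptor if the backing BO or
 * layout modifier has changed since the descriptor was created. */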
1230 static void
1231 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1232 struct pipe_context *pctx)
1233 {
1234 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1235 if (view->texture_bo != rsrc->bo->gpu ||
1236 view->modifier != rsrc->modifier) {
1237 panfrost_bo_unreference(view->bo);
1238 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1239 }
1240 }
1241
1242 void
1243 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1244 enum pipe_shader_type stage,
1245 struct mali_vertex_tiler_postfix *postfix)
1246 {
1247 struct panfrost_context *ctx = batch->ctx;
1248 struct panfrost_device *device = pan_device(ctx->base.screen);
1249
1250 if (!ctx->sampler_view_count[stage])
1251 return;
1252
1253 if (device->quirks & IS_BIFROST) {
1254 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1255 MALI_BIFROST_TEXTURE_LENGTH *
1256 ctx->sampler_view_count[stage],
1257 MALI_BIFROST_TEXTURE_LENGTH);
1258
1259 struct mali_bifrost_texture_packed *out =
1260 (struct mali_bifrost_texture_packed *) T.cpu;
1261
1262 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1263 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1264 struct pipe_sampler_view *pview = &view->base;
1265 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1266
1267 panfrost_update_sampler_view(view, &ctx->base);
1268 out[i] = view->bifrost_descriptor;
1269
1270 /* Add the BOs to the job so they are retained until the job is done. */
1271
1272 panfrost_batch_add_bo(batch, rsrc->bo,
1273 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1274 panfrost_bo_access_for_stage(stage));
1275
1276 panfrost_batch_add_bo(batch, view->bo,
1277 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1278 panfrost_bo_access_for_stage(stage));
1279 }
1280
1281 postfix->textures = T.gpu;
1282 } else {
1283 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1284
1285 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1286 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1287
1288 panfrost_update_sampler_view(view, &ctx->base);
1289
1290 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1291 }
1292
1293 postfix->textures = panfrost_pool_upload_aligned(&batch->pool,
1294 trampolines,
1295 sizeof(uint64_t) *
1296 ctx->sampler_view_count[stage],
1297 sizeof(uint64_t));
1298 }
1299 }
1300
1301 void
1302 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1303 enum pipe_shader_type stage,
1304 struct mali_vertex_tiler_postfix *postfix)
1305 {
1306 struct panfrost_context *ctx = batch->ctx;
1307
1308 if (!ctx->sampler_count[stage])
1309 return;
1310
1311 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1312 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1313
1314 size_t sz = desc_size * ctx->sampler_count[stage];
1315 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool, sz, desc_size);
1316 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1317
1318 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1319 out[i] = ctx->samplers[stage][i]->hw;
1320
1321 postfix->sampler_descriptor = T.gpu;
1322 }
1323
1324 void
1325 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1326 struct mali_vertex_tiler_postfix *vertex_postfix)
1327 {
1328 struct panfrost_context *ctx = batch->ctx;
1329 struct panfrost_vertex_state *so = ctx->vertex;
1330 struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1331
1332 unsigned instance_shift = vertex_postfix->instance_shift;
1333 unsigned instance_odd = vertex_postfix->instance_odd;
1334
1335 /* Worst case: everything is NPOT, which is only possible if instancing
1336 * is enabled. Otherwise a single record is guaranteed */
1337 bool could_npot = instance_shift || instance_odd;
1338
1339 struct panfrost_transfer S = panfrost_pool_alloc_aligned(&batch->pool,
1340 MALI_ATTRIBUTE_BUFFER_LENGTH * vs->attribute_count *
1341 (could_npot ? 2 : 1),
1342 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1343
1344 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1345 MALI_ATTRIBUTE_LENGTH * vs->attribute_count,
1346 MALI_ATTRIBUTE_LENGTH);
1347
1348 struct mali_attribute_buffer_packed *bufs =
1349 (struct mali_attribute_buffer_packed *) S.cpu;
1350
1351 struct mali_attribute_packed *out =
1352 (struct mali_attribute_packed *) T.cpu;
1353
1354 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1355 unsigned k = 0;
1356
1357 for (unsigned i = 0; i < so->num_elements; ++i) {
1358 /* We map buffers 1:1 with the attributes, which
1359 * means duplicating some vertex buffers (who cares? aside from
1360 * maybe some caching implications but I somehow doubt that
1361 * matters) */
1362
1363 struct pipe_vertex_element *elem = &so->pipe[i];
1364 unsigned vbi = elem->vertex_buffer_index;
1365 attrib_to_buffer[i] = k;
1366
1367 if (!(ctx->vb_mask & (1 << vbi)))
1368 continue;
1369
1370 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1371 struct panfrost_resource *rsrc;
1372
1373 rsrc = pan_resource(buf->buffer.resource);
1374 if (!rsrc)
1375 continue;
1376
1377 /* Add a dependency of the batch on the vertex buffer */
1378 panfrost_batch_add_bo(batch, rsrc->bo,
1379 PAN_BO_ACCESS_SHARED |
1380 PAN_BO_ACCESS_READ |
1381 PAN_BO_ACCESS_VERTEX_TILER);
1382
1383 /* Mask off lower bits, see offset fixup below */
1384 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1385 mali_ptr addr = raw_addr & ~63;
1386
1387 /* Since we advanced the base pointer, we shrink the buffer
1388 * size, but add the offset we subtracted */
1389 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1390 - buf->buffer_offset;
1391
1392 /* When there is a divisor, the hardware-level divisor is
1393 * the product of the instance divisor and the padded count */
1394 unsigned divisor = elem->instance_divisor;
1395 unsigned hw_divisor = ctx->padded_count * divisor;
1396 unsigned stride = buf->stride;
1397
1398 /* If there's a divisor (= 1) but no instancing, we want every
1399 * attribute to be the same */
1400
1401 if (divisor && ctx->instance_count == 1)
1402 stride = 0;
1403
1404 if (!divisor || ctx->instance_count <= 1) {
1405 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1406 if (ctx->instance_count > 1)
1407 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1408
1409 cfg.pointer = addr;
1410 cfg.stride = stride;
1411 cfg.size = size;
1412 cfg.divisor_r = instance_shift;
1413 cfg.divisor_p = instance_odd;
1414 }
1415 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1416 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1417 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1418 cfg.pointer = addr;
1419 cfg.stride = stride;
1420 cfg.size = size;
1421 cfg.divisor_r = __builtin_ctz(hw_divisor);
1422 }
1423
1424 } else {
1425 unsigned shift = 0, extra_flags = 0;
1426
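/* For NPOT divisors the hardware takes a fixed-point reciprocal (the "magic"
 * divisor) plus shift and rounding flags, much like compilers strength-reduce
 * integer division by a constant; the continuation record below carries the
 * magic number and the original divisor. */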
1427 unsigned magic_divisor =
1428 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
1429
1430 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1431 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1432 cfg.pointer = addr;
1433 cfg.stride = stride;
1434 cfg.size = size;
1435
1436 cfg.divisor_r = shift;
1437 cfg.divisor_e = extra_flags;
1438 }
1439
1440 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1441 cfg.divisor_numerator = magic_divisor;
1442 cfg.divisor = divisor;
1443 }
1444
1445 ++k;
1446 }
1447
1448 ++k;
1449 }
1450
1451 /* Add special gl_VertexID/gl_InstanceID buffers */
1452
1453 if (unlikely(vs->attribute_count >= PAN_VERTEX_ID)) {
1454 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1455
1456 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1457 cfg.buffer_index = k++;
1458 cfg.format = so->formats[PAN_VERTEX_ID];
1459 }
1460
1461 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1462
1463 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1464 cfg.buffer_index = k++;
1465 cfg.format = so->formats[PAN_INSTANCE_ID];
1466 }
1467 }
1468
1469 /* Attribute addresses require 64-byte alignment, so let:
1470 *
1471 * base' = base & ~63 = base - (base & 63)
1472 * offset' = offset + (base & 63)
1473 *
1474 * Since base' + offset' = base + offset, these are equivalent
1475 * addressing modes, and now the base is 64-byte aligned.
1476 */
1477
1478 unsigned start = vertex_postfix->offset_start;
1479
1480 for (unsigned i = 0; i < so->num_elements; ++i) {
1481 unsigned vbi = so->pipe[i].vertex_buffer_index;
1482 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1483
1484 /* Adjust by the masked off bits of the offset. Make sure we
1485 * read src_offset from so->hw (which is not GPU visible)
1486 * rather than target (which is) due to caching effects */
1487
1488 unsigned src_offset = so->pipe[i].src_offset;
1489
1490 /* BOs aligned to 4k so guaranteed aligned to 64 */
1491 src_offset += (buf->buffer_offset & 63);
1492
1493 /* Also, somewhat obscurely, per-instance data needs to be
1494 * offset in response to a delayed start in an indexed draw */
1495
1496 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1497 src_offset -= buf->stride * start;
1498
1499 pan_pack(out + i, ATTRIBUTE, cfg) {
1500 cfg.buffer_index = attrib_to_buffer[i];
1501 cfg.format = so->formats[i];
1502 cfg.offset = src_offset;
1503 }
1504 }
1505
1506 vertex_postfix->attributes = S.gpu;
1507 vertex_postfix->attribute_meta = T.gpu;
1508 }
1509
1510 static mali_ptr
1511 panfrost_emit_varyings(struct panfrost_batch *batch,
1512 struct mali_attribute_buffer_packed *slot,
1513 unsigned stride, unsigned count)
1514 {
1515 unsigned size = stride * count;
1516 mali_ptr ptr = panfrost_pool_alloc_aligned(&batch->invisible_pool, size, 64).gpu;
1517
1518 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1519 cfg.stride = stride;
1520 cfg.size = size;
1521 cfg.pointer = ptr;
1522 }
1523
1524 return ptr;
1525 }
1526
1527 static unsigned
1528 panfrost_streamout_offset(unsigned stride, unsigned offset,
1529 struct pipe_stream_output_target *target)
1530 {
1531 return (target->buffer_offset + (offset * stride * 4)) & 63;
1532 }
1533
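/* Emit an attribute buffer record that aliases a streamout target. The
 * pointer is rounded down to 64 bytes for alignment, and the masked-off bytes
 * are added back to the record's size; the matching attribute offset comes
 * from panfrost_streamout_offset above. */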
1534 static void
1535 panfrost_emit_streamout(struct panfrost_batch *batch,
1536 struct mali_attribute_buffer_packed *slot,
1537 unsigned stride_words, unsigned offset, unsigned count,
1538 struct pipe_stream_output_target *target)
1539 {
1540 unsigned stride = stride_words * 4;
1541 unsigned max_size = target->buffer_size;
1542 unsigned expected_size = stride * count;
1543
1544 /* Grab the BO and bind it to the batch */
1545 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1546
1547 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1548 * the perspective of the TILER and FRAGMENT.
1549 */
1550 panfrost_batch_add_bo(batch, bo,
1551 PAN_BO_ACCESS_SHARED |
1552 PAN_BO_ACCESS_RW |
1553 PAN_BO_ACCESS_VERTEX_TILER |
1554 PAN_BO_ACCESS_FRAGMENT);
1555
1556 /* We will have an offset applied to get alignment */
1557 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
1558
1559 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1560 cfg.pointer = (addr & ~63);
1561 cfg.stride = stride;
1562 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1563 }
1564 }
1565
1566 static bool
1567 has_point_coord(unsigned mask, gl_varying_slot loc)
1568 {
1569 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1570 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1571 else if (loc == VARYING_SLOT_PNTC)
1572 return (mask & (1 << 8));
1573 else
1574 return false;
1575 }
1576
1577 /* Helpers for manipulating stream out information so we can pack varyings
1578 * accordingly. Compute the src_offset for a given captured varying */
1579
1580 static struct pipe_stream_output *
1581 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1582 {
1583 for (unsigned i = 0; i < info->num_outputs; ++i) {
1584 if (info->output[i].register_index == loc)
1585 return &info->output[i];
1586 }
1587
1588 unreachable("Varying not captured");
1589 }
1590
1591 static unsigned
1592 pan_varying_size(enum mali_format fmt)
1593 {
1594 unsigned type = MALI_EXTRACT_TYPE(fmt);
1595 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1596 unsigned bits = MALI_EXTRACT_BITS(fmt);
1597 unsigned bpc = 0;
1598
1599 if (bits == MALI_CHANNEL_FLOAT) {
1600 /* No doubles */
1601 bool fp16 = (type == MALI_FORMAT_SINT);
1602 assert(fp16 || (type == MALI_FORMAT_UNORM));
1603
1604 bpc = fp16 ? 2 : 4;
1605 } else {
1606 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1607
1608 /* See the enums */
1609 bits = 1 << bits;
1610 assert(bits >= 8);
1611 bpc = bits / 8;
1612 }
1613
1614 return bpc * chan;
1615 }
1616
1617 /* Indices for named (non-XFB) varyings that are present. These are packed
1618 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1619 * PAN_VARY_*). This has the nice property that you can look up the buffer index
1620 * of a given special field given a shift S by:
1621 *
1622 * idx = popcount(P & ((1 << S) - 1))
1623 *
1624 * That is, look at all of the varyings that come earlier and count them; that
1625 * count is the index of this one. Likewise, the total number of special
1626 * buffers required is simply popcount(P)
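 *
 * For example, if only the general, position and point size buffers are
 * present, P = 0b111; the buffer index of PAN_VARY_PSIZ (S = 2) is
 * popcount(0b111 & ((1 << 2) - 1)) = 2, and XFB buffers start at
 * pan_xfb_base(P) = popcount(0b111) = 3.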
1627 */
1628
1629 enum pan_special_varying {
1630 PAN_VARY_GENERAL = 0,
1631 PAN_VARY_POSITION = 1,
1632 PAN_VARY_PSIZ = 2,
1633 PAN_VARY_PNTCOORD = 3,
1634 PAN_VARY_FACE = 4,
1635 PAN_VARY_FRAGCOORD = 5,
1636
1637 /* Keep last */
1638 PAN_VARY_MAX,
1639 };
1640
1641 /* Given a varying, figure out which index it corresponds to */
1642
1643 static inline unsigned
1644 pan_varying_index(unsigned present, enum pan_special_varying v)
1645 {
1646 unsigned mask = (1 << v) - 1;
1647 return util_bitcount(present & mask);
1648 }
1649
1650 /* Get the base offset for XFB buffers, which by convention come after
1651 * everything else. Wrapper function for semantic reasons; by construction this
1652 * is just popcount. */
1653
1654 static inline unsigned
1655 pan_xfb_base(unsigned present)
1656 {
1657 return util_bitcount(present);
1658 }
1659
1660 /* Computes the present mask for varyings so we can start emitting varying records */
1661
1662 static inline unsigned
1663 pan_varying_present(
1664 struct panfrost_shader_state *vs,
1665 struct panfrost_shader_state *fs,
1666 unsigned quirks)
1667 {
1668 /* At the moment we always emit general and position buffers. Not
1669 * strictly necessary but usually harmless */
1670
1671 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1672
1673 /* Enable special buffers by the shader info */
1674
1675 if (vs->writes_point_size)
1676 present |= (1 << PAN_VARY_PSIZ);
1677
1678 if (fs->reads_point_coord)
1679 present |= (1 << PAN_VARY_PNTCOORD);
1680
1681 if (fs->reads_face)
1682 present |= (1 << PAN_VARY_FACE);
1683
1684 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1685 present |= (1 << PAN_VARY_FRAGCOORD);
1686
1687 /* Also, if we have a point sprite, we need a point coord buffer */
1688
1689 for (unsigned i = 0; i < fs->varying_count; i++) {
1690 gl_varying_slot loc = fs->varyings_loc[i];
1691
1692 if (has_point_coord(fs->point_sprite_mask, loc))
1693 present |= (1 << PAN_VARY_PNTCOORD);
1694 }
1695
1696 return present;
1697 }
1698
1699 /* Emitters for varying records */
1700
1701 static void
1702 pan_emit_vary(struct mali_attribute_packed *out,
1703 unsigned present, enum pan_special_varying buf,
1704 unsigned quirks, enum mali_format format,
1705 unsigned offset)
1706 {
1707 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1708 unsigned swizzle = quirks & HAS_SWIZZLES ?
1709 panfrost_get_default_swizzle(nr_channels) :
1710 panfrost_bifrost_swizzle(nr_channels);
1711
1712 pan_pack(out, ATTRIBUTE, cfg) {
1713 cfg.buffer_index = pan_varying_index(present, buf);
1714 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1715 cfg.format = (format << 12) | swizzle;
1716 cfg.offset = offset;
1717 }
1718 }
1719
1720 /* General varying that is unused */
1721
1722 static void
1723 pan_emit_vary_only(struct mali_attribute_packed *out,
1724 unsigned present, unsigned quirks)
1725 {
1726 pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
1727 }
1728
1729 /* Special records */
1730
1731 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1732 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1733 [PAN_VARY_PSIZ] = MALI_R16F,
1734 [PAN_VARY_PNTCOORD] = MALI_R16F,
1735 [PAN_VARY_FACE] = MALI_R32I,
1736 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1737 };
1738
1739 static void
1740 pan_emit_vary_special(struct mali_attribute_packed *out,
1741 unsigned present, enum pan_special_varying buf,
1742 unsigned quirks)
1743 {
1744 assert(buf < PAN_VARY_MAX);
1745 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1746 }
1747
1748 static enum mali_format
1749 pan_xfb_format(enum mali_format format, unsigned nr)
1750 {
1751 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1752 return MALI_R32F | MALI_NR_CHANNELS(nr);
1753 else
1754 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1755 }
1756
1757 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1758 * a bitfield) 32-bit, smaller than a 64-bit pointer, so we may as well pass
1759 * it by value. */
1760
1761 static void
1762 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1763 unsigned present,
1764 unsigned max_xfb,
1765 unsigned *streamout_offsets,
1766 unsigned quirks,
1767 enum mali_format format,
1768 struct pipe_stream_output o)
1769 {
1770 unsigned swizzle = quirks & HAS_SWIZZLES ?
1771 panfrost_get_default_swizzle(o.num_components) :
1772 panfrost_bifrost_swizzle(o.num_components);
1773
1774 pan_pack(out, ATTRIBUTE, cfg) {
1775 /* XFB buffers come after everything else */
1776 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1777 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1778
1779 /* Override number of channels and precision to highp */
1780 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1781
1782 /* Apply given offsets together */
1783 cfg.offset = (o.dst_offset * 4) /* dwords */
1784 + streamout_offsets[o.output_buffer];
1785 }
1786 }
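/* Offset arithmetic example: a varying captured to output_buffer 1 at
 * dst_offset 4 (dwords), with streamout_offsets[1] = 256, lands at byte
 * offset 4 * 4 + 256 = 272 within that stream-out buffer. */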
1787
1788 /* Determine if we should capture a varying for XFB. This requires actually
1789 * having a buffer for it. If we don't capture it, we'll fall back to a general
1790 * varying path (linked or unlinked, possibly discarding the write) */
1791
1792 static bool
1793 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1794 unsigned loc, unsigned max_xfb)
1795 {
1796 if (!(xfb->so_mask & (1ll << loc)))
1797 return false;
1798
1799 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1800 return o->output_buffer < max_xfb;
1801 }
1802
1803 static void
1804 pan_emit_general_varying(struct mali_attribute_packed *out,
1805 struct panfrost_shader_state *other,
1806 struct panfrost_shader_state *xfb,
1807 gl_varying_slot loc,
1808 enum mali_format format,
1809 unsigned present,
1810 unsigned quirks,
1811 unsigned *gen_offsets,
1812 enum mali_format *gen_formats,
1813 unsigned *gen_stride,
1814 unsigned idx,
1815 bool should_alloc)
1816 {
1817 /* Check if we're linked */
1818 signed other_idx = -1;
1819
1820 for (unsigned j = 0; j < other->varying_count; ++j) {
1821 if (other->varyings_loc[j] == loc) {
1822 other_idx = j;
1823 break;
1824 }
1825 }
1826
1827 if (other_idx < 0) {
1828 pan_emit_vary_only(out, present, quirks);
1829 return;
1830 }
1831
1832 unsigned offset = gen_offsets[other_idx];
1833
1834 if (should_alloc) {
1835 /* We're linked, so allocate space via a watermark allocation */
1836 enum mali_format alt = other->varyings[other_idx];
1837
1838 /* Do interpolation at minimum precision */
1839 unsigned size_main = pan_varying_size(format);
1840 unsigned size_alt = pan_varying_size(alt);
1841 unsigned size = MIN2(size_main, size_alt);
1842
1843 /* If a varying is marked for XFB but not actually captured, we
1844 * should match the format to the format that would otherwise
1845 * be used for XFB, since dEQP checks for invariance here. It's
1846 * unclear if this is required by the spec. */
1847
1848 if (xfb->so_mask & (1ull << loc)) {
1849 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1850 format = pan_xfb_format(format, o->num_components);
1851 size = pan_varying_size(format);
1852 } else if (size == size_alt) {
1853 format = alt;
1854 }
1855
1856 gen_offsets[idx] = *gen_stride;
1857 gen_formats[other_idx] = format;
1858 offset = *gen_stride;
1859 *gen_stride += size;
1860 }
1861
1862 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1863 }
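/* Linking sketch (formats chosen for illustration): if this stage declares the
 * varying as a 16-byte fp32 vec4 but the linked stage uses an 8-byte fp16
 * vec4, the minimum is 8 bytes, so the record is emitted with the fp16 format
 * and *gen_stride grows by 8 for this slot. */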
1864
1865 /* Higher-level wrapper around all of the above, classifying a varying as one
1866 * of the special cases above or as a general varying */
1867
1868 static void
1869 panfrost_emit_varying(
1870 struct mali_attribute_packed *out,
1871 struct panfrost_shader_state *stage,
1872 struct panfrost_shader_state *other,
1873 struct panfrost_shader_state *xfb,
1874 unsigned present,
1875 unsigned max_xfb,
1876 unsigned *streamout_offsets,
1877 unsigned quirks,
1878 unsigned *gen_offsets,
1879 enum mali_format *gen_formats,
1880 unsigned *gen_stride,
1881 unsigned idx,
1882 bool should_alloc,
1883 bool is_fragment)
1884 {
1885 gl_varying_slot loc = stage->varyings_loc[idx];
1886 enum mali_format format = stage->varyings[idx];
1887
1888 /* Override format to match linkage */
1889 if (!should_alloc && gen_formats[idx])
1890 format = gen_formats[idx];
1891
1892 if (has_point_coord(stage->point_sprite_mask, loc)) {
1893 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1894 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1895 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1896 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1897 } else if (loc == VARYING_SLOT_POS) {
1898 if (is_fragment)
1899 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1900 else
1901 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1902 } else if (loc == VARYING_SLOT_PSIZ) {
1903 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1904 } else if (loc == VARYING_SLOT_PNTC) {
1905 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1906 } else if (loc == VARYING_SLOT_FACE) {
1907 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1908 } else {
1909 pan_emit_general_varying(out, other, xfb, loc, format, present,
1910 quirks, gen_offsets, gen_formats, gen_stride,
1911 idx, should_alloc);
1912 }
1913 }
1914
1915 static void
1916 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1917 unsigned present,
1918 enum pan_special_varying v,
1919 unsigned special)
1920 {
1921 if (present & (1 << v)) {
1922 unsigned idx = pan_varying_index(present, v);
1923
1924 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1925 cfg.special = special;
1926 cfg.type = 0;
1927 }
1928 }
1929 }
1930
1931 void
1932 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1933 unsigned vertex_count,
1934 struct mali_vertex_tiler_postfix *vertex_postfix,
1935 struct mali_vertex_tiler_postfix *tiler_postfix,
1936 union midgard_primitive_size *primitive_size)
1937 {
1938 /* Load the shaders */
1939 struct panfrost_context *ctx = batch->ctx;
1940 struct panfrost_device *dev = pan_device(ctx->base.screen);
1941 struct panfrost_shader_state *vs, *fs;
1942 size_t vs_size, fs_size;
1943
1944 /* Allocate the varying descriptor */
1945
1946 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1947 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1948 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1949 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1950
1951 struct panfrost_transfer trans = panfrost_pool_alloc_aligned(
1952 &batch->pool, vs_size + fs_size, MALI_ATTRIBUTE_LENGTH);
1953
1954 struct pipe_stream_output_info *so = &vs->stream_output;
1955 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1956
1957 /* Check if this varying is linked by us. This is the case for
1958 * general-purpose, non-captured varyings. If it is, link it. If it's
1959 * not, use the provided stream out information to determine the
1960 * offset, since it was already linked for us. */
1961
1962 unsigned gen_offsets[32];
1963 enum mali_format gen_formats[32];
1964 memset(gen_offsets, 0, sizeof(gen_offsets));
1965 memset(gen_formats, 0, sizeof(gen_formats));
1966
1967 unsigned gen_stride = 0;
1968 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1969 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1970
1971 unsigned streamout_offsets[32];
1972
1973 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1974 streamout_offsets[i] = panfrost_streamout_offset(
1975 so->stride[i],
1976 ctx->streamout.offsets[i],
1977 ctx->streamout.targets[i]);
1978 }
1979
1980 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1981 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1982
1983 for (unsigned i = 0; i < vs->varying_count; i++) {
1984 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1985 ctx->streamout.num_targets, streamout_offsets,
1986 dev->quirks,
1987 gen_offsets, gen_formats, &gen_stride, i, true, false);
1988 }
1989
1990 for (unsigned i = 0; i < fs->varying_count; i++) {
1991 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
1992 ctx->streamout.num_targets, streamout_offsets,
1993 dev->quirks,
1994 gen_offsets, gen_formats, &gen_stride, i, false, true);
1995 }
1996
1997 unsigned xfb_base = pan_xfb_base(present);
1998 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1999 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets),
2000 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
2001 struct mali_attribute_buffer_packed *varyings =
2002 (struct mali_attribute_buffer_packed *) T.cpu;
2003
2004 /* Emit the stream out buffers */
2005
2006 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
2007 ctx->vertex_count);
2008
2009 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2010 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
2011 so->stride[i],
2012 ctx->streamout.offsets[i],
2013 out_count,
2014 ctx->streamout.targets[i]);
2015 }
2016
2017 panfrost_emit_varyings(batch,
2018 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2019 gen_stride, vertex_count);
2020
2021 /* fp32 vec4 gl_Position */
2022 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2023 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2024 sizeof(float) * 4, vertex_count);
2025
2026 if (present & (1 << PAN_VARY_PSIZ)) {
2027 primitive_size->pointer = panfrost_emit_varyings(batch,
2028 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2029 2, vertex_count);
2030 }
2031
2032 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
2033 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
2034 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
2035
2036 vertex_postfix->varyings = T.gpu;
2037 tiler_postfix->varyings = T.gpu;
2038
2039 vertex_postfix->varying_meta = trans.gpu;
2040 tiler_postfix->varying_meta = trans.gpu + vs_size;
2041 }
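/* To make the resulting layout concrete: with present covering GENERAL,
 * POSITION and PNTCOORD and two stream-out targets, the attribute buffer
 * array at T.gpu would hold [0] general, [1] position, [2] point coord
 * (special record), [3] XFB buffer 0, [4] XFB buffer 1, with xfb_base = 3. */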
2042
2043 void
2044 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2045 struct mali_vertex_tiler_prefix *vertex_prefix,
2046 struct mali_vertex_tiler_postfix *vertex_postfix,
2047 struct mali_vertex_tiler_prefix *tiler_prefix,
2048 struct mali_vertex_tiler_postfix *tiler_postfix,
2049 union midgard_primitive_size *primitive_size)
2050 {
2051 struct panfrost_context *ctx = batch->ctx;
2052 struct panfrost_device *device = pan_device(ctx->base.screen);
2053 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2054 struct bifrost_payload_vertex bifrost_vertex = {0,};
2055 struct bifrost_payload_tiler bifrost_tiler = {0,};
2056 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2057 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2058 void *vp, *tp;
2059 size_t vp_size, tp_size;
2060
2061 if (device->quirks & IS_BIFROST) {
2062 bifrost_vertex.prefix = *vertex_prefix;
2063 bifrost_vertex.postfix = *vertex_postfix;
2064 vp = &bifrost_vertex;
2065 vp_size = sizeof(bifrost_vertex);
2066
2067 bifrost_tiler.prefix = *tiler_prefix;
2068 bifrost_tiler.tiler.primitive_size = *primitive_size;
2069 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2070 bifrost_tiler.postfix = *tiler_postfix;
2071 tp = &bifrost_tiler;
2072 tp_size = sizeof(bifrost_tiler);
2073 } else {
2074 midgard_vertex.prefix = *vertex_prefix;
2075 midgard_vertex.postfix = *vertex_postfix;
2076 vp = &midgard_vertex;
2077 vp_size = sizeof(midgard_vertex);
2078
2079 midgard_tiler.prefix = *tiler_prefix;
2080 midgard_tiler.postfix = *tiler_postfix;
2081 midgard_tiler.primitive_size = *primitive_size;
2082 tp = &midgard_tiler;
2083 tp_size = sizeof(midgard_tiler);
2084 }
2085
2086 if (wallpapering) {
2087 /* Inject in reverse order, with "predicted" job indices.
2088 * THIS IS A HACK XXX */
2089 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2090 batch->scoreboard.job_index + 2, tp, tp_size, true);
2091 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2092 vp, vp_size, true);
2093 return;
2094 }
2095
2096 /* If rasterizer discard is enabled, only submit the vertex job */
2097
2098 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2099 vp, vp_size, false);
2100
2101 if (ctx->rasterizer->base.rasterizer_discard)
2102 return;
2103
2104 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2105 false);
2106 }
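/* Submission summary: in the normal (non-wallpaper) path the vertex job is
 * queued first and its returned index is passed as the dependency of the
 * tiler job; with rasterizer discard enabled the tiler job is skipped
 * entirely. */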
2107
2108 /* TODO: stop hardcoding this */
2109 mali_ptr
2110 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2111 {
2112 uint16_t locations[] = {
2113 128, 128,
2114 0, 256,
2115 0, 256,
2116 0, 256,
2117 0, 256,
2118 0, 256,
2119 0, 256,
2120 0, 256,
2121 0, 256,
2122 0, 256,
2123 0, 256,
2124 0, 256,
2125 0, 256,
2126 0, 256,
2127 0, 256,
2128 0, 256,
2129 0, 256,
2130 0, 256,
2131 0, 256,
2132 0, 256,
2133 0, 256,
2134 0, 256,
2135 0, 256,
2136 0, 256,
2137 0, 256,
2138 0, 256,
2139 0, 256,
2140 0, 256,
2141 0, 256,
2142 0, 256,
2143 0, 256,
2144 0, 256,
2145 128, 128,
2146 0, 0,
2147 0, 0,
2148 0, 0,
2149 0, 0,
2150 0, 0,
2151 0, 0,
2152 0, 0,
2153 0, 0,
2154 0, 0,
2155 0, 0,
2156 0, 0,
2157 0, 0,
2158 0, 0,
2159 0, 0,
2160 0, 0,
2161 };
2162
2163 return panfrost_pool_upload_aligned(&batch->pool, locations, 96 * sizeof(uint16_t), 64);
2164 }