panfrost: Support SHADERLESS mode everywhere
mesa.git: src/gallium/drivers/panfrost/pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 struct mali_shared_memory shared = {
62 .shared_workgroup_count = ~0,
63 };
64
65 if (batch->stack_size) {
66 struct panfrost_bo *stack =
67 panfrost_batch_get_scratchpad(batch, batch->stack_size,
68 dev->thread_tls_alloc,
69 dev->core_count);
70
71 shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
72 shared.scratchpad = stack->gpu;
73 }
74
75 postfix->shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared, sizeof(shared), 64);
76 }
77
78 static void
79 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_postfix *postfix)
81 {
82 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
83 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
84 }
85
86 static void
87 panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
88 struct mali_vertex_tiler_prefix *prefix,
89 struct mali_vertex_tiler_postfix *postfix)
90 {
91 postfix->gl_enables |= 0x7;
92 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
93 rasterizer->base.front_ccw);
94 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
95 (rasterizer->base.cull_face & PIPE_FACE_FRONT));
96 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
97 (rasterizer->base.cull_face & PIPE_FACE_BACK));
98 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
99 rasterizer->base.flatshade_first);
100 }
101
102 void
103 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
104 struct mali_vertex_tiler_prefix *prefix,
105 union midgard_primitive_size *primitive_size)
106 {
107 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
108
109 if (!panfrost_writes_point_size(ctx)) {
110 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
111 rasterizer->base.point_size :
112 rasterizer->base.line_width;
113
114 primitive_size->constant = val;
115 }
116 }
117
118 static void
119 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
120 struct mali_vertex_tiler_postfix *postfix)
121 {
122 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
123 if (ctx->occlusion_query) {
124 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
125 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
126 PAN_BO_ACCESS_SHARED |
127 PAN_BO_ACCESS_RW |
128 PAN_BO_ACCESS_FRAGMENT);
129 } else {
130 postfix->occlusion_counter = 0;
131 }
132 }
133
134 void
135 panfrost_vt_init(struct panfrost_context *ctx,
136 enum pipe_shader_type stage,
137 struct mali_vertex_tiler_prefix *prefix,
138 struct mali_vertex_tiler_postfix *postfix)
139 {
140 struct panfrost_device *device = pan_device(ctx->base.screen);
141
142 if (!ctx->shader[stage])
143 return;
144
145 memset(prefix, 0, sizeof(*prefix));
146 memset(postfix, 0, sizeof(*postfix));
147
148 if (device->quirks & IS_BIFROST) {
149 postfix->gl_enables = 0x2;
150 panfrost_vt_emit_shared_memory(ctx, postfix);
151 } else {
152 postfix->gl_enables = 0x6;
153 panfrost_vt_attach_framebuffer(ctx, postfix);
154 }
155
156 if (stage == PIPE_SHADER_FRAGMENT) {
157 panfrost_vt_update_occlusion_query(ctx, postfix);
158 panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
159 }
160 }
161
162 static unsigned
163 panfrost_translate_index_size(unsigned size)
164 {
165 switch (size) {
166 case 1:
167 return MALI_DRAW_INDEXED_UINT8;
168
169 case 2:
170 return MALI_DRAW_INDEXED_UINT16;
171
172 case 4:
173 return MALI_DRAW_INDEXED_UINT32;
174
175 default:
176 unreachable("Invalid index size");
177 }
178 }
179
 180 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
 181 * good for the duration of the draw (transient), though it could last longer. Also get
182 * the bounds on the index buffer for the range accessed by the draw. We do
183 * these operations together because there are natural optimizations which
184 * require them to be together. */
185
186 static mali_ptr
187 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
188 const struct pipe_draw_info *info,
189 unsigned *min_index, unsigned *max_index)
190 {
191 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
192 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
193 off_t offset = info->start * info->index_size;
194 bool needs_indices = true;
195 mali_ptr out = 0;
196
197 if (info->max_index != ~0u) {
198 *min_index = info->min_index;
199 *max_index = info->max_index;
200 needs_indices = false;
201 }
202
203 if (!info->has_user_indices) {
204 /* Only resources can be directly mapped */
205 panfrost_batch_add_bo(batch, rsrc->bo,
206 PAN_BO_ACCESS_SHARED |
207 PAN_BO_ACCESS_READ |
208 PAN_BO_ACCESS_VERTEX_TILER);
209 out = rsrc->bo->gpu + offset;
210
211 /* Check the cache */
212 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
213 info->start,
214 info->count,
215 min_index,
216 max_index);
217 } else {
218 /* Otherwise, we need to upload to transient memory */
219 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
220 struct panfrost_transfer T =
221 panfrost_pool_alloc_aligned(&batch->pool,
222 info->count * info->index_size,
223 info->index_size);
224
225 memcpy(T.cpu, ibuf8 + offset, info->count * info->index_size);
226 out = T.gpu;
227 }
228
229 if (needs_indices) {
230 /* Fallback */
231 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
232
233 if (!info->has_user_indices)
234 panfrost_minmax_cache_add(rsrc->index_cache,
235 info->start, info->count,
236 *min_index, *max_index);
237 }
238
239 return out;
240 }
241
242 void
243 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
244 const struct pipe_draw_info *info,
245 enum mali_draw_mode draw_mode,
246 struct mali_vertex_tiler_postfix *vertex_postfix,
247 struct mali_vertex_tiler_prefix *tiler_prefix,
248 struct mali_vertex_tiler_postfix *tiler_postfix,
249 unsigned *vertex_count,
250 unsigned *padded_count)
251 {
252 tiler_prefix->draw_mode = draw_mode;
253
254 unsigned draw_flags = 0;
255
256 if (panfrost_writes_point_size(ctx))
257 draw_flags |= MALI_DRAW_VARYING_SIZE;
258
259 if (info->primitive_restart)
260 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
261
 262 /* These don't make much sense */
263
264 draw_flags |= 0x3000;
265
266 if (info->index_size) {
267 unsigned min_index = 0, max_index = 0;
268
269 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
270 info,
271 &min_index,
272 &max_index);
273
274 /* Use the corresponding values */
275 *vertex_count = max_index - min_index + 1;
276 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
277 tiler_prefix->offset_bias_correction = -min_index;
278 tiler_prefix->index_count = MALI_POSITIVE(info->count);
279 draw_flags |= panfrost_translate_index_size(info->index_size);
280 } else {
281 tiler_prefix->indices = 0;
282 *vertex_count = ctx->vertex_count;
283 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
284 tiler_prefix->offset_bias_correction = 0;
285 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
286 }
287
288 tiler_prefix->unknown_draw = draw_flags;
289
290 /* Encode the padded vertex count */
291
292 if (info->instance_count > 1) {
293 *padded_count = panfrost_padded_vertex_count(*vertex_count);
294
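/* The hardware consumes the padded count decomposed as an odd number times a
 * power of two: padded = (2k + 1) << shift. shift is the number of trailing
 * zero bits, and k recovers the odd factor, since (padded >> shift) = 2k + 1. */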
295 unsigned shift = __builtin_ctz(ctx->padded_count);
296 unsigned k = ctx->padded_count >> (shift + 1);
297
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
300 } else {
301 *padded_count = *vertex_count;
302
303 /* Reset instancing state */
304 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
305 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
306 }
307 }
308
309 static void
310 panfrost_emit_compute_shader(struct panfrost_context *ctx,
311 enum pipe_shader_type st,
312 struct mali_shader_meta *meta)
313 {
314 const struct panfrost_device *dev = pan_device(ctx->base.screen);
315 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
316
317 memset(meta, 0, sizeof(*meta));
318 meta->shader = ss->shader;
319 meta->attribute_count = ss->attribute_count;
320 meta->varying_count = ss->varying_count;
321 meta->texture_count = ctx->sampler_view_count[st];
322 meta->sampler_count = ctx->sampler_count[st];
323
324 if (dev->quirks & IS_BIFROST) {
325 struct mali_bifrost_properties_packed prop;
326 struct mali_preload_vertex_packed preload;
327
328 pan_pack(&prop, BIFROST_PROPERTIES, cfg) {
329 cfg.unknown = 0x800000; /* XXX */
330 cfg.uniform_buffer_count = panfrost_ubo_count(ctx, st);
331 }
332
333 /* TODO: True compute shaders */
334 pan_pack(&preload, PRELOAD_VERTEX, cfg) {
335 cfg.uniform_count = ss->uniform_count;
336 cfg.vertex_id = true;
337 cfg.instance_id = true;
338 }
339
340 memcpy(&meta->bifrost_props, &prop, sizeof(prop));
341 memcpy(&meta->bifrost_preload, &preload, sizeof(preload));
342 } else {
343 struct mali_midgard_properties_packed prop;
344
345 pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
346 cfg.uniform_buffer_count = panfrost_ubo_count(ctx, st);
347 cfg.uniform_count = ss->uniform_count;
348 cfg.work_register_count = ss->work_reg_count;
349 cfg.writes_globals = ss->writes_global;
350 cfg.suppress_inf_nan = true; /* XXX */
351 }
352
353 memcpy(&meta->midgard_props, &prop, sizeof(prop));
354 }
355 }
356
357 static unsigned
358 translate_tex_wrap(enum pipe_tex_wrap w)
359 {
360 switch (w) {
361 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
362 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
363 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
364 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
365 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
366 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
367 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
368 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
369 default: unreachable("Invalid wrap");
370 }
371 }
372
 373 /* The hardware compares in the wrong order, so we have to flip before
374 * encoding. Yes, really. */
375
376 static enum mali_func
377 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
378 {
379 if (!cso->compare_mode)
380 return MALI_FUNC_NEVER;
381
382 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
383 return panfrost_flip_compare_func(f);
384 }
385
386 static enum mali_mipmap_mode
387 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
388 {
389 switch (f) {
390 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
391 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
392 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
393 default: unreachable("Invalid");
394 }
395 }
396
397 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
398 struct mali_midgard_sampler_packed *hw)
399 {
400 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
401 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
402 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
403 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
404 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
405 cfg.normalized_coordinates = cso->normalized_coords;
406
407 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
408
409 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
410
411 /* If necessary, we disable mipmapping in the sampler descriptor by
412 * clamping the LOD as tight as possible (from 0 to epsilon,
413 * essentially -- remember these are fixed point numbers, so
414 * epsilon=1/256) */
415
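/* The "+ 1" below is a single fixed-point step (1/256 of a level), so the
 * clamp pins the LOD to min_lod and only one mipmap level is ever selected. */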
416 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
417 cfg.minimum_lod + 1 :
418 FIXED_16(cso->max_lod, false);
419
420 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
421 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
422 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
423
424 cfg.compare_function = panfrost_sampler_compare_func(cso);
425 cfg.seamless_cube_map = cso->seamless_cube_map;
426
427 cfg.border_color_r = cso->border_color.f[0];
428 cfg.border_color_g = cso->border_color.f[1];
429 cfg.border_color_b = cso->border_color.f[2];
430 cfg.border_color_a = cso->border_color.f[3];
431 }
432 }
433
434 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
435 struct mali_bifrost_sampler_packed *hw)
436 {
437 pan_pack(hw, BIFROST_SAMPLER, cfg) {
438 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
439 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
440 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
441 cfg.normalized_coordinates = cso->normalized_coords;
442
443 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
444 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
445 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
446
447 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
448 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
449 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
450
451 cfg.compare_function = panfrost_sampler_compare_func(cso);
452 cfg.seamless_cube_map = cso->seamless_cube_map;
453 }
454 }
455
456 static bool
457 panfrost_fs_required(
458 struct panfrost_shader_state *fs,
459 struct panfrost_blend_final *blend,
460 unsigned rt_count)
461 {
462 /* If we generally have side effects */
463 if (fs->fs_sidefx)
464 return true;
465
466 /* If colour is written we need to execute */
467 for (unsigned i = 0; i < rt_count; ++i) {
468 if (!blend[i].no_colour)
469 return true;
470 }
471
472 /* If depth is written and not implied we need to execute.
473 * TODO: Predicate on Z/S writes being enabled */
474 return (fs->writes_depth || fs->writes_stencil);
475 }
476
477 static void
478 panfrost_emit_blend(struct panfrost_batch *batch, void *rts,
479 struct panfrost_blend_final *blend)
480 {
481 const struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
482 struct panfrost_shader_state *fs = panfrost_get_shader_state(batch->ctx, PIPE_SHADER_FRAGMENT);
483 unsigned rt_count = batch->key.nr_cbufs;
484
485 struct bifrost_blend_rt *brts = rts;
486 struct midgard_blend_rt *mrts = rts;
487
488 /* Disable blending for depth-only on Bifrost */
489
490 if (rt_count == 0 && dev->quirks & IS_BIFROST)
491 brts[0].unk2 = 0x3;
492
493 for (unsigned i = 0; i < rt_count; ++i) {
494 unsigned flags = 0;
495
496 pan_pack(&flags, BLEND_FLAGS, cfg) {
497 if (blend[i].no_colour) {
498 cfg.enable = false;
499 break;
500 }
501
502 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
503
504 cfg.srgb = util_format_is_srgb(batch->key.cbufs[i]->format);
505 cfg.load_destination = blend[i].load_dest;
506 cfg.dither_disable = !batch->ctx->blend->base.dither;
507
508 if (!(dev->quirks & IS_BIFROST))
509 cfg.midgard_blend_shader = blend[i].is_shader;
510 }
511
512 if (dev->quirks & IS_BIFROST) {
513 brts[i].flags = flags;
514
515 if (blend[i].is_shader) {
516 /* The blend shader's address needs to be at
517 * the same top 32 bit as the fragment shader.
518 * TODO: Ensure that's always the case.
519 */
520 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
521 (fs->bo->gpu & (0xffffffffull << 32)));
522 brts[i].shader = blend[i].shader.gpu;
523 brts[i].unk2 = 0x0;
524 } else {
525 enum pipe_format format = batch->key.cbufs[i]->format;
526 const struct util_format_description *format_desc;
527 format_desc = util_format_description(format);
528
529 brts[i].equation = blend[i].equation.equation;
530
531 /* TODO: this is a bit more complicated */
532 brts[i].constant = blend[i].equation.constant;
533
534 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
535
536 /* 0x19 disables blending and forces REPLACE
537 * mode (equivalent to rgb_mode = alpha_mode =
538 * x122, colour mask = 0xF). 0x1a allows
539 * blending. */
540 brts[i].unk2 = blend[i].opaque ? 0x19 : 0x1a;
541
542 brts[i].shader_type = fs->blend_types[i];
543 }
544 } else {
545 memcpy(&mrts[i].flags, &flags, sizeof(flags));
546
547 if (blend[i].is_shader) {
548 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
549 } else {
550 mrts[i].blend.equation = blend[i].equation.equation;
551 mrts[i].blend.constant = blend[i].equation.constant;
552 }
553 }
554 }
555 }
556
557 static void
558 panfrost_emit_frag_shader(struct panfrost_context *ctx,
559 struct mali_shader_meta *fragmeta,
560 struct panfrost_blend_final *blend)
561 {
562 const struct panfrost_device *dev = pan_device(ctx->base.screen);
563 struct panfrost_shader_state *fs;
564
565 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
566
567 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
568 const struct panfrost_zsa_state *zsa = ctx->depth_stencil;
569 unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
570
571 memset(fragmeta, 0, sizeof(*fragmeta));
572
573 fragmeta->shader = fs->shader;
574 fragmeta->attribute_count = fs->attribute_count;
575 fragmeta->varying_count = fs->varying_count;
576 fragmeta->texture_count = ctx->sampler_view_count[PIPE_SHADER_FRAGMENT];
577 fragmeta->sampler_count = ctx->sampler_count[PIPE_SHADER_FRAGMENT];
578
579 if (dev->quirks & IS_BIFROST) {
580 struct mali_bifrost_properties_packed prop;
581 struct mali_preload_fragment_packed preload;
582
583 bool no_blend = true;
584
585 for (unsigned i = 0; i < rt_count; ++i)
586 no_blend &= (!blend[i].load_dest | blend[i].no_colour);
587
588 pan_pack(&prop, BIFROST_PROPERTIES, cfg) {
589 cfg.unknown = 0x950020; /* XXX */
590 cfg.uniform_buffer_count = panfrost_ubo_count(ctx, PIPE_SHADER_FRAGMENT);
591 cfg.early_z_enable = !fs->can_discard && !fs->writes_depth && no_blend;
592 }
593
594 pan_pack(&preload, PRELOAD_FRAGMENT, cfg) {
595 cfg.uniform_count = fs->uniform_count;
596 cfg.fragment_position = fs->reads_frag_coord;
597 }
598
599 memcpy(&fragmeta->bifrost_props, &prop, sizeof(prop));
600 memcpy(&fragmeta->bifrost_preload, &preload, sizeof(preload));
601 } else {
602 struct mali_midgard_properties_packed prop;
603
604 /* Reasons to disable early-Z from a shader perspective */
605 bool late_z = fs->can_discard || fs->writes_global ||
606 fs->writes_depth || fs->writes_stencil;
607
608 /* Reasons to disable early-Z from a CSO perspective */
609 bool alpha_to_coverage = ctx->blend->base.alpha_to_coverage;
610
611 /* If either depth or stencil is enabled, discard matters */
612 bool zs_enabled =
613 (zsa->base.depth.enabled && zsa->base.depth.func != PIPE_FUNC_ALWAYS) ||
614 zsa->base.stencil[0].enabled;
615
616 bool has_blend_shader = false;
617
618 for (unsigned c = 0; c < rt_count; ++c)
619 has_blend_shader |= blend[c].is_shader;
620
621 pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
622 cfg.uniform_buffer_count = panfrost_ubo_count(ctx, PIPE_SHADER_FRAGMENT);
623 cfg.uniform_count = fs->uniform_count;
624 cfg.work_register_count = fs->work_reg_count;
625 cfg.writes_globals = fs->writes_global;
626 cfg.suppress_inf_nan = true; /* XXX */
627
628 /* TODO: Reduce this limit? */
629 if (has_blend_shader)
630 cfg.work_register_count = MAX2(cfg.work_register_count, 8);
631
632 cfg.stencil_from_shader = fs->writes_stencil;
633 cfg.helper_invocation_enable = fs->helper_invocations;
634 cfg.depth_source = fs->writes_depth ?
635 MALI_DEPTH_SOURCE_SHADER :
636 MALI_DEPTH_SOURCE_FIXED_FUNCTION;
637
638 /* Depend on other state */
639 cfg.early_z_enable = !(late_z || alpha_to_coverage);
640 cfg.reads_tilebuffer = fs->outputs_read || (!zs_enabled && fs->can_discard);
641 cfg.reads_depth_stencil = zs_enabled && fs->can_discard;
642 }
643
644 memcpy(&fragmeta->midgard_props, &prop, sizeof(prop));
645 }
646
647 bool msaa = rast->multisample;
648 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
649
650 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
651 fragmeta->unknown2_4 = 0x4e0;
652
653 /* TODO: Sample size */
654 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
655 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
656
657 /* EXT_shader_framebuffer_fetch requires the shader to be run
658 * per-sample when outputs are read. */
659 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
660 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
661
662 fragmeta->depth_units = rast->offset_units * 2.0f;
663 fragmeta->depth_factor = rast->offset_scale;
664
 665 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
666
667 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
668 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
669
670 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
671 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
672
673 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
674 zsa->base.stencil[0].enabled);
675
676 fragmeta->stencil_mask_front = zsa->stencil_mask_front;
677 fragmeta->stencil_mask_back = zsa->stencil_mask_back;
678
679 /* Bottom bits for stencil ref, exactly one word */
680 fragmeta->stencil_front.opaque[0] = zsa->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
681
682 /* If back-stencil is not enabled, use the front values */
683
684 if (zsa->base.stencil[1].enabled)
685 fragmeta->stencil_back.opaque[0] = zsa->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
686 else
687 fragmeta->stencil_back = fragmeta->stencil_front;
688
689 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
690 zsa->base.depth.writemask);
691
692 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
693 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(
694 zsa->base.depth.enabled ? zsa->base.depth.func : PIPE_FUNC_ALWAYS));
695
696 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
697 ctx->blend->base.alpha_to_coverage);
698
699 /* Disable shader execution if we can */
700 if (!panfrost_fs_required(fs, blend, rt_count)) {
701 fragmeta->attribute_count = 0;
702 fragmeta->varying_count = 0;
703 fragmeta->texture_count = 0;
704 fragmeta->sampler_count = 0;
705
706 struct mali_midgard_properties_packed prop;
707
708 if (dev->quirks & IS_BIFROST) {
709 fragmeta->shader = 0x0;
710
711 pan_pack(&prop, BIFROST_PROPERTIES, cfg) {
712 cfg.unknown = 0x950020; /* XXX */
713 cfg.early_z_enable = true;
714 }
715 } else {
716 fragmeta->shader = 0x1;
717
718 pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
719 cfg.work_register_count = 1;
720 cfg.depth_source = MALI_DEPTH_SOURCE_FIXED_FUNCTION;
721 cfg.early_z_enable = true;
722 }
723 }
724
725 memcpy(&fragmeta->midgard_props, &prop, sizeof(prop));
726 }
727
728 if (dev->quirks & MIDGARD_SFBD) {
 729 /* On platforms that support only a single render target (SFBD), the
 730 * blend information is inside the shader meta itself. We additionally
 731 * need to signal CAN_DISCARD for nontrivial blend modes (so
 732 * we're able to read back the destination buffer) */
733
734 if (blend[0].no_colour)
735 return;
736
737 fragmeta->unknown2_4 |= MALI_SFBD_ENABLE;
738
739 SET_BIT(fragmeta->unknown2_4, MALI_SFBD_SRGB,
740 util_format_is_srgb(ctx->pipe_framebuffer.cbufs[0]->format));
741
742 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
743 blend[0].is_shader);
744
745 if (blend[0].is_shader) {
746 fragmeta->blend.shader = blend[0].shader.gpu |
747 blend[0].shader.first_tag;
748 } else {
749 fragmeta->blend.equation = blend[0].equation.equation;
750 fragmeta->blend.constant = blend[0].equation.constant;
751 }
752
753 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
754 blend[0].load_dest);
755
756 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER, !ctx->blend->base.dither);
757 } else if (!(dev->quirks & IS_BIFROST)) {
758 /* Bug where MRT-capable hw apparently reads the last blend
759 * shader from here instead of the usual location? */
760
761 for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
762 if (!blend[rt].is_shader)
763 continue;
764
765 fragmeta->blend.shader = blend[rt].shader.gpu |
766 blend[rt].shader.first_tag;
767 break;
768 }
769 }
770 }
771
772 void
773 panfrost_emit_shader_meta(struct panfrost_batch *batch,
774 enum pipe_shader_type st,
775 struct mali_vertex_tiler_postfix *postfix)
776 {
777 struct panfrost_context *ctx = batch->ctx;
778 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
779
780 if (!ss) {
781 postfix->shader = 0;
782 return;
783 }
784
785 struct mali_shader_meta meta;
786
787 /* Add the shader BO to the batch. */
788 panfrost_batch_add_bo(batch, ss->bo,
789 PAN_BO_ACCESS_PRIVATE |
790 PAN_BO_ACCESS_READ |
791 panfrost_bo_access_for_stage(st));
792
793 mali_ptr shader_ptr;
794
795 if (st == PIPE_SHADER_FRAGMENT) {
796 struct panfrost_device *dev = pan_device(ctx->base.screen);
797 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
798 size_t desc_size = sizeof(meta);
799 void *rts = NULL;
800 struct panfrost_transfer xfer;
801 unsigned rt_size;
802
803 if (dev->quirks & MIDGARD_SFBD)
804 rt_size = 0;
805 else if (dev->quirks & IS_BIFROST)
806 rt_size = sizeof(struct bifrost_blend_rt);
807 else
808 rt_size = sizeof(struct midgard_blend_rt);
809
810 desc_size += rt_size * rt_count;
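/* When blend records are used (non-SFBD), the GPU descriptor is the shader
 * meta immediately followed by one blend record per render target, so they
 * are allocated and copied out contiguously below. */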
811
812 if (rt_size)
813 rts = rzalloc_size(ctx, rt_size * rt_count);
814
815 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
816
817 for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
818 blend[c] = panfrost_get_blend_for_context(ctx, c);
819
820 panfrost_emit_frag_shader(ctx, &meta, blend);
821
822 if (!(dev->quirks & MIDGARD_SFBD))
823 panfrost_emit_blend(batch, rts, blend);
824 else
825 batch->draws |= PIPE_CLEAR_COLOR0;
826
827 xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, sizeof(meta));
828
829 memcpy(xfer.cpu, &meta, sizeof(meta));
830 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
831
832 if (rt_size)
833 ralloc_free(rts);
834
835 shader_ptr = xfer.gpu;
836 } else {
837 panfrost_emit_compute_shader(ctx, st, &meta);
838
839 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
840 sizeof(meta));
841 }
842
843 postfix->shader = shader_ptr;
844 }
845
846 void
847 panfrost_emit_viewport(struct panfrost_batch *batch,
848 struct mali_vertex_tiler_postfix *tiler_postfix)
849 {
850 struct panfrost_context *ctx = batch->ctx;
851 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
852 const struct pipe_scissor_state *ss = &ctx->scissor;
853 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
854 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
855
856 /* Derive min/max from translate/scale. Note since |x| >= 0 by
857 * definition, we have that -|x| <= |x| hence translate - |scale| <=
858 * translate + |scale|, so the ordering is correct here. */
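/* For example, a viewport spanning x = 0..800 has translate[0] = 400 and
 * scale[0] = 400, giving vp_minx = 0 and vp_maxx = 800. */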
859 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
860 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
861 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
862 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
863 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
864 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
865
 866 /* Scissor to the intersection of the viewport and the scissor rectangle,
 867 * clamped to the framebuffer */
868
869 unsigned minx = MIN2(fb->width, vp_minx);
870 unsigned maxx = MIN2(fb->width, vp_maxx);
871 unsigned miny = MIN2(fb->height, vp_miny);
872 unsigned maxy = MIN2(fb->height, vp_maxy);
873
874 if (ss && rast->scissor) {
875 minx = MAX2(ss->minx, minx);
876 miny = MAX2(ss->miny, miny);
877 maxx = MIN2(ss->maxx, maxx);
878 maxy = MIN2(ss->maxy, maxy);
879 }
880
881 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
882
883 pan_pack(T.cpu, VIEWPORT, cfg) {
884 cfg.scissor_minimum_x = minx;
885 cfg.scissor_minimum_y = miny;
886 cfg.scissor_maximum_x = maxx - 1;
887 cfg.scissor_maximum_y = maxy - 1;
888
889 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
890 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
891 }
892
893 tiler_postfix->viewport = T.gpu;
894 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
895 }
896
897 static mali_ptr
898 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
899 enum pipe_shader_type st,
900 struct panfrost_constant_buffer *buf,
901 unsigned index)
902 {
903 struct pipe_constant_buffer *cb = &buf->cb[index];
904 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
905
906 if (rsrc) {
907 panfrost_batch_add_bo(batch, rsrc->bo,
908 PAN_BO_ACCESS_SHARED |
909 PAN_BO_ACCESS_READ |
910 panfrost_bo_access_for_stage(st));
911
 912 /* Alignment guaranteed by
913 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
914 return rsrc->bo->gpu + cb->buffer_offset;
915 } else if (cb->user_buffer) {
916 return panfrost_pool_upload_aligned(&batch->pool,
917 cb->user_buffer +
918 cb->buffer_offset,
919 cb->buffer_size, 16);
920 } else {
921 unreachable("No constant buffer");
922 }
923 }
924
925 struct sysval_uniform {
926 union {
927 float f[4];
928 int32_t i[4];
929 uint32_t u[4];
930 uint64_t du[2];
931 };
932 };
933
934 static void
935 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
936 struct sysval_uniform *uniform)
937 {
938 struct panfrost_context *ctx = batch->ctx;
939 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
940
941 uniform->f[0] = vp->scale[0];
942 uniform->f[1] = vp->scale[1];
943 uniform->f[2] = vp->scale[2];
944 }
945
946 static void
947 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
948 struct sysval_uniform *uniform)
949 {
950 struct panfrost_context *ctx = batch->ctx;
951 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
952
953 uniform->f[0] = vp->translate[0];
954 uniform->f[1] = vp->translate[1];
955 uniform->f[2] = vp->translate[2];
956 }
957
958 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
959 enum pipe_shader_type st,
960 unsigned int sysvalid,
961 struct sysval_uniform *uniform)
962 {
963 struct panfrost_context *ctx = batch->ctx;
964 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
965 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
966 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
967 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
968
969 assert(dim);
970 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
971
972 if (dim > 1)
973 uniform->i[1] = u_minify(tex->texture->height0,
974 tex->u.tex.first_level);
975
976 if (dim > 2)
977 uniform->i[2] = u_minify(tex->texture->depth0,
978 tex->u.tex.first_level);
979
980 if (is_array)
981 uniform->i[dim] = tex->texture->array_size;
982 }
983
984 static void
985 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
986 enum pipe_shader_type st,
987 unsigned ssbo_id,
988 struct sysval_uniform *uniform)
989 {
990 struct panfrost_context *ctx = batch->ctx;
991
992 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
993 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
994
995 /* Compute address */
996 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
997
998 panfrost_batch_add_bo(batch, bo,
999 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
1000 panfrost_bo_access_for_stage(st));
1001
1002 /* Upload address and size as sysval */
1003 uniform->du[0] = bo->gpu + sb.buffer_offset;
1004 uniform->u[2] = sb.buffer_size;
1005 }
1006
1007 static void
1008 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
1009 enum pipe_shader_type st,
1010 unsigned samp_idx,
1011 struct sysval_uniform *uniform)
1012 {
1013 struct panfrost_context *ctx = batch->ctx;
1014 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1015
1016 uniform->f[0] = sampl->min_lod;
1017 uniform->f[1] = sampl->max_lod;
1018 uniform->f[2] = sampl->lod_bias;
1019
1020 /* Even without any errata, Midgard represents "no mipmapping" as
1021 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1022 * panfrost_create_sampler_state which also explains our choice of
1023 * epsilon value (again to keep behaviour consistent) */
1024
1025 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1026 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1027 }
1028
1029 static void
1030 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1031 struct sysval_uniform *uniform)
1032 {
1033 struct panfrost_context *ctx = batch->ctx;
1034
1035 uniform->u[0] = ctx->compute_grid->grid[0];
1036 uniform->u[1] = ctx->compute_grid->grid[1];
1037 uniform->u[2] = ctx->compute_grid->grid[2];
1038 }
1039
1040 static void
1041 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1042 struct panfrost_shader_state *ss,
1043 enum pipe_shader_type st)
1044 {
1045 struct sysval_uniform *uniforms = (void *)buf;
1046
1047 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1048 int sysval = ss->sysval[i];
1049
1050 switch (PAN_SYSVAL_TYPE(sysval)) {
1051 case PAN_SYSVAL_VIEWPORT_SCALE:
1052 panfrost_upload_viewport_scale_sysval(batch,
1053 &uniforms[i]);
1054 break;
1055 case PAN_SYSVAL_VIEWPORT_OFFSET:
1056 panfrost_upload_viewport_offset_sysval(batch,
1057 &uniforms[i]);
1058 break;
1059 case PAN_SYSVAL_TEXTURE_SIZE:
1060 panfrost_upload_txs_sysval(batch, st,
1061 PAN_SYSVAL_ID(sysval),
1062 &uniforms[i]);
1063 break;
1064 case PAN_SYSVAL_SSBO:
1065 panfrost_upload_ssbo_sysval(batch, st,
1066 PAN_SYSVAL_ID(sysval),
1067 &uniforms[i]);
1068 break;
1069 case PAN_SYSVAL_NUM_WORK_GROUPS:
1070 panfrost_upload_num_work_groups_sysval(batch,
1071 &uniforms[i]);
1072 break;
1073 case PAN_SYSVAL_SAMPLER:
1074 panfrost_upload_sampler_sysval(batch, st,
1075 PAN_SYSVAL_ID(sysval),
1076 &uniforms[i]);
1077 break;
1078 default:
1079 assert(0);
1080 }
1081 }
1082 }
1083
1084 static const void *
1085 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1086 unsigned index)
1087 {
1088 struct pipe_constant_buffer *cb = &buf->cb[index];
1089 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1090
1091 if (rsrc)
1092 return rsrc->bo->cpu;
1093 else if (cb->user_buffer)
1094 return cb->user_buffer;
1095 else
1096 unreachable("No constant buffer");
1097 }
1098
1099 void
1100 panfrost_emit_const_buf(struct panfrost_batch *batch,
1101 enum pipe_shader_type stage,
1102 struct mali_vertex_tiler_postfix *postfix)
1103 {
1104 struct panfrost_context *ctx = batch->ctx;
1105 struct panfrost_shader_variants *all = ctx->shader[stage];
1106
1107 if (!all)
1108 return;
1109
1110 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1111
1112 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1113
1114 /* Uniforms are implicitly UBO #0 */
1115 bool has_uniforms = buf->enabled_mask & (1 << 0);
1116
1117 /* Allocate room for the sysval and the uniforms */
1118 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1119 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1120 size_t size = sys_size + uniform_size;
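/* UBO #0 is laid out as the requested sysvals followed by the user uniforms;
 * both are written into the same allocation below. */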
1121 struct panfrost_transfer transfer =
1122 panfrost_pool_alloc_aligned(&batch->pool, size, 16);
1123
1124 /* Upload sysvals requested by the shader */
1125 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1126
1127 /* Upload uniforms */
1128 if (has_uniforms && uniform_size) {
1129 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1130 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1131 }
1132
1133 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1134 * uploaded */
1135
1136 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1137 assert(ubo_count >= 1);
1138
1139 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1140 struct panfrost_transfer ubos =
1141 panfrost_pool_alloc_aligned(&batch->pool, sz,
1142 MALI_UNIFORM_BUFFER_LENGTH);
1143
1144 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1145
1146 /* Upload uniforms as a UBO */
1147
1148 if (size) {
1149 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1150 cfg.entries = DIV_ROUND_UP(size, 16);
1151 cfg.pointer = transfer.gpu;
1152 }
1153 } else {
1154 *ubo_ptr = 0;
1155 }
1156
1157 /* The rest are honest-to-goodness UBOs */
1158
1159 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1160 size_t usz = buf->cb[ubo].buffer_size;
1161 bool enabled = buf->enabled_mask & (1 << ubo);
1162 bool empty = usz == 0;
1163
1164 if (!enabled || empty) {
1165 ubo_ptr[ubo] = 0;
1166 continue;
1167 }
1168
1169 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1170 cfg.entries = DIV_ROUND_UP(usz, 16);
1171 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1172 stage, buf, ubo);
1173 }
1174 }
1175
1176 postfix->uniforms = transfer.gpu;
1177 postfix->uniform_buffers = ubos.gpu;
1178
1179 buf->dirty_mask = 0;
1180 }
1181
1182 void
1183 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1184 const struct pipe_grid_info *info,
1185 struct midgard_payload_vertex_tiler *vtp)
1186 {
1187 struct panfrost_context *ctx = batch->ctx;
1188 struct panfrost_device *dev = pan_device(ctx->base.screen);
1189 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1190 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1191 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1192 128));
1193
1194 unsigned log2_instances =
1195 util_logbase2_ceil(info->grid[0]) +
1196 util_logbase2_ceil(info->grid[1]) +
1197 util_logbase2_ceil(info->grid[2]);
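/* Summing the per-dimension ceil(log2) rounds the total workgroup count up to
 * a power of two; the worst-case allocation below is one copy of the
 * per-workgroup shared block for each such instance on every core. */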
1198
1199 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
1200 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1201 shared_size,
1202 1);
1203
1204 struct mali_shared_memory shared = {
1205 .shared_memory = bo->gpu,
1206 .shared_workgroup_count = log2_instances,
1207 .shared_shift = util_logbase2(single_size) + 1
1208 };
1209
1210 vtp->postfix.shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared,
1211 sizeof(shared), 64);
1212 }
1213
1214 static mali_ptr
1215 panfrost_get_tex_desc(struct panfrost_batch *batch,
1216 enum pipe_shader_type st,
1217 struct panfrost_sampler_view *view)
1218 {
1219 if (!view)
1220 return (mali_ptr) 0;
1221
1222 struct pipe_sampler_view *pview = &view->base;
1223 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1224
1225 /* Add the BO to the job so it's retained until the job is done. */
1226
1227 panfrost_batch_add_bo(batch, rsrc->bo,
1228 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1229 panfrost_bo_access_for_stage(st));
1230
1231 panfrost_batch_add_bo(batch, view->bo,
1232 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1233 panfrost_bo_access_for_stage(st));
1234
1235 return view->bo->gpu;
1236 }
1237
1238 static void
1239 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1240 struct pipe_context *pctx)
1241 {
1242 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1243 if (view->texture_bo != rsrc->bo->gpu ||
1244 view->modifier != rsrc->modifier) {
1245 panfrost_bo_unreference(view->bo);
1246 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1247 }
1248 }
1249
1250 void
1251 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1252 enum pipe_shader_type stage,
1253 struct mali_vertex_tiler_postfix *postfix)
1254 {
1255 struct panfrost_context *ctx = batch->ctx;
1256 struct panfrost_device *device = pan_device(ctx->base.screen);
1257
1258 if (!ctx->sampler_view_count[stage])
1259 return;
1260
1261 if (device->quirks & IS_BIFROST) {
1262 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1263 MALI_BIFROST_TEXTURE_LENGTH *
1264 ctx->sampler_view_count[stage],
1265 MALI_BIFROST_TEXTURE_LENGTH);
1266
1267 struct mali_bifrost_texture_packed *out =
1268 (struct mali_bifrost_texture_packed *) T.cpu;
1269
1270 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1271 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1272 struct pipe_sampler_view *pview = &view->base;
1273 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1274
1275 panfrost_update_sampler_view(view, &ctx->base);
1276 out[i] = view->bifrost_descriptor;
1277
1278 /* Add the BOs to the job so they are retained until the job is done. */
1279
1280 panfrost_batch_add_bo(batch, rsrc->bo,
1281 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1282 panfrost_bo_access_for_stage(stage));
1283
1284 panfrost_batch_add_bo(batch, view->bo,
1285 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1286 panfrost_bo_access_for_stage(stage));
1287 }
1288
1289 postfix->textures = T.gpu;
1290 } else {
1291 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1292
1293 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1294 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1295
1296 panfrost_update_sampler_view(view, &ctx->base);
1297
1298 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1299 }
1300
1301 postfix->textures = panfrost_pool_upload_aligned(&batch->pool,
1302 trampolines,
1303 sizeof(uint64_t) *
1304 ctx->sampler_view_count[stage],
1305 sizeof(uint64_t));
1306 }
1307 }
1308
1309 void
1310 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1311 enum pipe_shader_type stage,
1312 struct mali_vertex_tiler_postfix *postfix)
1313 {
1314 struct panfrost_context *ctx = batch->ctx;
1315
1316 if (!ctx->sampler_count[stage])
1317 return;
1318
1319 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1320 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1321
1322 size_t sz = desc_size * ctx->sampler_count[stage];
1323 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool, sz, desc_size);
1324 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1325
1326 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1327 out[i] = ctx->samplers[stage][i]->hw;
1328
1329 postfix->sampler_descriptor = T.gpu;
1330 }
1331
1332 void
1333 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1334 struct mali_vertex_tiler_postfix *vertex_postfix)
1335 {
1336 struct panfrost_context *ctx = batch->ctx;
1337 struct panfrost_vertex_state *so = ctx->vertex;
1338 struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1339
1340 unsigned instance_shift = vertex_postfix->instance_shift;
1341 unsigned instance_odd = vertex_postfix->instance_odd;
1342
1343 /* Worst case: everything is NPOT, which is only possible if instancing
 1344 * is enabled. Otherwise a single record is guaranteed */
1345 bool could_npot = instance_shift || instance_odd;
1346
1347 struct panfrost_transfer S = panfrost_pool_alloc_aligned(&batch->pool,
1348 MALI_ATTRIBUTE_BUFFER_LENGTH * vs->attribute_count *
1349 (could_npot ? 2 : 1),
1350 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1351
1352 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1353 MALI_ATTRIBUTE_LENGTH * vs->attribute_count,
1354 MALI_ATTRIBUTE_LENGTH);
1355
1356 struct mali_attribute_buffer_packed *bufs =
1357 (struct mali_attribute_buffer_packed *) S.cpu;
1358
1359 struct mali_attribute_packed *out =
1360 (struct mali_attribute_packed *) T.cpu;
1361
1362 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1363 unsigned k = 0;
1364
1365 for (unsigned i = 0; i < so->num_elements; ++i) {
1366 /* We map buffers 1:1 with the attributes, which
1367 * means duplicating some vertex buffers (who cares? aside from
1368 * maybe some caching implications but I somehow doubt that
1369 * matters) */
1370
1371 struct pipe_vertex_element *elem = &so->pipe[i];
1372 unsigned vbi = elem->vertex_buffer_index;
1373 attrib_to_buffer[i] = k;
1374
1375 if (!(ctx->vb_mask & (1 << vbi)))
1376 continue;
1377
1378 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1379 struct panfrost_resource *rsrc;
1380
1381 rsrc = pan_resource(buf->buffer.resource);
1382 if (!rsrc)
1383 continue;
1384
1385 /* Add a dependency of the batch on the vertex buffer */
1386 panfrost_batch_add_bo(batch, rsrc->bo,
1387 PAN_BO_ACCESS_SHARED |
1388 PAN_BO_ACCESS_READ |
1389 PAN_BO_ACCESS_VERTEX_TILER);
1390
1391 /* Mask off lower bits, see offset fixup below */
1392 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1393 mali_ptr addr = raw_addr & ~63;
1394
 1395 /* The base pointer was advanced by buffer_offset and then rounded down for
 1396 * alignment: shrink the size by the offset, but add back what the rounding subtracted */
1397 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1398 - buf->buffer_offset;
1399
1400 /* When there is a divisor, the hardware-level divisor is
1401 * the product of the instance divisor and the padded count */
1402 unsigned divisor = elem->instance_divisor;
1403 unsigned hw_divisor = ctx->padded_count * divisor;
1404 unsigned stride = buf->stride;
1405
1406 /* If there's a divisor(=1) but no instancing, we want every
1407 * attribute to be the same */
1408
1409 if (divisor && ctx->instance_count == 1)
1410 stride = 0;
1411
1412 if (!divisor || ctx->instance_count <= 1) {
1413 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1414 if (ctx->instance_count > 1)
1415 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1416
1417 cfg.pointer = addr;
1418 cfg.stride = stride;
1419 cfg.size = size;
1420 cfg.divisor_r = instance_shift;
1421 cfg.divisor_p = instance_odd;
1422 }
1423 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1424 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1425 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1426 cfg.pointer = addr;
1427 cfg.stride = stride;
1428 cfg.size = size;
1429 cfg.divisor_r = __builtin_ctz(hw_divisor);
1430 }
1431
1432 } else {
1433 unsigned shift = 0, extra_flags = 0;
1434
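/* Non-power-of-two divisor: the hardware divides by multiplying with a
 * precomputed fixed-point reciprocal (the "magic" numerator) and shifting, in
 * the spirit of division-by-invariant-integer tricks. Compute that constant
 * on the CPU and store it in the continuation record below. */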
1435 unsigned magic_divisor =
1436 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
1437
1438 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1439 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1440 cfg.pointer = addr;
1441 cfg.stride = stride;
1442 cfg.size = size;
1443
1444 cfg.divisor_r = shift;
1445 cfg.divisor_e = extra_flags;
1446 }
1447
1448 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1449 cfg.divisor_numerator = magic_divisor;
1450 cfg.divisor = divisor;
1451 }
1452
1453 ++k;
1454 }
1455
1456 ++k;
1457 }
1458
1459 /* Add special gl_VertexID/gl_InstanceID buffers */
1460
1461 if (unlikely(vs->attribute_count >= PAN_VERTEX_ID)) {
1462 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1463
1464 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1465 cfg.buffer_index = k++;
1466 cfg.format = so->formats[PAN_VERTEX_ID];
1467 }
1468
1469 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1470
1471 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1472 cfg.buffer_index = k++;
1473 cfg.format = so->formats[PAN_INSTANCE_ID];
1474 }
1475 }
1476
1477 /* Attribute addresses require 64-byte alignment, so let:
1478 *
1479 * base' = base & ~63 = base - (base & 63)
1480 * offset' = offset + (base & 63)
1481 *
1482 * Since base' + offset' = base + offset, these are equivalent
1483 * addressing modes and now base is 64 aligned.
1484 */
1485
1486 unsigned start = vertex_postfix->offset_start;
1487
1488 for (unsigned i = 0; i < so->num_elements; ++i) {
1489 unsigned vbi = so->pipe[i].vertex_buffer_index;
1490 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1491
1492 /* Adjust by the masked off bits of the offset. Make sure we
1493 * read src_offset from so->hw (which is not GPU visible)
1494 * rather than target (which is) due to caching effects */
1495
1496 unsigned src_offset = so->pipe[i].src_offset;
1497
1498 /* BOs aligned to 4k so guaranteed aligned to 64 */
1499 src_offset += (buf->buffer_offset & 63);
1500
1501 /* Also, somewhat obscurely per-instance data needs to be
1502 * offset in response to a delayed start in an indexed draw */
1503
1504 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1505 src_offset -= buf->stride * start;
1506
1507 pan_pack(out + i, ATTRIBUTE, cfg) {
1508 cfg.buffer_index = attrib_to_buffer[i];
1509 cfg.format = so->formats[i];
1510 cfg.offset = src_offset;
1511 }
1512 }
1513
1514 vertex_postfix->attributes = S.gpu;
1515 vertex_postfix->attribute_meta = T.gpu;
1516 }
1517
1518 static mali_ptr
1519 panfrost_emit_varyings(struct panfrost_batch *batch,
1520 struct mali_attribute_buffer_packed *slot,
1521 unsigned stride, unsigned count)
1522 {
1523 unsigned size = stride * count;
1524 mali_ptr ptr = panfrost_pool_alloc_aligned(&batch->invisible_pool, size, 64).gpu;
1525
1526 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1527 cfg.stride = stride;
1528 cfg.size = size;
1529 cfg.pointer = ptr;
1530 }
1531
1532 return ptr;
1533 }
1534
1535 static unsigned
1536 panfrost_streamout_offset(unsigned stride, unsigned offset,
1537 struct pipe_stream_output_target *target)
1538 {
1539 return (target->buffer_offset + (offset * stride * 4)) & 63;
1540 }
1541
1542 static void
1543 panfrost_emit_streamout(struct panfrost_batch *batch,
1544 struct mali_attribute_buffer_packed *slot,
1545 unsigned stride_words, unsigned offset, unsigned count,
1546 struct pipe_stream_output_target *target)
1547 {
1548 unsigned stride = stride_words * 4;
1549 unsigned max_size = target->buffer_size;
1550 unsigned expected_size = stride * count;
1551
1552 /* Grab the BO and bind it to the batch */
1553 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1554
1555 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1556 * the perspective of the TILER and FRAGMENT.
1557 */
1558 panfrost_batch_add_bo(batch, bo,
1559 PAN_BO_ACCESS_SHARED |
1560 PAN_BO_ACCESS_RW |
1561 PAN_BO_ACCESS_VERTEX_TILER |
1562 PAN_BO_ACCESS_FRAGMENT);
1563
1564 /* We will have an offset applied to get alignment */
1565 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
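/* The pointer is rounded down to 64 bytes; the bytes dropped by the rounding
 * are added back into the size, and panfrost_streamout_offset() reports the
 * same low bits, which are folded into the varying record's offset, so
 * addressing is unchanged. */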
1566
1567 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1568 cfg.pointer = (addr & ~63);
1569 cfg.stride = stride;
1570 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1571 }
1572 }
1573
1574 static bool
1575 has_point_coord(unsigned mask, gl_varying_slot loc)
1576 {
1577 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1578 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1579 else if (loc == VARYING_SLOT_PNTC)
1580 return (mask & (1 << 8));
1581 else
1582 return false;
1583 }
1584
1585 /* Helpers for manipulating stream out information so we can pack varyings
1586 * accordingly. Compute the src_offset for a given captured varying */
1587
1588 static struct pipe_stream_output *
1589 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1590 {
1591 for (unsigned i = 0; i < info->num_outputs; ++i) {
1592 if (info->output[i].register_index == loc)
1593 return &info->output[i];
1594 }
1595
1596 unreachable("Varying not captured");
1597 }
1598
1599 static unsigned
1600 pan_varying_size(enum mali_format fmt)
1601 {
1602 unsigned type = MALI_EXTRACT_TYPE(fmt);
1603 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1604 unsigned bits = MALI_EXTRACT_BITS(fmt);
1605 unsigned bpc = 0;
1606
1607 if (bits == MALI_CHANNEL_FLOAT) {
1608 /* No doubles */
1609 bool fp16 = (type == MALI_FORMAT_SINT);
1610 assert(fp16 || (type == MALI_FORMAT_UNORM));
1611
1612 bpc = fp16 ? 2 : 4;
1613 } else {
1614 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1615
1616 /* See the enums */
1617 bits = 1 << bits;
1618 assert(bits >= 8);
1619 bpc = bits / 8;
1620 }
1621
1622 return bpc * chan;
1623 }
1624
1625 /* Indices for named (non-XFB) varyings that are present. These are packed
1626 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1627 * PAN_VARY_*). This has the nice property that you can lookup the buffer index
1628 * of a given special field given a shift S by:
1629 *
1630 * idx = popcount(P & ((1 << S) - 1))
1631 *
 1632 * That is, count the special varyings that are present and come before it;
 1633 * that count is the buffer index. Likewise, the total number of special
 1634 * buffers required is simply popcount(P)
1635 */
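/* Worked example: present = 0b1011 (GENERAL, POSITION, PNTCOORD). For
 * S = PAN_VARY_PNTCOORD = 3, idx = popcount(0b1011 & 0b0111) = 2, so the
 * point coordinate buffer is the third record. */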
1636
1637 enum pan_special_varying {
1638 PAN_VARY_GENERAL = 0,
1639 PAN_VARY_POSITION = 1,
1640 PAN_VARY_PSIZ = 2,
1641 PAN_VARY_PNTCOORD = 3,
1642 PAN_VARY_FACE = 4,
1643 PAN_VARY_FRAGCOORD = 5,
1644
1645 /* Keep last */
1646 PAN_VARY_MAX,
1647 };
1648
 1649 /* Given a varying, figure out which index it corresponds to */
1650
1651 static inline unsigned
1652 pan_varying_index(unsigned present, enum pan_special_varying v)
1653 {
1654 unsigned mask = (1 << v) - 1;
1655 return util_bitcount(present & mask);
1656 }
1657
1658 /* Get the base offset for XFB buffers, which by convention come after
1659 * everything else. Wrapper function for semantic reasons; by construction this
1660 * is just popcount. */
1661
1662 static inline unsigned
1663 pan_xfb_base(unsigned present)
1664 {
1665 return util_bitcount(present);
1666 }
1667
1668 /* Computes the present mask for varyings so we can start emitting varying records */
1669
1670 static inline unsigned
1671 pan_varying_present(
1672 struct panfrost_shader_state *vs,
1673 struct panfrost_shader_state *fs,
1674 unsigned quirks)
1675 {
1676 /* At the moment we always emit general and position buffers. Not
1677 * strictly necessary but usually harmless */
1678
1679 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1680
1681 /* Enable special buffers by the shader info */
1682
1683 if (vs->writes_point_size)
1684 present |= (1 << PAN_VARY_PSIZ);
1685
1686 if (fs->reads_point_coord)
1687 present |= (1 << PAN_VARY_PNTCOORD);
1688
1689 if (fs->reads_face)
1690 present |= (1 << PAN_VARY_FACE);
1691
1692 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1693 present |= (1 << PAN_VARY_FRAGCOORD);
1694
1695 /* Also, if we have a point sprite, we need a point coord buffer */
1696
1697 for (unsigned i = 0; i < fs->varying_count; i++) {
1698 gl_varying_slot loc = fs->varyings_loc[i];
1699
1700 if (has_point_coord(fs->point_sprite_mask, loc))
1701 present |= (1 << PAN_VARY_PNTCOORD);
1702 }
1703
1704 return present;
1705 }
1706
1707 /* Emitters for varying records */
1708
1709 static void
1710 pan_emit_vary(struct mali_attribute_packed *out,
1711 unsigned present, enum pan_special_varying buf,
1712 unsigned quirks, enum mali_format format,
1713 unsigned offset)
1714 {
1715 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1716 unsigned swizzle = quirks & HAS_SWIZZLES ?
1717 panfrost_get_default_swizzle(nr_channels) :
1718 panfrost_bifrost_swizzle(nr_channels);
1719
1720 pan_pack(out, ATTRIBUTE, cfg) {
1721 cfg.buffer_index = pan_varying_index(present, buf);
1722 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1723 cfg.format = (format << 12) | swizzle;
1724 cfg.offset = offset;
1725 }
1726 }
1727
1728 /* General varying that is unused */
1729
1730 static void
1731 pan_emit_vary_only(struct mali_attribute_packed *out,
1732 unsigned present, unsigned quirks)
1733 {
1734 pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
1735 }
1736
1737 /* Special records */
1738
1739 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1740 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1741 [PAN_VARY_PSIZ] = MALI_R16F,
1742 [PAN_VARY_PNTCOORD] = MALI_R16F,
1743 [PAN_VARY_FACE] = MALI_R32I,
1744 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1745 };
1746
1747 static void
1748 pan_emit_vary_special(struct mali_attribute_packed *out,
1749 unsigned present, enum pan_special_varying buf,
1750 unsigned quirks)
1751 {
1752 assert(buf < PAN_VARY_MAX);
1753 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1754 }
1755
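/* Compute the format used when capturing a varying to a transform feedback
 * buffer: float formats become R32F-based, other types keep their base type
 * but are widened to 32-bit channels, and the channel count comes from the
 * captured stream output. */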
1756 static enum mali_format
1757 pan_xfb_format(enum mali_format format, unsigned nr)
1758 {
1759 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1760 return MALI_R32F | MALI_NR_CHANNELS(nr);
1761 else
1762 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1763 }
1764
1765 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1766 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1767 * value. */
1768
1769 static void
1770 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1771 unsigned present,
1772 unsigned max_xfb,
1773 unsigned *streamout_offsets,
1774 unsigned quirks,
1775 enum mali_format format,
1776 struct pipe_stream_output o)
1777 {
1778 unsigned swizzle = quirks & HAS_SWIZZLES ?
1779 panfrost_get_default_swizzle(o.num_components) :
1780 panfrost_bifrost_swizzle(o.num_components);
1781
1782 pan_pack(out, ATTRIBUTE, cfg) {
1783 /* XFB buffers come after everything else */
1784 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1785 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1786
1787 /* Override number of channels and precision to highp */
1788 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1789
1790 /* Combine the per-output offset (in dwords) with the buffer's streamout offset */
1791 cfg.offset = (o.dst_offset * 4) /* dwords */
1792 + streamout_offsets[o.output_buffer];
1793 }
1794 }
1795
1796 /* Determine if we should capture a varying for XFB. This requires actually
1797 * having a buffer for it. If we don't capture it, we'll fall back to a general
1798 * varying path (linked or unlinked, possibly discarding the write) */
1799
1800 static bool
1801 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1802 unsigned loc, unsigned max_xfb)
1803 {
1804 if (!(xfb->so_mask & (1ll << loc)))
1805 return false;
1806
1807 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1808 return o->output_buffer < max_xfb;
1809 }
1810
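/* Emit a general-purpose varying. We look the slot up in the other shader
 * stage: if it isn't there, emit a discard record. If it is linked and we are
 * the allocating pass (should_alloc), watermark-allocate space for it in the
 * general varying buffer at the minimum precision of the two linked formats,
 * recording the offset and format so the other pass reuses them. */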
1811 static void
1812 pan_emit_general_varying(struct mali_attribute_packed *out,
1813 struct panfrost_shader_state *other,
1814 struct panfrost_shader_state *xfb,
1815 gl_varying_slot loc,
1816 enum mali_format format,
1817 unsigned present,
1818 unsigned quirks,
1819 unsigned *gen_offsets,
1820 enum mali_format *gen_formats,
1821 unsigned *gen_stride,
1822 unsigned idx,
1823 bool should_alloc)
1824 {
1825 /* Check if we're linked */
1826 signed other_idx = -1;
1827
1828 for (unsigned j = 0; j < other->varying_count; ++j) {
1829 if (other->varyings_loc[j] == loc) {
1830 other_idx = j;
1831 break;
1832 }
1833 }
1834
1835 if (other_idx < 0) {
1836 pan_emit_vary_only(out, present, quirks);
1837 return;
1838 }
1839
1840 unsigned offset = gen_offsets[other_idx];
1841
1842 if (should_alloc) {
1843 /* We're linked, so allocate space via a watermark allocation */
1844 enum mali_format alt = other->varyings[other_idx];
1845
1846 /* Do interpolation at minimum precision */
1847 unsigned size_main = pan_varying_size(format);
1848 unsigned size_alt = pan_varying_size(alt);
1849 unsigned size = MIN2(size_main, size_alt);
1850
1851 /* If a varying is marked for XFB but not actually captured, we
1852 * should match the format to the format that would otherwise
1853 * be used for XFB, since dEQP checks for invariance here. It's
1854 * unclear if this is required by the spec. */
1855
1856 if (xfb->so_mask & (1ull << loc)) {
1857 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1858 format = pan_xfb_format(format, o->num_components);
1859 size = pan_varying_size(format);
1860 } else if (size == size_alt) {
1861 format = alt;
1862 }
1863
1864 gen_offsets[idx] = *gen_stride;
1865 gen_formats[other_idx] = format;
1866 offset = *gen_stride;
1867 *gen_stride += size;
1868 }
1869
1870 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1871 }
1872
1873 /* Higher-level wrapper around all of the above, classifying a varying into one
1874 * of the above types */
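/* The classification priority is: point-sprite-replaced varyings first, then
 * XFB capture, then the dedicated slots (gl_Position, gl_PointSize,
 * gl_PointCoord, gl_FrontFacing), and finally the general linked/unlinked
 * path. */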
1875
1876 static void
1877 panfrost_emit_varying(
1878 struct mali_attribute_packed *out,
1879 struct panfrost_shader_state *stage,
1880 struct panfrost_shader_state *other,
1881 struct panfrost_shader_state *xfb,
1882 unsigned present,
1883 unsigned max_xfb,
1884 unsigned *streamout_offsets,
1885 unsigned quirks,
1886 unsigned *gen_offsets,
1887 enum mali_format *gen_formats,
1888 unsigned *gen_stride,
1889 unsigned idx,
1890 bool should_alloc,
1891 bool is_fragment)
1892 {
1893 gl_varying_slot loc = stage->varyings_loc[idx];
1894 enum mali_format format = stage->varyings[idx];
1895
1896 /* Override format to match linkage */
1897 if (!should_alloc && gen_formats[idx])
1898 format = gen_formats[idx];
1899
1900 if (has_point_coord(stage->point_sprite_mask, loc)) {
1901 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1902 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1903 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1904 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1905 } else if (loc == VARYING_SLOT_POS) {
1906 if (is_fragment)
1907 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1908 else
1909 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1910 } else if (loc == VARYING_SLOT_PSIZ) {
1911 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1912 } else if (loc == VARYING_SLOT_PNTC) {
1913 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1914 } else if (loc == VARYING_SLOT_FACE) {
1915 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1916 } else {
1917 pan_emit_general_varying(out, other, xfb, loc, format, present,
1918 quirks, gen_offsets, gen_formats, gen_stride,
1919 idx, should_alloc);
1920 }
1921 }
1922
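/* Special inputs (point coord, front facing, frag coord) are not backed by
 * memory we allocate; when present, their attribute buffer record just names
 * the special source (MALI_ATTRIBUTE_SPECIAL_*) rather than pointing at a
 * buffer. */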
1923 static void
1924 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1925 unsigned present,
1926 enum pan_special_varying v,
1927 unsigned special)
1928 {
1929 if (present & (1 << v)) {
1930 unsigned idx = pan_varying_index(present, v);
1931
1932 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1933 cfg.special = special;
1934 cfg.type = 0;
1935 }
1936 }
1937 }
1938
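/* Emits both the varying records (one array for the vertex shader's view, one
 * for the fragment shader's, sharing the linkage computed here) and the
 * varying buffers themselves: the buffers named by the present mask (general,
 * position, point size and the special inputs), followed by one streamout
 * buffer per active transform feedback target. */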
1939 void
1940 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1941 unsigned vertex_count,
1942 struct mali_vertex_tiler_postfix *vertex_postfix,
1943 struct mali_vertex_tiler_postfix *tiler_postfix,
1944 union midgard_primitive_size *primitive_size)
1945 {
1946 /* Load the shaders */
1947 struct panfrost_context *ctx = batch->ctx;
1948 struct panfrost_device *dev = pan_device(ctx->base.screen);
1949 struct panfrost_shader_state *vs, *fs;
1950 size_t vs_size, fs_size;
1951
1952 /* Allocate the varying descriptor */
1953
1954 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1955 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1956 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1957 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1958
1959 struct panfrost_transfer trans = panfrost_pool_alloc_aligned(
1960 &batch->pool, vs_size + fs_size, MALI_ATTRIBUTE_LENGTH);
1961
1962 struct pipe_stream_output_info *so = &vs->stream_output;
1963 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1964
1965 /* Check if this varying is linked by us. This is the case for
1966 * general-purpose, non-captured varyings. If it is, link it. If it's
1967 * not, use the provided stream out information to determine the
1968 * offset, since it was already linked for us. */
1969
1970 unsigned gen_offsets[32];
1971 enum mali_format gen_formats[32];
1972 memset(gen_offsets, 0, sizeof(gen_offsets));
1973 memset(gen_formats, 0, sizeof(gen_formats));
1974
1975 unsigned gen_stride = 0;
1976 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1977 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1978
1979 unsigned streamout_offsets[32];
1980
1981 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1982 streamout_offsets[i] = panfrost_streamout_offset(
1983 so->stride[i],
1984 ctx->streamout.offsets[i],
1985 ctx->streamout.targets[i]);
1986 }
1987
1988 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1989 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1990
1991 for (unsigned i = 0; i < vs->varying_count; i++) {
1992 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1993 ctx->streamout.num_targets, streamout_offsets,
1994 dev->quirks,
1995 gen_offsets, gen_formats, &gen_stride, i, true, false);
1996 }
1997
1998 for (unsigned i = 0; i < fs->varying_count; i++) {
1999 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
2000 ctx->streamout.num_targets, streamout_offsets,
2001 dev->quirks,
2002 gen_offsets, gen_formats, &gen_stride, i, false, true);
2003 }
2004
2005 unsigned xfb_base = pan_xfb_base(present);
2006 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
2007 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets),
2008 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
2009 struct mali_attribute_buffer_packed *varyings =
2010 (struct mali_attribute_buffer_packed *) T.cpu;
2011
2012 /* Emit the stream out buffers */
2013
2014 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
2015 ctx->vertex_count);
2016
2017 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2018 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
2019 so->stride[i],
2020 ctx->streamout.offsets[i],
2021 out_count,
2022 ctx->streamout.targets[i]);
2023 }
2024
2025 panfrost_emit_varyings(batch,
2026 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2027 gen_stride, vertex_count);
2028
2029 /* fp32 vec4 gl_Position */
2030 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2031 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2032 sizeof(float) * 4, vertex_count);
2033
2034 if (present & (1 << PAN_VARY_PSIZ)) {
2035 primitive_size->pointer = panfrost_emit_varyings(batch,
2036 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2037 2, vertex_count);
2038 }
2039
2040 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
2041 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
2042 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
2043
2044 vertex_postfix->varyings = T.gpu;
2045 tiler_postfix->varyings = T.gpu;
2046
2047 vertex_postfix->varying_meta = trans.gpu;
2048 tiler_postfix->varying_meta = trans.gpu + vs_size;
2049 }
2050
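/* Assembles and submits the vertex job and its dependent tiler job for a
 * draw, using the Bifrost or Midgard payload layout as appropriate. When
 * rasterizer discard is enabled, only the vertex job is submitted. */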
2051 void
2052 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2053 struct mali_vertex_tiler_prefix *vertex_prefix,
2054 struct mali_vertex_tiler_postfix *vertex_postfix,
2055 struct mali_vertex_tiler_prefix *tiler_prefix,
2056 struct mali_vertex_tiler_postfix *tiler_postfix,
2057 union midgard_primitive_size *primitive_size)
2058 {
2059 struct panfrost_context *ctx = batch->ctx;
2060 struct panfrost_device *device = pan_device(ctx->base.screen);
2061 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2062 struct bifrost_payload_vertex bifrost_vertex = {0,};
2063 struct bifrost_payload_tiler bifrost_tiler = {0,};
2064 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2065 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2066 void *vp, *tp;
2067 size_t vp_size, tp_size;
2068
2069 if (device->quirks & IS_BIFROST) {
2070 bifrost_vertex.prefix = *vertex_prefix;
2071 bifrost_vertex.postfix = *vertex_postfix;
2072 vp = &bifrost_vertex;
2073 vp_size = sizeof(bifrost_vertex);
2074
2075 bifrost_tiler.prefix = *tiler_prefix;
2076 bifrost_tiler.tiler.primitive_size = *primitive_size;
2077 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2078 bifrost_tiler.postfix = *tiler_postfix;
2079 tp = &bifrost_tiler;
2080 tp_size = sizeof(bifrost_tiler);
2081 } else {
2082 midgard_vertex.prefix = *vertex_prefix;
2083 midgard_vertex.postfix = *vertex_postfix;
2084 vp = &midgard_vertex;
2085 vp_size = sizeof(midgard_vertex);
2086
2087 midgard_tiler.prefix = *tiler_prefix;
2088 midgard_tiler.postfix = *tiler_postfix;
2089 midgard_tiler.primitive_size = *primitive_size;
2090 tp = &midgard_tiler;
2091 tp_size = sizeof(midgard_tiler);
2092 }
2093
2094 if (wallpapering) {
2095 /* Inject in reverse order, with "predicted" job indices.
2096 * THIS IS A HACK XXX */
2097 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2098 batch->scoreboard.job_index + 2, tp, tp_size, true);
2099 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2100 vp, vp_size, true);
2101 return;
2102 }
2103
2104 /* If rasterizer discard is enabled, only submit the vertex job */
2105
2106 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2107 vp, vp_size, false);
2108
2109 if (ctx->rasterizer->base.rasterizer_discard)
2110 return;
2111
2112 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2113 false);
2114 }
2115
2116 /* TODO: stop hardcoding this */
2117 mali_ptr
2118 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2119 {
2120 uint16_t locations[] = {
2121 128, 128,
2122 0, 256,
2123 0, 256,
2124 0, 256,
2125 0, 256,
2126 0, 256,
2127 0, 256,
2128 0, 256,
2129 0, 256,
2130 0, 256,
2131 0, 256,
2132 0, 256,
2133 0, 256,
2134 0, 256,
2135 0, 256,
2136 0, 256,
2137 0, 256,
2138 0, 256,
2139 0, 256,
2140 0, 256,
2141 0, 256,
2142 0, 256,
2143 0, 256,
2144 0, 256,
2145 0, 256,
2146 0, 256,
2147 0, 256,
2148 0, 256,
2149 0, 256,
2150 0, 256,
2151 0, 256,
2152 0, 256,
2153 128, 128,
2154 0, 0,
2155 0, 0,
2156 0, 0,
2157 0, 0,
2158 0, 0,
2159 0, 0,
2160 0, 0,
2161 0, 0,
2162 0, 0,
2163 0, 0,
2164 0, 0,
2165 0, 0,
2166 0, 0,
2167 0, 0,
2168 0, 0,
2169 };
2170
2171 return panfrost_pool_upload_aligned(&batch->pool, locations, 96 * sizeof(uint16_t), 64);
2172 }