panfrost: Simplify shaderless packing
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 struct mali_shared_memory shared = {
62 .shared_workgroup_count = ~0,
63 };
64
65 if (batch->stack_size) {
66 struct panfrost_bo *stack =
67 panfrost_batch_get_scratchpad(batch, batch->stack_size,
68 dev->thread_tls_alloc,
69 dev->core_count);
70
71 shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
72 shared.scratchpad = stack->gpu;
73 }
74
75 postfix->shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared, sizeof(shared), 64);
76 }
77
78 static void
79 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_postfix *postfix)
81 {
82 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
83 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
84 }
85
86 static void
87 panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
88 struct mali_vertex_tiler_prefix *prefix,
89 struct mali_vertex_tiler_postfix *postfix)
90 {
91 postfix->gl_enables |= 0x7;
92 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
93 rasterizer->base.front_ccw);
94 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
95 (rasterizer->base.cull_face & PIPE_FACE_FRONT));
96 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
97 (rasterizer->base.cull_face & PIPE_FACE_BACK));
98 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
99 rasterizer->base.flatshade_first);
100 }
101
102 void
103 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
104 struct mali_vertex_tiler_prefix *prefix,
105 union midgard_primitive_size *primitive_size)
106 {
107 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
108
109 if (!panfrost_writes_point_size(ctx)) {
110 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
111 rasterizer->base.point_size :
112 rasterizer->base.line_width;
113
114 primitive_size->constant = val;
115 }
116 }
117
118 static void
119 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
120 struct mali_vertex_tiler_postfix *postfix)
121 {
122 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
123 if (ctx->occlusion_query) {
124 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
125 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
126 PAN_BO_ACCESS_SHARED |
127 PAN_BO_ACCESS_RW |
128 PAN_BO_ACCESS_FRAGMENT);
129 } else {
130 postfix->occlusion_counter = 0;
131 }
132 }
133
134 void
135 panfrost_vt_init(struct panfrost_context *ctx,
136 enum pipe_shader_type stage,
137 struct mali_vertex_tiler_prefix *prefix,
138 struct mali_vertex_tiler_postfix *postfix)
139 {
140 struct panfrost_device *device = pan_device(ctx->base.screen);
141
142 if (!ctx->shader[stage])
143 return;
144
145 memset(prefix, 0, sizeof(*prefix));
146 memset(postfix, 0, sizeof(*postfix));
147
148 if (device->quirks & IS_BIFROST) {
149 postfix->gl_enables = 0x2;
150 panfrost_vt_emit_shared_memory(ctx, postfix);
151 } else {
152 postfix->gl_enables = 0x6;
153 panfrost_vt_attach_framebuffer(ctx, postfix);
154 }
155
156 if (stage == PIPE_SHADER_FRAGMENT) {
157 panfrost_vt_update_occlusion_query(ctx, postfix);
158 panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
159 }
160 }
161
162 static unsigned
163 panfrost_translate_index_size(unsigned size)
164 {
165 switch (size) {
166 case 1:
167 return MALI_DRAW_INDEXED_UINT8;
168
169 case 2:
170 return MALI_DRAW_INDEXED_UINT16;
171
172 case 4:
173 return MALI_DRAW_INDEXED_UINT32;
174
175 default:
176 unreachable("Invalid index size");
177 }
178 }
179
180 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
181 * good for the duration of the draw (transient), could last longer. Also get
182 * the bounds on the index buffer for the range accessed by the draw. We do
183 * these operations together because there are natural optimizations which
184 * require them to be together. */
185
186 static mali_ptr
187 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
188 const struct pipe_draw_info *info,
189 unsigned *min_index, unsigned *max_index)
190 {
191 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
192 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
193 off_t offset = info->start * info->index_size;
194 bool needs_indices = true;
195 mali_ptr out = 0;
196
197 if (info->max_index != ~0u) {
198 *min_index = info->min_index;
199 *max_index = info->max_index;
200 needs_indices = false;
201 }
202
203 if (!info->has_user_indices) {
204 /* Only resources can be directly mapped */
205 panfrost_batch_add_bo(batch, rsrc->bo,
206 PAN_BO_ACCESS_SHARED |
207 PAN_BO_ACCESS_READ |
208 PAN_BO_ACCESS_VERTEX_TILER);
209 out = rsrc->bo->gpu + offset;
210
211 /* Check the cache */
212 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
213 info->start,
214 info->count,
215 min_index,
216 max_index);
217 } else {
218 /* Otherwise, we need to upload to transient memory */
219 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
220 struct panfrost_transfer T =
221 panfrost_pool_alloc_aligned(&batch->pool,
222 info->count * info->index_size,
223 info->index_size);
224
225 memcpy(T.cpu, ibuf8 + offset, info->count * info->index_size);
226 out = T.gpu;
227 }
228
229 if (needs_indices) {
230 /* Fallback */
231 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
232
233 if (!info->has_user_indices)
234 panfrost_minmax_cache_add(rsrc->index_cache,
235 info->start, info->count,
236 *min_index, *max_index);
237 }
238
239 return out;
240 }
241
242 void
243 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
244 const struct pipe_draw_info *info,
245 enum mali_draw_mode draw_mode,
246 struct mali_vertex_tiler_postfix *vertex_postfix,
247 struct mali_vertex_tiler_prefix *tiler_prefix,
248 struct mali_vertex_tiler_postfix *tiler_postfix,
249 unsigned *vertex_count,
250 unsigned *padded_count)
251 {
252 tiler_prefix->draw_mode = draw_mode;
253
254 unsigned draw_flags = 0;
255
256 if (panfrost_writes_point_size(ctx))
257 draw_flags |= MALI_DRAW_VARYING_SIZE;
258
259 if (info->primitive_restart)
260 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
261
262         /* These don't make much sense */
263
264 draw_flags |= 0x3000;
265
266 if (info->index_size) {
267 unsigned min_index = 0, max_index = 0;
268
269 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
270 info,
271 &min_index,
272 &max_index);
273
274 /* Use the corresponding values */
275 *vertex_count = max_index - min_index + 1;
276 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
277 tiler_prefix->offset_bias_correction = -min_index;
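                /* MALI_POSITIVE(n) packs n - 1: these hardware count fields
                 * are stored off by one */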
278 tiler_prefix->index_count = MALI_POSITIVE(info->count);
279 draw_flags |= panfrost_translate_index_size(info->index_size);
280 } else {
281 tiler_prefix->indices = 0;
282 *vertex_count = ctx->vertex_count;
283 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
284 tiler_prefix->offset_bias_correction = 0;
285 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
286 }
287
288 tiler_prefix->unknown_draw = draw_flags;
289
290 /* Encode the padded vertex count */
291
292 if (info->instance_count > 1) {
293 *padded_count = panfrost_padded_vertex_count(*vertex_count);
294
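                /* The padded count decomposes exactly as (2k + 1) << shift, an
                 * odd number times a power of two: e.g. 40 = 5 << 3 gives
                 * shift = 3, k = 2. These values feed the instanced attribute
                 * divisors in panfrost_emit_vertex_data. */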
295 unsigned shift = __builtin_ctz(ctx->padded_count);
296 unsigned k = ctx->padded_count >> (shift + 1);
297
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
300 } else {
301 *padded_count = *vertex_count;
302
303 /* Reset instancing state */
304 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
305 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
306 }
307 }
308
309 static void
310 panfrost_emit_compute_shader(struct panfrost_context *ctx,
311 enum pipe_shader_type st,
312 struct mali_shader_meta *meta)
313 {
314 const struct panfrost_device *dev = pan_device(ctx->base.screen);
315 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
316
317 memset(meta, 0, sizeof(*meta));
318 memcpy(&meta->shader, &ss->shader, sizeof(ss->shader));
319 memcpy(&meta->midgard_props, &ss->properties, sizeof(ss->properties));
320
321 if (dev->quirks & IS_BIFROST)
322 memcpy(&meta->bifrost_preload, &ss->preload, sizeof(ss->preload));
323 }
324
325 static unsigned
326 translate_tex_wrap(enum pipe_tex_wrap w)
327 {
328 switch (w) {
329 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
330 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
331 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
332 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
333 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
334 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
335 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
336 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
337 default: unreachable("Invalid wrap");
338 }
339 }
340
341 /* The hardware compares in the wrong order, so we have to flip before
342 * encoding. Yes, really. */
343
344 static enum mali_func
345 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
346 {
347 if (!cso->compare_mode)
348 return MALI_FUNC_NEVER;
349
350 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
351 return panfrost_flip_compare_func(f);
352 }
353
354 static enum mali_mipmap_mode
355 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
356 {
357 switch (f) {
358 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
359 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
360 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
361 default: unreachable("Invalid");
362 }
363 }
364
365 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
366 struct mali_midgard_sampler_packed *hw)
367 {
368 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
369 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
370 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
371 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
372 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
373 cfg.normalized_coordinates = cso->normalized_coords;
374
375 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
376
377 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
378
379 /* If necessary, we disable mipmapping in the sampler descriptor by
380                  * clamping the LOD as tight as possible (from the minimum LOD to
381                  * the minimum LOD plus epsilon, essentially -- remember these are
382                  * fixed point numbers, so epsilon = 1/256) */
383
384 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
385 cfg.minimum_lod + 1 :
386 FIXED_16(cso->max_lod, false);
387
388 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
389 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
390 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
391
392 cfg.compare_function = panfrost_sampler_compare_func(cso);
393 cfg.seamless_cube_map = cso->seamless_cube_map;
394
395 cfg.border_color_r = cso->border_color.f[0];
396 cfg.border_color_g = cso->border_color.f[1];
397 cfg.border_color_b = cso->border_color.f[2];
398 cfg.border_color_a = cso->border_color.f[3];
399 }
400 }
401
402 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
403 struct mali_bifrost_sampler_packed *hw)
404 {
405 pan_pack(hw, BIFROST_SAMPLER, cfg) {
406 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
407 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
408 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
409 cfg.normalized_coordinates = cso->normalized_coords;
410
411 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
412 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
413 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
414
415 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
416 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
417 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
418
419 cfg.compare_function = panfrost_sampler_compare_func(cso);
420 cfg.seamless_cube_map = cso->seamless_cube_map;
421 }
422 }
423
424 static bool
425 panfrost_fs_required(
426 struct panfrost_shader_state *fs,
427 struct panfrost_blend_final *blend,
428 unsigned rt_count)
429 {
430 /* If we generally have side effects */
431 if (fs->fs_sidefx)
432 return true;
433
434 /* If colour is written we need to execute */
435 for (unsigned i = 0; i < rt_count; ++i) {
436 if (!blend[i].no_colour)
437 return true;
438 }
439
440 /* If depth is written and not implied we need to execute.
441 * TODO: Predicate on Z/S writes being enabled */
442 return (fs->writes_depth || fs->writes_stencil);
443 }
444
445 static void
446 panfrost_emit_blend(struct panfrost_batch *batch, void *rts,
447 struct panfrost_blend_final *blend)
448 {
449 const struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
450 struct panfrost_shader_state *fs = panfrost_get_shader_state(batch->ctx, PIPE_SHADER_FRAGMENT);
451 unsigned rt_count = batch->key.nr_cbufs;
452
453 struct bifrost_blend_rt *brts = rts;
454 struct midgard_blend_rt *mrts = rts;
455
456 /* Disable blending for depth-only on Bifrost */
457
458 if (rt_count == 0 && dev->quirks & IS_BIFROST)
459 brts[0].unk2 = 0x3;
460
461 for (unsigned i = 0; i < rt_count; ++i) {
462 unsigned flags = 0;
463
464 pan_pack(&flags, BLEND_FLAGS, cfg) {
465 if (blend[i].no_colour) {
466 cfg.enable = false;
467 break;
468 }
469
470 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
471
472 cfg.srgb = util_format_is_srgb(batch->key.cbufs[i]->format);
473 cfg.load_destination = blend[i].load_dest;
474 cfg.dither_disable = !batch->ctx->blend->base.dither;
475
476 if (!(dev->quirks & IS_BIFROST))
477 cfg.midgard_blend_shader = blend[i].is_shader;
478 }
479
480 if (dev->quirks & IS_BIFROST) {
481 brts[i].flags = flags;
482
483 if (blend[i].is_shader) {
484 /* The blend shader's address needs to be at
485                                  * the same top 32 bits as the fragment shader.
486 * TODO: Ensure that's always the case.
487 */
488 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
489 (fs->bo->gpu & (0xffffffffull << 32)));
490 brts[i].shader = blend[i].shader.gpu;
491 brts[i].unk2 = 0x0;
492 } else {
493 enum pipe_format format = batch->key.cbufs[i]->format;
494 const struct util_format_description *format_desc;
495 format_desc = util_format_description(format);
496
497 brts[i].equation = blend[i].equation.equation;
498
499 /* TODO: this is a bit more complicated */
500 brts[i].constant = blend[i].equation.constant;
501
502 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
503
504 /* 0x19 disables blending and forces REPLACE
505 * mode (equivalent to rgb_mode = alpha_mode =
506 * x122, colour mask = 0xF). 0x1a allows
507 * blending. */
508 brts[i].unk2 = blend[i].opaque ? 0x19 : 0x1a;
509
510 brts[i].shader_type = fs->blend_types[i];
511 }
512 } else {
513 memcpy(&mrts[i].flags, &flags, sizeof(flags));
514
515 if (blend[i].is_shader) {
516 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
517 } else {
518 mrts[i].blend.equation = blend[i].equation.equation;
519 mrts[i].blend.constant = blend[i].equation.constant;
520 }
521 }
522 }
523 }
524
525 static void
526 panfrost_emit_frag_shader(struct panfrost_context *ctx,
527 struct mali_shader_meta *fragmeta,
528 struct panfrost_blend_final *blend)
529 {
530 const struct panfrost_device *dev = pan_device(ctx->base.screen);
531 struct panfrost_shader_state *fs;
532
533 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
534
535 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
536 const struct panfrost_zsa_state *zsa = ctx->depth_stencil;
537 unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
538
539 memset(fragmeta, 0, sizeof(*fragmeta));
540 memcpy(&fragmeta->shader, &fs->shader, sizeof(fs->shader));
541
542 if (!panfrost_fs_required(fs, blend, rt_count)) {
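                /* No fragment work is needed (nothing writes colour, depth or
                 * stencil, and there are no side effects), so pack a minimal
                 * stub descriptor with early-Z enabled instead of a real
                 * shader. */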
543 struct mali_shader_packed shader = { 0 };
544 struct mali_midgard_properties_packed prop;
545
546 if (dev->quirks & IS_BIFROST) {
547 struct mali_preload_packed preload = { 0 };
548 memcpy(&fragmeta->bifrost_preload, &preload, sizeof(preload));
549
550 pan_pack(&prop, BIFROST_PROPERTIES, cfg) {
551 cfg.unknown = 0x950020; /* XXX */
552 cfg.early_z_enable = true;
553 }
554 } else {
555 pan_pack(&shader, SHADER, cfg) {
556 cfg.shader = 0x1;
557 }
558
559 pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
560 cfg.work_register_count = 1;
561 cfg.depth_source = MALI_DEPTH_SOURCE_FIXED_FUNCTION;
562 cfg.early_z_enable = true;
563 }
564 }
565
566 memcpy(&fragmeta->shader, &shader, sizeof(shader));
567 memcpy(&fragmeta->midgard_props, &prop, sizeof(prop));
568 } else if (dev->quirks & IS_BIFROST) {
569 struct mali_bifrost_properties_packed prop;
570
571 bool no_blend = true;
572
573 for (unsigned i = 0; i < rt_count; ++i)
574 no_blend &= (!blend[i].load_dest | blend[i].no_colour);
575
576 pan_pack(&prop, BIFROST_PROPERTIES, cfg) {
577 cfg.early_z_enable = !fs->can_discard && !fs->writes_depth && no_blend;
578 }
579
580 /* Combine with prepacked properties */
581 prop.opaque[0] |= fs->properties.opaque[0];
582
583 memcpy(&fragmeta->bifrost_props, &prop, sizeof(prop));
584 memcpy(&fragmeta->bifrost_preload, &fs->preload, sizeof(fs->preload));
585 } else {
586 struct mali_midgard_properties_packed prop;
587
588 /* Reasons to disable early-Z from a shader perspective */
589 bool late_z = fs->can_discard || fs->writes_global ||
590 fs->writes_depth || fs->writes_stencil;
591
592 /* Reasons to disable early-Z from a CSO perspective */
593 bool alpha_to_coverage = ctx->blend->base.alpha_to_coverage;
594
595 /* If either depth or stencil is enabled, discard matters */
596 bool zs_enabled =
597 (zsa->base.depth.enabled && zsa->base.depth.func != PIPE_FUNC_ALWAYS) ||
598 zsa->base.stencil[0].enabled;
599
600 bool has_blend_shader = false;
601
602 for (unsigned c = 0; c < rt_count; ++c)
603 has_blend_shader |= blend[c].is_shader;
604
605 pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
606 /* TODO: Reduce this limit? */
607 if (has_blend_shader)
608 cfg.work_register_count = MAX2(fs->work_reg_count, 8);
609 else
610 cfg.work_register_count = fs->work_reg_count;
611
612 cfg.early_z_enable = !(late_z || alpha_to_coverage);
613 cfg.reads_tilebuffer = fs->outputs_read || (!zs_enabled && fs->can_discard);
614 cfg.reads_depth_stencil = zs_enabled && fs->can_discard;
615 }
616
617 /* Combine with prepacked properties */
618 prop.opaque[0] |= fs->properties.opaque[0];
619 memcpy(&fragmeta->midgard_props, &prop, sizeof(prop));
620 }
621
622 bool msaa = rast->multisample;
623 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
624
625 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
626 fragmeta->unknown2_4 = 0x4e0;
627
628 /* TODO: Sample size */
629 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
630 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
631
632 /* EXT_shader_framebuffer_fetch requires the shader to be run
633 * per-sample when outputs are read. */
634 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
635 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
636
637 fragmeta->depth_units = rast->offset_units * 2.0f;
638 fragmeta->depth_factor = rast->offset_scale;
639
640         /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
641
642 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
643 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
644
645 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
646 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
647
648 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
649 zsa->base.stencil[0].enabled);
650
651 fragmeta->stencil_mask_front = zsa->stencil_mask_front;
652 fragmeta->stencil_mask_back = zsa->stencil_mask_back;
653
654 /* Bottom bits for stencil ref, exactly one word */
655 fragmeta->stencil_front.opaque[0] = zsa->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
656
657 /* If back-stencil is not enabled, use the front values */
658
659 if (zsa->base.stencil[1].enabled)
660 fragmeta->stencil_back.opaque[0] = zsa->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
661 else
662 fragmeta->stencil_back = fragmeta->stencil_front;
663
664 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
665 zsa->base.depth.writemask);
666
667 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
668 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(
669 zsa->base.depth.enabled ? zsa->base.depth.func : PIPE_FUNC_ALWAYS));
670
671 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
672 ctx->blend->base.alpha_to_coverage);
673
674 if (dev->quirks & MIDGARD_SFBD) {
675                 /* On single render target (SFBD) platforms, the blend
676 * information is inside the shader meta itself. We additionally
677 * need to signal CAN_DISCARD for nontrivial blend modes (so
678 * we're able to read back the destination buffer) */
679
680 if (blend[0].no_colour)
681 return;
682
683 fragmeta->unknown2_4 |= MALI_SFBD_ENABLE;
684
685 SET_BIT(fragmeta->unknown2_4, MALI_SFBD_SRGB,
686 util_format_is_srgb(ctx->pipe_framebuffer.cbufs[0]->format));
687
688 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
689 blend[0].is_shader);
690
691 if (blend[0].is_shader) {
692 fragmeta->blend.shader = blend[0].shader.gpu |
693 blend[0].shader.first_tag;
694 } else {
695 fragmeta->blend.equation = blend[0].equation.equation;
696 fragmeta->blend.constant = blend[0].equation.constant;
697 }
698
699 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
700 blend[0].load_dest);
701
702 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER, !ctx->blend->base.dither);
703 } else if (!(dev->quirks & IS_BIFROST)) {
704 /* Bug where MRT-capable hw apparently reads the last blend
705 * shader from here instead of the usual location? */
706
707 for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
708 if (!blend[rt].is_shader)
709 continue;
710
711 fragmeta->blend.shader = blend[rt].shader.gpu |
712 blend[rt].shader.first_tag;
713 break;
714 }
715 }
716 }
717
718 void
719 panfrost_emit_shader_meta(struct panfrost_batch *batch,
720 enum pipe_shader_type st,
721 struct mali_vertex_tiler_postfix *postfix)
722 {
723 struct panfrost_context *ctx = batch->ctx;
724 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
725
726 if (!ss) {
727 postfix->shader = 0;
728 return;
729 }
730
731 struct mali_shader_meta meta;
732
733 /* Add the shader BO to the batch. */
734 panfrost_batch_add_bo(batch, ss->bo,
735 PAN_BO_ACCESS_PRIVATE |
736 PAN_BO_ACCESS_READ |
737 panfrost_bo_access_for_stage(st));
738
739 mali_ptr shader_ptr;
740
741 if (st == PIPE_SHADER_FRAGMENT) {
742 struct panfrost_device *dev = pan_device(ctx->base.screen);
743 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
744 size_t desc_size = sizeof(meta);
745 void *rts = NULL;
746 struct panfrost_transfer xfer;
747 unsigned rt_size;
748
749 if (dev->quirks & MIDGARD_SFBD)
750 rt_size = 0;
751 else if (dev->quirks & IS_BIFROST)
752 rt_size = sizeof(struct bifrost_blend_rt);
753 else
754 rt_size = sizeof(struct midgard_blend_rt);
755
756 desc_size += rt_size * rt_count;
757
758 if (rt_size)
759 rts = rzalloc_size(ctx, rt_size * rt_count);
760
761 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
762
763 for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
764 blend[c] = panfrost_get_blend_for_context(ctx, c);
765
766 panfrost_emit_frag_shader(ctx, &meta, blend);
767
768 if (!(dev->quirks & MIDGARD_SFBD))
769 panfrost_emit_blend(batch, rts, blend);
770 else
771 batch->draws |= PIPE_CLEAR_COLOR0;
772
773 xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, sizeof(meta));
774
775 memcpy(xfer.cpu, &meta, sizeof(meta));
776 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
777
778 if (rt_size)
779 ralloc_free(rts);
780
781 shader_ptr = xfer.gpu;
782 } else {
783 panfrost_emit_compute_shader(ctx, st, &meta);
784
785 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
786 sizeof(meta));
787 }
788
789 postfix->shader = shader_ptr;
790 }
791
792 void
793 panfrost_emit_viewport(struct panfrost_batch *batch,
794 struct mali_vertex_tiler_postfix *tiler_postfix)
795 {
796 struct panfrost_context *ctx = batch->ctx;
797 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
798 const struct pipe_scissor_state *ss = &ctx->scissor;
799 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
800 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
801
802 /* Derive min/max from translate/scale. Note since |x| >= 0 by
803 * definition, we have that -|x| <= |x| hence translate - |scale| <=
804 * translate + |scale|, so the ordering is correct here. */
805 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
806 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
807 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
808 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
809 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
810 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
811
812         /* Scissor to the intersection of the viewport and the scissor, clamped
813 * to the framebuffer */
814
815 unsigned minx = MIN2(fb->width, vp_minx);
816 unsigned maxx = MIN2(fb->width, vp_maxx);
817 unsigned miny = MIN2(fb->height, vp_miny);
818 unsigned maxy = MIN2(fb->height, vp_maxy);
819
820 if (ss && rast->scissor) {
821 minx = MAX2(ss->minx, minx);
822 miny = MAX2(ss->miny, miny);
823 maxx = MIN2(ss->maxx, maxx);
824 maxy = MIN2(ss->maxy, maxy);
825 }
826
827 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
828
829 pan_pack(T.cpu, VIEWPORT, cfg) {
830 cfg.scissor_minimum_x = minx;
831 cfg.scissor_minimum_y = miny;
832 cfg.scissor_maximum_x = maxx - 1;
833 cfg.scissor_maximum_y = maxy - 1;
834
835 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
836 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
837 }
838
839 tiler_postfix->viewport = T.gpu;
840 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
841 }
842
843 static mali_ptr
844 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
845 enum pipe_shader_type st,
846 struct panfrost_constant_buffer *buf,
847 unsigned index)
848 {
849 struct pipe_constant_buffer *cb = &buf->cb[index];
850 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
851
852 if (rsrc) {
853 panfrost_batch_add_bo(batch, rsrc->bo,
854 PAN_BO_ACCESS_SHARED |
855 PAN_BO_ACCESS_READ |
856 panfrost_bo_access_for_stage(st));
857
858                 /* Alignment guaranteed by
859 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
860 return rsrc->bo->gpu + cb->buffer_offset;
861 } else if (cb->user_buffer) {
862 return panfrost_pool_upload_aligned(&batch->pool,
863 cb->user_buffer +
864 cb->buffer_offset,
865 cb->buffer_size, 16);
866 } else {
867 unreachable("No constant buffer");
868 }
869 }
870
871 struct sysval_uniform {
872 union {
873 float f[4];
874 int32_t i[4];
875 uint32_t u[4];
876 uint64_t du[2];
877 };
878 };
879
880 static void
881 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
882 struct sysval_uniform *uniform)
883 {
884 struct panfrost_context *ctx = batch->ctx;
885 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
886
887 uniform->f[0] = vp->scale[0];
888 uniform->f[1] = vp->scale[1];
889 uniform->f[2] = vp->scale[2];
890 }
891
892 static void
893 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
894 struct sysval_uniform *uniform)
895 {
896 struct panfrost_context *ctx = batch->ctx;
897 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
898
899 uniform->f[0] = vp->translate[0];
900 uniform->f[1] = vp->translate[1];
901 uniform->f[2] = vp->translate[2];
902 }
903
904 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
905 enum pipe_shader_type st,
906 unsigned int sysvalid,
907 struct sysval_uniform *uniform)
908 {
909 struct panfrost_context *ctx = batch->ctx;
910 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
911 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
912 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
913 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
914
915 assert(dim);
916 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
917
918 if (dim > 1)
919 uniform->i[1] = u_minify(tex->texture->height0,
920 tex->u.tex.first_level);
921
922 if (dim > 2)
923 uniform->i[2] = u_minify(tex->texture->depth0,
924 tex->u.tex.first_level);
925
926 if (is_array)
927 uniform->i[dim] = tex->texture->array_size;
928 }
929
930 static void
931 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
932 enum pipe_shader_type st,
933 unsigned ssbo_id,
934 struct sysval_uniform *uniform)
935 {
936 struct panfrost_context *ctx = batch->ctx;
937
938 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
939 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
940
941 /* Compute address */
942 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
943
944 panfrost_batch_add_bo(batch, bo,
945 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
946 panfrost_bo_access_for_stage(st));
947
948 /* Upload address and size as sysval */
949 uniform->du[0] = bo->gpu + sb.buffer_offset;
950 uniform->u[2] = sb.buffer_size;
951 }
952
953 static void
954 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
955 enum pipe_shader_type st,
956 unsigned samp_idx,
957 struct sysval_uniform *uniform)
958 {
959 struct panfrost_context *ctx = batch->ctx;
960 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
961
962 uniform->f[0] = sampl->min_lod;
963 uniform->f[1] = sampl->max_lod;
964 uniform->f[2] = sampl->lod_bias;
965
966 /* Even without any errata, Midgard represents "no mipmapping" as
967 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
968 * panfrost_create_sampler_state which also explains our choice of
969 * epsilon value (again to keep behaviour consistent) */
970
971 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
972 uniform->f[1] = uniform->f[0] + (1.0/256.0);
973 }
974
975 static void
976 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
977 struct sysval_uniform *uniform)
978 {
979 struct panfrost_context *ctx = batch->ctx;
980
981 uniform->u[0] = ctx->compute_grid->grid[0];
982 uniform->u[1] = ctx->compute_grid->grid[1];
983 uniform->u[2] = ctx->compute_grid->grid[2];
984 }
985
986 static void
987 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
988 struct panfrost_shader_state *ss,
989 enum pipe_shader_type st)
990 {
991 struct sysval_uniform *uniforms = (void *)buf;
992
993 for (unsigned i = 0; i < ss->sysval_count; ++i) {
994 int sysval = ss->sysval[i];
995
996 switch (PAN_SYSVAL_TYPE(sysval)) {
997 case PAN_SYSVAL_VIEWPORT_SCALE:
998 panfrost_upload_viewport_scale_sysval(batch,
999 &uniforms[i]);
1000 break;
1001 case PAN_SYSVAL_VIEWPORT_OFFSET:
1002 panfrost_upload_viewport_offset_sysval(batch,
1003 &uniforms[i]);
1004 break;
1005 case PAN_SYSVAL_TEXTURE_SIZE:
1006 panfrost_upload_txs_sysval(batch, st,
1007 PAN_SYSVAL_ID(sysval),
1008 &uniforms[i]);
1009 break;
1010 case PAN_SYSVAL_SSBO:
1011 panfrost_upload_ssbo_sysval(batch, st,
1012 PAN_SYSVAL_ID(sysval),
1013 &uniforms[i]);
1014 break;
1015 case PAN_SYSVAL_NUM_WORK_GROUPS:
1016 panfrost_upload_num_work_groups_sysval(batch,
1017 &uniforms[i]);
1018 break;
1019 case PAN_SYSVAL_SAMPLER:
1020 panfrost_upload_sampler_sysval(batch, st,
1021 PAN_SYSVAL_ID(sysval),
1022 &uniforms[i]);
1023 break;
1024 default:
1025 assert(0);
1026 }
1027 }
1028 }
1029
1030 static const void *
1031 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1032 unsigned index)
1033 {
1034 struct pipe_constant_buffer *cb = &buf->cb[index];
1035 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1036
1037 if (rsrc)
1038 return rsrc->bo->cpu;
1039 else if (cb->user_buffer)
1040 return cb->user_buffer;
1041 else
1042 unreachable("No constant buffer");
1043 }
1044
1045 void
1046 panfrost_emit_const_buf(struct panfrost_batch *batch,
1047 enum pipe_shader_type stage,
1048 struct mali_vertex_tiler_postfix *postfix)
1049 {
1050 struct panfrost_context *ctx = batch->ctx;
1051 struct panfrost_shader_variants *all = ctx->shader[stage];
1052
1053 if (!all)
1054 return;
1055
1056 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1057
1058 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1059
1060 /* Uniforms are implicitly UBO #0 */
1061 bool has_uniforms = buf->enabled_mask & (1 << 0);
1062
1063 /* Allocate room for the sysval and the uniforms */
1064 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1065 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1066 size_t size = sys_size + uniform_size;
1067 struct panfrost_transfer transfer =
1068 panfrost_pool_alloc_aligned(&batch->pool, size, 16);
1069
1070 /* Upload sysvals requested by the shader */
1071 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1072
1073 /* Upload uniforms */
1074 if (has_uniforms && uniform_size) {
1075 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1076 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1077 }
1078
1079 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1080 * uploaded, so it's always included. The count is the highest UBO
1081 * addressable -- gaps are included. */
1082
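        /* e.g. enabled_mask = 0b1001 (uniforms plus UBO 3) gives ubo_count = 4;
         * the disabled slots 1 and 2 get null descriptors in the loop below. */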
1083 unsigned ubo_count = 32 - __builtin_clz(buf->enabled_mask | 1);
1084
1085 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1086 struct panfrost_transfer ubos =
1087 panfrost_pool_alloc_aligned(&batch->pool, sz,
1088 MALI_UNIFORM_BUFFER_LENGTH);
1089
1090 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1091
1092 /* Upload uniforms as a UBO */
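        /* Note UBO #0 spans both the sysvals and the user uniforms uploaded
         * above, in that order. */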
1093
1094 if (size) {
1095 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1096 cfg.entries = DIV_ROUND_UP(size, 16);
1097 cfg.pointer = transfer.gpu;
1098 }
1099 } else {
1100 *ubo_ptr = 0;
1101 }
1102
1103 /* The rest are honest-to-goodness UBOs */
1104
1105 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1106 size_t usz = buf->cb[ubo].buffer_size;
1107 bool enabled = buf->enabled_mask & (1 << ubo);
1108 bool empty = usz == 0;
1109
1110 if (!enabled || empty) {
1111 ubo_ptr[ubo] = 0;
1112 continue;
1113 }
1114
1115 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1116 cfg.entries = DIV_ROUND_UP(usz, 16);
1117 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1118 stage, buf, ubo);
1119 }
1120 }
1121
1122 postfix->uniforms = transfer.gpu;
1123 postfix->uniform_buffers = ubos.gpu;
1124
1125 buf->dirty_mask = 0;
1126 }
1127
1128 void
1129 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1130 const struct pipe_grid_info *info,
1131 struct midgard_payload_vertex_tiler *vtp)
1132 {
1133 struct panfrost_context *ctx = batch->ctx;
1134 struct panfrost_device *dev = pan_device(ctx->base.screen);
1135 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1136 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1137 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1138 128));
1139
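        /* (1 << log2_instances) is an upper bound on the number of workgroups,
         * with each grid dimension rounded up to a power of two, so the
         * allocation below covers the worst case across all cores. */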
1140 unsigned log2_instances =
1141 util_logbase2_ceil(info->grid[0]) +
1142 util_logbase2_ceil(info->grid[1]) +
1143 util_logbase2_ceil(info->grid[2]);
1144
1145 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
1146 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1147 shared_size,
1148 1);
1149
1150 struct mali_shared_memory shared = {
1151 .shared_memory = bo->gpu,
1152 .shared_workgroup_count = log2_instances,
1153 .shared_shift = util_logbase2(single_size) + 1
1154 };
1155
1156 vtp->postfix.shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared,
1157 sizeof(shared), 64);
1158 }
1159
1160 static mali_ptr
1161 panfrost_get_tex_desc(struct panfrost_batch *batch,
1162 enum pipe_shader_type st,
1163 struct panfrost_sampler_view *view)
1164 {
1165 if (!view)
1166 return (mali_ptr) 0;
1167
1168 struct pipe_sampler_view *pview = &view->base;
1169 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1170
1171 /* Add the BO to the job so it's retained until the job is done. */
1172
1173 panfrost_batch_add_bo(batch, rsrc->bo,
1174 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1175 panfrost_bo_access_for_stage(st));
1176
1177 panfrost_batch_add_bo(batch, view->bo,
1178 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1179 panfrost_bo_access_for_stage(st));
1180
1181 return view->bo->gpu;
1182 }
1183
1184 static void
1185 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1186 struct pipe_context *pctx)
1187 {
1188 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1189 if (view->texture_bo != rsrc->bo->gpu ||
1190 view->modifier != rsrc->modifier) {
1191 panfrost_bo_unreference(view->bo);
1192 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1193 }
1194 }
1195
1196 void
1197 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1198 enum pipe_shader_type stage,
1199 struct mali_vertex_tiler_postfix *postfix)
1200 {
1201 struct panfrost_context *ctx = batch->ctx;
1202 struct panfrost_device *device = pan_device(ctx->base.screen);
1203
1204 if (!ctx->sampler_view_count[stage])
1205 return;
1206
1207 if (device->quirks & IS_BIFROST) {
1208 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1209 MALI_BIFROST_TEXTURE_LENGTH *
1210 ctx->sampler_view_count[stage],
1211 MALI_BIFROST_TEXTURE_LENGTH);
1212
1213 struct mali_bifrost_texture_packed *out =
1214 (struct mali_bifrost_texture_packed *) T.cpu;
1215
1216 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1217 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1218 struct pipe_sampler_view *pview = &view->base;
1219 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1220
1221 panfrost_update_sampler_view(view, &ctx->base);
1222 out[i] = view->bifrost_descriptor;
1223
1224 /* Add the BOs to the job so they are retained until the job is done. */
1225
1226 panfrost_batch_add_bo(batch, rsrc->bo,
1227 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1228 panfrost_bo_access_for_stage(stage));
1229
1230 panfrost_batch_add_bo(batch, view->bo,
1231 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1232 panfrost_bo_access_for_stage(stage));
1233 }
1234
1235 postfix->textures = T.gpu;
1236 } else {
1237 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1238
1239 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1240 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1241
1242 panfrost_update_sampler_view(view, &ctx->base);
1243
1244 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1245 }
1246
1247 postfix->textures = panfrost_pool_upload_aligned(&batch->pool,
1248 trampolines,
1249 sizeof(uint64_t) *
1250 ctx->sampler_view_count[stage],
1251 sizeof(uint64_t));
1252 }
1253 }
1254
1255 void
1256 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1257 enum pipe_shader_type stage,
1258 struct mali_vertex_tiler_postfix *postfix)
1259 {
1260 struct panfrost_context *ctx = batch->ctx;
1261
1262 if (!ctx->sampler_count[stage])
1263 return;
1264
1265 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1266 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1267
1268 size_t sz = desc_size * ctx->sampler_count[stage];
1269 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool, sz, desc_size);
1270 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1271
1272 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1273 out[i] = ctx->samplers[stage][i]->hw;
1274
1275 postfix->sampler_descriptor = T.gpu;
1276 }
1277
1278 void
1279 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1280 struct mali_vertex_tiler_postfix *vertex_postfix)
1281 {
1282 struct panfrost_context *ctx = batch->ctx;
1283 struct panfrost_vertex_state *so = ctx->vertex;
1284 struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1285
1286 unsigned instance_shift = vertex_postfix->instance_shift;
1287 unsigned instance_odd = vertex_postfix->instance_odd;
1288
1289 /* Worst case: everything is NPOT, which is only possible if instancing
1290          * is enabled. Otherwise a single record is guaranteed */
1291 bool could_npot = instance_shift || instance_odd;
1292
1293 struct panfrost_transfer S = panfrost_pool_alloc_aligned(&batch->pool,
1294 MALI_ATTRIBUTE_BUFFER_LENGTH * vs->attribute_count *
1295 (could_npot ? 2 : 1),
1296 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1297
1298 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1299 MALI_ATTRIBUTE_LENGTH * vs->attribute_count,
1300 MALI_ATTRIBUTE_LENGTH);
1301
1302 struct mali_attribute_buffer_packed *bufs =
1303 (struct mali_attribute_buffer_packed *) S.cpu;
1304
1305 struct mali_attribute_packed *out =
1306 (struct mali_attribute_packed *) T.cpu;
1307
1308 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1309 unsigned k = 0;
1310
1311 for (unsigned i = 0; i < so->num_elements; ++i) {
1312 /* We map buffers 1:1 with the attributes, which
1313 * means duplicating some vertex buffers (who cares? aside from
1314 * maybe some caching implications but I somehow doubt that
1315 * matters) */
1316
1317 struct pipe_vertex_element *elem = &so->pipe[i];
1318 unsigned vbi = elem->vertex_buffer_index;
1319 attrib_to_buffer[i] = k;
1320
1321 if (!(ctx->vb_mask & (1 << vbi)))
1322 continue;
1323
1324 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1325 struct panfrost_resource *rsrc;
1326
1327 rsrc = pan_resource(buf->buffer.resource);
1328 if (!rsrc)
1329 continue;
1330
1331 /* Add a dependency of the batch on the vertex buffer */
1332 panfrost_batch_add_bo(batch, rsrc->bo,
1333 PAN_BO_ACCESS_SHARED |
1334 PAN_BO_ACCESS_READ |
1335 PAN_BO_ACCESS_VERTEX_TILER);
1336
1337 /* Mask off lower bits, see offset fixup below */
1338 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1339 mali_ptr addr = raw_addr & ~63;
1340
1341 /* Since we advanced the base pointer, we shrink the buffer
1342 * size, but add the offset we subtracted */
1343 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1344 - buf->buffer_offset;
1345
1346 /* When there is a divisor, the hardware-level divisor is
1347 * the product of the instance divisor and the padded count */
1348 unsigned divisor = elem->instance_divisor;
1349 unsigned hw_divisor = ctx->padded_count * divisor;
1350 unsigned stride = buf->stride;
1351
1352 /* If there's a divisor(=1) but no instancing, we want every
1353 * attribute to be the same */
1354
1355 if (divisor && ctx->instance_count == 1)
1356 stride = 0;
1357
1358 if (!divisor || ctx->instance_count <= 1) {
1359 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1360 if (ctx->instance_count > 1)
1361 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1362
1363 cfg.pointer = addr;
1364 cfg.stride = stride;
1365 cfg.size = size;
1366 cfg.divisor_r = instance_shift;
1367 cfg.divisor_p = instance_odd;
1368 }
1369 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1370 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1371 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1372 cfg.pointer = addr;
1373 cfg.stride = stride;
1374 cfg.size = size;
1375 cfg.divisor_r = __builtin_ctz(hw_divisor);
1376 }
1377
1378 } else {
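                        /* A non-power-of-two divisor is implemented with a
                         * fixed-point reciprocal ("magic number"), which needs
                         * a second, continuation record for the extra
                         * parameters. */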
1379 unsigned shift = 0, extra_flags = 0;
1380
1381 unsigned magic_divisor =
1382 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
1383
1384 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1385 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1386 cfg.pointer = addr;
1387 cfg.stride = stride;
1388 cfg.size = size;
1389
1390 cfg.divisor_r = shift;
1391 cfg.divisor_e = extra_flags;
1392 }
1393
1394 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1395 cfg.divisor_numerator = magic_divisor;
1396 cfg.divisor = divisor;
1397 }
1398
1399 ++k;
1400 }
1401
1402 ++k;
1403 }
1404
1405 /* Add special gl_VertexID/gl_InstanceID buffers */
1406
1407 if (unlikely(vs->attribute_count >= PAN_VERTEX_ID)) {
1408 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1409
1410 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1411 cfg.buffer_index = k++;
1412 cfg.format = so->formats[PAN_VERTEX_ID];
1413 }
1414
1415 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1416
1417 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1418 cfg.buffer_index = k++;
1419 cfg.format = so->formats[PAN_INSTANCE_ID];
1420 }
1421 }
1422
1423 /* Attribute addresses require 64-byte alignment, so let:
1424 *
1425 * base' = base & ~63 = base - (base & 63)
1426 * offset' = offset + (base & 63)
1427 *
1428 * Since base' + offset' = base + offset, these are equivalent
1429 * addressing modes and now base is 64 aligned.
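         *
         * e.g. base = 0x10027, offset = 4 becomes base' = 0x10000, offset' = 0x2b.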
1430 */
1431
1432 unsigned start = vertex_postfix->offset_start;
1433
1434 for (unsigned i = 0; i < so->num_elements; ++i) {
1435 unsigned vbi = so->pipe[i].vertex_buffer_index;
1436 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1437
1438 /* Adjust by the masked off bits of the offset. Make sure we
1439 * read src_offset from so->hw (which is not GPU visible)
1440 * rather than target (which is) due to caching effects */
1441
1442 unsigned src_offset = so->pipe[i].src_offset;
1443
1444 /* BOs aligned to 4k so guaranteed aligned to 64 */
1445 src_offset += (buf->buffer_offset & 63);
1446
1447 /* Also, somewhat obscurely per-instance data needs to be
1448 * offset in response to a delayed start in an indexed draw */
1449
1450 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1451 src_offset -= buf->stride * start;
1452
1453 pan_pack(out + i, ATTRIBUTE, cfg) {
1454 cfg.buffer_index = attrib_to_buffer[i];
1455 cfg.format = so->formats[i];
1456 cfg.offset = src_offset;
1457 }
1458 }
1459
1460 vertex_postfix->attributes = S.gpu;
1461 vertex_postfix->attribute_meta = T.gpu;
1462 }
1463
1464 static mali_ptr
1465 panfrost_emit_varyings(struct panfrost_batch *batch,
1466 struct mali_attribute_buffer_packed *slot,
1467 unsigned stride, unsigned count)
1468 {
1469 unsigned size = stride * count;
1470 mali_ptr ptr = panfrost_pool_alloc_aligned(&batch->invisible_pool, size, 64).gpu;
1471
1472 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1473 cfg.stride = stride;
1474 cfg.size = size;
1475 cfg.pointer = ptr;
1476 }
1477
1478 return ptr;
1479 }
1480
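/* The streamout buffer pointer gets rounded down to 64 bytes in
 * panfrost_emit_streamout below; this returns the low bits that were masked
 * off, so the caller can add them back into the varying record's offset. */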
1481 static unsigned
1482 panfrost_streamout_offset(unsigned stride, unsigned offset,
1483 struct pipe_stream_output_target *target)
1484 {
1485 return (target->buffer_offset + (offset * stride * 4)) & 63;
1486 }
1487
1488 static void
1489 panfrost_emit_streamout(struct panfrost_batch *batch,
1490 struct mali_attribute_buffer_packed *slot,
1491 unsigned stride_words, unsigned offset, unsigned count,
1492 struct pipe_stream_output_target *target)
1493 {
1494 unsigned stride = stride_words * 4;
1495 unsigned max_size = target->buffer_size;
1496 unsigned expected_size = stride * count;
1497
1498 /* Grab the BO and bind it to the batch */
1499 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1500
1501 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1502 * the perspective of the TILER and FRAGMENT.
1503 */
1504 panfrost_batch_add_bo(batch, bo,
1505 PAN_BO_ACCESS_SHARED |
1506 PAN_BO_ACCESS_RW |
1507 PAN_BO_ACCESS_VERTEX_TILER |
1508 PAN_BO_ACCESS_FRAGMENT);
1509
1510 /* We will have an offset applied to get alignment */
1511 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
1512
1513 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1514 cfg.pointer = (addr & ~63);
1515 cfg.stride = stride;
1516 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1517 }
1518 }
1519
1520 static bool
1521 has_point_coord(unsigned mask, gl_varying_slot loc)
1522 {
1523 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1524 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1525 else if (loc == VARYING_SLOT_PNTC)
1526 return (mask & (1 << 8));
1527 else
1528 return false;
1529 }
1530
1531 /* Helpers for manipulating stream out information so we can pack varyings
1532 * accordingly. Compute the src_offset for a given captured varying */
1533
1534 static struct pipe_stream_output *
1535 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1536 {
1537 for (unsigned i = 0; i < info->num_outputs; ++i) {
1538 if (info->output[i].register_index == loc)
1539 return &info->output[i];
1540 }
1541
1542 unreachable("Varying not captured");
1543 }
1544
1545 static unsigned
1546 pan_varying_size(enum mali_format fmt)
1547 {
1548 unsigned type = MALI_EXTRACT_TYPE(fmt);
1549 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1550 unsigned bits = MALI_EXTRACT_BITS(fmt);
1551 unsigned bpc = 0;
1552
1553 if (bits == MALI_CHANNEL_FLOAT) {
1554 /* No doubles */
1555 bool fp16 = (type == MALI_FORMAT_SINT);
1556 assert(fp16 || (type == MALI_FORMAT_UNORM));
1557
1558 bpc = fp16 ? 2 : 4;
1559 } else {
1560 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1561
1562 /* See the enums */
1563 bits = 1 << bits;
1564 assert(bits >= 8);
1565 bpc = bits / 8;
1566 }
1567
1568 return bpc * chan;
1569 }
1570
1571 /* Indices for named (non-XFB) varyings that are present. These are packed
1572 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1573  * PAN_VARY_*). This has the nice property that you can look up the buffer index
1574 * of a given special field given a shift S by:
1575 *
1576 * idx = popcount(P & ((1 << S) - 1))
1577 *
1578  * That is, count all of the varyings that come earlier; that count is the
1579  * new index. Likewise, the total number of special buffers required is
1580  * simply popcount(P)
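 *
 * For example, with P = GENERAL | POSITION | PSIZ = 0b111, the PSIZ buffer
 * (S = 2) sits at index popcount(0b111 & 0b011) = 2, and popcount(0b111) = 3
 * special buffers are needed in total.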
1581 */
1582
1583 enum pan_special_varying {
1584 PAN_VARY_GENERAL = 0,
1585 PAN_VARY_POSITION = 1,
1586 PAN_VARY_PSIZ = 2,
1587 PAN_VARY_PNTCOORD = 3,
1588 PAN_VARY_FACE = 4,
1589 PAN_VARY_FRAGCOORD = 5,
1590
1591 /* Keep last */
1592 PAN_VARY_MAX,
1593 };
1594
1595 /* Given a varying, figure out which index it corresponds to */
1596
1597 static inline unsigned
1598 pan_varying_index(unsigned present, enum pan_special_varying v)
1599 {
1600 unsigned mask = (1 << v) - 1;
1601 return util_bitcount(present & mask);
1602 }
1603
1604 /* Get the base offset for XFB buffers, which by convention come after
1605 * everything else. Wrapper function for semantic reasons; by construction this
1606 * is just popcount. */
1607
1608 static inline unsigned
1609 pan_xfb_base(unsigned present)
1610 {
1611 return util_bitcount(present);
1612 }
1613
1614 /* Computes the present mask for varyings so we can start emitting varying records */
1615
1616 static inline unsigned
1617 pan_varying_present(
1618 struct panfrost_shader_state *vs,
1619 struct panfrost_shader_state *fs,
1620 unsigned quirks)
1621 {
1622 /* At the moment we always emit general and position buffers. Not
1623 * strictly necessary but usually harmless */
1624
1625 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1626
1627 /* Enable special buffers by the shader info */
1628
1629 if (vs->writes_point_size)
1630 present |= (1 << PAN_VARY_PSIZ);
1631
1632 if (fs->reads_point_coord)
1633 present |= (1 << PAN_VARY_PNTCOORD);
1634
1635 if (fs->reads_face)
1636 present |= (1 << PAN_VARY_FACE);
1637
1638 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1639 present |= (1 << PAN_VARY_FRAGCOORD);
1640
1641 /* Also, if we have a point sprite, we need a point coord buffer */
1642
1643 for (unsigned i = 0; i < fs->varying_count; i++) {
1644 gl_varying_slot loc = fs->varyings_loc[i];
1645
1646 if (has_point_coord(fs->point_sprite_mask, loc))
1647 present |= (1 << PAN_VARY_PNTCOORD);
1648 }
1649
1650 return present;
1651 }
1652
1653 /* Emitters for varying records */
1654
1655 static void
1656 pan_emit_vary(struct mali_attribute_packed *out,
1657 unsigned present, enum pan_special_varying buf,
1658 unsigned quirks, enum mali_format format,
1659 unsigned offset)
1660 {
1661 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1662 unsigned swizzle = quirks & HAS_SWIZZLES ?
1663 panfrost_get_default_swizzle(nr_channels) :
1664 panfrost_bifrost_swizzle(nr_channels);
1665
1666 pan_pack(out, ATTRIBUTE, cfg) {
1667 cfg.buffer_index = pan_varying_index(present, buf);
1668 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1669 cfg.format = (format << 12) | swizzle;
1670 cfg.offset = offset;
1671 }
1672 }
1673
1674 /* General varying that is unused */
1675
1676 static void
1677 pan_emit_vary_only(struct mali_attribute_packed *out,
1678 unsigned present, unsigned quirks)
1679 {
1680 pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
1681 }
1682
1683 /* Special records */
1684
1685 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1686 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1687 [PAN_VARY_PSIZ] = MALI_R16F,
1688 [PAN_VARY_PNTCOORD] = MALI_R16F,
1689 [PAN_VARY_FACE] = MALI_R32I,
1690 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1691 };
1692
1693 static void
1694 pan_emit_vary_special(struct mali_attribute_packed *out,
1695 unsigned present, enum pan_special_varying buf,
1696 unsigned quirks)
1697 {
1698 assert(buf < PAN_VARY_MAX);
1699 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1700 }
1701
1702 static enum mali_format
1703 pan_xfb_format(enum mali_format format, unsigned nr)
1704 {
1705 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1706 return MALI_R32F | MALI_NR_CHANNELS(nr);
1707 else
1708 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1709 }
1710
1711 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1712 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1713 * value. */
1714
1715 static void
1716 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1717 unsigned present,
1718 unsigned max_xfb,
1719 unsigned *streamout_offsets,
1720 unsigned quirks,
1721 enum mali_format format,
1722 struct pipe_stream_output o)
1723 {
1724 unsigned swizzle = quirks & HAS_SWIZZLES ?
1725 panfrost_get_default_swizzle(o.num_components) :
1726 panfrost_bifrost_swizzle(o.num_components);
1727
1728 pan_pack(out, ATTRIBUTE, cfg) {
1729 /* XFB buffers come after everything else */
1730 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1731 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1732
1733 /* Override number of channels and precision to highp */
1734 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1735
1736 /* Combine the output's destination offset with the streamout buffer offset */
1737 cfg.offset = (o.dst_offset * 4) /* dwords */
1738 + streamout_offsets[o.output_buffer];
1739 }
1740 }
1741
1742 /* Determine if we should capture a varying for XFB. This requires actually
1743  * having a buffer for it. If we don't capture it, we'll fall back to a general
1744 * varying path (linked or unlinked, possibly discarding the write) */
1745
1746 static bool
1747 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1748 unsigned loc, unsigned max_xfb)
1749 {
1750 if (!(xfb->so_mask & (1ll << loc)))
1751 return false;
1752
1753 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1754 return o->output_buffer < max_xfb;
1755 }
1756
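/* Emit a general-purpose varying, linking it against the other shader stage's
 * varyings. If no matching slot exists in the other stage, the write is
 * discarded; otherwise, when should_alloc is set, space is reserved in the
 * general varying buffer via a watermark allocation. */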
1757 static void
1758 pan_emit_general_varying(struct mali_attribute_packed *out,
1759 struct panfrost_shader_state *other,
1760 struct panfrost_shader_state *xfb,
1761 gl_varying_slot loc,
1762 enum mali_format format,
1763 unsigned present,
1764 unsigned quirks,
1765 unsigned *gen_offsets,
1766 enum mali_format *gen_formats,
1767 unsigned *gen_stride,
1768 unsigned idx,
1769 bool should_alloc)
1770 {
1771 /* Check if we're linked */
1772 signed other_idx = -1;
1773
1774 for (unsigned j = 0; j < other->varying_count; ++j) {
1775 if (other->varyings_loc[j] == loc) {
1776 other_idx = j;
1777 break;
1778 }
1779 }
1780
1781 if (other_idx < 0) {
1782 pan_emit_vary_only(out, present, quirks);
1783 return;
1784 }
1785
1786 unsigned offset = gen_offsets[other_idx];
1787
1788 if (should_alloc) {
1789 /* We're linked, so allocate space via a watermark allocation */
1790 enum mali_format alt = other->varyings[other_idx];
1791
1792 /* Do interpolation at minimum precision */
1793 unsigned size_main = pan_varying_size(format);
1794 unsigned size_alt = pan_varying_size(alt);
1795 unsigned size = MIN2(size_main, size_alt);
1796
1797 /* If a varying is marked for XFB but not actually captured, we
1798 * should match the format to the format that would otherwise
1799 * be used for XFB, since dEQP checks for invariance here. It's
1800 * unclear if this is required by the spec. */
1801
1802 if (xfb->so_mask & (1ull << loc)) {
1803 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1804 format = pan_xfb_format(format, o->num_components);
1805 size = pan_varying_size(format);
1806 } else if (size == size_alt) {
1807 format = alt;
1808 }
1809
1810 gen_offsets[idx] = *gen_stride;
1811 gen_formats[other_idx] = format;
1812 offset = *gen_stride;
1813 *gen_stride += size;
1814 }
1815
1816 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1817 }
1818
1819 /* Higher-level wrapper around all of the above, classifying a varying into one
1820  * of the categories handled above */
1821
1822 static void
1823 panfrost_emit_varying(
1824 struct mali_attribute_packed *out,
1825 struct panfrost_shader_state *stage,
1826 struct panfrost_shader_state *other,
1827 struct panfrost_shader_state *xfb,
1828 unsigned present,
1829 unsigned max_xfb,
1830 unsigned *streamout_offsets,
1831 unsigned quirks,
1832 unsigned *gen_offsets,
1833 enum mali_format *gen_formats,
1834 unsigned *gen_stride,
1835 unsigned idx,
1836 bool should_alloc,
1837 bool is_fragment)
1838 {
1839 gl_varying_slot loc = stage->varyings_loc[idx];
1840 enum mali_format format = stage->varyings[idx];
1841
1842 /* Override format to match linkage */
1843 if (!should_alloc && gen_formats[idx])
1844 format = gen_formats[idx];
1845
1846 if (has_point_coord(stage->point_sprite_mask, loc)) {
1847 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1848 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1849 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1850 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1851 } else if (loc == VARYING_SLOT_POS) {
1852 if (is_fragment)
1853 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1854 else
1855 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1856 } else if (loc == VARYING_SLOT_PSIZ) {
1857 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1858 } else if (loc == VARYING_SLOT_PNTC) {
1859 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1860 } else if (loc == VARYING_SLOT_FACE) {
1861 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1862 } else {
1863 pan_emit_general_varying(out, other, xfb, loc, format, present,
1864 quirks, gen_offsets, gen_formats, gen_stride,
1865 idx, should_alloc);
1866 }
1867 }
1868
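/* Emit the attribute buffer record backing a special (hardware-generated)
 * varying such as the point coordinate or front-facing input, if present. */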
1869 static void
1870 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1871 unsigned present,
1872 enum pan_special_varying v,
1873 unsigned special)
1874 {
1875 if (present & (1 << v)) {
1876 unsigned idx = pan_varying_index(present, v);
1877
1878 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1879 cfg.special = special;
1880 cfg.type = 0;
1881 }
1882 }
1883 }
1884
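/* Emit the varying descriptors and their backing buffers for a draw: classify
 * each vertex/fragment varying, allocate the general and special varying
 * buffers, and hook up any transform feedback targets. */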
1885 void
1886 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1887 unsigned vertex_count,
1888 struct mali_vertex_tiler_postfix *vertex_postfix,
1889 struct mali_vertex_tiler_postfix *tiler_postfix,
1890 union midgard_primitive_size *primitive_size)
1891 {
1892 /* Load the shaders */
1893 struct panfrost_context *ctx = batch->ctx;
1894 struct panfrost_device *dev = pan_device(ctx->base.screen);
1895 struct panfrost_shader_state *vs, *fs;
1896 size_t vs_size, fs_size;
1897
1898 /* Allocate the varying descriptor */
1899
1900 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1901 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1902 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1903 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1904
1905 struct panfrost_transfer trans = panfrost_pool_alloc_aligned(
1906 &batch->pool, vs_size + fs_size, MALI_ATTRIBUTE_LENGTH);
1907
1908 struct pipe_stream_output_info *so = &vs->stream_output;
1909 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1910
1911 /* Check if this varying is linked by us. This is the case for
1912 * general-purpose, non-captured varyings. If it is, link it. If it's
1913 * not, use the provided stream out information to determine the
1914 * offset, since it was already linked for us. */
1915
1916 unsigned gen_offsets[32];
1917 enum mali_format gen_formats[32];
1918 memset(gen_offsets, 0, sizeof(gen_offsets));
1919 memset(gen_formats, 0, sizeof(gen_formats));
1920
1921 unsigned gen_stride = 0;
1922 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1923 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1924
1925 unsigned streamout_offsets[32];
1926
1927 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1928 streamout_offsets[i] = panfrost_streamout_offset(
1929 so->stride[i],
1930 ctx->streamout.offsets[i],
1931 ctx->streamout.targets[i]);
1932 }
1933
1934 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1935 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1936
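/* Emit the varying records: vertex shader varyings first (allocating general
 * slots as we go), then fragment shader varyings reusing the offsets linked
 * above. */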
1937 for (unsigned i = 0; i < vs->varying_count; i++) {
1938 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1939 ctx->streamout.num_targets, streamout_offsets,
1940 dev->quirks,
1941 gen_offsets, gen_formats, &gen_stride, i, true, false);
1942 }
1943
1944 for (unsigned i = 0; i < fs->varying_count; i++) {
1945 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
1946 ctx->streamout.num_targets, streamout_offsets,
1947 dev->quirks,
1948 gen_offsets, gen_formats, &gen_stride, i, false, true);
1949 }
1950
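/* Allocate the attribute buffer descriptors: the special/general buffers come
 * first, followed by one buffer per enabled transform feedback target. */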
1951 unsigned xfb_base = pan_xfb_base(present);
1952 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1953 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets),
1954 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1955 struct mali_attribute_buffer_packed *varyings =
1956 (struct mali_attribute_buffer_packed *) T.cpu;
1957
1958 /* Emit the stream out buffers */
1959
1960 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
1961 ctx->vertex_count);
1962
1963 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1964 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
1965 so->stride[i],
1966 ctx->streamout.offsets[i],
1967 out_count,
1968 ctx->streamout.targets[i]);
1969 }
1970
1971 panfrost_emit_varyings(batch,
1972 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
1973 gen_stride, vertex_count);
1974
1975 /* fp32 vec4 gl_Position */
1976 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
1977 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
1978 sizeof(float) * 4, vertex_count);
1979
1980 if (present & (1 << PAN_VARY_PSIZ)) {
1981 primitive_size->pointer = panfrost_emit_varyings(batch,
1982 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
1983 2, vertex_count);
1984 }
1985
1986 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
1987 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
1988 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
1989
1990 vertex_postfix->varyings = T.gpu;
1991 tiler_postfix->varyings = T.gpu;
1992
1993 vertex_postfix->varying_meta = trans.gpu;
1994 tiler_postfix->varying_meta = trans.gpu + vs_size;
1995 }
1996
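/* Assemble the final vertex and tiler payloads, using the Bifrost or Midgard
 * layout as appropriate, and queue them on the batch's scoreboard. For normal
 * draws, the tiler job is skipped when rasterizer discard is enabled. */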
1997 void
1998 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
1999 struct mali_vertex_tiler_prefix *vertex_prefix,
2000 struct mali_vertex_tiler_postfix *vertex_postfix,
2001 struct mali_vertex_tiler_prefix *tiler_prefix,
2002 struct mali_vertex_tiler_postfix *tiler_postfix,
2003 union midgard_primitive_size *primitive_size)
2004 {
2005 struct panfrost_context *ctx = batch->ctx;
2006 struct panfrost_device *device = pan_device(ctx->base.screen);
2007 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2008 struct bifrost_payload_vertex bifrost_vertex = {0,};
2009 struct bifrost_payload_tiler bifrost_tiler = {0,};
2010 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2011 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2012 void *vp, *tp;
2013 size_t vp_size, tp_size;
2014
2015 if (device->quirks & IS_BIFROST) {
2016 bifrost_vertex.prefix = *vertex_prefix;
2017 bifrost_vertex.postfix = *vertex_postfix;
2018 vp = &bifrost_vertex;
2019 vp_size = sizeof(bifrost_vertex);
2020
2021 bifrost_tiler.prefix = *tiler_prefix;
2022 bifrost_tiler.tiler.primitive_size = *primitive_size;
2023 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2024 bifrost_tiler.postfix = *tiler_postfix;
2025 tp = &bifrost_tiler;
2026 tp_size = sizeof(bifrost_tiler);
2027 } else {
2028 midgard_vertex.prefix = *vertex_prefix;
2029 midgard_vertex.postfix = *vertex_postfix;
2030 vp = &midgard_vertex;
2031 vp_size = sizeof(midgard_vertex);
2032
2033 midgard_tiler.prefix = *tiler_prefix;
2034 midgard_tiler.postfix = *tiler_postfix;
2035 midgard_tiler.primitive_size = *primitive_size;
2036 tp = &midgard_tiler;
2037 tp_size = sizeof(midgard_tiler);
2038 }
2039
2040 if (wallpapering) {
2041 /* Inject in reverse order, with "predicted" job indices.
2042 * THIS IS A HACK XXX */
2043 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2044 batch->scoreboard.job_index + 2, tp, tp_size, true);
2045 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2046 vp, vp_size, true);
2047 return;
2048 }
2049
2050 /* If rasterizer discard is enabled, only submit the vertex job */
2051
2052 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2053 vp, vp_size, false);
2054
2055 if (ctx->rasterizer->base.rasterizer_discard)
2056 return;
2057
2058 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2059 false);
2060 }
2061
2062 /* TODO: stop hardcoding this */
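/* The table below appears to encode sample positions in 1/256-pixel units,
 * with (128, 128) being the pixel centre; the layout of the remaining slots
 * is not fully understood, hence the TODO above. */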
2063 mali_ptr
2064 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2065 {
2066 uint16_t locations[] = {
2067 128, 128,
2068 0, 256,
2069 0, 256,
2070 0, 256,
2071 0, 256,
2072 0, 256,
2073 0, 256,
2074 0, 256,
2075 0, 256,
2076 0, 256,
2077 0, 256,
2078 0, 256,
2079 0, 256,
2080 0, 256,
2081 0, 256,
2082 0, 256,
2083 0, 256,
2084 0, 256,
2085 0, 256,
2086 0, 256,
2087 0, 256,
2088 0, 256,
2089 0, 256,
2090 0, 256,
2091 0, 256,
2092 0, 256,
2093 0, 256,
2094 0, 256,
2095 0, 256,
2096 0, 256,
2097 0, 256,
2098 0, 256,
2099 128, 128,
2100 0, 0,
2101 0, 0,
2102 0, 0,
2103 0, 0,
2104 0, 0,
2105 0, 0,
2106 0, 0,
2107 0, 0,
2108 0, 0,
2109 0, 0,
2110 0, 0,
2111 0, 0,
2112 0, 0,
2113 0, 0,
2114 0, 0,
2115 };
2116
2117 return panfrost_pool_upload_aligned(&batch->pool, locations, 96 * sizeof(uint16_t), 64);
2118 }