panfrost: Combine frag_shader_meta_init functions
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 struct mali_shared_memory shared = {
62 .shared_workgroup_count = ~0,
63 };
64
65 if (batch->stack_size) {
66 struct panfrost_bo *stack =
67 panfrost_batch_get_scratchpad(batch, batch->stack_size,
68 dev->thread_tls_alloc,
69 dev->core_count);
70
71 shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
72 shared.scratchpad = stack->gpu;
73 }
74
75 postfix->shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared, sizeof(shared), 64);
76 }
77
78 static void
79 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_postfix *postfix)
81 {
82 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
83 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
84 }
85
86 static void
87 panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
88 struct mali_vertex_tiler_prefix *prefix,
89 struct mali_vertex_tiler_postfix *postfix)
90 {
91 postfix->gl_enables |= 0x7;
92 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
93 rasterizer->base.front_ccw);
94 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
95 (rasterizer->base.cull_face & PIPE_FACE_FRONT));
96 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
97 (rasterizer->base.cull_face & PIPE_FACE_BACK));
98 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
99 rasterizer->base.flatshade_first);
100 }
101
102 void
103 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
104 struct mali_vertex_tiler_prefix *prefix,
105 union midgard_primitive_size *primitive_size)
106 {
107 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
108
109 if (!panfrost_writes_point_size(ctx)) {
110 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
111 rasterizer->base.point_size :
112 rasterizer->base.line_width;
113
114 primitive_size->constant = val;
115 }
116 }
117
118 static void
119 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
120 struct mali_vertex_tiler_postfix *postfix)
121 {
122 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
123 if (ctx->occlusion_query) {
124 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
125 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
126 PAN_BO_ACCESS_SHARED |
127 PAN_BO_ACCESS_RW |
128 PAN_BO_ACCESS_FRAGMENT);
129 } else {
130 postfix->occlusion_counter = 0;
131 }
132 }
133
134 void
135 panfrost_vt_init(struct panfrost_context *ctx,
136 enum pipe_shader_type stage,
137 struct mali_vertex_tiler_prefix *prefix,
138 struct mali_vertex_tiler_postfix *postfix)
139 {
140 struct panfrost_device *device = pan_device(ctx->base.screen);
141
142 if (!ctx->shader[stage])
143 return;
144
145 memset(prefix, 0, sizeof(*prefix));
146 memset(postfix, 0, sizeof(*postfix));
147
148 if (device->quirks & IS_BIFROST) {
149 postfix->gl_enables = 0x2;
150 panfrost_vt_emit_shared_memory(ctx, postfix);
151 } else {
152 postfix->gl_enables = 0x6;
153 panfrost_vt_attach_framebuffer(ctx, postfix);
154 }
155
156 if (stage == PIPE_SHADER_FRAGMENT) {
157 panfrost_vt_update_occlusion_query(ctx, postfix);
158 panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
159 }
160 }
161
162 static unsigned
163 panfrost_translate_index_size(unsigned size)
164 {
165 switch (size) {
166 case 1:
167 return MALI_DRAW_INDEXED_UINT8;
168
169 case 2:
170 return MALI_DRAW_INDEXED_UINT16;
171
172 case 4:
173 return MALI_DRAW_INDEXED_UINT32;
174
175 default:
176 unreachable("Invalid index size");
177 }
178 }
179
180 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
181 * good for the duration of the draw (transient), could last longer. Also get
182 * the bounds on the index buffer for the range accessed by the draw. We do
183 * these operations together because there are natural optimizations which
184 * require them to be together. */
185
186 static mali_ptr
187 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
188 const struct pipe_draw_info *info,
189 unsigned *min_index, unsigned *max_index)
190 {
191 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
192 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
193 off_t offset = info->start * info->index_size;
194 bool needs_indices = true;
195 mali_ptr out = 0;
196
197 if (info->max_index != ~0u) {
198 *min_index = info->min_index;
199 *max_index = info->max_index;
200 needs_indices = false;
201 }
202
203 if (!info->has_user_indices) {
204 /* Only resources can be directly mapped */
205 panfrost_batch_add_bo(batch, rsrc->bo,
206 PAN_BO_ACCESS_SHARED |
207 PAN_BO_ACCESS_READ |
208 PAN_BO_ACCESS_VERTEX_TILER);
209 out = rsrc->bo->gpu + offset;
210
211 /* Check the cache */
212 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
213 info->start,
214 info->count,
215 min_index,
216 max_index);
217 } else {
218 /* Otherwise, we need to upload to transient memory */
219 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
220 struct panfrost_transfer T =
221 panfrost_pool_alloc_aligned(&batch->pool,
222 info->count * info->index_size,
223 info->index_size);
224
225 memcpy(T.cpu, ibuf8 + offset, info->count * info->index_size);
226 out = T.gpu;
227 }
228
229 if (needs_indices) {
230 /* Fallback */
231 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
232
233 if (!info->has_user_indices)
234 panfrost_minmax_cache_add(rsrc->index_cache,
235 info->start, info->count,
236 *min_index, *max_index);
237 }
238
239 return out;
240 }
241
242 void
243 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
244 const struct pipe_draw_info *info,
245 enum mali_draw_mode draw_mode,
246 struct mali_vertex_tiler_postfix *vertex_postfix,
247 struct mali_vertex_tiler_prefix *tiler_prefix,
248 struct mali_vertex_tiler_postfix *tiler_postfix,
249 unsigned *vertex_count,
250 unsigned *padded_count)
251 {
252 tiler_prefix->draw_mode = draw_mode;
253
254 unsigned draw_flags = 0;
255
256 if (panfrost_writes_point_size(ctx))
257 draw_flags |= MALI_DRAW_VARYING_SIZE;
258
259 if (info->primitive_restart)
260 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
261
262 /* These don't make much sense */
263
264 draw_flags |= 0x3000;
265
266 if (info->index_size) {
267 unsigned min_index = 0, max_index = 0;
268
269 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
270 info,
271 &min_index,
272 &max_index);
273
274 /* Use the corresponding values */
275 *vertex_count = max_index - min_index + 1;
276 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
277 tiler_prefix->offset_bias_correction = -min_index;
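/* Note: offset_start above already folds in min_index, while the negative
 * bias correction presumably rebases the indices the hardware fetches, so
 * only the [min_index, max_index] range of attributes is walked. */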
278 tiler_prefix->index_count = MALI_POSITIVE(info->count);
279 draw_flags |= panfrost_translate_index_size(info->index_size);
280 } else {
281 tiler_prefix->indices = 0;
282 *vertex_count = ctx->vertex_count;
283 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
284 tiler_prefix->offset_bias_correction = 0;
285 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
286 }
287
288 tiler_prefix->unknown_draw = draw_flags;
289
290 /* Encode the padded vertex count */
291
292 if (info->instance_count > 1) {
293 *padded_count = panfrost_padded_vertex_count(*vertex_count);
294
295 unsigned shift = __builtin_ctz(ctx->padded_count);
296 unsigned k = ctx->padded_count >> (shift + 1);
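/* By construction ctx->padded_count == (2 * k + 1) << shift: an odd
 * factor times a power of two. For example, a padded count of 12 encodes
 * as shift = 2, k = 1, since (2 * 1 + 1) << 2 == 12. */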
297
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
300 } else {
301 *padded_count = *vertex_count;
302
303 /* Reset instancing state */
304 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
305 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
306 }
307 }
308
309 static void
310 panfrost_shader_meta_init(struct panfrost_context *ctx,
311 enum pipe_shader_type st,
312 struct mali_shader_meta *meta)
313 {
314 const struct panfrost_device *dev = pan_device(ctx->base.screen);
315 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
316
317 memset(meta, 0, sizeof(*meta));
318 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
319 meta->attribute_count = ss->attribute_count;
320 meta->varying_count = ss->varying_count;
321 meta->texture_count = ctx->sampler_view_count[st];
322 meta->sampler_count = ctx->sampler_count[st];
323
324 if (dev->quirks & IS_BIFROST) {
325 if (st == PIPE_SHADER_VERTEX)
326 meta->bifrost1.unk1 = 0x800000;
327 else {
328 /* First clause ATEST |= 0x4000000.
329 * Less than 32 regs |= 0x200 */
330 meta->bifrost1.unk1 = 0x950020;
331 }
332
333 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
334 if (st == PIPE_SHADER_VERTEX)
335 meta->bifrost2.preload_regs = 0xC0;
336 else {
337 meta->bifrost2.preload_regs = 0x1;
338 SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
339 }
340
341 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
342 ss->uniform_cutoff);
343 } else {
344 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
345 ss->uniform_cutoff);
346 meta->midgard1.work_count = ss->work_reg_count;
347
348 /* TODO: This is not conformant on ES3 */
349 meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
350
351 meta->midgard1.flags_lo = 0x20;
352 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
353
354 SET_BIT(meta->midgard1.flags_lo, MALI_WRITES_GLOBAL, ss->writes_global);
355 }
356 }
357
358 static unsigned
359 translate_tex_wrap(enum pipe_tex_wrap w)
360 {
361 switch (w) {
362 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
363 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
364 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
365 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
366 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
367 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
368 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
369 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
370 default: unreachable("Invalid wrap");
371 }
372 }
373
374 /* The hardware compares in the wrong order, so we have to flip before
375 * encoding. Yes, really. */
376
377 static enum mali_func
378 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
379 {
380 if (!cso->compare_mode)
381 return MALI_FUNC_NEVER;
382
383 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
384 return panfrost_flip_compare_func(f);
385 }
386
387 static enum mali_mipmap_mode
388 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
389 {
390 switch (f) {
391 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
392 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
393 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
394 default: unreachable("Invalid");
395 }
396 }
397
398 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
399 struct mali_midgard_sampler_packed *hw)
400 {
401 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
402 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
403 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
404 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
405 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
406 cfg.normalized_coordinates = cso->normalized_coords;
407
408 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
409
410 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
411
412 /* If necessary, we disable mipmapping in the sampler descriptor by
413 * clamping the LOD as tight as possible (from 0 to epsilon,
414 * essentially -- remember these are fixed point numbers, so
415 * epsilon=1/256) */
416
417 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
418 cfg.minimum_lod + 1 :
419 FIXED_16(cso->max_lod, false);
420
421 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
422 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
423 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
424
425 cfg.compare_function = panfrost_sampler_compare_func(cso);
426 cfg.seamless_cube_map = cso->seamless_cube_map;
427
428 cfg.border_color_r = cso->border_color.f[0];
429 cfg.border_color_g = cso->border_color.f[1];
430 cfg.border_color_b = cso->border_color.f[2];
431 cfg.border_color_a = cso->border_color.f[3];
432 }
433 }
434
435 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
436 struct mali_bifrost_sampler_packed *hw)
437 {
438 pan_pack(hw, BIFROST_SAMPLER, cfg) {
439 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
440 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
441 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
442 cfg.normalized_coordinates = cso->normalized_coords;
443
444 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
445 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
446 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
447
448 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
449 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
450 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
451
452 cfg.compare_function = panfrost_sampler_compare_func(cso);
453 cfg.seamless_cube_map = cso->seamless_cube_map;
454 }
455 }
456
457 static bool
458 panfrost_fs_required(
459 struct panfrost_shader_state *fs,
460 struct panfrost_blend_final *blend,
461 unsigned rt_count)
462 {
463 /* If we generally have side effects */
464 if (fs->fs_sidefx)
465 return true;
466
467 /* If colour is written we need to execute */
468 for (unsigned i = 0; i < rt_count; ++i) {
469 if (!blend[i].no_colour)
470 return true;
471 }
472
473 /* If depth is written and not implied we need to execute.
474 * TODO: Predicate on Z/S writes being enabled */
475 return (fs->writes_depth || fs->writes_stencil);
476 }
477
478 static void
479 panfrost_emit_blend(struct panfrost_batch *batch, void *rts,
480 struct panfrost_blend_final *blend)
481 {
482 const struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
483 struct panfrost_shader_state *fs = panfrost_get_shader_state(batch->ctx, PIPE_SHADER_FRAGMENT);
484 unsigned rt_count = batch->key.nr_cbufs;
485
486 struct bifrost_blend_rt *brts = rts;
487 struct midgard_blend_rt *mrts = rts;
488
489 /* Disable blending for depth-only on Bifrost */
490
491 if (rt_count == 0 && dev->quirks & IS_BIFROST)
492 brts[0].unk2 = 0x3;
493
494 for (unsigned i = 0; i < rt_count; ++i) {
495 unsigned flags = 0;
496
497 pan_pack(&flags, BLEND_FLAGS, cfg) {
498 if (blend[i].no_colour) {
499 cfg.enable = false;
500 break;
501 }
502
503 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
504
505 cfg.srgb = util_format_is_srgb(batch->key.cbufs[i]->format);
506 cfg.load_destination = blend[i].load_dest;
507 cfg.dither_disable = !batch->ctx->blend->base.dither;
508
509 if (!(dev->quirks & IS_BIFROST))
510 cfg.midgard_blend_shader = blend[i].is_shader;
511 }
512
513 if (dev->quirks & IS_BIFROST) {
514 brts[i].flags = flags;
515
516 if (blend[i].is_shader) {
517 /* The blend shader's address needs to be at
518 * the same top 32 bit as the fragment shader.
519 * TODO: Ensure that's always the case.
520 */
521 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
522 (fs->bo->gpu & (0xffffffffull << 32)));
523 brts[i].shader = blend[i].shader.gpu;
524 brts[i].unk2 = 0x0;
525 } else {
526 enum pipe_format format = batch->key.cbufs[i]->format;
527 const struct util_format_description *format_desc;
528 format_desc = util_format_description(format);
529
530 brts[i].equation = blend[i].equation.equation;
531
532 /* TODO: this is a bit more complicated */
533 brts[i].constant = blend[i].equation.constant;
534
535 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
536
537 /* 0x19 disables blending and forces REPLACE
538 * mode (equivalent to rgb_mode = alpha_mode =
539 * x122, colour mask = 0xF). 0x1a allows
540 * blending. */
541 brts[i].unk2 = blend[i].opaque ? 0x19 : 0x1a;
542
543 brts[i].shader_type = fs->blend_types[i];
544 }
545 } else {
546 memcpy(&mrts[i].flags, &flags, sizeof(flags));
547
548 if (blend[i].is_shader) {
549 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
550 } else {
551 mrts[i].blend.equation = blend[i].equation.equation;
552 mrts[i].blend.constant = blend[i].equation.constant;
553 }
554 }
555 }
556 }
557
558 static void
559 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
560 struct mali_shader_meta *fragmeta,
561 struct panfrost_blend_final *blend)
562 {
563 const struct panfrost_device *dev = pan_device(ctx->base.screen);
564 struct panfrost_shader_state *fs;
565
566 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
567
568 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
569 const struct panfrost_zsa_state *zsa = ctx->depth_stencil;
570
571 bool msaa = rast->multisample;
572 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
573
574 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
575 fragmeta->unknown2_4 = 0x4e0;
576
577 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
578 * is required (independent of 32-bit/64-bit descriptors), or why it's
579 * not used on later GPU revisions. Otherwise, all shader jobs fault on
580 * these earlier chips (perhaps this is a chicken bit of some kind).
581 * More investigation is needed. */
582
583 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
584
585 if (dev->quirks & IS_BIFROST) {
586 /* TODO */
587 } else {
588 /* Depending on whether it's legal in the given shader, we try to
589 * enable early-z testing. TODO: respect e-z force */
590
591 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
592 !fs->can_discard && !fs->writes_global &&
593 !fs->writes_depth && !fs->writes_stencil &&
594 !ctx->blend->base.alpha_to_coverage);
595
596 /* Add the writes Z/S flags if needed. */
597 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
598 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
599
600 /* Any time texturing is used, derivatives are implicitly calculated,
601 * so we need to enable helper invocations */
602
603 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
604 fs->helper_invocations);
605
606 /* If discard is enabled, which bit we set to convey this
607 * depends on if depth/stencil is used for the draw or not.
608 * Just one of depth OR stencil is enough to trigger this. */
609
610 bool zs_enabled =
611 fs->writes_depth || fs->writes_stencil ||
612 (zsa->base.depth.enabled && zsa->base.depth.func != PIPE_FUNC_ALWAYS) ||
613 zsa->base.stencil[0].enabled;
614
615 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
616 fs->outputs_read || (!zs_enabled && fs->can_discard));
617 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
618 }
619
620 /* TODO: Sample size */
621 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
622 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
623
624 /* EXT_shader_framebuffer_fetch requires the shader to be run
625 * per-sample when outputs are read. */
626 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
627 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
628
629 fragmeta->depth_units = rast->offset_units * 2.0f;
630 fragmeta->depth_factor = rast->offset_scale;
631
632 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
633
634 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
635 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
636
637 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
638 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
639
640 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
641 zsa->base.stencil[0].enabled);
642
643 fragmeta->stencil_mask_front = zsa->stencil_mask_front;
644 fragmeta->stencil_mask_back = zsa->stencil_mask_back;
645
646 /* Bottom bits for stencil ref, exactly one word */
647 fragmeta->stencil_front.opaque[0] = zsa->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
648
649 /* If back-stencil is not enabled, use the front values */
650
651 if (zsa->base.stencil[1].enabled)
652 fragmeta->stencil_back.opaque[0] = zsa->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
653 else
654 fragmeta->stencil_back = fragmeta->stencil_front;
655
656 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
657 zsa->base.depth.writemask);
658
659 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
660 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(
661 zsa->base.depth.enabled ? zsa->base.depth.func : PIPE_FUNC_ALWAYS));
662
663 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
664 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
665 !ctx->blend->base.dither);
666
667 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
668 ctx->blend->base.alpha_to_coverage);
669
670 /* Get blending setup */
671 unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
672
673 /* Disable shader execution if we can */
674 if (dev->quirks & MIDGARD_SHADERLESS
675 && !panfrost_fs_required(fs, blend, rt_count)) {
676 fragmeta->shader = 0;
677 fragmeta->attribute_count = 0;
678 fragmeta->varying_count = 0;
679 fragmeta->texture_count = 0;
680 fragmeta->sampler_count = 0;
681
682 /* This feature is not known to work on Bifrost */
683 fragmeta->midgard1.work_count = 1;
684 fragmeta->midgard1.uniform_count = 0;
685 fragmeta->midgard1.uniform_buffer_count = 0;
686 }
687
688 /* If there is a blend shader, work registers are shared. We impose 8
689 * work registers as a limit for blend shaders. Should be lower XXX */
690
691 if (!(dev->quirks & IS_BIFROST)) {
692 for (unsigned c = 0; c < rt_count; ++c) {
693 if (blend[c].is_shader) {
694 fragmeta->midgard1.work_count =
695 MAX2(fragmeta->midgard1.work_count, 8);
696 }
697 }
698 }
699
700 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
701 * copied to the blend_meta appended (by convention), but this is the
702 * field actually read by the hardware. (Or maybe both are read...?).
703 * Specify the last RTi with a blend shader. */
704
705 fragmeta->blend.shader = 0;
706
707 for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
708 if (!blend[rt].is_shader)
709 continue;
710
711 fragmeta->blend.shader = blend[rt].shader.gpu |
712 blend[rt].shader.first_tag;
713 break;
714 }
715
716 if (dev->quirks & MIDGARD_SFBD) {
717 /* On platforms with only a single render target, the blend
718 * information is inside the shader meta itself. We additionally
719 * need to signal CAN_DISCARD for nontrivial blend modes (so
720 * we're able to read back the destination buffer) */
721
722 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
723 blend[0].is_shader);
724
725 if (!blend[0].is_shader) {
726 fragmeta->blend.equation = blend[0].equation.equation;
727 fragmeta->blend.constant = blend[0].equation.constant;
728 }
729
730 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
731 blend[0].load_dest);
732 }
733
734 if (dev->quirks & IS_BIFROST) {
735 bool no_blend = true;
736
737 for (unsigned i = 0; i < rt_count; ++i)
738 no_blend &= (!blend[i].load_dest | blend[i].no_colour);
739
740 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
741 !fs->can_discard && !fs->writes_depth && no_blend);
742 }
743 }
744
745 void
746 panfrost_emit_shader_meta(struct panfrost_batch *batch,
747 enum pipe_shader_type st,
748 struct mali_vertex_tiler_postfix *postfix)
749 {
750 struct panfrost_context *ctx = batch->ctx;
751 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
752
753 if (!ss) {
754 postfix->shader = 0;
755 return;
756 }
757
758 struct mali_shader_meta meta;
759
760 panfrost_shader_meta_init(ctx, st, &meta);
761
762 /* Add the shader BO to the batch. */
763 panfrost_batch_add_bo(batch, ss->bo,
764 PAN_BO_ACCESS_PRIVATE |
765 PAN_BO_ACCESS_READ |
766 panfrost_bo_access_for_stage(st));
767
768 mali_ptr shader_ptr;
769
770 if (st == PIPE_SHADER_FRAGMENT) {
771 struct panfrost_device *dev = pan_device(ctx->base.screen);
772 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
773 size_t desc_size = sizeof(meta);
774 void *rts = NULL;
775 struct panfrost_transfer xfer;
776 unsigned rt_size;
777
778 if (dev->quirks & MIDGARD_SFBD)
779 rt_size = 0;
780 else if (dev->quirks & IS_BIFROST)
781 rt_size = sizeof(struct bifrost_blend_rt);
782 else
783 rt_size = sizeof(struct midgard_blend_rt);
784
785 desc_size += rt_size * rt_count;
786
787 if (rt_size)
788 rts = rzalloc_size(ctx, rt_size * rt_count);
789
790 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
791
792 for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
793 blend[c] = panfrost_get_blend_for_context(ctx, c);
794
795 panfrost_frag_shader_meta_init(ctx, &meta, blend);
796
797 if (!(dev->quirks & MIDGARD_SFBD))
798 panfrost_emit_blend(batch, rts, blend);
799 else
800 batch->draws |= PIPE_CLEAR_COLOR0;
801
802 xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, sizeof(meta));
803
804 memcpy(xfer.cpu, &meta, sizeof(meta));
805 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
806
807 if (rt_size)
808 ralloc_free(rts);
809
810 shader_ptr = xfer.gpu;
811 } else {
812 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
813 sizeof(meta));
814 }
815
816 postfix->shader = shader_ptr;
817 }
818
819 void
820 panfrost_emit_viewport(struct panfrost_batch *batch,
821 struct mali_vertex_tiler_postfix *tiler_postfix)
822 {
823 struct panfrost_context *ctx = batch->ctx;
824 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
825 const struct pipe_scissor_state *ss = &ctx->scissor;
826 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
827 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
828
829 /* Derive min/max from translate/scale. Note since |x| >= 0 by
830 * definition, we have that -|x| <= |x| hence translate - |scale| <=
831 * translate + |scale|, so the ordering is correct here. */
832 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
833 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
834 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
835 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
836 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
837 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
838
839 /* Scissor to the intersection of viewport and to the scissor, clamped
840 * to the framebuffer */
841
842 unsigned minx = MIN2(fb->width, vp_minx);
843 unsigned maxx = MIN2(fb->width, vp_maxx);
844 unsigned miny = MIN2(fb->height, vp_miny);
845 unsigned maxy = MIN2(fb->height, vp_maxy);
846
847 if (ss && rast->scissor) {
848 minx = MAX2(ss->minx, minx);
849 miny = MAX2(ss->miny, miny);
850 maxx = MIN2(ss->maxx, maxx);
851 maxy = MIN2(ss->maxy, maxy);
852 }
853
854 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
855
856 pan_pack(T.cpu, VIEWPORT, cfg) {
857 cfg.scissor_minimum_x = minx;
858 cfg.scissor_minimum_y = miny;
859 cfg.scissor_maximum_x = maxx - 1;
860 cfg.scissor_maximum_y = maxy - 1;
861
862 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
863 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
864 }
865
866 tiler_postfix->viewport = T.gpu;
867 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
868 }
869
870 static mali_ptr
871 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
872 enum pipe_shader_type st,
873 struct panfrost_constant_buffer *buf,
874 unsigned index)
875 {
876 struct pipe_constant_buffer *cb = &buf->cb[index];
877 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
878
879 if (rsrc) {
880 panfrost_batch_add_bo(batch, rsrc->bo,
881 PAN_BO_ACCESS_SHARED |
882 PAN_BO_ACCESS_READ |
883 panfrost_bo_access_for_stage(st));
884
885 /* Alignment guaranteed by
886 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
887 return rsrc->bo->gpu + cb->buffer_offset;
888 } else if (cb->user_buffer) {
889 return panfrost_pool_upload_aligned(&batch->pool,
890 cb->user_buffer +
891 cb->buffer_offset,
892 cb->buffer_size, 16);
893 } else {
894 unreachable("No constant buffer");
895 }
896 }
897
898 struct sysval_uniform {
899 union {
900 float f[4];
901 int32_t i[4];
902 uint32_t u[4];
903 uint64_t du[2];
904 };
905 };
906
907 static void
908 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
909 struct sysval_uniform *uniform)
910 {
911 struct panfrost_context *ctx = batch->ctx;
912 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
913
914 uniform->f[0] = vp->scale[0];
915 uniform->f[1] = vp->scale[1];
916 uniform->f[2] = vp->scale[2];
917 }
918
919 static void
920 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
921 struct sysval_uniform *uniform)
922 {
923 struct panfrost_context *ctx = batch->ctx;
924 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
925
926 uniform->f[0] = vp->translate[0];
927 uniform->f[1] = vp->translate[1];
928 uniform->f[2] = vp->translate[2];
929 }
930
931 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
932 enum pipe_shader_type st,
933 unsigned int sysvalid,
934 struct sysval_uniform *uniform)
935 {
936 struct panfrost_context *ctx = batch->ctx;
937 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
938 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
939 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
940 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
941
942 assert(dim);
943 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
944
945 if (dim > 1)
946 uniform->i[1] = u_minify(tex->texture->height0,
947 tex->u.tex.first_level);
948
949 if (dim > 2)
950 uniform->i[2] = u_minify(tex->texture->depth0,
951 tex->u.tex.first_level);
952
953 if (is_array)
954 uniform->i[dim] = tex->texture->array_size;
955 }
956
957 static void
958 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
959 enum pipe_shader_type st,
960 unsigned ssbo_id,
961 struct sysval_uniform *uniform)
962 {
963 struct panfrost_context *ctx = batch->ctx;
964
965 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
966 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
967
968 /* Compute address */
969 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
970
971 panfrost_batch_add_bo(batch, bo,
972 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
973 panfrost_bo_access_for_stage(st));
974
975 /* Upload address and size as sysval */
976 uniform->du[0] = bo->gpu + sb.buffer_offset;
977 uniform->u[2] = sb.buffer_size;
978 }
979
980 static void
981 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
982 enum pipe_shader_type st,
983 unsigned samp_idx,
984 struct sysval_uniform *uniform)
985 {
986 struct panfrost_context *ctx = batch->ctx;
987 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
988
989 uniform->f[0] = sampl->min_lod;
990 uniform->f[1] = sampl->max_lod;
991 uniform->f[2] = sampl->lod_bias;
992
993 /* Even without any errata, Midgard represents "no mipmapping" as
994 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
995 * panfrost_create_sampler_state which also explains our choice of
996 * epsilon value (again to keep behaviour consistent) */
997
998 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
999 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1000 }
1001
1002 static void
1003 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1004 struct sysval_uniform *uniform)
1005 {
1006 struct panfrost_context *ctx = batch->ctx;
1007
1008 uniform->u[0] = ctx->compute_grid->grid[0];
1009 uniform->u[1] = ctx->compute_grid->grid[1];
1010 uniform->u[2] = ctx->compute_grid->grid[2];
1011 }
1012
1013 static void
1014 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1015 struct panfrost_shader_state *ss,
1016 enum pipe_shader_type st)
1017 {
1018 struct sysval_uniform *uniforms = (void *)buf;
1019
1020 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1021 int sysval = ss->sysval[i];
1022
1023 switch (PAN_SYSVAL_TYPE(sysval)) {
1024 case PAN_SYSVAL_VIEWPORT_SCALE:
1025 panfrost_upload_viewport_scale_sysval(batch,
1026 &uniforms[i]);
1027 break;
1028 case PAN_SYSVAL_VIEWPORT_OFFSET:
1029 panfrost_upload_viewport_offset_sysval(batch,
1030 &uniforms[i]);
1031 break;
1032 case PAN_SYSVAL_TEXTURE_SIZE:
1033 panfrost_upload_txs_sysval(batch, st,
1034 PAN_SYSVAL_ID(sysval),
1035 &uniforms[i]);
1036 break;
1037 case PAN_SYSVAL_SSBO:
1038 panfrost_upload_ssbo_sysval(batch, st,
1039 PAN_SYSVAL_ID(sysval),
1040 &uniforms[i]);
1041 break;
1042 case PAN_SYSVAL_NUM_WORK_GROUPS:
1043 panfrost_upload_num_work_groups_sysval(batch,
1044 &uniforms[i]);
1045 break;
1046 case PAN_SYSVAL_SAMPLER:
1047 panfrost_upload_sampler_sysval(batch, st,
1048 PAN_SYSVAL_ID(sysval),
1049 &uniforms[i]);
1050 break;
1051 default:
1052 assert(0);
1053 }
1054 }
1055 }
1056
1057 static const void *
1058 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1059 unsigned index)
1060 {
1061 struct pipe_constant_buffer *cb = &buf->cb[index];
1062 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1063
1064 if (rsrc)
1065 return rsrc->bo->cpu;
1066 else if (cb->user_buffer)
1067 return cb->user_buffer;
1068 else
1069 unreachable("No constant buffer");
1070 }
1071
1072 void
1073 panfrost_emit_const_buf(struct panfrost_batch *batch,
1074 enum pipe_shader_type stage,
1075 struct mali_vertex_tiler_postfix *postfix)
1076 {
1077 struct panfrost_context *ctx = batch->ctx;
1078 struct panfrost_shader_variants *all = ctx->shader[stage];
1079
1080 if (!all)
1081 return;
1082
1083 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1084
1085 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1086
1087 /* Uniforms are implicitly UBO #0 */
1088 bool has_uniforms = buf->enabled_mask & (1 << 0);
1089
1090 /* Allocate room for the sysval and the uniforms */
1091 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1092 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1093 size_t size = sys_size + uniform_size;
1094 struct panfrost_transfer transfer =
1095 panfrost_pool_alloc_aligned(&batch->pool, size, 16);
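/* Layout of this allocation: the sysvals requested by the shader come
 * first, immediately followed by the contents of uniform buffer 0. Both
 * the push uniform pointer and the UBO 0 descriptor emitted below point
 * at the start of the combined region. */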
1096
1097 /* Upload sysvals requested by the shader */
1098 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1099
1100 /* Upload uniforms */
1101 if (has_uniforms && uniform_size) {
1102 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1103 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1104 }
1105
1106 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1107 * uploaded */
1108
1109 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1110 assert(ubo_count >= 1);
1111
1112 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1113 struct panfrost_transfer ubos =
1114 panfrost_pool_alloc_aligned(&batch->pool, sz,
1115 MALI_UNIFORM_BUFFER_LENGTH);
1116
1117 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1118
1119 /* Upload uniforms as a UBO */
1120
1121 if (ss->uniform_count) {
1122 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1123 cfg.entries = ss->uniform_count;
1124 cfg.pointer = transfer.gpu;
1125 }
1126 } else {
1127 *ubo_ptr = 0;
1128 }
1129
1130 /* The rest are honest-to-goodness UBOs */
1131
1132 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1133 size_t usz = buf->cb[ubo].buffer_size;
1134 bool enabled = buf->enabled_mask & (1 << ubo);
1135 bool empty = usz == 0;
1136
1137 if (!enabled || empty) {
1138 ubo_ptr[ubo] = 0;
1139 continue;
1140 }
1141
1142 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1143 cfg.entries = DIV_ROUND_UP(usz, 16);
1144 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1145 stage, buf, ubo);
1146 }
1147 }
1148
1149 postfix->uniforms = transfer.gpu;
1150 postfix->uniform_buffers = ubos.gpu;
1151
1152 buf->dirty_mask = 0;
1153 }
1154
1155 void
1156 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1157 const struct pipe_grid_info *info,
1158 struct midgard_payload_vertex_tiler *vtp)
1159 {
1160 struct panfrost_context *ctx = batch->ctx;
1161 struct panfrost_device *dev = pan_device(ctx->base.screen);
1162 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1163 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1164 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1165 128));
1166
1167 unsigned log2_instances =
1168 util_logbase2_ceil(info->grid[0]) +
1169 util_logbase2_ceil(info->grid[1]) +
1170 util_logbase2_ceil(info->grid[2]);
1171
1172 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
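/* Sizing sketch: each workgroup instance gets a power-of-two slice of at
 * least 128 bytes, and we reserve one slice per workgroup in the grid
 * (rounded up to a power of two per dimension) on every core. */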
1173 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1174 shared_size,
1175 1);
1176
1177 struct mali_shared_memory shared = {
1178 .shared_memory = bo->gpu,
1179 .shared_workgroup_count = log2_instances,
1180 .shared_shift = util_logbase2(single_size) + 1
1181 };
1182
1183 vtp->postfix.shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared,
1184 sizeof(shared), 64);
1185 }
1186
1187 static mali_ptr
1188 panfrost_get_tex_desc(struct panfrost_batch *batch,
1189 enum pipe_shader_type st,
1190 struct panfrost_sampler_view *view)
1191 {
1192 if (!view)
1193 return (mali_ptr) 0;
1194
1195 struct pipe_sampler_view *pview = &view->base;
1196 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1197
1198 /* Add the BO to the job so it's retained until the job is done. */
1199
1200 panfrost_batch_add_bo(batch, rsrc->bo,
1201 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1202 panfrost_bo_access_for_stage(st));
1203
1204 panfrost_batch_add_bo(batch, view->bo,
1205 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1206 panfrost_bo_access_for_stage(st));
1207
1208 return view->bo->gpu;
1209 }
1210
1211 static void
1212 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1213 struct pipe_context *pctx)
1214 {
1215 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1216 if (view->texture_bo != rsrc->bo->gpu ||
1217 view->modifier != rsrc->modifier) {
1218 panfrost_bo_unreference(view->bo);
1219 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1220 }
1221 }
1222
1223 void
1224 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1225 enum pipe_shader_type stage,
1226 struct mali_vertex_tiler_postfix *postfix)
1227 {
1228 struct panfrost_context *ctx = batch->ctx;
1229 struct panfrost_device *device = pan_device(ctx->base.screen);
1230
1231 if (!ctx->sampler_view_count[stage])
1232 return;
1233
1234 if (device->quirks & IS_BIFROST) {
1235 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1236 MALI_BIFROST_TEXTURE_LENGTH *
1237 ctx->sampler_view_count[stage],
1238 MALI_BIFROST_TEXTURE_LENGTH);
1239
1240 struct mali_bifrost_texture_packed *out =
1241 (struct mali_bifrost_texture_packed *) T.cpu;
1242
1243 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1244 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1245 struct pipe_sampler_view *pview = &view->base;
1246 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1247
1248 panfrost_update_sampler_view(view, &ctx->base);
1249 out[i] = view->bifrost_descriptor;
1250
1251 /* Add the BOs to the job so they are retained until the job is done. */
1252
1253 panfrost_batch_add_bo(batch, rsrc->bo,
1254 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1255 panfrost_bo_access_for_stage(stage));
1256
1257 panfrost_batch_add_bo(batch, view->bo,
1258 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1259 panfrost_bo_access_for_stage(stage));
1260 }
1261
1262 postfix->textures = T.gpu;
1263 } else {
1264 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1265
1266 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1267 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1268
1269 panfrost_update_sampler_view(view, &ctx->base);
1270
1271 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1272 }
1273
1274 postfix->textures = panfrost_pool_upload_aligned(&batch->pool,
1275 trampolines,
1276 sizeof(uint64_t) *
1277 ctx->sampler_view_count[stage],
1278 sizeof(uint64_t));
1279 }
1280 }
1281
1282 void
1283 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1284 enum pipe_shader_type stage,
1285 struct mali_vertex_tiler_postfix *postfix)
1286 {
1287 struct panfrost_context *ctx = batch->ctx;
1288
1289 if (!ctx->sampler_count[stage])
1290 return;
1291
1292 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1293 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1294
1295 size_t sz = desc_size * ctx->sampler_count[stage];
1296 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool, sz, desc_size);
1297 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1298
1299 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1300 out[i] = ctx->samplers[stage][i]->hw;
1301
1302 postfix->sampler_descriptor = T.gpu;
1303 }
1304
1305 void
1306 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1307 struct mali_vertex_tiler_postfix *vertex_postfix)
1308 {
1309 struct panfrost_context *ctx = batch->ctx;
1310 struct panfrost_vertex_state *so = ctx->vertex;
1311 struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1312
1313 unsigned instance_shift = vertex_postfix->instance_shift;
1314 unsigned instance_odd = vertex_postfix->instance_odd;
1315
1316 /* Worst case: everything is NPOT, which is only possible if instancing
1317 * is enabled. Otherwise a single record is guaranteed */
1318 bool could_npot = instance_shift || instance_odd;
1319
1320 struct panfrost_transfer S = panfrost_pool_alloc_aligned(&batch->pool,
1321 MALI_ATTRIBUTE_BUFFER_LENGTH * vs->attribute_count *
1322 (could_npot ? 2 : 1),
1323 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1324
1325 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1326 MALI_ATTRIBUTE_LENGTH * vs->attribute_count,
1327 MALI_ATTRIBUTE_LENGTH);
1328
1329 struct mali_attribute_buffer_packed *bufs =
1330 (struct mali_attribute_buffer_packed *) S.cpu;
1331
1332 struct mali_attribute_packed *out =
1333 (struct mali_attribute_packed *) T.cpu;
1334
1335 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1336 unsigned k = 0;
1337
1338 for (unsigned i = 0; i < so->num_elements; ++i) {
1339 /* We map buffers 1:1 with the attributes, which
1340 * means duplicating some vertex buffers (who cares? aside from
1341 * maybe some caching implications but I somehow doubt that
1342 * matters) */
1343
1344 struct pipe_vertex_element *elem = &so->pipe[i];
1345 unsigned vbi = elem->vertex_buffer_index;
1346 attrib_to_buffer[i] = k;
1347
1348 if (!(ctx->vb_mask & (1 << vbi)))
1349 continue;
1350
1351 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1352 struct panfrost_resource *rsrc;
1353
1354 rsrc = pan_resource(buf->buffer.resource);
1355 if (!rsrc)
1356 continue;
1357
1358 /* Add a dependency of the batch on the vertex buffer */
1359 panfrost_batch_add_bo(batch, rsrc->bo,
1360 PAN_BO_ACCESS_SHARED |
1361 PAN_BO_ACCESS_READ |
1362 PAN_BO_ACCESS_VERTEX_TILER);
1363
1364 /* Mask off lower bits, see offset fixup below */
1365 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1366 mali_ptr addr = raw_addr & ~63;
1367
1368 /* Since we advanced the base pointer, we shrink the buffer
1369 * size, but add the offset we subtracted */
1370 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1371 - buf->buffer_offset;
1372
1373 /* When there is a divisor, the hardware-level divisor is
1374 * the product of the instance divisor and the padded count */
1375 unsigned divisor = elem->instance_divisor;
1376 unsigned hw_divisor = ctx->padded_count * divisor;
1377 unsigned stride = buf->stride;
1378
1379 /* If there's a divisor(=1) but no instancing, we want every
1380 * attribute to be the same */
1381
1382 if (divisor && ctx->instance_count == 1)
1383 stride = 0;
1384
1385 if (!divisor || ctx->instance_count <= 1) {
1386 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1387 if (ctx->instance_count > 1)
1388 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1389
1390 cfg.pointer = addr;
1391 cfg.stride = stride;
1392 cfg.size = size;
1393 cfg.divisor_r = instance_shift;
1394 cfg.divisor_p = instance_odd;
1395 }
1396 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1397 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1398 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1399 cfg.pointer = addr;
1400 cfg.stride = stride;
1401 cfg.size = size;
1402 cfg.divisor_r = __builtin_ctz(hw_divisor);
1403 }
1404
1405 } else {
1406 unsigned shift = 0, extra_flags = 0;
1407
1408 unsigned magic_divisor =
1409 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
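/* Non-power-of-two divisors presumably use the usual multiply-and-shift
 * "magic number" reciprocal trick: the continuation record below carries
 * the magic numerator alongside the original divisor so the hardware can
 * avoid a true division per index. */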
1410
1411 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1412 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1413 cfg.pointer = addr;
1414 cfg.stride = stride;
1415 cfg.size = size;
1416
1417 cfg.divisor_r = shift;
1418 cfg.divisor_e = extra_flags;
1419 }
1420
1421 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1422 cfg.divisor_numerator = magic_divisor;
1423 cfg.divisor = divisor;
1424 }
1425
1426 ++k;
1427 }
1428
1429 ++k;
1430 }
1431
1432 /* Add special gl_VertexID/gl_InstanceID buffers */
1433
1434 if (unlikely(vs->attribute_count >= PAN_VERTEX_ID)) {
1435 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1436
1437 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1438 cfg.buffer_index = k++;
1439 cfg.format = so->formats[PAN_VERTEX_ID];
1440 }
1441
1442 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1443
1444 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1445 cfg.buffer_index = k++;
1446 cfg.format = so->formats[PAN_INSTANCE_ID];
1447 }
1448 }
1449
1450 /* Attribute addresses require 64-byte alignment, so let:
1451 *
1452 * base' = base & ~63 = base - (base & 63)
1453 * offset' = offset + (base & 63)
1454 *
1455 * Since base' + offset' = base + offset, these are equivalent
1456 * addressing modes and now base is 64 aligned.
1457 */
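/* Worked example: with buffer_offset = 70, the low bits are 70 & 63 = 6,
 * so the attribute buffer pointer was aligned down by 6 bytes above and 6
 * is added back to each attribute's src_offset below. */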
1458
1459 unsigned start = vertex_postfix->offset_start;
1460
1461 for (unsigned i = 0; i < so->num_elements; ++i) {
1462 unsigned vbi = so->pipe[i].vertex_buffer_index;
1463 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1464
1465 /* Adjust by the masked off bits of the offset. Make sure we
1466 * read src_offset from so->hw (which is not GPU visible)
1467 * rather than target (which is) due to caching effects */
1468
1469 unsigned src_offset = so->pipe[i].src_offset;
1470
1471 /* BOs aligned to 4k so guaranteed aligned to 64 */
1472 src_offset += (buf->buffer_offset & 63);
1473
1474 /* Also, somewhat obscurely, per-instance data needs to be
1475 * offset in response to a delayed start in an indexed draw */
1476
1477 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1478 src_offset -= buf->stride * start;
1479
1480 pan_pack(out + i, ATTRIBUTE, cfg) {
1481 cfg.buffer_index = attrib_to_buffer[i];
1482 cfg.format = so->formats[i];
1483 cfg.offset = src_offset;
1484 }
1485 }
1486
1487 vertex_postfix->attributes = S.gpu;
1488 vertex_postfix->attribute_meta = T.gpu;
1489 }
1490
1491 static mali_ptr
1492 panfrost_emit_varyings(struct panfrost_batch *batch,
1493 struct mali_attribute_buffer_packed *slot,
1494 unsigned stride, unsigned count)
1495 {
1496 unsigned size = stride * count;
1497 mali_ptr ptr = panfrost_pool_alloc_aligned(&batch->invisible_pool, size, 64).gpu;
1498
1499 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1500 cfg.stride = stride;
1501 cfg.size = size;
1502 cfg.pointer = ptr;
1503 }
1504
1505 return ptr;
1506 }
1507
1508 static unsigned
1509 panfrost_streamout_offset(unsigned stride, unsigned offset,
1510 struct pipe_stream_output_target *target)
1511 {
1512 return (target->buffer_offset + (offset * stride * 4)) & 63;
1513 }
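/* This is the sub-64-byte remainder of the capture address; it ends up in
 * the attribute record's offset, while panfrost_emit_streamout below
 * aligns the buffer pointer down and grows the size by the same amount so
 * the pointer stays 64-byte aligned. */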
1514
1515 static void
1516 panfrost_emit_streamout(struct panfrost_batch *batch,
1517 struct mali_attribute_buffer_packed *slot,
1518 unsigned stride_words, unsigned offset, unsigned count,
1519 struct pipe_stream_output_target *target)
1520 {
1521 unsigned stride = stride_words * 4;
1522 unsigned max_size = target->buffer_size;
1523 unsigned expected_size = stride * count;
1524
1525 /* Grab the BO and bind it to the batch */
1526 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1527
1528 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1529 * the perspective of the TILER and FRAGMENT.
1530 */
1531 panfrost_batch_add_bo(batch, bo,
1532 PAN_BO_ACCESS_SHARED |
1533 PAN_BO_ACCESS_RW |
1534 PAN_BO_ACCESS_VERTEX_TILER |
1535 PAN_BO_ACCESS_FRAGMENT);
1536
1537 /* We will have an offset applied to get alignment */
1538 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
1539
1540 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1541 cfg.pointer = (addr & ~63);
1542 cfg.stride = stride;
1543 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1544 }
1545 }
1546
1547 static bool
1548 has_point_coord(unsigned mask, gl_varying_slot loc)
1549 {
1550 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1551 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1552 else if (loc == VARYING_SLOT_PNTC)
1553 return (mask & (1 << 8));
1554 else
1555 return false;
1556 }
1557
1558 /* Helpers for manipulating stream out information so we can pack varyings
1559 * accordingly. Compute the src_offset for a given captured varying */
1560
1561 static struct pipe_stream_output *
1562 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1563 {
1564 for (unsigned i = 0; i < info->num_outputs; ++i) {
1565 if (info->output[i].register_index == loc)
1566 return &info->output[i];
1567 }
1568
1569 unreachable("Varying not captured");
1570 }
1571
1572 static unsigned
1573 pan_varying_size(enum mali_format fmt)
1574 {
1575 unsigned type = MALI_EXTRACT_TYPE(fmt);
1576 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1577 unsigned bits = MALI_EXTRACT_BITS(fmt);
1578 unsigned bpc = 0;
1579
1580 if (bits == MALI_CHANNEL_FLOAT) {
1581 /* No doubles */
1582 bool fp16 = (type == MALI_FORMAT_SINT);
1583 assert(fp16 || (type == MALI_FORMAT_UNORM));
1584
1585 bpc = fp16 ? 2 : 4;
1586 } else {
1587 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1588
1589 /* See the enums */
1590 bits = 1 << bits;
1591 assert(bits >= 8);
1592 bpc = bits / 8;
1593 }
1594
1595 return bpc * chan;
1596 }
1597
1598 /* Indices for named (non-XFB) varyings that are present. These are packed
1599 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1600 * PAN_VARY_*). This has the nice property that you can look up the buffer index
1601 * of a given special field given a shift S by:
1602 *
1603 * idx = popcount(P & ((1 << S) - 1))
1604 *
1605 * That is, look at all of the varyings that come earlier and count them; that
1606 * count is the buffer index. Likewise, the total number of special
1607 * buffers required is simply popcount(P)
1608 */
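/* For example, with general, position and point coord present we have
 * P = 0b1011; the point coord buffer (S = PAN_VARY_PNTCOORD = 3) lands at
 * index popcount(0b1011 & 0b0111) = 2, and pan_xfb_base(P) = 3. */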
1609
1610 enum pan_special_varying {
1611 PAN_VARY_GENERAL = 0,
1612 PAN_VARY_POSITION = 1,
1613 PAN_VARY_PSIZ = 2,
1614 PAN_VARY_PNTCOORD = 3,
1615 PAN_VARY_FACE = 4,
1616 PAN_VARY_FRAGCOORD = 5,
1617
1618 /* Keep last */
1619 PAN_VARY_MAX,
1620 };
1621
1622 /* Given a varying, figure out which index it corresponds to */
1623
1624 static inline unsigned
1625 pan_varying_index(unsigned present, enum pan_special_varying v)
1626 {
1627 unsigned mask = (1 << v) - 1;
1628 return util_bitcount(present & mask);
1629 }
1630
1631 /* Get the base offset for XFB buffers, which by convention come after
1632 * everything else. Wrapper function for semantic reasons; by construction this
1633 * is just popcount. */
1634
1635 static inline unsigned
1636 pan_xfb_base(unsigned present)
1637 {
1638 return util_bitcount(present);
1639 }
1640
1641 /* Computes the present mask for varyings so we can start emitting varying records */
1642
1643 static inline unsigned
1644 pan_varying_present(
1645 struct panfrost_shader_state *vs,
1646 struct panfrost_shader_state *fs,
1647 unsigned quirks)
1648 {
1649 /* At the moment we always emit general and position buffers. Not
1650 * strictly necessary but usually harmless */
1651
1652 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1653
1654 /* Enable special buffers by the shader info */
1655
1656 if (vs->writes_point_size)
1657 present |= (1 << PAN_VARY_PSIZ);
1658
1659 if (fs->reads_point_coord)
1660 present |= (1 << PAN_VARY_PNTCOORD);
1661
1662 if (fs->reads_face)
1663 present |= (1 << PAN_VARY_FACE);
1664
1665 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1666 present |= (1 << PAN_VARY_FRAGCOORD);
1667
1668 /* Also, if we have a point sprite, we need a point coord buffer */
1669
1670 for (unsigned i = 0; i < fs->varying_count; i++) {
1671 gl_varying_slot loc = fs->varyings_loc[i];
1672
1673 if (has_point_coord(fs->point_sprite_mask, loc))
1674 present |= (1 << PAN_VARY_PNTCOORD);
1675 }
1676
1677 return present;
1678 }
1679
1680 /* Emitters for varying records */
1681
1682 static void
1683 pan_emit_vary(struct mali_attribute_packed *out,
1684 unsigned present, enum pan_special_varying buf,
1685 unsigned quirks, enum mali_format format,
1686 unsigned offset)
1687 {
1688 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1689 unsigned swizzle = quirks & HAS_SWIZZLES ?
1690 panfrost_get_default_swizzle(nr_channels) :
1691 panfrost_bifrost_swizzle(nr_channels);
1692
1693 pan_pack(out, ATTRIBUTE, cfg) {
1694 cfg.buffer_index = pan_varying_index(present, buf);
1695 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1696 cfg.format = (format << 12) | swizzle;
1697 cfg.offset = offset;
1698 }
1699 }
1700
1701 /* General varying that is unused */
1702
1703 static void
1704 pan_emit_vary_only(struct mali_attribute_packed *out,
1705 unsigned present, unsigned quirks)
1706 {
1707 pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
1708 }
1709
1710 /* Special records */
1711
1712 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1713 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1714 [PAN_VARY_PSIZ] = MALI_R16F,
1715 [PAN_VARY_PNTCOORD] = MALI_R16F,
1716 [PAN_VARY_FACE] = MALI_R32I,
1717 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1718 };
1719
1720 static void
1721 pan_emit_vary_special(struct mali_attribute_packed *out,
1722 unsigned present, enum pan_special_varying buf,
1723 unsigned quirks)
1724 {
1725 assert(buf < PAN_VARY_MAX);
1726 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1727 }
1728
1729 static enum mali_format
1730 pan_xfb_format(enum mali_format format, unsigned nr)
1731 {
1732 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1733 return MALI_R32F | MALI_NR_CHANNELS(nr);
1734 else
1735 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1736 }
1737
1738 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1739 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1740 * value. */
1741
1742 static void
1743 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1744 unsigned present,
1745 unsigned max_xfb,
1746 unsigned *streamout_offsets,
1747 unsigned quirks,
1748 enum mali_format format,
1749 struct pipe_stream_output o)
1750 {
1751 unsigned swizzle = quirks & HAS_SWIZZLES ?
1752 panfrost_get_default_swizzle(o.num_components) :
1753 panfrost_bifrost_swizzle(o.num_components);
1754
1755 pan_pack(out, ATTRIBUTE, cfg) {
1756 /* XFB buffers come after everything else */
1757 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1758 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1759
1760 /* Override number of channels and precision to highp */
1761 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1762
1763 /* Apply given offsets together */
1764 cfg.offset = (o.dst_offset * 4) /* dwords */
1765 + streamout_offsets[o.output_buffer];
1766 }
1767 }
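
/* Offset arithmetic, by example (illustrative only): capturing into
 * output_buffer 1 at o.dst_offset = 4 dwords, with streamout_offsets[1] = 256
 * bytes already consumed in that target, gives cfg.offset = 4 * 4 + 256 = 272
 * bytes. */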
1768
1769 /* Determine if we should capture a varying for XFB. This requires actually
1770 * having a buffer for it. If we don't capture it, we'll fall back to a general
1771 * varying path (linked or unlinked, possibly discarding the write) */
1772
1773 static bool
1774 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1775 unsigned loc, unsigned max_xfb)
1776 {
1777 if (!(xfb->so_mask & (1ll << loc)))
1778 return false;
1779
1780 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1781 return o->output_buffer < max_xfb;
1782 }
1783
1784 static void
1785 pan_emit_general_varying(struct mali_attribute_packed *out,
1786 struct panfrost_shader_state *other,
1787 struct panfrost_shader_state *xfb,
1788 gl_varying_slot loc,
1789 enum mali_format format,
1790 unsigned present,
1791 unsigned quirks,
1792 unsigned *gen_offsets,
1793 enum mali_format *gen_formats,
1794 unsigned *gen_stride,
1795 unsigned idx,
1796 bool should_alloc)
1797 {
1798 /* Check if we're linked */
1799 signed other_idx = -1;
1800
1801 for (unsigned j = 0; j < other->varying_count; ++j) {
1802 if (other->varyings_loc[j] == loc) {
1803 other_idx = j;
1804 break;
1805 }
1806 }
1807
1808 if (other_idx < 0) {
1809 pan_emit_vary_only(out, present, quirks);
1810 return;
1811 }
1812
1813 unsigned offset = gen_offsets[other_idx];
1814
1815 if (should_alloc) {
1816 /* We're linked, so allocate space via a watermark allocation */
1817 enum mali_format alt = other->varyings[other_idx];
1818
1819 /* Do interpolation at minimum precision */
1820 unsigned size_main = pan_varying_size(format);
1821 unsigned size_alt = pan_varying_size(alt);
1822 unsigned size = MIN2(size_main, size_alt);
1823
1824 /* If a varying is marked for XFB but not actually captured, we
1825 * should match the format to the format that would otherwise
1826 * be used for XFB, since dEQP checks for invariance here. It's
1827 * unclear if this is required by the spec. */
1828
1829 if (xfb->so_mask & (1ull << loc)) {
1830 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1831 format = pan_xfb_format(format, o->num_components);
1832 size = pan_varying_size(format);
1833 } else if (size == size_alt) {
1834 format = alt;
1835 }
1836
1837 gen_offsets[idx] = *gen_stride;
1838 gen_formats[other_idx] = format;
1839 offset = *gen_stride;
1840 *gen_stride += size;
1841 }
1842
1843 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1844 }
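
/* The watermark allocation above, by example (illustrative only): if the
 * first linked varying is sized at 16 bytes and the next at 8, the first gets
 * offset 0 and bumps *gen_stride to 16, the second gets offset 16 and leaves
 * *gen_stride = 24 as the final per-vertex stride of the general buffer. */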
1845
1846 /* Higher-level wrapper around all of the above, classifying a varying into
1847 * one of the cases above and dispatching to the matching emitter */
1848
1849 static void
1850 panfrost_emit_varying(
1851 struct mali_attribute_packed *out,
1852 struct panfrost_shader_state *stage,
1853 struct panfrost_shader_state *other,
1854 struct panfrost_shader_state *xfb,
1855 unsigned present,
1856 unsigned max_xfb,
1857 unsigned *streamout_offsets,
1858 unsigned quirks,
1859 unsigned *gen_offsets,
1860 enum mali_format *gen_formats,
1861 unsigned *gen_stride,
1862 unsigned idx,
1863 bool should_alloc,
1864 bool is_fragment)
1865 {
1866 gl_varying_slot loc = stage->varyings_loc[idx];
1867 enum mali_format format = stage->varyings[idx];
1868
1869 /* Override format to match linkage */
1870 if (!should_alloc && gen_formats[idx])
1871 format = gen_formats[idx];
1872
1873 if (has_point_coord(stage->point_sprite_mask, loc)) {
1874 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1875 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1876 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1877 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1878 } else if (loc == VARYING_SLOT_POS) {
1879 if (is_fragment)
1880 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1881 else
1882 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1883 } else if (loc == VARYING_SLOT_PSIZ) {
1884 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1885 } else if (loc == VARYING_SLOT_PNTC) {
1886 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1887 } else if (loc == VARYING_SLOT_FACE) {
1888 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1889 } else {
1890 pan_emit_general_varying(out, other, xfb, loc, format, present,
1891 quirks, gen_offsets, gen_formats, gen_stride,
1892 idx, should_alloc);
1893 }
1894 }
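
/* Classification priority, for reference: point sprite replacement wins over
 * everything, then captured XFB varyings, then the fixed special slots
 * (position/fragcoord, point size, point coord, front face), and only then
 * the general linked/unlinked path. */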
1895
1896 static void
1897 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1898 unsigned present,
1899 enum pan_special_varying v,
1900 unsigned special)
1901 {
1902 if (present & (1 << v)) {
1903 unsigned idx = pan_varying_index(present, v);
1904
1905 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1906 cfg.special = special;
1907 cfg.type = 0;
1908 }
1909 }
1910 }
1911
1912 void
1913 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1914 unsigned vertex_count,
1915 struct mali_vertex_tiler_postfix *vertex_postfix,
1916 struct mali_vertex_tiler_postfix *tiler_postfix,
1917 union midgard_primitive_size *primitive_size)
1918 {
1919 /* Load the shaders */
1920 struct panfrost_context *ctx = batch->ctx;
1921 struct panfrost_device *dev = pan_device(ctx->base.screen);
1922 struct panfrost_shader_state *vs, *fs;
1923 size_t vs_size, fs_size;
1924
1925 /* Allocate the varying descriptor */
1926
1927 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1928 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1929 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1930 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1931
1932 struct panfrost_transfer trans = panfrost_pool_alloc_aligned(
1933 &batch->pool, vs_size + fs_size, MALI_ATTRIBUTE_LENGTH);
1934
1935 struct pipe_stream_output_info *so = &vs->stream_output;
1936 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1937
1938 /* Check if this varying is linked by us. This is the case for
1939 * general-purpose, non-captured varyings. If it is, link it. If it's
1940 * not, use the provided stream out information to determine the
1941 * offset, since it was already linked for us. */
1942
1943 unsigned gen_offsets[32];
1944 enum mali_format gen_formats[32];
1945 memset(gen_offsets, 0, sizeof(gen_offsets));
1946 memset(gen_formats, 0, sizeof(gen_formats));
1947
1948 unsigned gen_stride = 0;
1949 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1950 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1951
1952 unsigned streamout_offsets[32];
1953
1954 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1955 streamout_offsets[i] = panfrost_streamout_offset(
1956 so->stride[i],
1957 ctx->streamout.offsets[i],
1958 ctx->streamout.targets[i]);
1959 }
1960
1961 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1962 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1963
1964 for (unsigned i = 0; i < vs->varying_count; i++) {
1965 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1966 ctx->streamout.num_targets, streamout_offsets,
1967 dev->quirks,
1968 gen_offsets, gen_formats, &gen_stride, i, true, false);
1969 }
1970
1971 for (unsigned i = 0; i < fs->varying_count; i++) {
1972 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
1973 ctx->streamout.num_targets, streamout_offsets,
1974 dev->quirks,
1975 gen_offsets, gen_formats, &gen_stride, i, false, true);
1976 }
1977
1978 unsigned xfb_base = pan_xfb_base(present);
1979 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1980 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets),
1981 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1982 struct mali_attribute_buffer_packed *varyings =
1983 (struct mali_attribute_buffer_packed *) T.cpu;
1984
1985 /* Emit the stream out buffers */
1986
1987 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
1988 ctx->vertex_count);
1989
1990 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1991 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
1992 so->stride[i],
1993 ctx->streamout.offsets[i],
1994 out_count,
1995 ctx->streamout.targets[i]);
1996 }
1997
1998 panfrost_emit_varyings(batch,
1999 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2000 gen_stride, vertex_count);
2001
2002 /* fp32 vec4 gl_Position */
2003 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2004 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2005 sizeof(float) * 4, vertex_count);
2006
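        /* Point size is stored at half precision (the MALI_R16F entry for
         * PAN_VARY_PSIZ in pan_varying_formats), hence the 2-byte per-vertex
         * stride below. */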
2007 if (present & (1 << PAN_VARY_PSIZ)) {
2008 primitive_size->pointer = panfrost_emit_varyings(batch,
2009 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2010 2, vertex_count);
2011 }
2012
2013 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
2014 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
2015 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
2016
2017 vertex_postfix->varyings = T.gpu;
2018 tiler_postfix->varyings = T.gpu;
2019
2020 vertex_postfix->varying_meta = trans.gpu;
2021 tiler_postfix->varying_meta = trans.gpu + vs_size;
2022 }
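
/* Resulting layout, sketched for reference (not normative): trans holds the
 * per-varying ATTRIBUTE records, vertex-shader records first and fragment-
 * shader records after them (hence the vs_size offset applied to the tiler's
 * varying_meta), while T holds the ATTRIBUTE_BUFFER records: the present
 * special buffers in compacted pan_varying_index order, followed by one
 * record per active transform feedback target. */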
2023
2024 void
2025 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2026 struct mali_vertex_tiler_prefix *vertex_prefix,
2027 struct mali_vertex_tiler_postfix *vertex_postfix,
2028 struct mali_vertex_tiler_prefix *tiler_prefix,
2029 struct mali_vertex_tiler_postfix *tiler_postfix,
2030 union midgard_primitive_size *primitive_size)
2031 {
2032 struct panfrost_context *ctx = batch->ctx;
2033 struct panfrost_device *device = pan_device(ctx->base.screen);
2034 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2035 struct bifrost_payload_vertex bifrost_vertex = {0,};
2036 struct bifrost_payload_tiler bifrost_tiler = {0,};
2037 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2038 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2039 void *vp, *tp;
2040 size_t vp_size, tp_size;
2041
2042 if (device->quirks & IS_BIFROST) {
2043 bifrost_vertex.prefix = *vertex_prefix;
2044 bifrost_vertex.postfix = *vertex_postfix;
2045 vp = &bifrost_vertex;
2046 vp_size = sizeof(bifrost_vertex);
2047
2048 bifrost_tiler.prefix = *tiler_prefix;
2049 bifrost_tiler.tiler.primitive_size = *primitive_size;
2050 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2051 bifrost_tiler.postfix = *tiler_postfix;
2052 tp = &bifrost_tiler;
2053 tp_size = sizeof(bifrost_tiler);
2054 } else {
2055 midgard_vertex.prefix = *vertex_prefix;
2056 midgard_vertex.postfix = *vertex_postfix;
2057 vp = &midgard_vertex;
2058 vp_size = sizeof(midgard_vertex);
2059
2060 midgard_tiler.prefix = *tiler_prefix;
2061 midgard_tiler.postfix = *tiler_postfix;
2062 midgard_tiler.primitive_size = *primitive_size;
2063 tp = &midgard_tiler;
2064 tp_size = sizeof(midgard_tiler);
2065 }
2066
2067 if (wallpapering) {
2068 /* Inject in reverse order, with "predicted" job indices.
2069 * THIS IS A HACK XXX */
2070 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2071 batch->scoreboard.job_index + 2, tp, tp_size, true);
2072 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2073 vp, vp_size, true);
2074 return;
2075 }
2076
2077 /* If rasterizer discard is enabled, only submit the vertex job */
2078
2079 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2080 vp, vp_size, false);
2081
2082 if (ctx->rasterizer->base.rasterizer_discard)
2083 return;
2084
2085 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2086 false);
2087 }
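
/* Scoreboarding recap (for reference): outside of the wallpaper hack, the
 * vertex job is submitted first and its returned index becomes the dependency
 * of the tiler job, so tiling waits on shading; with rasterizer discard the
 * tiler job is never emitted at all. */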
2088
2089 /* TODO: stop hardcoding this */
2090 mali_ptr
2091 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2092 {
2093 uint16_t locations[] = {
2094 128, 128,
2095 0, 256,
2096 0, 256,
2097 0, 256,
2098 0, 256,
2099 0, 256,
2100 0, 256,
2101 0, 256,
2102 0, 256,
2103 0, 256,
2104 0, 256,
2105 0, 256,
2106 0, 256,
2107 0, 256,
2108 0, 256,
2109 0, 256,
2110 0, 256,
2111 0, 256,
2112 0, 256,
2113 0, 256,
2114 0, 256,
2115 0, 256,
2116 0, 256,
2117 0, 256,
2118 0, 256,
2119 0, 256,
2120 0, 256,
2121 0, 256,
2122 0, 256,
2123 0, 256,
2124 0, 256,
2125 0, 256,
2126 128, 128,
2127 0, 0,
2128 0, 0,
2129 0, 0,
2130 0, 0,
2131 0, 0,
2132 0, 0,
2133 0, 0,
2134 0, 0,
2135 0, 0,
2136 0, 0,
2137 0, 0,
2138 0, 0,
2139 0, 0,
2140 0, 0,
2141 0, 0,
2142 };
2143
2144 return panfrost_pool_upload_aligned(&batch->pool, locations, 96 * sizeof(uint16_t), 64);
2145 }