panfrost: Hoist blend finalize calls
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 struct mali_shared_memory shared = {
62 .shared_workgroup_count = ~0,
63 };
64
65 if (batch->stack_size) {
66 struct panfrost_bo *stack =
67 panfrost_batch_get_scratchpad(batch, batch->stack_size,
68 dev->thread_tls_alloc,
69 dev->core_count);
70
71 shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
72 shared.scratchpad = stack->gpu;
73 }
74
75 postfix->shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared, sizeof(shared), 64);
76 }
77
78 static void
79 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_postfix *postfix)
81 {
82 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
83 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
84 }
85
86 static void
87 panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
88 struct mali_vertex_tiler_prefix *prefix,
89 struct mali_vertex_tiler_postfix *postfix)
90 {
91 postfix->gl_enables |= 0x7;
92 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
93 rasterizer->base.front_ccw);
94 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
95 (rasterizer->base.cull_face & PIPE_FACE_FRONT));
96 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
97 (rasterizer->base.cull_face & PIPE_FACE_BACK));
98 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
99 rasterizer->base.flatshade_first);
100 }
101
102 void
103 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
104 struct mali_vertex_tiler_prefix *prefix,
105 union midgard_primitive_size *primitive_size)
106 {
107 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
108
109 if (!panfrost_writes_point_size(ctx)) {
110 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
111 rasterizer->base.point_size :
112 rasterizer->base.line_width;
113
114 primitive_size->constant = val;
115 }
116 }
117
118 static void
119 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
120 struct mali_vertex_tiler_postfix *postfix)
121 {
122 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
123 if (ctx->occlusion_query) {
124 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
125 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
126 PAN_BO_ACCESS_SHARED |
127 PAN_BO_ACCESS_RW |
128 PAN_BO_ACCESS_FRAGMENT);
129 } else {
130 postfix->occlusion_counter = 0;
131 }
132 }
133
134 void
135 panfrost_vt_init(struct panfrost_context *ctx,
136 enum pipe_shader_type stage,
137 struct mali_vertex_tiler_prefix *prefix,
138 struct mali_vertex_tiler_postfix *postfix)
139 {
140 struct panfrost_device *device = pan_device(ctx->base.screen);
141
142 if (!ctx->shader[stage])
143 return;
144
145 memset(prefix, 0, sizeof(*prefix));
146 memset(postfix, 0, sizeof(*postfix));
147
148 if (device->quirks & IS_BIFROST) {
149 postfix->gl_enables = 0x2;
150 panfrost_vt_emit_shared_memory(ctx, postfix);
151 } else {
152 postfix->gl_enables = 0x6;
153 panfrost_vt_attach_framebuffer(ctx, postfix);
154 }
155
156 if (stage == PIPE_SHADER_FRAGMENT) {
157 panfrost_vt_update_occlusion_query(ctx, postfix);
158 panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
159 }
160 }
161
162 static unsigned
163 panfrost_translate_index_size(unsigned size)
164 {
165 switch (size) {
166 case 1:
167 return MALI_DRAW_INDEXED_UINT8;
168
169 case 2:
170 return MALI_DRAW_INDEXED_UINT16;
171
172 case 4:
173 return MALI_DRAW_INDEXED_UINT32;
174
175 default:
176 unreachable("Invalid index size");
177 }
178 }
179
180 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
181 * good for the duration of the draw (transient), could last longer. Also get
182 * the bounds on the index buffer for the range accessed by the draw. We do
183 * these operations together because there are natural optimizations which
184 * require them to be together. */
185
186 static mali_ptr
187 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
188 const struct pipe_draw_info *info,
189 unsigned *min_index, unsigned *max_index)
190 {
191 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
192 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
193 off_t offset = info->start * info->index_size;
194 bool needs_indices = true;
195 mali_ptr out = 0;
196
197 if (info->max_index != ~0u) {
198 *min_index = info->min_index;
199 *max_index = info->max_index;
200 needs_indices = false;
201 }
202
203 if (!info->has_user_indices) {
204 /* Only resources can be directly mapped */
205 panfrost_batch_add_bo(batch, rsrc->bo,
206 PAN_BO_ACCESS_SHARED |
207 PAN_BO_ACCESS_READ |
208 PAN_BO_ACCESS_VERTEX_TILER);
209 out = rsrc->bo->gpu + offset;
210
211 /* Check the cache */
212 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
213 info->start,
214 info->count,
215 min_index,
216 max_index);
217 } else {
218 /* Otherwise, we need to upload to transient memory */
219 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
220 struct panfrost_transfer T =
221 panfrost_pool_alloc_aligned(&batch->pool,
222 info->count * info->index_size,
223 info->index_size);
224
225 memcpy(T.cpu, ibuf8 + offset, info->count * info->index_size);
226 out = T.gpu;
227 }
228
229 if (needs_indices) {
230 /* Fallback */
231 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
232
233 if (!info->has_user_indices)
234 panfrost_minmax_cache_add(rsrc->index_cache,
235 info->start, info->count,
236 *min_index, *max_index);
237 }
238
239 return out;
240 }
241
242 void
243 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
244 const struct pipe_draw_info *info,
245 enum mali_draw_mode draw_mode,
246 struct mali_vertex_tiler_postfix *vertex_postfix,
247 struct mali_vertex_tiler_prefix *tiler_prefix,
248 struct mali_vertex_tiler_postfix *tiler_postfix,
249 unsigned *vertex_count,
250 unsigned *padded_count)
251 {
252 tiler_prefix->draw_mode = draw_mode;
253
254 unsigned draw_flags = 0;
255
256 if (panfrost_writes_point_size(ctx))
257 draw_flags |= MALI_DRAW_VARYING_SIZE;
258
259 if (info->primitive_restart)
260 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
261
262 /* These don't make much sense */
263
264 draw_flags |= 0x3000;
265
266 if (info->index_size) {
267 unsigned min_index = 0, max_index = 0;
268
269 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
270 info,
271 &min_index,
272 &max_index);
273
274 /* Use the corresponding values */
275 *vertex_count = max_index - min_index + 1;
276 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
277 tiler_prefix->offset_bias_correction = -min_index;
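/* Worked example (for illustration only): indices {5, 6, 7} with
 * index_bias == 0 give min_index = 5 and max_index = 7, so
 * vertex_count = 3, offset_start = 5 and offset_bias_correction = -5 */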
278 tiler_prefix->index_count = MALI_POSITIVE(info->count);
279 draw_flags |= panfrost_translate_index_size(info->index_size);
280 } else {
281 tiler_prefix->indices = 0;
282 *vertex_count = ctx->vertex_count;
283 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
284 tiler_prefix->offset_bias_correction = 0;
285 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
286 }
287
288 tiler_prefix->unknown_draw = draw_flags;
289
290 /* Encode the padded vertex count */
291
292 if (info->instance_count > 1) {
293 *padded_count = panfrost_padded_vertex_count(*vertex_count);
294
295 unsigned shift = __builtin_ctz(ctx->padded_count);
296 unsigned k = ctx->padded_count >> (shift + 1);
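/* This decomposes the padded count as (2 * k + 1) << shift. For example,
 * a padded count of 12 yields shift = 2 and k = 1, since
 * (2 * 1 + 1) << 2 = 12 */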
297
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
300 } else {
301 *padded_count = *vertex_count;
302
303 /* Reset instancing state */
304 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
305 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
306 }
307 }
308
309 static void
310 panfrost_shader_meta_init(struct panfrost_context *ctx,
311 enum pipe_shader_type st,
312 struct mali_shader_meta *meta)
313 {
314 const struct panfrost_device *dev = pan_device(ctx->base.screen);
315 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
316
317 memset(meta, 0, sizeof(*meta));
318 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
319 meta->attribute_count = ss->attribute_count;
320 meta->varying_count = ss->varying_count;
321 meta->texture_count = ctx->sampler_view_count[st];
322 meta->sampler_count = ctx->sampler_count[st];
323
324 if (dev->quirks & IS_BIFROST) {
325 if (st == PIPE_SHADER_VERTEX)
326 meta->bifrost1.unk1 = 0x800000;
327 else {
328 /* First clause ATEST |= 0x4000000.
329 * Less than 32 regs |= 0x200 */
330 meta->bifrost1.unk1 = 0x950020;
331 }
332
333 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
334 if (st == PIPE_SHADER_VERTEX)
335 meta->bifrost2.preload_regs = 0xC0;
336 else {
337 meta->bifrost2.preload_regs = 0x1;
338 SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
339 }
340
341 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
342 ss->uniform_cutoff);
343 } else {
344 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
345 ss->uniform_cutoff);
346 meta->midgard1.work_count = ss->work_reg_count;
347
348 /* TODO: This is not conformant on ES3 */
349 meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
350
351 meta->midgard1.flags_lo = 0x20;
352 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
353
354 SET_BIT(meta->midgard1.flags_lo, MALI_WRITES_GLOBAL, ss->writes_global);
355 }
356 }
357
358 static unsigned
359 translate_tex_wrap(enum pipe_tex_wrap w)
360 {
361 switch (w) {
362 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
363 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
364 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
365 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
366 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
367 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
368 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
369 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
370 default: unreachable("Invalid wrap");
371 }
372 }
373
374 /* The hardware compares in the wrong order, so we have to flip before
375 * encoding. Yes, really. */
376
377 static enum mali_func
378 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
379 {
380 if (!cso->compare_mode)
381 return MALI_FUNC_NEVER;
382
383 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
384 return panfrost_flip_compare_func(f);
385 }
386
387 static enum mali_mipmap_mode
388 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
389 {
390 switch (f) {
391 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
392 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
393 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
394 default: unreachable("Invalid");
395 }
396 }
397
398 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
399 struct mali_midgard_sampler_packed *hw)
400 {
401 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
402 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
403 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
404 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
405 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
406 cfg.normalized_coordinates = cso->normalized_coords;
407
408 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
409
410 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
411
412 /* If necessary, we disable mipmapping in the sampler descriptor by
413 * clamping the LOD as tight as possible (from 0 to epsilon,
414 * essentially -- remember these are fixed point numbers, so
415 * epsilon=1/256) */
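/* For illustration: with min_lod = 2.0 the clamp becomes [2.0, 2.0 + 1/256],
 * which effectively pins sampling to the level selected by minimum_lod */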
416
417 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
418 cfg.minimum_lod + 1 :
419 FIXED_16(cso->max_lod, false);
420
421 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
422 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
423 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
424
425 cfg.compare_function = panfrost_sampler_compare_func(cso);
426 cfg.seamless_cube_map = cso->seamless_cube_map;
427
428 cfg.border_color_r = cso->border_color.f[0];
429 cfg.border_color_g = cso->border_color.f[1];
430 cfg.border_color_b = cso->border_color.f[2];
431 cfg.border_color_a = cso->border_color.f[3];
432 }
433 }
434
435 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
436 struct mali_bifrost_sampler_packed *hw)
437 {
438 pan_pack(hw, BIFROST_SAMPLER, cfg) {
439 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
440 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
441 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
442 cfg.normalized_coordinates = cso->normalized_coords;
443
444 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
445 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
446 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
447
448 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
449 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
450 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
451
452 cfg.compare_function = panfrost_sampler_compare_func(cso);
453 cfg.seamless_cube_map = cso->seamless_cube_map;
454 }
455 }
456
457 static void
458 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
459 struct mali_shader_meta *fragmeta)
460 {
461 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
462
463 bool msaa = rast->multisample;
464
465 /* TODO: Sample size */
466 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
467 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
468
469 struct panfrost_shader_state *fs;
470 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
471
472 /* EXT_shader_framebuffer_fetch requires the shader to be run
473 * per-sample when outputs are read. */
474 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
475 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
476
477 fragmeta->depth_units = rast->offset_units * 2.0f;
478 fragmeta->depth_factor = rast->offset_scale;
479
480 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
481
482 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
483 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
484
485 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
486 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
487 }
488
489 static void
490 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
491 struct mali_shader_meta *fragmeta)
492 {
493 const struct panfrost_zsa_state *so = ctx->depth_stencil;
494
495 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
496 so->base.stencil[0].enabled);
497
498 fragmeta->stencil_mask_front = so->stencil_mask_front;
499 fragmeta->stencil_mask_back = so->stencil_mask_back;
500
501 /* Bottom bits for stencil ref, exactly one word */
502 fragmeta->stencil_front.opaque[0] = so->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
503
504 /* If back-stencil is not enabled, use the front values */
505
506 if (so->base.stencil[1].enabled)
507 fragmeta->stencil_back.opaque[0] = so->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
508 else
509 fragmeta->stencil_back = fragmeta->stencil_front;
510
511 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
512 so->base.depth.writemask);
513
514 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
515 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(
516 so->base.depth.enabled ? so->base.depth.func : PIPE_FUNC_ALWAYS));
517 }
518
519 static bool
520 panfrost_fs_required(
521 struct panfrost_shader_state *fs,
522 struct panfrost_blend_final *blend,
523 unsigned rt_count)
524 {
525 /* If we generally have side effects */
526 if (fs->fs_sidefx)
527 return true;
528
529 /* If colour is written we need to execute */
530 for (unsigned i = 0; i < rt_count; ++i) {
531 if (!blend[i].no_colour)
532 return true;
533 }
534
535 /* If depth is written and not implied we need to execute.
536 * TODO: Predicate on Z/S writes being enabled */
537 return (fs->writes_depth || fs->writes_stencil);
538 }
539
540 static void
541 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
542 struct mali_shader_meta *fragmeta,
543 void *rts,
544 struct panfrost_blend_final *blend)
545 {
546 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
547 const struct panfrost_device *dev = pan_device(ctx->base.screen);
548 struct panfrost_shader_state *fs;
549 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
550
551 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
552 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
553 !ctx->blend->base.dither);
554
555 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
556 ctx->blend->base.alpha_to_coverage);
557
558 /* Get blending setup */
559 unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
560
561 /* Disable shader execution if we can */
562 if (dev->quirks & MIDGARD_SHADERLESS
563 && !panfrost_fs_required(fs, blend, rt_count)) {
564 fragmeta->shader = 0;
565 fragmeta->attribute_count = 0;
566 fragmeta->varying_count = 0;
567 fragmeta->texture_count = 0;
568 fragmeta->sampler_count = 0;
569
570 /* This feature is not known to work on Bifrost */
571 fragmeta->midgard1.work_count = 1;
572 fragmeta->midgard1.uniform_count = 0;
573 fragmeta->midgard1.uniform_buffer_count = 0;
574 }
575
576 /* If there is a blend shader, work registers are shared. We impose 8
577 * work registers as a limit for blend shaders. Should be lower XXX */
578
579 if (!(dev->quirks & IS_BIFROST)) {
580 for (unsigned c = 0; c < rt_count; ++c) {
581 if (blend[c].is_shader) {
582 fragmeta->midgard1.work_count =
583 MAX2(fragmeta->midgard1.work_count, 8);
584 }
585 }
586 }
587
588 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
589 * copied to the blend_meta appended (by convention), but this is the
590 * field actually read by the hardware. (Or maybe both are read...?).
591 * Specify the last RTi with a blend shader. */
592
593 fragmeta->blend.shader = 0;
594
595 for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
596 if (!blend[rt].is_shader)
597 continue;
598
599 fragmeta->blend.shader = blend[rt].shader.gpu |
600 blend[rt].shader.first_tag;
601 break;
602 }
603
604 if (dev->quirks & MIDGARD_SFBD) {
605 /* On platforms with only a single render target (SFBD), the blend
606 * information is inside the shader meta itself. We additionally
607 * need to signal CAN_DISCARD for nontrivial blend modes (so
608 * we're able to read back the destination buffer) */
609
610 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
611 blend[0].is_shader);
612
613 if (!blend[0].is_shader) {
614 fragmeta->blend.equation = *blend[0].equation.equation;
615 fragmeta->blend.constant = blend[0].equation.constant;
616 }
617
618 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
619 !blend[0].no_blending || fs->can_discard);
620
621 batch->draws |= PIPE_CLEAR_COLOR0;
622 return;
623 }
624
625 if (dev->quirks & IS_BIFROST) {
626 bool no_blend = true;
627
628 for (unsigned i = 0; i < rt_count; ++i)
629 no_blend &= (blend[i].no_blending | blend[i].no_colour);
630
631 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
632 !fs->can_discard && !fs->writes_depth && no_blend);
633 }
634
635 /* Additional blend descriptor tacked on for jobs using MFBD */
636
637 struct bifrost_blend_rt *brts = rts;
638 struct midgard_blend_rt *mrts = rts;
639
640 /* Disable blending for depth-only on Bifrost */
641
642 if (rt_count == 0 && dev->quirks & IS_BIFROST)
643 brts[0].unk2 = 0x3;
644
645 for (unsigned i = 0; i < rt_count; ++i) {
646 unsigned flags = 0;
647
648 if (!blend[i].no_colour) {
649 flags = 0x200;
650 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
651
652 bool is_srgb = util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
653
654 SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
655 SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
656 SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
657 SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
658 }
659
660 if (dev->quirks & IS_BIFROST) {
661 brts[i].flags = flags;
662
663 if (blend[i].is_shader) {
664 /* The blend shader's address needs to be at
665 * the same top 32 bit as the fragment shader.
666 * TODO: Ensure that's always the case.
667 */
668 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
669 (fs->bo->gpu & (0xffffffffull << 32)));
670 brts[i].shader = blend[i].shader.gpu;
671 brts[i].unk2 = 0x0;
672 } else {
673 enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
674 const struct util_format_description *format_desc;
675 format_desc = util_format_description(format);
676
677 brts[i].equation = *blend[i].equation.equation;
678
679 /* TODO: this is a bit more complicated */
680 brts[i].constant = blend[i].equation.constant;
681
682 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
683
684 /* 0x19 disables blending and forces REPLACE
685 * mode (equivalent to rgb_mode = alpha_mode =
686 * x122, colour mask = 0xF). 0x1a allows
687 * blending. */
688 brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
689
690 brts[i].shader_type = fs->blend_types[i];
691 }
692 } else {
693 mrts[i].flags = flags;
694
695 if (blend[i].is_shader) {
696 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
697 } else {
698 mrts[i].blend.equation = *blend[i].equation.equation;
699 mrts[i].blend.constant = blend[i].equation.constant;
700 }
701 }
702 }
703 }
704
705 static void
706 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
707 struct mali_shader_meta *fragmeta,
708 void *rts,
709 struct panfrost_blend_final *blend)
710 {
711 const struct panfrost_device *dev = pan_device(ctx->base.screen);
712 struct panfrost_shader_state *fs;
713
714 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
715
716 bool msaa = ctx->rasterizer->base.multisample;
717 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
718
719 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
720 fragmeta->unknown2_4 = 0x4e0;
721
722 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
723 * is required (independent of 32-bit/64-bit descriptors), or why it's
724 * not used on later GPU revisions. Otherwise, all shader jobs fault on
725 * these earlier chips (perhaps this is a chicken bit of some kind).
726 * More investigation is needed. */
727
728 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
729
730 if (dev->quirks & IS_BIFROST) {
731 /* TODO */
732 } else {
733 /* Depending on whether it's legal in the given shader, we try to
734 * enable early-z testing. TODO: respect e-z force */
735
736 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
737 !fs->can_discard && !fs->writes_global &&
738 !fs->writes_depth && !fs->writes_stencil &&
739 !ctx->blend->base.alpha_to_coverage);
740
741 /* Add the writes Z/S flags if needed. */
742 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
743 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
744
745 /* Any time texturing is used, derivatives are implicitly calculated,
746 * so we need to enable helper invocations */
747
748 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
749 fs->helper_invocations);
750
751 /* If discard is enabled, which bit we set to convey this
752 * depends on if depth/stencil is used for the draw or not.
753 * Just one of depth OR stencil is enough to trigger this. */
754
755 const struct pipe_depth_stencil_alpha_state *zsa = &ctx->depth_stencil->base;
756 bool zs_enabled =
757 fs->writes_depth || fs->writes_stencil ||
758 (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS) ||
759 zsa->stencil[0].enabled;
760
761 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
762 fs->outputs_read || (!zs_enabled && fs->can_discard));
763 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
764 }
765
766 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
767 panfrost_frag_meta_zsa_update(ctx, fragmeta);
768 panfrost_frag_meta_blend_update(ctx, fragmeta, rts, blend);
769 }
770
771 void
772 panfrost_emit_shader_meta(struct panfrost_batch *batch,
773 enum pipe_shader_type st,
774 struct mali_vertex_tiler_postfix *postfix)
775 {
776 struct panfrost_context *ctx = batch->ctx;
777 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
778
779 if (!ss) {
780 postfix->shader = 0;
781 return;
782 }
783
784 struct mali_shader_meta meta;
785
786 panfrost_shader_meta_init(ctx, st, &meta);
787
788 /* Add the shader BO to the batch. */
789 panfrost_batch_add_bo(batch, ss->bo,
790 PAN_BO_ACCESS_PRIVATE |
791 PAN_BO_ACCESS_READ |
792 panfrost_bo_access_for_stage(st));
793
794 mali_ptr shader_ptr;
795
796 if (st == PIPE_SHADER_FRAGMENT) {
797 struct panfrost_device *dev = pan_device(ctx->base.screen);
798 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
799 size_t desc_size = sizeof(meta);
800 void *rts = NULL;
801 struct panfrost_transfer xfer;
802 unsigned rt_size;
803
804 if (dev->quirks & MIDGARD_SFBD)
805 rt_size = 0;
806 else if (dev->quirks & IS_BIFROST)
807 rt_size = sizeof(struct bifrost_blend_rt);
808 else
809 rt_size = sizeof(struct midgard_blend_rt);
810
811 desc_size += rt_size * rt_count;
812
813 if (rt_size)
814 rts = rzalloc_size(ctx, rt_size * rt_count);
815
816 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
817
818 for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
819 blend[c] = panfrost_get_blend_for_context(ctx, c);
820
821 panfrost_frag_shader_meta_init(ctx, &meta, rts, blend);
822
823 xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, sizeof(meta));
824
825 memcpy(xfer.cpu, &meta, sizeof(meta));
826 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
827
828 if (rt_size)
829 ralloc_free(rts);
830
831 shader_ptr = xfer.gpu;
832 } else {
833 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
834 sizeof(meta));
835 }
836
837 postfix->shader = shader_ptr;
838 }
839
840 void
841 panfrost_emit_viewport(struct panfrost_batch *batch,
842 struct mali_vertex_tiler_postfix *tiler_postfix)
843 {
844 struct panfrost_context *ctx = batch->ctx;
845 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
846 const struct pipe_scissor_state *ss = &ctx->scissor;
847 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
848 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
849
850 /* Derive min/max from translate/scale. Note since |x| >= 0 by
851 * definition, we have that -|x| <= |x| hence translate - |scale| <=
852 * translate + |scale|, so the ordering is correct here. */
853 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
854 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
855 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
856 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
857 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
858 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
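/* For example, a 256-pixel-wide viewport at x = 0 has scale[0] = 128 and
 * translate[0] = 128, giving vp_minx = 0 and vp_maxx = 256 */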
859
860 /* Scissor to the intersection of the viewport and the scissor, clamped
861 * to the framebuffer */
862
863 unsigned minx = MIN2(fb->width, vp_minx);
864 unsigned maxx = MIN2(fb->width, vp_maxx);
865 unsigned miny = MIN2(fb->height, vp_miny);
866 unsigned maxy = MIN2(fb->height, vp_maxy);
867
868 if (ss && rast->scissor) {
869 minx = MAX2(ss->minx, minx);
870 miny = MAX2(ss->miny, miny);
871 maxx = MIN2(ss->maxx, maxx);
872 maxy = MIN2(ss->maxy, maxy);
873 }
874
875 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
876
877 pan_pack(T.cpu, VIEWPORT, cfg) {
878 cfg.scissor_minimum_x = minx;
879 cfg.scissor_minimum_y = miny;
880 cfg.scissor_maximum_x = maxx - 1;
881 cfg.scissor_maximum_y = maxy - 1;
882
883 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
884 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
885 }
886
887 tiler_postfix->viewport = T.gpu;
888 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
889 }
890
891 static mali_ptr
892 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
893 enum pipe_shader_type st,
894 struct panfrost_constant_buffer *buf,
895 unsigned index)
896 {
897 struct pipe_constant_buffer *cb = &buf->cb[index];
898 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
899
900 if (rsrc) {
901 panfrost_batch_add_bo(batch, rsrc->bo,
902 PAN_BO_ACCESS_SHARED |
903 PAN_BO_ACCESS_READ |
904 panfrost_bo_access_for_stage(st));
905
906 /* Alignment guaranteed by
907 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
908 return rsrc->bo->gpu + cb->buffer_offset;
909 } else if (cb->user_buffer) {
910 return panfrost_pool_upload_aligned(&batch->pool,
911 cb->user_buffer +
912 cb->buffer_offset,
913 cb->buffer_size, 16);
914 } else {
915 unreachable("No constant buffer");
916 }
917 }
918
919 struct sysval_uniform {
920 union {
921 float f[4];
922 int32_t i[4];
923 uint32_t u[4];
924 uint64_t du[2];
925 };
926 };
927
928 static void
929 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
930 struct sysval_uniform *uniform)
931 {
932 struct panfrost_context *ctx = batch->ctx;
933 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
934
935 uniform->f[0] = vp->scale[0];
936 uniform->f[1] = vp->scale[1];
937 uniform->f[2] = vp->scale[2];
938 }
939
940 static void
941 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
942 struct sysval_uniform *uniform)
943 {
944 struct panfrost_context *ctx = batch->ctx;
945 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
946
947 uniform->f[0] = vp->translate[0];
948 uniform->f[1] = vp->translate[1];
949 uniform->f[2] = vp->translate[2];
950 }
951
952 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
953 enum pipe_shader_type st,
954 unsigned int sysvalid,
955 struct sysval_uniform *uniform)
956 {
957 struct panfrost_context *ctx = batch->ctx;
958 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
959 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
960 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
961 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
962
963 assert(dim);
964 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
965
966 if (dim > 1)
967 uniform->i[1] = u_minify(tex->texture->height0,
968 tex->u.tex.first_level);
969
970 if (dim > 2)
971 uniform->i[2] = u_minify(tex->texture->depth0,
972 tex->u.tex.first_level);
973
974 if (is_array)
975 uniform->i[dim] = tex->texture->array_size;
976 }
977
978 static void
979 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
980 enum pipe_shader_type st,
981 unsigned ssbo_id,
982 struct sysval_uniform *uniform)
983 {
984 struct panfrost_context *ctx = batch->ctx;
985
986 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
987 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
988
989 /* Compute address */
990 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
991
992 panfrost_batch_add_bo(batch, bo,
993 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
994 panfrost_bo_access_for_stage(st));
995
996 /* Upload address and size as sysval */
997 uniform->du[0] = bo->gpu + sb.buffer_offset;
998 uniform->u[2] = sb.buffer_size;
999 }
1000
1001 static void
1002 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
1003 enum pipe_shader_type st,
1004 unsigned samp_idx,
1005 struct sysval_uniform *uniform)
1006 {
1007 struct panfrost_context *ctx = batch->ctx;
1008 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1009
1010 uniform->f[0] = sampl->min_lod;
1011 uniform->f[1] = sampl->max_lod;
1012 uniform->f[2] = sampl->lod_bias;
1013
1014 /* Even without any errata, Midgard represents "no mipmapping" as
1015 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1016 * panfrost_create_sampler_state which also explains our choice of
1017 * epsilon value (again to keep behaviour consistent) */
1018
1019 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1020 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1021 }
1022
1023 static void
1024 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1025 struct sysval_uniform *uniform)
1026 {
1027 struct panfrost_context *ctx = batch->ctx;
1028
1029 uniform->u[0] = ctx->compute_grid->grid[0];
1030 uniform->u[1] = ctx->compute_grid->grid[1];
1031 uniform->u[2] = ctx->compute_grid->grid[2];
1032 }
1033
1034 static void
1035 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1036 struct panfrost_shader_state *ss,
1037 enum pipe_shader_type st)
1038 {
1039 struct sysval_uniform *uniforms = (void *)buf;
1040
1041 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1042 int sysval = ss->sysval[i];
1043
1044 switch (PAN_SYSVAL_TYPE(sysval)) {
1045 case PAN_SYSVAL_VIEWPORT_SCALE:
1046 panfrost_upload_viewport_scale_sysval(batch,
1047 &uniforms[i]);
1048 break;
1049 case PAN_SYSVAL_VIEWPORT_OFFSET:
1050 panfrost_upload_viewport_offset_sysval(batch,
1051 &uniforms[i]);
1052 break;
1053 case PAN_SYSVAL_TEXTURE_SIZE:
1054 panfrost_upload_txs_sysval(batch, st,
1055 PAN_SYSVAL_ID(sysval),
1056 &uniforms[i]);
1057 break;
1058 case PAN_SYSVAL_SSBO:
1059 panfrost_upload_ssbo_sysval(batch, st,
1060 PAN_SYSVAL_ID(sysval),
1061 &uniforms[i]);
1062 break;
1063 case PAN_SYSVAL_NUM_WORK_GROUPS:
1064 panfrost_upload_num_work_groups_sysval(batch,
1065 &uniforms[i]);
1066 break;
1067 case PAN_SYSVAL_SAMPLER:
1068 panfrost_upload_sampler_sysval(batch, st,
1069 PAN_SYSVAL_ID(sysval),
1070 &uniforms[i]);
1071 break;
1072 default:
1073 assert(0);
1074 }
1075 }
1076 }
1077
1078 static const void *
1079 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1080 unsigned index)
1081 {
1082 struct pipe_constant_buffer *cb = &buf->cb[index];
1083 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1084
1085 if (rsrc)
1086 return rsrc->bo->cpu;
1087 else if (cb->user_buffer)
1088 return cb->user_buffer;
1089 else
1090 unreachable("No constant buffer");
1091 }
1092
1093 void
1094 panfrost_emit_const_buf(struct panfrost_batch *batch,
1095 enum pipe_shader_type stage,
1096 struct mali_vertex_tiler_postfix *postfix)
1097 {
1098 struct panfrost_context *ctx = batch->ctx;
1099 struct panfrost_shader_variants *all = ctx->shader[stage];
1100
1101 if (!all)
1102 return;
1103
1104 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1105
1106 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1107
1108 /* Uniforms are implicitly UBO #0 */
1109 bool has_uniforms = buf->enabled_mask & (1 << 0);
1110
1111 /* Allocate room for the sysval and the uniforms */
1112 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1113 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1114 size_t size = sys_size + uniform_size;
1115 struct panfrost_transfer transfer =
1116 panfrost_pool_alloc_aligned(&batch->pool, size, 16);
1117
1118 /* Upload sysvals requested by the shader */
1119 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1120
1121 /* Upload uniforms */
1122 if (has_uniforms && uniform_size) {
1123 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1124 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1125 }
1126
1127 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1128 * uploaded */
1129
1130 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1131 assert(ubo_count >= 1);
1132
1133 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1134 struct panfrost_transfer ubos =
1135 panfrost_pool_alloc_aligned(&batch->pool, sz,
1136 MALI_UNIFORM_BUFFER_LENGTH);
1137
1138 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1139
1140 /* Upload uniforms as a UBO */
1141
1142 if (ss->uniform_count) {
1143 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1144 cfg.entries = ss->uniform_count;
1145 cfg.pointer = transfer.gpu;
1146 }
1147 } else {
1148 *ubo_ptr = 0;
1149 }
1150
1151 /* The rest are honest-to-goodness UBOs */
1152
1153 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1154 size_t usz = buf->cb[ubo].buffer_size;
1155 bool enabled = buf->enabled_mask & (1 << ubo);
1156 bool empty = usz == 0;
1157
1158 if (!enabled || empty) {
1159 ubo_ptr[ubo] = 0;
1160 continue;
1161 }
1162
1163 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1164 cfg.entries = DIV_ROUND_UP(usz, 16);
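/* Entries are in 16-byte units, so e.g. a 100-byte UBO needs
 * DIV_ROUND_UP(100, 16) = 7 entries */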
1165 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1166 stage, buf, ubo);
1167 }
1168 }
1169
1170 postfix->uniforms = transfer.gpu;
1171 postfix->uniform_buffers = ubos.gpu;
1172
1173 buf->dirty_mask = 0;
1174 }
1175
1176 void
1177 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1178 const struct pipe_grid_info *info,
1179 struct midgard_payload_vertex_tiler *vtp)
1180 {
1181 struct panfrost_context *ctx = batch->ctx;
1182 struct panfrost_device *dev = pan_device(ctx->base.screen);
1183 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1184 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1185 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1186 128));
1187
1188 unsigned log2_instances =
1189 util_logbase2_ceil(info->grid[0]) +
1190 util_logbase2_ceil(info->grid[1]) +
1191 util_logbase2_ceil(info->grid[2]);
1192
1193 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
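/* For example, a (4, 3, 2) grid gives log2_instances = 2 + 2 + 1 = 5, so
 * room for 1 << 5 = 32 copies of the per-workgroup allocation is reserved
 * on each core */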
1194 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1195 shared_size,
1196 1);
1197
1198 struct mali_shared_memory shared = {
1199 .shared_memory = bo->gpu,
1200 .shared_workgroup_count = log2_instances,
1201 .shared_shift = util_logbase2(single_size) + 1
1202 };
1203
1204 vtp->postfix.shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared,
1205 sizeof(shared), 64);
1206 }
1207
1208 static mali_ptr
1209 panfrost_get_tex_desc(struct panfrost_batch *batch,
1210 enum pipe_shader_type st,
1211 struct panfrost_sampler_view *view)
1212 {
1213 if (!view)
1214 return (mali_ptr) 0;
1215
1216 struct pipe_sampler_view *pview = &view->base;
1217 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1218
1219 /* Add the BO to the job so it's retained until the job is done. */
1220
1221 panfrost_batch_add_bo(batch, rsrc->bo,
1222 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1223 panfrost_bo_access_for_stage(st));
1224
1225 panfrost_batch_add_bo(batch, view->bo,
1226 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1227 panfrost_bo_access_for_stage(st));
1228
1229 return view->bo->gpu;
1230 }
1231
1232 static void
1233 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1234 struct pipe_context *pctx)
1235 {
1236 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1237 if (view->texture_bo != rsrc->bo->gpu ||
1238 view->modifier != rsrc->modifier) {
1239 panfrost_bo_unreference(view->bo);
1240 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1241 }
1242 }
1243
1244 void
1245 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1246 enum pipe_shader_type stage,
1247 struct mali_vertex_tiler_postfix *postfix)
1248 {
1249 struct panfrost_context *ctx = batch->ctx;
1250 struct panfrost_device *device = pan_device(ctx->base.screen);
1251
1252 if (!ctx->sampler_view_count[stage])
1253 return;
1254
1255 if (device->quirks & IS_BIFROST) {
1256 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1257 MALI_BIFROST_TEXTURE_LENGTH *
1258 ctx->sampler_view_count[stage],
1259 MALI_BIFROST_TEXTURE_LENGTH);
1260
1261 struct mali_bifrost_texture_packed *out =
1262 (struct mali_bifrost_texture_packed *) T.cpu;
1263
1264 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1265 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1266 struct pipe_sampler_view *pview = &view->base;
1267 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1268
1269 panfrost_update_sampler_view(view, &ctx->base);
1270 out[i] = view->bifrost_descriptor;
1271
1272 /* Add the BOs to the job so they are retained until the job is done. */
1273
1274 panfrost_batch_add_bo(batch, rsrc->bo,
1275 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1276 panfrost_bo_access_for_stage(stage));
1277
1278 panfrost_batch_add_bo(batch, view->bo,
1279 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1280 panfrost_bo_access_for_stage(stage));
1281 }
1282
1283 postfix->textures = T.gpu;
1284 } else {
1285 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1286
1287 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1288 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1289
1290 panfrost_update_sampler_view(view, &ctx->base);
1291
1292 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1293 }
1294
1295 postfix->textures = panfrost_pool_upload_aligned(&batch->pool,
1296 trampolines,
1297 sizeof(uint64_t) *
1298 ctx->sampler_view_count[stage],
1299 sizeof(uint64_t));
1300 }
1301 }
1302
1303 void
1304 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1305 enum pipe_shader_type stage,
1306 struct mali_vertex_tiler_postfix *postfix)
1307 {
1308 struct panfrost_context *ctx = batch->ctx;
1309
1310 if (!ctx->sampler_count[stage])
1311 return;
1312
1313 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1314 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1315
1316 size_t sz = desc_size * ctx->sampler_count[stage];
1317 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool, sz, desc_size);
1318 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1319
1320 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1321 out[i] = ctx->samplers[stage][i]->hw;
1322
1323 postfix->sampler_descriptor = T.gpu;
1324 }
1325
1326 void
1327 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1328 struct mali_vertex_tiler_postfix *vertex_postfix)
1329 {
1330 struct panfrost_context *ctx = batch->ctx;
1331 struct panfrost_vertex_state *so = ctx->vertex;
1332 struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1333
1334 unsigned instance_shift = vertex_postfix->instance_shift;
1335 unsigned instance_odd = vertex_postfix->instance_odd;
1336
1337 /* Worst case: everything is NPOT, which is only possible if instancing
1338 * is enabled. Otherwise a single record is guaranteed */
1339 bool could_npot = instance_shift || instance_odd;
1340
1341 struct panfrost_transfer S = panfrost_pool_alloc_aligned(&batch->pool,
1342 MALI_ATTRIBUTE_BUFFER_LENGTH * vs->attribute_count *
1343 (could_npot ? 2 : 1),
1344 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1345
1346 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1347 MALI_ATTRIBUTE_LENGTH * vs->attribute_count,
1348 MALI_ATTRIBUTE_LENGTH);
1349
1350 struct mali_attribute_buffer_packed *bufs =
1351 (struct mali_attribute_buffer_packed *) S.cpu;
1352
1353 struct mali_attribute_packed *out =
1354 (struct mali_attribute_packed *) T.cpu;
1355
1356 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1357 unsigned k = 0;
1358
1359 for (unsigned i = 0; i < so->num_elements; ++i) {
1360 /* We map buffers 1:1 with the attributes, which
1361 * means duplicating some vertex buffers (who cares? aside from
1362 * maybe some caching implications but I somehow doubt that
1363 * matters) */
1364
1365 struct pipe_vertex_element *elem = &so->pipe[i];
1366 unsigned vbi = elem->vertex_buffer_index;
1367 attrib_to_buffer[i] = k;
1368
1369 if (!(ctx->vb_mask & (1 << vbi)))
1370 continue;
1371
1372 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1373 struct panfrost_resource *rsrc;
1374
1375 rsrc = pan_resource(buf->buffer.resource);
1376 if (!rsrc)
1377 continue;
1378
1379 /* Add a dependency of the batch on the vertex buffer */
1380 panfrost_batch_add_bo(batch, rsrc->bo,
1381 PAN_BO_ACCESS_SHARED |
1382 PAN_BO_ACCESS_READ |
1383 PAN_BO_ACCESS_VERTEX_TILER);
1384
1385 /* Mask off lower bits, see offset fixup below */
1386 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1387 mali_ptr addr = raw_addr & ~63;
1388
1389 /* Since we advanced the base pointer, we shrink the buffer
1390 * size, but add the offset we subtracted */
1391 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1392 - buf->buffer_offset;
1393
1394 /* When there is a divisor, the hardware-level divisor is
1395 * the product of the instance divisor and the padded count */
1396 unsigned divisor = elem->instance_divisor;
1397 unsigned hw_divisor = ctx->padded_count * divisor;
1398 unsigned stride = buf->stride;
1399
1400 /* If there's a divisor(=1) but no instancing, we want every
1401 * attribute to be the same */
1402
1403 if (divisor && ctx->instance_count == 1)
1404 stride = 0;
1405
1406 if (!divisor || ctx->instance_count <= 1) {
1407 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1408 if (ctx->instance_count > 1)
1409 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1410
1411 cfg.pointer = addr;
1412 cfg.stride = stride;
1413 cfg.size = size;
1414 cfg.divisor_r = instance_shift;
1415 cfg.divisor_p = instance_odd;
1416 }
1417 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1418 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1419 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1420 cfg.pointer = addr;
1421 cfg.stride = stride;
1422 cfg.size = size;
1423 cfg.divisor_r = __builtin_ctz(hw_divisor);
1424 }
1425
1426 } else {
1427 unsigned shift = 0, extra_flags = 0;
1428
1429 unsigned magic_divisor =
1430 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
1431
1432 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1433 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1434 cfg.pointer = addr;
1435 cfg.stride = stride;
1436 cfg.size = size;
1437
1438 cfg.divisor_r = shift;
1439 cfg.divisor_e = extra_flags;
1440 }
1441
1442 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1443 cfg.divisor_numerator = magic_divisor;
1444 cfg.divisor = divisor;
1445 }
1446
1447 ++k;
1448 }
1449
1450 ++k;
1451 }
1452
1453 /* Add special gl_VertexID/gl_InstanceID buffers */
1454
1455 if (unlikely(vs->attribute_count >= PAN_VERTEX_ID)) {
1456 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1457
1458 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1459 cfg.buffer_index = k++;
1460 cfg.format = so->formats[PAN_VERTEX_ID];
1461 }
1462
1463 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1464
1465 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1466 cfg.buffer_index = k++;
1467 cfg.format = so->formats[PAN_INSTANCE_ID];
1468 }
1469 }
1470
1471 /* Attribute addresses require 64-byte alignment, so let:
1472 *
1473 * base' = base & ~63 = base - (base & 63)
1474 * offset' = offset + (base & 63)
1475 *
1476 * Since base' + offset' = base + offset, these are equivalent
1477 * addressing modes and now base is 64 aligned.
1478 */
1479
1480 unsigned start = vertex_postfix->offset_start;
1481
1482 for (unsigned i = 0; i < so->num_elements; ++i) {
1483 unsigned vbi = so->pipe[i].vertex_buffer_index;
1484 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1485
1486 /* Adjust by the masked off bits of the offset. Make sure we
1487 * read src_offset from so->hw (which is not GPU visible)
1488 * rather than target (which is) due to caching effects */
1489
1490 unsigned src_offset = so->pipe[i].src_offset;
1491
1492 /* BOs aligned to 4k so guaranteed aligned to 64 */
1493 src_offset += (buf->buffer_offset & 63);
1494
1495 /* Also, somewhat obscurely, per-instance data needs to be
1496 * offset in response to a delayed start in an indexed draw */
1497
1498 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1499 src_offset -= buf->stride * start;
1500
1501 pan_pack(out + i, ATTRIBUTE, cfg) {
1502 cfg.buffer_index = attrib_to_buffer[i];
1503 cfg.format = so->formats[i];
1504 cfg.offset = src_offset;
1505 }
1506 }
1507
1508 vertex_postfix->attributes = S.gpu;
1509 vertex_postfix->attribute_meta = T.gpu;
1510 }
1511
1512 static mali_ptr
1513 panfrost_emit_varyings(struct panfrost_batch *batch,
1514 struct mali_attribute_buffer_packed *slot,
1515 unsigned stride, unsigned count)
1516 {
1517 unsigned size = stride * count;
1518 mali_ptr ptr = panfrost_pool_alloc_aligned(&batch->invisible_pool, size, 64).gpu;
1519
1520 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1521 cfg.stride = stride;
1522 cfg.size = size;
1523 cfg.pointer = ptr;
1524 }
1525
1526 return ptr;
1527 }
1528
1529 static unsigned
1530 panfrost_streamout_offset(unsigned stride, unsigned offset,
1531 struct pipe_stream_output_target *target)
1532 {
1533 return (target->buffer_offset + (offset * stride * 4)) & 63;
1534 }
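/* For example, a target with buffer_offset = 100 and zero start offset
 * yields a record offset of 100 & 63 = 36, while panfrost_emit_streamout
 * below rounds the buffer pointer itself down to the previous 64-byte
 * boundary -- the same fixup applied to vertex attributes above */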
1535
1536 static void
1537 panfrost_emit_streamout(struct panfrost_batch *batch,
1538 struct mali_attribute_buffer_packed *slot,
1539 unsigned stride_words, unsigned offset, unsigned count,
1540 struct pipe_stream_output_target *target)
1541 {
1542 unsigned stride = stride_words * 4;
1543 unsigned max_size = target->buffer_size;
1544 unsigned expected_size = stride * count;
1545
1546 /* Grab the BO and bind it to the batch */
1547 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1548
1549 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1550 * the perspective of the TILER and FRAGMENT.
1551 */
1552 panfrost_batch_add_bo(batch, bo,
1553 PAN_BO_ACCESS_SHARED |
1554 PAN_BO_ACCESS_RW |
1555 PAN_BO_ACCESS_VERTEX_TILER |
1556 PAN_BO_ACCESS_FRAGMENT);
1557
1558 /* We will have an offset applied to get alignment */
1559 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
1560
1561 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1562 cfg.pointer = (addr & ~63);
1563 cfg.stride = stride;
1564 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1565 }
1566 }
1567
1568 static bool
1569 has_point_coord(unsigned mask, gl_varying_slot loc)
1570 {
1571 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1572 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1573 else if (loc == VARYING_SLOT_PNTC)
1574 return (mask & (1 << 8));
1575 else
1576 return false;
1577 }
1578
1579 /* Helpers for manipulating stream out information so we can pack varyings
1580 * accordingly. Compute the src_offset for a given captured varying */
1581
1582 static struct pipe_stream_output *
1583 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1584 {
1585 for (unsigned i = 0; i < info->num_outputs; ++i) {
1586 if (info->output[i].register_index == loc)
1587 return &info->output[i];
1588 }
1589
1590 unreachable("Varying not captured");
1591 }
1592
1593 static unsigned
1594 pan_varying_size(enum mali_format fmt)
1595 {
1596 unsigned type = MALI_EXTRACT_TYPE(fmt);
1597 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1598 unsigned bits = MALI_EXTRACT_BITS(fmt);
1599 unsigned bpc = 0;
1600
1601 if (bits == MALI_CHANNEL_FLOAT) {
1602 /* No doubles */
1603 bool fp16 = (type == MALI_FORMAT_SINT);
1604 assert(fp16 || (type == MALI_FORMAT_UNORM));
1605
1606 bpc = fp16 ? 2 : 4;
1607 } else {
1608 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1609
1610 /* See the enums */
1611 bits = 1 << bits;
1612 assert(bits >= 8);
1613 bpc = bits / 8;
1614 }
1615
1616 return bpc * chan;
1617 }
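/* e.g. a four-channel fp16 varying occupies 2 * 4 = 8 bytes, while a
 * four-channel 32-bit one occupies 4 * 4 = 16 bytes */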
1618
1619 /* Indices for named (non-XFB) varyings that are present. These are packed
1620 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1621 * PAN_VARY_*). This has the nice property that you can lookup the buffer index
1622 * of a given special field given a shift S by:
1623 *
1624 * idx = popcount(P & ((1 << S) - 1))
1625 *
1626 * That is... look at all of the varyings that come earlier and count them, the
1627 * count is the index of this varying. Likewise, the total number of special
1628 * buffers required is simply popcount(P)
1629 */
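/* For example, with general, position and point size present, P = 0b111;
 * the point size buffer (S = PAN_VARY_PSIZ = 2) lands at index
 * popcount(0b111 & 0b011) = 2, and popcount(P) = 3 special buffers precede
 * any XFB targets */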
1630
1631 enum pan_special_varying {
1632 PAN_VARY_GENERAL = 0,
1633 PAN_VARY_POSITION = 1,
1634 PAN_VARY_PSIZ = 2,
1635 PAN_VARY_PNTCOORD = 3,
1636 PAN_VARY_FACE = 4,
1637 PAN_VARY_FRAGCOORD = 5,
1638
1639 /* Keep last */
1640 PAN_VARY_MAX,
1641 };
1642
1643 /* Given a varying, figure out which index it corresponds to */
1644
1645 static inline unsigned
1646 pan_varying_index(unsigned present, enum pan_special_varying v)
1647 {
1648 unsigned mask = (1 << v) - 1;
1649 return util_bitcount(present & mask);
1650 }
1651
1652 /* Get the base offset for XFB buffers, which by convention come after
1653 * everything else. Wrapper function for semantic reasons; by construction this
1654 * is just popcount. */
1655
1656 static inline unsigned
1657 pan_xfb_base(unsigned present)
1658 {
1659 return util_bitcount(present);
1660 }
1661
1662 /* Computes the present mask for varyings so we can start emitting varying records */
1663
1664 static inline unsigned
1665 pan_varying_present(
1666 struct panfrost_shader_state *vs,
1667 struct panfrost_shader_state *fs,
1668 unsigned quirks)
1669 {
1670 /* At the moment we always emit general and position buffers. Not
1671 * strictly necessary but usually harmless */
1672
1673 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1674
1675 /* Enable special buffers by the shader info */
1676
1677 if (vs->writes_point_size)
1678 present |= (1 << PAN_VARY_PSIZ);
1679
1680 if (fs->reads_point_coord)
1681 present |= (1 << PAN_VARY_PNTCOORD);
1682
1683 if (fs->reads_face)
1684 present |= (1 << PAN_VARY_FACE);
1685
1686 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1687 present |= (1 << PAN_VARY_FRAGCOORD);
1688
1689 /* Also, if we have a point sprite, we need a point coord buffer */
1690
1691 for (unsigned i = 0; i < fs->varying_count; i++) {
1692 gl_varying_slot loc = fs->varyings_loc[i];
1693
1694 if (has_point_coord(fs->point_sprite_mask, loc))
1695 present |= (1 << PAN_VARY_PNTCOORD);
1696 }
1697
1698 return present;
1699 }
1700
1701 /* Emitters for varying records */
1702
1703 static void
1704 pan_emit_vary(struct mali_attribute_packed *out,
1705 unsigned present, enum pan_special_varying buf,
1706 unsigned quirks, enum mali_format format,
1707 unsigned offset)
1708 {
1709 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1710 unsigned swizzle = quirks & HAS_SWIZZLES ?
1711 panfrost_get_default_swizzle(nr_channels) :
1712 panfrost_bifrost_swizzle(nr_channels);
1713
1714 pan_pack(out, ATTRIBUTE, cfg) {
1715 cfg.buffer_index = pan_varying_index(present, buf);
1716 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1717 cfg.format = (format << 12) | swizzle;
1718 cfg.offset = offset;
1719 }
1720 }
1721
1722 /* General varying that is unused */
1723
1724 static void
1725 pan_emit_vary_only(struct mali_attribute_packed *out,
1726 unsigned present, unsigned quirks)
1727 {
1728 pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
1729 }
1730
1731 /* Special records */
1732
1733 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1734 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1735 [PAN_VARY_PSIZ] = MALI_R16F,
1736 [PAN_VARY_PNTCOORD] = MALI_R16F,
1737 [PAN_VARY_FACE] = MALI_R32I,
1738 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1739 };
1740
1741 static void
1742 pan_emit_vary_special(struct mali_attribute_packed *out,
1743 unsigned present, enum pan_special_varying buf,
1744 unsigned quirks)
1745 {
1746 assert(buf < PAN_VARY_MAX);
1747 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1748 }
1749
1750 static enum mali_format
1751 pan_xfb_format(enum mali_format format, unsigned nr)
1752 {
1753 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1754 return MALI_R32F | MALI_NR_CHANNELS(nr);
1755 else
1756 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1757 }
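
/* For example (purely illustrative): a varying stored as fp16 but captured
 * with three components comes out as a three-channel 32-bit float format,
 * since captured outputs are widened to 32 bits per channel regardless of the
 * varying's own precision. */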
1758
1759 /* Transform feedback records. Note that struct pipe_stream_output packs into
1760  * 32 bits as a bitfield, smaller than a 64-bit pointer, so we may as well
1761  * pass it by value. */
1762
1763 static void
1764 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1765 unsigned present,
1766 unsigned max_xfb,
1767 unsigned *streamout_offsets,
1768 unsigned quirks,
1769 enum mali_format format,
1770 struct pipe_stream_output o)
1771 {
1772 unsigned swizzle = quirks & HAS_SWIZZLES ?
1773 panfrost_get_default_swizzle(o.num_components) :
1774 panfrost_bifrost_swizzle(o.num_components);
1775
1776 pan_pack(out, ATTRIBUTE, cfg) {
1777 /* XFB buffers come after everything else */
1778 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1779 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1780
1781 /* Override number of channels and precision to highp */
1782 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1783
1784 /* Apply given offsets together */
1785 cfg.offset = (o.dst_offset * 4) /* dwords */
1786 + streamout_offsets[o.output_buffer];
1787 }
1788 }
1789
1790 /* Determine if we should capture a varying for XFB. This requires actually
1791  * having a buffer for it. If we don't capture it, we'll fall back to a general
1792 * varying path (linked or unlinked, possibly discarding the write) */
1793
1794 static bool
1795 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1796 unsigned loc, unsigned max_xfb)
1797 {
1798         if (!(xfb->so_mask & (1ull << loc)))
1799 return false;
1800
1801 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1802 return o->output_buffer < max_xfb;
1803 }
1804
1805 static void
1806 pan_emit_general_varying(struct mali_attribute_packed *out,
1807 struct panfrost_shader_state *other,
1808 struct panfrost_shader_state *xfb,
1809 gl_varying_slot loc,
1810 enum mali_format format,
1811 unsigned present,
1812 unsigned quirks,
1813 unsigned *gen_offsets,
1814 enum mali_format *gen_formats,
1815 unsigned *gen_stride,
1816 unsigned idx,
1817 bool should_alloc)
1818 {
1819 /* Check if we're linked */
1820 signed other_idx = -1;
1821
1822 for (unsigned j = 0; j < other->varying_count; ++j) {
1823 if (other->varyings_loc[j] == loc) {
1824 other_idx = j;
1825 break;
1826 }
1827 }
1828
1829 if (other_idx < 0) {
1830 pan_emit_vary_only(out, present, quirks);
1831 return;
1832 }
1833
1834 unsigned offset = gen_offsets[other_idx];
1835
1836 if (should_alloc) {
1837                 /* We're linked, so carve out space via a watermark allocation */
1838 enum mali_format alt = other->varyings[other_idx];
1839
1840 /* Do interpolation at minimum precision */
1841 unsigned size_main = pan_varying_size(format);
1842 unsigned size_alt = pan_varying_size(alt);
1843 unsigned size = MIN2(size_main, size_alt);
1844
1845 /* If a varying is marked for XFB but not actually captured, we
1846 * should match the format to the format that would otherwise
1847 * be used for XFB, since dEQP checks for invariance here. It's
1848 * unclear if this is required by the spec. */
1849
1850 if (xfb->so_mask & (1ull << loc)) {
1851 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1852 format = pan_xfb_format(format, o->num_components);
1853 size = pan_varying_size(format);
1854 } else if (size == size_alt) {
1855 format = alt;
1856 }
1857
1858 gen_offsets[idx] = *gen_stride;
1859 gen_formats[other_idx] = format;
1860 offset = *gen_stride;
1861 *gen_stride += size;
1862 }
1863
1864 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1865 }
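
/* A worked example of the minimum-precision rule above (purely illustrative):
 * if this stage declares the varying as RGBA32F (16 bytes) while the other
 * stage uses RGBA16F (8 bytes), then size = MIN2(16, 8) = 8, which equals
 * size_alt, so the record adopts the fp16 format and only 8 bytes of stride
 * are allocated -- assuming the varying is not captured for transform
 * feedback, which would instead force the widened XFB format. */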
1866
1867 /* Higher-level wrapper around all of the above, classifying a varying into one
1868  * of the record types above and dispatching to the matching emitter */
1869
1870 static void
1871 panfrost_emit_varying(
1872 struct mali_attribute_packed *out,
1873 struct panfrost_shader_state *stage,
1874 struct panfrost_shader_state *other,
1875 struct panfrost_shader_state *xfb,
1876 unsigned present,
1877 unsigned max_xfb,
1878 unsigned *streamout_offsets,
1879 unsigned quirks,
1880 unsigned *gen_offsets,
1881 enum mali_format *gen_formats,
1882 unsigned *gen_stride,
1883 unsigned idx,
1884 bool should_alloc,
1885 bool is_fragment)
1886 {
1887 gl_varying_slot loc = stage->varyings_loc[idx];
1888 enum mali_format format = stage->varyings[idx];
1889
1890 /* Override format to match linkage */
1891 if (!should_alloc && gen_formats[idx])
1892 format = gen_formats[idx];
1893
1894 if (has_point_coord(stage->point_sprite_mask, loc)) {
1895 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1896 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1897 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1898 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1899 } else if (loc == VARYING_SLOT_POS) {
1900 if (is_fragment)
1901 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1902 else
1903 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1904 } else if (loc == VARYING_SLOT_PSIZ) {
1905 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1906 } else if (loc == VARYING_SLOT_PNTC) {
1907 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1908 } else if (loc == VARYING_SLOT_FACE) {
1909 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1910 } else {
1911 pan_emit_general_varying(out, other, xfb, loc, format, present,
1912 quirks, gen_offsets, gen_formats, gen_stride,
1913 idx, should_alloc);
1914 }
1915 }
1916
1917 static void
1918 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1919 unsigned present,
1920 enum pan_special_varying v,
1921 unsigned special)
1922 {
1923 if (present & (1 << v)) {
1924 unsigned idx = pan_varying_index(present, v);
1925
1926 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1927 cfg.special = special;
1928 cfg.type = 0;
1929 }
1930 }
1931 }
1932
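
/* Emit the varying descriptors for a draw: one attribute record per varying
 * of the vertex and fragment shaders (in that order), followed by the
 * attribute buffers they reference -- the special/general buffers indexed by
 * the present mask, then one buffer per active stream-out target. The
 * resulting GPU pointers are written into the vertex/tiler postfixes. */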
1933 void
1934 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1935 unsigned vertex_count,
1936 struct mali_vertex_tiler_postfix *vertex_postfix,
1937 struct mali_vertex_tiler_postfix *tiler_postfix,
1938 union midgard_primitive_size *primitive_size)
1939 {
1940 /* Load the shaders */
1941 struct panfrost_context *ctx = batch->ctx;
1942 struct panfrost_device *dev = pan_device(ctx->base.screen);
1943 struct panfrost_shader_state *vs, *fs;
1944 size_t vs_size, fs_size;
1945
1946 /* Allocate the varying descriptor */
1947
1948 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1949 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1950 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1951 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1952
1953 struct panfrost_transfer trans = panfrost_pool_alloc_aligned(
1954 &batch->pool, vs_size + fs_size, MALI_ATTRIBUTE_LENGTH);
1955
1956 struct pipe_stream_output_info *so = &vs->stream_output;
1957 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1958
1959 /* Check if this varying is linked by us. This is the case for
1960 * general-purpose, non-captured varyings. If it is, link it. If it's
1961 * not, use the provided stream out information to determine the
1962 * offset, since it was already linked for us. */
1963
1964 unsigned gen_offsets[32];
1965 enum mali_format gen_formats[32];
1966 memset(gen_offsets, 0, sizeof(gen_offsets));
1967 memset(gen_formats, 0, sizeof(gen_formats));
1968
1969 unsigned gen_stride = 0;
1970 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1971 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1972
1973 unsigned streamout_offsets[32];
1974
1975 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1976 streamout_offsets[i] = panfrost_streamout_offset(
1977 so->stride[i],
1978 ctx->streamout.offsets[i],
1979 ctx->streamout.targets[i]);
1980 }
1981
1982 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1983 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1984
1985 for (unsigned i = 0; i < vs->varying_count; i++) {
1986 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1987 ctx->streamout.num_targets, streamout_offsets,
1988 dev->quirks,
1989 gen_offsets, gen_formats, &gen_stride, i, true, false);
1990 }
1991
1992 for (unsigned i = 0; i < fs->varying_count; i++) {
1993 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
1994 ctx->streamout.num_targets, streamout_offsets,
1995 dev->quirks,
1996 gen_offsets, gen_formats, &gen_stride, i, false, true);
1997 }
1998
1999 unsigned xfb_base = pan_xfb_base(present);
2000 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
2001 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets),
2002 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
2003 struct mali_attribute_buffer_packed *varyings =
2004 (struct mali_attribute_buffer_packed *) T.cpu;
2005
2006 /* Emit the stream out buffers */
2007
2008 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
2009 ctx->vertex_count);
2010
2011 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2012 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
2013 so->stride[i],
2014 ctx->streamout.offsets[i],
2015 out_count,
2016 ctx->streamout.targets[i]);
2017 }
2018
2019 panfrost_emit_varyings(batch,
2020 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2021 gen_stride, vertex_count);
2022
2023 /* fp32 vec4 gl_Position */
2024 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2025 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2026 sizeof(float) * 4, vertex_count);
2027
2028 if (present & (1 << PAN_VARY_PSIZ)) {
2029 primitive_size->pointer = panfrost_emit_varyings(batch,
2030 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2031 2, vertex_count);
2032 }
2033
2034 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
2035 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
2036 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
2037
2038 vertex_postfix->varyings = T.gpu;
2039 tiler_postfix->varyings = T.gpu;
2040
2041 vertex_postfix->varying_meta = trans.gpu;
2042 tiler_postfix->varying_meta = trans.gpu + vs_size;
2043 }
2044
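
/* Queue the vertex and tiler jobs for a draw, using the Bifrost or Midgard
 * payload layout as appropriate. Normally the tiler job is chained after the
 * vertex job (and skipped entirely under rasterizer discard); when redrawing
 * the wallpaper, the two are instead injected in reverse order with predicted
 * job indices. */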
2045 void
2046 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2047 struct mali_vertex_tiler_prefix *vertex_prefix,
2048 struct mali_vertex_tiler_postfix *vertex_postfix,
2049 struct mali_vertex_tiler_prefix *tiler_prefix,
2050 struct mali_vertex_tiler_postfix *tiler_postfix,
2051 union midgard_primitive_size *primitive_size)
2052 {
2053 struct panfrost_context *ctx = batch->ctx;
2054 struct panfrost_device *device = pan_device(ctx->base.screen);
2055 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2056 struct bifrost_payload_vertex bifrost_vertex = {0,};
2057 struct bifrost_payload_tiler bifrost_tiler = {0,};
2058 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2059 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2060 void *vp, *tp;
2061 size_t vp_size, tp_size;
2062
2063 if (device->quirks & IS_BIFROST) {
2064 bifrost_vertex.prefix = *vertex_prefix;
2065 bifrost_vertex.postfix = *vertex_postfix;
2066 vp = &bifrost_vertex;
2067 vp_size = sizeof(bifrost_vertex);
2068
2069 bifrost_tiler.prefix = *tiler_prefix;
2070 bifrost_tiler.tiler.primitive_size = *primitive_size;
2071 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2072 bifrost_tiler.postfix = *tiler_postfix;
2073 tp = &bifrost_tiler;
2074 tp_size = sizeof(bifrost_tiler);
2075 } else {
2076 midgard_vertex.prefix = *vertex_prefix;
2077 midgard_vertex.postfix = *vertex_postfix;
2078 vp = &midgard_vertex;
2079 vp_size = sizeof(midgard_vertex);
2080
2081 midgard_tiler.prefix = *tiler_prefix;
2082 midgard_tiler.postfix = *tiler_postfix;
2083 midgard_tiler.primitive_size = *primitive_size;
2084 tp = &midgard_tiler;
2085 tp_size = sizeof(midgard_tiler);
2086 }
2087
2088 if (wallpapering) {
2089 /* Inject in reverse order, with "predicted" job indices.
2090 * THIS IS A HACK XXX */
2091 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2092 batch->scoreboard.job_index + 2, tp, tp_size, true);
2093 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2094 vp, vp_size, true);
2095 return;
2096 }
2097
2098 /* If rasterizer discard is enabled, only submit the vertex job */
2099
2100 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2101 vp, vp_size, false);
2102
2103 if (ctx->rasterizer->base.rasterizer_discard)
2104 return;
2105
2106 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2107 false);
2108 }
2109
2110 /* TODO: stop hardcoding this */
2111 mali_ptr
2112 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2113 {
2114 uint16_t locations[] = {
2115 128, 128,
2116 0, 256,
2117 0, 256,
2118 0, 256,
2119 0, 256,
2120 0, 256,
2121 0, 256,
2122 0, 256,
2123 0, 256,
2124 0, 256,
2125 0, 256,
2126 0, 256,
2127 0, 256,
2128 0, 256,
2129 0, 256,
2130 0, 256,
2131 0, 256,
2132 0, 256,
2133 0, 256,
2134 0, 256,
2135 0, 256,
2136 0, 256,
2137 0, 256,
2138 0, 256,
2139 0, 256,
2140 0, 256,
2141 0, 256,
2142 0, 256,
2143 0, 256,
2144 0, 256,
2145 0, 256,
2146 0, 256,
2147 128, 128,
2148 0, 0,
2149 0, 0,
2150 0, 0,
2151 0, 0,
2152 0, 0,
2153 0, 0,
2154 0, 0,
2155 0, 0,
2156 0, 0,
2157 0, 0,
2158 0, 0,
2159 0, 0,
2160 0, 0,
2161 0, 0,
2162 0, 0,
2163 };
2164
2165 return panfrost_pool_upload_aligned(&batch->pool, locations, 96 * sizeof(uint16_t), 64);
2166 }