panfrost: Reduce attribute buffer allocations
[mesa.git] src/gallium/drivers/panfrost/pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 struct mali_shared_memory shared = {
62 .shared_workgroup_count = ~0,
63 };
64
65 if (batch->stack_size) {
66 struct panfrost_bo *stack =
67 panfrost_batch_get_scratchpad(batch, batch->stack_size,
68 dev->thread_tls_alloc,
69 dev->core_count);
70
71 shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
72 shared.scratchpad = stack->gpu;
73 }
74
75 postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
76 }
77
78 static void
79 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_postfix *postfix)
81 {
82 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
83 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
84 }
85
86 static void
87 panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
88 struct mali_vertex_tiler_prefix *prefix,
89 struct mali_vertex_tiler_postfix *postfix)
90 {
91 postfix->gl_enables |= 0x7;
92 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
93 rasterizer->base.front_ccw);
94 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
95 (rasterizer->base.cull_face & PIPE_FACE_FRONT));
96 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
97 (rasterizer->base.cull_face & PIPE_FACE_BACK));
98 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
99 rasterizer->base.flatshade_first);
100 }
101
102 void
103 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
104 struct mali_vertex_tiler_prefix *prefix,
105 union midgard_primitive_size *primitive_size)
106 {
107 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
108
109 if (!panfrost_writes_point_size(ctx)) {
110 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
111 rasterizer->base.point_size :
112 rasterizer->base.line_width;
113
114 primitive_size->constant = val;
115 }
116 }
117
118 static void
119 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
120 struct mali_vertex_tiler_postfix *postfix)
121 {
122 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
123 if (ctx->occlusion_query) {
124 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
125 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
126 PAN_BO_ACCESS_SHARED |
127 PAN_BO_ACCESS_RW |
128 PAN_BO_ACCESS_FRAGMENT);
129 } else {
130 postfix->occlusion_counter = 0;
131 }
132 }
133
134 void
135 panfrost_vt_init(struct panfrost_context *ctx,
136 enum pipe_shader_type stage,
137 struct mali_vertex_tiler_prefix *prefix,
138 struct mali_vertex_tiler_postfix *postfix)
139 {
140 struct panfrost_device *device = pan_device(ctx->base.screen);
141
142 if (!ctx->shader[stage])
143 return;
144
145 memset(prefix, 0, sizeof(*prefix));
146 memset(postfix, 0, sizeof(*postfix));
147
148 if (device->quirks & IS_BIFROST) {
149 postfix->gl_enables = 0x2;
150 panfrost_vt_emit_shared_memory(ctx, postfix);
151 } else {
152 postfix->gl_enables = 0x6;
153 panfrost_vt_attach_framebuffer(ctx, postfix);
154 }
155
156 if (stage == PIPE_SHADER_FRAGMENT) {
157 panfrost_vt_update_occlusion_query(ctx, postfix);
158 panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
159 }
160 }
161
162 static unsigned
163 panfrost_translate_index_size(unsigned size)
164 {
165 switch (size) {
166 case 1:
167 return MALI_DRAW_INDEXED_UINT8;
168
169 case 2:
170 return MALI_DRAW_INDEXED_UINT16;
171
172 case 4:
173 return MALI_DRAW_INDEXED_UINT32;
174
175 default:
176 unreachable("Invalid index size");
177 }
178 }
179
180 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
181 * good for the duration of the draw (transient), could last longer. Also get
182 * the bounds on the index buffer for the range accessed by the draw. We do
183 * these operations together because there are natural optimizations which
184 * require them to be together. */
185
186 static mali_ptr
187 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
188 const struct pipe_draw_info *info,
189 unsigned *min_index, unsigned *max_index)
190 {
191 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
192 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
193 off_t offset = info->start * info->index_size;
194 bool needs_indices = true;
195 mali_ptr out = 0;
196
197 if (info->max_index != ~0u) {
198 *min_index = info->min_index;
199 *max_index = info->max_index;
200 needs_indices = false;
201 }
202
203 if (!info->has_user_indices) {
204 /* Only resources can be directly mapped */
205 panfrost_batch_add_bo(batch, rsrc->bo,
206 PAN_BO_ACCESS_SHARED |
207 PAN_BO_ACCESS_READ |
208 PAN_BO_ACCESS_VERTEX_TILER);
209 out = rsrc->bo->gpu + offset;
210
211 /* Check the cache */
212 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
213 info->start,
214 info->count,
215 min_index,
216 max_index);
217 } else {
218 /* Otherwise, we need to upload to transient memory */
219 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
220 struct panfrost_transfer T =
221 panfrost_pool_alloc_aligned(&batch->pool,
222 info->count * info->index_size,
223 info->index_size);
224
225 memcpy(T.cpu, ibuf8 + offset, info->count * info->index_size);
226 out = T.gpu;
227 }
228
229 if (needs_indices) {
230 /* Fallback */
231 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
232
233 if (!info->has_user_indices)
234 panfrost_minmax_cache_add(rsrc->index_cache,
235 info->start, info->count,
236 *min_index, *max_index);
237 }
238
239 return out;
240 }
241
242 void
243 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
244 const struct pipe_draw_info *info,
245 enum mali_draw_mode draw_mode,
246 struct mali_vertex_tiler_postfix *vertex_postfix,
247 struct mali_vertex_tiler_prefix *tiler_prefix,
248 struct mali_vertex_tiler_postfix *tiler_postfix,
249 unsigned *vertex_count,
250 unsigned *padded_count)
251 {
252 tiler_prefix->draw_mode = draw_mode;
253
254 unsigned draw_flags = 0;
255
256 if (panfrost_writes_point_size(ctx))
257 draw_flags |= MALI_DRAW_VARYING_SIZE;
258
259 if (info->primitive_restart)
260 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
261
262 /* These don't make much sense */
263
264 draw_flags |= 0x3000;
265
266 if (info->index_size) {
267 unsigned min_index = 0, max_index = 0;
268
269 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
270 info,
271 &min_index,
272 &max_index);
273
274 /* Use the corresponding values */
275 *vertex_count = max_index - min_index + 1;
276 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
277 tiler_prefix->offset_bias_correction = -min_index;
278 tiler_prefix->index_count = MALI_POSITIVE(info->count);
279 draw_flags |= panfrost_translate_index_size(info->index_size);
280 } else {
281 tiler_prefix->indices = 0;
282 *vertex_count = ctx->vertex_count;
283 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
284 tiler_prefix->offset_bias_correction = 0;
285 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
286 }
287
288 tiler_prefix->unknown_draw = draw_flags;
289
290 /* Encode the padded vertex count */
291
292 if (info->instance_count > 1) {
293 *padded_count = panfrost_padded_vertex_count(*vertex_count);
294
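/* instance_shift/instance_odd encode the padded count decomposed as an odd
 * factor times a power of two: padded = (2k + 1) << shift, so shift is the
 * number of trailing zero bits and k recovers the odd part. For example,
 * padded_count = 24 gives shift = 3, k = 1, since 24 = (2*1 + 1) << 3. */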
295 unsigned shift = __builtin_ctz(ctx->padded_count);
296 unsigned k = ctx->padded_count >> (shift + 1);
297
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
300 } else {
301 *padded_count = *vertex_count;
302
303 /* Reset instancing state */
304 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
305 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
306 }
307 }
308
309 static void
310 panfrost_shader_meta_init(struct panfrost_context *ctx,
311 enum pipe_shader_type st,
312 struct mali_shader_meta *meta)
313 {
314 const struct panfrost_device *dev = pan_device(ctx->base.screen);
315 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
316
317 memset(meta, 0, sizeof(*meta));
318 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
319 meta->attribute_count = ss->attribute_count;
320 meta->varying_count = ss->varying_count;
321 meta->texture_count = ctx->sampler_view_count[st];
322 meta->sampler_count = ctx->sampler_count[st];
323
324 if (dev->quirks & IS_BIFROST) {
325 if (st == PIPE_SHADER_VERTEX)
326 meta->bifrost1.unk1 = 0x800000;
327 else {
328 /* First clause ATEST |= 0x4000000.
329 * Less than 32 regs |= 0x200 */
330 meta->bifrost1.unk1 = 0x950020;
331 }
332
333 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
334 if (st == PIPE_SHADER_VERTEX)
335 meta->bifrost2.preload_regs = 0xC0;
336 else {
337 meta->bifrost2.preload_regs = 0x1;
338 SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
339 }
340
341 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
342 ss->uniform_cutoff);
343 } else {
344 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
345 ss->uniform_cutoff);
346 meta->midgard1.work_count = ss->work_reg_count;
347
348 /* TODO: This is not conformant on ES3 */
349 meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
350
351 meta->midgard1.flags_lo = 0x20;
352 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
353
354 SET_BIT(meta->midgard1.flags_lo, MALI_WRITES_GLOBAL, ss->writes_global);
355 }
356 }
357
358 static unsigned
359 translate_tex_wrap(enum pipe_tex_wrap w)
360 {
361 switch (w) {
362 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
363 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
364 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
365 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
366 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
367 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
368 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
369 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
370 default: unreachable("Invalid wrap");
371 }
372 }
373
374 /* The hardware compares in the wrong order, so we have to flip before
375 * encoding. Yes, really. */
376
377 static enum mali_func
378 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
379 {
380 if (!cso->compare_mode)
381 return MALI_FUNC_NEVER;
382
383 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
384 return panfrost_flip_compare_func(f);
385 }
386
387 static enum mali_mipmap_mode
388 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
389 {
390 switch (f) {
391 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
392 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
393 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
394 default: unreachable("Invalid");
395 }
396 }
397
398 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
399 struct mali_midgard_sampler_packed *hw)
400 {
401 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
402 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
403 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
404 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
405 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
406 cfg.normalized_coordinates = cso->normalized_coords;
407
408 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
409
410 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
411
412 /* If necessary, we disable mipmapping in the sampler descriptor by
413 * clamping the LOD as tightly as possible (from 0 to epsilon,
414 * essentially -- remember these are fixed point numbers, so
415 * epsilon=1/256) */
416
417 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
418 cfg.minimum_lod + 1 :
419 FIXED_16(cso->max_lod, false);
420
421 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
422 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
423 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
424
425 cfg.compare_function = panfrost_sampler_compare_func(cso);
426 cfg.seamless_cube_map = cso->seamless_cube_map;
427
428 cfg.border_color_r = cso->border_color.f[0];
429 cfg.border_color_g = cso->border_color.f[1];
430 cfg.border_color_b = cso->border_color.f[2];
431 cfg.border_color_a = cso->border_color.f[3];
432 }
433 }
434
435 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
436 struct mali_bifrost_sampler_packed *hw)
437 {
438 pan_pack(hw, BIFROST_SAMPLER, cfg) {
439 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
440 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
441 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
442 cfg.normalized_coordinates = cso->normalized_coords;
443
444 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
445 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
446 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
447
448 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
449 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
450 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
451
452 cfg.compare_function = panfrost_sampler_compare_func(cso);
453 cfg.seamless_cube_map = cso->seamless_cube_map;
454 }
455 }
456
457 static void
458 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
459 struct mali_shader_meta *fragmeta)
460 {
461 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
462
463 bool msaa = rast->multisample;
464
465 /* TODO: Sample size */
466 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
467 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
468
469 struct panfrost_shader_state *fs;
470 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
471
472 /* EXT_shader_framebuffer_fetch requires the shader to be run
473 * per-sample when outputs are read. */
474 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
475 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
476
477 fragmeta->depth_units = rast->offset_units * 2.0f;
478 fragmeta->depth_factor = rast->offset_scale;
479
480 /* XXX: Which bit is which? Does this maybe allow offsetting non-triangles? */
481
482 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
483 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
484
485 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
486 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
487 }
488
489 static void
490 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
491 struct mali_shader_meta *fragmeta)
492 {
493 const struct panfrost_zsa_state *so = ctx->depth_stencil;
494
495 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
496 so->base.stencil[0].enabled);
497
498 fragmeta->stencil_mask_front = so->stencil_mask_front;
499 fragmeta->stencil_mask_back = so->stencil_mask_back;
500
501 /* Bottom bits for stencil ref, exactly one word */
502 fragmeta->stencil_front.opaque[0] = so->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
503
504 /* If back-stencil is not enabled, use the front values */
505
506 if (so->base.stencil[1].enabled)
507 fragmeta->stencil_back.opaque[0] = so->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
508 else
509 fragmeta->stencil_back = fragmeta->stencil_front;
510
511 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
512 so->base.depth.writemask);
513
514 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
515 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(
516 so->base.depth.enabled ? so->base.depth.func : PIPE_FUNC_ALWAYS));
517 }
518
519 static bool
520 panfrost_fs_required(
521 struct panfrost_shader_state *fs,
522 struct panfrost_blend_final *blend,
523 unsigned rt_count)
524 {
525 /* If we generally have side effects */
526 if (fs->fs_sidefx)
527 return true;
528
529 /* If colour is written we need to execute */
530 for (unsigned i = 0; i < rt_count; ++i) {
531 if (!blend[i].no_colour)
532 return true;
533 }
534
535 /* If depth is written and not implied we need to execute.
536 * TODO: Predicate on Z/S writes being enabled */
537 return (fs->writes_depth || fs->writes_stencil);
538 }
539
540 static void
541 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
542 struct mali_shader_meta *fragmeta,
543 void *rts)
544 {
545 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
546 const struct panfrost_device *dev = pan_device(ctx->base.screen);
547 struct panfrost_shader_state *fs;
548 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
549
550 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
551 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
552 !ctx->blend->base.dither);
553
554 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
555 ctx->blend->base.alpha_to_coverage);
556
557 /* Get blending setup */
558 unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
559
560 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
561
562 for (unsigned c = 0; c < rt_count; ++c)
563 blend[c] = panfrost_get_blend_for_context(ctx, c);
564
565 /* Disable shader execution if we can */
566 if (dev->quirks & MIDGARD_SHADERLESS
567 && !panfrost_fs_required(fs, blend, rt_count)) {
568 fragmeta->shader = 0;
569 fragmeta->attribute_count = 0;
570 fragmeta->varying_count = 0;
571 fragmeta->texture_count = 0;
572 fragmeta->sampler_count = 0;
573
574 /* This feature is not known to work on Bifrost */
575 fragmeta->midgard1.work_count = 1;
576 fragmeta->midgard1.uniform_count = 0;
577 fragmeta->midgard1.uniform_buffer_count = 0;
578 }
579
580 /* If there is a blend shader, work registers are shared. We impose 8
581 * work registers as a limit for blend shaders. Should be lower XXX */
582
583 if (!(dev->quirks & IS_BIFROST)) {
584 for (unsigned c = 0; c < rt_count; ++c) {
585 if (blend[c].is_shader) {
586 fragmeta->midgard1.work_count =
587 MAX2(fragmeta->midgard1.work_count, 8);
588 }
589 }
590 }
591
592 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
593 * copied to the blend_meta appended (by convention), but this is the
594 * field actually read by the hardware. (Or maybe both are read...?).
595 * Specify the last RTi with a blend shader. */
596
597 fragmeta->blend.shader = 0;
598
599 for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
600 if (!blend[rt].is_shader)
601 continue;
602
603 fragmeta->blend.shader = blend[rt].shader.gpu |
604 blend[rt].shader.first_tag;
605 break;
606 }
607
608 if (dev->quirks & MIDGARD_SFBD) {
609 /* On single render target (SFBD) platforms, the blend
610 * information is inside the shader meta itself. We additionally
611 * need to signal CAN_DISCARD for nontrivial blend modes (so
612 * we're able to read back the destination buffer) */
613
614 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
615 blend[0].is_shader);
616
617 if (!blend[0].is_shader) {
618 fragmeta->blend.equation = *blend[0].equation.equation;
619 fragmeta->blend.constant = blend[0].equation.constant;
620 }
621
622 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
623 !blend[0].no_blending || fs->can_discard);
624
625 batch->draws |= PIPE_CLEAR_COLOR0;
626 return;
627 }
628
629 if (dev->quirks & IS_BIFROST) {
630 bool no_blend = true;
631
632 for (unsigned i = 0; i < rt_count; ++i)
633 no_blend &= (blend[i].no_blending | blend[i].no_colour);
634
635 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
636 !fs->can_discard && !fs->writes_depth && no_blend);
637 }
638
639 /* Additional blend descriptor tacked on for jobs using MFBD */
640
641 struct bifrost_blend_rt *brts = rts;
642 struct midgard_blend_rt *mrts = rts;
643
644 /* Disable blending for depth-only on Bifrost */
645
646 if (rt_count == 0 && dev->quirks & IS_BIFROST)
647 brts[0].unk2 = 0x3;
648
649 for (unsigned i = 0; i < rt_count; ++i) {
650 unsigned flags = 0;
651
652 if (!blend[i].no_colour) {
653 flags = 0x200;
654 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
655
656 bool is_srgb = util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
657
658 SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
659 SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
660 SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
661 SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
662 }
663
664 if (dev->quirks & IS_BIFROST) {
665 brts[i].flags = flags;
666
667 if (blend[i].is_shader) {
668 /* The blend shader's address needs to be at
669 * the same top 32 bit as the fragment shader.
670 * TODO: Ensure that's always the case.
671 */
672 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
673 (fs->bo->gpu & (0xffffffffull << 32)));
674 brts[i].shader = blend[i].shader.gpu;
675 brts[i].unk2 = 0x0;
676 } else {
677 enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
678 const struct util_format_description *format_desc;
679 format_desc = util_format_description(format);
680
681 brts[i].equation = *blend[i].equation.equation;
682
683 /* TODO: this is a bit more complicated */
684 brts[i].constant = blend[i].equation.constant;
685
686 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
687
688 /* 0x19 disables blending and forces REPLACE
689 * mode (equivalent to rgb_mode = alpha_mode =
690 * x122, colour mask = 0xF). 0x1a allows
691 * blending. */
692 brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
693
694 brts[i].shader_type = fs->blend_types[i];
695 }
696 } else {
697 mrts[i].flags = flags;
698
699 if (blend[i].is_shader) {
700 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
701 } else {
702 mrts[i].blend.equation = *blend[i].equation.equation;
703 mrts[i].blend.constant = blend[i].equation.constant;
704 }
705 }
706 }
707 }
708
709 static void
710 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
711 struct mali_shader_meta *fragmeta,
712 void *rts)
713 {
714 const struct panfrost_device *dev = pan_device(ctx->base.screen);
715 struct panfrost_shader_state *fs;
716
717 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
718
719 bool msaa = ctx->rasterizer->base.multisample;
720 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
721
722 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
723 fragmeta->unknown2_4 = 0x4e0;
724
725 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
726 * is required (independent of 32-bit/64-bit descriptors), or why it's
727 * not used on later GPU revisions. Otherwise, all shader jobs fault on
728 * these earlier chips (perhaps this is a chicken bit of some kind).
729 * More investigation is needed. */
730
731 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
732
733 if (dev->quirks & IS_BIFROST) {
734 /* TODO */
735 } else {
736 /* Depending on whether it's legal to do so in the given shader, we try to
737 * enable early-z testing. TODO: respect e-z force */
738
739 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
740 !fs->can_discard && !fs->writes_global &&
741 !fs->writes_depth && !fs->writes_stencil &&
742 !ctx->blend->base.alpha_to_coverage);
743
744 /* Add the writes Z/S flags if needed. */
745 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
746 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
747
748 /* Any time texturing is used, derivatives are implicitly calculated,
749 * so we need to enable helper invocations */
750
751 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
752 fs->helper_invocations);
753
754 /* If discard is enabled, which bit we set to convey this
755 * depends on if depth/stencil is used for the draw or not.
756 * Just one of depth OR stencil is enough to trigger this. */
757
758 const struct pipe_depth_stencil_alpha_state *zsa = &ctx->depth_stencil->base;
759 bool zs_enabled =
760 fs->writes_depth || fs->writes_stencil ||
761 (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS) ||
762 zsa->stencil[0].enabled;
763
764 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
765 fs->outputs_read || (!zs_enabled && fs->can_discard));
766 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
767 }
768
769 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
770 panfrost_frag_meta_zsa_update(ctx, fragmeta);
771 panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
772 }
773
774 void
775 panfrost_emit_shader_meta(struct panfrost_batch *batch,
776 enum pipe_shader_type st,
777 struct mali_vertex_tiler_postfix *postfix)
778 {
779 struct panfrost_context *ctx = batch->ctx;
780 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
781
782 if (!ss) {
783 postfix->shader = 0;
784 return;
785 }
786
787 struct mali_shader_meta meta;
788
789 panfrost_shader_meta_init(ctx, st, &meta);
790
791 /* Add the shader BO to the batch. */
792 panfrost_batch_add_bo(batch, ss->bo,
793 PAN_BO_ACCESS_PRIVATE |
794 PAN_BO_ACCESS_READ |
795 panfrost_bo_access_for_stage(st));
796
797 mali_ptr shader_ptr;
798
799 if (st == PIPE_SHADER_FRAGMENT) {
800 struct panfrost_device *dev = pan_device(ctx->base.screen);
801 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
802 size_t desc_size = sizeof(meta);
803 void *rts = NULL;
804 struct panfrost_transfer xfer;
805 unsigned rt_size;
806
807 if (dev->quirks & MIDGARD_SFBD)
808 rt_size = 0;
809 else if (dev->quirks & IS_BIFROST)
810 rt_size = sizeof(struct bifrost_blend_rt);
811 else
812 rt_size = sizeof(struct midgard_blend_rt);
813
814 desc_size += rt_size * rt_count;
815
816 if (rt_size)
817 rts = rzalloc_size(ctx, rt_size * rt_count);
818
819 panfrost_frag_shader_meta_init(ctx, &meta, rts);
820
821 xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, sizeof(meta));
822
823 memcpy(xfer.cpu, &meta, sizeof(meta));
824 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
825
826 if (rt_size)
827 ralloc_free(rts);
828
829 shader_ptr = xfer.gpu;
830 } else {
831 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
832 sizeof(meta));
833 }
834
835 postfix->shader = shader_ptr;
836 }
837
838 void
839 panfrost_emit_viewport(struct panfrost_batch *batch,
840 struct mali_vertex_tiler_postfix *tiler_postfix)
841 {
842 struct panfrost_context *ctx = batch->ctx;
843 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
844 const struct pipe_scissor_state *ss = &ctx->scissor;
845 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
846 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
847
848 /* Derive min/max from translate/scale. Note since |x| >= 0 by
849 * definition, we have that -|x| <= |x| hence translate - |scale| <=
850 * translate + |scale|, so the ordering is correct here. */
851 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
852 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
853 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
854 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
855 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
856 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
857
858 /* Scissor to the intersection of the viewport and the scissor, clamped
859 * to the framebuffer */
860
861 unsigned minx = MIN2(fb->width, vp_minx);
862 unsigned maxx = MIN2(fb->width, vp_maxx);
863 unsigned miny = MIN2(fb->height, vp_miny);
864 unsigned maxy = MIN2(fb->height, vp_maxy);
865
866 if (ss && rast->scissor) {
867 minx = MAX2(ss->minx, minx);
868 miny = MAX2(ss->miny, miny);
869 maxx = MIN2(ss->maxx, maxx);
870 maxy = MIN2(ss->maxy, maxy);
871 }
872
873 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
874
875 pan_pack(T.cpu, VIEWPORT, cfg) {
876 cfg.scissor_minimum_x = minx;
877 cfg.scissor_minimum_y = miny;
878 cfg.scissor_maximum_x = maxx - 1;
879 cfg.scissor_maximum_y = maxy - 1;
880
881 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
882 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
883 }
884
885 tiler_postfix->viewport = T.gpu;
886 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
887 }
888
889 static mali_ptr
890 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
891 enum pipe_shader_type st,
892 struct panfrost_constant_buffer *buf,
893 unsigned index)
894 {
895 struct pipe_constant_buffer *cb = &buf->cb[index];
896 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
897
898 if (rsrc) {
899 panfrost_batch_add_bo(batch, rsrc->bo,
900 PAN_BO_ACCESS_SHARED |
901 PAN_BO_ACCESS_READ |
902 panfrost_bo_access_for_stage(st));
903
904 /* Alignment guaranteed by
905 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
906 return rsrc->bo->gpu + cb->buffer_offset;
907 } else if (cb->user_buffer) {
908 return panfrost_pool_upload(&batch->pool,
909 cb->user_buffer +
910 cb->buffer_offset,
911 cb->buffer_size);
912 } else {
913 unreachable("No constant buffer");
914 }
915 }
916
917 struct sysval_uniform {
918 union {
919 float f[4];
920 int32_t i[4];
921 uint32_t u[4];
922 uint64_t du[2];
923 };
924 };
925
926 static void
927 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
928 struct sysval_uniform *uniform)
929 {
930 struct panfrost_context *ctx = batch->ctx;
931 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
932
933 uniform->f[0] = vp->scale[0];
934 uniform->f[1] = vp->scale[1];
935 uniform->f[2] = vp->scale[2];
936 }
937
938 static void
939 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
940 struct sysval_uniform *uniform)
941 {
942 struct panfrost_context *ctx = batch->ctx;
943 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
944
945 uniform->f[0] = vp->translate[0];
946 uniform->f[1] = vp->translate[1];
947 uniform->f[2] = vp->translate[2];
948 }
949
950 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
951 enum pipe_shader_type st,
952 unsigned int sysvalid,
953 struct sysval_uniform *uniform)
954 {
955 struct panfrost_context *ctx = batch->ctx;
956 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
957 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
958 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
959 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
960
961 assert(dim);
962 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
963
964 if (dim > 1)
965 uniform->i[1] = u_minify(tex->texture->height0,
966 tex->u.tex.first_level);
967
968 if (dim > 2)
969 uniform->i[2] = u_minify(tex->texture->depth0,
970 tex->u.tex.first_level);
971
972 if (is_array)
973 uniform->i[dim] = tex->texture->array_size;
974 }
975
976 static void
977 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
978 enum pipe_shader_type st,
979 unsigned ssbo_id,
980 struct sysval_uniform *uniform)
981 {
982 struct panfrost_context *ctx = batch->ctx;
983
984 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
985 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
986
987 /* Compute address */
988 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
989
990 panfrost_batch_add_bo(batch, bo,
991 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
992 panfrost_bo_access_for_stage(st));
993
994 /* Upload address and size as sysval */
995 uniform->du[0] = bo->gpu + sb.buffer_offset;
996 uniform->u[2] = sb.buffer_size;
997 }
998
999 static void
1000 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
1001 enum pipe_shader_type st,
1002 unsigned samp_idx,
1003 struct sysval_uniform *uniform)
1004 {
1005 struct panfrost_context *ctx = batch->ctx;
1006 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1007
1008 uniform->f[0] = sampl->min_lod;
1009 uniform->f[1] = sampl->max_lod;
1010 uniform->f[2] = sampl->lod_bias;
1011
1012 /* Even without any errata, Midgard represents "no mipmapping" as
1013 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1014 * panfrost_create_sampler_state which also explains our choice of
1015 * epsilon value (again to keep behaviour consistent) */
1016
1017 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1018 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1019 }
1020
1021 static void
1022 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1023 struct sysval_uniform *uniform)
1024 {
1025 struct panfrost_context *ctx = batch->ctx;
1026
1027 uniform->u[0] = ctx->compute_grid->grid[0];
1028 uniform->u[1] = ctx->compute_grid->grid[1];
1029 uniform->u[2] = ctx->compute_grid->grid[2];
1030 }
1031
1032 static void
1033 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1034 struct panfrost_shader_state *ss,
1035 enum pipe_shader_type st)
1036 {
1037 struct sysval_uniform *uniforms = (void *)buf;
1038
1039 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1040 int sysval = ss->sysval[i];
1041
1042 switch (PAN_SYSVAL_TYPE(sysval)) {
1043 case PAN_SYSVAL_VIEWPORT_SCALE:
1044 panfrost_upload_viewport_scale_sysval(batch,
1045 &uniforms[i]);
1046 break;
1047 case PAN_SYSVAL_VIEWPORT_OFFSET:
1048 panfrost_upload_viewport_offset_sysval(batch,
1049 &uniforms[i]);
1050 break;
1051 case PAN_SYSVAL_TEXTURE_SIZE:
1052 panfrost_upload_txs_sysval(batch, st,
1053 PAN_SYSVAL_ID(sysval),
1054 &uniforms[i]);
1055 break;
1056 case PAN_SYSVAL_SSBO:
1057 panfrost_upload_ssbo_sysval(batch, st,
1058 PAN_SYSVAL_ID(sysval),
1059 &uniforms[i]);
1060 break;
1061 case PAN_SYSVAL_NUM_WORK_GROUPS:
1062 panfrost_upload_num_work_groups_sysval(batch,
1063 &uniforms[i]);
1064 break;
1065 case PAN_SYSVAL_SAMPLER:
1066 panfrost_upload_sampler_sysval(batch, st,
1067 PAN_SYSVAL_ID(sysval),
1068 &uniforms[i]);
1069 break;
1070 default:
1071 assert(0);
1072 }
1073 }
1074 }
1075
1076 static const void *
1077 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1078 unsigned index)
1079 {
1080 struct pipe_constant_buffer *cb = &buf->cb[index];
1081 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1082
1083 if (rsrc)
1084 return rsrc->bo->cpu;
1085 else if (cb->user_buffer)
1086 return cb->user_buffer;
1087 else
1088 unreachable("No constant buffer");
1089 }
1090
1091 void
1092 panfrost_emit_const_buf(struct panfrost_batch *batch,
1093 enum pipe_shader_type stage,
1094 struct mali_vertex_tiler_postfix *postfix)
1095 {
1096 struct panfrost_context *ctx = batch->ctx;
1097 struct panfrost_shader_variants *all = ctx->shader[stage];
1098
1099 if (!all)
1100 return;
1101
1102 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1103
1104 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1105
1106 /* Uniforms are implicitly UBO #0 */
1107 bool has_uniforms = buf->enabled_mask & (1 << 0);
1108
1109 /* Allocate room for the sysval and the uniforms */
1110 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1111 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1112 size_t size = sys_size + uniform_size;
1113 struct panfrost_transfer transfer =
1114 panfrost_pool_alloc_aligned(&batch->pool, size, 16);
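/* This one allocation backs both the direct uniform pointer and UBO #0:
 * sysvals are laid out first, followed by the user uniform data copied below. */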
1115
1116 /* Upload sysvals requested by the shader */
1117 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1118
1119 /* Upload uniforms */
1120 if (has_uniforms && uniform_size) {
1121 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1122 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1123 }
1124
1125 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1126 * uploaded */
1127
1128 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1129 assert(ubo_count >= 1);
1130
1131 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1132 struct panfrost_transfer ubos =
1133 panfrost_pool_alloc_aligned(&batch->pool, sz,
1134 MALI_UNIFORM_BUFFER_LENGTH);
1135
1136 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1137
1138 /* Upload uniforms as a UBO */
1139
1140 if (ss->uniform_count) {
1141 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1142 cfg.entries = ss->uniform_count;
1143 cfg.pointer = transfer.gpu;
1144 }
1145 } else {
1146 *ubo_ptr = 0;
1147 }
1148
1149 /* The rest are honest-to-goodness UBOs */
1150
1151 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1152 size_t usz = buf->cb[ubo].buffer_size;
1153 bool enabled = buf->enabled_mask & (1 << ubo);
1154 bool empty = usz == 0;
1155
1156 if (!enabled || empty) {
1157 ubo_ptr[ubo] = 0;
1158 continue;
1159 }
1160
1161 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1162 cfg.entries = DIV_ROUND_UP(usz, 16);
1163 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1164 stage, buf, ubo);
1165 }
1166 }
1167
1168 postfix->uniforms = transfer.gpu;
1169 postfix->uniform_buffers = ubos.gpu;
1170
1171 buf->dirty_mask = 0;
1172 }
1173
1174 void
1175 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1176 const struct pipe_grid_info *info,
1177 struct midgard_payload_vertex_tiler *vtp)
1178 {
1179 struct panfrost_context *ctx = batch->ctx;
1180 struct panfrost_device *dev = pan_device(ctx->base.screen);
1181 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1182 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1183 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1184 128));
1185
1186 unsigned log2_instances =
1187 util_logbase2_ceil(info->grid[0]) +
1188 util_logbase2_ceil(info->grid[1]) +
1189 util_logbase2_ceil(info->grid[2]);
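/* Worst-case allocation: the per-workgroup size, times a power-of-two bound
 * on the total number of workgroups, times the core count. */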
1190
1191 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
1192 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1193 shared_size,
1194 1);
1195
1196 struct mali_shared_memory shared = {
1197 .shared_memory = bo->gpu,
1198 .shared_workgroup_count = log2_instances,
1199 .shared_shift = util_logbase2(single_size) + 1
1200 };
1201
1202 vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
1203 sizeof(shared));
1204 }
1205
1206 static mali_ptr
1207 panfrost_get_tex_desc(struct panfrost_batch *batch,
1208 enum pipe_shader_type st,
1209 struct panfrost_sampler_view *view)
1210 {
1211 if (!view)
1212 return (mali_ptr) 0;
1213
1214 struct pipe_sampler_view *pview = &view->base;
1215 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1216
1217 /* Add the BO to the job so it's retained until the job is done. */
1218
1219 panfrost_batch_add_bo(batch, rsrc->bo,
1220 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1221 panfrost_bo_access_for_stage(st));
1222
1223 panfrost_batch_add_bo(batch, view->bo,
1224 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1225 panfrost_bo_access_for_stage(st));
1226
1227 return view->bo->gpu;
1228 }
1229
1230 static void
1231 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1232 struct pipe_context *pctx)
1233 {
1234 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
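/* The cached descriptor bakes in the BO address and layout, so it has to be
 * rebuilt whenever the backing BO or the modifier has changed. */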
1235 if (view->texture_bo != rsrc->bo->gpu ||
1236 view->modifier != rsrc->modifier) {
1237 panfrost_bo_unreference(view->bo);
1238 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1239 }
1240 }
1241
1242 void
1243 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1244 enum pipe_shader_type stage,
1245 struct mali_vertex_tiler_postfix *postfix)
1246 {
1247 struct panfrost_context *ctx = batch->ctx;
1248 struct panfrost_device *device = pan_device(ctx->base.screen);
1249
1250 if (!ctx->sampler_view_count[stage])
1251 return;
1252
1253 if (device->quirks & IS_BIFROST) {
1254 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1255 MALI_BIFROST_TEXTURE_LENGTH *
1256 ctx->sampler_view_count[stage],
1257 MALI_BIFROST_TEXTURE_LENGTH);
1258
1259 struct mali_bifrost_texture_packed *out =
1260 (struct mali_bifrost_texture_packed *) T.cpu;
1261
1262 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1263 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1264 struct pipe_sampler_view *pview = &view->base;
1265 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1266
1267 panfrost_update_sampler_view(view, &ctx->base);
1268 out[i] = view->bifrost_descriptor;
1269
1270 /* Add the BOs to the job so they are retained until the job is done. */
1271
1272 panfrost_batch_add_bo(batch, rsrc->bo,
1273 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1274 panfrost_bo_access_for_stage(stage));
1275
1276 panfrost_batch_add_bo(batch, view->bo,
1277 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1278 panfrost_bo_access_for_stage(stage));
1279 }
1280
1281 postfix->textures = T.gpu;
1282 } else {
1283 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1284
1285 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1286 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1287
1288 panfrost_update_sampler_view(view, &ctx->base);
1289
1290 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1291 }
1292
1293 postfix->textures = panfrost_pool_upload(&batch->pool,
1294 trampolines,
1295 sizeof(uint64_t) *
1296 ctx->sampler_view_count[stage]);
1297 }
1298 }
1299
1300 void
1301 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1302 enum pipe_shader_type stage,
1303 struct mali_vertex_tiler_postfix *postfix)
1304 {
1305 struct panfrost_context *ctx = batch->ctx;
1306
1307 if (!ctx->sampler_count[stage])
1308 return;
1309
1310 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1311 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1312
1313 size_t sz = desc_size * ctx->sampler_count[stage];
1314 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool, sz, desc_size);
1315 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1316
1317 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1318 out[i] = ctx->samplers[stage][i]->hw;
1319
1320 postfix->sampler_descriptor = T.gpu;
1321 }
1322
1323 void
1324 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1325 struct mali_vertex_tiler_postfix *vertex_postfix)
1326 {
1327 struct panfrost_context *ctx = batch->ctx;
1328 struct panfrost_vertex_state *so = ctx->vertex;
1329 struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1330
1331 unsigned instance_shift = vertex_postfix->instance_shift;
1332 unsigned instance_odd = vertex_postfix->instance_odd;
1333
1334 /* Worst case: everything is NPOT, which is only possible if instancing
1335 * is enabled. Otherwise a single record is guaranteed */
1336 bool could_npot = instance_shift || instance_odd;
1337
1338 struct panfrost_transfer S = panfrost_pool_alloc_aligned(&batch->pool,
1339 MALI_ATTRIBUTE_BUFFER_LENGTH * vs->attribute_count *
1340 (could_npot ? 2 : 1),
1341 MALI_ATTRIBUTE_BUFFER_LENGTH);
1342
1343 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1344 MALI_ATTRIBUTE_LENGTH * vs->attribute_count,
1345 MALI_ATTRIBUTE_LENGTH);
1346
1347 struct mali_attribute_buffer_packed *bufs =
1348 (struct mali_attribute_buffer_packed *) S.cpu;
1349
1350 struct mali_attribute_packed *out =
1351 (struct mali_attribute_packed *) T.cpu;
1352
1353 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1354 unsigned k = 0;
1355
1356 for (unsigned i = 0; i < so->num_elements; ++i) {
1357 /* We map buffers 1:1 with the attributes, which
1358 * means duplicating some vertex buffers (who cares? aside from
1359 * maybe some caching implications but I somehow doubt that
1360 * matters) */
1361
1362 struct pipe_vertex_element *elem = &so->pipe[i];
1363 unsigned vbi = elem->vertex_buffer_index;
1364 attrib_to_buffer[i] = k;
1365
1366 if (!(ctx->vb_mask & (1 << vbi)))
1367 continue;
1368
1369 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1370 struct panfrost_resource *rsrc;
1371
1372 rsrc = pan_resource(buf->buffer.resource);
1373 if (!rsrc)
1374 continue;
1375
1376 /* Add a dependency of the batch on the vertex buffer */
1377 panfrost_batch_add_bo(batch, rsrc->bo,
1378 PAN_BO_ACCESS_SHARED |
1379 PAN_BO_ACCESS_READ |
1380 PAN_BO_ACCESS_VERTEX_TILER);
1381
1382 /* Mask off lower bits, see offset fixup below */
1383 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1384 mali_ptr addr = raw_addr & ~63;
1385
1386 /* Since we advanced the base pointer, we shrink the buffer
1387 * size, but add the offset we subtracted */
1388 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1389 - buf->buffer_offset;
1390
1391 /* When there is a divisor, the hardware-level divisor is
1392 * the product of the instance divisor and the padded count */
1393 unsigned divisor = elem->instance_divisor;
1394 unsigned hw_divisor = ctx->padded_count * divisor;
1395 unsigned stride = buf->stride;
1396
1397 /* If there's a divisor(=1) but no instancing, we want every
1398 * attribute to be the same */
1399
1400 if (divisor && ctx->instance_count == 1)
1401 stride = 0;
1402
1403 if (!divisor || ctx->instance_count <= 1) {
1404 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1405 if (ctx->instance_count > 1)
1406 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1407
1408 cfg.pointer = addr;
1409 cfg.stride = stride;
1410 cfg.size = size;
1411 cfg.divisor_r = instance_shift;
1412 cfg.divisor_p = instance_odd;
1413 }
1414 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1415 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1416 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1417 cfg.pointer = addr;
1418 cfg.stride = stride;
1419 cfg.size = size;
1420 cfg.divisor_r = __builtin_ctz(hw_divisor);
1421 }
1422
1423 } else {
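/* NPOT divisors take the fixed-point "magic" reciprocal path:
 * panfrost_compute_magic_divisor returns the numerator and fills in the
 * shift/extra flags, while the raw divisor goes in a continuation record. */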
1424 unsigned shift = 0, extra_flags = 0;
1425
1426 unsigned magic_divisor =
1427 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
1428
1429 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1430 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1431 cfg.pointer = addr;
1432 cfg.stride = stride;
1433 cfg.size = size;
1434
1435 cfg.divisor_r = shift;
1436 cfg.divisor_e = extra_flags;
1437 }
1438
1439 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1440 cfg.divisor_numerator = magic_divisor;
1441 cfg.divisor = divisor;
1442 }
1443
1444 ++k;
1445 }
1446
1447 ++k;
1448 }
1449
1450 /* Add special gl_VertexID/gl_InstanceID buffers */
1451
1452 if (unlikely(vs->attribute_count >= PAN_VERTEX_ID)) {
1453 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1454
1455 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1456 cfg.buffer_index = k++;
1457 cfg.format = so->formats[PAN_VERTEX_ID];
1458 }
1459
1460 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1461
1462 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1463 cfg.buffer_index = k++;
1464 cfg.format = so->formats[PAN_INSTANCE_ID];
1465 }
1466 }
1467
1468 /* Attribute addresses require 64-byte alignment, so let:
1469 *
1470 * base' = base & ~63 = base - (base & 63)
1471 * offset' = offset + (base & 63)
1472 *
1473 * Since base' + offset' = base + offset, these are equivalent
1474 * addressing modes and now base is 64 aligned.
1475 */
1476
1477 unsigned start = vertex_postfix->offset_start;
1478
1479 for (unsigned i = 0; i < so->num_elements; ++i) {
1480 unsigned vbi = so->pipe[i].vertex_buffer_index;
1481 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1482
1483 /* Adjust by the masked off bits of the offset. Make sure we
1484 * read src_offset from so->hw (which is not GPU visible)
1485 * rather than target (which is) due to caching effects */
1486
1487 unsigned src_offset = so->pipe[i].src_offset;
1488
1489 /* BOs aligned to 4k so guaranteed aligned to 64 */
1490 src_offset += (buf->buffer_offset & 63);
1491
1492 /* Also, somewhat obscurely, per-instance data needs to be
1493 * offset in response to a delayed start in an indexed draw */
1494
1495 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1496 src_offset -= buf->stride * start;
1497
1498 pan_pack(out + i, ATTRIBUTE, cfg) {
1499 cfg.buffer_index = attrib_to_buffer[i];
1500 cfg.format = so->formats[i];
1501 cfg.offset = src_offset;
1502 }
1503 }
1504
1505 vertex_postfix->attributes = S.gpu;
1506 vertex_postfix->attribute_meta = T.gpu;
1507 }
1508
1509 static mali_ptr
1510 panfrost_emit_varyings(struct panfrost_batch *batch,
1511 struct mali_attribute_buffer_packed *slot,
1512 unsigned stride, unsigned count)
1513 {
1514 unsigned size = stride * count;
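/* Varyings are produced and consumed entirely on the GPU, so the backing
 * memory can come from the CPU-invisible pool. */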
1515 mali_ptr ptr = panfrost_pool_alloc_aligned(&batch->invisible_pool, size, 64).gpu;
1516
1517 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1518 cfg.stride = stride;
1519 cfg.size = size;
1520 cfg.pointer = ptr;
1521 }
1522
1523 return ptr;
1524 }
1525
1526 static unsigned
1527 panfrost_streamout_offset(unsigned stride, unsigned offset,
1528 struct pipe_stream_output_target *target)
1529 {
1530 return (target->buffer_offset + (offset * stride * 4)) & 63;
1531 }
1532
1533 static void
1534 panfrost_emit_streamout(struct panfrost_batch *batch,
1535 struct mali_attribute_buffer_packed *slot,
1536 unsigned stride_words, unsigned offset, unsigned count,
1537 struct pipe_stream_output_target *target)
1538 {
1539 unsigned stride = stride_words * 4;
1540 unsigned max_size = target->buffer_size;
1541 unsigned expected_size = stride * count;
1542
1543 /* Grab the BO and bind it to the batch */
1544 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1545
1546 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1547 * the perspective of the TILER and FRAGMENT.
1548 */
1549 panfrost_batch_add_bo(batch, bo,
1550 PAN_BO_ACCESS_SHARED |
1551 PAN_BO_ACCESS_RW |
1552 PAN_BO_ACCESS_VERTEX_TILER |
1553 PAN_BO_ACCESS_FRAGMENT);
1554
1555 /* We will have an offset applied to get alignment */
1556 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
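/* The buffer pointer must be 64-byte aligned, so the low bits are folded into
 * the size here and into the attribute offset (panfrost_streamout_offset). */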
1557
1558 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1559 cfg.pointer = (addr & ~63);
1560 cfg.stride = stride;
1561 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1562 }
1563 }
1564
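/* Bits 0-7 of the sprite coordinate mask select TEX0-TEX7; bit 8 stands in
 * for gl_PointCoord (PNTC). */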
1565 static bool
1566 has_point_coord(unsigned mask, gl_varying_slot loc)
1567 {
1568 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1569 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1570 else if (loc == VARYING_SLOT_PNTC)
1571 return (mask & (1 << 8));
1572 else
1573 return false;
1574 }
1575
1576 /* Helpers for manipulating stream out information so we can pack varyings
1577 * accordingly. Compute the src_offset for a given captured varying */
1578
1579 static struct pipe_stream_output *
1580 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1581 {
1582 for (unsigned i = 0; i < info->num_outputs; ++i) {
1583 if (info->output[i].register_index == loc)
1584 return &info->output[i];
1585 }
1586
1587 unreachable("Varying not captured");
1588 }
1589
1590 static unsigned
1591 pan_varying_size(enum mali_format fmt)
1592 {
1593 unsigned type = MALI_EXTRACT_TYPE(fmt);
1594 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1595 unsigned bits = MALI_EXTRACT_BITS(fmt);
1596 unsigned bpc = 0;
1597
1598 if (bits == MALI_CHANNEL_FLOAT) {
1599 /* No doubles */
1600 bool fp16 = (type == MALI_FORMAT_SINT);
1601 assert(fp16 || (type == MALI_FORMAT_UNORM));
1602
1603 bpc = fp16 ? 2 : 4;
1604 } else {
1605 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1606
1607 /* See the enums */
1608 bits = 1 << bits;
1609 assert(bits >= 8);
1610 bpc = bits / 8;
1611 }
1612
1613 return bpc * chan;
1614 }
1615
1616 /* Indices for named (non-XFB) varyings that are present. These are packed
1617 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1618 * PAN_VARY_*). This has the nice property that you can look up the buffer index
1619 * of a given special field given a shift S by:
1620 *
1621 * idx = popcount(P & ((1 << S) - 1))
1622 *
1623 * That is, look at all of the varyings that come earlier and count them; that
1624 * count is the index of this one. Likewise, the total number of special
1625 * buffers required is simply popcount(P)
1626 */
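/* For example, if only GENERAL, POSITION and PSIZ are present, then
 * P = 0b111 and the PSIZ buffer index is popcount(0b111 & 0b011) = 2. */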
1627
1628 enum pan_special_varying {
1629 PAN_VARY_GENERAL = 0,
1630 PAN_VARY_POSITION = 1,
1631 PAN_VARY_PSIZ = 2,
1632 PAN_VARY_PNTCOORD = 3,
1633 PAN_VARY_FACE = 4,
1634 PAN_VARY_FRAGCOORD = 5,
1635
1636 /* Keep last */
1637 PAN_VARY_MAX,
1638 };
1639
1640 /* Given a varying, figure out which index it corresponds to */
1641
1642 static inline unsigned
1643 pan_varying_index(unsigned present, enum pan_special_varying v)
1644 {
1645 unsigned mask = (1 << v) - 1;
1646 return util_bitcount(present & mask);
1647 }
1648
1649 /* Get the base offset for XFB buffers, which by convention come after
1650 * everything else. Wrapper function for semantic reasons; by construction this
1651 * is just popcount. */
1652
1653 static inline unsigned
1654 pan_xfb_base(unsigned present)
1655 {
1656 return util_bitcount(present);
1657 }
1658
1659 /* Computes the present mask for varyings so we can start emitting varying records */
1660
1661 static inline unsigned
1662 pan_varying_present(
1663 struct panfrost_shader_state *vs,
1664 struct panfrost_shader_state *fs,
1665 unsigned quirks)
1666 {
1667 /* At the moment we always emit general and position buffers. Not
1668 * strictly necessary but usually harmless */
1669
1670 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1671
1672 /* Enable special buffers by the shader info */
1673
1674 if (vs->writes_point_size)
1675 present |= (1 << PAN_VARY_PSIZ);
1676
1677 if (fs->reads_point_coord)
1678 present |= (1 << PAN_VARY_PNTCOORD);
1679
1680 if (fs->reads_face)
1681 present |= (1 << PAN_VARY_FACE);
1682
1683 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1684 present |= (1 << PAN_VARY_FRAGCOORD);
1685
1686 /* Also, if we have a point sprite, we need a point coord buffer */
1687
1688 for (unsigned i = 0; i < fs->varying_count; i++) {
1689 gl_varying_slot loc = fs->varyings_loc[i];
1690
1691 if (has_point_coord(fs->point_sprite_mask, loc))
1692 present |= (1 << PAN_VARY_PNTCOORD);
1693 }
1694
1695 return present;
1696 }
1697
1698 /* Emitters for varying records */
1699
1700 static void
1701 pan_emit_vary(struct mali_attribute_packed *out,
1702 unsigned present, enum pan_special_varying buf,
1703 unsigned quirks, enum mali_format format,
1704 unsigned offset)
1705 {
1706 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1707 unsigned swizzle = quirks & HAS_SWIZZLES ?
1708 panfrost_get_default_swizzle(nr_channels) :
1709 panfrost_bifrost_swizzle(nr_channels);
1710
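/* The packed format word keeps the swizzle in its low 12 bits, with the Mali
 * format enum above it. */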
1711 pan_pack(out, ATTRIBUTE, cfg) {
1712 cfg.buffer_index = pan_varying_index(present, buf);
1713 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1714 cfg.format = (format << 12) | swizzle;
1715 cfg.offset = offset;
1716 }
1717 }
1718
1719 /* General varying that is unused */
1720
1721 static void
1722 pan_emit_vary_only(struct mali_attribute_packed *out,
1723 unsigned present, unsigned quirks)
1724 {
1725 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, MALI_VARYING_DISCARD, 0);
1726 }
1727
1728 /* Special records */
1729
1730 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1731 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1732 [PAN_VARY_PSIZ] = MALI_R16F,
1733 [PAN_VARY_PNTCOORD] = MALI_R16F,
1734 [PAN_VARY_FACE] = MALI_R32I,
1735 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1736 };
1737
1738 static void
1739 pan_emit_vary_special(struct mali_attribute_packed *out,
1740 unsigned present, enum pan_special_varying buf,
1741 unsigned quirks)
1742 {
1743 assert(buf < PAN_VARY_MAX);
1744 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1745 }
1746
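/* XFB captures at highp: floats are captured as R32F with the output's channel
 * count, while other formats keep their base type but are widened to 32 bits
 * per channel */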
1747 static enum mali_format
1748 pan_xfb_format(enum mali_format format, unsigned nr)
1749 {
1750 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1751 return MALI_R32F | MALI_NR_CHANNELS(nr);
1752 else
1753 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1754 }
1755
1756 /* Transform feedback records. Note that struct pipe_stream_output packs into
1757 * 32 bits (as a bitfield), smaller than a 64-bit pointer, so it may as well be
1758 * passed by value. */
1759
1760 static void
1761 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1762 unsigned present,
1763 unsigned max_xfb,
1764 unsigned *streamout_offsets,
1765 unsigned quirks,
1766 enum mali_format format,
1767 struct pipe_stream_output o)
1768 {
1769 unsigned swizzle = quirks & HAS_SWIZZLES ?
1770 panfrost_get_default_swizzle(o.num_components) :
1771 panfrost_bifrost_swizzle(o.num_components);
1772
1773 pan_pack(out, ATTRIBUTE, cfg) {
1774 /* XFB buffers come after everything else */
1775 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1776 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1777
1778 /* Override number of channels and precision to highp */
1779 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1780
1781 /* Offset within the output vertex (dst_offset, in dwords) plus the running offset of the streamout buffer */
1782 cfg.offset = (o.dst_offset * 4) /* dwords */
1783 + streamout_offsets[o.output_buffer];
1784 }
1785 }
1786
1787 /* Determine if we should capture a varying for XFB. This requires actually
1788 * having a buffer for it. If we don't capture it, we fall back to the general
1789 * varying path (linked or unlinked, possibly discarding the write) */
1790
1791 static bool
1792 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1793 unsigned loc, unsigned max_xfb)
1794 {
1795 if (!(xfb->so_mask & (1ll << loc)))
1796 return false;
1797
1798 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1799 return o->output_buffer < max_xfb;
1800 }
1801
1802 static void
1803 pan_emit_general_varying(struct mali_attribute_packed *out,
1804 struct panfrost_shader_state *other,
1805 struct panfrost_shader_state *xfb,
1806 gl_varying_slot loc,
1807 enum mali_format format,
1808 unsigned present,
1809 unsigned quirks,
1810 unsigned *gen_offsets,
1811 enum mali_format *gen_formats,
1812 unsigned *gen_stride,
1813 unsigned idx,
1814 bool should_alloc)
1815 {
1816 /* Check if we're linked */
1817 signed other_idx = -1;
1818
1819 for (unsigned j = 0; j < other->varying_count; ++j) {
1820 if (other->varyings_loc[j] == loc) {
1821 other_idx = j;
1822 break;
1823 }
1824 }
1825
1826 if (other_idx < 0) {
1827 pan_emit_vary_only(out, present, quirks);
1828 return;
1829 }
1830
1831 unsigned offset = gen_offsets[other_idx];
1832
1833 if (should_alloc) {
1834 /* We're linked, so allocate space via a watermark allocation */
1835 enum mali_format alt = other->varyings[other_idx];
1836
1837 /* Do interpolation at minimum precision */
1838 unsigned size_main = pan_varying_size(format);
1839 unsigned size_alt = pan_varying_size(alt);
1840 unsigned size = MIN2(size_main, size_alt);
1841
1842 /* If a varying is marked for XFB but not actually captured, we
1843 * should match the format to the format that would otherwise
1844 * be used for XFB, since dEQP checks for invariance here. It's
1845 * unclear if this is required by the spec. */
1846
1847 if (xfb->so_mask & (1ull << loc)) {
1848 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1849 format = pan_xfb_format(format, o->num_components);
1850 size = pan_varying_size(format);
1851 } else if (size == size_alt) {
1852 format = alt;
1853 }
1854
1855 gen_offsets[idx] = *gen_stride;
1856 gen_formats[other_idx] = format;
1857 offset = *gen_stride;
1858 *gen_stride += size;
1859 }
1860
1861 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1862 }
1863
1864 /* Higher-level wrapper around all of the above: classify a varying as one of
1865 * the cases above and dispatch to the matching emitter */
1866
1867 static void
1868 panfrost_emit_varying(
1869 struct mali_attribute_packed *out,
1870 struct panfrost_shader_state *stage,
1871 struct panfrost_shader_state *other,
1872 struct panfrost_shader_state *xfb,
1873 unsigned present,
1874 unsigned max_xfb,
1875 unsigned *streamout_offsets,
1876 unsigned quirks,
1877 unsigned *gen_offsets,
1878 enum mali_format *gen_formats,
1879 unsigned *gen_stride,
1880 unsigned idx,
1881 bool should_alloc,
1882 bool is_fragment)
1883 {
1884 gl_varying_slot loc = stage->varyings_loc[idx];
1885 enum mali_format format = stage->varyings[idx];
1886
1887 /* Override format to match linkage */
1888 if (!should_alloc && gen_formats[idx])
1889 format = gen_formats[idx];
1890
1891 if (has_point_coord(stage->point_sprite_mask, loc)) {
1892 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1893 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1894 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1895 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1896 } else if (loc == VARYING_SLOT_POS) {
1897 if (is_fragment)
1898 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1899 else
1900 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1901 } else if (loc == VARYING_SLOT_PSIZ) {
1902 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1903 } else if (loc == VARYING_SLOT_PNTC) {
1904 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1905 } else if (loc == VARYING_SLOT_FACE) {
1906 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1907 } else {
1908 pan_emit_general_varying(out, other, xfb, loc, format, present,
1909 quirks, gen_offsets, gen_formats, gen_stride,
1910 idx, should_alloc);
1911 }
1912 }
1913
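/* Emit an attribute buffer record backed by a hardware special input (point
 * coord, front facing or fragment coord) rather than memory, if the
 * corresponding varying is present */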
1914 static void
1915 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1916 unsigned present,
1917 enum pan_special_varying v,
1918 unsigned special)
1919 {
1920 if (present & (1 << v)) {
1921 unsigned idx = pan_varying_index(present, v);
1922
1923 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1924 cfg.special = special;
1925 cfg.type = 0;
1926 }
1927 }
1928 }
1929
1930 void
1931 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1932 unsigned vertex_count,
1933 struct mali_vertex_tiler_postfix *vertex_postfix,
1934 struct mali_vertex_tiler_postfix *tiler_postfix,
1935 union midgard_primitive_size *primitive_size)
1936 {
1937 /* Load the shaders */
1938 struct panfrost_context *ctx = batch->ctx;
1939 struct panfrost_device *dev = pan_device(ctx->base.screen);
1940 struct panfrost_shader_state *vs, *fs;
1941 size_t vs_size, fs_size;
1942
1943 /* Allocate the varying descriptor */
1944
1945 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1946 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1947 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1948 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1949
1950 struct panfrost_transfer trans = panfrost_pool_alloc_aligned(
1951 &batch->pool, vs_size + fs_size, MALI_ATTRIBUTE_LENGTH);
1952
1953 struct pipe_stream_output_info *so = &vs->stream_output;
1954 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1955
1956 /* Check if this varying is linked by us. This is the case for
1957 * general-purpose, non-captured varyings. If it is, link it. If it's
1958 * not, use the provided stream out information to determine the
1959 * offset, since it was already linked for us. */
1960
1961 unsigned gen_offsets[32];
1962 enum mali_format gen_formats[32];
1963 memset(gen_offsets, 0, sizeof(gen_offsets));
1964 memset(gen_formats, 0, sizeof(gen_formats));
1965
1966 unsigned gen_stride = 0;
1967 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1968 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1969
1970 unsigned streamout_offsets[32];
1971
1972 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1973 streamout_offsets[i] = panfrost_streamout_offset(
1974 so->stride[i],
1975 ctx->streamout.offsets[i],
1976 ctx->streamout.targets[i]);
1977 }
1978
1979 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1980 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1981
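        /* Emit the vertex shader's records first, allocating space in the
         * general varying buffer as we go, then emit the fragment shader's
         * records against the linkage just computed */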
1982 for (unsigned i = 0; i < vs->varying_count; i++) {
1983 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1984 ctx->streamout.num_targets, streamout_offsets,
1985 dev->quirks,
1986 gen_offsets, gen_formats, &gen_stride, i, true, false);
1987 }
1988
1989 for (unsigned i = 0; i < fs->varying_count; i++) {
1990 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
1991 ctx->streamout.num_targets, streamout_offsets,
1992 dev->quirks,
1993 gen_offsets, gen_formats, &gen_stride, i, false, true);
1994 }
1995
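        /* Attribute buffer records: one per present special varying (packed
         * tightly as described above), followed by one per active streamout
         * target */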
1996 unsigned xfb_base = pan_xfb_base(present);
1997 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1998 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets),
1999 MALI_ATTRIBUTE_BUFFER_LENGTH);
2000 struct mali_attribute_buffer_packed *varyings =
2001 (struct mali_attribute_buffer_packed *) T.cpu;
2002
2003 /* Emit the stream out buffers */
2004
2005 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
2006 ctx->vertex_count);
2007
2008 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2009 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
2010 so->stride[i],
2011 ctx->streamout.offsets[i],
2012 out_count,
2013 ctx->streamout.targets[i]);
2014 }
2015
2016 panfrost_emit_varyings(batch,
2017 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2018 gen_stride, vertex_count);
2019
2020 /* fp32 vec4 gl_Position */
2021 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2022 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2023 sizeof(float) * 4, vertex_count);
2024
2025 if (present & (1 << PAN_VARY_PSIZ)) {
2026 primitive_size->pointer = panfrost_emit_varyings(batch,
2027 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2028 2, vertex_count);
2029 }
2030
2031 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
2032 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
2033 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
2034
2035 vertex_postfix->varyings = T.gpu;
2036 tiler_postfix->varyings = T.gpu;
2037
2038 vertex_postfix->varying_meta = trans.gpu;
2039 tiler_postfix->varying_meta = trans.gpu + vs_size;
2040 }
2041
2042 void
2043 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2044 struct mali_vertex_tiler_prefix *vertex_prefix,
2045 struct mali_vertex_tiler_postfix *vertex_postfix,
2046 struct mali_vertex_tiler_prefix *tiler_prefix,
2047 struct mali_vertex_tiler_postfix *tiler_postfix,
2048 union midgard_primitive_size *primitive_size)
2049 {
2050 struct panfrost_context *ctx = batch->ctx;
2051 struct panfrost_device *device = pan_device(ctx->base.screen);
2052 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2053 struct bifrost_payload_vertex bifrost_vertex = {0,};
2054 struct bifrost_payload_tiler bifrost_tiler = {0,};
2055 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2056 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2057 void *vp, *tp;
2058 size_t vp_size, tp_size;
2059
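        /* Bifrost and Midgard use different payload layouts; fill in
         * whichever applies and submit it below */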
2060 if (device->quirks & IS_BIFROST) {
2061 bifrost_vertex.prefix = *vertex_prefix;
2062 bifrost_vertex.postfix = *vertex_postfix;
2063 vp = &bifrost_vertex;
2064 vp_size = sizeof(bifrost_vertex);
2065
2066 bifrost_tiler.prefix = *tiler_prefix;
2067 bifrost_tiler.tiler.primitive_size = *primitive_size;
2068 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2069 bifrost_tiler.postfix = *tiler_postfix;
2070 tp = &bifrost_tiler;
2071 tp_size = sizeof(bifrost_tiler);
2072 } else {
2073 midgard_vertex.prefix = *vertex_prefix;
2074 midgard_vertex.postfix = *vertex_postfix;
2075 vp = &midgard_vertex;
2076 vp_size = sizeof(midgard_vertex);
2077
2078 midgard_tiler.prefix = *tiler_prefix;
2079 midgard_tiler.postfix = *tiler_postfix;
2080 midgard_tiler.primitive_size = *primitive_size;
2081 tp = &midgard_tiler;
2082 tp_size = sizeof(midgard_tiler);
2083 }
2084
2085 if (wallpapering) {
2086 /* Inject in reverse order, with "predicted" job indices.
2087 * THIS IS A HACK XXX */
2088 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2089 batch->scoreboard.job_index + 2, tp, tp_size, true);
2090 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2091 vp, vp_size, true);
2092 return;
2093 }
2094
2095 /* If rasterizer discard is enabled, only submit the vertex job */
2096
2097 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2098 vp, vp_size, false);
2099
2100 if (ctx->rasterizer->base.rasterizer_discard)
2101 return;
2102
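        /* The tiler job consumes the vertex job's output, so chain it after
         * the vertex job by passing that job's index as a dependency */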
2103 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2104 false);
2105 }
2106
2107 /* TODO: stop hardcoding this */
2108 mali_ptr
2109 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2110 {
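        /* 96 16-bit values uploaded verbatim. The encoding appears to be a
         * fixed-point (x, y) pair per sample, so (128, 128) would be a pixel
         * centre, but that is an assumption; see the TODO above */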
2111 uint16_t locations[] = {
2112 128, 128,
2113 0, 256,
2114 0, 256,
2115 0, 256,
2116 0, 256,
2117 0, 256,
2118 0, 256,
2119 0, 256,
2120 0, 256,
2121 0, 256,
2122 0, 256,
2123 0, 256,
2124 0, 256,
2125 0, 256,
2126 0, 256,
2127 0, 256,
2128 0, 256,
2129 0, 256,
2130 0, 256,
2131 0, 256,
2132 0, 256,
2133 0, 256,
2134 0, 256,
2135 0, 256,
2136 0, 256,
2137 0, 256,
2138 0, 256,
2139 0, 256,
2140 0, 256,
2141 0, 256,
2142 0, 256,
2143 0, 256,
2144 128, 128,
2145 0, 0,
2146 0, 0,
2147 0, 0,
2148 0, 0,
2149 0, 0,
2150 0, 0,
2151 0, 0,
2152 0, 0,
2153 0, 0,
2154 0, 0,
2155 0, 0,
2156 0, 0,
2157 0, 0,
2158 0, 0,
2159 0, 0,
2160 };
2161
2162 return panfrost_pool_upload(&batch->pool, locations, sizeof(locations));
2163 }