panfrost: XMLify blend equation
src/gallium/drivers/panfrost/pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
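/* Emit the shared-memory descriptor used on Bifrost (Midgard attaches the
 * framebuffer descriptor instead, see panfrost_vt_attach_framebuffer below):
 * if the batch needs stack space, point the descriptor at a suitably sized
 * scratchpad BO, then upload the descriptor and hook it into the postfix. */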
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 struct mali_shared_memory shared = {
62 .shared_workgroup_count = ~0,
63 };
64
65 if (batch->stack_size) {
66 struct panfrost_bo *stack =
67 panfrost_batch_get_scratchpad(batch, batch->stack_size,
68 dev->thread_tls_alloc,
69 dev->core_count);
70
71 shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
72 shared.scratchpad = stack->gpu;
73 }
74
75 postfix->shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared, sizeof(shared), 64);
76 }
77
78 static void
79 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_postfix *postfix)
81 {
82 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
83 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
84 }
85
86 static void
87 panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
88 struct mali_vertex_tiler_prefix *prefix,
89 struct mali_vertex_tiler_postfix *postfix)
90 {
91 postfix->gl_enables |= 0x7;
92 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
93 rasterizer->base.front_ccw);
94 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
95 (rasterizer->base.cull_face & PIPE_FACE_FRONT));
96 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
97 (rasterizer->base.cull_face & PIPE_FACE_BACK));
98 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
99 rasterizer->base.flatshade_first);
100 }
101
102 void
103 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
104 struct mali_vertex_tiler_prefix *prefix,
105 union midgard_primitive_size *primitive_size)
106 {
107 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
108
109 if (!panfrost_writes_point_size(ctx)) {
110 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
111 rasterizer->base.point_size :
112 rasterizer->base.line_width;
113
114 primitive_size->constant = val;
115 }
116 }
117
118 static void
119 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
120 struct mali_vertex_tiler_postfix *postfix)
121 {
122 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
123 if (ctx->occlusion_query) {
124 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
125 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
126 PAN_BO_ACCESS_SHARED |
127 PAN_BO_ACCESS_RW |
128 PAN_BO_ACCESS_FRAGMENT);
129 } else {
130 postfix->occlusion_counter = 0;
131 }
132 }
133
134 void
135 panfrost_vt_init(struct panfrost_context *ctx,
136 enum pipe_shader_type stage,
137 struct mali_vertex_tiler_prefix *prefix,
138 struct mali_vertex_tiler_postfix *postfix)
139 {
140 struct panfrost_device *device = pan_device(ctx->base.screen);
141
142 if (!ctx->shader[stage])
143 return;
144
145 memset(prefix, 0, sizeof(*prefix));
146 memset(postfix, 0, sizeof(*postfix));
147
148 if (device->quirks & IS_BIFROST) {
149 postfix->gl_enables = 0x2;
150 panfrost_vt_emit_shared_memory(ctx, postfix);
151 } else {
152 postfix->gl_enables = 0x6;
153 panfrost_vt_attach_framebuffer(ctx, postfix);
154 }
155
156 if (stage == PIPE_SHADER_FRAGMENT) {
157 panfrost_vt_update_occlusion_query(ctx, postfix);
158 panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
159 }
160 }
161
162 static unsigned
163 panfrost_translate_index_size(unsigned size)
164 {
165 switch (size) {
166 case 1:
167 return MALI_DRAW_INDEXED_UINT8;
168
169 case 2:
170 return MALI_DRAW_INDEXED_UINT16;
171
172 case 4:
173 return MALI_DRAW_INDEXED_UINT32;
174
175 default:
176 unreachable("Invalid index size");
177 }
178 }
179
 180 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
 181  * good for the duration of the draw (transient), though it could last longer. Also get
182 * the bounds on the index buffer for the range accessed by the draw. We do
183 * these operations together because there are natural optimizations which
184 * require them to be together. */
185
186 static mali_ptr
187 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
188 const struct pipe_draw_info *info,
189 unsigned *min_index, unsigned *max_index)
190 {
191 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
192 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
193 off_t offset = info->start * info->index_size;
194 bool needs_indices = true;
195 mali_ptr out = 0;
196
197 if (info->max_index != ~0u) {
198 *min_index = info->min_index;
199 *max_index = info->max_index;
200 needs_indices = false;
201 }
202
203 if (!info->has_user_indices) {
204 /* Only resources can be directly mapped */
205 panfrost_batch_add_bo(batch, rsrc->bo,
206 PAN_BO_ACCESS_SHARED |
207 PAN_BO_ACCESS_READ |
208 PAN_BO_ACCESS_VERTEX_TILER);
209 out = rsrc->bo->gpu + offset;
210
211 /* Check the cache */
212 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
213 info->start,
214 info->count,
215 min_index,
216 max_index);
217 } else {
218 /* Otherwise, we need to upload to transient memory */
219 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
220 struct panfrost_transfer T =
221 panfrost_pool_alloc_aligned(&batch->pool,
222 info->count * info->index_size,
223 info->index_size);
224
225 memcpy(T.cpu, ibuf8 + offset, info->count * info->index_size);
226 out = T.gpu;
227 }
228
229 if (needs_indices) {
230 /* Fallback */
231 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
232
233 if (!info->has_user_indices)
234 panfrost_minmax_cache_add(rsrc->index_cache,
235 info->start, info->count,
236 *min_index, *max_index);
237 }
238
239 return out;
240 }
241
242 void
243 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
244 const struct pipe_draw_info *info,
245 enum mali_draw_mode draw_mode,
246 struct mali_vertex_tiler_postfix *vertex_postfix,
247 struct mali_vertex_tiler_prefix *tiler_prefix,
248 struct mali_vertex_tiler_postfix *tiler_postfix,
249 unsigned *vertex_count,
250 unsigned *padded_count)
251 {
252 tiler_prefix->draw_mode = draw_mode;
253
254 unsigned draw_flags = 0;
255
256 if (panfrost_writes_point_size(ctx))
257 draw_flags |= MALI_DRAW_VARYING_SIZE;
258
259 if (info->primitive_restart)
260 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
261
 262 /* These don't make much sense */
263
264 draw_flags |= 0x3000;
265
266 if (info->index_size) {
267 unsigned min_index = 0, max_index = 0;
268
269 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
270 info,
271 &min_index,
272 &max_index);
273
274 /* Use the corresponding values */
275 *vertex_count = max_index - min_index + 1;
276 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
277 tiler_prefix->offset_bias_correction = -min_index;
278 tiler_prefix->index_count = MALI_POSITIVE(info->count);
279 draw_flags |= panfrost_translate_index_size(info->index_size);
280 } else {
281 tiler_prefix->indices = 0;
282 *vertex_count = ctx->vertex_count;
283 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
284 tiler_prefix->offset_bias_correction = 0;
285 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
286 }
287
288 tiler_prefix->unknown_draw = draw_flags;
289
290 /* Encode the padded vertex count */
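/* A padded count decomposes exactly as padded = (2k + 1) << shift, and the
 * hardware consumes shift and k separately as instance_shift/instance_odd.
 * Illustrative example (not from the original comments): padded = 12 =
 * 0b1100 gives shift = ctz(12) = 2 and k = 12 >> 3 = 1, i.e. 12 = 3 << 2. */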
291
292 if (info->instance_count > 1) {
293 *padded_count = panfrost_padded_vertex_count(*vertex_count);
294
295 unsigned shift = __builtin_ctz(ctx->padded_count);
296 unsigned k = ctx->padded_count >> (shift + 1);
297
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
300 } else {
301 *padded_count = *vertex_count;
302
303 /* Reset instancing state */
304 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
305 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
306 }
307 }
308
309 static void
310 panfrost_shader_meta_init(struct panfrost_context *ctx,
311 enum pipe_shader_type st,
312 struct mali_shader_meta *meta)
313 {
314 const struct panfrost_device *dev = pan_device(ctx->base.screen);
315 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
316
317 memset(meta, 0, sizeof(*meta));
318 meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
319 meta->attribute_count = ss->attribute_count;
320 meta->varying_count = ss->varying_count;
321 meta->texture_count = ctx->sampler_view_count[st];
322 meta->sampler_count = ctx->sampler_count[st];
323
324 if (dev->quirks & IS_BIFROST) {
325 if (st == PIPE_SHADER_VERTEX)
326 meta->bifrost1.unk1 = 0x800000;
327 else {
328 /* First clause ATEST |= 0x4000000.
329 * Less than 32 regs |= 0x200 */
330 meta->bifrost1.unk1 = 0x950020;
331 }
332
333 meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
334 if (st == PIPE_SHADER_VERTEX)
335 meta->bifrost2.preload_regs = 0xC0;
336 else {
337 meta->bifrost2.preload_regs = 0x1;
338 SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
339 }
340
341 meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
342 ss->uniform_cutoff);
343 } else {
344 meta->midgard1.uniform_count = MIN2(ss->uniform_count,
345 ss->uniform_cutoff);
346 meta->midgard1.work_count = ss->work_reg_count;
347
348 /* TODO: This is not conformant on ES3 */
349 meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
350
351 meta->midgard1.flags_lo = 0x20;
352 meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
353
354 SET_BIT(meta->midgard1.flags_lo, MALI_WRITES_GLOBAL, ss->writes_global);
355 }
356 }
357
358 static unsigned
359 translate_tex_wrap(enum pipe_tex_wrap w)
360 {
361 switch (w) {
362 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
363 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
364 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
365 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
366 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
367 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
368 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
369 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
370 default: unreachable("Invalid wrap");
371 }
372 }
373
 374 /* The hardware compares in the wrong order, so we have to flip before
375 * encoding. Yes, really. */
376
377 static enum mali_func
378 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
379 {
380 if (!cso->compare_mode)
381 return MALI_FUNC_NEVER;
382
383 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
384 return panfrost_flip_compare_func(f);
385 }
386
387 static enum mali_mipmap_mode
388 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
389 {
390 switch (f) {
391 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
392 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
393 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
394 default: unreachable("Invalid");
395 }
396 }
397
398 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
399 struct mali_midgard_sampler_packed *hw)
400 {
401 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
402 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
403 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
404 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
405 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
406 cfg.normalized_coordinates = cso->normalized_coords;
407
408 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
409
410 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
411
 412 /* If mipmapping is disabled, emulate that here by clamping the LOD
 413  * range as tightly as possible: maximum = minimum + epsilon, where
 414  * epsilon is the smallest representable step -- remember these are
 415  * fixed point numbers, so epsilon = 1/256 */
416
417 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
418 cfg.minimum_lod + 1 :
419 FIXED_16(cso->max_lod, false);
420
421 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
422 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
423 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
424
425 cfg.compare_function = panfrost_sampler_compare_func(cso);
426 cfg.seamless_cube_map = cso->seamless_cube_map;
427
428 cfg.border_color_r = cso->border_color.f[0];
429 cfg.border_color_g = cso->border_color.f[1];
430 cfg.border_color_b = cso->border_color.f[2];
431 cfg.border_color_a = cso->border_color.f[3];
432 }
433 }
434
435 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
436 struct mali_bifrost_sampler_packed *hw)
437 {
438 pan_pack(hw, BIFROST_SAMPLER, cfg) {
439 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
440 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
441 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
442 cfg.normalized_coordinates = cso->normalized_coords;
443
444 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
445 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
446 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
447
448 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
449 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
450 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
451
452 cfg.compare_function = panfrost_sampler_compare_func(cso);
453 cfg.seamless_cube_map = cso->seamless_cube_map;
454 }
455 }
456
457 static void
458 panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
459 struct mali_shader_meta *fragmeta)
460 {
461 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
462
463 bool msaa = rast->multisample;
464
465 /* TODO: Sample size */
466 SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
467 SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
468
469 struct panfrost_shader_state *fs;
470 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
471
472 /* EXT_shader_framebuffer_fetch requires the shader to be run
473 * per-sample when outputs are read. */
474 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
475 SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
476
477 fragmeta->depth_units = rast->offset_units * 2.0f;
478 fragmeta->depth_factor = rast->offset_scale;
479
 480 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
481
482 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
483 SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
484
485 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
486 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
487 }
488
489 static void
490 panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
491 struct mali_shader_meta *fragmeta)
492 {
493 const struct panfrost_zsa_state *so = ctx->depth_stencil;
494
495 SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
496 so->base.stencil[0].enabled);
497
498 fragmeta->stencil_mask_front = so->stencil_mask_front;
499 fragmeta->stencil_mask_back = so->stencil_mask_back;
500
501 /* Bottom bits for stencil ref, exactly one word */
502 fragmeta->stencil_front.opaque[0] = so->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
503
504 /* If back-stencil is not enabled, use the front values */
505
506 if (so->base.stencil[1].enabled)
507 fragmeta->stencil_back.opaque[0] = so->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
508 else
509 fragmeta->stencil_back = fragmeta->stencil_front;
510
511 SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
512 so->base.depth.writemask);
513
514 fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
515 fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(
516 so->base.depth.enabled ? so->base.depth.func : PIPE_FUNC_ALWAYS));
517 }
518
519 static bool
520 panfrost_fs_required(
521 struct panfrost_shader_state *fs,
522 struct panfrost_blend_final *blend,
523 unsigned rt_count)
524 {
525 /* If we generally have side effects */
526 if (fs->fs_sidefx)
527 return true;
528
529 /* If colour is written we need to execute */
530 for (unsigned i = 0; i < rt_count; ++i) {
531 if (!blend[i].no_colour)
532 return true;
533 }
534
535 /* If depth is written and not implied we need to execute.
536 * TODO: Predicate on Z/S writes being enabled */
537 return (fs->writes_depth || fs->writes_stencil);
538 }
539
540 static void
541 panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
542 struct mali_shader_meta *fragmeta,
543 struct panfrost_blend_final *blend)
544 {
545 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
546 const struct panfrost_device *dev = pan_device(ctx->base.screen);
547 struct panfrost_shader_state *fs;
548 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
549
550 SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
551 (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
552 !ctx->blend->base.dither);
553
554 SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
555 ctx->blend->base.alpha_to_coverage);
556
557 /* Get blending setup */
558 unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
559
560 /* Disable shader execution if we can */
561 if (dev->quirks & MIDGARD_SHADERLESS
562 && !panfrost_fs_required(fs, blend, rt_count)) {
563 fragmeta->shader = 0;
564 fragmeta->attribute_count = 0;
565 fragmeta->varying_count = 0;
566 fragmeta->texture_count = 0;
567 fragmeta->sampler_count = 0;
568
569 /* This feature is not known to work on Bifrost */
570 fragmeta->midgard1.work_count = 1;
571 fragmeta->midgard1.uniform_count = 0;
572 fragmeta->midgard1.uniform_buffer_count = 0;
573 }
574
575 /* If there is a blend shader, work registers are shared. We impose 8
576 * work registers as a limit for blend shaders. Should be lower XXX */
577
578 if (!(dev->quirks & IS_BIFROST)) {
579 for (unsigned c = 0; c < rt_count; ++c) {
580 if (blend[c].is_shader) {
581 fragmeta->midgard1.work_count =
582 MAX2(fragmeta->midgard1.work_count, 8);
583 }
584 }
585 }
586
587 /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
588 * copied to the blend_meta appended (by convention), but this is the
589 * field actually read by the hardware. (Or maybe both are read...?).
590 * Specify the last RTi with a blend shader. */
591
592 fragmeta->blend.shader = 0;
593
594 for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
595 if (!blend[rt].is_shader)
596 continue;
597
598 fragmeta->blend.shader = blend[rt].shader.gpu |
599 blend[rt].shader.first_tag;
600 break;
601 }
602
603 if (dev->quirks & MIDGARD_SFBD) {
 604 /* On single render target (SFBD) platforms, the blend
 605  * information is inside the shader meta itself. We additionally
606 * need to signal CAN_DISCARD for nontrivial blend modes (so
607 * we're able to read back the destination buffer) */
608
609 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
610 blend[0].is_shader);
611
612 if (!blend[0].is_shader) {
613 fragmeta->blend.equation = blend[0].equation.equation;
614 fragmeta->blend.constant = blend[0].equation.constant;
615 }
616
617 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
618 blend[0].load_dest);
619
620 batch->draws |= PIPE_CLEAR_COLOR0;
621 return;
622 }
623
624 if (dev->quirks & IS_BIFROST) {
625 bool no_blend = true;
626
627 for (unsigned i = 0; i < rt_count; ++i)
628 no_blend &= (!blend[i].load_dest | blend[i].no_colour);
629
630 SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
631 !fs->can_discard && !fs->writes_depth && no_blend);
632 }
633 }
634
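/* Pack the per-render-target blend descriptors that get appended after the
 * fragment shader descriptor. The layout differs per GPU generation: Bifrost
 * uses struct bifrost_blend_rt, Midgard MFBD uses struct midgard_blend_rt. */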
635 static void
636 panfrost_emit_blend(struct panfrost_batch *batch, void *rts,
637 struct panfrost_blend_final *blend)
638 {
639 const struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
640 struct panfrost_shader_state *fs = panfrost_get_shader_state(batch->ctx, PIPE_SHADER_FRAGMENT);
641 unsigned rt_count = batch->key.nr_cbufs;
642
643 struct bifrost_blend_rt *brts = rts;
644 struct midgard_blend_rt *mrts = rts;
645
646 /* Disable blending for depth-only on Bifrost */
647
648 if (rt_count == 0 && dev->quirks & IS_BIFROST)
649 brts[0].unk2 = 0x3;
650
651 for (unsigned i = 0; i < rt_count; ++i) {
652 unsigned flags = 0;
653
654 pan_pack(&flags, BLEND_FLAGS, cfg) {
655 if (blend[i].no_colour) {
656 cfg.enable = false;
657 break;
658 }
659
660 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
661
662 cfg.srgb = util_format_is_srgb(batch->key.cbufs[i]->format);
663 cfg.load_destination = blend[i].load_dest;
664 cfg.dither_disable = !batch->ctx->blend->base.dither;
665
666 if (!(dev->quirks & IS_BIFROST))
667 cfg.midgard_blend_shader = blend[i].is_shader;
668 }
669
670 if (dev->quirks & IS_BIFROST) {
671 brts[i].flags = flags;
672
673 if (blend[i].is_shader) {
674 /* The blend shader's address needs to be at
675 * the same top 32 bit as the fragment shader.
676 * TODO: Ensure that's always the case.
677 */
678 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
679 (fs->bo->gpu & (0xffffffffull << 32)));
680 brts[i].shader = blend[i].shader.gpu;
681 brts[i].unk2 = 0x0;
682 } else {
683 enum pipe_format format = batch->key.cbufs[i]->format;
684 const struct util_format_description *format_desc;
685 format_desc = util_format_description(format);
686
687 brts[i].equation = blend[i].equation.equation;
688
689 /* TODO: this is a bit more complicated */
690 brts[i].constant = blend[i].equation.constant;
691
692 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
693
694 /* 0x19 disables blending and forces REPLACE
695 * mode (equivalent to rgb_mode = alpha_mode =
696 * x122, colour mask = 0xF). 0x1a allows
697 * blending. */
698 brts[i].unk2 = blend[i].opaque ? 0x19 : 0x1a;
699
700 brts[i].shader_type = fs->blend_types[i];
701 }
702 } else {
703 memcpy(&mrts[i].flags, &flags, sizeof(flags));
704
705 if (blend[i].is_shader) {
706 mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
707 } else {
708 mrts[i].blend.equation = blend[i].equation.equation;
709 mrts[i].blend.constant = blend[i].equation.constant;
710 }
711 }
712 }
713 }
714
715 static void
716 panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
717 struct mali_shader_meta *fragmeta,
718 struct panfrost_blend_final *blend)
719 {
720 const struct panfrost_device *dev = pan_device(ctx->base.screen);
721 struct panfrost_shader_state *fs;
722
723 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
724
725 bool msaa = ctx->rasterizer->base.multisample;
726 fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
727
728 fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
729 fragmeta->unknown2_4 = 0x4e0;
730
731 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
732 * is required (independent of 32-bit/64-bit descriptors), or why it's
733 * not used on later GPU revisions. Otherwise, all shader jobs fault on
734 * these earlier chips (perhaps this is a chicken bit of some kind).
735 * More investigation is needed. */
736
737 SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
738
739 if (dev->quirks & IS_BIFROST) {
740 /* TODO */
741 } else {
 742 /* Depending on whether it's legal in the given shader, we try to
743 * enable early-z testing. TODO: respect e-z force */
744
745 SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
746 !fs->can_discard && !fs->writes_global &&
747 !fs->writes_depth && !fs->writes_stencil &&
748 !ctx->blend->base.alpha_to_coverage);
749
750 /* Add the writes Z/S flags if needed. */
751 SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
752 SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
753
754 /* Any time texturing is used, derivatives are implicitly calculated,
755 * so we need to enable helper invocations */
756
757 SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
758 fs->helper_invocations);
759
760 /* If discard is enabled, which bit we set to convey this
761 * depends on if depth/stencil is used for the draw or not.
762 * Just one of depth OR stencil is enough to trigger this. */
763
764 const struct pipe_depth_stencil_alpha_state *zsa = &ctx->depth_stencil->base;
765 bool zs_enabled =
766 fs->writes_depth || fs->writes_stencil ||
767 (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS) ||
768 zsa->stencil[0].enabled;
769
770 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
771 fs->outputs_read || (!zs_enabled && fs->can_discard));
772 SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
773 }
774
775 panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
776 panfrost_frag_meta_zsa_update(ctx, fragmeta);
777 panfrost_frag_meta_blend_update(ctx, fragmeta, blend);
778 }
779
780 void
781 panfrost_emit_shader_meta(struct panfrost_batch *batch,
782 enum pipe_shader_type st,
783 struct mali_vertex_tiler_postfix *postfix)
784 {
785 struct panfrost_context *ctx = batch->ctx;
786 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
787
788 if (!ss) {
789 postfix->shader = 0;
790 return;
791 }
792
793 struct mali_shader_meta meta;
794
795 panfrost_shader_meta_init(ctx, st, &meta);
796
797 /* Add the shader BO to the batch. */
798 panfrost_batch_add_bo(batch, ss->bo,
799 PAN_BO_ACCESS_PRIVATE |
800 PAN_BO_ACCESS_READ |
801 panfrost_bo_access_for_stage(st));
802
803 mali_ptr shader_ptr;
804
805 if (st == PIPE_SHADER_FRAGMENT) {
806 struct panfrost_device *dev = pan_device(ctx->base.screen);
807 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
808 size_t desc_size = sizeof(meta);
809 void *rts = NULL;
810 struct panfrost_transfer xfer;
811 unsigned rt_size;
812
813 if (dev->quirks & MIDGARD_SFBD)
814 rt_size = 0;
815 else if (dev->quirks & IS_BIFROST)
816 rt_size = sizeof(struct bifrost_blend_rt);
817 else
818 rt_size = sizeof(struct midgard_blend_rt);
819
820 desc_size += rt_size * rt_count;
821
822 if (rt_size)
823 rts = rzalloc_size(ctx, rt_size * rt_count);
824
825 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
826
827 for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
828 blend[c] = panfrost_get_blend_for_context(ctx, c);
829
830 panfrost_frag_shader_meta_init(ctx, &meta, blend);
831
832 if (!(dev->quirks & MIDGARD_SFBD))
833 panfrost_emit_blend(batch, rts, blend);
834
835 xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, sizeof(meta));
836
837 memcpy(xfer.cpu, &meta, sizeof(meta));
838 memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
839
840 if (rt_size)
841 ralloc_free(rts);
842
843 shader_ptr = xfer.gpu;
844 } else {
845 shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
846 sizeof(meta));
847 }
848
849 postfix->shader = shader_ptr;
850 }
851
852 void
853 panfrost_emit_viewport(struct panfrost_batch *batch,
854 struct mali_vertex_tiler_postfix *tiler_postfix)
855 {
856 struct panfrost_context *ctx = batch->ctx;
857 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
858 const struct pipe_scissor_state *ss = &ctx->scissor;
859 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
860 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
861
862 /* Derive min/max from translate/scale. Note since |x| >= 0 by
863 * definition, we have that -|x| <= |x| hence translate - |scale| <=
864 * translate + |scale|, so the ordering is correct here. */
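/* Illustrative example (numbers are hypothetical, not from the source): for
 * an 800-pixel-wide viewport starting at x = 0, gallium gives
 * translate[0] = 400 and scale[0] = 400, so vp_minx = 0 and vp_maxx = 800. */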
865 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
866 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
867 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
868 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
869 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
870 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
871
 872 /* Scissor to the intersection of the viewport and the scissor, clamped
 873  * to the framebuffer */
874
875 unsigned minx = MIN2(fb->width, vp_minx);
876 unsigned maxx = MIN2(fb->width, vp_maxx);
877 unsigned miny = MIN2(fb->height, vp_miny);
878 unsigned maxy = MIN2(fb->height, vp_maxy);
879
880 if (ss && rast->scissor) {
881 minx = MAX2(ss->minx, minx);
882 miny = MAX2(ss->miny, miny);
883 maxx = MIN2(ss->maxx, maxx);
884 maxy = MIN2(ss->maxy, maxy);
885 }
886
887 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
888
889 pan_pack(T.cpu, VIEWPORT, cfg) {
890 cfg.scissor_minimum_x = minx;
891 cfg.scissor_minimum_y = miny;
892 cfg.scissor_maximum_x = maxx - 1;
893 cfg.scissor_maximum_y = maxy - 1;
894
895 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
896 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
897 }
898
899 tiler_postfix->viewport = T.gpu;
900 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
901 }
902
903 static mali_ptr
904 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
905 enum pipe_shader_type st,
906 struct panfrost_constant_buffer *buf,
907 unsigned index)
908 {
909 struct pipe_constant_buffer *cb = &buf->cb[index];
910 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
911
912 if (rsrc) {
913 panfrost_batch_add_bo(batch, rsrc->bo,
914 PAN_BO_ACCESS_SHARED |
915 PAN_BO_ACCESS_READ |
916 panfrost_bo_access_for_stage(st));
917
 918 /* Alignment guaranteed by
919 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
920 return rsrc->bo->gpu + cb->buffer_offset;
921 } else if (cb->user_buffer) {
922 return panfrost_pool_upload_aligned(&batch->pool,
923 cb->user_buffer +
924 cb->buffer_offset,
925 cb->buffer_size, 16);
926 } else {
927 unreachable("No constant buffer");
928 }
929 }
930
931 struct sysval_uniform {
932 union {
933 float f[4];
934 int32_t i[4];
935 uint32_t u[4];
936 uint64_t du[2];
937 };
938 };
939
940 static void
941 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
942 struct sysval_uniform *uniform)
943 {
944 struct panfrost_context *ctx = batch->ctx;
945 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
946
947 uniform->f[0] = vp->scale[0];
948 uniform->f[1] = vp->scale[1];
949 uniform->f[2] = vp->scale[2];
950 }
951
952 static void
953 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
954 struct sysval_uniform *uniform)
955 {
956 struct panfrost_context *ctx = batch->ctx;
957 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
958
959 uniform->f[0] = vp->translate[0];
960 uniform->f[1] = vp->translate[1];
961 uniform->f[2] = vp->translate[2];
962 }
963
964 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
965 enum pipe_shader_type st,
966 unsigned int sysvalid,
967 struct sysval_uniform *uniform)
968 {
969 struct panfrost_context *ctx = batch->ctx;
970 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
971 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
972 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
973 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
974
975 assert(dim);
976 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
977
978 if (dim > 1)
979 uniform->i[1] = u_minify(tex->texture->height0,
980 tex->u.tex.first_level);
981
982 if (dim > 2)
983 uniform->i[2] = u_minify(tex->texture->depth0,
984 tex->u.tex.first_level);
985
986 if (is_array)
987 uniform->i[dim] = tex->texture->array_size;
988 }
989
990 static void
991 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
992 enum pipe_shader_type st,
993 unsigned ssbo_id,
994 struct sysval_uniform *uniform)
995 {
996 struct panfrost_context *ctx = batch->ctx;
997
998 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
999 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
1000
1001 /* Compute address */
1002 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
1003
1004 panfrost_batch_add_bo(batch, bo,
1005 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
1006 panfrost_bo_access_for_stage(st));
1007
1008 /* Upload address and size as sysval */
1009 uniform->du[0] = bo->gpu + sb.buffer_offset;
1010 uniform->u[2] = sb.buffer_size;
1011 }
1012
1013 static void
1014 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
1015 enum pipe_shader_type st,
1016 unsigned samp_idx,
1017 struct sysval_uniform *uniform)
1018 {
1019 struct panfrost_context *ctx = batch->ctx;
1020 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1021
1022 uniform->f[0] = sampl->min_lod;
1023 uniform->f[1] = sampl->max_lod;
1024 uniform->f[2] = sampl->lod_bias;
1025
1026 /* Even without any errata, Midgard represents "no mipmapping" as
1027 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1028 * panfrost_create_sampler_state which also explains our choice of
1029 * epsilon value (again to keep behaviour consistent) */
1030
1031 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1032 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1033 }
1034
1035 static void
1036 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
1037 struct sysval_uniform *uniform)
1038 {
1039 struct panfrost_context *ctx = batch->ctx;
1040
1041 uniform->u[0] = ctx->compute_grid->grid[0];
1042 uniform->u[1] = ctx->compute_grid->grid[1];
1043 uniform->u[2] = ctx->compute_grid->grid[2];
1044 }
1045
1046 static void
1047 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
1048 struct panfrost_shader_state *ss,
1049 enum pipe_shader_type st)
1050 {
1051 struct sysval_uniform *uniforms = (void *)buf;
1052
1053 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1054 int sysval = ss->sysval[i];
1055
1056 switch (PAN_SYSVAL_TYPE(sysval)) {
1057 case PAN_SYSVAL_VIEWPORT_SCALE:
1058 panfrost_upload_viewport_scale_sysval(batch,
1059 &uniforms[i]);
1060 break;
1061 case PAN_SYSVAL_VIEWPORT_OFFSET:
1062 panfrost_upload_viewport_offset_sysval(batch,
1063 &uniforms[i]);
1064 break;
1065 case PAN_SYSVAL_TEXTURE_SIZE:
1066 panfrost_upload_txs_sysval(batch, st,
1067 PAN_SYSVAL_ID(sysval),
1068 &uniforms[i]);
1069 break;
1070 case PAN_SYSVAL_SSBO:
1071 panfrost_upload_ssbo_sysval(batch, st,
1072 PAN_SYSVAL_ID(sysval),
1073 &uniforms[i]);
1074 break;
1075 case PAN_SYSVAL_NUM_WORK_GROUPS:
1076 panfrost_upload_num_work_groups_sysval(batch,
1077 &uniforms[i]);
1078 break;
1079 case PAN_SYSVAL_SAMPLER:
1080 panfrost_upload_sampler_sysval(batch, st,
1081 PAN_SYSVAL_ID(sysval),
1082 &uniforms[i]);
1083 break;
1084 default:
1085 assert(0);
1086 }
1087 }
1088 }
1089
1090 static const void *
1091 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
1092 unsigned index)
1093 {
1094 struct pipe_constant_buffer *cb = &buf->cb[index];
1095 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
1096
1097 if (rsrc)
1098 return rsrc->bo->cpu;
1099 else if (cb->user_buffer)
1100 return cb->user_buffer;
1101 else
1102 unreachable("No constant buffer");
1103 }
1104
1105 void
1106 panfrost_emit_const_buf(struct panfrost_batch *batch,
1107 enum pipe_shader_type stage,
1108 struct mali_vertex_tiler_postfix *postfix)
1109 {
1110 struct panfrost_context *ctx = batch->ctx;
1111 struct panfrost_shader_variants *all = ctx->shader[stage];
1112
1113 if (!all)
1114 return;
1115
1116 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1117
1118 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1119
1120 /* Uniforms are implicitly UBO #0 */
1121 bool has_uniforms = buf->enabled_mask & (1 << 0);
1122
1123 /* Allocate room for the sysval and the uniforms */
1124 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1125 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1126 size_t size = sys_size + uniform_size;
1127 struct panfrost_transfer transfer =
1128 panfrost_pool_alloc_aligned(&batch->pool, size, 16);
1129
1130 /* Upload sysvals requested by the shader */
1131 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1132
1133 /* Upload uniforms */
1134 if (has_uniforms && uniform_size) {
1135 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1136 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1137 }
1138
1139 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1140 * uploaded */
1141
1142 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
1143 assert(ubo_count >= 1);
1144
1145 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1146 struct panfrost_transfer ubos =
1147 panfrost_pool_alloc_aligned(&batch->pool, sz,
1148 MALI_UNIFORM_BUFFER_LENGTH);
1149
1150 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1151
1152 /* Upload uniforms as a UBO */
1153
1154 if (ss->uniform_count) {
1155 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1156 cfg.entries = ss->uniform_count;
1157 cfg.pointer = transfer.gpu;
1158 }
1159 } else {
1160 *ubo_ptr = 0;
1161 }
1162
1163 /* The rest are honest-to-goodness UBOs */
1164
1165 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1166 size_t usz = buf->cb[ubo].buffer_size;
1167 bool enabled = buf->enabled_mask & (1 << ubo);
1168 bool empty = usz == 0;
1169
1170 if (!enabled || empty) {
1171 ubo_ptr[ubo] = 0;
1172 continue;
1173 }
1174
1175 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1176 cfg.entries = DIV_ROUND_UP(usz, 16);
1177 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1178 stage, buf, ubo);
1179 }
1180 }
1181
1182 postfix->uniforms = transfer.gpu;
1183 postfix->uniform_buffers = ubos.gpu;
1184
1185 buf->dirty_mask = 0;
1186 }
1187
1188 void
1189 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1190 const struct pipe_grid_info *info,
1191 struct midgard_payload_vertex_tiler *vtp)
1192 {
1193 struct panfrost_context *ctx = batch->ctx;
1194 struct panfrost_device *dev = pan_device(ctx->base.screen);
1195 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1196 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1197 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1198 128));
1199
1200 unsigned log2_instances =
1201 util_logbase2_ceil(info->grid[0]) +
1202 util_logbase2_ceil(info->grid[1]) +
1203 util_logbase2_ceil(info->grid[2]);
1204
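/* Illustrative arithmetic (hypothetical numbers, not from the source): a
 * grid of (3, 4, 1) groups gives log2_instances = 2 + 2 + 0 = 4, so with
 * single_size = 128 and a 4-core device, shared_size = 128 * 16 * 4 bytes. */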
1205 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
1206 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1207 shared_size,
1208 1);
1209
1210 struct mali_shared_memory shared = {
1211 .shared_memory = bo->gpu,
1212 .shared_workgroup_count = log2_instances,
1213 .shared_shift = util_logbase2(single_size) + 1
1214 };
1215
1216 vtp->postfix.shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared,
1217 sizeof(shared), 64);
1218 }
1219
1220 static mali_ptr
1221 panfrost_get_tex_desc(struct panfrost_batch *batch,
1222 enum pipe_shader_type st,
1223 struct panfrost_sampler_view *view)
1224 {
1225 if (!view)
1226 return (mali_ptr) 0;
1227
1228 struct pipe_sampler_view *pview = &view->base;
1229 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1230
1231 /* Add the BO to the job so it's retained until the job is done. */
1232
1233 panfrost_batch_add_bo(batch, rsrc->bo,
1234 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1235 panfrost_bo_access_for_stage(st));
1236
1237 panfrost_batch_add_bo(batch, view->bo,
1238 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1239 panfrost_bo_access_for_stage(st));
1240
1241 return view->bo->gpu;
1242 }
1243
1244 static void
1245 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1246 struct pipe_context *pctx)
1247 {
1248 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1249 if (view->texture_bo != rsrc->bo->gpu ||
1250 view->modifier != rsrc->modifier) {
1251 panfrost_bo_unreference(view->bo);
1252 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1253 }
1254 }
1255
1256 void
1257 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1258 enum pipe_shader_type stage,
1259 struct mali_vertex_tiler_postfix *postfix)
1260 {
1261 struct panfrost_context *ctx = batch->ctx;
1262 struct panfrost_device *device = pan_device(ctx->base.screen);
1263
1264 if (!ctx->sampler_view_count[stage])
1265 return;
1266
1267 if (device->quirks & IS_BIFROST) {
1268 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1269 MALI_BIFROST_TEXTURE_LENGTH *
1270 ctx->sampler_view_count[stage],
1271 MALI_BIFROST_TEXTURE_LENGTH);
1272
1273 struct mali_bifrost_texture_packed *out =
1274 (struct mali_bifrost_texture_packed *) T.cpu;
1275
1276 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1277 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1278 struct pipe_sampler_view *pview = &view->base;
1279 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1280
1281 panfrost_update_sampler_view(view, &ctx->base);
1282 out[i] = view->bifrost_descriptor;
1283
1284 /* Add the BOs to the job so they are retained until the job is done. */
1285
1286 panfrost_batch_add_bo(batch, rsrc->bo,
1287 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1288 panfrost_bo_access_for_stage(stage));
1289
1290 panfrost_batch_add_bo(batch, view->bo,
1291 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1292 panfrost_bo_access_for_stage(stage));
1293 }
1294
1295 postfix->textures = T.gpu;
1296 } else {
1297 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1298
1299 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1300 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1301
1302 panfrost_update_sampler_view(view, &ctx->base);
1303
1304 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1305 }
1306
1307 postfix->textures = panfrost_pool_upload_aligned(&batch->pool,
1308 trampolines,
1309 sizeof(uint64_t) *
1310 ctx->sampler_view_count[stage],
1311 sizeof(uint64_t));
1312 }
1313 }
1314
1315 void
1316 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1317 enum pipe_shader_type stage,
1318 struct mali_vertex_tiler_postfix *postfix)
1319 {
1320 struct panfrost_context *ctx = batch->ctx;
1321
1322 if (!ctx->sampler_count[stage])
1323 return;
1324
1325 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1326 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1327
1328 size_t sz = desc_size * ctx->sampler_count[stage];
1329 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool, sz, desc_size);
1330 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1331
1332 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1333 out[i] = ctx->samplers[stage][i]->hw;
1334
1335 postfix->sampler_descriptor = T.gpu;
1336 }
1337
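/* Build the attribute buffer records and attribute records for the vertex
 * job. Buffers are mapped 1:1 with vertex attributes; NPOT instance divisors
 * take an extra continuation record, and gl_VertexID/gl_InstanceID get
 * special buffers at the end when the shader needs them. */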
1338 void
1339 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1340 struct mali_vertex_tiler_postfix *vertex_postfix)
1341 {
1342 struct panfrost_context *ctx = batch->ctx;
1343 struct panfrost_vertex_state *so = ctx->vertex;
1344 struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1345
1346 unsigned instance_shift = vertex_postfix->instance_shift;
1347 unsigned instance_odd = vertex_postfix->instance_odd;
1348
1349 /* Worst case: everything is NPOT, which is only possible if instancing
 1350  * is enabled. Otherwise a single record is guaranteed */
1351 bool could_npot = instance_shift || instance_odd;
1352
1353 struct panfrost_transfer S = panfrost_pool_alloc_aligned(&batch->pool,
1354 MALI_ATTRIBUTE_BUFFER_LENGTH * vs->attribute_count *
1355 (could_npot ? 2 : 1),
1356 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1357
1358 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1359 MALI_ATTRIBUTE_LENGTH * vs->attribute_count,
1360 MALI_ATTRIBUTE_LENGTH);
1361
1362 struct mali_attribute_buffer_packed *bufs =
1363 (struct mali_attribute_buffer_packed *) S.cpu;
1364
1365 struct mali_attribute_packed *out =
1366 (struct mali_attribute_packed *) T.cpu;
1367
1368 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1369 unsigned k = 0;
1370
1371 for (unsigned i = 0; i < so->num_elements; ++i) {
1372 /* We map buffers 1:1 with the attributes, which
1373 * means duplicating some vertex buffers (who cares? aside from
1374 * maybe some caching implications but I somehow doubt that
1375 * matters) */
1376
1377 struct pipe_vertex_element *elem = &so->pipe[i];
1378 unsigned vbi = elem->vertex_buffer_index;
1379 attrib_to_buffer[i] = k;
1380
1381 if (!(ctx->vb_mask & (1 << vbi)))
1382 continue;
1383
1384 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1385 struct panfrost_resource *rsrc;
1386
1387 rsrc = pan_resource(buf->buffer.resource);
1388 if (!rsrc)
1389 continue;
1390
1391 /* Add a dependency of the batch on the vertex buffer */
1392 panfrost_batch_add_bo(batch, rsrc->bo,
1393 PAN_BO_ACCESS_SHARED |
1394 PAN_BO_ACCESS_READ |
1395 PAN_BO_ACCESS_VERTEX_TILER);
1396
1397 /* Mask off lower bits, see offset fixup below */
1398 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1399 mali_ptr addr = raw_addr & ~63;
1400
1401 /* Since we advanced the base pointer, we shrink the buffer
1402 * size, but add the offset we subtracted */
1403 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1404 - buf->buffer_offset;
1405
1406 /* When there is a divisor, the hardware-level divisor is
1407 * the product of the instance divisor and the padded count */
1408 unsigned divisor = elem->instance_divisor;
1409 unsigned hw_divisor = ctx->padded_count * divisor;
1410 unsigned stride = buf->stride;
1411
 1412 /* If there's a divisor (e.g. 1) but no instancing, every vertex
 1413  * should read the same attribute value, so zero the stride */
1414
1415 if (divisor && ctx->instance_count == 1)
1416 stride = 0;
1417
1418 if (!divisor || ctx->instance_count <= 1) {
1419 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1420 if (ctx->instance_count > 1)
1421 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1422
1423 cfg.pointer = addr;
1424 cfg.stride = stride;
1425 cfg.size = size;
1426 cfg.divisor_r = instance_shift;
1427 cfg.divisor_p = instance_odd;
1428 }
1429 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1430 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1431 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1432 cfg.pointer = addr;
1433 cfg.stride = stride;
1434 cfg.size = size;
1435 cfg.divisor_r = __builtin_ctz(hw_divisor);
1436 }
1437
1438 } else {
1439 unsigned shift = 0, extra_flags = 0;
1440
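/* Assumption (editorial note, not from the original comments): this looks
 * like the standard multiply-by-reciprocal-and-shift trick for dividing by
 * an arbitrary non-power-of-two constant without an integer divide; the
 * exact encoding lives in panfrost_compute_magic_divisor(). */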
1441 unsigned magic_divisor =
1442 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
1443
1444 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1445 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1446 cfg.pointer = addr;
1447 cfg.stride = stride;
1448 cfg.size = size;
1449
1450 cfg.divisor_r = shift;
1451 cfg.divisor_e = extra_flags;
1452 }
1453
1454 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1455 cfg.divisor_numerator = magic_divisor;
1456 cfg.divisor = divisor;
1457 }
1458
1459 ++k;
1460 }
1461
1462 ++k;
1463 }
1464
1465 /* Add special gl_VertexID/gl_InstanceID buffers */
1466
1467 if (unlikely(vs->attribute_count >= PAN_VERTEX_ID)) {
1468 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1469
1470 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1471 cfg.buffer_index = k++;
1472 cfg.format = so->formats[PAN_VERTEX_ID];
1473 }
1474
1475 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1476
1477 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1478 cfg.buffer_index = k++;
1479 cfg.format = so->formats[PAN_INSTANCE_ID];
1480 }
1481 }
1482
1483 /* Attribute addresses require 64-byte alignment, so let:
1484 *
1485 * base' = base & ~63 = base - (base & 63)
1486 * offset' = offset + (base & 63)
1487 *
1488 * Since base' + offset' = base + offset, these are equivalent
 1489  * addressing modes and now base is 64-byte aligned.
1490 */
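/* Illustrative example (not from the original comment): if base ends in
 * 0x37, then base' = base - 0x37 is 64-byte aligned and offset' = offset +
 * 0x37, leaving base' + offset' unchanged. */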
1491
1492 unsigned start = vertex_postfix->offset_start;
1493
1494 for (unsigned i = 0; i < so->num_elements; ++i) {
1495 unsigned vbi = so->pipe[i].vertex_buffer_index;
1496 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1497
1498 /* Adjust by the masked off bits of the offset. Make sure we
1499 * read src_offset from so->hw (which is not GPU visible)
1500 * rather than target (which is) due to caching effects */
1501
1502 unsigned src_offset = so->pipe[i].src_offset;
1503
1504 /* BOs aligned to 4k so guaranteed aligned to 64 */
1505 src_offset += (buf->buffer_offset & 63);
1506
 1507 /* Also, somewhat obscurely, per-instance data needs to be
1508 * offset in response to a delayed start in an indexed draw */
1509
1510 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1511 src_offset -= buf->stride * start;
1512
1513 pan_pack(out + i, ATTRIBUTE, cfg) {
1514 cfg.buffer_index = attrib_to_buffer[i];
1515 cfg.format = so->formats[i];
1516 cfg.offset = src_offset;
1517 }
1518 }
1519
1520 vertex_postfix->attributes = S.gpu;
1521 vertex_postfix->attribute_meta = T.gpu;
1522 }
1523
1524 static mali_ptr
1525 panfrost_emit_varyings(struct panfrost_batch *batch,
1526 struct mali_attribute_buffer_packed *slot,
1527 unsigned stride, unsigned count)
1528 {
1529 unsigned size = stride * count;
1530 mali_ptr ptr = panfrost_pool_alloc_aligned(&batch->invisible_pool, size, 64).gpu;
1531
1532 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1533 cfg.stride = stride;
1534 cfg.size = size;
1535 cfg.pointer = ptr;
1536 }
1537
1538 return ptr;
1539 }
1540
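/* Compute the sub-64-byte remainder of a stream output target's start
 * address. panfrost_emit_streamout() below aligns the buffer pointer down to
 * 64 bytes, and this remainder is presumably what ends up in the
 * streamout_offsets array consumed by pan_emit_vary_xfb(). */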
1541 static unsigned
1542 panfrost_streamout_offset(unsigned stride, unsigned offset,
1543 struct pipe_stream_output_target *target)
1544 {
1545 return (target->buffer_offset + (offset * stride * 4)) & 63;
1546 }
1547
1548 static void
1549 panfrost_emit_streamout(struct panfrost_batch *batch,
1550 struct mali_attribute_buffer_packed *slot,
1551 unsigned stride_words, unsigned offset, unsigned count,
1552 struct pipe_stream_output_target *target)
1553 {
1554 unsigned stride = stride_words * 4;
1555 unsigned max_size = target->buffer_size;
1556 unsigned expected_size = stride * count;
1557
1558 /* Grab the BO and bind it to the batch */
1559 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1560
1561 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1562 * the perspective of the TILER and FRAGMENT.
1563 */
1564 panfrost_batch_add_bo(batch, bo,
1565 PAN_BO_ACCESS_SHARED |
1566 PAN_BO_ACCESS_RW |
1567 PAN_BO_ACCESS_VERTEX_TILER |
1568 PAN_BO_ACCESS_FRAGMENT);
1569
1570 /* We will have an offset applied to get alignment */
1571 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
1572
1573 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1574 cfg.pointer = (addr & ~63);
1575 cfg.stride = stride;
1576 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1577 }
1578 }
1579
1580 static bool
1581 has_point_coord(unsigned mask, gl_varying_slot loc)
1582 {
1583 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1584 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1585 else if (loc == VARYING_SLOT_PNTC)
1586 return (mask & (1 << 8));
1587 else
1588 return false;
1589 }
1590
1591 /* Helpers for manipulating stream out information so we can pack varyings
1592 * accordingly. Compute the src_offset for a given captured varying */
1593
1594 static struct pipe_stream_output *
1595 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1596 {
1597 for (unsigned i = 0; i < info->num_outputs; ++i) {
1598 if (info->output[i].register_index == loc)
1599 return &info->output[i];
1600 }
1601
1602 unreachable("Varying not captured");
1603 }
1604
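/* Size in bytes of one element of the given varying format, derived from the
 * packed channel count and per-channel width. Illustrative example: a
 * 4-channel fp16 format yields 2 * 4 = 8 bytes. */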
1605 static unsigned
1606 pan_varying_size(enum mali_format fmt)
1607 {
1608 unsigned type = MALI_EXTRACT_TYPE(fmt);
1609 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1610 unsigned bits = MALI_EXTRACT_BITS(fmt);
1611 unsigned bpc = 0;
1612
1613 if (bits == MALI_CHANNEL_FLOAT) {
1614 /* No doubles */
1615 bool fp16 = (type == MALI_FORMAT_SINT);
1616 assert(fp16 || (type == MALI_FORMAT_UNORM));
1617
1618 bpc = fp16 ? 2 : 4;
1619 } else {
1620 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1621
1622 /* See the enums */
1623 bits = 1 << bits;
1624 assert(bits >= 8);
1625 bpc = bits / 8;
1626 }
1627
1628 return bpc * chan;
1629 }
1630
1631 /* Indices for named (non-XFB) varyings that are present. These are packed
1632 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1633 * PAN_VARY_*). This has the nice property that you can lookup the buffer index
1634 * of a given special field given a shift S by:
1635 *
1636 * idx = popcount(P & ((1 << S) - 1))
1637 *
1638 * That is... look at all of the varyings that come earlier and count them, the
 1639  * count is the new index. Likewise, the total number of special
1640 * buffers required is simply popcount(P)
1641 */
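/* Illustrative example (not from the original comment): with GENERAL,
 * POSITION and PNTCOORD present, P = 0b1011. The buffer index of PNTCOORD
 * (S = 3) is popcount(0b1011 & 0b0111) = 2, and popcount(P) = 3 special
 * buffers are required in total. */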
1642
1643 enum pan_special_varying {
1644 PAN_VARY_GENERAL = 0,
1645 PAN_VARY_POSITION = 1,
1646 PAN_VARY_PSIZ = 2,
1647 PAN_VARY_PNTCOORD = 3,
1648 PAN_VARY_FACE = 4,
1649 PAN_VARY_FRAGCOORD = 5,
1650
1651 /* Keep last */
1652 PAN_VARY_MAX,
1653 };
1654
 1655 /* Given a varying, figure out which index it corresponds to */
1656
1657 static inline unsigned
1658 pan_varying_index(unsigned present, enum pan_special_varying v)
1659 {
1660 unsigned mask = (1 << v) - 1;
1661 return util_bitcount(present & mask);
1662 }
1663
1664 /* Get the base offset for XFB buffers, which by convention come after
1665 * everything else. Wrapper function for semantic reasons; by construction this
1666 * is just popcount. */
1667
1668 static inline unsigned
1669 pan_xfb_base(unsigned present)
1670 {
1671 return util_bitcount(present);
1672 }
1673
1674 /* Computes the present mask for varyings so we can start emitting varying records */
1675
1676 static inline unsigned
1677 pan_varying_present(
1678 struct panfrost_shader_state *vs,
1679 struct panfrost_shader_state *fs,
1680 unsigned quirks)
1681 {
1682 /* At the moment we always emit general and position buffers. Not
1683 * strictly necessary but usually harmless */
1684
1685 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1686
1687 /* Enable special buffers by the shader info */
1688
1689 if (vs->writes_point_size)
1690 present |= (1 << PAN_VARY_PSIZ);
1691
1692 if (fs->reads_point_coord)
1693 present |= (1 << PAN_VARY_PNTCOORD);
1694
1695 if (fs->reads_face)
1696 present |= (1 << PAN_VARY_FACE);
1697
1698 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1699 present |= (1 << PAN_VARY_FRAGCOORD);
1700
1701 /* Also, if we have a point sprite, we need a point coord buffer */
1702
1703 for (unsigned i = 0; i < fs->varying_count; i++) {
1704 gl_varying_slot loc = fs->varyings_loc[i];
1705
1706 if (has_point_coord(fs->point_sprite_mask, loc))
1707 present |= (1 << PAN_VARY_PNTCOORD);
1708 }
1709
1710 return present;
1711 }
1712
1713 /* Emitters for varying records */
1714
1715 static void
1716 pan_emit_vary(struct mali_attribute_packed *out,
1717 unsigned present, enum pan_special_varying buf,
1718 unsigned quirks, enum mali_format format,
1719 unsigned offset)
1720 {
1721 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1722 unsigned swizzle = quirks & HAS_SWIZZLES ?
1723 panfrost_get_default_swizzle(nr_channels) :
1724 panfrost_bifrost_swizzle(nr_channels);
1725
1726 pan_pack(out, ATTRIBUTE, cfg) {
1727 cfg.buffer_index = pan_varying_index(present, buf);
1728 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1729 cfg.format = (format << 12) | swizzle;
1730 cfg.offset = offset;
1731 }
1732 }
1733
1734 /* General varying that is unused */
1735
1736 static void
1737 pan_emit_vary_only(struct mali_attribute_packed *out,
1738 unsigned present, unsigned quirks)
1739 {
1740 pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
1741 }
1742
1743 /* Special records */
1744
1745 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1746 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1747 [PAN_VARY_PSIZ] = MALI_R16F,
1748 [PAN_VARY_PNTCOORD] = MALI_R16F,
1749 [PAN_VARY_FACE] = MALI_R32I,
1750 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1751 };
1752
1753 static void
1754 pan_emit_vary_special(struct mali_attribute_packed *out,
1755 unsigned present, enum pan_special_varying buf,
1756 unsigned quirks)
1757 {
1758 assert(buf < PAN_VARY_MAX);
1759 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1760 }
1761
1762 static enum mali_format
1763 pan_xfb_format(enum mali_format format, unsigned nr)
1764 {
1765 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1766 return MALI_R32F | MALI_NR_CHANNELS(nr);
1767 else
1768 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1769 }
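/* Illustrative example: a half-float varying captured with 3 components is
 * widened to MALI_R32F | MALI_NR_CHANNELS(3); transform feedback always
 * writes full 32-bit values regardless of the varying's own precision. */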
1770
1771 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1772  * a bitfield) 32-bit, smaller than a 64-bit pointer, so we may as well pass
1773  * it by value. */
1774
1775 static void
1776 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1777 unsigned present,
1778 unsigned max_xfb,
1779 unsigned *streamout_offsets,
1780 unsigned quirks,
1781 enum mali_format format,
1782 struct pipe_stream_output o)
1783 {
1784 unsigned swizzle = quirks & HAS_SWIZZLES ?
1785 panfrost_get_default_swizzle(o.num_components) :
1786 panfrost_bifrost_swizzle(o.num_components);
1787
1788 pan_pack(out, ATTRIBUTE, cfg) {
1789 /* XFB buffers come after everything else */
1790 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1791 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1792
1793 /* Override number of channels and precision to highp */
1794 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1795
1796 /* Combine the dword destination offset with the per-buffer streamout offset */
1797 cfg.offset = (o.dst_offset * 4) /* dwords */
1798 + streamout_offsets[o.output_buffer];
1799 }
1800 }
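/* Offset example (illustrative): capturing into output_buffer 0 with
 * dst_offset = 4 (dwords) while streamout_offsets[0] = 256 bytes places the
 * write at byte offset 4 * 4 + 256 = 272 within that streamout buffer. */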
1801
1802 /* Determine if we should capture a varying for XFB. This requires actually
1803  * having a buffer for it. If we don't capture it, we'll fall back to a general
1804 * varying path (linked or unlinked, possibly discarding the write) */
1805
1806 static bool
1807 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1808 unsigned loc, unsigned max_xfb)
1809 {
1810 if (!(xfb->so_mask & (1ll << loc)))
1811 return false;
1812
1813 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1814 return o->output_buffer < max_xfb;
1815 }
1816
1817 static void
1818 pan_emit_general_varying(struct mali_attribute_packed *out,
1819 struct panfrost_shader_state *other,
1820 struct panfrost_shader_state *xfb,
1821 gl_varying_slot loc,
1822 enum mali_format format,
1823 unsigned present,
1824 unsigned quirks,
1825 unsigned *gen_offsets,
1826 enum mali_format *gen_formats,
1827 unsigned *gen_stride,
1828 unsigned idx,
1829 bool should_alloc)
1830 {
1831 /* Check if we're linked */
1832 signed other_idx = -1;
1833
1834 for (unsigned j = 0; j < other->varying_count; ++j) {
1835 if (other->varyings_loc[j] == loc) {
1836 other_idx = j;
1837 break;
1838 }
1839 }
1840
1841 if (other_idx < 0) {
1842 pan_emit_vary_only(out, present, quirks);
1843 return;
1844 }
1845
1846 unsigned offset = gen_offsets[other_idx];
1847
1848 if (should_alloc) {
1849 /* We're linked, so allocate space via a watermark allocation */
1850 enum mali_format alt = other->varyings[other_idx];
1851
1852 /* Do interpolation at minimum precision */
1853 unsigned size_main = pan_varying_size(format);
1854 unsigned size_alt = pan_varying_size(alt);
1855 unsigned size = MIN2(size_main, size_alt);
1856
1857 /* If a varying is marked for XFB but not actually captured, we
1858 * should match the format to the format that would otherwise
1859 * be used for XFB, since dEQP checks for invariance here. It's
1860 * unclear if this is required by the spec. */
1861
1862 if (xfb->so_mask & (1ull << loc)) {
1863 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1864 format = pan_xfb_format(format, o->num_components);
1865 size = pan_varying_size(format);
1866 } else if (size == size_alt) {
1867 format = alt;
1868 }
1869
1870 gen_offsets[idx] = *gen_stride;
1871 gen_formats[other_idx] = format;
1872 offset = *gen_stride;
1873 *gen_stride += size;
1874 }
1875
1876 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1877 }
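/* Linkage example (illustrative, assuming the varying is not marked for XFB):
 * if this shader declares the varying as an fp32 vec2 (8 bytes) but the linked
 * shader declares it as fp16 vec2 (4 bytes), the smaller 4-byte size wins, the
 * fp16 format is adopted so both sides agree, the record is placed at the
 * current gen_stride, and gen_stride advances by 4 so the next general varying
 * packs immediately after. */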
1878
1879 /* Higher-level wrapper around all of the above, classifying a varying into one
1880  * of the record types above */
1881
1882 static void
1883 panfrost_emit_varying(
1884 struct mali_attribute_packed *out,
1885 struct panfrost_shader_state *stage,
1886 struct panfrost_shader_state *other,
1887 struct panfrost_shader_state *xfb,
1888 unsigned present,
1889 unsigned max_xfb,
1890 unsigned *streamout_offsets,
1891 unsigned quirks,
1892 unsigned *gen_offsets,
1893 enum mali_format *gen_formats,
1894 unsigned *gen_stride,
1895 unsigned idx,
1896 bool should_alloc,
1897 bool is_fragment)
1898 {
1899 gl_varying_slot loc = stage->varyings_loc[idx];
1900 enum mali_format format = stage->varyings[idx];
1901
1902 /* Override format to match linkage */
1903 if (!should_alloc && gen_formats[idx])
1904 format = gen_formats[idx];
1905
1906 if (has_point_coord(stage->point_sprite_mask, loc)) {
1907 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1908 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1909 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1910 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1911 } else if (loc == VARYING_SLOT_POS) {
1912 if (is_fragment)
1913 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1914 else
1915 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1916 } else if (loc == VARYING_SLOT_PSIZ) {
1917 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1918 } else if (loc == VARYING_SLOT_PNTC) {
1919 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1920 } else if (loc == VARYING_SLOT_FACE) {
1921 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1922 } else {
1923 pan_emit_general_varying(out, other, xfb, loc, format, present,
1924 quirks, gen_offsets, gen_formats, gen_stride,
1925 idx, should_alloc);
1926 }
1927 }
1928
1929 static void
1930 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1931 unsigned present,
1932 enum pan_special_varying v,
1933 unsigned special)
1934 {
1935 if (present & (1 << v)) {
1936 unsigned idx = pan_varying_index(present, v);
1937
1938 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1939 cfg.special = special;
1940 cfg.type = 0;
1941 }
1942 }
1943 }
1944
1945 void
1946 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1947 unsigned vertex_count,
1948 struct mali_vertex_tiler_postfix *vertex_postfix,
1949 struct mali_vertex_tiler_postfix *tiler_postfix,
1950 union midgard_primitive_size *primitive_size)
1951 {
1952 /* Load the shaders */
1953 struct panfrost_context *ctx = batch->ctx;
1954 struct panfrost_device *dev = pan_device(ctx->base.screen);
1955 struct panfrost_shader_state *vs, *fs;
1956 size_t vs_size, fs_size;
1957
1958 /* Allocate the varying descriptor */
1959
1960 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1961 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1962 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1963 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1964
1965 struct panfrost_transfer trans = panfrost_pool_alloc_aligned(
1966 &batch->pool, vs_size + fs_size, MALI_ATTRIBUTE_LENGTH);
1967
1968 struct pipe_stream_output_info *so = &vs->stream_output;
1969 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1970
1971 /* Check if this varying is linked by us. This is the case for
1972 * general-purpose, non-captured varyings. If it is, link it. If it's
1973 * not, use the provided stream out information to determine the
1974 * offset, since it was already linked for us. */
1975
1976 unsigned gen_offsets[32];
1977 enum mali_format gen_formats[32];
1978 memset(gen_offsets, 0, sizeof(gen_offsets));
1979 memset(gen_formats, 0, sizeof(gen_formats));
1980
1981 unsigned gen_stride = 0;
1982 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1983 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1984
1985 unsigned streamout_offsets[32];
1986
1987 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1988 streamout_offsets[i] = panfrost_streamout_offset(
1989 so->stride[i],
1990 ctx->streamout.offsets[i],
1991 ctx->streamout.targets[i]);
1992 }
1993
1994 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1995 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1996
1997 for (unsigned i = 0; i < vs->varying_count; i++) {
1998 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1999 ctx->streamout.num_targets, streamout_offsets,
2000 dev->quirks,
2001 gen_offsets, gen_formats, &gen_stride, i, true, false);
2002 }
2003
2004 for (unsigned i = 0; i < fs->varying_count; i++) {
2005 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
2006 ctx->streamout.num_targets, streamout_offsets,
2007 dev->quirks,
2008 gen_offsets, gen_formats, &gen_stride, i, false, true);
2009 }
2010
2011 unsigned xfb_base = pan_xfb_base(present);
2012 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
2013 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets),
2014 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
2015 struct mali_attribute_buffer_packed *varyings =
2016 (struct mali_attribute_buffer_packed *) T.cpu;
2017
2018 /* Emit the stream out buffers */
2019
2020 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
2021 ctx->vertex_count);
2022
2023 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
2024 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
2025 so->stride[i],
2026 ctx->streamout.offsets[i],
2027 out_count,
2028 ctx->streamout.targets[i]);
2029 }
2030
2031 panfrost_emit_varyings(batch,
2032 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
2033 gen_stride, vertex_count);
2034
2035 /* fp32 vec4 gl_Position */
2036 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
2037 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
2038 sizeof(float) * 4, vertex_count);
2039
2040 if (present & (1 << PAN_VARY_PSIZ)) {
2041 primitive_size->pointer = panfrost_emit_varyings(batch,
2042 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
2043 2, vertex_count);
2044 }
2045
2046 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
2047 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
2048 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
2049
2050 vertex_postfix->varyings = T.gpu;
2051 tiler_postfix->varyings = T.gpu;
2052
2053 vertex_postfix->varying_meta = trans.gpu;
2054 tiler_postfix->varying_meta = trans.gpu + vs_size;
2055 }
2056
2057 void
2058 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
2059 struct mali_vertex_tiler_prefix *vertex_prefix,
2060 struct mali_vertex_tiler_postfix *vertex_postfix,
2061 struct mali_vertex_tiler_prefix *tiler_prefix,
2062 struct mali_vertex_tiler_postfix *tiler_postfix,
2063 union midgard_primitive_size *primitive_size)
2064 {
2065 struct panfrost_context *ctx = batch->ctx;
2066 struct panfrost_device *device = pan_device(ctx->base.screen);
2067 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
2068 struct bifrost_payload_vertex bifrost_vertex = {0,};
2069 struct bifrost_payload_tiler bifrost_tiler = {0,};
2070 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
2071 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
2072 void *vp, *tp;
2073 size_t vp_size, tp_size;
2074
2075 if (device->quirks & IS_BIFROST) {
2076 bifrost_vertex.prefix = *vertex_prefix;
2077 bifrost_vertex.postfix = *vertex_postfix;
2078 vp = &bifrost_vertex;
2079 vp_size = sizeof(bifrost_vertex);
2080
2081 bifrost_tiler.prefix = *tiler_prefix;
2082 bifrost_tiler.tiler.primitive_size = *primitive_size;
2083 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
2084 bifrost_tiler.postfix = *tiler_postfix;
2085 tp = &bifrost_tiler;
2086 tp_size = sizeof(bifrost_tiler);
2087 } else {
2088 midgard_vertex.prefix = *vertex_prefix;
2089 midgard_vertex.postfix = *vertex_postfix;
2090 vp = &midgard_vertex;
2091 vp_size = sizeof(midgard_vertex);
2092
2093 midgard_tiler.prefix = *tiler_prefix;
2094 midgard_tiler.postfix = *tiler_postfix;
2095 midgard_tiler.primitive_size = *primitive_size;
2096 tp = &midgard_tiler;
2097 tp_size = sizeof(midgard_tiler);
2098 }
2099
2100 if (wallpapering) {
2101 /* Inject in reverse order, with "predicted" job indices.
2102 * THIS IS A HACK XXX */
2103 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2104 batch->scoreboard.job_index + 2, tp, tp_size, true);
2105 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2106 vp, vp_size, true);
2107 return;
2108 }
2109
2110 /* If rasterizer discard is enabled, only submit the vertex job */
2111
2112 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2113 vp, vp_size, false);
2114
2115 if (ctx->rasterizer->base.rasterizer_discard)
2116 return;
2117
2118 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2119 false);
2120 }
2121
2122 /* TODO: stop hardcoding this */
2123 mali_ptr
2124 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2125 {
2126 uint16_t locations[] = {
2127 128, 128,
2128 0, 256,
2129 0, 256,
2130 0, 256,
2131 0, 256,
2132 0, 256,
2133 0, 256,
2134 0, 256,
2135 0, 256,
2136 0, 256,
2137 0, 256,
2138 0, 256,
2139 0, 256,
2140 0, 256,
2141 0, 256,
2142 0, 256,
2143 0, 256,
2144 0, 256,
2145 0, 256,
2146 0, 256,
2147 0, 256,
2148 0, 256,
2149 0, 256,
2150 0, 256,
2151 0, 256,
2152 0, 256,
2153 0, 256,
2154 0, 256,
2155 0, 256,
2156 0, 256,
2157 0, 256,
2158 0, 256,
2159 128, 128,
2160 0, 0,
2161 0, 0,
2162 0, 0,
2163 0, 0,
2164 0, 0,
2165 0, 0,
2166 0, 0,
2167 0, 0,
2168 0, 0,
2169 0, 0,
2170 0, 0,
2171 0, 0,
2172 0, 0,
2173 0, 0,
2174 0, 0,
2175 };
2176
2177 return panfrost_pool_upload_aligned(&batch->pool, locations, 96 * sizeof(uint16_t), 64);
2178 }