panfrost: Drop blend indirection
[mesa.git] src/gallium/drivers/panfrost/pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
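/* For example, texture, SSBO and UBO handling below fold this into the access
 * flags when attaching a BO to the batch:
 *
 *    panfrost_batch_add_bo(batch, bo,
 *                          PAN_BO_ACCESS_SHARED |
 *                          PAN_BO_ACCESS_READ |
 *                          panfrost_bo_access_for_stage(st));
 */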
53
54 static void
55 panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
56 struct mali_vertex_tiler_postfix *postfix)
57 {
58 struct panfrost_device *dev = pan_device(ctx->base.screen);
59 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
60
61 struct mali_shared_memory shared = {
62 .shared_workgroup_count = ~0,
63 };
64
65 if (batch->stack_size) {
66 struct panfrost_bo *stack =
67 panfrost_batch_get_scratchpad(batch, batch->stack_size,
68 dev->thread_tls_alloc,
69 dev->core_count);
70
71 shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
72 shared.scratchpad = stack->gpu;
73 }
74
75 postfix->shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared, sizeof(shared), 64);
76 }
77
78 static void
79 panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
80 struct mali_vertex_tiler_postfix *postfix)
81 {
82 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
83 postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
84 }
85
86 static void
87 panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
88 struct mali_vertex_tiler_prefix *prefix,
89 struct mali_vertex_tiler_postfix *postfix)
90 {
91 postfix->gl_enables |= 0x7;
92 SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
93 rasterizer->base.front_ccw);
94 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
95 (rasterizer->base.cull_face & PIPE_FACE_FRONT));
96 SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
97 (rasterizer->base.cull_face & PIPE_FACE_BACK));
98 SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
99 rasterizer->base.flatshade_first);
100 }
101
102 void
103 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
104 struct mali_vertex_tiler_prefix *prefix,
105 union midgard_primitive_size *primitive_size)
106 {
107 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
108
109 if (!panfrost_writes_point_size(ctx)) {
110 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
111 rasterizer->base.point_size :
112 rasterizer->base.line_width;
113
114 primitive_size->constant = val;
115 }
116 }
117
118 static void
119 panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
120 struct mali_vertex_tiler_postfix *postfix)
121 {
122 SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
123 if (ctx->occlusion_query) {
124 postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
125 panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
126 PAN_BO_ACCESS_SHARED |
127 PAN_BO_ACCESS_RW |
128 PAN_BO_ACCESS_FRAGMENT);
129 } else {
130 postfix->occlusion_counter = 0;
131 }
132 }
133
134 void
135 panfrost_vt_init(struct panfrost_context *ctx,
136 enum pipe_shader_type stage,
137 struct mali_vertex_tiler_prefix *prefix,
138 struct mali_vertex_tiler_postfix *postfix)
139 {
140 struct panfrost_device *device = pan_device(ctx->base.screen);
141
142 if (!ctx->shader[stage])
143 return;
144
145 memset(prefix, 0, sizeof(*prefix));
146 memset(postfix, 0, sizeof(*postfix));
147
148 if (device->quirks & IS_BIFROST) {
149 postfix->gl_enables = 0x2;
150 panfrost_vt_emit_shared_memory(ctx, postfix);
151 } else {
152 postfix->gl_enables = 0x6;
153 panfrost_vt_attach_framebuffer(ctx, postfix);
154 }
155
156 if (stage == PIPE_SHADER_FRAGMENT) {
157 panfrost_vt_update_occlusion_query(ctx, postfix);
158 panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
159 }
160 }
161
162 static unsigned
163 panfrost_translate_index_size(unsigned size)
164 {
165 switch (size) {
166 case 1:
167 return MALI_DRAW_INDEXED_UINT8;
168
169 case 2:
170 return MALI_DRAW_INDEXED_UINT16;
171
172 case 4:
173 return MALI_DRAW_INDEXED_UINT32;
174
175 default:
176 unreachable("Invalid index size");
177 }
178 }
179
180 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
181  * good for the duration of the draw (transient), though it may last longer. Also get
182 * the bounds on the index buffer for the range accessed by the draw. We do
183 * these operations together because there are natural optimizations which
184 * require them to be together. */
185
186 static mali_ptr
187 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
188 const struct pipe_draw_info *info,
189 unsigned *min_index, unsigned *max_index)
190 {
191 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
192 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
193 off_t offset = info->start * info->index_size;
194 bool needs_indices = true;
195 mali_ptr out = 0;
196
197 if (info->max_index != ~0u) {
198 *min_index = info->min_index;
199 *max_index = info->max_index;
200 needs_indices = false;
201 }
202
203 if (!info->has_user_indices) {
204 /* Only resources can be directly mapped */
205 panfrost_batch_add_bo(batch, rsrc->bo,
206 PAN_BO_ACCESS_SHARED |
207 PAN_BO_ACCESS_READ |
208 PAN_BO_ACCESS_VERTEX_TILER);
209 out = rsrc->bo->gpu + offset;
210
211 /* Check the cache */
212 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
213 info->start,
214 info->count,
215 min_index,
216 max_index);
217 } else {
218 /* Otherwise, we need to upload to transient memory */
219 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
220 struct panfrost_transfer T =
221 panfrost_pool_alloc_aligned(&batch->pool,
222 info->count * info->index_size,
223 info->index_size);
224
225 memcpy(T.cpu, ibuf8 + offset, info->count * info->index_size);
226 out = T.gpu;
227 }
228
229 if (needs_indices) {
230 /* Fallback */
231 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
232
233 if (!info->has_user_indices)
234 panfrost_minmax_cache_add(rsrc->index_cache,
235 info->start, info->count,
236 *min_index, *max_index);
237 }
238
239 return out;
240 }
241
242 void
243 panfrost_vt_set_draw_info(struct panfrost_context *ctx,
244 const struct pipe_draw_info *info,
245 enum mali_draw_mode draw_mode,
246 struct mali_vertex_tiler_postfix *vertex_postfix,
247 struct mali_vertex_tiler_prefix *tiler_prefix,
248 struct mali_vertex_tiler_postfix *tiler_postfix,
249 unsigned *vertex_count,
250 unsigned *padded_count)
251 {
252 tiler_prefix->draw_mode = draw_mode;
253
254 unsigned draw_flags = 0;
255
256 if (panfrost_writes_point_size(ctx))
257 draw_flags |= MALI_DRAW_VARYING_SIZE;
258
259 if (info->primitive_restart)
260 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
261
262         /* These don't make much sense */
263
264 draw_flags |= 0x3000;
265
266 if (info->index_size) {
267 unsigned min_index = 0, max_index = 0;
268
269 tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
270 info,
271 &min_index,
272 &max_index);
273
274 /* Use the corresponding values */
275 *vertex_count = max_index - min_index + 1;
276 tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
277 tiler_prefix->offset_bias_correction = -min_index;
278 tiler_prefix->index_count = MALI_POSITIVE(info->count);
279 draw_flags |= panfrost_translate_index_size(info->index_size);
280 } else {
281 tiler_prefix->indices = 0;
282 *vertex_count = ctx->vertex_count;
283 tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
284 tiler_prefix->offset_bias_correction = 0;
285 tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
286 }
287
288 tiler_prefix->unknown_draw = draw_flags;
289
290 /* Encode the padded vertex count */
291
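        /* The shift/odd pair below encodes padded_count = (2 * odd + 1) << shift,
         * splitting the count into a power of two and an odd factor. Worked
         * example: padded_count = 40 = 5 << 3 gives shift = ctz(40) = 3 and
         * odd = 40 >> 4 = 2, and indeed (2 * 2 + 1) << 3 = 40 again. */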
292 if (info->instance_count > 1) {
293 *padded_count = panfrost_padded_vertex_count(*vertex_count);
294
295 unsigned shift = __builtin_ctz(ctx->padded_count);
296 unsigned k = ctx->padded_count >> (shift + 1);
297
298 tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
299 tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
300 } else {
301 *padded_count = *vertex_count;
302
303 /* Reset instancing state */
304 tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
305 tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
306 }
307 }
308
309 static unsigned
310 translate_tex_wrap(enum pipe_tex_wrap w)
311 {
312 switch (w) {
313 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
314 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
315 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
316 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
317 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
318 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
319 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
320 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
321 default: unreachable("Invalid wrap");
322 }
323 }
324
325 /* The hardware compares in the wrong order, so we have to flip before
326 * encoding. Yes, really. */
327
328 static enum mali_func
329 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
330 {
331 if (!cso->compare_mode)
332 return MALI_FUNC_NEVER;
333
334 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
335 return panfrost_flip_compare_func(f);
336 }
337
338 static enum mali_mipmap_mode
339 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
340 {
341 switch (f) {
342 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
343 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
344 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
345 default: unreachable("Invalid");
346 }
347 }
348
349 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
350 struct mali_midgard_sampler_packed *hw)
351 {
352 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
353 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
354 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
355 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
356 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
357 cfg.normalized_coordinates = cso->normalized_coords;
358
359 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
360
361 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
362
363 /* If necessary, we disable mipmapping in the sampler descriptor by
364 * clamping the LOD as tight as possible (from 0 to epsilon,
365 * essentially -- remember these are fixed point numbers, so
366 * epsilon=1/256) */
367
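                /* Concretely: one raw unit of the 16-bit fixed-point LOD is
                 * 1/256, so minimum_lod + 1 clamps the LOD range to
                 * [min_lod, min_lod + 1/256], effectively the base level only. */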
368 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
369 cfg.minimum_lod + 1 :
370 FIXED_16(cso->max_lod, false);
371
372 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
373 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
374 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
375
376 cfg.compare_function = panfrost_sampler_compare_func(cso);
377 cfg.seamless_cube_map = cso->seamless_cube_map;
378
379 cfg.border_color_r = cso->border_color.f[0];
380 cfg.border_color_g = cso->border_color.f[1];
381 cfg.border_color_b = cso->border_color.f[2];
382 cfg.border_color_a = cso->border_color.f[3];
383 }
384 }
385
386 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
387 struct mali_bifrost_sampler_packed *hw)
388 {
389 pan_pack(hw, BIFROST_SAMPLER, cfg) {
390 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
391 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
392 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
393 cfg.normalized_coordinates = cso->normalized_coords;
394
395 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
396 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
397 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
398
399 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
400 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
401 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
402
403 cfg.compare_function = panfrost_sampler_compare_func(cso);
404 cfg.seamless_cube_map = cso->seamless_cube_map;
405 }
406 }
407
408 static bool
409 panfrost_fs_required(
410 struct panfrost_shader_state *fs,
411 struct panfrost_blend_final *blend,
412 unsigned rt_count)
413 {
414 /* If we generally have side effects */
415 if (fs->fs_sidefx)
416 return true;
417
418 /* If colour is written we need to execute */
419 for (unsigned i = 0; i < rt_count; ++i) {
420 if (!blend[i].no_colour)
421 return true;
422 }
423
424 /* If depth is written and not implied we need to execute.
425 * TODO: Predicate on Z/S writes being enabled */
426 return (fs->writes_depth || fs->writes_stencil);
427 }
428
429 static void
430 panfrost_emit_blend(struct panfrost_batch *batch, void *rts,
431 struct panfrost_blend_final *blend)
432 {
433 const struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
434 struct panfrost_shader_state *fs = panfrost_get_shader_state(batch->ctx, PIPE_SHADER_FRAGMENT);
435 unsigned rt_count = batch->key.nr_cbufs;
436
437 struct bifrost_blend_rt *brts = rts;
438
439 /* Disable blending for depth-only */
440
441 if (rt_count == 0) {
442 if (dev->quirks & IS_BIFROST) {
443 memset(brts, 0, sizeof(*brts));
444 brts[0].unk2 = 0x3;
445 } else {
446 pan_pack(rts, MIDGARD_BLEND_OPAQUE, cfg) {
447 cfg.equation = 0xf0122122; /* Replace */
448 }
449 }
450 }
451
452 for (unsigned i = 0; i < rt_count; ++i) {
453 struct mali_blend_flags_packed flags = {};
454
455 pan_pack(&flags, BLEND_FLAGS, cfg) {
456 if (blend[i].no_colour) {
457 cfg.enable = false;
458 break;
459 }
460
461 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
462
463 cfg.srgb = util_format_is_srgb(batch->key.cbufs[i]->format);
464 cfg.load_destination = blend[i].load_dest;
465 cfg.dither_disable = !batch->ctx->blend->base.dither;
466
467 if (!(dev->quirks & IS_BIFROST))
468 cfg.midgard_blend_shader = blend[i].is_shader;
469 }
470
471 if (dev->quirks & IS_BIFROST) {
472 memset(brts + i, 0, sizeof(brts[i]));
473 brts[i].flags = flags.opaque[0];
474
475 if (blend[i].is_shader) {
476 /* The blend shader's address needs to be at
477 * the same top 32 bit as the fragment shader.
478 * TODO: Ensure that's always the case.
479 */
480 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
481 (fs->bo->gpu & (0xffffffffull << 32)));
482 brts[i].shader = blend[i].shader.gpu;
483 brts[i].unk2 = 0x0;
484 } else {
485 enum pipe_format format = batch->key.cbufs[i]->format;
486 const struct util_format_description *format_desc;
487 format_desc = util_format_description(format);
488
489 brts[i].equation = blend[i].equation.equation;
490
491 /* TODO: this is a bit more complicated */
492 brts[i].constant = blend[i].equation.constant;
493
494 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
495
496 /* 0x19 disables blending and forces REPLACE
497 * mode (equivalent to rgb_mode = alpha_mode =
498 * x122, colour mask = 0xF). 0x1a allows
499 * blending. */
500 brts[i].unk2 = blend[i].opaque ? 0x19 : 0x1a;
501
502 brts[i].shader_type = fs->blend_types[i];
503 }
504 } else {
505 pan_pack(rts, MIDGARD_BLEND_OPAQUE, cfg) {
506 cfg.flags = flags;
507
508 if (blend[i].is_shader) {
509 cfg.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
510 } else {
511 cfg.equation = blend[i].equation.equation.opaque[0];
512 cfg.constant = blend[i].equation.constant;
513 }
514 }
515
516 rts += MALI_MIDGARD_BLEND_LENGTH;
517 }
518 }
519 }
520
521 static void
522 panfrost_emit_frag_shader(struct panfrost_context *ctx,
523 struct mali_state_packed *fragmeta,
524 struct panfrost_blend_final *blend)
525 {
526 const struct panfrost_device *dev = pan_device(ctx->base.screen);
527 struct panfrost_shader_state *fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
528 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
529 const struct panfrost_zsa_state *zsa = ctx->depth_stencil;
530 unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
531 bool alpha_to_coverage = ctx->blend->base.alpha_to_coverage;
532
533 /* Built up here */
534 struct mali_shader_packed shader = fs->shader;
535 struct mali_preload_packed preload = fs->preload;
536 uint32_t properties;
537 struct mali_multisample_misc_packed multisample_misc;
538 struct mali_stencil_mask_misc_packed stencil_mask_misc;
539 union midgard_blend sfbd_blend = { 0 };
540
541 if (!panfrost_fs_required(fs, blend, rt_count)) {
542 if (dev->quirks & IS_BIFROST) {
543 pan_pack(&shader, SHADER, cfg) {}
544
545 pan_pack(&properties, BIFROST_PROPERTIES, cfg) {
546 cfg.unknown = 0x950020; /* XXX */
547 cfg.early_z_enable = true;
548 }
549
550 preload.opaque[0] = 0;
551 } else {
552 pan_pack(&shader, SHADER, cfg) {
553 cfg.shader = 0x1;
554 }
555
556 pan_pack(&properties, MIDGARD_PROPERTIES, cfg) {
557 cfg.work_register_count = 1;
558 cfg.depth_source = MALI_DEPTH_SOURCE_FIXED_FUNCTION;
559 cfg.early_z_enable = true;
560 }
561 }
562 } else if (dev->quirks & IS_BIFROST) {
563 bool no_blend = true;
564
565 for (unsigned i = 0; i < rt_count; ++i)
566 no_blend &= (!blend[i].load_dest | blend[i].no_colour);
567
568 pan_pack(&properties, BIFROST_PROPERTIES, cfg) {
569 cfg.early_z_enable = !fs->can_discard && !fs->writes_depth && no_blend;
570 }
571
572 /* Combine with prepacked properties */
573 properties |= fs->properties.opaque[0];
574 } else {
575 /* Reasons to disable early-Z from a shader perspective */
576 bool late_z = fs->can_discard || fs->writes_global ||
577 fs->writes_depth || fs->writes_stencil;
578
579 /* If either depth or stencil is enabled, discard matters */
580 bool zs_enabled =
581 (zsa->base.depth.enabled && zsa->base.depth.func != PIPE_FUNC_ALWAYS) ||
582 zsa->base.stencil[0].enabled;
583
584 bool has_blend_shader = false;
585
586 for (unsigned c = 0; c < rt_count; ++c)
587 has_blend_shader |= blend[c].is_shader;
588
589 pan_pack(&properties, MIDGARD_PROPERTIES, cfg) {
590 /* TODO: Reduce this limit? */
591 if (has_blend_shader)
592 cfg.work_register_count = MAX2(fs->work_reg_count, 8);
593 else
594 cfg.work_register_count = fs->work_reg_count;
595
596 cfg.early_z_enable = !(late_z || alpha_to_coverage);
597 cfg.reads_tilebuffer = fs->outputs_read || (!zs_enabled && fs->can_discard);
598 cfg.reads_depth_stencil = zs_enabled && fs->can_discard;
599 }
600
601 properties |= fs->properties.opaque[0];
602 }
603
604 pan_pack(&multisample_misc, MULTISAMPLE_MISC, cfg) {
605 bool msaa = rast->multisample;
606 cfg.multisample_enable = msaa;
607 cfg.sample_mask = (msaa ? ctx->sample_mask : ~0) & 0xFFFF;
608
609 /* EXT_shader_framebuffer_fetch requires per-sample */
610 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
611 cfg.evaluate_per_sample = msaa && per_sample;
612
613 if (dev->quirks & MIDGARD_SFBD) {
614 cfg.sfbd_load_destination = blend[0].load_dest;
615 cfg.sfbd_blend_shader = blend[0].is_shader;
616 }
617
618 cfg.depth_function = zsa->base.depth.enabled ?
619 panfrost_translate_compare_func(zsa->base.depth.func) :
620 MALI_FUNC_ALWAYS;
621
622 cfg.depth_write_mask = zsa->base.depth.writemask;
623 cfg.near_discard = rast->depth_clip_near;
624 cfg.far_discard = rast->depth_clip_far;
625 cfg.unknown_2 = true;
626 }
627
628 pan_pack(&stencil_mask_misc, STENCIL_MASK_MISC, cfg) {
629 cfg.stencil_mask_front = zsa->stencil_mask_front;
630 cfg.stencil_mask_back = zsa->stencil_mask_back;
631 cfg.stencil_enable = zsa->base.stencil[0].enabled;
632 cfg.alpha_to_coverage = alpha_to_coverage;
633
634 if (dev->quirks & MIDGARD_SFBD) {
635 cfg.sfbd_write_enable = !blend[0].no_colour;
636 cfg.sfbd_srgb = util_format_is_srgb(ctx->pipe_framebuffer.cbufs[0]->format);
637 cfg.sfbd_dither_disable = !ctx->blend->base.dither;
638 }
639
640 cfg.unknown_1 = 0x7;
641 cfg.depth_range_1 = cfg.depth_range_2 = rast->offset_tri;
642 cfg.single_sampled_lines = !rast->multisample;
643 }
644
645 if (dev->quirks & MIDGARD_SFBD) {
646 if (blend[0].is_shader) {
647 sfbd_blend.shader = blend[0].shader.gpu |
648 blend[0].shader.first_tag;
649 } else {
650 sfbd_blend.equation = blend[0].equation.equation;
651 sfbd_blend.constant = blend[0].equation.constant;
652 }
653 } else if (!(dev->quirks & IS_BIFROST)) {
654 /* Bug where MRT-capable hw apparently reads the last blend
655 * shader from here instead of the usual location? */
656
657 for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
658 if (!blend[rt].is_shader)
659 continue;
660
661 sfbd_blend.shader = blend[rt].shader.gpu |
662 blend[rt].shader.first_tag;
663 break;
664 }
665 }
666
667 pan_pack(fragmeta, STATE_OPAQUE, cfg) {
668 cfg.shader = fs->shader;
669 cfg.properties = properties;
670 cfg.depth_units = rast->offset_units * 2.0f;
671 cfg.depth_factor = rast->offset_scale;
672 cfg.multisample_misc = multisample_misc;
673 cfg.stencil_mask_misc = stencil_mask_misc;
674
675 cfg.stencil_front = zsa->stencil_front;
676 cfg.stencil_back = zsa->stencil_back;
677
678 /* Bottom bits for stencil ref, exactly one word */
679 bool back_enab = zsa->base.stencil[1].enabled;
680 cfg.stencil_front.opaque[0] |= ctx->stencil_ref.ref_value[0];
681 cfg.stencil_back.opaque[0] |= ctx->stencil_ref.ref_value[back_enab ? 1 : 0];
682
683 if (dev->quirks & IS_BIFROST)
684 cfg.preload = preload;
685 else
686 memcpy(&cfg.sfbd_blend, &sfbd_blend, sizeof(sfbd_blend));
687 }
688 }
689
690 mali_ptr
691 panfrost_emit_compute_shader_meta(struct panfrost_batch *batch, enum pipe_shader_type stage)
692 {
693 struct panfrost_shader_state *ss = panfrost_get_shader_state(batch->ctx, stage);
694
695 panfrost_batch_add_bo(batch, ss->bo,
696 PAN_BO_ACCESS_PRIVATE |
697 PAN_BO_ACCESS_READ |
698 PAN_BO_ACCESS_VERTEX_TILER);
699
700 panfrost_batch_add_bo(batch, pan_resource(ss->upload.rsrc)->bo,
701 PAN_BO_ACCESS_PRIVATE |
702 PAN_BO_ACCESS_READ |
703 PAN_BO_ACCESS_VERTEX_TILER);
704
705 return pan_resource(ss->upload.rsrc)->bo->gpu + ss->upload.offset;
706 }
707
708 mali_ptr
709 panfrost_emit_frag_shader_meta(struct panfrost_batch *batch)
710 {
711 struct panfrost_context *ctx = batch->ctx;
712 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
713
714 /* Add the shader BO to the batch. */
715 panfrost_batch_add_bo(batch, ss->bo,
716 PAN_BO_ACCESS_PRIVATE |
717 PAN_BO_ACCESS_READ |
718 PAN_BO_ACCESS_FRAGMENT);
719
720 struct panfrost_device *dev = pan_device(ctx->base.screen);
721 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
722 struct panfrost_transfer xfer;
723 unsigned rt_size;
724
725 if (dev->quirks & MIDGARD_SFBD)
726 rt_size = 0;
727 else if (dev->quirks & IS_BIFROST)
728 rt_size = sizeof(struct bifrost_blend_rt);
729 else
730 rt_size = sizeof(struct midgard_blend_rt);
731
732 unsigned desc_size = MALI_STATE_LENGTH + rt_size * rt_count;
733 xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, MALI_STATE_LENGTH);
734
735 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
736
737 for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
738 blend[c] = panfrost_get_blend_for_context(ctx, c);
739
740 panfrost_emit_frag_shader(ctx, (struct mali_state_packed *) xfer.cpu, blend);
741
742 if (!(dev->quirks & MIDGARD_SFBD))
743 panfrost_emit_blend(batch, xfer.cpu + MALI_STATE_LENGTH, blend);
744 else
745 batch->draws |= PIPE_CLEAR_COLOR0;
746
747 return xfer.gpu;
748 }
749
750 void
751 panfrost_emit_viewport(struct panfrost_batch *batch,
752 struct mali_vertex_tiler_postfix *tiler_postfix)
753 {
754 struct panfrost_context *ctx = batch->ctx;
755 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
756 const struct pipe_scissor_state *ss = &ctx->scissor;
757 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
758 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
759
760 /* Derive min/max from translate/scale. Note since |x| >= 0 by
761 * definition, we have that -|x| <= |x| hence translate - |scale| <=
762 * translate + |scale|, so the ordering is correct here. */
763 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
764 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
765 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
766 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
767 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
768 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
769
770         /* Scissor to the intersection of the viewport and the scissor, clamped
771 * to the framebuffer */
772
773 unsigned minx = MIN2(fb->width, vp_minx);
774 unsigned maxx = MIN2(fb->width, vp_maxx);
775 unsigned miny = MIN2(fb->height, vp_miny);
776 unsigned maxy = MIN2(fb->height, vp_maxy);
777
778 if (ss && rast->scissor) {
779 minx = MAX2(ss->minx, minx);
780 miny = MAX2(ss->miny, miny);
781 maxx = MIN2(ss->maxx, maxx);
782 maxy = MIN2(ss->maxy, maxy);
783 }
784
785 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
786
787 pan_pack(T.cpu, VIEWPORT, cfg) {
788 cfg.scissor_minimum_x = minx;
789 cfg.scissor_minimum_y = miny;
790 cfg.scissor_maximum_x = maxx - 1;
791 cfg.scissor_maximum_y = maxy - 1;
792
793 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
794 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
795 }
796
797 tiler_postfix->viewport = T.gpu;
798 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
799 }
800
801 static mali_ptr
802 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
803 enum pipe_shader_type st,
804 struct panfrost_constant_buffer *buf,
805 unsigned index)
806 {
807 struct pipe_constant_buffer *cb = &buf->cb[index];
808 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
809
810 if (rsrc) {
811 panfrost_batch_add_bo(batch, rsrc->bo,
812 PAN_BO_ACCESS_SHARED |
813 PAN_BO_ACCESS_READ |
814 panfrost_bo_access_for_stage(st));
815
816                 /* Alignment guaranteed by
817 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
818 return rsrc->bo->gpu + cb->buffer_offset;
819 } else if (cb->user_buffer) {
820 return panfrost_pool_upload_aligned(&batch->pool,
821 cb->user_buffer +
822 cb->buffer_offset,
823 cb->buffer_size, 16);
824 } else {
825 unreachable("No constant buffer");
826 }
827 }
828
829 struct sysval_uniform {
830 union {
831 float f[4];
832 int32_t i[4];
833 uint32_t u[4];
834 uint64_t du[2];
835 };
836 };
837
838 static void
839 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
840 struct sysval_uniform *uniform)
841 {
842 struct panfrost_context *ctx = batch->ctx;
843 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
844
845 uniform->f[0] = vp->scale[0];
846 uniform->f[1] = vp->scale[1];
847 uniform->f[2] = vp->scale[2];
848 }
849
850 static void
851 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
852 struct sysval_uniform *uniform)
853 {
854 struct panfrost_context *ctx = batch->ctx;
855 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
856
857 uniform->f[0] = vp->translate[0];
858 uniform->f[1] = vp->translate[1];
859 uniform->f[2] = vp->translate[2];
860 }
861
862 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
863 enum pipe_shader_type st,
864 unsigned int sysvalid,
865 struct sysval_uniform *uniform)
866 {
867 struct panfrost_context *ctx = batch->ctx;
868 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
869 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
870 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
871 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
872
873 assert(dim);
874 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
875
876 if (dim > 1)
877 uniform->i[1] = u_minify(tex->texture->height0,
878 tex->u.tex.first_level);
879
880 if (dim > 2)
881 uniform->i[2] = u_minify(tex->texture->depth0,
882 tex->u.tex.first_level);
883
884 if (is_array)
885 uniform->i[dim] = tex->texture->array_size;
886 }
887
888 static void
889 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
890 enum pipe_shader_type st,
891 unsigned ssbo_id,
892 struct sysval_uniform *uniform)
893 {
894 struct panfrost_context *ctx = batch->ctx;
895
896 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
897 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
898
899 /* Compute address */
900 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
901
902 panfrost_batch_add_bo(batch, bo,
903 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
904 panfrost_bo_access_for_stage(st));
905
906 /* Upload address and size as sysval */
907 uniform->du[0] = bo->gpu + sb.buffer_offset;
908 uniform->u[2] = sb.buffer_size;
909 }
910
911 static void
912 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
913 enum pipe_shader_type st,
914 unsigned samp_idx,
915 struct sysval_uniform *uniform)
916 {
917 struct panfrost_context *ctx = batch->ctx;
918 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
919
920 uniform->f[0] = sampl->min_lod;
921 uniform->f[1] = sampl->max_lod;
922 uniform->f[2] = sampl->lod_bias;
923
924 /* Even without any errata, Midgard represents "no mipmapping" as
925 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
926 * panfrost_create_sampler_state which also explains our choice of
927 * epsilon value (again to keep behaviour consistent) */
928
929 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
930 uniform->f[1] = uniform->f[0] + (1.0/256.0);
931 }
932
933 static void
934 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
935 struct sysval_uniform *uniform)
936 {
937 struct panfrost_context *ctx = batch->ctx;
938
939 uniform->u[0] = ctx->compute_grid->grid[0];
940 uniform->u[1] = ctx->compute_grid->grid[1];
941 uniform->u[2] = ctx->compute_grid->grid[2];
942 }
943
944 static void
945 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
946 struct panfrost_shader_state *ss,
947 enum pipe_shader_type st)
948 {
949 struct sysval_uniform *uniforms = (void *)buf;
950
951 for (unsigned i = 0; i < ss->sysval_count; ++i) {
952 int sysval = ss->sysval[i];
953
954 switch (PAN_SYSVAL_TYPE(sysval)) {
955 case PAN_SYSVAL_VIEWPORT_SCALE:
956 panfrost_upload_viewport_scale_sysval(batch,
957 &uniforms[i]);
958 break;
959 case PAN_SYSVAL_VIEWPORT_OFFSET:
960 panfrost_upload_viewport_offset_sysval(batch,
961 &uniforms[i]);
962 break;
963 case PAN_SYSVAL_TEXTURE_SIZE:
964 panfrost_upload_txs_sysval(batch, st,
965 PAN_SYSVAL_ID(sysval),
966 &uniforms[i]);
967 break;
968 case PAN_SYSVAL_SSBO:
969 panfrost_upload_ssbo_sysval(batch, st,
970 PAN_SYSVAL_ID(sysval),
971 &uniforms[i]);
972 break;
973 case PAN_SYSVAL_NUM_WORK_GROUPS:
974 panfrost_upload_num_work_groups_sysval(batch,
975 &uniforms[i]);
976 break;
977 case PAN_SYSVAL_SAMPLER:
978 panfrost_upload_sampler_sysval(batch, st,
979 PAN_SYSVAL_ID(sysval),
980 &uniforms[i]);
981 break;
982 default:
983 assert(0);
984 }
985 }
986 }
987
988 static const void *
989 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
990 unsigned index)
991 {
992 struct pipe_constant_buffer *cb = &buf->cb[index];
993 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
994
995 if (rsrc)
996 return rsrc->bo->cpu;
997 else if (cb->user_buffer)
998 return cb->user_buffer;
999 else
1000 unreachable("No constant buffer");
1001 }
1002
1003 void
1004 panfrost_emit_const_buf(struct panfrost_batch *batch,
1005 enum pipe_shader_type stage,
1006 struct mali_vertex_tiler_postfix *postfix)
1007 {
1008 struct panfrost_context *ctx = batch->ctx;
1009 struct panfrost_shader_variants *all = ctx->shader[stage];
1010
1011 if (!all)
1012 return;
1013
1014 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
1015
1016 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1017
1018 /* Uniforms are implicitly UBO #0 */
1019 bool has_uniforms = buf->enabled_mask & (1 << 0);
1020
1021 /* Allocate room for the sysval and the uniforms */
1022 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1023 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1024 size_t size = sys_size + uniform_size;
1025 struct panfrost_transfer transfer =
1026 panfrost_pool_alloc_aligned(&batch->pool, size, 16);
1027
1028 /* Upload sysvals requested by the shader */
1029 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
1030
1031 /* Upload uniforms */
1032 if (has_uniforms && uniform_size) {
1033 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1034 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1035 }
1036
1037 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1038 * uploaded, so it's always included. The count is the highest UBO
1039 * addressable -- gaps are included. */
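        /* Worked example: enabled_mask = 0b1001 (UBOs 0 and 3 bound) yields
         * ubo_count = 32 - clz(0b1001) = 4; the unbound slots 1 and 2 get
         * null descriptors in the loop further down. */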
1040
1041 unsigned ubo_count = 32 - __builtin_clz(buf->enabled_mask | 1);
1042
1043 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
1044 struct panfrost_transfer ubos =
1045 panfrost_pool_alloc_aligned(&batch->pool, sz,
1046 MALI_UNIFORM_BUFFER_LENGTH);
1047
1048 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
1049
1050 /* Upload uniforms as a UBO */
1051
1052 if (size) {
1053 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
1054 cfg.entries = DIV_ROUND_UP(size, 16);
1055 cfg.pointer = transfer.gpu;
1056 }
1057 } else {
1058 *ubo_ptr = 0;
1059 }
1060
1061 /* The rest are honest-to-goodness UBOs */
1062
1063 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1064 size_t usz = buf->cb[ubo].buffer_size;
1065 bool enabled = buf->enabled_mask & (1 << ubo);
1066 bool empty = usz == 0;
1067
1068 if (!enabled || empty) {
1069 ubo_ptr[ubo] = 0;
1070 continue;
1071 }
1072
1073 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
1074 cfg.entries = DIV_ROUND_UP(usz, 16);
1075 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
1076 stage, buf, ubo);
1077 }
1078 }
1079
1080 postfix->uniforms = transfer.gpu;
1081 postfix->uniform_buffers = ubos.gpu;
1082
1083 buf->dirty_mask = 0;
1084 }
1085
1086 void
1087 panfrost_emit_shared_memory(struct panfrost_batch *batch,
1088 const struct pipe_grid_info *info,
1089 struct midgard_payload_vertex_tiler *vtp)
1090 {
1091 struct panfrost_context *ctx = batch->ctx;
1092 struct panfrost_device *dev = pan_device(ctx->base.screen);
1093 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1094 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1095 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
1096 128));
1097
1098 unsigned log2_instances =
1099 util_logbase2_ceil(info->grid[0]) +
1100 util_logbase2_ceil(info->grid[1]) +
1101 util_logbase2_ceil(info->grid[2]);
1102
1103 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
1104 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
1105 shared_size,
1106 1);
1107
1108 struct mali_shared_memory shared = {
1109 .shared_memory = bo->gpu,
1110 .shared_workgroup_count = log2_instances,
1111 .shared_shift = util_logbase2(single_size) + 1
1112 };
1113
1114 vtp->postfix.shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared,
1115 sizeof(shared), 64);
1116 }
1117
1118 static mali_ptr
1119 panfrost_get_tex_desc(struct panfrost_batch *batch,
1120 enum pipe_shader_type st,
1121 struct panfrost_sampler_view *view)
1122 {
1123 if (!view)
1124 return (mali_ptr) 0;
1125
1126 struct pipe_sampler_view *pview = &view->base;
1127 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1128
1129 /* Add the BO to the job so it's retained until the job is done. */
1130
1131 panfrost_batch_add_bo(batch, rsrc->bo,
1132 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1133 panfrost_bo_access_for_stage(st));
1134
1135 panfrost_batch_add_bo(batch, view->bo,
1136 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1137 panfrost_bo_access_for_stage(st));
1138
1139 return view->bo->gpu;
1140 }
1141
1142 static void
1143 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1144 struct pipe_context *pctx)
1145 {
1146 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1147 if (view->texture_bo != rsrc->bo->gpu ||
1148 view->modifier != rsrc->modifier) {
1149 panfrost_bo_unreference(view->bo);
1150 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1151 }
1152 }
1153
1154 void
1155 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1156 enum pipe_shader_type stage,
1157 struct mali_vertex_tiler_postfix *postfix)
1158 {
1159 struct panfrost_context *ctx = batch->ctx;
1160 struct panfrost_device *device = pan_device(ctx->base.screen);
1161
1162 if (!ctx->sampler_view_count[stage])
1163 return;
1164
1165 if (device->quirks & IS_BIFROST) {
1166 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1167 MALI_BIFROST_TEXTURE_LENGTH *
1168 ctx->sampler_view_count[stage],
1169 MALI_BIFROST_TEXTURE_LENGTH);
1170
1171 struct mali_bifrost_texture_packed *out =
1172 (struct mali_bifrost_texture_packed *) T.cpu;
1173
1174 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1175 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1176 struct pipe_sampler_view *pview = &view->base;
1177 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1178
1179 panfrost_update_sampler_view(view, &ctx->base);
1180 out[i] = view->bifrost_descriptor;
1181
1182 /* Add the BOs to the job so they are retained until the job is done. */
1183
1184 panfrost_batch_add_bo(batch, rsrc->bo,
1185 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1186 panfrost_bo_access_for_stage(stage));
1187
1188 panfrost_batch_add_bo(batch, view->bo,
1189 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1190 panfrost_bo_access_for_stage(stage));
1191 }
1192
1193 postfix->textures = T.gpu;
1194 } else {
1195 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1196
1197 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1198 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1199
1200 panfrost_update_sampler_view(view, &ctx->base);
1201
1202 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1203 }
1204
1205 postfix->textures = panfrost_pool_upload_aligned(&batch->pool,
1206 trampolines,
1207 sizeof(uint64_t) *
1208 ctx->sampler_view_count[stage],
1209 sizeof(uint64_t));
1210 }
1211 }
1212
1213 void
1214 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1215 enum pipe_shader_type stage,
1216 struct mali_vertex_tiler_postfix *postfix)
1217 {
1218 struct panfrost_context *ctx = batch->ctx;
1219
1220 if (!ctx->sampler_count[stage])
1221 return;
1222
1223 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1224 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1225
1226 size_t sz = desc_size * ctx->sampler_count[stage];
1227 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool, sz, desc_size);
1228 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1229
1230 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1231 out[i] = ctx->samplers[stage][i]->hw;
1232
1233 postfix->sampler_descriptor = T.gpu;
1234 }
1235
1236 void
1237 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1238 struct mali_vertex_tiler_postfix *vertex_postfix)
1239 {
1240 struct panfrost_context *ctx = batch->ctx;
1241 struct panfrost_vertex_state *so = ctx->vertex;
1242 struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1243
1244 unsigned instance_shift = vertex_postfix->instance_shift;
1245 unsigned instance_odd = vertex_postfix->instance_odd;
1246
1247 /* Worst case: everything is NPOT, which is only possible if instancing
1248          * is enabled. Otherwise a single record is guaranteed */
1249 bool could_npot = instance_shift || instance_odd;
1250
1251 struct panfrost_transfer S = panfrost_pool_alloc_aligned(&batch->pool,
1252 MALI_ATTRIBUTE_BUFFER_LENGTH * vs->attribute_count *
1253 (could_npot ? 2 : 1),
1254 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1255
1256 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1257 MALI_ATTRIBUTE_LENGTH * vs->attribute_count,
1258 MALI_ATTRIBUTE_LENGTH);
1259
1260 struct mali_attribute_buffer_packed *bufs =
1261 (struct mali_attribute_buffer_packed *) S.cpu;
1262
1263 struct mali_attribute_packed *out =
1264 (struct mali_attribute_packed *) T.cpu;
1265
1266 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1267 unsigned k = 0;
1268
1269 for (unsigned i = 0; i < so->num_elements; ++i) {
1270 /* We map buffers 1:1 with the attributes, which
1271 * means duplicating some vertex buffers (who cares? aside from
1272 * maybe some caching implications but I somehow doubt that
1273 * matters) */
1274
1275 struct pipe_vertex_element *elem = &so->pipe[i];
1276 unsigned vbi = elem->vertex_buffer_index;
1277 attrib_to_buffer[i] = k;
1278
1279 if (!(ctx->vb_mask & (1 << vbi)))
1280 continue;
1281
1282 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1283 struct panfrost_resource *rsrc;
1284
1285 rsrc = pan_resource(buf->buffer.resource);
1286 if (!rsrc)
1287 continue;
1288
1289 /* Add a dependency of the batch on the vertex buffer */
1290 panfrost_batch_add_bo(batch, rsrc->bo,
1291 PAN_BO_ACCESS_SHARED |
1292 PAN_BO_ACCESS_READ |
1293 PAN_BO_ACCESS_VERTEX_TILER);
1294
1295 /* Mask off lower bits, see offset fixup below */
1296 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1297 mali_ptr addr = raw_addr & ~63;
1298
1299 /* Since we advanced the base pointer, we shrink the buffer
1300 * size, but add the offset we subtracted */
1301 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1302 - buf->buffer_offset;
1303
1304 /* When there is a divisor, the hardware-level divisor is
1305 * the product of the instance divisor and the padded count */
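                /* For example, padded_count = 8 with instance_divisor = 3
                 * gives hw_divisor = 24; since 24 is not a power of two, the
                 * NPOT path below encodes it with a magic-number divisor. */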
1306 unsigned divisor = elem->instance_divisor;
1307 unsigned hw_divisor = ctx->padded_count * divisor;
1308 unsigned stride = buf->stride;
1309
1310 /* If there's a divisor(=1) but no instancing, we want every
1311 * attribute to be the same */
1312
1313 if (divisor && ctx->instance_count == 1)
1314 stride = 0;
1315
1316 if (!divisor || ctx->instance_count <= 1) {
1317 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1318 if (ctx->instance_count > 1)
1319 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1320
1321 cfg.pointer = addr;
1322 cfg.stride = stride;
1323 cfg.size = size;
1324 cfg.divisor_r = instance_shift;
1325 cfg.divisor_p = instance_odd;
1326 }
1327 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1328 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1329 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1330 cfg.pointer = addr;
1331 cfg.stride = stride;
1332 cfg.size = size;
1333 cfg.divisor_r = __builtin_ctz(hw_divisor);
1334 }
1335
1336 } else {
1337 unsigned shift = 0, extra_flags = 0;
1338
1339 unsigned magic_divisor =
1340 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
1341
1342 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1343 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1344 cfg.pointer = addr;
1345 cfg.stride = stride;
1346 cfg.size = size;
1347
1348 cfg.divisor_r = shift;
1349 cfg.divisor_e = extra_flags;
1350 }
1351
1352 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1353 cfg.divisor_numerator = magic_divisor;
1354 cfg.divisor = divisor;
1355 }
1356
1357 ++k;
1358 }
1359
1360 ++k;
1361 }
1362
1363 /* Add special gl_VertexID/gl_InstanceID buffers */
1364
1365 if (unlikely(vs->attribute_count >= PAN_VERTEX_ID)) {
1366 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1367
1368 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1369 cfg.buffer_index = k++;
1370 cfg.format = so->formats[PAN_VERTEX_ID];
1371 }
1372
1373 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1374
1375 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1376 cfg.buffer_index = k++;
1377 cfg.format = so->formats[PAN_INSTANCE_ID];
1378 }
1379 }
1380
1381 /* Attribute addresses require 64-byte alignment, so let:
1382 *
1383 * base' = base & ~63 = base - (base & 63)
1384 * offset' = offset + (base & 63)
1385 *
1386 * Since base' + offset' = base + offset, these are equivalent
1387 * addressing modes and now base is 64 aligned.
1388 */
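        /* As a concrete example, base = 0x10046 realigns to base' = 0x10040,
         * while the element's src_offset grows by (0x10046 & 63) = 6 below,
         * leaving the effective address unchanged. */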
1389
1390 unsigned start = vertex_postfix->offset_start;
1391
1392 for (unsigned i = 0; i < so->num_elements; ++i) {
1393 unsigned vbi = so->pipe[i].vertex_buffer_index;
1394 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1395
1396 /* Adjust by the masked off bits of the offset. Make sure we
1397 * read src_offset from so->hw (which is not GPU visible)
1398 * rather than target (which is) due to caching effects */
1399
1400 unsigned src_offset = so->pipe[i].src_offset;
1401
1402 /* BOs aligned to 4k so guaranteed aligned to 64 */
1403 src_offset += (buf->buffer_offset & 63);
1404
1405                 /* Also, somewhat obscurely, per-instance data needs to be
1406 * offset in response to a delayed start in an indexed draw */
1407
1408 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
1409 src_offset -= buf->stride * start;
1410
1411 pan_pack(out + i, ATTRIBUTE, cfg) {
1412 cfg.buffer_index = attrib_to_buffer[i];
1413 cfg.format = so->formats[i];
1414 cfg.offset = src_offset;
1415 }
1416 }
1417
1418 vertex_postfix->attributes = S.gpu;
1419 vertex_postfix->attribute_meta = T.gpu;
1420 }
1421
1422 static mali_ptr
1423 panfrost_emit_varyings(struct panfrost_batch *batch,
1424 struct mali_attribute_buffer_packed *slot,
1425 unsigned stride, unsigned count)
1426 {
1427 unsigned size = stride * count;
1428 mali_ptr ptr = panfrost_pool_alloc_aligned(&batch->invisible_pool, size, 64).gpu;
1429
1430 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1431 cfg.stride = stride;
1432 cfg.size = size;
1433 cfg.pointer = ptr;
1434 }
1435
1436 return ptr;
1437 }
1438
1439 static unsigned
1440 panfrost_streamout_offset(unsigned stride, unsigned offset,
1441 struct pipe_stream_output_target *target)
1442 {
1443 return (target->buffer_offset + (offset * stride * 4)) & 63;
1444 }
1445
1446 static void
1447 panfrost_emit_streamout(struct panfrost_batch *batch,
1448 struct mali_attribute_buffer_packed *slot,
1449 unsigned stride_words, unsigned offset, unsigned count,
1450 struct pipe_stream_output_target *target)
1451 {
1452 unsigned stride = stride_words * 4;
1453 unsigned max_size = target->buffer_size;
1454 unsigned expected_size = stride * count;
1455
1456 /* Grab the BO and bind it to the batch */
1457 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1458
1459 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1460 * the perspective of the TILER and FRAGMENT.
1461 */
1462 panfrost_batch_add_bo(batch, bo,
1463 PAN_BO_ACCESS_SHARED |
1464 PAN_BO_ACCESS_RW |
1465 PAN_BO_ACCESS_VERTEX_TILER |
1466 PAN_BO_ACCESS_FRAGMENT);
1467
1468 /* We will have an offset applied to get alignment */
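        /* The pointer is rounded down to 64 bytes and the size grown by the
         * same amount; panfrost_streamout_offset() above reports those same
         * low bits back so the capture offset compensates. */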
1469 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
1470
1471 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1472 cfg.pointer = (addr & ~63);
1473 cfg.stride = stride;
1474 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1475 }
1476 }
1477
1478 static bool
1479 has_point_coord(unsigned mask, gl_varying_slot loc)
1480 {
1481 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1482 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1483 else if (loc == VARYING_SLOT_PNTC)
1484 return (mask & (1 << 8));
1485 else
1486 return false;
1487 }
1488
1489 /* Helpers for manipulating stream out information so we can pack varyings
1490 * accordingly. Compute the src_offset for a given captured varying */
1491
1492 static struct pipe_stream_output *
1493 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1494 {
1495 for (unsigned i = 0; i < info->num_outputs; ++i) {
1496 if (info->output[i].register_index == loc)
1497 return &info->output[i];
1498 }
1499
1500 unreachable("Varying not captured");
1501 }
1502
1503 static unsigned
1504 pan_varying_size(enum mali_format fmt)
1505 {
1506 unsigned type = MALI_EXTRACT_TYPE(fmt);
1507 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1508 unsigned bits = MALI_EXTRACT_BITS(fmt);
1509 unsigned bpc = 0;
1510
1511 if (bits == MALI_CHANNEL_FLOAT) {
1512 /* No doubles */
1513 bool fp16 = (type == MALI_FORMAT_SINT);
1514 assert(fp16 || (type == MALI_FORMAT_UNORM));
1515
1516 bpc = fp16 ? 2 : 4;
1517 } else {
1518 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1519
1520 /* See the enums */
1521 bits = 1 << bits;
1522 assert(bits >= 8);
1523 bpc = bits / 8;
1524 }
1525
1526 return bpc * chan;
1527 }
1528
1529 /* Indices for named (non-XFB) varyings that are present. These are packed
1530 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
1531  * PAN_VARY_*). This has the nice property that you can look up the buffer index
1532 * of a given special field given a shift S by:
1533 *
1534 * idx = popcount(P & ((1 << S) - 1))
1535 *
1536  * That is, look at all of the varyings that come earlier and count them;
1537  * that count is the index. Likewise, the total number of special
1538  * buffers required is simply popcount(P)
1539 */
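/* Worked example: with only GENERAL, POSITION and PNTCOORD present,
 * P = 0b1011, so the buffer index of PNTCOORD (S = 3) is
 * popcount(0b1011 & 0b0111) = 2, and popcount(P) = 3 buffers are needed. */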
1540
1541 enum pan_special_varying {
1542 PAN_VARY_GENERAL = 0,
1543 PAN_VARY_POSITION = 1,
1544 PAN_VARY_PSIZ = 2,
1545 PAN_VARY_PNTCOORD = 3,
1546 PAN_VARY_FACE = 4,
1547 PAN_VARY_FRAGCOORD = 5,
1548
1549 /* Keep last */
1550 PAN_VARY_MAX,
1551 };
1552
1553 /* Given a varying, figure out which index it corresponds to */
1554
1555 static inline unsigned
1556 pan_varying_index(unsigned present, enum pan_special_varying v)
1557 {
1558 unsigned mask = (1 << v) - 1;
1559 return util_bitcount(present & mask);
1560 }
1561
1562 /* Get the base offset for XFB buffers, which by convention come after
1563 * everything else. Wrapper function for semantic reasons; by construction this
1564 * is just popcount. */
1565
1566 static inline unsigned
1567 pan_xfb_base(unsigned present)
1568 {
1569 return util_bitcount(present);
1570 }
1571
1572 /* Computes the present mask for varyings so we can start emitting varying records */
1573
1574 static inline unsigned
1575 pan_varying_present(
1576 struct panfrost_shader_state *vs,
1577 struct panfrost_shader_state *fs,
1578 unsigned quirks)
1579 {
1580 /* At the moment we always emit general and position buffers. Not
1581 * strictly necessary but usually harmless */
1582
1583 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1584
1585 /* Enable special buffers by the shader info */
1586
1587 if (vs->writes_point_size)
1588 present |= (1 << PAN_VARY_PSIZ);
1589
1590 if (fs->reads_point_coord)
1591 present |= (1 << PAN_VARY_PNTCOORD);
1592
1593 if (fs->reads_face)
1594 present |= (1 << PAN_VARY_FACE);
1595
1596 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1597 present |= (1 << PAN_VARY_FRAGCOORD);
1598
1599 /* Also, if we have a point sprite, we need a point coord buffer */
1600
1601 for (unsigned i = 0; i < fs->varying_count; i++) {
1602 gl_varying_slot loc = fs->varyings_loc[i];
1603
1604 if (has_point_coord(fs->point_sprite_mask, loc))
1605 present |= (1 << PAN_VARY_PNTCOORD);
1606 }
1607
1608 return present;
1609 }
1610
1611 /* Emitters for varying records */
1612
1613 static void
1614 pan_emit_vary(struct mali_attribute_packed *out,
1615 unsigned present, enum pan_special_varying buf,
1616 unsigned quirks, enum mali_format format,
1617 unsigned offset)
1618 {
1619 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1620 unsigned swizzle = quirks & HAS_SWIZZLES ?
1621 panfrost_get_default_swizzle(nr_channels) :
1622 panfrost_bifrost_swizzle(nr_channels);
1623
1624 pan_pack(out, ATTRIBUTE, cfg) {
1625 cfg.buffer_index = pan_varying_index(present, buf);
1626 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1627 cfg.format = (format << 12) | swizzle;
1628 cfg.offset = offset;
1629 }
1630 }
1631
1632 /* General varying that is unused */
1633
1634 static void
1635 pan_emit_vary_only(struct mali_attribute_packed *out,
1636 unsigned present, unsigned quirks)
1637 {
1638 pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
1639 }
1640
1641 /* Special records */
1642
1643 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1644 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1645 [PAN_VARY_PSIZ] = MALI_R16F,
1646 [PAN_VARY_PNTCOORD] = MALI_R16F,
1647 [PAN_VARY_FACE] = MALI_R32I,
1648 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1649 };
1650
1651 static void
1652 pan_emit_vary_special(struct mali_attribute_packed *out,
1653 unsigned present, enum pan_special_varying buf,
1654 unsigned quirks)
1655 {
1656 assert(buf < PAN_VARY_MAX);
1657 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1658 }
1659
1660 static enum mali_format
1661 pan_xfb_format(enum mali_format format, unsigned nr)
1662 {
1663 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1664 return MALI_R32F | MALI_NR_CHANNELS(nr);
1665 else
1666 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1667 }
1668
1669 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1670 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1671 * value. */
1672
1673 static void
1674 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1675 unsigned present,
1676 unsigned max_xfb,
1677 unsigned *streamout_offsets,
1678 unsigned quirks,
1679 enum mali_format format,
1680 struct pipe_stream_output o)
1681 {
1682 unsigned swizzle = quirks & HAS_SWIZZLES ?
1683 panfrost_get_default_swizzle(o.num_components) :
1684 panfrost_bifrost_swizzle(o.num_components);
1685
1686 pan_pack(out, ATTRIBUTE, cfg) {
1687 /* XFB buffers come after everything else */
1688 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1689 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1690
1691 /* Override number of channels and precision to highp */
1692 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1693
1694 /* Apply given offsets together */
1695 cfg.offset = (o.dst_offset * 4) /* dwords */
1696 + streamout_offsets[o.output_buffer];
1697 }
1698 }
1699
1700 /* Determine if we should capture a varying for XFB. This requires actually
1701  * having a buffer for it. If we don't capture it, we'll fall back to a general
1702 * varying path (linked or unlinked, possibly discarding the write) */
1703
1704 static bool
1705 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1706 unsigned loc, unsigned max_xfb)
1707 {
1708 if (!(xfb->so_mask & (1ll << loc)))
1709 return false;
1710
1711 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1712 return o->output_buffer < max_xfb;
1713 }
1714
1715 static void
1716 pan_emit_general_varying(struct mali_attribute_packed *out,
1717 struct panfrost_shader_state *other,
1718 struct panfrost_shader_state *xfb,
1719 gl_varying_slot loc,
1720 enum mali_format format,
1721 unsigned present,
1722 unsigned quirks,
1723 unsigned *gen_offsets,
1724 enum mali_format *gen_formats,
1725 unsigned *gen_stride,
1726 unsigned idx,
1727 bool should_alloc)
1728 {
1729 /* Check if we're linked */
1730 signed other_idx = -1;
1731
1732 for (unsigned j = 0; j < other->varying_count; ++j) {
1733 if (other->varyings_loc[j] == loc) {
1734 other_idx = j;
1735 break;
1736 }
1737 }
1738
1739 if (other_idx < 0) {
1740 pan_emit_vary_only(out, present, quirks);
1741 return;
1742 }
1743
1744 unsigned offset = gen_offsets[other_idx];
1745
1746 if (should_alloc) {
1747 /* We're linked, so allocate space via a watermark allocation */
1748 enum mali_format alt = other->varyings[other_idx];
1749
1750 /* Do interpolation at minimum precision */
1751 unsigned size_main = pan_varying_size(format);
1752 unsigned size_alt = pan_varying_size(alt);
1753 unsigned size = MIN2(size_main, size_alt);
1754
1755 /* If a varying is marked for XFB but not actually captured, we
1756 * should match the format to the format that would otherwise
1757 * be used for XFB, since dEQP checks for invariance here. It's
1758 * unclear if this is required by the spec. */
1759
1760 if (xfb->so_mask & (1ull << loc)) {
1761 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1762 format = pan_xfb_format(format, o->num_components);
1763 size = pan_varying_size(format);
1764 } else if (size == size_alt) {
1765 format = alt;
1766 }
1767
1768 gen_offsets[idx] = *gen_stride;
1769 gen_formats[other_idx] = format;
1770 offset = *gen_stride;
1771 *gen_stride += size;
1772 }
1773
1774 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1775 }
1776
1777 /* Higher-level wrapper around all of the above, classifying a varying into
1778  * one of the types above */
1779
1780 static void
1781 panfrost_emit_varying(
1782 struct mali_attribute_packed *out,
1783 struct panfrost_shader_state *stage,
1784 struct panfrost_shader_state *other,
1785 struct panfrost_shader_state *xfb,
1786 unsigned present,
1787 unsigned max_xfb,
1788 unsigned *streamout_offsets,
1789 unsigned quirks,
1790 unsigned *gen_offsets,
1791 enum mali_format *gen_formats,
1792 unsigned *gen_stride,
1793 unsigned idx,
1794 bool should_alloc,
1795 bool is_fragment)
1796 {
1797 gl_varying_slot loc = stage->varyings_loc[idx];
1798 enum mali_format format = stage->varyings[idx];
1799
1800 /* Override format to match linkage */
1801 if (!should_alloc && gen_formats[idx])
1802 format = gen_formats[idx];
1803
1804 if (has_point_coord(stage->point_sprite_mask, loc)) {
1805 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1806 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1807 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1808 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1809 } else if (loc == VARYING_SLOT_POS) {
1810 if (is_fragment)
1811 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1812 else
1813 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1814 } else if (loc == VARYING_SLOT_PSIZ) {
1815 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1816 } else if (loc == VARYING_SLOT_PNTC) {
1817 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1818 } else if (loc == VARYING_SLOT_FACE) {
1819 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1820 } else {
1821 pan_emit_general_varying(out, other, xfb, loc, format, present,
1822 quirks, gen_offsets, gen_formats, gen_stride,
1823 idx, should_alloc);
1824 }
1825 }
1826
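/* Emit the attribute buffer record for a special input (point coord, front
 * facing, fragment coord) if it is present: these records carry a special tag
 * rather than a pointer to a backing buffer */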
1827 static void
1828 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1829 unsigned present,
1830 enum pan_special_varying v,
1831 unsigned special)
1832 {
1833 if (present & (1 << v)) {
1834 unsigned idx = pan_varying_index(present, v);
1835
1836 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1837 cfg.special = special;
1838 cfg.type = 0;
1839 }
1840 }
1841 }
1842
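/* Emit the varying descriptors: one attribute record per vertex and fragment
 * shader varying, plus the attribute buffers that back them (general varyings,
 * position, point size, special inputs and any streamout targets) */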
1843 void
1844 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1845 unsigned vertex_count,
1846 struct mali_vertex_tiler_postfix *vertex_postfix,
1847 struct mali_vertex_tiler_postfix *tiler_postfix,
1848 union midgard_primitive_size *primitive_size)
1849 {
1850 /* Load the shaders */
1851 struct panfrost_context *ctx = batch->ctx;
1852 struct panfrost_device *dev = pan_device(ctx->base.screen);
1853 struct panfrost_shader_state *vs, *fs;
1854 size_t vs_size, fs_size;
1855
1856 /* Allocate the varying descriptor */
1857
1858 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1859 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1860 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1861 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1862
1863 struct panfrost_transfer trans = panfrost_pool_alloc_aligned(
1864 &batch->pool, vs_size + fs_size, MALI_ATTRIBUTE_LENGTH);
1865
1866 struct pipe_stream_output_info *so = &vs->stream_output;
1867 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1868
1869 /* Check if this varying is linked by us. This is the case for
1870 * general-purpose, non-captured varyings. If it is, link it. If it's
1871 * not, use the provided stream out information to determine the
1872 * offset, since it was already linked for us. */
1873
1874 unsigned gen_offsets[32];
1875 enum mali_format gen_formats[32];
1876 memset(gen_offsets, 0, sizeof(gen_offsets));
1877 memset(gen_formats, 0, sizeof(gen_formats));
1878
1879 unsigned gen_stride = 0;
1880 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1881 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1882
1883 unsigned streamout_offsets[32];
1884
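/* Precompute the offset into each bound streamout target so the per-varying
 * XFB records can be emitted in one pass below */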
1885 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1886 streamout_offsets[i] = panfrost_streamout_offset(
1887 so->stride[i],
1888 ctx->streamout.offsets[i],
1889 ctx->streamout.targets[i]);
1890 }
1891
1892 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1893 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1894
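/* Emit the vertex shader varyings first (allocating slots for general
 * varyings as we go), then the fragment shader varyings, which link against
 * the offsets and formats just assigned */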
1895 for (unsigned i = 0; i < vs->varying_count; i++) {
1896 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1897 ctx->streamout.num_targets, streamout_offsets,
1898 dev->quirks,
1899 gen_offsets, gen_formats, &gen_stride, i, true, false);
1900 }
1901
1902 for (unsigned i = 0; i < fs->varying_count; i++) {
1903 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
1904 ctx->streamout.num_targets, streamout_offsets,
1905 dev->quirks,
1906 gen_offsets, gen_formats, &gen_stride, i, false, true);
1907 }
1908
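/* Now allocate the attribute buffers themselves: one record per internal
 * varying buffer plus one per streamout target, since XFB buffers come after
 * everything else */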
1909 unsigned xfb_base = pan_xfb_base(present);
1910 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1911 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets),
1912 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1913 struct mali_attribute_buffer_packed *varyings =
1914 (struct mali_attribute_buffer_packed *) T.cpu;
1915
1916 /* Emit the stream out buffers */
1917
1918 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
1919 ctx->vertex_count);
1920
1921 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1922 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
1923 so->stride[i],
1924 ctx->streamout.offsets[i],
1925 out_count,
1926 ctx->streamout.targets[i]);
1927 }
1928
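/* General varyings land in a single interleaved buffer, gen_stride bytes per
 * vertex */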
1929 panfrost_emit_varyings(batch,
1930 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
1931 gen_stride, vertex_count);
1932
1933 /* fp32 vec4 gl_Position */
1934 tiler_postfix->position_varying = panfrost_emit_varyings(batch,
1935 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
1936 sizeof(float) * 4, vertex_count);
1937
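/* gl_PointSize is captured as 16-bit floats (MALI_R16F), hence two bytes per
 * vertex */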
1938 if (present & (1 << PAN_VARY_PSIZ)) {
1939 primitive_size->pointer = panfrost_emit_varyings(batch,
1940 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
1941 2, vertex_count);
1942 }
1943
1944 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
1945 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
1946 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
1947
1948 vertex_postfix->varyings = T.gpu;
1949 tiler_postfix->varyings = T.gpu;
1950
1951 vertex_postfix->varying_meta = trans.gpu;
1952 tiler_postfix->varying_meta = trans.gpu + vs_size;
1953 }
1954
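/* Wrap the assembled vertex and tiler payloads into jobs on the batch
 * scoreboard. Normally the tiler job depends on the vertex job, and is
 * skipped entirely when rasterizer discard is enabled */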
1955 void
1956 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
1957 struct mali_vertex_tiler_prefix *vertex_prefix,
1958 struct mali_vertex_tiler_postfix *vertex_postfix,
1959 struct mali_vertex_tiler_prefix *tiler_prefix,
1960 struct mali_vertex_tiler_postfix *tiler_postfix,
1961 union midgard_primitive_size *primitive_size)
1962 {
1963 struct panfrost_context *ctx = batch->ctx;
1964 struct panfrost_device *device = pan_device(ctx->base.screen);
1965 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
1966 struct bifrost_payload_vertex bifrost_vertex = {0,};
1967 struct bifrost_payload_tiler bifrost_tiler = {0,};
1968 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
1969 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
1970 void *vp, *tp;
1971 size_t vp_size, tp_size;
1972
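/* Assemble the payload in the layout this GPU generation expects: Bifrost
 * uses separate vertex and tiler payload structs (with tiler metadata),
 * while Midgard shares one vertex/tiler layout */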
1973 if (device->quirks & IS_BIFROST) {
1974 bifrost_vertex.prefix = *vertex_prefix;
1975 bifrost_vertex.postfix = *vertex_postfix;
1976 vp = &bifrost_vertex;
1977 vp_size = sizeof(bifrost_vertex);
1978
1979 bifrost_tiler.prefix = *tiler_prefix;
1980 bifrost_tiler.tiler.primitive_size = *primitive_size;
1981 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
1982 bifrost_tiler.postfix = *tiler_postfix;
1983 tp = &bifrost_tiler;
1984 tp_size = sizeof(bifrost_tiler);
1985 } else {
1986 midgard_vertex.prefix = *vertex_prefix;
1987 midgard_vertex.postfix = *vertex_postfix;
1988 vp = &midgard_vertex;
1989 vp_size = sizeof(midgard_vertex);
1990
1991 midgard_tiler.prefix = *tiler_prefix;
1992 midgard_tiler.postfix = *tiler_postfix;
1993 midgard_tiler.primitive_size = *primitive_size;
1994 tp = &midgard_tiler;
1995 tp_size = sizeof(midgard_tiler);
1996 }
1997
1998 if (wallpapering) {
1999 /* Inject in reverse order, with "predicted" job indices.
2000 * THIS IS A HACK XXX */
2001 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
2002 batch->scoreboard.job_index + 2, tp, tp_size, true);
2003 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2004 vp, vp_size, true);
2005 return;
2006 }
2007
2008 /* If rasterizer discard is enabled, only submit the vertex job */
2009
2010 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
2011 vp, vp_size, false);
2012
2013 if (ctx->rasterizer->base.rasterizer_discard)
2014 return;
2015
2016 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
2017 false);
2018 }
2019
2020 /* TODO: stop hardcoding this */
2021 mali_ptr
2022 panfrost_emit_sample_locations(struct panfrost_batch *batch)
2023 {
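/* The values look like fixed-point sample positions, with (128, 128)
 * presumably the pixel centre, but the exact encoding is not yet understood;
 * hence the TODO above */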
2024 uint16_t locations[] = {
2025 128, 128,
2026 0, 256,
2027 0, 256,
2028 0, 256,
2029 0, 256,
2030 0, 256,
2031 0, 256,
2032 0, 256,
2033 0, 256,
2034 0, 256,
2035 0, 256,
2036 0, 256,
2037 0, 256,
2038 0, 256,
2039 0, 256,
2040 0, 256,
2041 0, 256,
2042 0, 256,
2043 0, 256,
2044 0, 256,
2045 0, 256,
2046 0, 256,
2047 0, 256,
2048 0, 256,
2049 0, 256,
2050 0, 256,
2051 0, 256,
2052 0, 256,
2053 0, 256,
2054 0, 256,
2055 0, 256,
2056 0, 256,
2057 128, 128,
2058 0, 0,
2059 0, 0,
2060 0, 0,
2061 0, 0,
2062 0, 0,
2063 0, 0,
2064 0, 0,
2065 0, 0,
2066 0, 0,
2067 0, 0,
2068 0, 0,
2069 0, 0,
2070 0, 0,
2071 0, 0,
2072 0, 0,
2073 };
2074
2075 return panfrost_pool_upload_aligned(&batch->pool, locations, 96 * sizeof(uint16_t), 64);
2076 }