panfrost: Use pack for draw descriptor
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "util/macros.h"
26 #include "util/u_prim.h"
27 #include "util/u_vbuf.h"
28
29 #include "panfrost-quirks.h"
30
31 #include "pan_pool.h"
32 #include "pan_bo.h"
33 #include "pan_cmdstream.h"
34 #include "pan_context.h"
35 #include "pan_job.h"
36
37 /* If a BO is accessed for a particular shader stage, will it be in the primary
38 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
39 * fragment will be primary, e.g. compute jobs will be considered
40 * "vertex/tiler" by analogy */
41
42 static inline uint32_t
43 panfrost_bo_access_for_stage(enum pipe_shader_type stage)
44 {
45 assert(stage == PIPE_SHADER_FRAGMENT ||
46 stage == PIPE_SHADER_VERTEX ||
47 stage == PIPE_SHADER_COMPUTE);
48
49 return stage == PIPE_SHADER_FRAGMENT ?
50 PAN_BO_ACCESS_FRAGMENT :
51 PAN_BO_ACCESS_VERTEX_TILER;
52 }
53
54 mali_ptr
55 panfrost_vt_emit_shared_memory(struct panfrost_batch *batch)
56 {
57 struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
58
59 struct mali_shared_memory shared = {
60 .shared_workgroup_count = ~0,
61 };
62
63 if (batch->stack_size) {
64 struct panfrost_bo *stack =
65 panfrost_batch_get_scratchpad(batch, batch->stack_size,
66 dev->thread_tls_alloc,
67 dev->core_count);
68
69 shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
70 shared.scratchpad = stack->gpu;
71 }
72
73 return panfrost_pool_upload_aligned(&batch->pool, &shared, sizeof(shared), 64);
74 }
75
76 void
77 panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
78 struct mali_vertex_tiler_prefix *prefix,
79 union midgard_primitive_size *primitive_size)
80 {
81 struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
82
83 if (!panfrost_writes_point_size(ctx)) {
84 float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
85 rasterizer->base.point_size :
86 rasterizer->base.line_width;
87
88 primitive_size->constant = val;
89 }
90 }
91
92 unsigned
93 panfrost_translate_index_size(unsigned size)
94 {
95 switch (size) {
96 case 1:
97 return MALI_DRAW_INDEXED_UINT8;
98
99 case 2:
100 return MALI_DRAW_INDEXED_UINT16;
101
102 case 4:
103 return MALI_DRAW_INDEXED_UINT32;
104
105 default:
106 unreachable("Invalid index size");
107 }
108 }
109
 110 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
 111 * good for the duration of the draw (transient), though it could last longer. Also
 112 * gets the bounds on the index buffer for the range accessed by the draw. We do
 113 * these operations together because there are natural optimizations which
 114 * require them to be together. */
115
116 mali_ptr
117 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
118 const struct pipe_draw_info *info,
119 unsigned *min_index, unsigned *max_index)
120 {
121 struct panfrost_resource *rsrc = pan_resource(info->index.resource);
122 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
123 off_t offset = info->start * info->index_size;
124 bool needs_indices = true;
125 mali_ptr out = 0;
126
127 if (info->max_index != ~0u) {
128 *min_index = info->min_index;
129 *max_index = info->max_index;
130 needs_indices = false;
131 }
132
133 if (!info->has_user_indices) {
134 /* Only resources can be directly mapped */
135 panfrost_batch_add_bo(batch, rsrc->bo,
136 PAN_BO_ACCESS_SHARED |
137 PAN_BO_ACCESS_READ |
138 PAN_BO_ACCESS_VERTEX_TILER);
139 out = rsrc->bo->gpu + offset;
140
141 /* Check the cache */
142 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
143 info->start,
144 info->count,
145 min_index,
146 max_index);
147 } else {
148 /* Otherwise, we need to upload to transient memory */
149 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
150 struct panfrost_transfer T =
151 panfrost_pool_alloc_aligned(&batch->pool,
152 info->count * info->index_size,
153 info->index_size);
154
155 memcpy(T.cpu, ibuf8 + offset, info->count * info->index_size);
156 out = T.gpu;
157 }
158
159 if (needs_indices) {
160 /* Fallback */
161 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
162
163 if (!info->has_user_indices)
164 panfrost_minmax_cache_add(rsrc->index_cache,
165 info->start, info->count,
166 *min_index, *max_index);
167 }
168
169 return out;
170 }
171
172 static unsigned
173 translate_tex_wrap(enum pipe_tex_wrap w)
174 {
175 switch (w) {
176 case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
177 case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
178 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
179 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
180 case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
181 case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
182 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
183 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
184 default: unreachable("Invalid wrap");
185 }
186 }
187
 188 /* The hardware compares in the wrong order, so we have to flip before
189 * encoding. Yes, really. */
190
191 static enum mali_func
192 panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
193 {
194 if (!cso->compare_mode)
195 return MALI_FUNC_NEVER;
196
197 enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
198 return panfrost_flip_compare_func(f);
199 }
200
201 static enum mali_mipmap_mode
202 pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
203 {
204 switch (f) {
205 case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
206 case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
207 case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
208 default: unreachable("Invalid");
209 }
210 }
211
212 void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
213 struct mali_midgard_sampler_packed *hw)
214 {
215 pan_pack(hw, MIDGARD_SAMPLER, cfg) {
216 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
217 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
218 cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
219 MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
220 cfg.normalized_coordinates = cso->normalized_coords;
221
222 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
223
224 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
225
226 /* If necessary, we disable mipmapping in the sampler descriptor by
227 * clamping the LOD as tight as possible (from 0 to epsilon,
228 * essentially -- remember these are fixed point numbers, so
229 * epsilon=1/256) */
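                /* Worked example (assuming FIXED_16 keeps 8 fractional bits,
                 * as the 1/256 epsilon above implies): with min_lod = 0.0,
                 * minimum_lod packs to 0 and maximum_lod to 1, i.e. a clamp
                 * range of [0, 1/256], which effectively pins sampling to the
                 * base level. */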
230
231 cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
232 cfg.minimum_lod + 1 :
233 FIXED_16(cso->max_lod, false);
234
235 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
236 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
237 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
238
239 cfg.compare_function = panfrost_sampler_compare_func(cso);
240 cfg.seamless_cube_map = cso->seamless_cube_map;
241
242 cfg.border_color_r = cso->border_color.f[0];
243 cfg.border_color_g = cso->border_color.f[1];
244 cfg.border_color_b = cso->border_color.f[2];
245 cfg.border_color_a = cso->border_color.f[3];
246 }
247 }
248
249 void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
250 struct mali_bifrost_sampler_packed *hw)
251 {
252 pan_pack(hw, BIFROST_SAMPLER, cfg) {
253 cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
254 cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
255 cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
256 cfg.normalized_coordinates = cso->normalized_coords;
257
258 cfg.lod_bias = FIXED_16(cso->lod_bias, true);
259 cfg.minimum_lod = FIXED_16(cso->min_lod, false);
260 cfg.maximum_lod = FIXED_16(cso->max_lod, false);
261
262 cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
263 cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
264 cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
265
266 cfg.compare_function = panfrost_sampler_compare_func(cso);
267 cfg.seamless_cube_map = cso->seamless_cube_map;
268 }
269 }
270
271 static bool
272 panfrost_fs_required(
273 struct panfrost_shader_state *fs,
274 struct panfrost_blend_final *blend,
275 unsigned rt_count)
276 {
277 /* If we generally have side effects */
278 if (fs->fs_sidefx)
279 return true;
280
281 /* If colour is written we need to execute */
282 for (unsigned i = 0; i < rt_count; ++i) {
283 if (!blend[i].no_colour)
284 return true;
285 }
286
287 /* If depth is written and not implied we need to execute.
288 * TODO: Predicate on Z/S writes being enabled */
289 return (fs->writes_depth || fs->writes_stencil);
290 }
291
292 static void
293 panfrost_emit_blend(struct panfrost_batch *batch, void *rts,
294 struct panfrost_blend_final *blend)
295 {
296 const struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
297 struct panfrost_shader_state *fs = panfrost_get_shader_state(batch->ctx, PIPE_SHADER_FRAGMENT);
298 unsigned rt_count = batch->key.nr_cbufs;
299
300 struct bifrost_blend_rt *brts = rts;
301
302 /* Disable blending for depth-only */
303
304 if (rt_count == 0) {
305 if (dev->quirks & IS_BIFROST) {
306 memset(brts, 0, sizeof(*brts));
307 brts[0].unk2 = 0x3;
308 } else {
309 pan_pack(rts, MIDGARD_BLEND_OPAQUE, cfg) {
310 cfg.equation = 0xf0122122; /* Replace */
311 }
312 }
313 }
314
315 for (unsigned i = 0; i < rt_count; ++i) {
316 struct mali_blend_flags_packed flags = {};
317
318 pan_pack(&flags, BLEND_FLAGS, cfg) {
319 if (blend[i].no_colour) {
320 cfg.enable = false;
321 break;
322 }
323
324 batch->draws |= (PIPE_CLEAR_COLOR0 << i);
325
326 cfg.srgb = util_format_is_srgb(batch->key.cbufs[i]->format);
327 cfg.load_destination = blend[i].load_dest;
328 cfg.dither_disable = !batch->ctx->blend->base.dither;
329
330 if (!(dev->quirks & IS_BIFROST))
331 cfg.midgard_blend_shader = blend[i].is_shader;
332 }
333
334 if (dev->quirks & IS_BIFROST) {
335 memset(brts + i, 0, sizeof(brts[i]));
336 brts[i].flags = flags.opaque[0];
337
338 if (blend[i].is_shader) {
339 /* The blend shader's address needs to be at
340 * the same top 32 bit as the fragment shader.
341 * TODO: Ensure that's always the case.
342 */
343 assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
344 (fs->bo->gpu & (0xffffffffull << 32)));
345 brts[i].shader = blend[i].shader.gpu;
346 brts[i].unk2 = 0x0;
347 } else {
348 enum pipe_format format = batch->key.cbufs[i]->format;
349 const struct util_format_description *format_desc;
350 format_desc = util_format_description(format);
351
352 brts[i].equation = blend[i].equation.equation;
353
354 /* TODO: this is a bit more complicated */
355 brts[i].constant = blend[i].equation.constant;
356
357 brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
358
359 /* 0x19 disables blending and forces REPLACE
360 * mode (equivalent to rgb_mode = alpha_mode =
361 * x122, colour mask = 0xF). 0x1a allows
362 * blending. */
363 brts[i].unk2 = blend[i].opaque ? 0x19 : 0x1a;
364
365 brts[i].shader_type = fs->blend_types[i];
366 }
367 } else {
368 pan_pack(rts, MIDGARD_BLEND_OPAQUE, cfg) {
369 cfg.flags = flags;
370
371 if (blend[i].is_shader) {
372 cfg.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
373 } else {
374 cfg.equation = blend[i].equation.equation.opaque[0];
375 cfg.constant = blend[i].equation.constant;
376 }
377 }
378
379 rts += MALI_MIDGARD_BLEND_LENGTH;
380 }
381 }
382 }
383
384 static void
385 panfrost_emit_frag_shader(struct panfrost_context *ctx,
386 struct mali_state_packed *fragmeta,
387 struct panfrost_blend_final *blend)
388 {
389 const struct panfrost_device *dev = pan_device(ctx->base.screen);
390 struct panfrost_shader_state *fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
391 struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
392 const struct panfrost_zsa_state *zsa = ctx->depth_stencil;
393 unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
394 bool alpha_to_coverage = ctx->blend->base.alpha_to_coverage;
395
396 /* Built up here */
397 struct mali_shader_packed shader = fs->shader;
398 struct mali_preload_packed preload = fs->preload;
399 uint32_t properties;
400 struct mali_multisample_misc_packed multisample_misc;
401 struct mali_stencil_mask_misc_packed stencil_mask_misc;
402 union midgard_blend sfbd_blend = { 0 };
403
404 if (!panfrost_fs_required(fs, blend, rt_count)) {
405 if (dev->quirks & IS_BIFROST) {
406 pan_pack(&shader, SHADER, cfg) {}
407
408 pan_pack(&properties, BIFROST_PROPERTIES, cfg) {
409 cfg.unknown = 0x950020; /* XXX */
410 cfg.early_z_enable = true;
411 }
412
413 preload.opaque[0] = 0;
414 } else {
415 pan_pack(&shader, SHADER, cfg) {
416 cfg.shader = 0x1;
417 }
418
419 pan_pack(&properties, MIDGARD_PROPERTIES, cfg) {
420 cfg.work_register_count = 1;
421 cfg.depth_source = MALI_DEPTH_SOURCE_FIXED_FUNCTION;
422 cfg.early_z_enable = true;
423 }
424 }
425 } else if (dev->quirks & IS_BIFROST) {
426 bool no_blend = true;
427
428 for (unsigned i = 0; i < rt_count; ++i)
429 no_blend &= (!blend[i].load_dest | blend[i].no_colour);
430
431 pan_pack(&properties, BIFROST_PROPERTIES, cfg) {
432 cfg.early_z_enable = !fs->can_discard && !fs->writes_depth && no_blend;
433 }
434
435 /* Combine with prepacked properties */
436 properties |= fs->properties.opaque[0];
437 } else {
438 /* Reasons to disable early-Z from a shader perspective */
439 bool late_z = fs->can_discard || fs->writes_global ||
440 fs->writes_depth || fs->writes_stencil;
441
442 /* If either depth or stencil is enabled, discard matters */
443 bool zs_enabled =
444 (zsa->base.depth.enabled && zsa->base.depth.func != PIPE_FUNC_ALWAYS) ||
445 zsa->base.stencil[0].enabled;
446
447 bool has_blend_shader = false;
448
449 for (unsigned c = 0; c < rt_count; ++c)
450 has_blend_shader |= blend[c].is_shader;
451
452 pan_pack(&properties, MIDGARD_PROPERTIES, cfg) {
453 /* TODO: Reduce this limit? */
454 if (has_blend_shader)
455 cfg.work_register_count = MAX2(fs->work_reg_count, 8);
456 else
457 cfg.work_register_count = fs->work_reg_count;
458
459 cfg.early_z_enable = !(late_z || alpha_to_coverage);
460 cfg.reads_tilebuffer = fs->outputs_read || (!zs_enabled && fs->can_discard);
461 cfg.reads_depth_stencil = zs_enabled && fs->can_discard;
462 }
463
464 properties |= fs->properties.opaque[0];
465 }
466
467 pan_pack(&multisample_misc, MULTISAMPLE_MISC, cfg) {
468 bool msaa = rast->multisample;
469 cfg.multisample_enable = msaa;
470 cfg.sample_mask = (msaa ? ctx->sample_mask : ~0) & 0xFFFF;
471
472 /* EXT_shader_framebuffer_fetch requires per-sample */
473 bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
474 cfg.evaluate_per_sample = msaa && per_sample;
475
476 if (dev->quirks & MIDGARD_SFBD) {
477 cfg.sfbd_load_destination = blend[0].load_dest;
478 cfg.sfbd_blend_shader = blend[0].is_shader;
479 }
480
481 cfg.depth_function = zsa->base.depth.enabled ?
482 panfrost_translate_compare_func(zsa->base.depth.func) :
483 MALI_FUNC_ALWAYS;
484
485 cfg.depth_write_mask = zsa->base.depth.writemask;
486 cfg.near_discard = rast->depth_clip_near;
487 cfg.far_discard = rast->depth_clip_far;
488 cfg.unknown_2 = true;
489 }
490
491 pan_pack(&stencil_mask_misc, STENCIL_MASK_MISC, cfg) {
492 cfg.stencil_mask_front = zsa->stencil_mask_front;
493 cfg.stencil_mask_back = zsa->stencil_mask_back;
494 cfg.stencil_enable = zsa->base.stencil[0].enabled;
495 cfg.alpha_to_coverage = alpha_to_coverage;
496
497 if (dev->quirks & MIDGARD_SFBD) {
498 cfg.sfbd_write_enable = !blend[0].no_colour;
499 cfg.sfbd_srgb = util_format_is_srgb(ctx->pipe_framebuffer.cbufs[0]->format);
500 cfg.sfbd_dither_disable = !ctx->blend->base.dither;
501 }
502
503 cfg.unknown_1 = 0x7;
504 cfg.depth_range_1 = cfg.depth_range_2 = rast->offset_tri;
505 cfg.single_sampled_lines = !rast->multisample;
506 }
507
508 if (dev->quirks & MIDGARD_SFBD) {
509 if (blend[0].is_shader) {
510 sfbd_blend.shader = blend[0].shader.gpu |
511 blend[0].shader.first_tag;
512 } else {
513 sfbd_blend.equation = blend[0].equation.equation;
514 sfbd_blend.constant = blend[0].equation.constant;
515 }
516 } else if (!(dev->quirks & IS_BIFROST)) {
517 /* Bug where MRT-capable hw apparently reads the last blend
518 * shader from here instead of the usual location? */
519
520 for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
521 if (!blend[rt].is_shader)
522 continue;
523
524 sfbd_blend.shader = blend[rt].shader.gpu |
525 blend[rt].shader.first_tag;
526 break;
527 }
528 }
529
530 pan_pack(fragmeta, STATE_OPAQUE, cfg) {
531 cfg.shader = fs->shader;
532 cfg.properties = properties;
533 cfg.depth_units = rast->offset_units * 2.0f;
534 cfg.depth_factor = rast->offset_scale;
535 cfg.multisample_misc = multisample_misc;
536 cfg.stencil_mask_misc = stencil_mask_misc;
537
538 cfg.stencil_front = zsa->stencil_front;
539 cfg.stencil_back = zsa->stencil_back;
540
541 /* Bottom bits for stencil ref, exactly one word */
542 bool back_enab = zsa->base.stencil[1].enabled;
543 cfg.stencil_front.opaque[0] |= ctx->stencil_ref.ref_value[0];
544 cfg.stencil_back.opaque[0] |= ctx->stencil_ref.ref_value[back_enab ? 1 : 0];
545
546 if (dev->quirks & IS_BIFROST)
547 cfg.preload = preload;
548 else
549 memcpy(&cfg.sfbd_blend, &sfbd_blend, sizeof(sfbd_blend));
550 }
551 }
552
553 mali_ptr
554 panfrost_emit_compute_shader_meta(struct panfrost_batch *batch, enum pipe_shader_type stage)
555 {
556 struct panfrost_shader_state *ss = panfrost_get_shader_state(batch->ctx, stage);
557
558 panfrost_batch_add_bo(batch, ss->bo,
559 PAN_BO_ACCESS_PRIVATE |
560 PAN_BO_ACCESS_READ |
561 PAN_BO_ACCESS_VERTEX_TILER);
562
563 panfrost_batch_add_bo(batch, pan_resource(ss->upload.rsrc)->bo,
564 PAN_BO_ACCESS_PRIVATE |
565 PAN_BO_ACCESS_READ |
566 PAN_BO_ACCESS_VERTEX_TILER);
567
568 return pan_resource(ss->upload.rsrc)->bo->gpu + ss->upload.offset;
569 }
570
571 mali_ptr
572 panfrost_emit_frag_shader_meta(struct panfrost_batch *batch)
573 {
574 struct panfrost_context *ctx = batch->ctx;
575 struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
576
577 /* Add the shader BO to the batch. */
578 panfrost_batch_add_bo(batch, ss->bo,
579 PAN_BO_ACCESS_PRIVATE |
580 PAN_BO_ACCESS_READ |
581 PAN_BO_ACCESS_FRAGMENT);
582
583 struct panfrost_device *dev = pan_device(ctx->base.screen);
584 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
585 struct panfrost_transfer xfer;
586 unsigned rt_size;
587
588 if (dev->quirks & MIDGARD_SFBD)
589 rt_size = 0;
590 else if (dev->quirks & IS_BIFROST)
591 rt_size = sizeof(struct bifrost_blend_rt);
592 else
593 rt_size = sizeof(struct midgard_blend_rt);
594
595 unsigned desc_size = MALI_STATE_LENGTH + rt_size * rt_count;
596 xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, MALI_STATE_LENGTH);
597
598 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
599
600 for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
601 blend[c] = panfrost_get_blend_for_context(ctx, c);
602
603 panfrost_emit_frag_shader(ctx, (struct mali_state_packed *) xfer.cpu, blend);
604
605 if (!(dev->quirks & MIDGARD_SFBD))
606 panfrost_emit_blend(batch, xfer.cpu + MALI_STATE_LENGTH, blend);
607 else
608 batch->draws |= PIPE_CLEAR_COLOR0;
609
610 return xfer.gpu;
611 }
612
613 mali_ptr
614 panfrost_emit_viewport(struct panfrost_batch *batch)
615 {
616 struct panfrost_context *ctx = batch->ctx;
617 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
618 const struct pipe_scissor_state *ss = &ctx->scissor;
619 const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
620 const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
621
622 /* Derive min/max from translate/scale. Note since |x| >= 0 by
623 * definition, we have that -|x| <= |x| hence translate - |scale| <=
624 * translate + |scale|, so the ordering is correct here. */
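        /* Illustrative numbers (not from the source): an 800x600 viewport with
         * translate = (400, 300, 0.5) and scale = (400, -300, 0.5) yields
         * vp_minx = 0, vp_maxx = 800, vp_miny = 0, vp_maxy = 600 and a depth
         * range of [0, 1], independent of the sign of each scale component. */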
625 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
626 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
627 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
628 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
629 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
630 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
631
632 /* Scissor to the intersection of viewport and to the scissor, clamped
633 * to the framebuffer */
634
635 unsigned minx = MIN2(fb->width, vp_minx);
636 unsigned maxx = MIN2(fb->width, vp_maxx);
637 unsigned miny = MIN2(fb->height, vp_miny);
638 unsigned maxy = MIN2(fb->height, vp_maxy);
639
640 if (ss && rast->scissor) {
641 minx = MAX2(ss->minx, minx);
642 miny = MAX2(ss->miny, miny);
643 maxx = MIN2(ss->maxx, maxx);
644 maxy = MIN2(ss->maxy, maxy);
645 }
646
647 struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
648
649 pan_pack(T.cpu, VIEWPORT, cfg) {
650 cfg.scissor_minimum_x = minx;
651 cfg.scissor_minimum_y = miny;
652 cfg.scissor_maximum_x = maxx - 1;
653 cfg.scissor_maximum_y = maxy - 1;
654
655 cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
656 cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
657 }
658
659 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
660 return T.gpu;
661 }
662
663 static mali_ptr
664 panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
665 enum pipe_shader_type st,
666 struct panfrost_constant_buffer *buf,
667 unsigned index)
668 {
669 struct pipe_constant_buffer *cb = &buf->cb[index];
670 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
671
672 if (rsrc) {
673 panfrost_batch_add_bo(batch, rsrc->bo,
674 PAN_BO_ACCESS_SHARED |
675 PAN_BO_ACCESS_READ |
676 panfrost_bo_access_for_stage(st));
677
 678 /* Alignment guaranteed by
679 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
680 return rsrc->bo->gpu + cb->buffer_offset;
681 } else if (cb->user_buffer) {
682 return panfrost_pool_upload_aligned(&batch->pool,
683 cb->user_buffer +
684 cb->buffer_offset,
685 cb->buffer_size, 16);
686 } else {
687 unreachable("No constant buffer");
688 }
689 }
690
691 struct sysval_uniform {
692 union {
693 float f[4];
694 int32_t i[4];
695 uint32_t u[4];
696 uint64_t du[2];
697 };
698 };
699
700 static void
701 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
702 struct sysval_uniform *uniform)
703 {
704 struct panfrost_context *ctx = batch->ctx;
705 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
706
707 uniform->f[0] = vp->scale[0];
708 uniform->f[1] = vp->scale[1];
709 uniform->f[2] = vp->scale[2];
710 }
711
712 static void
713 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
714 struct sysval_uniform *uniform)
715 {
716 struct panfrost_context *ctx = batch->ctx;
717 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
718
719 uniform->f[0] = vp->translate[0];
720 uniform->f[1] = vp->translate[1];
721 uniform->f[2] = vp->translate[2];
722 }
723
724 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
725 enum pipe_shader_type st,
726 unsigned int sysvalid,
727 struct sysval_uniform *uniform)
728 {
729 struct panfrost_context *ctx = batch->ctx;
730 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
731 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
732 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
733 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
734
735 assert(dim);
736 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
737
738 if (dim > 1)
739 uniform->i[1] = u_minify(tex->texture->height0,
740 tex->u.tex.first_level);
741
742 if (dim > 2)
743 uniform->i[2] = u_minify(tex->texture->depth0,
744 tex->u.tex.first_level);
745
746 if (is_array)
747 uniform->i[dim] = tex->texture->array_size;
748 }
749
750 static void
751 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
752 enum pipe_shader_type st,
753 unsigned ssbo_id,
754 struct sysval_uniform *uniform)
755 {
756 struct panfrost_context *ctx = batch->ctx;
757
758 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
759 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
760
761 /* Compute address */
762 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
763
764 panfrost_batch_add_bo(batch, bo,
765 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
766 panfrost_bo_access_for_stage(st));
767
768 /* Upload address and size as sysval */
769 uniform->du[0] = bo->gpu + sb.buffer_offset;
770 uniform->u[2] = sb.buffer_size;
771 }
772
773 static void
774 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
775 enum pipe_shader_type st,
776 unsigned samp_idx,
777 struct sysval_uniform *uniform)
778 {
779 struct panfrost_context *ctx = batch->ctx;
780 struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
781
782 uniform->f[0] = sampl->min_lod;
783 uniform->f[1] = sampl->max_lod;
784 uniform->f[2] = sampl->lod_bias;
785
786 /* Even without any errata, Midgard represents "no mipmapping" as
 787 * fixing the LOD with the clamps; keep behaviour consistent. cf.
788 * panfrost_create_sampler_state which also explains our choice of
789 * epsilon value (again to keep behaviour consistent) */
790
791 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
792 uniform->f[1] = uniform->f[0] + (1.0/256.0);
793 }
794
795 static void
796 panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
797 struct sysval_uniform *uniform)
798 {
799 struct panfrost_context *ctx = batch->ctx;
800
801 uniform->u[0] = ctx->compute_grid->grid[0];
802 uniform->u[1] = ctx->compute_grid->grid[1];
803 uniform->u[2] = ctx->compute_grid->grid[2];
804 }
805
806 static void
807 panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
808 struct panfrost_shader_state *ss,
809 enum pipe_shader_type st)
810 {
811 struct sysval_uniform *uniforms = (void *)buf;
812
813 for (unsigned i = 0; i < ss->sysval_count; ++i) {
814 int sysval = ss->sysval[i];
815
816 switch (PAN_SYSVAL_TYPE(sysval)) {
817 case PAN_SYSVAL_VIEWPORT_SCALE:
818 panfrost_upload_viewport_scale_sysval(batch,
819 &uniforms[i]);
820 break;
821 case PAN_SYSVAL_VIEWPORT_OFFSET:
822 panfrost_upload_viewport_offset_sysval(batch,
823 &uniforms[i]);
824 break;
825 case PAN_SYSVAL_TEXTURE_SIZE:
826 panfrost_upload_txs_sysval(batch, st,
827 PAN_SYSVAL_ID(sysval),
828 &uniforms[i]);
829 break;
830 case PAN_SYSVAL_SSBO:
831 panfrost_upload_ssbo_sysval(batch, st,
832 PAN_SYSVAL_ID(sysval),
833 &uniforms[i]);
834 break;
835 case PAN_SYSVAL_NUM_WORK_GROUPS:
836 panfrost_upload_num_work_groups_sysval(batch,
837 &uniforms[i]);
838 break;
839 case PAN_SYSVAL_SAMPLER:
840 panfrost_upload_sampler_sysval(batch, st,
841 PAN_SYSVAL_ID(sysval),
842 &uniforms[i]);
843 break;
844 default:
845 assert(0);
846 }
847 }
848 }
849
850 static const void *
851 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
852 unsigned index)
853 {
854 struct pipe_constant_buffer *cb = &buf->cb[index];
855 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
856
857 if (rsrc)
858 return rsrc->bo->cpu;
859 else if (cb->user_buffer)
860 return cb->user_buffer;
861 else
862 unreachable("No constant buffer");
863 }
864
865 mali_ptr
866 panfrost_emit_const_buf(struct panfrost_batch *batch,
867 enum pipe_shader_type stage,
868 mali_ptr *push_constants)
869 {
870 struct panfrost_context *ctx = batch->ctx;
871 struct panfrost_shader_variants *all = ctx->shader[stage];
872
873 if (!all)
874 return 0;
875
876 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
877
878 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
879
880 /* Uniforms are implicitly UBO #0 */
881 bool has_uniforms = buf->enabled_mask & (1 << 0);
882
883 /* Allocate room for the sysval and the uniforms */
884 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
885 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
886 size_t size = sys_size + uniform_size;
887 struct panfrost_transfer transfer =
888 panfrost_pool_alloc_aligned(&batch->pool, size, 16);
889
890 /* Upload sysvals requested by the shader */
891 panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
892
893 /* Upload uniforms */
894 if (has_uniforms && uniform_size) {
895 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
896 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
897 }
898
899 /* Next up, attach UBOs. UBO #0 is the uniforms we just
900 * uploaded, so it's always included. The count is the highest UBO
901 * addressable -- gaps are included. */
902
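        /* For instance, a hypothetical enabled_mask of 0x9 (UBOs 0 and 3) gives
         * ubo_count = 32 - __builtin_clz(0x9) = 4, so the disabled UBOs 1 and 2
         * still get (zeroed) descriptor slots in the loop below. */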
903 unsigned ubo_count = 32 - __builtin_clz(buf->enabled_mask | 1);
904
905 size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
906 struct panfrost_transfer ubos =
907 panfrost_pool_alloc_aligned(&batch->pool, sz,
908 MALI_UNIFORM_BUFFER_LENGTH);
909
910 uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
911
912 /* Upload uniforms as a UBO */
913
914 if (size) {
915 pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
916 cfg.entries = DIV_ROUND_UP(size, 16);
917 cfg.pointer = transfer.gpu;
918 }
919 } else {
920 *ubo_ptr = 0;
921 }
922
923 /* The rest are honest-to-goodness UBOs */
924
925 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
926 size_t usz = buf->cb[ubo].buffer_size;
927 bool enabled = buf->enabled_mask & (1 << ubo);
928 bool empty = usz == 0;
929
930 if (!enabled || empty) {
931 ubo_ptr[ubo] = 0;
932 continue;
933 }
934
935 pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
936 cfg.entries = DIV_ROUND_UP(usz, 16);
937 cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
938 stage, buf, ubo);
939 }
940 }
941
942 *push_constants = transfer.gpu;
943
944 buf->dirty_mask = 0;
945 return ubos.gpu;
946 }
947
948 mali_ptr
949 panfrost_emit_shared_memory(struct panfrost_batch *batch,
950 const struct pipe_grid_info *info)
951 {
952 struct panfrost_context *ctx = batch->ctx;
953 struct panfrost_device *dev = pan_device(ctx->base.screen);
954 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
955 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
956 unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
957 128));
958
959 unsigned log2_instances =
960 util_logbase2_ceil(info->grid[0]) +
961 util_logbase2_ceil(info->grid[1]) +
962 util_logbase2_ceil(info->grid[2]);
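        /* e.g. a hypothetical 4x4x1 grid gives 2 + 2 + 0 = 4, reserving room
         * for 1 << 4 = 16 workgroups' worth of shared memory per core; since
         * each dimension is rounded up separately this can over-allocate. */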
963
964 unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
965 struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
966 shared_size,
967 1);
968
969 struct mali_shared_memory shared = {
970 .shared_memory = bo->gpu,
971 .shared_workgroup_count = log2_instances,
972 .shared_shift = util_logbase2(single_size) + 1
973 };
974
975 return panfrost_pool_upload_aligned(&batch->pool, &shared,
976 sizeof(shared), 64);
977 }
978
979 static mali_ptr
980 panfrost_get_tex_desc(struct panfrost_batch *batch,
981 enum pipe_shader_type st,
982 struct panfrost_sampler_view *view)
983 {
984 if (!view)
985 return (mali_ptr) 0;
986
987 struct pipe_sampler_view *pview = &view->base;
988 struct panfrost_resource *rsrc = pan_resource(pview->texture);
989
990 /* Add the BO to the job so it's retained until the job is done. */
991
992 panfrost_batch_add_bo(batch, rsrc->bo,
993 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
994 panfrost_bo_access_for_stage(st));
995
996 panfrost_batch_add_bo(batch, view->bo,
997 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
998 panfrost_bo_access_for_stage(st));
999
1000 return view->bo->gpu;
1001 }
1002
1003 static void
1004 panfrost_update_sampler_view(struct panfrost_sampler_view *view,
1005 struct pipe_context *pctx)
1006 {
1007 struct panfrost_resource *rsrc = pan_resource(view->base.texture);
1008 if (view->texture_bo != rsrc->bo->gpu ||
1009 view->modifier != rsrc->modifier) {
1010 panfrost_bo_unreference(view->bo);
1011 panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
1012 }
1013 }
1014
1015 mali_ptr
1016 panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
1017 enum pipe_shader_type stage)
1018 {
1019 struct panfrost_context *ctx = batch->ctx;
1020 struct panfrost_device *device = pan_device(ctx->base.screen);
1021
1022 if (!ctx->sampler_view_count[stage])
1023 return 0;
1024
1025 if (device->quirks & IS_BIFROST) {
1026 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1027 MALI_BIFROST_TEXTURE_LENGTH *
1028 ctx->sampler_view_count[stage],
1029 MALI_BIFROST_TEXTURE_LENGTH);
1030
1031 struct mali_bifrost_texture_packed *out =
1032 (struct mali_bifrost_texture_packed *) T.cpu;
1033
1034 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1035 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1036 struct pipe_sampler_view *pview = &view->base;
1037 struct panfrost_resource *rsrc = pan_resource(pview->texture);
1038
1039 panfrost_update_sampler_view(view, &ctx->base);
1040 out[i] = view->bifrost_descriptor;
1041
1042 /* Add the BOs to the job so they are retained until the job is done. */
1043
1044 panfrost_batch_add_bo(batch, rsrc->bo,
1045 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1046 panfrost_bo_access_for_stage(stage));
1047
1048 panfrost_batch_add_bo(batch, view->bo,
1049 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
1050 panfrost_bo_access_for_stage(stage));
1051 }
1052
1053 return T.gpu;
1054 } else {
1055 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1056
1057 for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
1058 struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
1059
1060 panfrost_update_sampler_view(view, &ctx->base);
1061
1062 trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
1063 }
1064
1065 return panfrost_pool_upload_aligned(&batch->pool, trampolines,
1066 sizeof(uint64_t) *
1067 ctx->sampler_view_count[stage],
1068 sizeof(uint64_t));
1069 }
1070 }
1071
1072 mali_ptr
1073 panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
1074 enum pipe_shader_type stage)
1075 {
1076 struct panfrost_context *ctx = batch->ctx;
1077
1078 if (!ctx->sampler_count[stage])
1079 return 0;
1080
1081 size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
1082 assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
1083
1084 size_t sz = desc_size * ctx->sampler_count[stage];
1085 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool, sz, desc_size);
1086 struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
1087
1088 for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
1089 out[i] = ctx->samplers[stage][i]->hw;
1090
1091 return T.gpu;
1092 }
1093
1094 mali_ptr
1095 panfrost_emit_vertex_data(struct panfrost_batch *batch,
1096 mali_ptr *buffers)
1097 {
1098 struct panfrost_context *ctx = batch->ctx;
1099 struct panfrost_vertex_state *so = ctx->vertex;
1100 struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1101
1102 /* Worst case: everything is NPOT, which is only possible if instancing
 1103 * is enabled. Otherwise a single record is guaranteed */
1104 struct panfrost_transfer S = panfrost_pool_alloc_aligned(&batch->pool,
1105 MALI_ATTRIBUTE_BUFFER_LENGTH * vs->attribute_count *
1106 (ctx->instance_count > 1 ? 2 : 1),
1107 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1108
1109 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1110 MALI_ATTRIBUTE_LENGTH * vs->attribute_count,
1111 MALI_ATTRIBUTE_LENGTH);
1112
1113 struct mali_attribute_buffer_packed *bufs =
1114 (struct mali_attribute_buffer_packed *) S.cpu;
1115
1116 struct mali_attribute_packed *out =
1117 (struct mali_attribute_packed *) T.cpu;
1118
1119 unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
1120 unsigned k = 0;
1121
1122 for (unsigned i = 0; i < so->num_elements; ++i) {
1123 /* We map buffers 1:1 with the attributes, which
1124 * means duplicating some vertex buffers (who cares? aside from
1125 * maybe some caching implications but I somehow doubt that
1126 * matters) */
1127
1128 struct pipe_vertex_element *elem = &so->pipe[i];
1129 unsigned vbi = elem->vertex_buffer_index;
1130 attrib_to_buffer[i] = k;
1131
1132 if (!(ctx->vb_mask & (1 << vbi)))
1133 continue;
1134
1135 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1136 struct panfrost_resource *rsrc;
1137
1138 rsrc = pan_resource(buf->buffer.resource);
1139 if (!rsrc)
1140 continue;
1141
1142 /* Add a dependency of the batch on the vertex buffer */
1143 panfrost_batch_add_bo(batch, rsrc->bo,
1144 PAN_BO_ACCESS_SHARED |
1145 PAN_BO_ACCESS_READ |
1146 PAN_BO_ACCESS_VERTEX_TILER);
1147
1148 /* Mask off lower bits, see offset fixup below */
1149 mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
1150 mali_ptr addr = raw_addr & ~63;
1151
1152 /* Since we advanced the base pointer, we shrink the buffer
1153 * size, but add the offset we subtracted */
1154 unsigned size = rsrc->base.width0 + (raw_addr - addr)
1155 - buf->buffer_offset;
1156
1157 /* When there is a divisor, the hardware-level divisor is
1158 * the product of the instance divisor and the padded count */
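                /* Made-up example: an instance divisor of 3 with padded_count = 4
                 * gives hw_divisor = 12, which is not a power of two, so the NPOT
                 * magic-divisor path below is used; a divisor of 2 would give
                 * hw_divisor = 8 and take the cheaper POT path instead. */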
1159 unsigned divisor = elem->instance_divisor;
1160 unsigned hw_divisor = ctx->padded_count * divisor;
1161 unsigned stride = buf->stride;
1162
1163 /* If there's a divisor(=1) but no instancing, we want every
1164 * attribute to be the same */
1165
1166 if (divisor && ctx->instance_count == 1)
1167 stride = 0;
1168
1169 if (!divisor || ctx->instance_count <= 1) {
1170 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1171 if (ctx->instance_count > 1) {
1172 cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
1173 cfg.divisor = ctx->padded_count;
1174 }
1175
1176 cfg.pointer = addr;
1177 cfg.stride = stride;
1178 cfg.size = size;
1179 }
1180 } else if (util_is_power_of_two_or_zero(hw_divisor)) {
1181 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1182 cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
1183 cfg.pointer = addr;
1184 cfg.stride = stride;
1185 cfg.size = size;
1186 cfg.divisor_r = __builtin_ctz(hw_divisor);
1187 }
1188
1189 } else {
1190 unsigned shift = 0, extra_flags = 0;
1191
1192 unsigned magic_divisor =
1193 panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
1194
1195 pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
1196 cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
1197 cfg.pointer = addr;
1198 cfg.stride = stride;
1199 cfg.size = size;
1200
1201 cfg.divisor_r = shift;
1202 cfg.divisor_e = extra_flags;
1203 }
1204
1205 pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
1206 cfg.divisor_numerator = magic_divisor;
1207 cfg.divisor = divisor;
1208 }
1209
1210 ++k;
1211 }
1212
1213 ++k;
1214 }
1215
1216 /* Add special gl_VertexID/gl_InstanceID buffers */
1217
1218 if (unlikely(vs->attribute_count >= PAN_VERTEX_ID)) {
1219 panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1220
1221 pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
1222 cfg.buffer_index = k++;
1223 cfg.format = so->formats[PAN_VERTEX_ID];
1224 }
1225
1226 panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
1227
1228 pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
1229 cfg.buffer_index = k++;
1230 cfg.format = so->formats[PAN_INSTANCE_ID];
1231 }
1232 }
1233
1234 /* Attribute addresses require 64-byte alignment, so let:
1235 *
1236 * base' = base & ~63 = base - (base & 63)
1237 * offset' = offset + (base & 63)
1238 *
1239 * Since base' + offset' = base + offset, these are equivalent
1240 * addressing modes and now base is 64 aligned.
1241 */
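        /* Illustrative example: if base = 0x1000030, then base & 63 = 0x30, so
         * the buffer above was packed with pointer 0x1000000 and the 0x30 is
         * folded into each attribute's offset here, leaving the final
         * addresses unchanged. */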
1242
1243 for (unsigned i = 0; i < so->num_elements; ++i) {
1244 unsigned vbi = so->pipe[i].vertex_buffer_index;
1245 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
1246
1247 /* Adjust by the masked off bits of the offset. Make sure we
1248 * read src_offset from so->hw (which is not GPU visible)
1249 * rather than target (which is) due to caching effects */
1250
1251 unsigned src_offset = so->pipe[i].src_offset;
1252
1253 /* BOs aligned to 4k so guaranteed aligned to 64 */
1254 src_offset += (buf->buffer_offset & 63);
1255
1256 /* Also, somewhat obscurely per-instance data needs to be
1257 * offset in response to a delayed start in an indexed draw */
1258
1259 if (so->pipe[i].instance_divisor && ctx->instance_count > 1)
1260 src_offset -= buf->stride * ctx->offset_start;
1261
1262 pan_pack(out + i, ATTRIBUTE, cfg) {
1263 cfg.buffer_index = attrib_to_buffer[i];
1264 cfg.format = so->formats[i];
1265 cfg.offset = src_offset;
1266 }
1267 }
1268
1269 *buffers = S.gpu;
1270 return T.gpu;
1271 }
1272
1273 static mali_ptr
1274 panfrost_emit_varyings(struct panfrost_batch *batch,
1275 struct mali_attribute_buffer_packed *slot,
1276 unsigned stride, unsigned count)
1277 {
1278 unsigned size = stride * count;
1279 mali_ptr ptr = panfrost_pool_alloc_aligned(&batch->invisible_pool, size, 64).gpu;
1280
1281 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1282 cfg.stride = stride;
1283 cfg.size = size;
1284 cfg.pointer = ptr;
1285 }
1286
1287 return ptr;
1288 }
1289
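 /* Streamout targets need the same 64-byte-aligned base pointers as other
  * attribute buffers, so the true write address is split: the aligned-down
  * base is packed in panfrost_emit_streamout below, while the low 6 bits
  * returned here are added to the varying record's offset instead. */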
1290 static unsigned
1291 panfrost_streamout_offset(unsigned stride, unsigned offset,
1292 struct pipe_stream_output_target *target)
1293 {
1294 return (target->buffer_offset + (offset * stride * 4)) & 63;
1295 }
1296
1297 static void
1298 panfrost_emit_streamout(struct panfrost_batch *batch,
1299 struct mali_attribute_buffer_packed *slot,
1300 unsigned stride_words, unsigned offset, unsigned count,
1301 struct pipe_stream_output_target *target)
1302 {
1303 unsigned stride = stride_words * 4;
1304 unsigned max_size = target->buffer_size;
1305 unsigned expected_size = stride * count;
1306
1307 /* Grab the BO and bind it to the batch */
1308 struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
1309
1310 /* Varyings are WRITE from the perspective of the VERTEX but READ from
1311 * the perspective of the TILER and FRAGMENT.
1312 */
1313 panfrost_batch_add_bo(batch, bo,
1314 PAN_BO_ACCESS_SHARED |
1315 PAN_BO_ACCESS_RW |
1316 PAN_BO_ACCESS_VERTEX_TILER |
1317 PAN_BO_ACCESS_FRAGMENT);
1318
1319 /* We will have an offset applied to get alignment */
1320 mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
1321
1322 pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
1323 cfg.pointer = (addr & ~63);
1324 cfg.stride = stride;
1325 cfg.size = MIN2(max_size, expected_size) + (addr & 63);
1326 }
1327 }
1328
1329 static bool
1330 has_point_coord(unsigned mask, gl_varying_slot loc)
1331 {
1332 if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
1333 return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
1334 else if (loc == VARYING_SLOT_PNTC)
1335 return (mask & (1 << 8));
1336 else
1337 return false;
1338 }
1339
1340 /* Helpers for manipulating stream out information so we can pack varyings
1341 * accordingly. Compute the src_offset for a given captured varying */
1342
1343 static struct pipe_stream_output *
1344 pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
1345 {
1346 for (unsigned i = 0; i < info->num_outputs; ++i) {
1347 if (info->output[i].register_index == loc)
1348 return &info->output[i];
1349 }
1350
1351 unreachable("Varying not captured");
1352 }
1353
1354 static unsigned
1355 pan_varying_size(enum mali_format fmt)
1356 {
1357 unsigned type = MALI_EXTRACT_TYPE(fmt);
1358 unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
1359 unsigned bits = MALI_EXTRACT_BITS(fmt);
1360 unsigned bpc = 0;
1361
1362 if (bits == MALI_CHANNEL_FLOAT) {
1363 /* No doubles */
1364 bool fp16 = (type == MALI_FORMAT_SINT);
1365 assert(fp16 || (type == MALI_FORMAT_UNORM));
1366
1367 bpc = fp16 ? 2 : 4;
1368 } else {
1369 assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
1370
1371 /* See the enums */
1372 bits = 1 << bits;
1373 assert(bits >= 8);
1374 bpc = bits / 8;
1375 }
1376
1377 return bpc * chan;
1378 }
1379
1380 /* Indices for named (non-XFB) varyings that are present. These are packed
1381 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
 1382 * PAN_VARY_*). This has the nice property that you can look up the buffer index
1383 * of a given special field given a shift S by:
1384 *
1385 * idx = popcount(P & ((1 << S) - 1))
1386 *
 1387 * That is, look at all of the varyings that come earlier and count them; that
 1388 * count is the index of the varying in question. Likewise, the total number of
 1389 * special buffers required is simply popcount(P).
1390 */
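 /* Worked example: with P = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION) |
  * (1 << PAN_VARY_PSIZ) = 0b111, the buffer index for PAN_VARY_PSIZ (S = 2) is
  * popcount(0b111 & ((1 << 2) - 1)) = popcount(0b011) = 2, and popcount(P) = 3
  * special buffers are required in total. */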
1391
1392 enum pan_special_varying {
1393 PAN_VARY_GENERAL = 0,
1394 PAN_VARY_POSITION = 1,
1395 PAN_VARY_PSIZ = 2,
1396 PAN_VARY_PNTCOORD = 3,
1397 PAN_VARY_FACE = 4,
1398 PAN_VARY_FRAGCOORD = 5,
1399
1400 /* Keep last */
1401 PAN_VARY_MAX,
1402 };
1403
 1404 /* Given a varying, figure out which index it corresponds to */
1405
1406 static inline unsigned
1407 pan_varying_index(unsigned present, enum pan_special_varying v)
1408 {
1409 unsigned mask = (1 << v) - 1;
1410 return util_bitcount(present & mask);
1411 }
1412
1413 /* Get the base offset for XFB buffers, which by convention come after
1414 * everything else. Wrapper function for semantic reasons; by construction this
1415 * is just popcount. */
1416
1417 static inline unsigned
1418 pan_xfb_base(unsigned present)
1419 {
1420 return util_bitcount(present);
1421 }
1422
1423 /* Computes the present mask for varyings so we can start emitting varying records */
1424
1425 static inline unsigned
1426 pan_varying_present(
1427 struct panfrost_shader_state *vs,
1428 struct panfrost_shader_state *fs,
1429 unsigned quirks)
1430 {
1431 /* At the moment we always emit general and position buffers. Not
1432 * strictly necessary but usually harmless */
1433
1434 unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
1435
1436 /* Enable special buffers by the shader info */
1437
1438 if (vs->writes_point_size)
1439 present |= (1 << PAN_VARY_PSIZ);
1440
1441 if (fs->reads_point_coord)
1442 present |= (1 << PAN_VARY_PNTCOORD);
1443
1444 if (fs->reads_face)
1445 present |= (1 << PAN_VARY_FACE);
1446
1447 if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
1448 present |= (1 << PAN_VARY_FRAGCOORD);
1449
1450 /* Also, if we have a point sprite, we need a point coord buffer */
1451
1452 for (unsigned i = 0; i < fs->varying_count; i++) {
1453 gl_varying_slot loc = fs->varyings_loc[i];
1454
1455 if (has_point_coord(fs->point_sprite_mask, loc))
1456 present |= (1 << PAN_VARY_PNTCOORD);
1457 }
1458
1459 return present;
1460 }
1461
1462 /* Emitters for varying records */
1463
1464 static void
1465 pan_emit_vary(struct mali_attribute_packed *out,
1466 unsigned present, enum pan_special_varying buf,
1467 unsigned quirks, enum mali_format format,
1468 unsigned offset)
1469 {
1470 unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
1471 unsigned swizzle = quirks & HAS_SWIZZLES ?
1472 panfrost_get_default_swizzle(nr_channels) :
1473 panfrost_bifrost_swizzle(nr_channels);
1474
1475 pan_pack(out, ATTRIBUTE, cfg) {
1476 cfg.buffer_index = pan_varying_index(present, buf);
1477 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1478 cfg.format = (format << 12) | swizzle;
1479 cfg.offset = offset;
1480 }
1481 }
1482
1483 /* General varying that is unused */
1484
1485 static void
1486 pan_emit_vary_only(struct mali_attribute_packed *out,
1487 unsigned present, unsigned quirks)
1488 {
1489 pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
1490 }
1491
1492 /* Special records */
1493
1494 static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
1495 [PAN_VARY_POSITION] = MALI_VARYING_POS,
1496 [PAN_VARY_PSIZ] = MALI_R16F,
1497 [PAN_VARY_PNTCOORD] = MALI_R16F,
1498 [PAN_VARY_FACE] = MALI_R32I,
1499 [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
1500 };
1501
1502 static void
1503 pan_emit_vary_special(struct mali_attribute_packed *out,
1504 unsigned present, enum pan_special_varying buf,
1505 unsigned quirks)
1506 {
1507 assert(buf < PAN_VARY_MAX);
1508 pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
1509 }
1510
1511 static enum mali_format
1512 pan_xfb_format(enum mali_format format, unsigned nr)
1513 {
1514 if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
1515 return MALI_R32F | MALI_NR_CHANNELS(nr);
1516 else
1517 return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
1518 }
1519
1520 /* Transform feedback records. Note struct pipe_stream_output is (if packed as
1521 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
1522 * value. */
1523
1524 static void
1525 pan_emit_vary_xfb(struct mali_attribute_packed *out,
1526 unsigned present,
1527 unsigned max_xfb,
1528 unsigned *streamout_offsets,
1529 unsigned quirks,
1530 enum mali_format format,
1531 struct pipe_stream_output o)
1532 {
1533 unsigned swizzle = quirks & HAS_SWIZZLES ?
1534 panfrost_get_default_swizzle(o.num_components) :
1535 panfrost_bifrost_swizzle(o.num_components);
1536
1537 pan_pack(out, ATTRIBUTE, cfg) {
1538 /* XFB buffers come after everything else */
1539 cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
1540 cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
1541
1542 /* Override number of channels and precision to highp */
1543 cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
1544
1545 /* Apply given offsets together */
1546 cfg.offset = (o.dst_offset * 4) /* dwords */
1547 + streamout_offsets[o.output_buffer];
1548 }
1549 }
1550
1551 /* Determine if we should capture a varying for XFB. This requires actually
 1552 * having a buffer for it. If we don't capture it, we'll fall back to a general
1553 * varying path (linked or unlinked, possibly discarding the write) */
1554
1555 static bool
1556 panfrost_xfb_captured(struct panfrost_shader_state *xfb,
1557 unsigned loc, unsigned max_xfb)
1558 {
1559 if (!(xfb->so_mask & (1ll << loc)))
1560 return false;
1561
1562 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1563 return o->output_buffer < max_xfb;
1564 }
1565
1566 static void
1567 pan_emit_general_varying(struct mali_attribute_packed *out,
1568 struct panfrost_shader_state *other,
1569 struct panfrost_shader_state *xfb,
1570 gl_varying_slot loc,
1571 enum mali_format format,
1572 unsigned present,
1573 unsigned quirks,
1574 unsigned *gen_offsets,
1575 enum mali_format *gen_formats,
1576 unsigned *gen_stride,
1577 unsigned idx,
1578 bool should_alloc)
1579 {
1580 /* Check if we're linked */
1581 signed other_idx = -1;
1582
1583 for (unsigned j = 0; j < other->varying_count; ++j) {
1584 if (other->varyings_loc[j] == loc) {
1585 other_idx = j;
1586 break;
1587 }
1588 }
1589
1590 if (other_idx < 0) {
1591 pan_emit_vary_only(out, present, quirks);
1592 return;
1593 }
1594
1595 unsigned offset = gen_offsets[other_idx];
1596
1597 if (should_alloc) {
1598 /* We're linked, so allocate a space via a watermark allocation */
1599 enum mali_format alt = other->varyings[other_idx];
1600
1601 /* Do interpolation at minimum precision */
1602 unsigned size_main = pan_varying_size(format);
1603 unsigned size_alt = pan_varying_size(alt);
1604 unsigned size = MIN2(size_main, size_alt);
1605
1606 /* If a varying is marked for XFB but not actually captured, we
1607 * should match the format to the format that would otherwise
1608 * be used for XFB, since dEQP checks for invariance here. It's
1609 * unclear if this is required by the spec. */
1610
1611 if (xfb->so_mask & (1ull << loc)) {
1612 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1613 format = pan_xfb_format(format, o->num_components);
1614 size = pan_varying_size(format);
1615 } else if (size == size_alt) {
1616 format = alt;
1617 }
1618
1619 gen_offsets[idx] = *gen_stride;
1620 gen_formats[other_idx] = format;
1621 offset = *gen_stride;
1622 *gen_stride += size;
1623 }
1624
1625 pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
1626 }
1627
1628 /* Higher-level wrapper around all of the above, classifying a varying into one
1629 * of the above types */
1630
1631 static void
1632 panfrost_emit_varying(
1633 struct mali_attribute_packed *out,
1634 struct panfrost_shader_state *stage,
1635 struct panfrost_shader_state *other,
1636 struct panfrost_shader_state *xfb,
1637 unsigned present,
1638 unsigned max_xfb,
1639 unsigned *streamout_offsets,
1640 unsigned quirks,
1641 unsigned *gen_offsets,
1642 enum mali_format *gen_formats,
1643 unsigned *gen_stride,
1644 unsigned idx,
1645 bool should_alloc,
1646 bool is_fragment)
1647 {
1648 gl_varying_slot loc = stage->varyings_loc[idx];
1649 enum mali_format format = stage->varyings[idx];
1650
1651 /* Override format to match linkage */
1652 if (!should_alloc && gen_formats[idx])
1653 format = gen_formats[idx];
1654
1655 if (has_point_coord(stage->point_sprite_mask, loc)) {
1656 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1657 } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
1658 struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
1659 pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
1660 } else if (loc == VARYING_SLOT_POS) {
1661 if (is_fragment)
1662 pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
1663 else
1664 pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
1665 } else if (loc == VARYING_SLOT_PSIZ) {
1666 pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
1667 } else if (loc == VARYING_SLOT_PNTC) {
1668 pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
1669 } else if (loc == VARYING_SLOT_FACE) {
1670 pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
1671 } else {
1672 pan_emit_general_varying(out, other, xfb, loc, format, present,
1673 quirks, gen_offsets, gen_formats, gen_stride,
1674 idx, should_alloc);
1675 }
1676 }
1677
1678 static void
1679 pan_emit_special_input(struct mali_attribute_buffer_packed *out,
1680 unsigned present,
1681 enum pan_special_varying v,
1682 unsigned special)
1683 {
1684 if (present & (1 << v)) {
1685 unsigned idx = pan_varying_index(present, v);
1686
1687 pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
1688 cfg.special = special;
1689 cfg.type = 0;
1690 }
1691 }
1692 }
1693
1694 void
1695 panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
1696 unsigned vertex_count,
1697 mali_ptr *vs_attribs,
1698 mali_ptr *fs_attribs,
1699 mali_ptr *buffers,
1700 mali_ptr *position,
1701 mali_ptr *psiz)
1702 {
1703 /* Load the shaders */
1704 struct panfrost_context *ctx = batch->ctx;
1705 struct panfrost_device *dev = pan_device(ctx->base.screen);
1706 struct panfrost_shader_state *vs, *fs;
1707 size_t vs_size, fs_size;
1708
1709 /* Allocate the varying descriptor */
1710
1711 vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
1712 fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
1713 vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
1714 fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
1715
1716 struct panfrost_transfer trans = panfrost_pool_alloc_aligned(
1717 &batch->pool, vs_size + fs_size, MALI_ATTRIBUTE_LENGTH);
1718
1719 struct pipe_stream_output_info *so = &vs->stream_output;
1720 unsigned present = pan_varying_present(vs, fs, dev->quirks);
1721
1722 /* Check if this varying is linked by us. This is the case for
1723 * general-purpose, non-captured varyings. If it is, link it. If it's
1724 * not, use the provided stream out information to determine the
1725 * offset, since it was already linked for us. */
1726
1727 unsigned gen_offsets[32];
1728 enum mali_format gen_formats[32];
1729 memset(gen_offsets, 0, sizeof(gen_offsets));
1730 memset(gen_formats, 0, sizeof(gen_formats));
1731
1732 unsigned gen_stride = 0;
1733 assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
1734 assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
1735
1736 unsigned streamout_offsets[32];
1737
1738 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1739 streamout_offsets[i] = panfrost_streamout_offset(
1740 so->stride[i],
1741 ctx->streamout.offsets[i],
1742 ctx->streamout.targets[i]);
1743 }
1744
1745 struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
1746 struct mali_attribute_packed *ofs = ovs + vs->varying_count;
1747
1748 for (unsigned i = 0; i < vs->varying_count; i++) {
1749 panfrost_emit_varying(ovs + i, vs, fs, vs, present,
1750 ctx->streamout.num_targets, streamout_offsets,
1751 dev->quirks,
1752 gen_offsets, gen_formats, &gen_stride, i, true, false);
1753 }
1754
1755 for (unsigned i = 0; i < fs->varying_count; i++) {
1756 panfrost_emit_varying(ofs + i, fs, vs, vs, present,
1757 ctx->streamout.num_targets, streamout_offsets,
1758 dev->quirks,
1759 gen_offsets, gen_formats, &gen_stride, i, false, true);
1760 }
1761
1762 unsigned xfb_base = pan_xfb_base(present);
1763 struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
1764 MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets),
1765 MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
1766 struct mali_attribute_buffer_packed *varyings =
1767 (struct mali_attribute_buffer_packed *) T.cpu;
1768
1769 /* Emit the stream out buffers */
1770
1771 unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
1772 ctx->vertex_count);
1773
1774 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1775 panfrost_emit_streamout(batch, &varyings[xfb_base + i],
1776 so->stride[i],
1777 ctx->streamout.offsets[i],
1778 out_count,
1779 ctx->streamout.targets[i]);
1780 }
1781
1782 panfrost_emit_varyings(batch,
1783 &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
1784 gen_stride, vertex_count);
1785
1786 /* fp32 vec4 gl_Position */
1787 *position = panfrost_emit_varyings(batch,
1788 &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
1789 sizeof(float) * 4, vertex_count);
1790
1791 if (present & (1 << PAN_VARY_PSIZ)) {
1792 *psiz = panfrost_emit_varyings(batch,
1793 &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
1794 2, vertex_count);
1795 }
1796
1797 pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
1798 pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
1799 pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
1800
1801 *buffers = T.gpu;
1802 *vs_attribs = trans.gpu;
1803 *fs_attribs = trans.gpu + vs_size;
1804 }
1805
1806 void
1807 panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
1808 struct mali_vertex_tiler_prefix *vertex_prefix,
1809 struct mali_draw_packed *vertex_draw,
1810 struct mali_vertex_tiler_prefix *tiler_prefix,
1811 struct mali_draw_packed *tiler_draw,
1812 union midgard_primitive_size *primitive_size)
1813 {
1814 struct panfrost_context *ctx = batch->ctx;
1815 struct panfrost_device *device = pan_device(ctx->base.screen);
1816 bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
1817 struct bifrost_payload_vertex bifrost_vertex = {0,};
1818 struct bifrost_payload_tiler bifrost_tiler = {0,};
1819 struct midgard_payload_vertex_tiler midgard_vertex = {0,};
1820 struct midgard_payload_vertex_tiler midgard_tiler = {0,};
1821 void *vp, *tp;
1822 size_t vp_size, tp_size;
1823
1824 if (device->quirks & IS_BIFROST) {
1825 bifrost_vertex.prefix = *vertex_prefix;
1826 memcpy(&bifrost_vertex.postfix, vertex_draw, MALI_DRAW_LENGTH);
1827 vp = &bifrost_vertex;
1828 vp_size = sizeof(bifrost_vertex);
1829
1830 bifrost_tiler.prefix = *tiler_prefix;
1831 bifrost_tiler.tiler.primitive_size = *primitive_size;
1832 bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
1833 memcpy(&bifrost_tiler.postfix, tiler_draw, MALI_DRAW_LENGTH);
1834 tp = &bifrost_tiler;
1835 tp_size = sizeof(bifrost_tiler);
1836 } else {
1837 midgard_vertex.prefix = *vertex_prefix;
1838 memcpy(&midgard_vertex.postfix, vertex_draw, MALI_DRAW_LENGTH);
1839 vp = &midgard_vertex;
1840 vp_size = sizeof(midgard_vertex);
1841
1842 midgard_tiler.prefix = *tiler_prefix;
1843 memcpy(&midgard_tiler.postfix, tiler_draw, MALI_DRAW_LENGTH);
1844 midgard_tiler.primitive_size = *primitive_size;
1845 tp = &midgard_tiler;
1846 tp_size = sizeof(midgard_tiler);
1847 }
1848
1849 if (wallpapering) {
1850 /* Inject in reverse order, with "predicted" job indices.
1851 * THIS IS A HACK XXX */
1852 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
1853 batch->scoreboard.job_index + 2, tp, tp_size, true);
1854 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
1855 vp, vp_size, true);
1856 return;
1857 }
1858
 1859 /* If rasterizer discard is enabled, only submit the vertex job */
1860
1861 unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
1862 vp, vp_size, false);
1863
1864 if (ctx->rasterizer->base.rasterizer_discard)
1865 return;
1866
1867 panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
1868 false);
1869 }
1870
1871 /* TODO: stop hardcoding this */
1872 mali_ptr
1873 panfrost_emit_sample_locations(struct panfrost_batch *batch)
1874 {
1875 uint16_t locations[] = {
1876 128, 128,
1877 0, 256,
1878 0, 256,
1879 0, 256,
1880 0, 256,
1881 0, 256,
1882 0, 256,
1883 0, 256,
1884 0, 256,
1885 0, 256,
1886 0, 256,
1887 0, 256,
1888 0, 256,
1889 0, 256,
1890 0, 256,
1891 0, 256,
1892 0, 256,
1893 0, 256,
1894 0, 256,
1895 0, 256,
1896 0, 256,
1897 0, 256,
1898 0, 256,
1899 0, 256,
1900 0, 256,
1901 0, 256,
1902 0, 256,
1903 0, 256,
1904 0, 256,
1905 0, 256,
1906 0, 256,
1907 0, 256,
1908 128, 128,
1909 0, 0,
1910 0, 0,
1911 0, 0,
1912 0, 0,
1913 0, 0,
1914 0, 0,
1915 0, 0,
1916 0, 0,
1917 0, 0,
1918 0, 0,
1919 0, 0,
1920 0, 0,
1921 0, 0,
1922 0, 0,
1923 0, 0,
1924 };
1925
1926 return panfrost_pool_upload_aligned(&batch->pool, locations, 96 * sizeof(uint16_t), 64);
1927 }