panfrost: Pack MRT blend shaders into a single BO
[mesa.git] / src / gallium / drivers / panfrost / pan_context.c
/*
 * © Copyright 2018 Alyssa Rosenzweig
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <sys/poll.h>
#include <errno.h>

#include "pan_bo.h"
#include "pan_context.h"
#include "pan_format.h"
#include "panfrost-quirks.h"

#include "util/macros.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "util/u_memory.h"
#include "util/u_vbuf.h"
#include "util/half_float.h"
#include "util/u_helpers.h"
#include "util/format/u_format.h"
#include "util/u_prim.h"
#include "util/u_prim_restart.h"
#include "indices/u_primconvert.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_from_mesa.h"
#include "util/u_math.h"

#include "pan_screen.h"
#include "pan_blending.h"
#include "pan_blend_shaders.h"
#include "pan_util.h"

struct midgard_tiler_descriptor
panfrost_emit_midg_tiler(struct panfrost_batch *batch, unsigned vertex_count)
{
        struct panfrost_screen *screen = pan_screen(batch->ctx->base.screen);
        bool hierarchy = !(screen->quirks & MIDGARD_NO_HIER_TILING);
        struct midgard_tiler_descriptor t = {0};
        unsigned height = batch->key.height;
        unsigned width = batch->key.width;

        t.hierarchy_mask =
                panfrost_choose_hierarchy_mask(width, height, vertex_count, hierarchy);

        /* Compute the polygon header size and use that to offset the body */

        unsigned header_size = panfrost_tiler_header_size(
                                       width, height, t.hierarchy_mask, hierarchy);

        t.polygon_list_size = panfrost_tiler_full_size(
                                      width, height, t.hierarchy_mask, hierarchy);

        /* Sanity check */

        if (vertex_count) {
                struct panfrost_bo *tiler_heap;

                tiler_heap = panfrost_batch_get_tiler_heap(batch);
                t.polygon_list = panfrost_batch_get_polygon_list(batch,
                                                                 header_size +
                                                                 t.polygon_list_size);


                /* Allow the entire tiler heap */
                t.heap_start = tiler_heap->gpu;
                t.heap_end = tiler_heap->gpu + tiler_heap->size;
        } else {
                struct panfrost_bo *tiler_dummy;

                tiler_dummy = panfrost_batch_get_tiler_dummy(batch);
                header_size = MALI_TILER_MINIMUM_HEADER_SIZE;

                /* The tiler is disabled, so don't allow the tiler heap */
                t.heap_start = tiler_dummy->gpu;
                t.heap_end = t.heap_start;

                /* Use a dummy polygon list */
                t.polygon_list = tiler_dummy->gpu;

                /* Disable the tiler */
                if (hierarchy)
                        t.hierarchy_mask |= MALI_TILER_DISABLED;
                else {
                        t.hierarchy_mask = MALI_TILER_USER;
                        t.polygon_list_size = MALI_TILER_MINIMUM_HEADER_SIZE + 4;

                        /* We don't have a WRITE_VALUE job, so write the polygon list manually */
                        uint32_t *polygon_list_body = (uint32_t *) (tiler_dummy->cpu + header_size);
                        polygon_list_body[0] = 0xa0000000; /* TODO: Just that? */
                }
        }

        t.polygon_list_body =
                t.polygon_list + header_size;

        return t;
}

static void
panfrost_clear(
        struct pipe_context *pipe,
        unsigned buffers,
        const union pipe_color_union *color,
        double depth, unsigned stencil)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* TODO: panfrost_get_fresh_batch_for_fbo() instantiates a new batch if
         * the existing batch targeting this FBO has draws. We could probably
         * avoid that by replacing plain clears by quad-draws with a specific
         * color/depth/stencil value, thus avoiding the generation of extra
         * fragment jobs.
         */
        struct panfrost_batch *batch = panfrost_get_fresh_batch_for_fbo(ctx);

        panfrost_batch_add_fbo_bos(batch);
        panfrost_batch_clear(batch, buffers, color, depth, stencil);
}

static void
panfrost_attach_vt_framebuffer(struct panfrost_context *ctx)
{
        struct panfrost_screen *screen = pan_screen(ctx->base.screen);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        /* If we haven't, reserve space for the framebuffer */

        if (!batch->framebuffer.gpu) {
                unsigned size = (screen->quirks & MIDGARD_SFBD) ?
                                sizeof(struct mali_single_framebuffer) :
                                sizeof(struct bifrost_framebuffer);

                batch->framebuffer = panfrost_allocate_transient(batch, size);

                /* Tag the pointer */
                if (!(screen->quirks & MIDGARD_SFBD))
                        batch->framebuffer.gpu |= MALI_MFBD;
        }

        for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i)
                ctx->payloads[i].postfix.framebuffer = batch->framebuffer.gpu;
}

/* Reset per-frame context, called on context initialisation as well as after
 * flushing a frame */

void
panfrost_invalidate_frame(struct panfrost_context *ctx)
{
        for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i)
                ctx->payloads[i].postfix.framebuffer = 0;

        if (ctx->rasterizer)
                ctx->dirty |= PAN_DIRTY_RASTERIZER;

        /* XXX */
        ctx->dirty |= PAN_DIRTY_SAMPLERS | PAN_DIRTY_TEXTURES;

        /* TODO: When does this need to be handled? */
        ctx->active_queries = true;
}

/* In practice, every field of these payloads should be configurable
 * arbitrarily, which means these functions are basically catch-alls for
 * as-of-yet unwavering unknowns */

static void
panfrost_emit_vertex_payload(struct panfrost_context *ctx)
{
        /* 0x2 bit clear on 32-bit T6XX */

        struct midgard_payload_vertex_tiler payload = {
                .gl_enables = 0x4 | 0x2,
        };

        /* Vertex and compute are closely coupled, so share a payload */

        memcpy(&ctx->payloads[PIPE_SHADER_VERTEX], &payload, sizeof(payload));
        memcpy(&ctx->payloads[PIPE_SHADER_COMPUTE], &payload, sizeof(payload));
}

static void
panfrost_emit_tiler_payload(struct panfrost_context *ctx)
{
        struct midgard_payload_vertex_tiler payload = {
                .prefix = {
                        .zero1 = 0xffff, /* Why is this only seen on test-quad-textured? */
                },
        };

        memcpy(&ctx->payloads[PIPE_SHADER_FRAGMENT], &payload, sizeof(payload));
}

static unsigned
translate_tex_wrap(enum pipe_tex_wrap w)
{
        switch (w) {
        case PIPE_TEX_WRAP_REPEAT:
                return MALI_WRAP_REPEAT;

        case PIPE_TEX_WRAP_CLAMP:
                return MALI_WRAP_CLAMP;

        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return MALI_WRAP_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return MALI_WRAP_CLAMP_TO_BORDER;

        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return MALI_WRAP_MIRRORED_REPEAT;

        case PIPE_TEX_WRAP_MIRROR_CLAMP:
                return MALI_WRAP_MIRRORED_CLAMP;

        case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
                return MALI_WRAP_MIRRORED_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
                return MALI_WRAP_MIRRORED_CLAMP_TO_BORDER;

        default:
                unreachable("Invalid wrap");
        }
}

static unsigned
panfrost_translate_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_FUNC_ALWAYS;

        default:
                unreachable("Invalid func");
        }
}

static unsigned
panfrost_translate_stencil_op(enum pipe_stencil_op in)
{
        switch (in) {
        case PIPE_STENCIL_OP_KEEP:
                return MALI_STENCIL_KEEP;

        case PIPE_STENCIL_OP_ZERO:
                return MALI_STENCIL_ZERO;

        case PIPE_STENCIL_OP_REPLACE:
                return MALI_STENCIL_REPLACE;

        case PIPE_STENCIL_OP_INCR:
                return MALI_STENCIL_INCR;

        case PIPE_STENCIL_OP_DECR:
                return MALI_STENCIL_DECR;

        case PIPE_STENCIL_OP_INCR_WRAP:
                return MALI_STENCIL_INCR_WRAP;

        case PIPE_STENCIL_OP_DECR_WRAP:
                return MALI_STENCIL_DECR_WRAP;

        case PIPE_STENCIL_OP_INVERT:
                return MALI_STENCIL_INVERT;

        default:
                unreachable("Invalid stencil op");
        }
}

static void
panfrost_make_stencil_state(const struct pipe_stencil_state *in, struct mali_stencil_test *out)
{
        out->ref = 0; /* Gallium gets it from elsewhere */

        out->mask = in->valuemask;
        out->func = panfrost_translate_compare_func(in->func);
        out->sfail = panfrost_translate_stencil_op(in->fail_op);
        out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
        out->dppass = panfrost_translate_stencil_op(in->zpass_op);
}

static void
panfrost_default_shader_backend(struct panfrost_context *ctx)
{
        struct panfrost_screen *screen = pan_screen(ctx->base.screen);
        struct mali_shader_meta shader = {
                .alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000),

                .unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010,
                .unknown2_4 = MALI_NO_MSAA | 0x4e0,
        };

        /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this is
         * required (independent of 32-bit/64-bit descriptors), or why it's not
         * used on later GPU revisions. Otherwise, all shader jobs fault on
         * these earlier chips (perhaps this is a chicken bit of some kind).
         * More investigation is needed. */

        if (screen->quirks & MIDGARD_SFBD)
                shader.unknown2_4 |= 0x10;

        struct pipe_stencil_state default_stencil = {
                .enabled = 0,
                .func = PIPE_FUNC_ALWAYS,
                .fail_op = MALI_STENCIL_KEEP,
                .zfail_op = MALI_STENCIL_KEEP,
                .zpass_op = MALI_STENCIL_KEEP,
                .writemask = 0xFF,
                .valuemask = 0xFF
        };

        panfrost_make_stencil_state(&default_stencil, &shader.stencil_front);
        shader.stencil_mask_front = default_stencil.writemask;

        panfrost_make_stencil_state(&default_stencil, &shader.stencil_back);
        shader.stencil_mask_back = default_stencil.writemask;

        if (default_stencil.enabled)
                shader.unknown2_4 |= MALI_STENCIL_TEST;

        memcpy(&ctx->fragment_shader_core, &shader, sizeof(shader));
}
/* Generates a vertex/tiler job. This is, in some sense, the heart of the
 * graphics command stream. It should be called once per draw, according to
 * presentations. Set is_tiler for "tiler" jobs (fragment shader jobs, but in
 * Mali parlance, "fragment" refers to framebuffer writeout). Clear it for
 * vertex jobs. */

struct panfrost_transfer
panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        struct mali_job_descriptor_header job = {
                .job_type = is_tiler ? JOB_TYPE_TILER : JOB_TYPE_VERTEX,
                .job_descriptor_size = 1,
        };

        struct midgard_payload_vertex_tiler *payload = is_tiler ? &ctx->payloads[PIPE_SHADER_FRAGMENT] : &ctx->payloads[PIPE_SHADER_VERTEX];

        struct panfrost_transfer transfer = panfrost_allocate_transient(batch, sizeof(job) + sizeof(*payload));
        memcpy(transfer.cpu, &job, sizeof(job));
        memcpy(transfer.cpu + sizeof(job), payload, sizeof(*payload));
        return transfer;
}

mali_ptr
panfrost_vertex_buffer_address(struct panfrost_context *ctx, unsigned i)
{
        struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
        struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);

        return rsrc->bo->gpu + buf->buffer_offset;
}

static bool
panfrost_writes_point_size(struct panfrost_context *ctx)
{
        assert(ctx->shader[PIPE_SHADER_VERTEX]);
        struct panfrost_shader_state *vs = &ctx->shader[PIPE_SHADER_VERTEX]->variants[ctx->shader[PIPE_SHADER_VERTEX]->active_variant];

        return vs->writes_point_size && ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode == MALI_POINTS;
}

/* Stage the attribute descriptors so we can adjust src_offset
 * to let BOs align nicely */

static void
panfrost_stage_attributes(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        struct panfrost_vertex_state *so = ctx->vertex;

        size_t sz = sizeof(struct mali_attr_meta) * PAN_MAX_ATTRIBUTE;
        struct panfrost_transfer transfer = panfrost_allocate_transient(batch, sz);
        struct mali_attr_meta *target = (struct mali_attr_meta *) transfer.cpu;

        /* Copy as-is for the first pass */
        memcpy(target, so->hw, sz);

        /* Fixup offsets for the second pass. Recall that the hardware
         * calculates attribute addresses as:
         *
         *      addr = base + (stride * vtx) + src_offset;
         *
         * However, on Mali, base must be aligned to 64-bytes, so we
         * instead let:
         *
         *      base' = base & ~63 = base - (base & 63)
         *
         * To compensate when using base' (see emit_vertex_data), we have
         * to adjust src_offset by the masked off piece:
         *
         *      addr' = base' + (stride * vtx) + (src_offset + (base & 63))
         *            = base - (base & 63) + (stride * vtx) + src_offset + (base & 63)
         *            = base + (stride * vtx) + src_offset
         *            = addr;
         *
         * QED.
         */
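
        /* Worked example with illustrative (not real-trace) numbers: for
         * base = 0x1003 and src_offset = 4, we take base' = 0x1000 and
         * src_offset' = 4 + 3 = 7, so base' + src_offset' = 0x1007 =
         * base + src_offset, as required. */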

        unsigned start = ctx->payloads[PIPE_SHADER_VERTEX].offset_start;

        for (unsigned i = 0; i < so->num_elements; ++i) {
                unsigned vbi = so->pipe[i].vertex_buffer_index;
                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
                mali_ptr addr = panfrost_vertex_buffer_address(ctx, vbi);

                /* Adjust by the masked off bits of the offset */
                target[i].src_offset += (addr & 63);

                /* Also, somewhat obscurely per-instance data needs to be
                 * offset in response to a delayed start in an indexed draw */

                if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
                        target[i].src_offset -= buf->stride * start;
        }

        /* Let's also include vertex builtins */

        target[PAN_VERTEX_ID].format = MALI_R32UI;
        target[PAN_VERTEX_ID].swizzle = panfrost_get_default_swizzle(1);

        target[PAN_INSTANCE_ID].format = MALI_R32UI;
        target[PAN_INSTANCE_ID].swizzle = panfrost_get_default_swizzle(1);

        ctx->payloads[PIPE_SHADER_VERTEX].postfix.attribute_meta = transfer.gpu;
}

static void
panfrost_upload_sampler_descriptors(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        size_t desc_size = sizeof(struct mali_sampler_descriptor);

        for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
                mali_ptr upload = 0;

                if (ctx->sampler_count[t]) {
                        size_t transfer_size = desc_size * ctx->sampler_count[t];

                        struct panfrost_transfer transfer =
                                panfrost_allocate_transient(batch, transfer_size);

                        struct mali_sampler_descriptor *desc =
                                (struct mali_sampler_descriptor *) transfer.cpu;

                        for (int i = 0; i < ctx->sampler_count[t]; ++i)
                                desc[i] = ctx->samplers[t][i]->hw;

                        upload = transfer.gpu;
                }

                ctx->payloads[t].postfix.sampler_descriptor = upload;
        }
}

static enum mali_texture_layout
panfrost_layout_for_texture(struct panfrost_resource *rsrc)
{
        /* TODO: other linear depth textures */
        bool is_depth = rsrc->base.format == PIPE_FORMAT_Z32_UNORM;

        switch (rsrc->layout) {
        case PAN_AFBC:
                return MALI_TEXTURE_AFBC;
        case PAN_TILED:
                assert(!is_depth);
                return MALI_TEXTURE_TILED;
        case PAN_LINEAR:
                return is_depth ? MALI_TEXTURE_TILED : MALI_TEXTURE_LINEAR;
        default:
                unreachable("Invalid texture layout");
        }
}

static mali_ptr
panfrost_upload_tex(
        struct panfrost_context *ctx,
        enum pipe_shader_type st,
        struct panfrost_sampler_view *view)
{
        if (!view)
                return (mali_ptr) 0;

        struct pipe_sampler_view *pview = &view->base;
        struct panfrost_resource *rsrc = pan_resource(pview->texture);
        mali_ptr descriptor_gpu;
        void *descriptor;

        /* Do we interleave an explicit stride with every element? */

        bool has_manual_stride = view->manual_stride;

        /* For easy access */

        bool is_buffer = pview->target == PIPE_BUFFER;
        unsigned first_level = is_buffer ? 0 : pview->u.tex.first_level;
        unsigned last_level = is_buffer ? 0 : pview->u.tex.last_level;
        unsigned first_layer = is_buffer ? 0 : pview->u.tex.first_layer;
        unsigned last_layer = is_buffer ? 0 : pview->u.tex.last_layer;

        /* Lower-bit is set when sampling from colour AFBC */
        bool is_afbc = rsrc->layout == PAN_AFBC;
        bool is_zs = rsrc->base.bind & PIPE_BIND_DEPTH_STENCIL;
        unsigned afbc_bit = (is_afbc && !is_zs) ? 1 : 0;

        /* Add the BO to the job so it's retained until the job is done. */
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        panfrost_batch_add_bo(batch, rsrc->bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        /* Add the usage flags in, since they can change across the CSO
         * lifetime due to layout switches */

        view->hw.format.layout = panfrost_layout_for_texture(rsrc);
        view->hw.format.manual_stride = has_manual_stride;

        /* Inject the addresses in, interleaving mip levels, cube faces, and
         * strides in that order */

        unsigned idx = 0;
        unsigned levels = 1 + last_level - first_level;
        unsigned layers = 1 + last_layer - first_layer;
        unsigned num_elements = levels * layers;
        if (has_manual_stride)
                num_elements *= 2;

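        /* Sketch of the resulting payload, assuming manual strides with two
         * levels and two layers (illustrative): [mip0/layer0 ptr, stride,
         * mip0/layer1 ptr, stride, mip1/layer0 ptr, stride, mip1/layer1 ptr,
         * stride] -- level-major, since the loop below walks layers within
         * each level. */
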
        descriptor = malloc(sizeof(struct mali_texture_descriptor) +
                            sizeof(mali_ptr) * num_elements);
        memcpy(descriptor, &view->hw, sizeof(struct mali_texture_descriptor));

        mali_ptr *pointers_and_strides = descriptor +
                                         sizeof(struct mali_texture_descriptor);

        for (unsigned l = first_level; l <= last_level; ++l) {
                for (unsigned f = first_layer; f <= last_layer; ++f) {

                        pointers_and_strides[idx++] =
                                panfrost_get_texture_address(rsrc, l, f) + afbc_bit;

                        if (has_manual_stride) {
                                pointers_and_strides[idx++] =
                                        rsrc->slices[l].stride;
                        }
                }
        }

        descriptor_gpu = panfrost_upload_transient(batch, descriptor,
                                 sizeof(struct mali_texture_descriptor) +
                                 num_elements * sizeof(*pointers_and_strides));
        free(descriptor);

        return descriptor_gpu;
}

static void
panfrost_upload_texture_descriptors(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
                mali_ptr trampoline = 0;

                if (ctx->sampler_view_count[t]) {
                        uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];

                        for (int i = 0; i < ctx->sampler_view_count[t]; ++i)
                                trampolines[i] =
                                        panfrost_upload_tex(ctx, t, ctx->sampler_views[t][i]);

                        trampoline = panfrost_upload_transient(batch, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]);
                }

                ctx->payloads[t].postfix.texture_trampoline = trampoline;
        }
}

struct sysval_uniform {
        union {
                float f[4];
                int32_t i[4];
                uint32_t u[4];
                uint64_t du[2];
        };
};

static void panfrost_upload_viewport_scale_sysval(struct panfrost_context *ctx,
                struct sysval_uniform *uniform)
{
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->scale[0];
        uniform->f[1] = vp->scale[1];
        uniform->f[2] = vp->scale[2];
}

static void panfrost_upload_viewport_offset_sysval(struct panfrost_context *ctx,
                struct sysval_uniform *uniform)
{
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->translate[0];
        uniform->f[1] = vp->translate[1];
        uniform->f[2] = vp->translate[2];
}

static void panfrost_upload_txs_sysval(struct panfrost_context *ctx,
                                       enum pipe_shader_type st,
                                       unsigned int sysvalid,
                                       struct sysval_uniform *uniform)
{
        unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
        unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
        bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
        struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;

        assert(dim);
        uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);

        if (dim > 1)
                uniform->i[1] = u_minify(tex->texture->height0,
                                         tex->u.tex.first_level);

        if (dim > 2)
                uniform->i[2] = u_minify(tex->texture->depth0,
                                         tex->u.tex.first_level);

        if (is_array)
                uniform->i[dim] = tex->texture->array_size;
}

static void panfrost_upload_ssbo_sysval(
        struct panfrost_context *ctx,
        enum pipe_shader_type st,
        unsigned ssbo_id,
        struct sysval_uniform *uniform)
{
        assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
        struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];

        /* Compute address */
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;

        panfrost_batch_add_bo(batch, bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
                              panfrost_bo_access_for_stage(st));

        /* Upload address and size as sysval */
        uniform->du[0] = bo->gpu + sb.buffer_offset;
        uniform->u[2] = sb.buffer_size;
}

static void
panfrost_upload_sampler_sysval(
        struct panfrost_context *ctx,
        enum pipe_shader_type st,
        unsigned sampler_index,
        struct sysval_uniform *uniform)
{
        struct pipe_sampler_state *sampl =
                &ctx->samplers[st][sampler_index]->base;

        uniform->f[0] = sampl->min_lod;
        uniform->f[1] = sampl->max_lod;
        uniform->f[2] = sampl->lod_bias;

        /* Even without any errata, Midgard represents "no mipmapping" as
         * fixing the LOD with the clamps; keep behaviour consistent. c.f.
         * panfrost_create_sampler_state which also explains our choice of
         * epsilon value (again to keep behaviour consistent) */

        if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
                uniform->f[1] = uniform->f[0] + (1.0/256.0);
}

static void panfrost_upload_num_work_groups_sysval(struct panfrost_context *ctx,
                struct sysval_uniform *uniform)
{
        uniform->u[0] = ctx->compute_grid->grid[0];
        uniform->u[1] = ctx->compute_grid->grid[1];
        uniform->u[2] = ctx->compute_grid->grid[2];
}

static void panfrost_upload_sysvals(struct panfrost_context *ctx, void *buf,
                                    struct panfrost_shader_state *ss,
                                    enum pipe_shader_type st)
{
        struct sysval_uniform *uniforms = (void *)buf;

        for (unsigned i = 0; i < ss->sysval_count; ++i) {
                int sysval = ss->sysval[i];

                switch (PAN_SYSVAL_TYPE(sysval)) {
                case PAN_SYSVAL_VIEWPORT_SCALE:
                        panfrost_upload_viewport_scale_sysval(ctx, &uniforms[i]);
                        break;
                case PAN_SYSVAL_VIEWPORT_OFFSET:
                        panfrost_upload_viewport_offset_sysval(ctx, &uniforms[i]);
                        break;
                case PAN_SYSVAL_TEXTURE_SIZE:
                        panfrost_upload_txs_sysval(ctx, st, PAN_SYSVAL_ID(sysval),
                                                   &uniforms[i]);
                        break;
                case PAN_SYSVAL_SSBO:
                        panfrost_upload_ssbo_sysval(ctx, st, PAN_SYSVAL_ID(sysval),
                                                    &uniforms[i]);
                        break;
                case PAN_SYSVAL_NUM_WORK_GROUPS:
                        panfrost_upload_num_work_groups_sysval(ctx, &uniforms[i]);
                        break;
                case PAN_SYSVAL_SAMPLER:
                        panfrost_upload_sampler_sysval(ctx, st, PAN_SYSVAL_ID(sysval),
                                                       &uniforms[i]);
                        break;
                default:
                        assert(0);
                }
        }
}

static const void *
panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf, unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);

        if (rsrc)
                return rsrc->bo->cpu;
        else if (cb->user_buffer)
                return cb->user_buffer;
        else
                unreachable("No constant buffer");
}

static mali_ptr
panfrost_map_constant_buffer_gpu(
        struct panfrost_context *ctx,
        enum pipe_shader_type st,
        struct panfrost_constant_buffer *buf,
        unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        if (rsrc) {
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      panfrost_bo_access_for_stage(st));
                return rsrc->bo->gpu;
        } else if (cb->user_buffer) {
                return panfrost_upload_transient(batch, cb->user_buffer, cb->buffer_size);
        } else {
                unreachable("No constant buffer");
        }
}

/* Compute number of UBOs active (more specifically, compute the highest UBO
 * number addressable -- if there are gaps, include them in the count anyway).
 * We always include UBO #0 in the count, since we *need* uniforms enabled for
 * sysvals. */

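/* For instance (illustrative), enabled_mask = 0x9 (UBOs #0 and #3 enabled)
 * gives mask = 0x9 and a count of 32 - clz(0x9) = 4, so the gap at #1-#2 is
 * still counted. */
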
static unsigned
panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage)
{
        unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1;
        return 32 - __builtin_clz(mask);
}

/* Fixes up a shader state with current state */

static void
panfrost_patch_shader_state(struct panfrost_context *ctx,
                            enum pipe_shader_type stage)
{
        struct panfrost_shader_variants *all = ctx->shader[stage];

        if (!all) {
                ctx->payloads[stage].postfix.shader = 0;
                return;
        }

        struct panfrost_shader_state *ss = &all->variants[all->active_variant];

        ss->tripipe->texture_count = ctx->sampler_view_count[stage];
        ss->tripipe->sampler_count = ctx->sampler_count[stage];

        ss->tripipe->midgard1.flags = 0x220;

        unsigned ubo_count = panfrost_ubo_count(ctx, stage);
        ss->tripipe->midgard1.uniform_buffer_count = ubo_count;

        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        /* Add the shader BO to the batch. */
        panfrost_batch_add_bo(batch, ss->bo,
                              PAN_BO_ACCESS_PRIVATE |
                              PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(stage));

        ctx->payloads[stage].postfix.shader = panfrost_upload_transient(batch,
                                              ss->tripipe,
                                              sizeof(struct mali_shader_meta));
}

/* Go through dirty flags and actualise them in the cmdstream. */

void
panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        struct panfrost_screen *screen = pan_screen(ctx->base.screen);

        panfrost_batch_add_fbo_bos(batch);
        panfrost_attach_vt_framebuffer(ctx);

        if (with_vertex_data) {
                panfrost_emit_vertex_data(batch);

                /* Varyings emitted for -all- geometry */
                unsigned total_count = ctx->padded_count * ctx->instance_count;
                panfrost_emit_varying_descriptor(ctx, total_count);
        }

        bool msaa = ctx->rasterizer->base.multisample;

        if (ctx->dirty & PAN_DIRTY_RASTERIZER) {
                ctx->payloads[PIPE_SHADER_FRAGMENT].gl_enables = ctx->rasterizer->tiler_gl_enables;

                /* TODO: Sample size */
                SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_HAS_MSAA, msaa);
                SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !msaa);
        }

        panfrost_batch_set_requirements(batch);

        if (ctx->occlusion_query) {
                ctx->payloads[PIPE_SHADER_FRAGMENT].gl_enables |= MALI_OCCLUSION_QUERY;
                ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.occlusion_counter = ctx->occlusion_query->bo->gpu;
        }

        panfrost_patch_shader_state(ctx, PIPE_SHADER_VERTEX);
        panfrost_patch_shader_state(ctx, PIPE_SHADER_COMPUTE);

        if (ctx->dirty & (PAN_DIRTY_RASTERIZER | PAN_DIRTY_VS)) {
                /* Check if we need to link the gl_PointSize varying */
                if (!panfrost_writes_point_size(ctx)) {
                        /* If the size is constant, write it out. Otherwise,
                         * don't touch primitive_size (since we would clobber
                         * the pointer there) */

                        bool points = ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode == MALI_POINTS;

                        ctx->payloads[PIPE_SHADER_FRAGMENT].primitive_size.constant = points ?
                                ctx->rasterizer->base.point_size :
                                ctx->rasterizer->base.line_width;
                }
        }

        /* TODO: Maybe dirty track FS, maybe not. For now, it's transient. */
        if (ctx->shader[PIPE_SHADER_FRAGMENT])
                ctx->dirty |= PAN_DIRTY_FS;

        if (ctx->dirty & PAN_DIRTY_FS) {
                assert(ctx->shader[PIPE_SHADER_FRAGMENT]);
                struct panfrost_shader_state *variant = &ctx->shader[PIPE_SHADER_FRAGMENT]->variants[ctx->shader[PIPE_SHADER_FRAGMENT]->active_variant];

                panfrost_patch_shader_state(ctx, PIPE_SHADER_FRAGMENT);

#define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name

                COPY(shader);
                COPY(attribute_count);
                COPY(varying_count);
                COPY(texture_count);
                COPY(sampler_count);
                COPY(midgard1.uniform_count);
                COPY(midgard1.uniform_buffer_count);
                COPY(midgard1.work_count);
                COPY(midgard1.flags);
                COPY(midgard1.unknown2);

#undef COPY

                /* Get blending setup */
                unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);

                struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
                unsigned shader_offset = 0;
                struct panfrost_bo *shader_bo = NULL;

                for (unsigned c = 0; c < rt_count; ++c) {
                        blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo, &shader_offset);
                }

                /* If there is a blend shader, work registers are shared. XXX: opt */

                for (unsigned c = 0; c < rt_count; ++c) {
                        if (blend[c].is_shader)
                                ctx->fragment_shader_core.midgard1.work_count = 16;
                }

                /* Depending on whether it's legal to do so in the given
                 * shader, we try to enable early-z testing (or forward-pixel
                 * kill?) */

                SET_BIT(ctx->fragment_shader_core.midgard1.flags, MALI_EARLY_Z, !variant->can_discard);

                /* Any time texturing is used, derivatives are implicitly
                 * calculated, so we need to enable helper invocations */

                SET_BIT(ctx->fragment_shader_core.midgard1.flags, MALI_HELPER_INVOCATIONS, variant->helper_invocations);

                /* Assign the stencil refs late */

                unsigned front_ref = ctx->stencil_ref.ref_value[0];
                unsigned back_ref = ctx->stencil_ref.ref_value[1];
                bool back_enab = ctx->depth_stencil->stencil[1].enabled;

                ctx->fragment_shader_core.stencil_front.ref = front_ref;
                ctx->fragment_shader_core.stencil_back.ref = back_enab ? back_ref : front_ref;

                /* CAN_DISCARD should be set if the fragment shader possibly
                 * contains a 'discard' instruction. It is likely this is
                 * related to optimizations related to forward-pixel kill, as
                 * per "Mali Performance 3: Is EGL_BUFFER_PRESERVED a good
                 * thing?" by Peter Harris
                 */

                SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_CAN_DISCARD, variant->can_discard);
                SET_BIT(ctx->fragment_shader_core.midgard1.flags, 0x400, variant->can_discard);

                /* Even on MFBD, the shader descriptor gets blend shaders. It's
                 * *also* copied to the blend_meta appended (by convention),
                 * but this is the field actually read by the hardware. (Or
                 * maybe both are read...?). Specify the last RTi with a blend
                 * shader. */

                ctx->fragment_shader_core.blend.shader = 0;

                for (signed rt = (rt_count - 1); rt >= 0; --rt) {
                        if (blend[rt].is_shader) {
                                ctx->fragment_shader_core.blend.shader =
                                        blend[rt].shader.gpu | blend[rt].shader.first_tag;
                                break;
                        }
                }

                if (screen->quirks & MIDGARD_SFBD) {
                        /* On single render-target (SFBD) platforms, the blend
                         * information is inside the shader meta itself. We
                         * additionally need to signal CAN_DISCARD for nontrivial blend
                         * modes (so we're able to read back the destination buffer) */

                        SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_HAS_BLEND_SHADER, blend[0].is_shader);

                        if (!blend[0].is_shader) {
                                ctx->fragment_shader_core.blend.equation =
                                        *blend[0].equation.equation;
                                ctx->fragment_shader_core.blend.constant =
                                        blend[0].equation.constant;
                        }

                        SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_CAN_DISCARD, !blend[0].no_blending);
                }

                size_t size = sizeof(struct mali_shader_meta) + (sizeof(struct midgard_blend_rt) * rt_count);
                struct panfrost_transfer transfer = panfrost_allocate_transient(batch, size);
                memcpy(transfer.cpu, &ctx->fragment_shader_core, sizeof(struct mali_shader_meta));

                ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.shader = transfer.gpu;

                if (!(screen->quirks & MIDGARD_SFBD)) {
                        /* Additional blend descriptor tacked on for jobs using MFBD */

                        struct midgard_blend_rt rts[4];

                        for (unsigned i = 0; i < rt_count; ++i) {
                                rts[i].flags = 0x200;

                                bool is_srgb =
                                        (ctx->pipe_framebuffer.nr_cbufs > i) &&
                                        (ctx->pipe_framebuffer.cbufs[i]) &&
                                        util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);

                                SET_BIT(rts[i].flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
                                SET_BIT(rts[i].flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
                                SET_BIT(rts[i].flags, MALI_BLEND_SRGB, is_srgb);
                                SET_BIT(rts[i].flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);

                                /* TODO: sRGB in blend shaders is currently
                                 * unimplemented. Contact me (Alyssa) if you're
                                 * interested in working on this. We have
                                 * native Midgard ops for helping here, but
                                 * they're not well-understood yet. */

                                assert(!(is_srgb && blend[i].is_shader));

                                if (blend[i].is_shader) {
                                        rts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
                                } else {
                                        rts[i].blend.equation = *blend[i].equation.equation;
                                        rts[i].blend.constant = blend[i].equation.constant;
                                }
                        }

                        memcpy(transfer.cpu + sizeof(struct mali_shader_meta), rts, sizeof(rts[0]) * rt_count);
                }
        }

        /* We stage to transient, so always dirty.. */
        if (ctx->vertex)
                panfrost_stage_attributes(ctx);

        if (ctx->dirty & PAN_DIRTY_SAMPLERS)
                panfrost_upload_sampler_descriptors(ctx);

        if (ctx->dirty & PAN_DIRTY_TEXTURES)
                panfrost_upload_texture_descriptors(ctx);

        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        for (int i = 0; i < PIPE_SHADER_TYPES; ++i) {
                struct panfrost_shader_variants *all = ctx->shader[i];

                if (!all)
                        continue;

                struct panfrost_constant_buffer *buf = &ctx->constant_buffer[i];

                struct panfrost_shader_state *ss = &all->variants[all->active_variant];

                /* Uniforms are implicitly UBO #0 */
                bool has_uniforms = buf->enabled_mask & (1 << 0);

                /* Allocate room for the sysval and the uniforms */
                size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
                size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
                size_t size = sys_size + uniform_size;
                struct panfrost_transfer transfer = panfrost_allocate_transient(batch, size);

                /* Upload sysvals requested by the shader */
                panfrost_upload_sysvals(ctx, transfer.cpu, ss, i);

                /* Upload uniforms */
                if (has_uniforms) {
                        const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
                        memcpy(transfer.cpu + sys_size, cpu, uniform_size);
                }

                int uniform_count =
                        ctx->shader[i]->variants[ctx->shader[i]->active_variant].uniform_count;

                struct mali_vertex_tiler_postfix *postfix =
                        &ctx->payloads[i].postfix;

                /* Next up, attach UBOs. UBO #0 is the uniforms we just
                 * uploaded */

                unsigned ubo_count = panfrost_ubo_count(ctx, i);
                assert(ubo_count >= 1);

                size_t sz = sizeof(struct mali_uniform_buffer_meta) * ubo_count;
                struct mali_uniform_buffer_meta ubos[PAN_MAX_CONST_BUFFERS];

                /* Upload uniforms as a UBO */
                ubos[0].size = MALI_POSITIVE((2 + uniform_count));
                ubos[0].ptr = transfer.gpu >> 2;

                /* The rest are honest-to-goodness UBOs */

                for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
                        size_t usz = buf->cb[ubo].buffer_size;

                        bool enabled = buf->enabled_mask & (1 << ubo);
                        bool empty = usz == 0;

                        if (!enabled || empty) {
                                /* Stub out disabled UBOs to catch accesses */

                                ubos[ubo].size = 0;
                                ubos[ubo].ptr = 0xDEAD0000;
                                continue;
                        }

                        mali_ptr gpu = panfrost_map_constant_buffer_gpu(ctx, i, buf, ubo);

                        unsigned bytes_per_field = 16;
                        unsigned aligned = ALIGN_POT(usz, bytes_per_field);
                        unsigned fields = aligned / bytes_per_field;

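                        /* e.g. (illustrative) a 40-byte UBO: aligned = 48,
                         * fields = 3, and the encoded size is
                         * MALI_POSITIVE(3) = 2, since MALI_POSITIVE encodes
                         * n - 1. */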
                        ubos[ubo].size = MALI_POSITIVE(fields);
                        ubos[ubo].ptr = gpu >> 2;
                }

                mali_ptr ubufs = panfrost_upload_transient(batch, ubos, sz);
                postfix->uniforms = transfer.gpu;
                postfix->uniform_buffers = ubufs;

                buf->dirty_mask = 0;
        }

        /* TODO: Upload the viewport somewhere more appropriate */

        /* Clip bounds are encoded as floats. The viewport itself is encoded as
         * (somewhat) asymmetric ints. */
        const struct pipe_scissor_state *ss = &ctx->scissor;

        struct mali_viewport view = {
                /* By default, do no viewport clipping, i.e. clip to (-inf,
                 * inf) in each direction. Clipping to the viewport in theory
                 * should work, but in practice causes issues when we're not
                 * explicitly trying to scissor */

                .clip_minx = -INFINITY,
                .clip_miny = -INFINITY,
                .clip_maxx = INFINITY,
                .clip_maxy = INFINITY,
        };

        /* Always scissor to the viewport by default. */
        float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
        float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));

        float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
        float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));

        float minz = (vp->translate[2] - fabsf(vp->scale[2]));
        float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
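
        /* These bounds follow from the Gallium viewport transform: NDC
         * coordinates in [-1, 1] map to translate +/- scale, so taking
         * translate -/+ |scale| recovers the screen-space extents even when
         * an axis is flipped (negative scale). */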

        /* Apply the scissor test */

        unsigned minx, miny, maxx, maxy;

        if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
                minx = MAX2(ss->minx, vp_minx);
                miny = MAX2(ss->miny, vp_miny);
                maxx = MIN2(ss->maxx, vp_maxx);
                maxy = MIN2(ss->maxy, vp_maxy);
        } else {
                minx = vp_minx;
                miny = vp_miny;
                maxx = vp_maxx;
                maxy = vp_maxy;
        }

        /* Hardware needs the min/max to be strictly ordered, so flip if we
         * need to. The viewport transformation in the vertex shader will
         * handle the negatives if we don't */

        if (miny > maxy) {
                unsigned temp = miny;
                miny = maxy;
                maxy = temp;
        }

        if (minx > maxx) {
                unsigned temp = minx;
                minx = maxx;
                maxx = temp;
        }

        if (minz > maxz) {
                float temp = minz;
                minz = maxz;
                maxz = temp;
        }

        /* Clamp to the framebuffer size as a last check */

        minx = MIN2(ctx->pipe_framebuffer.width, minx);
        maxx = MIN2(ctx->pipe_framebuffer.width, maxx);

        miny = MIN2(ctx->pipe_framebuffer.height, miny);
        maxy = MIN2(ctx->pipe_framebuffer.height, maxy);

        /* Update the job, unless we're doing wallpapering (whose lack of
         * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
         * just... be faster :) */

        if (!ctx->wallpaper_batch)
                panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);

        /* Upload */

        view.viewport0[0] = minx;
        view.viewport1[0] = MALI_POSITIVE(maxx);

        view.viewport0[1] = miny;
        view.viewport1[1] = MALI_POSITIVE(maxy);

        view.clip_minz = minz;
        view.clip_maxz = maxz;

        ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.viewport =
                panfrost_upload_transient(batch,
                                          &view,
                                          sizeof(struct mali_viewport));

        ctx->dirty = 0;
}

/* Corresponds to exactly one draw, but does not submit anything */

static void
panfrost_queue_draw(struct panfrost_context *ctx)
{
        /* Handle dirty flags now */
        panfrost_emit_for_draw(ctx, true);

        /* If rasterizer discard is enabled, only submit the vertex job */

        bool rasterizer_discard = ctx->rasterizer
                                  && ctx->rasterizer->base.rasterizer_discard;

        struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false);
        struct panfrost_transfer tiler;

        if (!rasterizer_discard)
                tiler = panfrost_vertex_tiler_job(ctx, true);

        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        if (rasterizer_discard)
                panfrost_scoreboard_queue_vertex_job(batch, vertex, FALSE);
        else if (ctx->wallpaper_batch && batch->first_tiler.gpu)
                panfrost_scoreboard_queue_fused_job_prepend(batch, vertex, tiler);
        else
                panfrost_scoreboard_queue_fused_job(batch, vertex, tiler);

        for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i) {
                struct panfrost_shader_variants *all = ctx->shader[i];

                if (!all)
                        continue;

                struct panfrost_shader_state *ss = &all->variants[all->active_variant];
                batch->stack_size = MAX2(batch->stack_size, ss->stack_size);
        }
}

/* The entire frame is in memory -- send it off to the kernel! */

void
panfrost_flush(
        struct pipe_context *pipe,
        struct pipe_fence_handle **fence,
        unsigned flags)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct util_dynarray fences;

        /* We must collect the fences before the flush is done, otherwise we'll
         * lose track of them.
         */
        if (fence) {
                util_dynarray_init(&fences, NULL);
                hash_table_foreach(ctx->batches, hentry) {
                        struct panfrost_batch *batch = hentry->data;

                        panfrost_batch_fence_reference(batch->out_sync);
                        util_dynarray_append(&fences,
                                             struct panfrost_batch_fence *,
                                             batch->out_sync);
                }
        }

        /* Submit all pending jobs */
        panfrost_flush_all_batches(ctx, false);

        if (fence) {
                struct panfrost_fence *f = panfrost_fence_create(ctx, &fences);
                pipe->screen->fence_reference(pipe->screen, fence, NULL);
                *fence = (struct pipe_fence_handle *)f;

                util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence)
                        panfrost_batch_fence_unreference(*fence);

                util_dynarray_fini(&fences);
        }
}

#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c;

static int
g2m_draw_mode(enum pipe_prim_type mode)
{
        switch (mode) {
                DEFINE_CASE(POINTS);
                DEFINE_CASE(LINES);
                DEFINE_CASE(LINE_LOOP);
                DEFINE_CASE(LINE_STRIP);
                DEFINE_CASE(TRIANGLES);
                DEFINE_CASE(TRIANGLE_STRIP);
                DEFINE_CASE(TRIANGLE_FAN);
                DEFINE_CASE(QUADS);
                DEFINE_CASE(QUAD_STRIP);
                DEFINE_CASE(POLYGON);

        default:
                unreachable("Invalid draw mode");
        }
}

#undef DEFINE_CASE

static unsigned
panfrost_translate_index_size(unsigned size)
{
        switch (size) {
        case 1:
                return MALI_DRAW_INDEXED_UINT8;

        case 2:
                return MALI_DRAW_INDEXED_UINT16;

        case 4:
                return MALI_DRAW_INDEXED_UINT32;

        default:
                unreachable("Invalid index size");
        }
}

/* Gets a GPU address for the associated index buffer. Only guaranteed to be
 * good for the duration of the draw (transient), though it could last longer */

static mali_ptr
panfrost_get_index_buffer_mapped(struct panfrost_context *ctx, const struct pipe_draw_info *info)
{
        struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);

        off_t offset = info->start * info->index_size;
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        if (!info->has_user_indices) {
                /* Only resources can be directly mapped */
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      PAN_BO_ACCESS_VERTEX_TILER);
                return rsrc->bo->gpu + offset;
        } else {
                /* Otherwise, we need to upload to transient memory */
                const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
                return panfrost_upload_transient(batch, ibuf8 + offset, info->count * info->index_size);
        }
}

static bool
panfrost_scissor_culls_everything(struct panfrost_context *ctx)
{
        const struct pipe_scissor_state *ss = &ctx->scissor;

        /* Check if we're scissoring at all */

        if (!(ctx->rasterizer && ctx->rasterizer->base.scissor))
                return false;

        return (ss->minx == ss->maxx) || (ss->miny == ss->maxy);
}

/* Count generated primitives (when there is no geom/tess shaders) for
 * transform feedback */

static void
panfrost_statistics_record(
        struct panfrost_context *ctx,
        const struct pipe_draw_info *info)
{
        if (!ctx->active_queries)
                return;

        uint32_t prims = u_prims_for_vertices(info->mode, info->count);
        ctx->prims_generated += prims;

        if (!ctx->streamout.num_targets)
                return;

        ctx->tf_prims_generated += prims;
}

static void
panfrost_draw_vbo(
        struct pipe_context *pipe,
        const struct pipe_draw_info *info)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* First of all, check the scissor to see if anything is drawn at all.
         * If it's not, we drop the draw (mostly a conformance issue;
         * well-behaved apps shouldn't hit this) */

        if (panfrost_scissor_culls_everything(ctx))
                return;

        int mode = info->mode;

        /* Fallback unsupported restart index */
        unsigned primitive_index = (1 << (info->index_size * 8)) - 1;

        if (info->primitive_restart && info->index_size
            && info->restart_index != primitive_index) {
                util_draw_vbo_without_prim_restart(pipe, info);
                return;
        }

        /* Fallback for unsupported modes */

        assert(ctx->rasterizer != NULL);

        if (!(ctx->draw_modes & (1 << mode))) {
                if (mode == PIPE_PRIM_QUADS && info->count == 4 && !ctx->rasterizer->base.flatshade) {
                        mode = PIPE_PRIM_TRIANGLE_FAN;
                } else {
                        if (info->count < 4) {
                                /* Degenerate case? */
                                return;
                        }

                        util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
                        util_primconvert_draw_vbo(ctx->primconvert, info);
                        return;
                }
        }

        ctx->payloads[PIPE_SHADER_VERTEX].offset_start = info->start;
        ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = info->start;

        /* Now that we have a guaranteed terminating path, find the job.
         * Assignment commented out to prevent unused warning */

        /* struct panfrost_batch *batch = */ panfrost_get_batch_for_fbo(ctx);

        ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode = g2m_draw_mode(mode);

        /* Take into account a negative bias */
        ctx->vertex_count = info->count + abs(info->index_bias);
        ctx->instance_count = info->instance_count;
        ctx->active_prim = info->mode;

        /* For non-indexed draws, they're the same */
        unsigned vertex_count = ctx->vertex_count;

        unsigned draw_flags = 0;

        /* The draw flags determine how primitive size is interpreted */

        if (panfrost_writes_point_size(ctx))
                draw_flags |= MALI_DRAW_VARYING_SIZE;

        if (info->primitive_restart)
                draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;

        /* These don't make much sense */

        draw_flags |= 0x3000;

        if (ctx->rasterizer && ctx->rasterizer->base.flatshade_first)
                draw_flags |= MALI_DRAW_FLATSHADE_FIRST;

        panfrost_statistics_record(ctx, info);

        if (info->index_size) {
                /* Calculate the min/max index used so we can figure out how
                 * many times to invoke the vertex shader */

                /* Fetch / calculate index bounds */
                unsigned min_index = 0, max_index = 0;

                if (info->max_index == ~0u) {
                        u_vbuf_get_minmax_index(pipe, info, &min_index, &max_index);
                } else {
                        min_index = info->min_index;
                        max_index = info->max_index;
                }

                /* Use the corresponding values */
                vertex_count = max_index - min_index + 1;
                ctx->payloads[PIPE_SHADER_VERTEX].offset_start = min_index + info->index_bias;
                ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = min_index + info->index_bias;

                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = -min_index;
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(info->count);

                //assert(!info->restart_index); /* TODO: Research */

                draw_flags |= panfrost_translate_index_size(info->index_size);
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices = panfrost_get_index_buffer_mapped(ctx, info);
        } else {
                /* Index count == vertex count, if no indexing is applied, as
                 * if it is internally indexed in the expected order */

                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = 0;
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(ctx->vertex_count);

                /* Reverse index state */
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices = (u64) NULL;
        }

        /* Dispatch "compute jobs" for the vertex/tiler pair as (1,
         * vertex_count, 1) */

        panfrost_pack_work_groups_fused(
                &ctx->payloads[PIPE_SHADER_VERTEX].prefix,
                &ctx->payloads[PIPE_SHADER_FRAGMENT].prefix,
                1, vertex_count, info->instance_count,
                1, 1, 1);

        ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.unknown_draw = draw_flags;

        /* Encode the padded vertex count */

        if (info->instance_count > 1) {
                ctx->padded_count = panfrost_padded_vertex_count(vertex_count);

                unsigned shift = __builtin_ctz(ctx->padded_count);
                unsigned k = ctx->padded_count >> (shift + 1);
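
                /* The encoding decodes as padded_count = (2k + 1) << shift;
                 * e.g. (illustrative) padded_count = 12 = 3 * 2^2 gives
                 * shift = 2 and k = 1, since (2*1 + 1) << 2 = 12. */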

                ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = shift;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = shift;

                ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = k;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = k;
        } else {
                ctx->padded_count = vertex_count;

                /* Reset instancing state */
                ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = 0;
                ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = 0;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = 0;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = 0;
        }

        /* Fire off the draw itself */
        panfrost_queue_draw(ctx);

        /* Increment transform feedback offsets */

        for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
                unsigned output_count = u_stream_outputs_for_vertices(
                                                ctx->active_prim, ctx->vertex_count);

                ctx->streamout.offsets[i] += output_count;
        }
}

/* CSO state */

static void
panfrost_generic_cso_delete(struct pipe_context *pctx, void *hwcso)
{
        free(hwcso);
}

static void *
panfrost_create_rasterizer_state(
        struct pipe_context *pctx,
        const struct pipe_rasterizer_state *cso)
{
        struct panfrost_rasterizer *so = CALLOC_STRUCT(panfrost_rasterizer);

        so->base = *cso;

        /* Bitmask, unknown meaning of the start value. 0x105 on 32-bit T6XX */
        so->tiler_gl_enables = 0x7;

        if (cso->front_ccw)
                so->tiler_gl_enables |= MALI_FRONT_CCW_TOP;

        if (cso->cull_face & PIPE_FACE_FRONT)
                so->tiler_gl_enables |= MALI_CULL_FACE_FRONT;

        if (cso->cull_face & PIPE_FACE_BACK)
                so->tiler_gl_enables |= MALI_CULL_FACE_BACK;

        return so;
}

static void
panfrost_bind_rasterizer_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        /* TODO: Why can't rasterizer be NULL ever? Other drivers are fine.. */
        if (!hwcso)
                return;

        ctx->rasterizer = hwcso;
        ctx->dirty |= PAN_DIRTY_RASTERIZER;

        ctx->fragment_shader_core.depth_units = ctx->rasterizer->base.offset_units * 2.0f;
        ctx->fragment_shader_core.depth_factor = ctx->rasterizer->base.offset_scale;

        /* Guaranteed with the core GL call, so don't expose ARB_polygon_offset */
        assert(ctx->rasterizer->base.offset_clamp == 0.0);

        /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
1650
1651 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_DEPTH_RANGE_A, ctx->rasterizer->base.offset_tri);
1652 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_DEPTH_RANGE_B, ctx->rasterizer->base.offset_tri);
1653
1654 /* Point sprites are emulated */
1655
1656 struct panfrost_shader_state *variant =
1657 ctx->shader[PIPE_SHADER_FRAGMENT] ? &ctx->shader[PIPE_SHADER_FRAGMENT]->variants[ctx->shader[PIPE_SHADER_FRAGMENT]->active_variant] : NULL;
1658
1659 if (ctx->rasterizer->base.sprite_coord_enable || (variant && variant->point_sprite_mask))
1660 ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
1661 }
1662
1663 static void *
1664 panfrost_create_vertex_elements_state(
1665 struct pipe_context *pctx,
1666 unsigned num_elements,
1667 const struct pipe_vertex_element *elements)
1668 {
1669 struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state);
1670
1671 so->num_elements = num_elements;
1672 memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
1673
1674 for (int i = 0; i < num_elements; ++i) {
1675 so->hw[i].index = i;
1676
1677 enum pipe_format fmt = elements[i].src_format;
1678 const struct util_format_description *desc = util_format_description(fmt);
1679 so->hw[i].unknown1 = 0x2;
1680 so->hw[i].swizzle = panfrost_get_default_swizzle(desc->nr_channels);
1681
1682 so->hw[i].format = panfrost_find_format(desc);
1683
1684 /* The field itself should probably be shifted over */
1685 so->hw[i].src_offset = elements[i].src_offset;
1686 }
1687
1688 return so;
1689 }
1690
1691 static void
1692 panfrost_bind_vertex_elements_state(
1693 struct pipe_context *pctx,
1694 void *hwcso)
1695 {
1696 struct panfrost_context *ctx = pan_context(pctx);
1697
1698 ctx->vertex = hwcso;
1699 ctx->dirty |= PAN_DIRTY_VERTEX;
1700 }
1701
1702 static void *
1703 panfrost_create_shader_state(
1704 struct pipe_context *pctx,
1705 const struct pipe_shader_state *cso,
1706 enum pipe_shader_type stage)
1707 {
1708 struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
1709 so->base = *cso;
1710
1711 /* Token deep copy to prevent memory corruption */
1712
1713 if (cso->type == PIPE_SHADER_IR_TGSI)
1714 so->base.tokens = tgsi_dup_tokens(so->base.tokens);
1715
1716 /* Precompile for shader-db if we need to */
1717 if (unlikely((pan_debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) {
1718 struct panfrost_context *ctx = pan_context(pctx);
1719
1720 struct mali_shader_meta meta;
1721 struct panfrost_shader_state state;
1722 uint64_t outputs_written;
1723
1724 panfrost_shader_compile(ctx, &meta,
1725 PIPE_SHADER_IR_NIR,
1726 so->base.ir.nir,
1727 tgsi_processor_to_shader_stage(stage), &state,
1728 &outputs_written);
1729 }
1730
1731 return so;
1732 }
1733
1734 static void
1735 panfrost_delete_shader_state(
1736 struct pipe_context *pctx,
1737 void *so)
1738 {
1739 struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;
1740
1741 if (cso->base.type == PIPE_SHADER_IR_TGSI) {
1742 DBG("Deleting TGSI shader leaks duplicated tokens\n");
1743 }
1744
1745 for (unsigned i = 0; i < cso->variant_count; ++i) {
1746 struct panfrost_shader_state *shader_state = &cso->variants[i];
1747 panfrost_bo_unreference(shader_state->bo);
1748 shader_state->bo = NULL;
1749 }
1750
1751 free(so);
1752 }
1753
1754 static void *
1755 panfrost_create_sampler_state(
1756 struct pipe_context *pctx,
1757 const struct pipe_sampler_state *cso)
1758 {
1759 struct panfrost_sampler_state *so = CALLOC_STRUCT(panfrost_sampler_state);
1760 so->base = *cso;
1761
1762 /* sampler_state corresponds to mali_sampler_descriptor, which we can generate entirely here */
1763
1764 bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
1765 bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
1766 bool mip_linear = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;
1767
1768 unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
1769 unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
1770 unsigned mip_filter = mip_linear ?
1771 (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
1772 unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;
1773
1774 struct mali_sampler_descriptor sampler_descriptor = {
1775 .filter_mode = min_filter | mag_filter | mip_filter | normalized,
1776 .wrap_s = translate_tex_wrap(cso->wrap_s),
1777 .wrap_t = translate_tex_wrap(cso->wrap_t),
1778 .wrap_r = translate_tex_wrap(cso->wrap_r),
1779 .compare_func = panfrost_flip_compare_func(
1780 panfrost_translate_compare_func(
1781 cso->compare_func)),
1782 .border_color = {
1783 cso->border_color.f[0],
1784 cso->border_color.f[1],
1785 cso->border_color.f[2],
1786 cso->border_color.f[3]
1787 },
1788 .min_lod = FIXED_16(cso->min_lod),
1789 .max_lod = FIXED_16(cso->max_lod),
1790 .lod_bias = FIXED_16(cso->lod_bias),
1791 .seamless_cube_map = cso->seamless_cube_map,
1792 };
1793
1794 /* If necessary, we disable mipmapping in the sampler descriptor by
1795 * clamping the LOD as tight as possible (from 0 to epsilon,
1796 * essentially -- remember these are fixed point numbers, so
1797 * epsilon=1/256) */
1798
1799 if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1800 sampler_descriptor.max_lod = sampler_descriptor.min_lod;
1801
1802 /* Enforce that the range is non-empty by adding epsilon */
1803
1804 if (sampler_descriptor.min_lod == sampler_descriptor.max_lod)
1805 sampler_descriptor.max_lod++;
1806
1807 /* Sanity check */
1808 assert(sampler_descriptor.max_lod > sampler_descriptor.min_lod);
1809
1810 so->hw = sampler_descriptor;
1811
1812 return so;
1813 }
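/* A minimal sketch of the fixed-point encoding assumed by the LOD fields
 * above: taking FIXED_16 as an 8.8-style conversion is consistent with the
 * "epsilon = 1/256" remark, so the max_lod++ above widens the range by
 * 1/256 of a mip level. Illustration only; the real macro lives in the
 * driver headers and its rounding/clamping rules may differ. */

static inline int16_t
panfrost_example_fixed_16(float lod)
{
        /* 8 fractional bits: one step == 1/256 of a level */
        return (int16_t) (lod * 256.0f);
}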
1814
1815 static void
1816 panfrost_bind_sampler_states(
1817 struct pipe_context *pctx,
1818 enum pipe_shader_type shader,
1819 unsigned start_slot, unsigned num_sampler,
1820 void **sampler)
1821 {
1822 assert(start_slot == 0);
1823
1824 struct panfrost_context *ctx = pan_context(pctx);
1825
1826 /* XXX: Should upload, not just copy? */
1827 ctx->sampler_count[shader] = num_sampler;
1828 memcpy(ctx->samplers[shader], sampler, num_sampler * sizeof (void *));
1829
1830 ctx->dirty |= PAN_DIRTY_SAMPLERS;
1831 }
1832
1833 static bool
1834 panfrost_variant_matches(
1835 struct panfrost_context *ctx,
1836 struct panfrost_shader_state *variant,
1837 enum pipe_shader_type type)
1838 {
1839 struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base;
1840 struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;
1841
1842 bool is_fragment = (type == PIPE_SHADER_FRAGMENT);
1843
1844 if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) {
1845 /* Make sure enable state is at least the same */
1846 if (alpha->enabled != variant->alpha_state.enabled) {
1847 return false;
1848 }
1849
1850 /* Check that the contents of the test are the same */
1851 bool same_func = alpha->func == variant->alpha_state.func;
1852 bool same_ref = alpha->ref_value == variant->alpha_state.ref_value;
1853
1854 if (!(same_func && same_ref)) {
1855 return false;
1856 }
1857 }
1858
1859 if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable |
1860 variant->point_sprite_mask)) {
1861 /* Ensure the same varyings are turned to point sprites */
1862 if (rasterizer->sprite_coord_enable != variant->point_sprite_mask)
1863 return false;
1864
1865 /* Ensure the orientation is correct */
1866 bool upper_left =
1867 rasterizer->sprite_coord_mode ==
1868 PIPE_SPRITE_COORD_UPPER_LEFT;
1869
1870 if (variant->point_sprite_upper_left != upper_left)
1871 return false;
1872 }
1873
1874 /* Otherwise, we're good to go */
1875 return true;
1876 }
1877
1878 /**
1879 * Fix an uncompiled shader's stream output info, and produce a bitmask
1880 * of which VARYING_SLOT_* are captured for stream output.
1881 *
1882 * Core Gallium stores output->register_index as a "slot" number, where
1883 * slots are assigned consecutively to all outputs in info->outputs_written.
1884 * This naive packing of outputs doesn't work for us - we too have slots,
1885 * but the layout is defined by the VUE map, which we won't have until we
1886 * compile a specific shader variant. So, we remap these and simply store
1887 * VARYING_SLOT_* in our copy's output->register_index fields.
1888 *
1889 * We then produce a bitmask of outputs which are used for SO.
1890 *
1891 * Implementation from iris.
1892 */
1893
1894 static uint64_t
1895 update_so_info(struct pipe_stream_output_info *so_info,
1896 uint64_t outputs_written)
1897 {
1898 uint64_t so_outputs = 0;
1899 uint8_t reverse_map[64] = {0};
1900 unsigned slot = 0;
1901
1902 while (outputs_written)
1903 reverse_map[slot++] = u_bit_scan64(&outputs_written);
1904
1905 for (unsigned i = 0; i < so_info->num_outputs; i++) {
1906 struct pipe_stream_output *output = &so_info->output[i];
1907
1908 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
1909 output->register_index = reverse_map[output->register_index];
1910
1911 so_outputs |= 1ull << output->register_index;
1912 }
1913
1914 return so_outputs;
1915 }
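/* Worked example of the remap above, under hypothetical inputs: with
 * outputs_written = (1 << 0) | (1 << 3), the scan builds reverse_map[0] = 0
 * and reverse_map[1] = 3, so an SO output recorded against condensed slot 1
 * is rewritten to varying 3 and the returned mask has bit 3 set. */

static inline uint64_t
panfrost_example_update_so_info(void)
{
        struct pipe_stream_output_info info = {
                .num_outputs = 1,
                .output = { { .register_index = 1 } },
        };

        /* Rewrites info.output[0].register_index from 1 to 3; returns 1 << 3 */
        return update_so_info(&info, (1ull << 0) | (1ull << 3));
}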
1916
1917 static void
1918 panfrost_bind_shader_state(
1919 struct pipe_context *pctx,
1920 void *hwcso,
1921 enum pipe_shader_type type)
1922 {
1923 struct panfrost_context *ctx = pan_context(pctx);
1924
1925 ctx->shader[type] = hwcso;
1926
1927 if (type == PIPE_SHADER_FRAGMENT)
1928 ctx->dirty |= PAN_DIRTY_FS;
1929 else
1930 ctx->dirty |= PAN_DIRTY_VS;
1931
1932 if (!hwcso) return;
1933
1934 /* Match the appropriate variant */
1935
1936 signed variant = -1;
1937 struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;
1938
1939 for (unsigned i = 0; i < variants->variant_count; ++i) {
1940 if (panfrost_variant_matches(ctx, &variants->variants[i], type)) {
1941 variant = i;
1942 break;
1943 }
1944 }
1945
1946 if (variant == -1) {
1947 /* No variant matched, so create a new one */
1948 variant = variants->variant_count++;
1949 assert(variants->variant_count < MAX_SHADER_VARIANTS);
1950
1951 struct panfrost_shader_state *v =
1952 &variants->variants[variant];
1953
1954 if (type == PIPE_SHADER_FRAGMENT) {
1955 v->alpha_state = ctx->depth_stencil->alpha;
1956
1957 if (ctx->rasterizer) {
1958 v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable;
1959 v->point_sprite_upper_left =
1960 ctx->rasterizer->base.sprite_coord_mode ==
1961 PIPE_SPRITE_COORD_UPPER_LEFT;
1962 }
1963 }
1964
1965 variants->variants[variant].tripipe = calloc(1, sizeof(struct mali_shader_meta));
1966
1967 }
1968
1969 /* Select this variant */
1970 variants->active_variant = variant;
1971
1972 struct panfrost_shader_state *shader_state = &variants->variants[variant];
1973 assert(panfrost_variant_matches(ctx, shader_state, type));
1974
1975 /* We finally have a variant, so compile it */
1976
1977 if (!shader_state->compiled) {
1978 uint64_t outputs_written = 0;
1979
1980 panfrost_shader_compile(ctx, shader_state->tripipe,
1981 variants->base.type,
1982 variants->base.type == PIPE_SHADER_IR_NIR ?
1983 variants->base.ir.nir :
1984 variants->base.tokens,
1985 tgsi_processor_to_shader_stage(type), shader_state,
1986 &outputs_written);
1987
1988 shader_state->compiled = true;
1989
1990 /* Fixup the stream out information, since what Gallium returns
1991 * normally is mildly insane */
1992
1993 shader_state->stream_output = variants->base.stream_output;
1994 shader_state->so_mask =
1995 update_so_info(&shader_state->stream_output, outputs_written);
1996 }
1997 }
1998
1999 static void *
2000 panfrost_create_vs_state(struct pipe_context *pctx, const struct pipe_shader_state *hwcso)
2001 {
2002 return panfrost_create_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
2003 }
2004
2005 static void *
2006 panfrost_create_fs_state(struct pipe_context *pctx, const struct pipe_shader_state *hwcso)
2007 {
2008 return panfrost_create_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
2009 }
2010
2011 static void
2012 panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
2013 {
2014 panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
2015 }
2016
2017 static void
2018 panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
2019 {
2020 panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
2021 }
2022
2023 static void
2024 panfrost_set_vertex_buffers(
2025 struct pipe_context *pctx,
2026 unsigned start_slot,
2027 unsigned num_buffers,
2028 const struct pipe_vertex_buffer *buffers)
2029 {
2030 struct panfrost_context *ctx = pan_context(pctx);
2031
2032 util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers, start_slot, num_buffers);
2033 }
2034
2035 static void
2036 panfrost_set_constant_buffer(
2037 struct pipe_context *pctx,
2038 enum pipe_shader_type shader, uint index,
2039 const struct pipe_constant_buffer *buf)
2040 {
2041 struct panfrost_context *ctx = pan_context(pctx);
2042 struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];
2043
2044 util_copy_constant_buffer(&pbuf->cb[index], buf);
2045
2046 unsigned mask = (1 << index);
2047
2048 if (unlikely(!buf)) {
2049 pbuf->enabled_mask &= ~mask;
2050 pbuf->dirty_mask &= ~mask;
2051 return;
2052 }
2053
2054 pbuf->enabled_mask |= mask;
2055 pbuf->dirty_mask |= mask;
2056 }
2057
2058 static void
2059 panfrost_set_stencil_ref(
2060 struct pipe_context *pctx,
2061 const struct pipe_stencil_ref *ref)
2062 {
2063 struct panfrost_context *ctx = pan_context(pctx);
2064 ctx->stencil_ref = *ref;
2065
2066 /* Shader core dirty */
2067 ctx->dirty |= PAN_DIRTY_FS;
2068 }
2069
2070 static enum mali_texture_type
2071 panfrost_translate_texture_type(enum pipe_texture_target t)
2072 {
2073 switch (t) {
2074 case PIPE_BUFFER:
2075 case PIPE_TEXTURE_1D:
2076 case PIPE_TEXTURE_1D_ARRAY:
2077 return MALI_TEX_1D;
2078
2079 case PIPE_TEXTURE_2D:
2080 case PIPE_TEXTURE_2D_ARRAY:
2081 case PIPE_TEXTURE_RECT:
2082 return MALI_TEX_2D;
2083
2084 case PIPE_TEXTURE_3D:
2085 return MALI_TEX_3D;
2086
2087 case PIPE_TEXTURE_CUBE:
2088 case PIPE_TEXTURE_CUBE_ARRAY:
2089 return MALI_TEX_CUBE;
2090
2091 default:
2092 unreachable("Unknown target");
2093 }
2094 }
2095
2096 static struct pipe_sampler_view *
2097 panfrost_create_sampler_view(
2098 struct pipe_context *pctx,
2099 struct pipe_resource *texture,
2100 const struct pipe_sampler_view *template)
2101 {
2102 struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view);
2103 int bytes_per_pixel = util_format_get_blocksize(texture->format);
2104
2105 pipe_reference(NULL, &texture->reference);
2106
2107 struct panfrost_resource *prsrc = (struct panfrost_resource *) texture;
2108 assert(prsrc->bo);
2109
2110 so->base = *template;
2111 so->base.texture = texture;
2112 so->base.reference.count = 1;
2113 so->base.context = pctx;
2114
2115 /* sampler_views correspond to texture descriptors, minus the texture
2116 * (data) itself. So, we serialise the descriptor here and cache it for
2117 * later. */
2118
2119 const struct util_format_description *desc = util_format_description(prsrc->base.format);
2120
2121 unsigned char user_swizzle[4] = {
2122 template->swizzle_r,
2123 template->swizzle_g,
2124 template->swizzle_b,
2125 template->swizzle_a
2126 };
2127
2128 enum mali_format format = panfrost_find_format(desc);
2129
2130 /* Check if we need to set a custom stride by computing the "expected"
2131 * stride and comparing it to what the BO actually wants. Only applies
2132 * to linear textures, since tiled/compressed textures have strict
2133 * alignment requirements for their strides as it is */
2134
2135 unsigned first_level = template->u.tex.first_level;
2136 unsigned last_level = template->u.tex.last_level;
2137
2138 if (prsrc->layout == PAN_LINEAR) {
2139 for (unsigned l = first_level; l <= last_level; ++l) {
2140 unsigned actual_stride = prsrc->slices[l].stride;
2141 unsigned width = u_minify(texture->width0, l);
2142 unsigned comp_stride = width * bytes_per_pixel;
2143
2144 if (comp_stride != actual_stride) {
2145 so->manual_stride = true;
2146 break;
2147 }
2148 }
2149 }
2150
2151 /* In the hardware, array_size refers specifically to array textures,
2152 * whereas in Gallium, it also covers cubemaps */
2153
2154 unsigned array_size = texture->array_size;
2155
2156 if (template->target == PIPE_TEXTURE_CUBE) {
2157 /* TODO: Cubemap arrays */
2158 assert(array_size == 6);
2159 array_size /= 6;
2160 }
2161
2162 struct mali_texture_descriptor texture_descriptor = {
2163 .width = MALI_POSITIVE(u_minify(texture->width0, first_level)),
2164 .height = MALI_POSITIVE(u_minify(texture->height0, first_level)),
2165 .depth = MALI_POSITIVE(u_minify(texture->depth0, first_level)),
2166 .array_size = MALI_POSITIVE(array_size),
2167
2168 .format = {
2169 .swizzle = panfrost_translate_swizzle_4(desc->swizzle),
2170 .format = format,
2171 .srgb = desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB,
2172 .type = panfrost_translate_texture_type(template->target),
2173 .unknown2 = 0x1,
2174 },
2175
2176 .swizzle = panfrost_translate_swizzle_4(user_swizzle)
2177 };
2178
2179 texture_descriptor.levels = last_level - first_level;
2180
2181 so->hw = texture_descriptor;
2182
2183 return (struct pipe_sampler_view *) so;
2184 }
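/* Concrete numbers for the stride check above (hypothetical, for
 * exposition): a linear RGBA8 level that is 100 texels wide "expects"
 * 100 * 4 = 400 bytes per row, so a BO whose rows were padded out to, say,
 * 512 bytes would trip the comparison and set manual_stride. */

static inline bool
panfrost_example_needs_manual_stride(enum pipe_format fmt, unsigned width,
                                     unsigned actual_stride)
{
        /* Mirrors the comp_stride computation in the level loop above */
        unsigned expected = width * util_format_get_blocksize(fmt);
        return expected != actual_stride;
}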
2185
2186 static void
2187 panfrost_set_sampler_views(
2188 struct pipe_context *pctx,
2189 enum pipe_shader_type shader,
2190 unsigned start_slot, unsigned num_views,
2191 struct pipe_sampler_view **views)
2192 {
2193 struct panfrost_context *ctx = pan_context(pctx);
2194 unsigned new_nr = 0;
2195 unsigned i;
2196
2197 assert(start_slot == 0);
2198
2199 for (i = 0; i < num_views; ++i) {
2200 if (views[i])
2201 new_nr = i + 1;
2202 pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
2203 views[i]);
2204 }
2205
2206 for (; i < ctx->sampler_view_count[shader]; i++) {
2207 pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
2208 NULL);
2209 }
2210 ctx->sampler_view_count[shader] = new_nr;
2211
2212 ctx->dirty |= PAN_DIRTY_TEXTURES;
2213 }
2214
2215 static void
2216 panfrost_sampler_view_destroy(
2217 struct pipe_context *pctx,
2218 struct pipe_sampler_view *view)
2219 {
2220 pipe_resource_reference(&view->texture, NULL);
2221 ralloc_free(view);
2222 }
2223
2224 static void
2225 panfrost_set_shader_buffers(
2226 struct pipe_context *pctx,
2227 enum pipe_shader_type shader,
2228 unsigned start, unsigned count,
2229 const struct pipe_shader_buffer *buffers,
2230 unsigned writable_bitmask)
2231 {
2232 struct panfrost_context *ctx = pan_context(pctx);
2233
2234 util_set_shader_buffers_mask(ctx->ssbo[shader], &ctx->ssbo_mask[shader],
2235 buffers, start, count);
2236 }
2237
2238 /* Hints that a framebuffer should use AFBC where possible */
2239
2240 static void
2241 panfrost_hint_afbc(
2242 struct panfrost_screen *screen,
2243 const struct pipe_framebuffer_state *fb)
2244 {
2245 /* AFBC implementation incomplete; hide it */
2246 if (!(pan_debug & PAN_DBG_AFBC)) return;
2247
2248 /* Hint AFBC to the resources bound to each color buffer */
2249
2250 for (unsigned i = 0; i < fb->nr_cbufs; ++i) {
2251 struct pipe_surface *surf = fb->cbufs[i];
2252 struct panfrost_resource *rsrc = pan_resource(surf->texture);
2253 panfrost_resource_hint_layout(screen, rsrc, PAN_AFBC, 1);
2254 }
2255
2256 /* Also hint it to the depth buffer */
2257
2258 if (fb->zsbuf) {
2259 struct panfrost_resource *rsrc = pan_resource(fb->zsbuf->texture);
2260 panfrost_resource_hint_layout(screen, rsrc, PAN_AFBC, 1);
2261 }
2262 }
2263
2264 static void
2265 panfrost_set_framebuffer_state(struct pipe_context *pctx,
2266 const struct pipe_framebuffer_state *fb)
2267 {
2268 struct panfrost_context *ctx = pan_context(pctx);
2269
2270 panfrost_hint_afbc(pan_screen(pctx->screen), fb);
2271 util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
2272 ctx->batch = NULL;
2273 panfrost_invalidate_frame(ctx);
2274 }
2275
2276 static void *
2277 panfrost_create_depth_stencil_state(struct pipe_context *pipe,
2278 const struct pipe_depth_stencil_alpha_state *depth_stencil)
2279 {
2280 return mem_dup(depth_stencil, sizeof(*depth_stencil));
2281 }
2282
2283 static void
2284 panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
2285 void *cso)
2286 {
2287 struct panfrost_context *ctx = pan_context(pipe);
2288 struct pipe_depth_stencil_alpha_state *depth_stencil = cso;
2289 ctx->depth_stencil = depth_stencil;
2290
2291 if (!depth_stencil)
2292 return;
2293
2294 /* The alpha test does not exist in the hardware (it was dropped from
2295 * ES 3), so it's emulated in the fragment shader */
2296
2297 if (depth_stencil->alpha.enabled) {
2298 /* We need to trigger a new shader (maybe) */
2299 ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
2300 }
2301
2302 /* Stencil state */
2303 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_STENCIL_TEST, depth_stencil->stencil[0].enabled);
2304
2305 panfrost_make_stencil_state(&depth_stencil->stencil[0], &ctx->fragment_shader_core.stencil_front);
2306 ctx->fragment_shader_core.stencil_mask_front = depth_stencil->stencil[0].writemask;
2307
2308 /* If back-stencil is not enabled, use the front values */
2309 bool back_enab = ctx->depth_stencil->stencil[1].enabled;
2310 unsigned back_index = back_enab ? 1 : 0;
2311
2312 panfrost_make_stencil_state(&depth_stencil->stencil[back_index], &ctx->fragment_shader_core.stencil_back);
2313 ctx->fragment_shader_core.stencil_mask_back = depth_stencil->stencil[back_index].writemask;
2314
2315 /* Depth state (TODO: Refactor) */
2316 SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_DEPTH_WRITEMASK,
2317 depth_stencil->depth.writemask);
2318
2319 int func = depth_stencil->depth.enabled ? depth_stencil->depth.func : PIPE_FUNC_ALWAYS;
2320
2321 ctx->fragment_shader_core.unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
2322 ctx->fragment_shader_core.unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(func));
2323
2324 /* Bounds test not implemented */
2325 assert(!depth_stencil->depth.bounds_test);
2326
2327 ctx->dirty |= PAN_DIRTY_FS;
2328 }
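/* The depth-func update above is a read-modify-write on a packed word: a
 * sketch of the same pattern, assuming MALI_DEPTH_FUNC() merely shifts an
 * already-translated compare func into the field selected by
 * MALI_DEPTH_FUNC_MASK. */

static inline uint32_t
panfrost_example_set_depth_func(uint32_t unknown2_3, unsigned hw_func)
{
        /* Clear the old function bits, then OR in the new ones */
        return (unknown2_3 & ~MALI_DEPTH_FUNC_MASK) | MALI_DEPTH_FUNC(hw_func);
}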
2329
2330 static void
2331 panfrost_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
2332 {
2333 free(depth);
2334 }
2335
2336 static void
2337 panfrost_set_sample_mask(struct pipe_context *pipe,
2338 unsigned sample_mask)
2339 {
2340 }
2341
2342 static void
2343 panfrost_set_clip_state(struct pipe_context *pipe,
2344 const struct pipe_clip_state *clip)
2345 {
2346 //struct panfrost_context *panfrost = pan_context(pipe);
2347 }
2348
2349 static void
2350 panfrost_set_viewport_states(struct pipe_context *pipe,
2351 unsigned start_slot,
2352 unsigned num_viewports,
2353 const struct pipe_viewport_state *viewports)
2354 {
2355 struct panfrost_context *ctx = pan_context(pipe);
2356
2357 assert(start_slot == 0);
2358 assert(num_viewports == 1);
2359
2360 ctx->pipe_viewport = *viewports;
2361 }
2362
2363 static void
2364 panfrost_set_scissor_states(struct pipe_context *pipe,
2365 unsigned start_slot,
2366 unsigned num_scissors,
2367 const struct pipe_scissor_state *scissors)
2368 {
2369 struct panfrost_context *ctx = pan_context(pipe);
2370
2371 assert(start_slot == 0);
2372 assert(num_scissors == 1);
2373
2374 ctx->scissor = *scissors;
2375 }
2376
2377 static void
2378 panfrost_set_polygon_stipple(struct pipe_context *pipe,
2379 const struct pipe_poly_stipple *stipple)
2380 {
2381 //struct panfrost_context *panfrost = pan_context(pipe);
2382 }
2383
2384 static void
2385 panfrost_set_active_query_state(struct pipe_context *pipe,
2386 bool enable)
2387 {
2388 struct panfrost_context *ctx = pan_context(pipe);
2389 ctx->active_queries = enable;
2390 }
2391
2392 static void
2393 panfrost_destroy(struct pipe_context *pipe)
2394 {
2395 struct panfrost_context *panfrost = pan_context(pipe);
2396
2397 if (panfrost->blitter)
2398 util_blitter_destroy(panfrost->blitter);
2399
2400 if (panfrost->blitter_wallpaper)
2401 util_blitter_destroy(panfrost->blitter_wallpaper);
2402
2403 util_unreference_framebuffer_state(&panfrost->pipe_framebuffer);
2404 u_upload_destroy(pipe->stream_uploader);
2405
2406 ralloc_free(pipe);
2407 }
2408
2409 static struct pipe_query *
2410 panfrost_create_query(struct pipe_context *pipe,
2411 unsigned type,
2412 unsigned index)
2413 {
2414 struct panfrost_query *q = rzalloc(pipe, struct panfrost_query);
2415
2416 q->type = type;
2417 q->index = index;
2418
2419 return (struct pipe_query *) q;
2420 }
2421
2422 static void
2423 panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
2424 {
2425 struct panfrost_query *query = (struct panfrost_query *) q;
2426
2427 if (query->bo) {
2428 panfrost_bo_unreference(query->bo);
2429 query->bo = NULL;
2430 }
2431
2432 ralloc_free(q);
2433 }
2434
2435 static bool
2436 panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
2437 {
2438 struct panfrost_context *ctx = pan_context(pipe);
2439 struct panfrost_query *query = (struct panfrost_query *) q;
2440
2441 switch (query->type) {
2442 case PIPE_QUERY_OCCLUSION_COUNTER:
2443 case PIPE_QUERY_OCCLUSION_PREDICATE:
2444 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
2445 /* Allocate a bo for the query results to be stored */
2446 if (!query->bo) {
2447 query->bo = panfrost_bo_create(
2448 pan_screen(ctx->base.screen),
2449 sizeof(unsigned), 0);
2450 }
2451
2452 unsigned *result = (unsigned *)query->bo->cpu;
2453 *result = 0; /* Default to 0 if nothing at all drawn. */
2454 ctx->occlusion_query = query;
2455 break;
2456
2457 /* Geometry statistics are computed in the driver. XXX: geom/tess
2458 * shaders... */
2459
2460 case PIPE_QUERY_PRIMITIVES_GENERATED:
2461 query->start = ctx->prims_generated;
2462 break;
2463 case PIPE_QUERY_PRIMITIVES_EMITTED:
2464 query->start = ctx->tf_prims_generated;
2465 break;
2466
2467 default:
2468 fprintf(stderr, "Skipping query %u\n", query->type);
2469 break;
2470 }
2471
2472 return true;
2473 }
2474
2475 static bool
2476 panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
2477 {
2478 struct panfrost_context *ctx = pan_context(pipe);
2479 struct panfrost_query *query = (struct panfrost_query *) q;
2480
2481 switch (query->type) {
2482 case PIPE_QUERY_OCCLUSION_COUNTER:
2483 case PIPE_QUERY_OCCLUSION_PREDICATE:
2484 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
2485 ctx->occlusion_query = NULL;
2486 break;
2487 case PIPE_QUERY_PRIMITIVES_GENERATED:
2488 query->end = ctx->prims_generated;
2489 break;
2490 case PIPE_QUERY_PRIMITIVES_EMITTED:
2491 query->end = ctx->tf_prims_generated;
2492 break;
2493 }
2494
2495 return true;
2496 }
2497
2498 static bool
2499 panfrost_get_query_result(struct pipe_context *pipe,
2500 struct pipe_query *q,
2501 bool wait,
2502 union pipe_query_result *vresult)
2503 {
2504 struct panfrost_query *query = (struct panfrost_query *) q;
2505 struct panfrost_context *ctx = pan_context(pipe);
2506
2507
2508 switch (query->type) {
2509 case PIPE_QUERY_OCCLUSION_COUNTER:
2510 case PIPE_QUERY_OCCLUSION_PREDICATE:
2511 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
2512 /* Flush first */
2513 panfrost_flush_all_batches(ctx, true);
2514
2515 /* Read back the query results */
2516 unsigned *result = (unsigned *) query->bo->cpu;
2517 unsigned passed = *result;
2518
2519 if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
2520 vresult->u64 = passed;
2521 } else {
2522 vresult->b = !!passed;
2523 }
2524
2525 break;
2526
2527 case PIPE_QUERY_PRIMITIVES_GENERATED:
2528 case PIPE_QUERY_PRIMITIVES_EMITTED:
2529 panfrost_flush_all_batches(ctx, true);
2530 vresult->u64 = query->end - query->start;
2531 break;
2532
2533 default:
2534 DBG("Skipped query get %u\n", query->type);
2535 break;
2536 }
2537
2538 return true;
2539 }
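/* The occlusion path above is "flush, then read back": a sketch of the
 * caller's view, shown as a comment (names hypothetical), assuming a
 * counter query q begun and ended around some draws:
 *
 *     union pipe_query_result res;
 *     pipe->get_query_result(pipe, q, true, &res);
 *     printf("%" PRIu64 " samples passed\n", res.u64);
 */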
2540
2541 static struct pipe_stream_output_target *
2542 panfrost_create_stream_output_target(struct pipe_context *pctx,
2543 struct pipe_resource *prsc,
2544 unsigned buffer_offset,
2545 unsigned buffer_size)
2546 {
2547 struct pipe_stream_output_target *target;
2548
2549 target = rzalloc(pctx, struct pipe_stream_output_target);
2550
2551 if (!target)
2552 return NULL;
2553
2554 pipe_reference_init(&target->reference, 1);
2555 pipe_resource_reference(&target->buffer, prsc);
2556
2557 target->context = pctx;
2558 target->buffer_offset = buffer_offset;
2559 target->buffer_size = buffer_size;
2560
2561 return target;
2562 }
2563
2564 static void
2565 panfrost_stream_output_target_destroy(struct pipe_context *pctx,
2566 struct pipe_stream_output_target *target)
2567 {
2568 pipe_resource_reference(&target->buffer, NULL);
2569 ralloc_free(target);
2570 }
2571
2572 static void
2573 panfrost_set_stream_output_targets(struct pipe_context *pctx,
2574 unsigned num_targets,
2575 struct pipe_stream_output_target **targets,
2576 const unsigned *offsets)
2577 {
2578 struct panfrost_context *ctx = pan_context(pctx);
2579 struct panfrost_streamout *so = &ctx->streamout;
2580
2581 assert(num_targets <= ARRAY_SIZE(so->targets));
2582
2583 for (unsigned i = 0; i < num_targets; i++) {
2584 if (offsets[i] != -1)
2585 so->offsets[i] = offsets[i];
2586
2587 pipe_so_target_reference(&so->targets[i], targets[i]);
2588 }
2589
2590 for (unsigned i = num_targets; i < so->num_targets; i++)
2591 pipe_so_target_reference(&so->targets[i], NULL);
2592
2593 so->num_targets = num_targets;
2594 }
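/* Usage note (hypothetical call, shown as a comment): with the unbind loop
 * starting at num_targets, shrinking from three bound targets to one drops
 * only the two stale references and leaves so->targets[1..2] NULL while the
 * newly bound target survives:
 *
 *     panfrost_set_stream_output_targets(pctx, 1, &target, &offset);
 */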
2595
2596 struct pipe_context *
2597 panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
2598 {
2599 struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context);
2600 struct pipe_context *gallium = (struct pipe_context *) ctx;
2601
2602 gallium->screen = screen;
2603
2604 gallium->destroy = panfrost_destroy;
2605
2606 gallium->set_framebuffer_state = panfrost_set_framebuffer_state;
2607
2608 gallium->flush = panfrost_flush;
2609 gallium->clear = panfrost_clear;
2610 gallium->draw_vbo = panfrost_draw_vbo;
2611
2612 gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
2613 gallium->set_constant_buffer = panfrost_set_constant_buffer;
2614 gallium->set_shader_buffers = panfrost_set_shader_buffers;
2615
2616 gallium->set_stencil_ref = panfrost_set_stencil_ref;
2617
2618 gallium->create_sampler_view = panfrost_create_sampler_view;
2619 gallium->set_sampler_views = panfrost_set_sampler_views;
2620 gallium->sampler_view_destroy = panfrost_sampler_view_destroy;
2621
2622 gallium->create_rasterizer_state = panfrost_create_rasterizer_state;
2623 gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
2624 gallium->delete_rasterizer_state = panfrost_generic_cso_delete;
2625
2626 gallium->create_vertex_elements_state = panfrost_create_vertex_elements_state;
2627 gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
2628 gallium->delete_vertex_elements_state = panfrost_generic_cso_delete;
2629
2630 gallium->create_fs_state = panfrost_create_fs_state;
2631 gallium->delete_fs_state = panfrost_delete_shader_state;
2632 gallium->bind_fs_state = panfrost_bind_fs_state;
2633
2634 gallium->create_vs_state = panfrost_create_vs_state;
2635 gallium->delete_vs_state = panfrost_delete_shader_state;
2636 gallium->bind_vs_state = panfrost_bind_vs_state;
2637
2638 gallium->create_sampler_state = panfrost_create_sampler_state;
2639 gallium->delete_sampler_state = panfrost_generic_cso_delete;
2640 gallium->bind_sampler_states = panfrost_bind_sampler_states;
2641
2642 gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
2643 gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
2644 gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;
2645
2646 gallium->set_sample_mask = panfrost_set_sample_mask;
2647
2648 gallium->set_clip_state = panfrost_set_clip_state;
2649 gallium->set_viewport_states = panfrost_set_viewport_states;
2650 gallium->set_scissor_states = panfrost_set_scissor_states;
2651 gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
2652 gallium->set_active_query_state = panfrost_set_active_query_state;
2653
2654 gallium->create_query = panfrost_create_query;
2655 gallium->destroy_query = panfrost_destroy_query;
2656 gallium->begin_query = panfrost_begin_query;
2657 gallium->end_query = panfrost_end_query;
2658 gallium->get_query_result = panfrost_get_query_result;
2659
2660 gallium->create_stream_output_target = panfrost_create_stream_output_target;
2661 gallium->stream_output_target_destroy = panfrost_stream_output_target_destroy;
2662 gallium->set_stream_output_targets = panfrost_set_stream_output_targets;
2663
2664 panfrost_resource_context_init(gallium);
2665 panfrost_blend_context_init(gallium);
2666 panfrost_compute_context_init(gallium);
2667
2668 /* XXX: leaks */
2669 gallium->stream_uploader = u_upload_create_default(gallium);
2670 gallium->const_uploader = gallium->stream_uploader;
2671 assert(gallium->stream_uploader);
2672
2673 /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */
2674 ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1;
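/* Assuming Gallium's enum order (PIPE_PRIM_POINTS == 0 up through
 * PIPE_PRIM_POLYGON == 9), the expression above is (1 << 10) - 1 == 0x3ff:
 * one bit per supported draw mode. */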
2675
2676 ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);
2677
2678 ctx->blitter = util_blitter_create(gallium);
2679 ctx->blitter_wallpaper = util_blitter_create(gallium);
2680
2681 assert(ctx->blitter);
2682 assert(ctx->blitter_wallpaper);
2683
2684 /* Prepare for render! */
2685
2686 panfrost_batch_init(ctx);
2687 panfrost_emit_vertex_payload(ctx);
2688 panfrost_emit_tiler_payload(ctx);
2689 panfrost_invalidate_frame(ctx);
2690 panfrost_default_shader_backend(ctx);
2691
2692 return gallium;
2693 }