/*
 * © Copyright 2018 Alyssa Rosenzweig
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <sys/poll.h>
#include <errno.h>

#include "pan_bo.h"
#include "pan_context.h"
#include "pan_format.h"

#include "util/macros.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "util/u_memory.h"
#include "util/u_vbuf.h"
#include "util/half_float.h"
#include "util/u_helpers.h"
#include "util/u_prim.h"
#include "util/u_prim_restart.h"
#include "indices/u_primconvert.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_from_mesa.h"
#include "util/u_math.h"

#include "pan_screen.h"
#include "pan_blending.h"
#include "pan_blend_shaders.h"
#include "pan_util.h"

/* Framebuffer descriptor */

static struct midgard_tiler_descriptor
panfrost_emit_midg_tiler(struct panfrost_batch *batch, unsigned vertex_count)
{
        struct midgard_tiler_descriptor t = {};
        unsigned height = batch->key.height;
        unsigned width = batch->key.width;

        t.hierarchy_mask =
                panfrost_choose_hierarchy_mask(width, height, vertex_count);

        /* Compute the polygon header size and use that to offset the body */

        unsigned header_size = panfrost_tiler_header_size(
                                       width, height, t.hierarchy_mask);

        t.polygon_list_size = panfrost_tiler_full_size(
                                       width, height, t.hierarchy_mask);

        /* Sanity check */

        if (t.hierarchy_mask) {
                struct panfrost_bo *tiler_heap;

                tiler_heap = panfrost_batch_get_tiler_heap(batch);
                t.polygon_list = panfrost_batch_get_polygon_list(batch,
                                header_size + t.polygon_list_size);

                /* Allow the entire tiler heap */
                t.heap_start = tiler_heap->gpu;
                t.heap_end = tiler_heap->gpu + tiler_heap->size;
        } else {
                struct panfrost_bo *tiler_dummy;

                tiler_dummy = panfrost_batch_get_tiler_dummy(batch);

                /* The tiler is disabled, so don't allow the tiler heap */
                t.heap_start = tiler_dummy->gpu;
                t.heap_end = t.heap_start;

                /* Use a dummy polygon list */
                t.polygon_list = tiler_dummy->gpu;

                /* Disable the tiler */
                t.hierarchy_mask |= MALI_TILER_DISABLED;
        }

        t.polygon_list_body =
                t.polygon_list + header_size;

        return t;
}
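
/* A layout sketch of the allocation made above: the header and body share
 * one polygon list buffer, with the body offset by header_size:
 *
 *      polygon_list      -> [ header ]  (header_size bytes)
 *      polygon_list_body -> [ body   ]  (polygon_list_size bytes)
 *
 * In the disabled case, both pointers alias the dummy BO and the heap is
 * the empty range [heap_start, heap_start), so the tiler never writes. */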

struct mali_single_framebuffer
panfrost_emit_sfbd(struct panfrost_batch *batch, unsigned vertex_count)
{
        unsigned width = batch->key.width;
        unsigned height = batch->key.height;

        struct mali_single_framebuffer framebuffer = {
                .width = MALI_POSITIVE(width),
                .height = MALI_POSITIVE(height),
                .unknown2 = 0x1f,
                .format = 0x30000000,
                .clear_flags = 0x1000,
                .unknown_address_0 = panfrost_batch_get_scratchpad(batch)->gpu,
                .tiler = panfrost_emit_midg_tiler(batch, vertex_count),
        };

        return framebuffer;
}
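
/* A note on the MALI_POSITIVE encoding used above: as far as we can tell,
 * the hardware stores these dimension fields biased by one, so
 * MALI_POSITIVE(x) packs x - 1 (e.g. a 1920x1080 framebuffer is encoded as
 * 1919x1079). That makes a zero-sized dimension unrepresentable rather than
 * wasting encoding space on it. */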

struct bifrost_framebuffer
panfrost_emit_mfbd(struct panfrost_batch *batch, unsigned vertex_count)
{
        unsigned width = batch->key.width;
        unsigned height = batch->key.height;

        struct bifrost_framebuffer framebuffer = {
                .unk0 = 0x1e5, /* 1e4 if no spill */
                .width1 = MALI_POSITIVE(width),
                .height1 = MALI_POSITIVE(height),
                .width2 = MALI_POSITIVE(width),
                .height2 = MALI_POSITIVE(height),

                .unk1 = 0x1080,

                .rt_count_1 = MALI_POSITIVE(batch->key.nr_cbufs),
                .rt_count_2 = 4,

                .unknown2 = 0x1f,

                .scratchpad = panfrost_batch_get_scratchpad(batch)->gpu,
                .tiler = panfrost_emit_midg_tiler(batch, vertex_count)
        };

        return framebuffer;
}

static void
panfrost_clear(
        struct pipe_context *pipe,
        unsigned buffers,
        const union pipe_color_union *color,
        double depth, unsigned stencil)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        panfrost_batch_add_fbo_bos(batch);
        panfrost_batch_clear(batch, buffers, color, depth, stencil);
}

static mali_ptr
panfrost_attach_vt_mfbd(struct panfrost_batch *batch)
{
        struct bifrost_framebuffer mfbd = panfrost_emit_mfbd(batch, ~0);

        return panfrost_upload_transient(batch, &mfbd, sizeof(mfbd)) | MALI_MFBD;
}

static mali_ptr
panfrost_attach_vt_sfbd(struct panfrost_batch *batch)
{
        struct mali_single_framebuffer sfbd = panfrost_emit_sfbd(batch, ~0);

        return panfrost_upload_transient(batch, &sfbd, sizeof(sfbd)) | MALI_SFBD;
}

static void
panfrost_attach_vt_framebuffer(struct panfrost_context *ctx)
{
        /* Skip the attach if we can */

        if (ctx->payloads[PIPE_SHADER_VERTEX].postfix.framebuffer) {
                assert(ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.framebuffer);
                return;
        }

        struct panfrost_screen *screen = pan_screen(ctx->base.screen);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        if (!batch->framebuffer)
                batch->framebuffer = screen->require_sfbd ?
                                     panfrost_attach_vt_sfbd(batch) :
                                     panfrost_attach_vt_mfbd(batch);

        for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i)
                ctx->payloads[i].postfix.framebuffer = batch->framebuffer;
}

/* Reset per-frame context, called on context initialisation as well as after
 * flushing a frame */

void
panfrost_invalidate_frame(struct panfrost_context *ctx)
{
        for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i)
                ctx->payloads[i].postfix.framebuffer = 0;

        if (ctx->rasterizer)
                ctx->dirty |= PAN_DIRTY_RASTERIZER;

        /* XXX */
        ctx->dirty |= PAN_DIRTY_SAMPLERS | PAN_DIRTY_TEXTURES;

        /* TODO: When does this need to be handled? */
        ctx->active_queries = true;
}

/* In practice, every field of these payloads should be configurable
 * arbitrarily, which means these functions are basically catch-alls for
 * as-of-yet unwavering unknowns */

static void
panfrost_emit_vertex_payload(struct panfrost_context *ctx)
{
        /* 0x2 bit clear on 32-bit T6XX */

        struct midgard_payload_vertex_tiler payload = {
                .gl_enables = 0x4 | 0x2,
        };

        /* Vertex and compute are closely coupled, so share a payload */

        memcpy(&ctx->payloads[PIPE_SHADER_VERTEX], &payload, sizeof(payload));
        memcpy(&ctx->payloads[PIPE_SHADER_COMPUTE], &payload, sizeof(payload));
}

static void
panfrost_emit_tiler_payload(struct panfrost_context *ctx)
{
        struct midgard_payload_vertex_tiler payload = {
                .prefix = {
                        .zero1 = 0xffff, /* Why is this only seen on test-quad-textured? */
                },
        };

        memcpy(&ctx->payloads[PIPE_SHADER_FRAGMENT], &payload, sizeof(payload));
}

static unsigned
translate_tex_wrap(enum pipe_tex_wrap w)
{
        switch (w) {
        case PIPE_TEX_WRAP_REPEAT:
                return MALI_WRAP_REPEAT;

        /* TODO: lower GL_CLAMP? */
        case PIPE_TEX_WRAP_CLAMP:
        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return MALI_WRAP_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return MALI_WRAP_CLAMP_TO_BORDER;

        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return MALI_WRAP_MIRRORED_REPEAT;

        default:
                unreachable("Invalid wrap");
        }
}
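
/* Treating PIPE_TEX_WRAP_CLAMP as CLAMP_TO_EDGE above is an approximation:
 * legacy GL_CLAMP is defined to blend edge texels with the border color when
 * linear filtering straddles the texture edge, which CLAMP_TO_EDGE does not
 * do. Lowering it properly (per the TODO above) would be needed for strict
 * conformance; the difference only shows with linear filtering at edges. */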

static unsigned
panfrost_translate_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_FUNC_ALWAYS;

        default:
                unreachable("Invalid func");
        }
}

static unsigned
panfrost_translate_alt_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_ALT_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_ALT_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_ALT_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_ALT_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_ALT_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_ALT_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_ALT_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_ALT_FUNC_ALWAYS;

        default:
                unreachable("Invalid alt func");
        }
}

static unsigned
panfrost_translate_stencil_op(enum pipe_stencil_op in)
{
        switch (in) {
        case PIPE_STENCIL_OP_KEEP:
                return MALI_STENCIL_KEEP;

        case PIPE_STENCIL_OP_ZERO:
                return MALI_STENCIL_ZERO;

        case PIPE_STENCIL_OP_REPLACE:
                return MALI_STENCIL_REPLACE;

        case PIPE_STENCIL_OP_INCR:
                return MALI_STENCIL_INCR;

        case PIPE_STENCIL_OP_DECR:
                return MALI_STENCIL_DECR;

        case PIPE_STENCIL_OP_INCR_WRAP:
                return MALI_STENCIL_INCR_WRAP;

        case PIPE_STENCIL_OP_DECR_WRAP:
                return MALI_STENCIL_DECR_WRAP;

        case PIPE_STENCIL_OP_INVERT:
                return MALI_STENCIL_INVERT;

        default:
                unreachable("Invalid stencil op");
        }
}

static void
panfrost_make_stencil_state(const struct pipe_stencil_state *in, struct mali_stencil_test *out)
{
        out->ref = 0; /* Gallium gets it from elsewhere */

        out->mask = in->valuemask;
        out->func = panfrost_translate_compare_func(in->func);
        out->sfail = panfrost_translate_stencil_op(in->fail_op);
        out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
        out->dppass = panfrost_translate_stencil_op(in->zpass_op);
}

static void
panfrost_default_shader_backend(struct panfrost_context *ctx)
{
        struct mali_shader_meta shader = {
                .alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000),

                .unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010,
                .unknown2_4 = MALI_NO_MSAA | 0x4e0,
        };

        /* unknown2_4 has 0x10 bit set on T6XX. We don't know why this is
         * required (independent of 32-bit/64-bit descriptors), or why it's not
         * used on later GPU revisions. Otherwise, all shader jobs fault on
         * these earlier chips (perhaps this is a chicken bit of some kind).
         * More investigation is needed. */

        if (ctx->is_t6xx) {
                shader.unknown2_4 |= 0x10;
        }

        /* This is Gallium state, so use the PIPE_ stencil op enums here;
         * panfrost_make_stencil_state translates them to MALI_ values */

        struct pipe_stencil_state default_stencil = {
                .enabled = 0,
                .func = PIPE_FUNC_ALWAYS,
                .fail_op = PIPE_STENCIL_OP_KEEP,
                .zfail_op = PIPE_STENCIL_OP_KEEP,
                .zpass_op = PIPE_STENCIL_OP_KEEP,
                .writemask = 0xFF,
                .valuemask = 0xFF
        };

        panfrost_make_stencil_state(&default_stencil, &shader.stencil_front);
        shader.stencil_mask_front = default_stencil.writemask;

        panfrost_make_stencil_state(&default_stencil, &shader.stencil_back);
        shader.stencil_mask_back = default_stencil.writemask;

        if (default_stencil.enabled)
                shader.unknown2_4 |= MALI_STENCIL_TEST;

        memcpy(&ctx->fragment_shader_core, &shader, sizeof(shader));
}

/* Generates a vertex/tiler job. This is, in some sense, the heart of the
 * graphics command stream. It should be called once per draw, according to
 * presentations. Set is_tiler for "tiler" jobs (fragment shader jobs, but in
 * Mali parlance, "fragment" refers to framebuffer writeout). Clear it for
 * vertex jobs. */

struct panfrost_transfer
panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        struct mali_job_descriptor_header job = {
                .job_type = is_tiler ? JOB_TYPE_TILER : JOB_TYPE_VERTEX,
                .job_descriptor_size = 1,
        };

        struct midgard_payload_vertex_tiler *payload = is_tiler ?
                &ctx->payloads[PIPE_SHADER_FRAGMENT] :
                &ctx->payloads[PIPE_SHADER_VERTEX];

        struct panfrost_transfer transfer = panfrost_allocate_transient(batch, sizeof(job) + sizeof(*payload));
        memcpy(transfer.cpu, &job, sizeof(job));
        memcpy(transfer.cpu + sizeof(job), payload, sizeof(*payload));
        return transfer;
}

mali_ptr
panfrost_vertex_buffer_address(struct panfrost_context *ctx, unsigned i)
{
        struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
        struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);

        return rsrc->bo->gpu + buf->buffer_offset;
}

static bool
panfrost_writes_point_size(struct panfrost_context *ctx)
{
        assert(ctx->shader[PIPE_SHADER_VERTEX]);
        struct panfrost_shader_state *vs = &ctx->shader[PIPE_SHADER_VERTEX]->variants[ctx->shader[PIPE_SHADER_VERTEX]->active_variant];

        return vs->writes_point_size && ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode == MALI_POINTS;
}

/* Stage the attribute descriptors so we can adjust src_offset
 * to let BOs align nicely */

static void
panfrost_stage_attributes(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        struct panfrost_vertex_state *so = ctx->vertex;

        size_t sz = sizeof(struct mali_attr_meta) * so->num_elements;
        struct panfrost_transfer transfer = panfrost_allocate_transient(batch, sz);
        struct mali_attr_meta *target = (struct mali_attr_meta *) transfer.cpu;

        /* Copy as-is for the first pass */
        memcpy(target, so->hw, sz);

        /* Fixup offsets for the second pass. Recall that the hardware
         * calculates attribute addresses as:
         *
         *      addr = base + (stride * vtx) + src_offset;
         *
         * However, on Mali, base must be aligned to 64 bytes, so we
         * instead let:
         *
         *      base' = base & ~63 = base - (base & 63)
         *
         * To compensate when using base' (see emit_vertex_data), we have
         * to adjust src_offset by the masked off piece:
         *
         *      addr' = base' + (stride * vtx) + (src_offset + (base & 63))
         *            = base - (base & 63) + (stride * vtx) + src_offset + (base & 63)
         *            = base + (stride * vtx) + src_offset
         *            = addr;
         *
         * QED.
         */

        unsigned start = ctx->payloads[PIPE_SHADER_VERTEX].offset_start;

        for (unsigned i = 0; i < so->num_elements; ++i) {
                unsigned vbi = so->pipe[i].vertex_buffer_index;
                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
                mali_ptr addr = panfrost_vertex_buffer_address(ctx, vbi);

                /* Adjust by the masked off bits of the offset */
                target[i].src_offset += (addr & 63);

                /* Also, somewhat obscurely, per-instance data needs to be
                 * offset in response to a delayed start in an indexed draw */

                if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start) {
                        target[i].src_offset -= buf->stride * start;
                }
        }

        ctx->payloads[PIPE_SHADER_VERTEX].postfix.attribute_meta = transfer.gpu;
}
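
/* A worked example of the fixup above, with assumed numbers for
 * illustration: if a vertex buffer lives at base = 0x1234 with
 * src_offset = 8, then base & 63 = 0x34, so we upload the 64-byte-aligned
 * base' = 0x1200 and stage src_offset' = 8 + 0x34. The hardware computes
 * 0x1200 + (stride * vtx) + 8 + 0x34 = 0x1234 + (stride * vtx) + 8,
 * exactly the address the unaligned base would have produced. */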

static void
panfrost_upload_sampler_descriptors(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        size_t desc_size = sizeof(struct mali_sampler_descriptor);

        for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
                mali_ptr upload = 0;

                if (ctx->sampler_count[t] && ctx->sampler_view_count[t]) {
                        size_t transfer_size = desc_size * ctx->sampler_count[t];

                        struct panfrost_transfer transfer =
                                panfrost_allocate_transient(batch, transfer_size);

                        struct mali_sampler_descriptor *desc =
                                (struct mali_sampler_descriptor *) transfer.cpu;

                        for (int i = 0; i < ctx->sampler_count[t]; ++i)
                                desc[i] = ctx->samplers[t][i]->hw;

                        upload = transfer.gpu;
                }

                ctx->payloads[t].postfix.sampler_descriptor = upload;
        }
}

static enum mali_texture_layout
panfrost_layout_for_texture(struct panfrost_resource *rsrc)
{
        /* TODO: other linear depth textures */
        bool is_depth = rsrc->base.format == PIPE_FORMAT_Z32_UNORM;

        switch (rsrc->layout) {
        case PAN_AFBC:
                return MALI_TEXTURE_AFBC;
        case PAN_TILED:
                assert(!is_depth);
                return MALI_TEXTURE_TILED;
        case PAN_LINEAR:
                return is_depth ? MALI_TEXTURE_TILED : MALI_TEXTURE_LINEAR;
        default:
                unreachable("Invalid texture layout");
        }
}

static mali_ptr
panfrost_upload_tex(
        struct panfrost_context *ctx,
        struct panfrost_sampler_view *view)
{
        if (!view)
                return (mali_ptr) 0;

        struct pipe_sampler_view *pview = &view->base;
        struct panfrost_resource *rsrc = pan_resource(pview->texture);

        /* Do we interleave an explicit stride with every element? */

        bool has_manual_stride = view->manual_stride;

        /* For easy access */

        bool is_buffer = pview->target == PIPE_BUFFER;
        unsigned first_level = is_buffer ? 0 : pview->u.tex.first_level;
        unsigned last_level = is_buffer ? 0 : pview->u.tex.last_level;
        unsigned first_layer = is_buffer ? 0 : pview->u.tex.first_layer;
        unsigned last_layer = is_buffer ? 0 : pview->u.tex.last_layer;

        /* The low bit is set when sampling from colour AFBC */
        bool is_afbc = rsrc->layout == PAN_AFBC;
        bool is_zs = rsrc->base.bind & PIPE_BIND_DEPTH_STENCIL;
        unsigned afbc_bit = (is_afbc && !is_zs) ? 1 : 0;

        /* Add the BO to the job so it's retained until the job is done. */
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        panfrost_batch_add_bo(batch, rsrc->bo);

        /* Add the usage flags in, since they can change across the CSO
         * lifetime due to layout switches */

        view->hw.format.layout = panfrost_layout_for_texture(rsrc);
        view->hw.format.manual_stride = has_manual_stride;

        /* Inject the addresses in, interleaving mip levels, cube faces, and
         * strides in that order */

        unsigned idx = 0;

        for (unsigned l = first_level; l <= last_level; ++l) {
                for (unsigned f = first_layer; f <= last_layer; ++f) {
                        view->hw.payload[idx++] =
                                panfrost_get_texture_address(rsrc, l, f) + afbc_bit;

                        if (has_manual_stride) {
                                view->hw.payload[idx++] =
                                        rsrc->slices[l].stride;
                        }
                }
        }

        return panfrost_upload_transient(batch, &view->hw,
                        sizeof(struct mali_texture_descriptor));
}
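
/* As an illustration of the interleaving the loop nest above produces: with
 * manual strides, two mip levels and two layers lay out the payload as
 *
 *      [addr(l0,f0), stride(l0), addr(l0,f1), stride(l0),
 *       addr(l1,f0), stride(l1), addr(l1,f1), stride(l1)]
 *
 * i.e. layers iterate innermost, with each address immediately followed by
 * its level's stride (strides are per-level, so they repeat per layer). */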

static void
panfrost_upload_texture_descriptors(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
                mali_ptr trampoline = 0;

                if (ctx->sampler_view_count[t]) {
                        uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];

                        for (int i = 0; i < ctx->sampler_view_count[t]; ++i)
                                trampolines[i] =
                                        panfrost_upload_tex(ctx, ctx->sampler_views[t][i]);

                        trampoline = panfrost_upload_transient(batch, trampolines,
                                        sizeof(uint64_t) * ctx->sampler_view_count[t]);
                }

                ctx->payloads[t].postfix.texture_trampoline = trampoline;
        }
}

struct sysval_uniform {
        union {
                float f[4];
                int32_t i[4];
                uint32_t u[4];
                uint64_t du[2];
        };
};

static void panfrost_upload_viewport_scale_sysval(struct panfrost_context *ctx,
                struct sysval_uniform *uniform)
{
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->scale[0];
        uniform->f[1] = vp->scale[1];
        uniform->f[2] = vp->scale[2];
}

static void panfrost_upload_viewport_offset_sysval(struct panfrost_context *ctx,
                struct sysval_uniform *uniform)
{
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->translate[0];
        uniform->f[1] = vp->translate[1];
        uniform->f[2] = vp->translate[2];
}

static void panfrost_upload_txs_sysval(struct panfrost_context *ctx,
                enum pipe_shader_type st,
                unsigned int sysvalid,
                struct sysval_uniform *uniform)
{
        unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
        unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
        bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
        struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;

        assert(dim);
        uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);

        if (dim > 1)
                uniform->i[1] = u_minify(tex->texture->height0,
                                tex->u.tex.first_level);

        if (dim > 2)
                uniform->i[2] = u_minify(tex->texture->depth0,
                                tex->u.tex.first_level);

        if (is_array)
                uniform->i[dim] = tex->texture->array_size;
}
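
/* So, for example, a texture-size query on a 2D array texture (dim = 2,
 * is_array = true) fills i[0] and i[1] with the minified width/height of the
 * base selected level and i[2] with the layer count -- exactly the
 * (w, h, layers) ivec3 that GLSL's textureSize() expects for a
 * sampler2DArray. */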

static void panfrost_upload_ssbo_sysval(
        struct panfrost_context *ctx,
        enum pipe_shader_type st,
        unsigned ssbo_id,
        struct sysval_uniform *uniform)
{
        assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
        struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];

        /* Compute address */
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;

        panfrost_batch_add_bo(batch, bo);

        /* Upload address and size as sysval */
        uniform->du[0] = bo->gpu + sb.buffer_offset;
        uniform->u[2] = sb.buffer_size;
}

static void panfrost_upload_num_work_groups_sysval(struct panfrost_context *ctx,
                struct sysval_uniform *uniform)
{
        uniform->u[0] = ctx->compute_grid->grid[0];
        uniform->u[1] = ctx->compute_grid->grid[1];
        uniform->u[2] = ctx->compute_grid->grid[2];
}

static void panfrost_upload_sysvals(struct panfrost_context *ctx, void *buf,
                struct panfrost_shader_state *ss,
                enum pipe_shader_type st)
{
        struct sysval_uniform *uniforms = (void *)buf;

        for (unsigned i = 0; i < ss->sysval_count; ++i) {
                int sysval = ss->sysval[i];

                switch (PAN_SYSVAL_TYPE(sysval)) {
                case PAN_SYSVAL_VIEWPORT_SCALE:
                        panfrost_upload_viewport_scale_sysval(ctx, &uniforms[i]);
                        break;
                case PAN_SYSVAL_VIEWPORT_OFFSET:
                        panfrost_upload_viewport_offset_sysval(ctx, &uniforms[i]);
                        break;
                case PAN_SYSVAL_TEXTURE_SIZE:
                        panfrost_upload_txs_sysval(ctx, st, PAN_SYSVAL_ID(sysval),
                                        &uniforms[i]);
                        break;
                case PAN_SYSVAL_SSBO:
                        panfrost_upload_ssbo_sysval(ctx, st, PAN_SYSVAL_ID(sysval),
                                        &uniforms[i]);
                        break;
                case PAN_SYSVAL_NUM_WORK_GROUPS:
                        panfrost_upload_num_work_groups_sysval(ctx, &uniforms[i]);
                        break;

                default:
                        assert(0);
                }
        }
}
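
/* Note each sysval occupies one 16-byte (vec4) slot regardless of its actual
 * size -- the viewport and workgroup sysvals only fill three of the four
 * components, and the SSBO sysval packs a 64-bit address plus a 32-bit size.
 * That is why the uniform upload later sizes the sysval area as
 * 4 * sizeof(float) * sysval_count. */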

static const void *
panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf, unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);

        if (rsrc)
                return rsrc->bo->cpu;
        else if (cb->user_buffer)
                return cb->user_buffer;
        else
                unreachable("No constant buffer");
}

static mali_ptr
panfrost_map_constant_buffer_gpu(
        struct panfrost_context *ctx,
        struct panfrost_constant_buffer *buf,
        unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        if (rsrc) {
                panfrost_batch_add_bo(batch, rsrc->bo);
                return rsrc->bo->gpu;
        } else if (cb->user_buffer) {
                return panfrost_upload_transient(batch, cb->user_buffer, cb->buffer_size);
        } else {
                unreachable("No constant buffer");
        }
}

/* Compute number of UBOs active (more specifically, compute the highest UBO
 * number addressable -- if there are gaps, include them in the count anyway).
 * We always include UBO #0 in the count, since we *need* uniforms enabled for
 * sysvals. */

static unsigned
panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage)
{
        unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1;
        return 32 - __builtin_clz(mask);
}
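
/* As an illustration: with UBOs 0 and 3 enabled, the mask ORed with 1 is
 * 0b1001, __builtin_clz(0b1001) = 28 on a 32-bit int, and we report
 * 32 - 28 = 4 addressable UBOs. The gap at indices 1 and 2 is counted here
 * and later stubbed out with a poison pointer when descriptors are built. */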

/* Fixes up a shader state with current state, returning a GPU address to the
 * patched shader */

static mali_ptr
panfrost_patch_shader_state(
        struct panfrost_context *ctx,
        struct panfrost_shader_state *ss,
        enum pipe_shader_type stage,
        bool should_upload)
{
        ss->tripipe->texture_count = ctx->sampler_view_count[stage];
        ss->tripipe->sampler_count = ctx->sampler_count[stage];

        ss->tripipe->midgard1.flags = 0x220;

        unsigned ubo_count = panfrost_ubo_count(ctx, stage);
        ss->tripipe->midgard1.uniform_buffer_count = ubo_count;

        /* We can't reuse over frames; that's not safe. The descriptor must be
         * uploaded transiently */

        if (should_upload) {
                struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

                return panfrost_upload_transient(batch, ss->tripipe,
                                sizeof(struct mali_shader_meta));
        }

        /* If we don't need an upload, don't bother */
        return 0;
}

static void
panfrost_patch_shader_state_compute(
        struct panfrost_context *ctx,
        enum pipe_shader_type stage,
        bool should_upload)
{
        struct panfrost_shader_variants *all = ctx->shader[stage];

        if (!all) {
                ctx->payloads[stage].postfix._shader_upper = 0;
                return;
        }

        struct panfrost_shader_state *s = &all->variants[all->active_variant];

        ctx->payloads[stage].postfix._shader_upper =
                panfrost_patch_shader_state(ctx, s, stage, should_upload) >> 4;
}

/* Go through dirty flags and actualise them in the cmdstream. */

void
panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        struct panfrost_screen *screen = pan_screen(ctx->base.screen);

        panfrost_batch_add_fbo_bos(batch);
        panfrost_attach_vt_framebuffer(ctx);

        if (with_vertex_data) {
                panfrost_emit_vertex_data(batch);

                /* Varyings emitted for -all- geometry */
                unsigned total_count = ctx->padded_count * ctx->instance_count;
                panfrost_emit_varying_descriptor(ctx, total_count);
        }

        bool msaa = ctx->rasterizer->base.multisample;

        if (ctx->dirty & PAN_DIRTY_RASTERIZER) {
                ctx->payloads[PIPE_SHADER_FRAGMENT].gl_enables = ctx->rasterizer->tiler_gl_enables;

                /* TODO: Sample size */
                SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_HAS_MSAA, msaa);
                SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !msaa);
        }

        panfrost_batch_set_requirements(batch);

        if (ctx->occlusion_query) {
                ctx->payloads[PIPE_SHADER_FRAGMENT].gl_enables |= MALI_OCCLUSION_QUERY | MALI_OCCLUSION_PRECISE;
                ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.occlusion_counter = ctx->occlusion_query->transfer.gpu;
        }

        panfrost_patch_shader_state_compute(ctx, PIPE_SHADER_VERTEX, true);
        panfrost_patch_shader_state_compute(ctx, PIPE_SHADER_COMPUTE, true);

        if (ctx->dirty & (PAN_DIRTY_RASTERIZER | PAN_DIRTY_VS)) {
                /* Check if we need to link the gl_PointSize varying */
                if (!panfrost_writes_point_size(ctx)) {
                        /* If the size is constant, write it out. Otherwise,
                         * don't touch primitive_size (since we would clobber
                         * the pointer there) */

                        ctx->payloads[PIPE_SHADER_FRAGMENT].primitive_size.constant = ctx->rasterizer->base.line_width;
                }
        }

        /* TODO: Maybe dirty track FS, maybe not. For now, it's transient. */
        if (ctx->shader[PIPE_SHADER_FRAGMENT])
                ctx->dirty |= PAN_DIRTY_FS;

        if (ctx->dirty & PAN_DIRTY_FS) {
                assert(ctx->shader[PIPE_SHADER_FRAGMENT]);
                struct panfrost_shader_state *variant = &ctx->shader[PIPE_SHADER_FRAGMENT]->variants[ctx->shader[PIPE_SHADER_FRAGMENT]->active_variant];

                panfrost_patch_shader_state(ctx, variant, PIPE_SHADER_FRAGMENT, false);

                panfrost_batch_add_bo(batch, variant->bo);

#define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name

                COPY(shader);
                COPY(attribute_count);
                COPY(varying_count);
                COPY(texture_count);
                COPY(sampler_count);
                COPY(midgard1.uniform_count);
                COPY(midgard1.uniform_buffer_count);
                COPY(midgard1.work_count);
                COPY(midgard1.flags);
                COPY(midgard1.unknown2);

#undef COPY

                /* Get blending setup */
                unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);

                struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];

                for (unsigned c = 0; c < rt_count; ++c)
                        blend[c] = panfrost_get_blend_for_context(ctx, c);

                /* If there is a blend shader, work registers are shared. XXX: opt */

                for (unsigned c = 0; c < rt_count; ++c) {
                        if (blend[c].is_shader)
                                ctx->fragment_shader_core.midgard1.work_count = 16;
                }

                /* Set late due to depending on render state */
                unsigned flags = ctx->fragment_shader_core.midgard1.flags;

                /* Depending on whether it's legal in the given shader, we try
                 * to enable early-z testing (or forward-pixel kill?) */

                if (!variant->can_discard)
                        flags |= MALI_EARLY_Z;

                /* Any time texturing is used, derivatives are implicitly
                 * calculated, so we need to enable helper invocations */

                if (variant->helper_invocations)
                        flags |= MALI_HELPER_INVOCATIONS;

                ctx->fragment_shader_core.midgard1.flags = flags;

                /* Assign the stencil refs late */

                unsigned front_ref = ctx->stencil_ref.ref_value[0];
                unsigned back_ref = ctx->stencil_ref.ref_value[1];
                bool back_enab = ctx->depth_stencil->stencil[1].enabled;

                ctx->fragment_shader_core.stencil_front.ref = front_ref;
                ctx->fragment_shader_core.stencil_back.ref = back_enab ? back_ref : front_ref;

                /* CAN_DISCARD should be set if the fragment shader possibly
                 * contains a 'discard' instruction. It is likely this is
                 * related to optimizations related to forward-pixel kill, as
                 * per "Mali Performance 3: Is EGL_BUFFER_PRESERVED a good
                 * thing?" by Peter Harris
                 */

                if (variant->can_discard) {
                        ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
                        ctx->fragment_shader_core.midgard1.flags |= 0x400;
                }

                /* Even on MFBD, the shader descriptor gets blend shaders. It's
                 * *also* copied to the blend_meta appended (by convention),
                 * but this is the field actually read by the hardware. (Or
                 * maybe both are read...?) */

                if (blend[0].is_shader) {
                        ctx->fragment_shader_core.blend.shader =
                                blend[0].shader.bo->gpu | blend[0].shader.first_tag;
                } else {
                        ctx->fragment_shader_core.blend.shader = 0;
                }

                if (screen->require_sfbd) {
                        /* On single render target platforms (SFBD), the blend
                         * information is inside the shader meta itself. We
                         * additionally need to signal CAN_DISCARD for nontrivial blend
                         * modes (so we're able to read back the destination buffer) */

                        if (!blend[0].is_shader) {
                                ctx->fragment_shader_core.blend.equation =
                                        *blend[0].equation.equation;
                                ctx->fragment_shader_core.blend.constant =
                                        blend[0].equation.constant;
                        }

                        if (!blend[0].no_blending) {
                                ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
                        }
                }

                size_t size = sizeof(struct mali_shader_meta) + (sizeof(struct midgard_blend_rt) * rt_count);
                struct panfrost_transfer transfer = panfrost_allocate_transient(batch, size);
                memcpy(transfer.cpu, &ctx->fragment_shader_core, sizeof(struct mali_shader_meta));

                ctx->payloads[PIPE_SHADER_FRAGMENT].postfix._shader_upper = (transfer.gpu) >> 4;

                if (!screen->require_sfbd) {
                        /* Additional blend descriptor tacked on for jobs using MFBD */

                        struct midgard_blend_rt rts[4];

                        for (unsigned i = 0; i < rt_count; ++i) {
                                unsigned blend_count = 0x200;

                                if (blend[i].is_shader) {
                                        /* For a blend shader, the bottom nibble corresponds to
                                         * the number of work registers used, which signals the
                                         * -existence- of a blend shader */

                                        assert(blend[i].shader.work_count >= 2);
                                        blend_count |= MIN2(blend[i].shader.work_count, 3);
                                } else {
                                        /* Otherwise, the bottom bit simply specifies if
                                         * blending (anything other than REPLACE) is enabled */

                                        if (!blend[i].no_blending)
                                                blend_count |= 0x1;
                                }

                                bool is_srgb =
                                        (ctx->pipe_framebuffer.nr_cbufs > i) &&
                                        (ctx->pipe_framebuffer.cbufs[i]) &&
                                        util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);

                                rts[i].flags = blend_count;

                                if (is_srgb)
                                        rts[i].flags |= MALI_BLEND_SRGB;

                                if (!ctx->blend->base.dither)
                                        rts[i].flags |= MALI_BLEND_NO_DITHER;

                                /* TODO: sRGB in blend shaders is currently
                                 * unimplemented. Contact me (Alyssa) if you're
                                 * interested in working on this. We have
                                 * native Midgard ops for helping here, but
                                 * they're not well-understood yet. */

                                assert(!(is_srgb && blend[i].is_shader));

                                if (blend[i].is_shader) {
                                        rts[i].blend.shader = blend[i].shader.bo->gpu | blend[i].shader.first_tag;
                                } else {
                                        rts[i].blend.equation = *blend[i].equation.equation;
                                        rts[i].blend.constant = blend[i].equation.constant;
                                }
                        }

                        memcpy(transfer.cpu + sizeof(struct mali_shader_meta), rts, sizeof(rts[0]) * rt_count);
                }
        }

        /* We stage to transient, so always dirty... */
        if (ctx->vertex)
                panfrost_stage_attributes(ctx);

        if (ctx->dirty & PAN_DIRTY_SAMPLERS)
                panfrost_upload_sampler_descriptors(ctx);

        if (ctx->dirty & PAN_DIRTY_TEXTURES)
                panfrost_upload_texture_descriptors(ctx);

        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        for (int i = 0; i < PIPE_SHADER_TYPES; ++i) {
                struct panfrost_shader_variants *all = ctx->shader[i];

                if (!all)
                        continue;

                struct panfrost_constant_buffer *buf = &ctx->constant_buffer[i];

                struct panfrost_shader_state *ss = &all->variants[all->active_variant];

                panfrost_batch_add_bo(batch, ss->bo);

                /* Uniforms are implicitly UBO #0 */
                bool has_uniforms = buf->enabled_mask & (1 << 0);

                /* Allocate room for the sysval and the uniforms */
                size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
                size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
                size_t size = sys_size + uniform_size;
                struct panfrost_transfer transfer = panfrost_allocate_transient(batch, size);

                /* Upload sysvals requested by the shader */
                panfrost_upload_sysvals(ctx, transfer.cpu, ss, i);

                /* Upload uniforms */
                if (has_uniforms) {
                        const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
                        memcpy(transfer.cpu + sys_size, cpu, uniform_size);
                }

                int uniform_count =
                        ctx->shader[i]->variants[ctx->shader[i]->active_variant].uniform_count;

                struct mali_vertex_tiler_postfix *postfix =
                        &ctx->payloads[i].postfix;

                /* Next up, attach UBOs. UBO #0 is the uniforms we just
                 * uploaded */

                unsigned ubo_count = panfrost_ubo_count(ctx, i);
                assert(ubo_count >= 1);

                size_t sz = sizeof(struct mali_uniform_buffer_meta) * ubo_count;
                struct mali_uniform_buffer_meta ubos[PAN_MAX_CONST_BUFFERS];

                /* Upload uniforms as a UBO */
                ubos[0].size = MALI_POSITIVE((2 + uniform_count));
                ubos[0].ptr = transfer.gpu >> 2;

                /* The rest are honest-to-goodness UBOs */

                for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
                        size_t usz = buf->cb[ubo].buffer_size;

                        bool enabled = buf->enabled_mask & (1 << ubo);
                        bool empty = usz == 0;

                        if (!enabled || empty) {
                                /* Stub out disabled UBOs to catch accesses */

                                ubos[ubo].size = 0;
                                ubos[ubo].ptr = 0xDEAD0000;
                                continue;
                        }

                        mali_ptr gpu = panfrost_map_constant_buffer_gpu(ctx, buf, ubo);

                        unsigned bytes_per_field = 16;
                        unsigned aligned = ALIGN_POT(usz, bytes_per_field);
                        unsigned fields = aligned / bytes_per_field;

                        ubos[ubo].size = MALI_POSITIVE(fields);
                        ubos[ubo].ptr = gpu >> 2;
                }

                mali_ptr ubufs = panfrost_upload_transient(batch, ubos, sz);
                postfix->uniforms = transfer.gpu;
                postfix->uniform_buffers = ubufs;

                buf->dirty_mask = 0;
        }

        /* TODO: Upload the viewport somewhere more appropriate */

        /* Clip bounds are encoded as floats. The viewport itself is encoded as
         * (somewhat) asymmetric ints. */
        const struct pipe_scissor_state *ss = &ctx->scissor;

        struct mali_viewport view = {
                /* By default, do no viewport clipping, i.e. clip to (-inf,
                 * inf) in each direction. Clipping to the viewport in theory
                 * should work, but in practice causes issues when we're not
                 * explicitly trying to scissor */

                .clip_minx = -INFINITY,
                .clip_miny = -INFINITY,
                .clip_maxx = INFINITY,
                .clip_maxy = INFINITY,
        };

        /* Always scissor to the viewport by default. */
        float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
        float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));

        float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
        float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));

        float minz = (vp->translate[2] - fabsf(vp->scale[2]));
        float maxz = (vp->translate[2] + fabsf(vp->scale[2]));

        /* Apply the scissor test */

        unsigned minx, miny, maxx, maxy;

        if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
                minx = MAX2(ss->minx, vp_minx);
                miny = MAX2(ss->miny, vp_miny);
                maxx = MIN2(ss->maxx, vp_maxx);
                maxy = MIN2(ss->maxy, vp_maxy);
        } else {
                minx = vp_minx;
                miny = vp_miny;
                maxx = vp_maxx;
                maxy = vp_maxy;
        }

        /* Hardware needs the min/max to be strictly ordered, so flip if we
         * need to. The viewport transformation in the vertex shader will
         * handle the negatives if we don't */

        if (miny > maxy) {
                unsigned temp = miny;
                miny = maxy;
                maxy = temp;
        }

        if (minx > maxx) {
                unsigned temp = minx;
                minx = maxx;
                maxx = temp;
        }

        if (minz > maxz) {
                float temp = minz;
                minz = maxz;
                maxz = temp;
        }

        /* Clamp to the framebuffer size as a last check */

        minx = MIN2(ctx->pipe_framebuffer.width, minx);
        maxx = MIN2(ctx->pipe_framebuffer.width, maxx);

        miny = MIN2(ctx->pipe_framebuffer.height, miny);
        maxy = MIN2(ctx->pipe_framebuffer.height, maxy);

        /* Update the job, unless we're doing wallpapering (whose lack of
         * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
         * just... be faster :) */

        if (!ctx->wallpaper_batch)
                panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);

        /* Upload */

        view.viewport0[0] = minx;
        view.viewport1[0] = MALI_POSITIVE(maxx);

        view.viewport0[1] = miny;
        view.viewport1[1] = MALI_POSITIVE(maxy);

        view.clip_minz = minz;
        view.clip_maxz = maxz;

        ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.viewport =
                panfrost_upload_transient(batch,
                                &view,
                                sizeof(struct mali_viewport));

        ctx->dirty = 0;
}
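
/* To make the viewport-to-scissor conversion above concrete (assumed
 * numbers, GL conventions): a glViewport(0, 0, 640, 480) state arrives as
 * translate = (320, 240, ...), scale = (320, -240, ...), so
 * vp_minx = 320 - |320| = 0 and vp_maxx = 320 + |320| = 640, and likewise
 * (0, 480) vertically. The fabsf() calls mean flipped (negative-scale)
 * viewports produce the same screen-space bounding box. */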

/* Corresponds to exactly one draw, but does not submit anything */

static void
panfrost_queue_draw(struct panfrost_context *ctx)
{
        /* Handle dirty flags now */
        panfrost_emit_for_draw(ctx, true);

        /* If rasterizer discard is enabled, only submit the vertex job */

        bool rasterizer_discard = ctx->rasterizer
                                  && ctx->rasterizer->base.rasterizer_discard;

        struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false);
        struct panfrost_transfer tiler;

        if (!rasterizer_discard)
                tiler = panfrost_vertex_tiler_job(ctx, true);

        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        if (rasterizer_discard)
                panfrost_scoreboard_queue_vertex_job(batch, vertex, FALSE);
        else if (ctx->wallpaper_batch)
                panfrost_scoreboard_queue_fused_job_prepend(batch, vertex, tiler);
        else
                panfrost_scoreboard_queue_fused_job(batch, vertex, tiler);
}

/* The entire frame is in memory -- send it off to the kernel! */

void
panfrost_flush(
        struct pipe_context *pipe,
        struct pipe_fence_handle **fence,
        unsigned flags)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        /* Submit the frame itself */
        panfrost_batch_submit(batch);

        if (fence) {
                struct panfrost_fence *f = panfrost_fence_create(ctx);
                pipe->screen->fence_reference(pipe->screen, fence, NULL);
                *fence = (struct pipe_fence_handle *)f;
        }
}

#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c;

static int
g2m_draw_mode(enum pipe_prim_type mode)
{
        switch (mode) {
                DEFINE_CASE(POINTS);
                DEFINE_CASE(LINES);
                DEFINE_CASE(LINE_LOOP);
                DEFINE_CASE(LINE_STRIP);
                DEFINE_CASE(TRIANGLES);
                DEFINE_CASE(TRIANGLE_STRIP);
                DEFINE_CASE(TRIANGLE_FAN);
                DEFINE_CASE(QUADS);
                DEFINE_CASE(QUAD_STRIP);
                DEFINE_CASE(POLYGON);

        default:
                unreachable("Invalid draw mode");
        }
}

#undef DEFINE_CASE

static unsigned
panfrost_translate_index_size(unsigned size)
{
        switch (size) {
        case 1:
                return MALI_DRAW_INDEXED_UINT8;

        case 2:
                return MALI_DRAW_INDEXED_UINT16;

        case 4:
                return MALI_DRAW_INDEXED_UINT32;

        default:
                unreachable("Invalid index size");
        }
}

/* Gets a GPU address for the associated index buffer. Only guaranteed to be
 * good for the duration of the draw (transient), could last longer */

static mali_ptr
panfrost_get_index_buffer_mapped(struct panfrost_context *ctx, const struct pipe_draw_info *info)
{
        struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);

        off_t offset = info->start * info->index_size;
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        if (!info->has_user_indices) {
                /* Only resources can be directly mapped */
                panfrost_batch_add_bo(batch, rsrc->bo);
                return rsrc->bo->gpu + offset;
        } else {
                /* Otherwise, we need to upload to transient memory */
                const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
                return panfrost_upload_transient(batch, ibuf8 + offset, info->count * info->index_size);
        }
}
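
/* For example, an indexed draw with 16-bit indices and info->start = 100
 * reads from byte offset 200: for a resource-backed index buffer that offset
 * is applied to the BO's GPU address directly, while user index arrays are
 * copied into transient memory first (count * index_size bytes, starting
 * from the same offset). */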

static bool
panfrost_scissor_culls_everything(struct panfrost_context *ctx)
{
        const struct pipe_scissor_state *ss = &ctx->scissor;

        /* Check if we're scissoring at all */

        if (!(ctx->rasterizer && ctx->rasterizer->base.scissor))
                return false;

        return (ss->minx == ss->maxx) || (ss->miny == ss->maxy);
}

/* Count generated primitives (when there are no geom/tess shaders) for
 * transform feedback */

static void
panfrost_statistics_record(
        struct panfrost_context *ctx,
        const struct pipe_draw_info *info)
{
        if (!ctx->active_queries)
                return;

        uint32_t prims = u_prims_for_vertices(info->mode, info->count);
        ctx->prims_generated += prims;

        if (!ctx->streamout.num_targets)
                return;

        ctx->tf_prims_generated += prims;
}

static void
panfrost_draw_vbo(
        struct pipe_context *pipe,
        const struct pipe_draw_info *info)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* First of all, check the scissor to see if anything is drawn at all.
         * If it's not, we drop the draw (mostly a conformance issue;
         * well-behaved apps shouldn't hit this) */

        if (panfrost_scissor_culls_everything(ctx))
                return;

        int mode = info->mode;

        /* Fall back on an unsupported restart index */
        unsigned primitive_index = (1 << (info->index_size * 8)) - 1;

        if (info->primitive_restart && info->index_size
            && info->restart_index != primitive_index) {
                util_draw_vbo_without_prim_restart(pipe, info);
                return;
        }

        /* Fallback for unsupported modes */

        assert(ctx->rasterizer != NULL);

        if (!(ctx->draw_modes & (1 << mode))) {
                if (mode == PIPE_PRIM_QUADS && info->count == 4 && !ctx->rasterizer->base.flatshade) {
                        mode = PIPE_PRIM_TRIANGLE_FAN;
                } else {
                        if (info->count < 4) {
                                /* Degenerate case? */
                                return;
                        }

                        util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
                        util_primconvert_draw_vbo(ctx->primconvert, info);
                        return;
                }
        }

        ctx->payloads[PIPE_SHADER_VERTEX].offset_start = info->start;
        ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = info->start;

        /* Now that we have a guaranteed terminating path, find the job.
         * Assignment commented out to prevent unused warning */

        /* struct panfrost_batch *batch = */ panfrost_get_batch_for_fbo(ctx);

        ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode = g2m_draw_mode(mode);

        /* Take into account a negative bias */
        ctx->vertex_count = info->count + abs(info->index_bias);
        ctx->instance_count = info->instance_count;
        ctx->active_prim = info->mode;

        /* For non-indexed draws, they're the same */
        unsigned vertex_count = ctx->vertex_count;

        unsigned draw_flags = 0;

        /* The draw flags control how primitive size is interpreted */

        if (panfrost_writes_point_size(ctx))
                draw_flags |= MALI_DRAW_VARYING_SIZE;

        if (info->primitive_restart)
                draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;

        /* For higher amounts of vertices (greater than what fits in a 16-bit
         * short), the other value is needed, otherwise there will be bizarre
         * rendering artefacts. It's not clear what these values mean yet. This
         * change is also needed for instancing and sometimes points (perhaps
         * related to dynamically setting gl_PointSize) */

        bool is_points = mode == PIPE_PRIM_POINTS;
        bool many_verts = ctx->vertex_count > 0xFFFF;
        bool instanced = ctx->instance_count > 1;

        draw_flags |= (is_points || many_verts || instanced) ? 0x3000 : 0x18000;

        /* This doesn't make much sense */
        if (mode == PIPE_PRIM_LINE_STRIP) {
                draw_flags |= 0x800;
        }

        panfrost_statistics_record(ctx, info);

        if (info->index_size) {
                /* Calculate the min/max index used so we can figure out how
                 * many times to invoke the vertex shader */

                /* Fetch / calculate index bounds */
                unsigned min_index = 0, max_index = 0;

                if (info->max_index == ~0u) {
                        u_vbuf_get_minmax_index(pipe, info, &min_index, &max_index);
                } else {
                        min_index = info->min_index;
                        max_index = info->max_index;
                }

                /* Use the corresponding values */
                vertex_count = max_index - min_index + 1;
                ctx->payloads[PIPE_SHADER_VERTEX].offset_start = min_index + info->index_bias;
                ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = min_index + info->index_bias;

                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = -min_index;
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(info->count);

                //assert(!info->restart_index); /* TODO: Research */

                draw_flags |= panfrost_translate_index_size(info->index_size);
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices = panfrost_get_index_buffer_mapped(ctx, info);
        } else {
                /* Index count == vertex count, if no indexing is applied, as
                 * if it is internally indexed in the expected order */

                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = 0;
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(ctx->vertex_count);

                /* Reverse index state */
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices = (u64) NULL;
        }

        /* Dispatch "compute jobs" for the vertex/tiler pair as (1,
         * vertex_count, 1) */

        panfrost_pack_work_groups_fused(
                &ctx->payloads[PIPE_SHADER_VERTEX].prefix,
                &ctx->payloads[PIPE_SHADER_FRAGMENT].prefix,
                1, vertex_count, info->instance_count,
                1, 1, 1);

        ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.unknown_draw = draw_flags;

        /* Encode the padded vertex count */

        if (info->instance_count > 1) {
                /* Triangles have non-even vertex counts so they change how
                 * padding works internally */

                bool is_triangle =
                        mode == PIPE_PRIM_TRIANGLES ||
                        mode == PIPE_PRIM_TRIANGLE_STRIP ||
                        mode == PIPE_PRIM_TRIANGLE_FAN;

                struct pan_shift_odd so =
                        panfrost_padded_vertex_count(vertex_count, !is_triangle);

                ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = so.shift;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = so.shift;

                ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = so.odd;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = so.odd;

                ctx->padded_count = pan_expand_shift_odd(so);
        } else {
                ctx->padded_count = ctx->vertex_count;

                /* Reset instancing state */
                ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = 0;
                ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = 0;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = 0;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = 0;
        }

        /* Fire off the draw itself */
        panfrost_queue_draw(ctx);

        /* Increment transform feedback offsets */

        for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
                unsigned output_count = u_stream_outputs_for_vertices(
                                ctx->active_prim, ctx->vertex_count);

                ctx->streamout.offsets[i] += output_count;
        }
}

/* CSO state */

static void
panfrost_generic_cso_delete(struct pipe_context *pctx, void *hwcso)
{
        free(hwcso);
}

static void *
panfrost_create_rasterizer_state(
        struct pipe_context *pctx,
        const struct pipe_rasterizer_state *cso)
{
        struct panfrost_rasterizer *so = CALLOC_STRUCT(panfrost_rasterizer);

        so->base = *cso;

        /* Bitmask, unknown meaning of the start value. 0x105 on 32-bit T6XX */
        so->tiler_gl_enables = 0x7;

        if (cso->front_ccw)
                so->tiler_gl_enables |= MALI_FRONT_CCW_TOP;

        if (cso->cull_face & PIPE_FACE_FRONT)
                so->tiler_gl_enables |= MALI_CULL_FACE_FRONT;

        if (cso->cull_face & PIPE_FACE_BACK)
                so->tiler_gl_enables |= MALI_CULL_FACE_BACK;

        return so;
}

static void
panfrost_bind_rasterizer_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        /* TODO: Why can't rasterizer be NULL ever? Other drivers are fine.. */
        if (!hwcso)
                return;

        ctx->rasterizer = hwcso;
        ctx->dirty |= PAN_DIRTY_RASTERIZER;

        ctx->fragment_shader_core.depth_units = ctx->rasterizer->base.offset_units;
        ctx->fragment_shader_core.depth_factor = ctx->rasterizer->base.offset_scale;

        /* Guaranteed with the core GL call, so don't expose ARB_polygon_offset */
        assert(ctx->rasterizer->base.offset_clamp == 0.0);

        /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */

        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_DEPTH_RANGE_A, ctx->rasterizer->base.offset_tri);
        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_DEPTH_RANGE_B, ctx->rasterizer->base.offset_tri);

        /* Point sprites are emulated */

        struct panfrost_shader_state *variant =
                ctx->shader[PIPE_SHADER_FRAGMENT] ? &ctx->shader[PIPE_SHADER_FRAGMENT]->variants[ctx->shader[PIPE_SHADER_FRAGMENT]->active_variant] : NULL;

        if (ctx->rasterizer->base.sprite_coord_enable || (variant && variant->point_sprite_mask))
                ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
}

static void *
panfrost_create_vertex_elements_state(
        struct pipe_context *pctx,
        unsigned num_elements,
        const struct pipe_vertex_element *elements)
{
        struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state);

        so->num_elements = num_elements;
        memcpy(so->pipe, elements, sizeof(*elements) * num_elements);

        for (int i = 0; i < num_elements; ++i) {
                so->hw[i].index = i;

                enum pipe_format fmt = elements[i].src_format;
                const struct util_format_description *desc = util_format_description(fmt);
                so->hw[i].unknown1 = 0x2;
                so->hw[i].swizzle = panfrost_get_default_swizzle(desc->nr_channels);

                so->hw[i].format = panfrost_find_format(desc);

                /* The field itself should probably be shifted over */
                so->hw[i].src_offset = elements[i].src_offset;
        }

        return so;
}

static void
panfrost_bind_vertex_elements_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        ctx->vertex = hwcso;
        ctx->dirty |= PAN_DIRTY_VERTEX;
}

static void *
panfrost_create_shader_state(
        struct pipe_context *pctx,
        const struct pipe_shader_state *cso)
{
        struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
        so->base = *cso;

        /* Token deep copy to prevent memory corruption */

        if (cso->type == PIPE_SHADER_IR_TGSI)
                so->base.tokens = tgsi_dup_tokens(so->base.tokens);

        return so;
}

static void
panfrost_delete_shader_state(
        struct pipe_context *pctx,
        void *so)
{
        struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;

        if (cso->base.type == PIPE_SHADER_IR_TGSI) {
                DBG("Deleting TGSI shader leaks duplicated tokens\n");
        }

        for (unsigned i = 0; i < cso->variant_count; ++i) {
                struct panfrost_shader_state *shader_state = &cso->variants[i];
                panfrost_bo_unreference(shader_state->bo);
                shader_state->bo = NULL;
        }

        free(so);
}
1766
1767 static void *
1768 panfrost_create_sampler_state(
1769 struct pipe_context *pctx,
1770 const struct pipe_sampler_state *cso)
1771 {
1772 struct panfrost_sampler_state *so = CALLOC_STRUCT(panfrost_sampler_state);
1773 so->base = *cso;
1774
1775 /* sampler_state corresponds to mali_sampler_descriptor, which we can generate entirely here */
1776
1777 bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
1778 bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
1779 bool mip_linear = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;
1780
1781 unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
1782 unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
1783 unsigned mip_filter = mip_linear ?
1784 (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
1785 unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;
1786
1787 struct mali_sampler_descriptor sampler_descriptor = {
1788 .filter_mode = min_filter | mag_filter | mip_filter | normalized,
1789 .wrap_s = translate_tex_wrap(cso->wrap_s),
1790 .wrap_t = translate_tex_wrap(cso->wrap_t),
1791 .wrap_r = translate_tex_wrap(cso->wrap_r),
1792 .compare_func = panfrost_translate_alt_compare_func(cso->compare_func),
1793 .border_color = {
1794 cso->border_color.f[0],
1795 cso->border_color.f[1],
1796 cso->border_color.f[2],
1797 cso->border_color.f[3]
1798 },
1799 .min_lod = FIXED_16(cso->min_lod),
1800 .max_lod = FIXED_16(cso->max_lod),
1801 .seamless_cube_map = cso->seamless_cube_map,
1802 };
1803
1804 /* If necessary, we disable mipmapping in the sampler descriptor by
1805 * clamping the LOD as tight as possible (from 0 to epsilon,
1806 * essentially -- remember these are fixed point numbers, so
1807 * epsilon=1/256) */
1808
1809 if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1810 sampler_descriptor.max_lod = sampler_descriptor.min_lod;
1811
1812 /* Enforce that there is something in the middle by adding epsilon */
1813
1814 if (sampler_descriptor.min_lod == sampler_descriptor.max_lod)
1815 sampler_descriptor.max_lod++;
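/* Worked example, assuming the 1/256 epsilon above means FIXED_16 keeps
 * 8 fractional bits: min_lod = 0.0 encodes to 0, so max_lod becomes 1,
 * i.e. roughly 0.004, which is effectively "mip level 0 only". */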
1816
1817 /* Sanity check */
1818 assert(sampler_descriptor.max_lod > sampler_descriptor.min_lod);
1819
1820 so->hw = sampler_descriptor;
1821
1822 return so;
1823 }
1824
1825 static void
1826 panfrost_bind_sampler_states(
1827 struct pipe_context *pctx,
1828 enum pipe_shader_type shader,
1829 unsigned start_slot, unsigned num_sampler,
1830 void **sampler)
1831 {
1832 assert(start_slot == 0);
1833
1834 struct panfrost_context *ctx = pan_context(pctx);
1835
1836 /* XXX: Should upload, not just copy? */
1837 ctx->sampler_count[shader] = num_sampler;
1838 memcpy(ctx->samplers[shader], sampler, num_sampler * sizeof (void *));
1839
1840 ctx->dirty |= PAN_DIRTY_SAMPLERS;
1841 }
1842
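/* A compiled variant is only reusable if the state baked into it at
 * compile time still matches the current CSOs: for fragment shaders,
 * that means the emulated alpha test and the point sprite setup. */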
1843 static bool
1844 panfrost_variant_matches(
1845 struct panfrost_context *ctx,
1846 struct panfrost_shader_state *variant,
1847 enum pipe_shader_type type)
1848 {
1849 struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base;
1850 struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;
1851
1852 bool is_fragment = (type == PIPE_SHADER_FRAGMENT);
1853
1854 if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) {
1855 /* Make sure enable state is at least the same */
1856 if (alpha->enabled != variant->alpha_state.enabled) {
1857 return false;
1858 }
1859
1860 /* Check that the contents of the test are the same */
1861 bool same_func = alpha->func == variant->alpha_state.func;
1862 bool same_ref = alpha->ref_value == variant->alpha_state.ref_value;
1863
1864 if (!(same_func && same_ref)) {
1865 return false;
1866 }
1867 }
1868
1869 if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable |
1870 variant->point_sprite_mask)) {
1871 /* Ensure the same varyings are turned to point sprites */
1872 if (rasterizer->sprite_coord_enable != variant->point_sprite_mask)
1873 return false;
1874
1875 /* Ensure the orientation is correct */
1876 bool upper_left =
1877 rasterizer->sprite_coord_mode ==
1878 PIPE_SPRITE_COORD_UPPER_LEFT;
1879
1880 if (variant->point_sprite_upper_left != upper_left)
1881 return false;
1882 }
1883
1884 /* Otherwise, we're good to go */
1885 return true;
1886 }
1887
1888 /**
1889 * Fix an uncompiled shader's stream output info, and produce a bitmask
1890 * of which VARYING_SLOT_* are captured for stream output.
1891 *
1892 * Core Gallium stores output->register_index as a "slot" number, where
1893 * slots are assigned consecutively to all outputs in info->outputs_written.
1894 * This naive packing of outputs doesn't work for us - we too have slots,
1895 * but the layout is defined by the VUE map, which we won't have until we
1896 * compile a specific shader variant. So, we remap these and simply store
1897 * VARYING_SLOT_* in our copy's output->register_index fields.
1898 *
1899 * We then produce a bitmask of outputs which are used for SO.
1900 *
1901 * Implementation from iris.
1902 */
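/* For example, a vertex shader writing only POS and VAR0 gets them packed
 * as slots 0 and 1 by Gallium; we rewrite slot 1 back to VARYING_SLOT_VAR0
 * and return a mask with the POS and VAR0 bits set. */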
1903
1904 static uint64_t
1905 update_so_info(struct pipe_stream_output_info *so_info,
1906 uint64_t outputs_written)
1907 {
1908 uint64_t so_outputs = 0;
1909 uint8_t reverse_map[64] = {};
1910 unsigned slot = 0;
1911
1912 while (outputs_written)
1913 reverse_map[slot++] = u_bit_scan64(&outputs_written);
1914
1915 for (unsigned i = 0; i < so_info->num_outputs; i++) {
1916 struct pipe_stream_output *output = &so_info->output[i];
1917
1918 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
1919 output->register_index = reverse_map[output->register_index];
1920
1921 so_outputs |= 1ull << output->register_index;
1922 }
1923
1924 return so_outputs;
1925 }
1926
1927 static void
1928 panfrost_bind_shader_state(
1929 struct pipe_context *pctx,
1930 void *hwcso,
1931 enum pipe_shader_type type)
1932 {
1933 struct panfrost_context *ctx = pan_context(pctx);
1934
1935 ctx->shader[type] = hwcso;
1936
1937 if (type == PIPE_SHADER_FRAGMENT)
1938 ctx->dirty |= PAN_DIRTY_FS;
1939 else
1940 ctx->dirty |= PAN_DIRTY_VS;
1941
1942 if (!hwcso) return;
1943
1944 /* Match the appropriate variant */
1945
1946 signed variant = -1;
1947 struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;
1948
1949 for (unsigned i = 0; i < variants->variant_count; ++i) {
1950 if (panfrost_variant_matches(ctx, &variants->variants[i], type)) {
1951 variant = i;
1952 break;
1953 }
1954 }
1955
1956 if (variant == -1) {
1957 /* No variant matched, so create a new one */
1958 variant = variants->variant_count++;
1959 assert(variants->variant_count < MAX_SHADER_VARIANTS);
1960
1961 struct panfrost_shader_state *v =
1962 &variants->variants[variant];
1963
1964 if (type == PIPE_SHADER_FRAGMENT) {
1965 v->alpha_state = ctx->depth_stencil->alpha;
1966
1967 if (ctx->rasterizer) {
1968 v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable;
1969 v->point_sprite_upper_left =
1970 ctx->rasterizer->base.sprite_coord_mode ==
1971 PIPE_SPRITE_COORD_UPPER_LEFT;
1972 }
1973 }
1974
1975 variants->variants[variant].tripipe = calloc(1, sizeof(struct mali_shader_meta));
1977 }
1978
1979 /* Select this variant */
1980 variants->active_variant = variant;
1981
1982 struct panfrost_shader_state *shader_state = &variants->variants[variant];
1983 assert(panfrost_variant_matches(ctx, shader_state, type));
1984
1985 /* We finally have a variant, so compile it */
1986
1987 if (!shader_state->compiled) {
1988 uint64_t outputs_written = 0;
1989
1990 panfrost_shader_compile(ctx, shader_state->tripipe,
1991 variants->base.type,
1992 variants->base.type == PIPE_SHADER_IR_NIR ?
1993 variants->base.ir.nir :
1994 variants->base.tokens,
1995 tgsi_processor_to_shader_stage(type), shader_state,
1996 &outputs_written);
1997
1998 shader_state->compiled = true;
1999
2000 /* Fixup the stream out information, since what Gallium returns
2001 * normally is mildly insane */
2002
2003 shader_state->stream_output = variants->base.stream_output;
2004 shader_state->so_mask =
2005 update_so_info(&shader_state->stream_output, outputs_written);
2006 }
2007 }
2008
2009 static void
2010 panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
2011 {
2012 panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
2013 }
2014
2015 static void
2016 panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
2017 {
2018 panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
2019 }
2020
2021 static void
2022 panfrost_set_vertex_buffers(
2023 struct pipe_context *pctx,
2024 unsigned start_slot,
2025 unsigned num_buffers,
2026 const struct pipe_vertex_buffer *buffers)
2027 {
2028 struct panfrost_context *ctx = pan_context(pctx);
2029
2030 util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers, start_slot, num_buffers);
2031 }
2032
2033 static void
2034 panfrost_set_constant_buffer(
2035 struct pipe_context *pctx,
2036 enum pipe_shader_type shader, uint index,
2037 const struct pipe_constant_buffer *buf)
2038 {
2039 struct panfrost_context *ctx = pan_context(pctx);
2040 struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];
2041
2042 util_copy_constant_buffer(&pbuf->cb[index], buf);
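/* util_copy_constant_buffer handles the refcounting: a NULL buf releases
 * the previously bound buffer, a non-NULL one takes a reference. */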
2043
2044 unsigned mask = (1 << index);
2045
2046 if (unlikely(!buf)) {
2047 pbuf->enabled_mask &= ~mask;
2048 pbuf->dirty_mask &= ~mask;
2049 return;
2050 }
2051
2052 pbuf->enabled_mask |= mask;
2053 pbuf->dirty_mask |= mask;
2054 }
2055
2056 static void
2057 panfrost_set_stencil_ref(
2058 struct pipe_context *pctx,
2059 const struct pipe_stencil_ref *ref)
2060 {
2061 struct panfrost_context *ctx = pan_context(pctx);
2062 ctx->stencil_ref = *ref;
2063
2064 /* Shader core dirty */
2065 ctx->dirty |= PAN_DIRTY_FS;
2066 }
2067
2068 static enum mali_texture_type
2069 panfrost_translate_texture_type(enum pipe_texture_target t)
2070 {
2071 switch (t) {
2072 case PIPE_BUFFER:
2073 case PIPE_TEXTURE_1D:
2074 case PIPE_TEXTURE_1D_ARRAY:
2075 return MALI_TEX_1D;
2076
2077 case PIPE_TEXTURE_2D:
2078 case PIPE_TEXTURE_2D_ARRAY:
2079 case PIPE_TEXTURE_RECT:
2080 return MALI_TEX_2D;
2081
2082 case PIPE_TEXTURE_3D:
2083 return MALI_TEX_3D;
2084
2085 case PIPE_TEXTURE_CUBE:
2086 case PIPE_TEXTURE_CUBE_ARRAY:
2087 return MALI_TEX_CUBE;
2088
2089 default:
2090 unreachable("Unknown target");
2091 }
2092 }
2093
2094 static struct pipe_sampler_view *
2095 panfrost_create_sampler_view(
2096 struct pipe_context *pctx,
2097 struct pipe_resource *texture,
2098 const struct pipe_sampler_view *template)
2099 {
2100 struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view);
2101 int bytes_per_pixel = util_format_get_blocksize(texture->format);
2102
2103 pipe_reference(NULL, &texture->reference);
2104
2105 struct panfrost_resource *prsrc = (struct panfrost_resource *) texture;
2106 assert(prsrc->bo);
2107
2108 so->base = *template;
2109 so->base.texture = texture;
2110 so->base.reference.count = 1;
2111 so->base.context = pctx;
2112
2113 /* sampler_views correspond to texture descriptors, minus the texture
2114 * (data) itself. So, we serialise the descriptor here and cache it for
2115 * later. */
2116
2117 const struct util_format_description *desc = util_format_description(prsrc->base.format);
2118
2119 unsigned char user_swizzle[4] = {
2120 template->swizzle_r,
2121 template->swizzle_g,
2122 template->swizzle_b,
2123 template->swizzle_a
2124 };
2125
2126 enum mali_format format = panfrost_find_format(desc);
2127
2128 /* Check if we need to set a custom stride by computing the "expected"
2129 * stride and comparing it to what the BO actually wants. Only applies
2130 * to linear textures, since tiled/compressed textures have strict
2131 * alignment requirements for their strides as it is */
2132
2133 unsigned first_level = template->u.tex.first_level;
2134 unsigned last_level = template->u.tex.last_level;
2135
2136 if (prsrc->layout == PAN_LINEAR) {
2137 for (unsigned l = first_level; l <= last_level; ++l) {
2138 unsigned actual_stride = prsrc->slices[l].stride;
2139 unsigned width = u_minify(texture->width0, l);
2140 unsigned comp_stride = width * bytes_per_pixel;
2141
2142 if (comp_stride != actual_stride) {
2143 so->manual_stride = true;
2144 break;
2145 }
2146 }
2147 }
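/* E.g. a 100-texel-wide RGBA8 level expects a stride of 400 bytes; if the
 * slice was allocated with a padded stride (say 512), the descriptor alone
 * is insufficient and the real stride must be emitted alongside it. */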
2148
2149 /* In the hardware, array_size refers specifically to array textures,
2150 * whereas in Gallium, it also covers cubemaps */
2151
2152 unsigned array_size = texture->array_size;
2153
2154 if (template->target == PIPE_TEXTURE_CUBE) {
2155 /* TODO: Cubemap arrays */
2156 assert(array_size == 6);
2157 array_size /= 6;
2158 }
2159
2160 struct mali_texture_descriptor texture_descriptor = {
2161 .width = MALI_POSITIVE(u_minify(texture->width0, first_level)),
2162 .height = MALI_POSITIVE(u_minify(texture->height0, first_level)),
2163 .depth = MALI_POSITIVE(u_minify(texture->depth0, first_level)),
2164 .array_size = MALI_POSITIVE(array_size),
2165
2166 .format = {
2167 .swizzle = panfrost_translate_swizzle_4(desc->swizzle),
2168 .format = format,
2169 .srgb = desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB,
2170 .type = panfrost_translate_texture_type(template->target),
2171 .unknown2 = 0x1,
2172 },
2173
2174 .swizzle = panfrost_translate_swizzle_4(user_swizzle)
2175 };
2176
2177 texture_descriptor.levels = last_level - first_level;
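/* Like the MALI_POSITIVE fields above, levels appears to be stored
 * off-by-one: 0 means a single mip level. */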
2178
2179 so->hw = texture_descriptor;
2180
2181 return (struct pipe_sampler_view *) so;
2182 }
2183
2184 static void
2185 panfrost_set_sampler_views(
2186 struct pipe_context *pctx,
2187 enum pipe_shader_type shader,
2188 unsigned start_slot, unsigned num_views,
2189 struct pipe_sampler_view **views)
2190 {
2191 struct panfrost_context *ctx = pan_context(pctx);
2192
2193 assert(start_slot == 0);
2194
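/* Trim trailing NULL views: the count we track is the highest slot with a
 * view actually bound, not the raw num_views argument. */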
2195 unsigned new_nr = 0;
2196 for (unsigned i = 0; i < num_views; ++i) {
2197 if (views[i])
2198 new_nr = i + 1;
2199 }
2200
2201 ctx->sampler_view_count[shader] = new_nr;
2202 memcpy(ctx->sampler_views[shader], views, num_views * sizeof (void *));
2203
2204 ctx->dirty |= PAN_DIRTY_TEXTURES;
2205 }
2206
2207 static void
2208 panfrost_sampler_view_destroy(
2209 struct pipe_context *pctx,
2210 struct pipe_sampler_view *view)
2211 {
2212 pipe_resource_reference(&view->texture, NULL);
2213 ralloc_free(view);
2214 }
2215
2216 static void
2217 panfrost_set_shader_buffers(
2218 struct pipe_context *pctx,
2219 enum pipe_shader_type shader,
2220 unsigned start, unsigned count,
2221 const struct pipe_shader_buffer *buffers,
2222 unsigned writable_bitmask)
2223 {
2224 struct panfrost_context *ctx = pan_context(pctx);
2225
2226 util_set_shader_buffers_mask(ctx->ssbo[shader], &ctx->ssbo_mask[shader],
2227 buffers, start, count);
2228 }
2229
2230 /* Hints that a framebuffer should use AFBC where possible */
2231
2232 static void
2233 panfrost_hint_afbc(
2234 struct panfrost_screen *screen,
2235 const struct pipe_framebuffer_state *fb)
2236 {
2237 /* AFBC implementation incomplete; hide it */
2238 if (!(pan_debug & PAN_DBG_AFBC)) return;
2239
2240 /* Hint AFBC to the resources bound to each color buffer */
2241
2242 for (unsigned i = 0; i < fb->nr_cbufs; ++i) {
2243 struct pipe_surface *surf = fb->cbufs[i];
2244 struct panfrost_resource *rsrc = pan_resource(surf->texture);
2245 panfrost_resource_hint_layout(screen, rsrc, PAN_AFBC, 1);
2246 }
2247
2248 /* Also hint it to the depth buffer */
2249
2250 if (fb->zsbuf) {
2251 struct panfrost_resource *rsrc = pan_resource(fb->zsbuf->texture);
2252 panfrost_resource_hint_layout(screen, rsrc, PAN_AFBC, 1);
2253 }
2254 }
2255
2256 static void
2257 panfrost_set_framebuffer_state(struct pipe_context *pctx,
2258 const struct pipe_framebuffer_state *fb)
2259 {
2260 struct panfrost_context *ctx = pan_context(pctx);
2261
2262 /* Flush when switching framebuffers, but not if the framebuffer
2263 * state is being restored by u_blitter
2264 */
2265
2266 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
2267 bool is_scanout = panfrost_batch_is_scanout(batch);
2268 bool has_draws = batch->last_job.gpu;
2269
2270 /* Bail out early when the current and new states are the same. */
2271 if (util_framebuffer_state_equal(&ctx->pipe_framebuffer, fb))
2272 return;
2273
2274 /* The wallpaper logic sets a new FB state before doing the blit and
2275 * restores the old one when it's done. Those FB states are reported as
2276 * different because the surfaces they point to differ, but those
2277 * surfaces actually reference the same cbufs/zbufs. In that case we
2278 * definitely don't want new FB descs to be emitted/attached since the
2279 * job is expected to be flushed just after the blit is done, so let's
2280 * just copy the new state and return here.
2281 */
2282 if (ctx->wallpaper_batch) {
2283 util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
2284 return;
2285 }
2286
2287 if (!is_scanout || has_draws)
2288 panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
2289 else
2290 assert(!ctx->payloads[PIPE_SHADER_VERTEX].postfix.framebuffer &&
2291 !ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.framebuffer);
2292
2293 /* Invalidate the FBO job cache since we've just been assigned a new
2294 * FB state.
2295 */
2296 ctx->batch = NULL;
2297
2298 util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
2299
2300 /* Given that we're rendering, we'd love to have compression */
2301 struct panfrost_screen *screen = pan_screen(ctx->base.screen);
2302
2303 panfrost_hint_afbc(screen, &ctx->pipe_framebuffer);
2304 for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i)
2305 ctx->payloads[i].postfix.framebuffer = 0;
2306 }
2307
2308 static void *
2309 panfrost_create_depth_stencil_state(struct pipe_context *pipe,
2310 const struct pipe_depth_stencil_alpha_state *depth_stencil)
2311 {
2312 return mem_dup(depth_stencil, sizeof(*depth_stencil));
2313 }
2314
2315 static void
2316 panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
2317 void *cso)
2318 {
2319 struct panfrost_context *ctx = pan_context(pipe);
2320 struct pipe_depth_stencil_alpha_state *depth_stencil = cso;
2321 ctx->depth_stencil = depth_stencil;
2322
2323 if (!depth_stencil)
2324 return;
2325
2326 /* The alpha test is not implemented in the hardware (it's not in ES 3),
2327 * so it's emulated in the fragment shader */
2328
2329 if (depth_stencil->alpha.enabled) {
2330 /* We need to trigger a new shader (maybe) */
2331 ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
2332 }
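/* The alpha test is part of the fragment shader variant key (see
 * panfrost_variant_matches), so the rebind above selects or compiles a
 * variant with the matching test baked in. */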
2333
2334 /* Stencil state */
2335 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_STENCIL_TEST, depth_stencil->stencil[0].enabled);
2336
2337 panfrost_make_stencil_state(&depth_stencil->stencil[0], &ctx->fragment_shader_core.stencil_front);
2338 ctx->fragment_shader_core.stencil_mask_front = depth_stencil->stencil[0].writemask;
2339
2340 /* If back-stencil is not enabled, use the front values */
2341 bool back_enab = ctx->depth_stencil->stencil[1].enabled;
2342 unsigned back_index = back_enab ? 1 : 0;
2343
2344 panfrost_make_stencil_state(&depth_stencil->stencil[back_index], &ctx->fragment_shader_core.stencil_back);
2345 ctx->fragment_shader_core.stencil_mask_back = depth_stencil->stencil[back_index].writemask;
2346
2347 /* Depth state (TODO: Refactor) */
2348 SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_DEPTH_TEST, depth_stencil->depth.enabled);
2349
2350 int func = depth_stencil->depth.enabled ? depth_stencil->depth.func : PIPE_FUNC_ALWAYS;
2351
2352 ctx->fragment_shader_core.unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
2353 ctx->fragment_shader_core.unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(func));
2354
2355 /* Bounds test not implemented */
2356 assert(!depth_stencil->depth.bounds_test);
2357
2358 ctx->dirty |= PAN_DIRTY_FS;
2359 }
2360
2361 static void
2362 panfrost_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
2363 {
2364 free(depth);
2365 }
2366
2367 static void
2368 panfrost_set_sample_mask(struct pipe_context *pipe,
2369 unsigned sample_mask)
2370 {
2371 }
2372
2373 static void
2374 panfrost_set_clip_state(struct pipe_context *pipe,
2375 const struct pipe_clip_state *clip)
2376 {
2377 //struct panfrost_context *panfrost = pan_context(pipe);
2378 }
2379
2380 static void
2381 panfrost_set_viewport_states(struct pipe_context *pipe,
2382 unsigned start_slot,
2383 unsigned num_viewports,
2384 const struct pipe_viewport_state *viewports)
2385 {
2386 struct panfrost_context *ctx = pan_context(pipe);
2387
2388 assert(start_slot == 0);
2389 assert(num_viewports == 1);
2390
2391 ctx->pipe_viewport = *viewports;
2392 }
2393
2394 static void
2395 panfrost_set_scissor_states(struct pipe_context *pipe,
2396 unsigned start_slot,
2397 unsigned num_scissors,
2398 const struct pipe_scissor_state *scissors)
2399 {
2400 struct panfrost_context *ctx = pan_context(pipe);
2401
2402 assert(start_slot == 0);
2403 assert(num_scissors == 1);
2404
2405 ctx->scissor = *scissors;
2406 }
2407
2408 static void
2409 panfrost_set_polygon_stipple(struct pipe_context *pipe,
2410 const struct pipe_poly_stipple *stipple)
2411 {
2412 //struct panfrost_context *panfrost = pan_context(pipe);
2413 }
2414
2415 static void
2416 panfrost_set_active_query_state(struct pipe_context *pipe,
2417 bool enable)
2418 {
2419 struct panfrost_context *ctx = pan_context(pipe);
2420 ctx->active_queries = enable;
2421 }
2422
2423 static void
2424 panfrost_destroy(struct pipe_context *pipe)
2425 {
2426 struct panfrost_context *panfrost = pan_context(pipe);
2427
2428 if (panfrost->blitter)
2429 util_blitter_destroy(panfrost->blitter);
2430
2431 if (panfrost->blitter_wallpaper)
2432 util_blitter_destroy(panfrost->blitter_wallpaper);
2433
2434 ralloc_free(pipe);
2435 }
2436
2437 static struct pipe_query *
2438 panfrost_create_query(struct pipe_context *pipe,
2439 unsigned type,
2440 unsigned index)
2441 {
2442 struct panfrost_query *q = rzalloc(pipe, struct panfrost_query);
2443
2444 q->type = type;
2445 q->index = index;
2446
2447 return (struct pipe_query *) q;
2448 }
2449
2450 static void
2451 panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
2452 {
2453 ralloc_free(q);
2454 }
2455
2456 static bool
2457 panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
2458 {
2459 struct panfrost_context *ctx = pan_context(pipe);
2460 struct panfrost_query *query = (struct panfrost_query *) q;
2461 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
2462
2463 switch (query->type) {
2464 case PIPE_QUERY_OCCLUSION_COUNTER:
2465 case PIPE_QUERY_OCCLUSION_PREDICATE:
2466 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
2467 /* Allocate a word for the query results to be stored */
2468 query->transfer = panfrost_allocate_transient(batch, sizeof(unsigned));
2469 ctx->occlusion_query = query;
2470 break;
2471
2472 /* Geometry statistics are computed in the driver. XXX: geom/tess
2473 * shaders.. */
2474
2475 case PIPE_QUERY_PRIMITIVES_GENERATED:
2476 query->start = ctx->prims_generated;
2477 break;
2478 case PIPE_QUERY_PRIMITIVES_EMITTED:
2479 query->start = ctx->tf_prims_generated;
2480 break;
2481
2482 default:
2483 fprintf(stderr, "Skipping query %u\n", query->type);
2484 break;
2485 }
2486
2487 return true;
2488 }
2489
2490 static bool
2491 panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
2492 {
2493 struct panfrost_context *ctx = pan_context(pipe);
2494 struct panfrost_query *query = (struct panfrost_query *) q;
2495
2496 switch (query->type) {
2497 case PIPE_QUERY_OCCLUSION_COUNTER:
2498 case PIPE_QUERY_OCCLUSION_PREDICATE:
2499 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
2500 ctx->occlusion_query = NULL;
2501 break;
2502 case PIPE_QUERY_PRIMITIVES_GENERATED:
2503 query->end = ctx->prims_generated;
2504 break;
2505 case PIPE_QUERY_PRIMITIVES_EMITTED:
2506 query->end = ctx->tf_prims_generated;
2507 break;
2508 }
2509
2510 return true;
2511 }
2512
2513 static bool
2514 panfrost_get_query_result(struct pipe_context *pipe,
2515 struct pipe_query *q,
2516 bool wait,
2517 union pipe_query_result *vresult)
2518 {
2519 struct panfrost_query *query = (struct panfrost_query *) q;
2520
2522 switch (query->type) {
2523 case PIPE_QUERY_OCCLUSION_COUNTER:
2524 case PIPE_QUERY_OCCLUSION_PREDICATE:
2525 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
2526 /* Flush first */
2527 panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);
2528
2529 /* Read back the query results */
2530 unsigned *result = (unsigned *) query->transfer.cpu;
2531 unsigned passed = *result;
2532
2533 if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
2534 vresult->u64 = passed;
2535 } else {
2536 vresult->b = !!passed;
2537 }
2538
2539 break;
2540
2541 case PIPE_QUERY_PRIMITIVES_GENERATED:
2542 case PIPE_QUERY_PRIMITIVES_EMITTED:
2543 panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);
2544 vresult->u64 = query->end - query->start;
2545 break;
2546
2547 default:
2548 DBG("Skipped query get %u\n", query->type);
2549 break;
2550 }
2551
2552 return true;
2553 }
2554
2555 static struct pipe_stream_output_target *
2556 panfrost_create_stream_output_target(struct pipe_context *pctx,
2557 struct pipe_resource *prsc,
2558 unsigned buffer_offset,
2559 unsigned buffer_size)
2560 {
2561 struct pipe_stream_output_target *target;
2562
2563 target = rzalloc(pctx, struct pipe_stream_output_target);
2564
2565 if (!target)
2566 return NULL;
2567
2568 pipe_reference_init(&target->reference, 1);
2569 pipe_resource_reference(&target->buffer, prsc);
2570
2571 target->context = pctx;
2572 target->buffer_offset = buffer_offset;
2573 target->buffer_size = buffer_size;
2574
2575 return target;
2576 }
2577
2578 static void
2579 panfrost_stream_output_target_destroy(struct pipe_context *pctx,
2580 struct pipe_stream_output_target *target)
2581 {
2582 pipe_resource_reference(&target->buffer, NULL);
2583 ralloc_free(target);
2584 }
2585
2586 static void
2587 panfrost_set_stream_output_targets(struct pipe_context *pctx,
2588 unsigned num_targets,
2589 struct pipe_stream_output_target **targets,
2590 const unsigned *offsets)
2591 {
2592 struct panfrost_context *ctx = pan_context(pctx);
2593 struct panfrost_streamout *so = &ctx->streamout;
2594
2595 assert(num_targets <= ARRAY_SIZE(so->targets));
2596
2597 for (unsigned i = 0; i < num_targets; i++) {
2598 if (offsets[i] != -1)
2599 so->offsets[i] = offsets[i];
2600
2601 pipe_so_target_reference(&so->targets[i], targets[i]);
2602 }
2603
2604 for (unsigned i = num_targets; i < so->num_targets; i++)
2605 pipe_so_target_reference(&so->targets[i], NULL);
2606
2607 so->num_targets = num_targets;
2608 }
2609
2610 struct pipe_context *
2611 panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
2612 {
2613 struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context);
2614 struct panfrost_screen *pscreen = pan_screen(screen);
2615 struct pipe_context *gallium = (struct pipe_context *) ctx;
2616
2617 ctx->is_t6xx = pscreen->gpu_id < 0x0700; /* Literally, "earlier than T700" */
2618
2619 gallium->screen = screen;
2620
2621 gallium->destroy = panfrost_destroy;
2622
2623 gallium->set_framebuffer_state = panfrost_set_framebuffer_state;
2624
2625 gallium->flush = panfrost_flush;
2626 gallium->clear = panfrost_clear;
2627 gallium->draw_vbo = panfrost_draw_vbo;
2628
2629 gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
2630 gallium->set_constant_buffer = panfrost_set_constant_buffer;
2631 gallium->set_shader_buffers = panfrost_set_shader_buffers;
2632
2633 gallium->set_stencil_ref = panfrost_set_stencil_ref;
2634
2635 gallium->create_sampler_view = panfrost_create_sampler_view;
2636 gallium->set_sampler_views = panfrost_set_sampler_views;
2637 gallium->sampler_view_destroy = panfrost_sampler_view_destroy;
2638
2639 gallium->create_rasterizer_state = panfrost_create_rasterizer_state;
2640 gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
2641 gallium->delete_rasterizer_state = panfrost_generic_cso_delete;
2642
2643 gallium->create_vertex_elements_state = panfrost_create_vertex_elements_state;
2644 gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
2645 gallium->delete_vertex_elements_state = panfrost_generic_cso_delete;
2646
2647 gallium->create_fs_state = panfrost_create_shader_state;
2648 gallium->delete_fs_state = panfrost_delete_shader_state;
2649 gallium->bind_fs_state = panfrost_bind_fs_state;
2650
2651 gallium->create_vs_state = panfrost_create_shader_state;
2652 gallium->delete_vs_state = panfrost_delete_shader_state;
2653 gallium->bind_vs_state = panfrost_bind_vs_state;
2654
2655 gallium->create_sampler_state = panfrost_create_sampler_state;
2656 gallium->delete_sampler_state = panfrost_generic_cso_delete;
2657 gallium->bind_sampler_states = panfrost_bind_sampler_states;
2658
2659 gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
2660 gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
2661 gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;
2662
2663 gallium->set_sample_mask = panfrost_set_sample_mask;
2664
2665 gallium->set_clip_state = panfrost_set_clip_state;
2666 gallium->set_viewport_states = panfrost_set_viewport_states;
2667 gallium->set_scissor_states = panfrost_set_scissor_states;
2668 gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
2669 gallium->set_active_query_state = panfrost_set_active_query_state;
2670
2671 gallium->create_query = panfrost_create_query;
2672 gallium->destroy_query = panfrost_destroy_query;
2673 gallium->begin_query = panfrost_begin_query;
2674 gallium->end_query = panfrost_end_query;
2675 gallium->get_query_result = panfrost_get_query_result;
2676
2677 gallium->create_stream_output_target = panfrost_create_stream_output_target;
2678 gallium->stream_output_target_destroy = panfrost_stream_output_target_destroy;
2679 gallium->set_stream_output_targets = panfrost_set_stream_output_targets;
2680
2681 panfrost_resource_context_init(gallium);
2682 panfrost_blend_context_init(gallium);
2683 panfrost_compute_context_init(gallium);
2684
2685 ASSERTED int ret;
2686
2687 ret = drmSyncobjCreate(pscreen->fd, DRM_SYNCOBJ_CREATE_SIGNALED,
2688 &ctx->out_sync);
2689 assert(!ret);
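/* The syncobj is created signaled so that waiting on it before any job
 * has been submitted succeeds immediately. */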
2690
2691 /* XXX: leaks */
2692 gallium->stream_uploader = u_upload_create_default(gallium);
2693 gallium->const_uploader = gallium->stream_uploader;
2694 assert(gallium->stream_uploader);
2695
2696 /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */
2697 ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1;
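/* PIPE_PRIM_* values are consecutive small integers, so this sets one bit
 * per primitive type from PIPE_PRIM_POINTS through PIPE_PRIM_POLYGON. */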
2698
2699 ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);
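/* u_primconvert rewrites any primitive type outside draw_modes into one
 * that is supported before it reaches draw_vbo. */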
2700
2701 ctx->blitter = util_blitter_create(gallium);
2702 ctx->blitter_wallpaper = util_blitter_create(gallium);
2703
2704 assert(ctx->blitter);
2705 assert(ctx->blitter_wallpaper);
2706
2707 /* Prepare for render! */
2708
2709 panfrost_batch_init(ctx);
2710 panfrost_emit_vertex_payload(ctx);
2711 panfrost_emit_tiler_payload(ctx);
2712 panfrost_invalidate_frame(ctx);
2713 panfrost_default_shader_backend(ctx);
2714
2715 return gallium;
2716 }