panfrost: Split stack_shift nibble from unk0
[mesa.git] / src / gallium / drivers / panfrost / pan_context.c
1 /*
2 * © Copyright 2018 Alyssa Rosenzweig
3 * Copyright © 2014-2017 Broadcom
4 * Copyright (C) 2017 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 */
26
27 #include <sys/poll.h>
28 #include <errno.h>
29
30 #include "pan_bo.h"
31 #include "pan_context.h"
32 #include "pan_format.h"
33 #include "panfrost-quirks.h"
34
35 #include "util/macros.h"
36 #include "util/format/u_format.h"
37 #include "util/u_inlines.h"
38 #include "util/u_upload_mgr.h"
39 #include "util/u_memory.h"
40 #include "util/u_vbuf.h"
41 #include "util/half_float.h"
42 #include "util/u_helpers.h"
43 #include "util/format/u_format.h"
44 #include "util/u_prim.h"
45 #include "util/u_prim_restart.h"
46 #include "indices/u_primconvert.h"
47 #include "tgsi/tgsi_parse.h"
48 #include "tgsi/tgsi_from_mesa.h"
49 #include "util/u_math.h"
50
51 #include "pan_screen.h"
52 #include "pan_blending.h"
53 #include "pan_blend_shaders.h"
54 #include "pan_util.h"
55
56 /* Framebuffer descriptor */
57
58 static struct midgard_tiler_descriptor
59 panfrost_emit_midg_tiler(struct panfrost_batch *batch, unsigned vertex_count)
60 {
61 struct panfrost_screen *screen = pan_screen(batch->ctx->base.screen);
62 bool hierarchy = !(screen->quirks & MIDGARD_NO_HIER_TILING);
63 struct midgard_tiler_descriptor t = {0};
64 unsigned height = batch->key.height;
65 unsigned width = batch->key.width;
66
67 t.hierarchy_mask =
68 panfrost_choose_hierarchy_mask(width, height, vertex_count, hierarchy);
69
70 /* Compute the polygon header size and use that to offset the body */
71
72 unsigned header_size = panfrost_tiler_header_size(
73 width, height, t.hierarchy_mask, hierarchy);
74
75 t.polygon_list_size = panfrost_tiler_full_size(
76 width, height, t.hierarchy_mask, hierarchy);
77
78 /* Sanity check */
79
80 if (vertex_count) {
81 struct panfrost_bo *tiler_heap;
82
83 tiler_heap = panfrost_batch_get_tiler_heap(batch);
84 t.polygon_list = panfrost_batch_get_polygon_list(batch,
85 header_size +
86 t.polygon_list_size);
87
88
89 /* Allow the entire tiler heap */
90 t.heap_start = tiler_heap->gpu;
91 t.heap_end = tiler_heap->gpu + tiler_heap->size;
92 } else {
93 struct panfrost_bo *tiler_dummy;
94
95 tiler_dummy = panfrost_batch_get_tiler_dummy(batch);
96 header_size = MALI_TILER_MINIMUM_HEADER_SIZE;
97
98 /* The tiler is disabled, so don't allow the tiler heap */
99 t.heap_start = tiler_dummy->gpu;
100 t.heap_end = t.heap_start;
101
102 /* Use a dummy polygon list */
103 t.polygon_list = tiler_dummy->gpu;
104
105 /* Disable the tiler */
106 if (hierarchy)
107 t.hierarchy_mask |= MALI_TILER_DISABLED;
108 else {
109 t.hierarchy_mask = MALI_TILER_USER;
110 t.polygon_list_size = MALI_TILER_MINIMUM_HEADER_SIZE + 4;
111
112 /* We don't have a WRITE_VALUE job, so write the polygon list manually */
113 uint32_t *polygon_list_body = (uint32_t *) (tiler_dummy->cpu + header_size);
114 polygon_list_body[0] = 0xa0000000; /* TODO: Just that? */
115 }
116 }
117
118 t.polygon_list_body =
119 t.polygon_list + header_size;
120
121 return t;
122 }
123
124 struct mali_single_framebuffer
125 panfrost_emit_sfbd(struct panfrost_batch *batch, unsigned vertex_count)
126 {
127 unsigned width = batch->key.width;
128 unsigned height = batch->key.height;
129
130 struct mali_single_framebuffer framebuffer = {
131 .width = MALI_POSITIVE(width),
132 .height = MALI_POSITIVE(height),
133 .unknown2 = 0x1f,
134 .format = {
135 .unk3 = 0x3,
136 },
137 .clear_flags = 0x1000,
138 .scratchpad = panfrost_batch_get_scratchpad(batch)->gpu,
139 .tiler = panfrost_emit_midg_tiler(batch, vertex_count),
140 };
141
142 return framebuffer;
143 }
144
145 struct bifrost_framebuffer
146 panfrost_emit_mfbd(struct panfrost_batch *batch, unsigned vertex_count)
147 {
148 unsigned width = batch->key.width;
149 unsigned height = batch->key.height;
150
151 struct bifrost_framebuffer framebuffer = {
152 .stack_shift = 0x5,
153 .unk0 = 0x1e,
154 .width1 = MALI_POSITIVE(width),
155 .height1 = MALI_POSITIVE(height),
156 .width2 = MALI_POSITIVE(width),
157 .height2 = MALI_POSITIVE(height),
158
159 .unk1 = 0x1080,
160
161 .rt_count_1 = MALI_POSITIVE(batch->key.nr_cbufs),
162 .rt_count_2 = 4,
163
164 .unknown2 = 0x1f,
165
166 .scratchpad = panfrost_batch_get_scratchpad(batch)->gpu,
167 .tiler = panfrost_emit_midg_tiler(batch, vertex_count)
168 };
169
170 return framebuffer;
171 }
172
173 static void
174 panfrost_clear(
175 struct pipe_context *pipe,
176 unsigned buffers,
177 const union pipe_color_union *color,
178 double depth, unsigned stencil)
179 {
180 struct panfrost_context *ctx = pan_context(pipe);
181
182 /* TODO: panfrost_get_fresh_batch_for_fbo() instantiates a new batch if
183 * the existing batch targeting this FBO has draws. We could probably
184 * avoid that by replacing plain clears by quad-draws with a specific
185 * color/depth/stencil value, thus avoiding the generation of extra
186 * fragment jobs.
187 */
188 struct panfrost_batch *batch = panfrost_get_fresh_batch_for_fbo(ctx);
189
190 panfrost_batch_add_fbo_bos(batch);
191 panfrost_batch_clear(batch, buffers, color, depth, stencil);
192 }
193
194 static mali_ptr
195 panfrost_attach_vt_mfbd(struct panfrost_batch *batch)
196 {
197 struct bifrost_framebuffer mfbd = panfrost_emit_mfbd(batch, ~0);
198
199 return panfrost_upload_transient(batch, &mfbd, sizeof(mfbd)) | MALI_MFBD;
200 }
201
202 static mali_ptr
203 panfrost_attach_vt_sfbd(struct panfrost_batch *batch)
204 {
205 struct mali_single_framebuffer sfbd = panfrost_emit_sfbd(batch, ~0);
206
207 return panfrost_upload_transient(batch, &sfbd, sizeof(sfbd)) | MALI_SFBD;
208 }
209
210 static void
211 panfrost_attach_vt_framebuffer(struct panfrost_context *ctx)
212 {
213 /* Skip the attach if we can */
214
215 if (ctx->payloads[PIPE_SHADER_VERTEX].postfix.framebuffer) {
216 assert(ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.framebuffer);
217 return;
218 }
219
220 struct panfrost_screen *screen = pan_screen(ctx->base.screen);
221 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
222
223 if (!batch->framebuffer)
224 batch->framebuffer = (screen->quirks & MIDGARD_SFBD) ?
225 panfrost_attach_vt_sfbd(batch) :
226 panfrost_attach_vt_mfbd(batch);
227
228 for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i)
229 ctx->payloads[i].postfix.framebuffer = batch->framebuffer;
230 }
231
232 /* Reset per-frame context, called on context initialisation as well as after
233 * flushing a frame */
234
235 void
236 panfrost_invalidate_frame(struct panfrost_context *ctx)
237 {
238 for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i)
239 ctx->payloads[i].postfix.framebuffer = 0;
240
241 if (ctx->rasterizer)
242 ctx->dirty |= PAN_DIRTY_RASTERIZER;
243
244 /* XXX */
245 ctx->dirty |= PAN_DIRTY_SAMPLERS | PAN_DIRTY_TEXTURES;
246
247 /* TODO: When does this need to be handled? */
248 ctx->active_queries = true;
249 }
250
251 /* In practice, every field of these payloads should be configurable
 252  * arbitrarily, which means these functions are basically catch-alls for
 253  * as-yet-unresolved unknowns */
254
255 static void
256 panfrost_emit_vertex_payload(struct panfrost_context *ctx)
257 {
258 /* 0x2 bit clear on 32-bit T6XX */
259
260 struct midgard_payload_vertex_tiler payload = {
261 .gl_enables = 0x4 | 0x2,
262 };
263
264 /* Vertex and compute are closely coupled, so share a payload */
265
266 memcpy(&ctx->payloads[PIPE_SHADER_VERTEX], &payload, sizeof(payload));
267 memcpy(&ctx->payloads[PIPE_SHADER_COMPUTE], &payload, sizeof(payload));
268 }
269
270 static void
271 panfrost_emit_tiler_payload(struct panfrost_context *ctx)
272 {
273 struct midgard_payload_vertex_tiler payload = {
274 .prefix = {
275 .zero1 = 0xffff, /* Why is this only seen on test-quad-textured? */
276 },
277 };
278
279 memcpy(&ctx->payloads[PIPE_SHADER_FRAGMENT], &payload, sizeof(payload));
280 }
281
282 static unsigned
283 translate_tex_wrap(enum pipe_tex_wrap w)
284 {
285 switch (w) {
286 case PIPE_TEX_WRAP_REPEAT:
287 return MALI_WRAP_REPEAT;
288
289 /* TODO: lower GL_CLAMP? */
290 case PIPE_TEX_WRAP_CLAMP:
291 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
292 return MALI_WRAP_CLAMP_TO_EDGE;
293
294 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
295 return MALI_WRAP_CLAMP_TO_BORDER;
296
297 case PIPE_TEX_WRAP_MIRROR_REPEAT:
298 return MALI_WRAP_MIRRORED_REPEAT;
299
300 default:
301 unreachable("Invalid wrap");
302 }
303 }
304
305 static unsigned
306 panfrost_translate_compare_func(enum pipe_compare_func in)
307 {
308 switch (in) {
309 case PIPE_FUNC_NEVER:
310 return MALI_FUNC_NEVER;
311
312 case PIPE_FUNC_LESS:
313 return MALI_FUNC_LESS;
314
315 case PIPE_FUNC_EQUAL:
316 return MALI_FUNC_EQUAL;
317
318 case PIPE_FUNC_LEQUAL:
319 return MALI_FUNC_LEQUAL;
320
321 case PIPE_FUNC_GREATER:
322 return MALI_FUNC_GREATER;
323
324 case PIPE_FUNC_NOTEQUAL:
325 return MALI_FUNC_NOTEQUAL;
326
327 case PIPE_FUNC_GEQUAL:
328 return MALI_FUNC_GEQUAL;
329
330 case PIPE_FUNC_ALWAYS:
331 return MALI_FUNC_ALWAYS;
332
333 default:
334 unreachable("Invalid func");
335 }
336 }
337
338 static unsigned
339 panfrost_translate_alt_compare_func(enum pipe_compare_func in)
340 {
341 switch (in) {
342 case PIPE_FUNC_NEVER:
343 return MALI_ALT_FUNC_NEVER;
344
345 case PIPE_FUNC_LESS:
346 return MALI_ALT_FUNC_LESS;
347
348 case PIPE_FUNC_EQUAL:
349 return MALI_ALT_FUNC_EQUAL;
350
351 case PIPE_FUNC_LEQUAL:
352 return MALI_ALT_FUNC_LEQUAL;
353
354 case PIPE_FUNC_GREATER:
355 return MALI_ALT_FUNC_GREATER;
356
357 case PIPE_FUNC_NOTEQUAL:
358 return MALI_ALT_FUNC_NOTEQUAL;
359
360 case PIPE_FUNC_GEQUAL:
361 return MALI_ALT_FUNC_GEQUAL;
362
363 case PIPE_FUNC_ALWAYS:
364 return MALI_ALT_FUNC_ALWAYS;
365
366 default:
367 unreachable("Invalid alt func");
368 }
369 }
370
371 static unsigned
372 panfrost_translate_stencil_op(enum pipe_stencil_op in)
373 {
374 switch (in) {
375 case PIPE_STENCIL_OP_KEEP:
376 return MALI_STENCIL_KEEP;
377
378 case PIPE_STENCIL_OP_ZERO:
379 return MALI_STENCIL_ZERO;
380
381 case PIPE_STENCIL_OP_REPLACE:
382 return MALI_STENCIL_REPLACE;
383
384 case PIPE_STENCIL_OP_INCR:
385 return MALI_STENCIL_INCR;
386
387 case PIPE_STENCIL_OP_DECR:
388 return MALI_STENCIL_DECR;
389
390 case PIPE_STENCIL_OP_INCR_WRAP:
391 return MALI_STENCIL_INCR_WRAP;
392
393 case PIPE_STENCIL_OP_DECR_WRAP:
394 return MALI_STENCIL_DECR_WRAP;
395
396 case PIPE_STENCIL_OP_INVERT:
397 return MALI_STENCIL_INVERT;
398
399 default:
400 unreachable("Invalid stencil op");
401 }
402 }
403
404 static void
405 panfrost_make_stencil_state(const struct pipe_stencil_state *in, struct mali_stencil_test *out)
406 {
407 out->ref = 0; /* Gallium gets it from elsewhere */
408
409 out->mask = in->valuemask;
410 out->func = panfrost_translate_compare_func(in->func);
411 out->sfail = panfrost_translate_stencil_op(in->fail_op);
412 out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
413 out->dppass = panfrost_translate_stencil_op(in->zpass_op);
414 }
415
416 static void
417 panfrost_default_shader_backend(struct panfrost_context *ctx)
418 {
419 struct panfrost_screen *screen = pan_screen(ctx->base.screen);
420 struct mali_shader_meta shader = {
421 .alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000),
422
423 .unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010,
424 .unknown2_4 = MALI_NO_MSAA | 0x4e0,
425 };
426
427 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this is
428 * required (independent of 32-bit/64-bit descriptors), or why it's not
429 * used on later GPU revisions. Otherwise, all shader jobs fault on
430 * these earlier chips (perhaps this is a chicken bit of some kind).
431 * More investigation is needed. */
432
433 if (screen->quirks & MIDGARD_SFBD)
434 shader.unknown2_4 |= 0x10;
435
436 struct pipe_stencil_state default_stencil = {
437 .enabled = 0,
438 .func = PIPE_FUNC_ALWAYS,
 439                 .fail_op = PIPE_STENCIL_OP_KEEP,
 440                 .zfail_op = PIPE_STENCIL_OP_KEEP,
 441                 .zpass_op = PIPE_STENCIL_OP_KEEP,
442 .writemask = 0xFF,
443 .valuemask = 0xFF
444 };
445
446 panfrost_make_stencil_state(&default_stencil, &shader.stencil_front);
447 shader.stencil_mask_front = default_stencil.writemask;
448
449 panfrost_make_stencil_state(&default_stencil, &shader.stencil_back);
450 shader.stencil_mask_back = default_stencil.writemask;
451
452 if (default_stencil.enabled)
453 shader.unknown2_4 |= MALI_STENCIL_TEST;
454
455 memcpy(&ctx->fragment_shader_core, &shader, sizeof(shader));
456 }
457
458 /* Generates a vertex/tiler job. This is, in some sense, the heart of the
 459  * graphics command stream. It should be called once per draw, according to
460 * presentations. Set is_tiler for "tiler" jobs (fragment shader jobs, but in
461 * Mali parlance, "fragment" refers to framebuffer writeout). Clear it for
462 * vertex jobs. */
463
464 struct panfrost_transfer
465 panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler)
466 {
467 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
468 struct mali_job_descriptor_header job = {
469 .job_type = is_tiler ? JOB_TYPE_TILER : JOB_TYPE_VERTEX,
470 .job_descriptor_size = 1,
471 };
472
473 struct midgard_payload_vertex_tiler *payload = is_tiler ? &ctx->payloads[PIPE_SHADER_FRAGMENT] : &ctx->payloads[PIPE_SHADER_VERTEX];
474
475 struct panfrost_transfer transfer = panfrost_allocate_transient(batch, sizeof(job) + sizeof(*payload));
476 memcpy(transfer.cpu, &job, sizeof(job));
477 memcpy(transfer.cpu + sizeof(job), payload, sizeof(*payload));
478 return transfer;
479 }
480
481 mali_ptr
482 panfrost_vertex_buffer_address(struct panfrost_context *ctx, unsigned i)
483 {
484 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
485 struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);
486
487 return rsrc->bo->gpu + buf->buffer_offset;
488 }
489
490 static bool
491 panfrost_writes_point_size(struct panfrost_context *ctx)
492 {
493 assert(ctx->shader[PIPE_SHADER_VERTEX]);
494 struct panfrost_shader_state *vs = &ctx->shader[PIPE_SHADER_VERTEX]->variants[ctx->shader[PIPE_SHADER_VERTEX]->active_variant];
495
496 return vs->writes_point_size && ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode == MALI_POINTS;
497 }
498
499 /* Stage the attribute descriptors so we can adjust src_offset
500 * to let BOs align nicely */
501
502 static void
503 panfrost_stage_attributes(struct panfrost_context *ctx)
504 {
505 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
506 struct panfrost_vertex_state *so = ctx->vertex;
507
508 size_t sz = sizeof(struct mali_attr_meta) * so->num_elements;
509 struct panfrost_transfer transfer = panfrost_allocate_transient(batch, sz);
510 struct mali_attr_meta *target = (struct mali_attr_meta *) transfer.cpu;
511
512 /* Copy as-is for the first pass */
513 memcpy(target, so->hw, sz);
514
515 /* Fixup offsets for the second pass. Recall that the hardware
516 * calculates attribute addresses as:
517 *
518 * addr = base + (stride * vtx) + src_offset;
519 *
 520          * However, on Mali, base must be aligned to 64 bytes, so we
521 * instead let:
522 *
523 * base' = base & ~63 = base - (base & 63)
524 *
525 * To compensate when using base' (see emit_vertex_data), we have
526 * to adjust src_offset by the masked off piece:
527 *
528 * addr' = base' + (stride * vtx) + (src_offset + (base & 63))
529 * = base - (base & 63) + (stride * vtx) + src_offset + (base & 63)
530 * = base + (stride * vtx) + src_offset
531 * = addr;
532 *
533 * QED.
534 */
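        /* Worked example (illustrative, not from the original comment):
         * with base = 0x1007 and src_offset = 4, base' = 0x1000 and the
         * adjusted offset is 4 + (0x1007 & 63) = 11, so base' + 11 =
         * 0x100b = base + 4, the same address as before. */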
535
536 unsigned start = ctx->payloads[PIPE_SHADER_VERTEX].offset_start;
537
538 for (unsigned i = 0; i < so->num_elements; ++i) {
539 unsigned vbi = so->pipe[i].vertex_buffer_index;
540 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
541 mali_ptr addr = panfrost_vertex_buffer_address(ctx, vbi);
542
543 /* Adjust by the masked off bits of the offset */
544 target[i].src_offset += (addr & 63);
545
 546                 /* Also, somewhat obscurely, per-instance data needs to be
547 * offset in response to a delayed start in an indexed draw */
548
549 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start) {
550 target[i].src_offset -= buf->stride * start;
551 }
552
553
554 }
555
556 ctx->payloads[PIPE_SHADER_VERTEX].postfix.attribute_meta = transfer.gpu;
557 }
558
559 static void
560 panfrost_upload_sampler_descriptors(struct panfrost_context *ctx)
561 {
562 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
563 size_t desc_size = sizeof(struct mali_sampler_descriptor);
564
565 for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
566 mali_ptr upload = 0;
567
568 if (ctx->sampler_count[t] && ctx->sampler_view_count[t]) {
569 size_t transfer_size = desc_size * ctx->sampler_count[t];
570
571 struct panfrost_transfer transfer =
572 panfrost_allocate_transient(batch, transfer_size);
573
574 struct mali_sampler_descriptor *desc =
575 (struct mali_sampler_descriptor *) transfer.cpu;
576
577 for (int i = 0; i < ctx->sampler_count[t]; ++i)
578 desc[i] = ctx->samplers[t][i]->hw;
579
580 upload = transfer.gpu;
581 }
582
583 ctx->payloads[t].postfix.sampler_descriptor = upload;
584 }
585 }
586
587 static enum mali_texture_layout
588 panfrost_layout_for_texture(struct panfrost_resource *rsrc)
589 {
590 /* TODO: other linear depth textures */
591 bool is_depth = rsrc->base.format == PIPE_FORMAT_Z32_UNORM;
592
593 switch (rsrc->layout) {
594 case PAN_AFBC:
595 return MALI_TEXTURE_AFBC;
596 case PAN_TILED:
597 assert(!is_depth);
598 return MALI_TEXTURE_TILED;
599 case PAN_LINEAR:
600 return is_depth ? MALI_TEXTURE_TILED : MALI_TEXTURE_LINEAR;
601 default:
602 unreachable("Invalid texture layout");
603 }
604 }
605
606 static mali_ptr
607 panfrost_upload_tex(
608 struct panfrost_context *ctx,
609 enum pipe_shader_type st,
610 struct panfrost_sampler_view *view)
611 {
612 if (!view)
613 return (mali_ptr) 0;
614
615 struct pipe_sampler_view *pview = &view->base;
616 struct panfrost_resource *rsrc = pan_resource(pview->texture);
617
618 /* Do we interleave an explicit stride with every element? */
619
620 bool has_manual_stride = view->manual_stride;
621
622 /* For easy access */
623
624 bool is_buffer = pview->target == PIPE_BUFFER;
625 unsigned first_level = is_buffer ? 0 : pview->u.tex.first_level;
626 unsigned last_level = is_buffer ? 0 : pview->u.tex.last_level;
627 unsigned first_layer = is_buffer ? 0 : pview->u.tex.first_layer;
628 unsigned last_layer = is_buffer ? 0 : pview->u.tex.last_layer;
629
 630         /* The low bit is set when sampling from colour AFBC */
631 bool is_afbc = rsrc->layout == PAN_AFBC;
632 bool is_zs = rsrc->base.bind & PIPE_BIND_DEPTH_STENCIL;
633 unsigned afbc_bit = (is_afbc && !is_zs) ? 1 : 0;
634
635 /* Add the BO to the job so it's retained until the job is done. */
636 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
637 panfrost_batch_add_bo(batch, rsrc->bo,
638 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
639 panfrost_bo_access_for_stage(st));
640
641 /* Add the usage flags in, since they can change across the CSO
642 * lifetime due to layout switches */
643
644 view->hw.format.layout = panfrost_layout_for_texture(rsrc);
645 view->hw.format.manual_stride = has_manual_stride;
646
647 /* Inject the addresses in, interleaving mip levels, cube faces, and
648 * strides in that order */
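        /* For instance (illustrative sketch): with two mip levels, two layers
         * and manual strides, the payload is laid out as
         *   addr(l0,f0), stride(l0), addr(l0,f1), stride(l0),
         *   addr(l1,f0), stride(l1), addr(l1,f1), stride(l1) */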
649
650 unsigned idx = 0;
651
652 for (unsigned l = first_level; l <= last_level; ++l) {
653 for (unsigned f = first_layer; f <= last_layer; ++f) {
654
655 view->hw.payload[idx++] =
656 panfrost_get_texture_address(rsrc, l, f) + afbc_bit;
657
658 if (has_manual_stride) {
659 view->hw.payload[idx++] =
660 rsrc->slices[l].stride;
661 }
662 }
663 }
664
665 return panfrost_upload_transient(batch, &view->hw,
666 sizeof(struct mali_texture_descriptor));
667 }
668
669 static void
670 panfrost_upload_texture_descriptors(struct panfrost_context *ctx)
671 {
672 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
673
674 for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
675 mali_ptr trampoline = 0;
676
677 if (ctx->sampler_view_count[t]) {
678 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
679
680 for (int i = 0; i < ctx->sampler_view_count[t]; ++i)
681 trampolines[i] =
682 panfrost_upload_tex(ctx, t, ctx->sampler_views[t][i]);
683
684 trampoline = panfrost_upload_transient(batch, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]);
685 }
686
687 ctx->payloads[t].postfix.texture_trampoline = trampoline;
688 }
689 }
690
691 struct sysval_uniform {
692 union {
693 float f[4];
694 int32_t i[4];
695 uint32_t u[4];
696 uint64_t du[2];
697 };
698 };
699
700 static void panfrost_upload_viewport_scale_sysval(struct panfrost_context *ctx,
701 struct sysval_uniform *uniform)
702 {
703 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
704
705 uniform->f[0] = vp->scale[0];
706 uniform->f[1] = vp->scale[1];
707 uniform->f[2] = vp->scale[2];
708 }
709
710 static void panfrost_upload_viewport_offset_sysval(struct panfrost_context *ctx,
711 struct sysval_uniform *uniform)
712 {
713 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
714
715 uniform->f[0] = vp->translate[0];
716 uniform->f[1] = vp->translate[1];
717 uniform->f[2] = vp->translate[2];
718 }
719
720 static void panfrost_upload_txs_sysval(struct panfrost_context *ctx,
721 enum pipe_shader_type st,
722 unsigned int sysvalid,
723 struct sysval_uniform *uniform)
724 {
725 unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
726 unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
727 bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
728 struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
729
730 assert(dim);
731 uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
732
733 if (dim > 1)
734 uniform->i[1] = u_minify(tex->texture->height0,
735 tex->u.tex.first_level);
736
737 if (dim > 2)
738 uniform->i[2] = u_minify(tex->texture->depth0,
739 tex->u.tex.first_level);
740
741 if (is_array)
742 uniform->i[dim] = tex->texture->array_size;
743 }
744
745 static void panfrost_upload_ssbo_sysval(
746 struct panfrost_context *ctx,
747 enum pipe_shader_type st,
748 unsigned ssbo_id,
749 struct sysval_uniform *uniform)
750 {
751 assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
752 struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
753
754 /* Compute address */
755 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
756 struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
757
758 panfrost_batch_add_bo(batch, bo,
759 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
760 panfrost_bo_access_for_stage(st));
761
762 /* Upload address and size as sysval */
763 uniform->du[0] = bo->gpu + sb.buffer_offset;
764 uniform->u[2] = sb.buffer_size;
765 }
766
767 static void
768 panfrost_upload_sampler_sysval(
769 struct panfrost_context *ctx,
770 enum pipe_shader_type st,
771 unsigned sampler_index,
772 struct sysval_uniform *uniform)
773 {
774 struct pipe_sampler_state *sampl =
775 &ctx->samplers[st][sampler_index]->base;
776
777 uniform->f[0] = sampl->min_lod;
778 uniform->f[1] = sampl->max_lod;
779 uniform->f[2] = sampl->lod_bias;
780
781 /* Even without any errata, Midgard represents "no mipmapping" as
782 * fixing the LOD with the clamps; keep behaviour consistent. c.f.
783 * panfrost_create_sampler_state which also explains our choice of
784 * epsilon value (again to keep behaviour consistent) */
785
786 if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
787 uniform->f[1] = uniform->f[0] + (1.0/256.0);
788 }
789
790 static void panfrost_upload_num_work_groups_sysval(struct panfrost_context *ctx,
791 struct sysval_uniform *uniform)
792 {
793 uniform->u[0] = ctx->compute_grid->grid[0];
794 uniform->u[1] = ctx->compute_grid->grid[1];
795 uniform->u[2] = ctx->compute_grid->grid[2];
796 }
797
798 static void panfrost_upload_sysvals(struct panfrost_context *ctx, void *buf,
799 struct panfrost_shader_state *ss,
800 enum pipe_shader_type st)
801 {
802 struct sysval_uniform *uniforms = (void *)buf;
803
804 for (unsigned i = 0; i < ss->sysval_count; ++i) {
805 int sysval = ss->sysval[i];
806
807 switch (PAN_SYSVAL_TYPE(sysval)) {
808 case PAN_SYSVAL_VIEWPORT_SCALE:
809 panfrost_upload_viewport_scale_sysval(ctx, &uniforms[i]);
810 break;
811 case PAN_SYSVAL_VIEWPORT_OFFSET:
812 panfrost_upload_viewport_offset_sysval(ctx, &uniforms[i]);
813 break;
814 case PAN_SYSVAL_TEXTURE_SIZE:
815 panfrost_upload_txs_sysval(ctx, st, PAN_SYSVAL_ID(sysval),
816 &uniforms[i]);
817 break;
818 case PAN_SYSVAL_SSBO:
819 panfrost_upload_ssbo_sysval(ctx, st, PAN_SYSVAL_ID(sysval),
820 &uniforms[i]);
821 break;
822 case PAN_SYSVAL_NUM_WORK_GROUPS:
823 panfrost_upload_num_work_groups_sysval(ctx, &uniforms[i]);
824 break;
825 case PAN_SYSVAL_SAMPLER:
826 panfrost_upload_sampler_sysval(ctx, st, PAN_SYSVAL_ID(sysval),
827 &uniforms[i]);
828 break;
829 default:
830 assert(0);
831 }
832 }
833 }
834
835 static const void *
836 panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf, unsigned index)
837 {
838 struct pipe_constant_buffer *cb = &buf->cb[index];
839 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
840
841 if (rsrc)
842 return rsrc->bo->cpu;
843 else if (cb->user_buffer)
844 return cb->user_buffer;
845 else
846 unreachable("No constant buffer");
847 }
848
849 static mali_ptr
850 panfrost_map_constant_buffer_gpu(
851 struct panfrost_context *ctx,
852 enum pipe_shader_type st,
853 struct panfrost_constant_buffer *buf,
854 unsigned index)
855 {
856 struct pipe_constant_buffer *cb = &buf->cb[index];
857 struct panfrost_resource *rsrc = pan_resource(cb->buffer);
858 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
859
860 if (rsrc) {
861 panfrost_batch_add_bo(batch, rsrc->bo,
862 PAN_BO_ACCESS_SHARED |
863 PAN_BO_ACCESS_READ |
864 panfrost_bo_access_for_stage(st));
865 return rsrc->bo->gpu;
866 } else if (cb->user_buffer) {
867 return panfrost_upload_transient(batch, cb->user_buffer, cb->buffer_size);
868 } else {
869 unreachable("No constant buffer");
870 }
871 }
872
873 /* Compute number of UBOs active (more specifically, compute the highest UBO
874 * number addressable -- if there are gaps, include them in the count anyway).
875 * We always include UBO #0 in the count, since we *need* uniforms enabled for
876 * sysvals. */
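/* Illustrative example: with enabled_mask = 0b0100, the mask becomes 0b0101
 * and 32 - __builtin_clz(0b0101) = 3, so UBOs 0..2 are counted even though
 * UBO 1 is a gap. */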
877
878 static unsigned
879 panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage)
880 {
881 unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1;
882 return 32 - __builtin_clz(mask);
883 }
884
885 /* Fixes up a shader state with current state */
886
887 static void
888 panfrost_patch_shader_state(struct panfrost_context *ctx,
889 enum pipe_shader_type stage)
890 {
891 struct panfrost_shader_variants *all = ctx->shader[stage];
892
893 if (!all) {
894 ctx->payloads[stage].postfix.shader = 0;
895 return;
896 }
897
898 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
899
900 ss->tripipe->texture_count = ctx->sampler_view_count[stage];
901 ss->tripipe->sampler_count = ctx->sampler_count[stage];
902
903 ss->tripipe->midgard1.flags = 0x220;
904
905 unsigned ubo_count = panfrost_ubo_count(ctx, stage);
906 ss->tripipe->midgard1.uniform_buffer_count = ubo_count;
907
908 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
909
910 /* Add the shader BO to the batch. */
911 panfrost_batch_add_bo(batch, ss->bo,
912 PAN_BO_ACCESS_PRIVATE |
913 PAN_BO_ACCESS_READ |
914 panfrost_bo_access_for_stage(stage));
915
916 ctx->payloads[stage].postfix.shader = panfrost_upload_transient(batch,
917 ss->tripipe,
918 sizeof(struct mali_shader_meta));
919 }
920
921 /* Go through dirty flags and actualise them in the cmdstream. */
922
923 void
924 panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
925 {
926 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
927 struct panfrost_screen *screen = pan_screen(ctx->base.screen);
928
929 panfrost_batch_add_fbo_bos(batch);
930 panfrost_attach_vt_framebuffer(ctx);
931
932 if (with_vertex_data) {
933 panfrost_emit_vertex_data(batch);
934
935 /* Varyings emitted for -all- geometry */
936 unsigned total_count = ctx->padded_count * ctx->instance_count;
937 panfrost_emit_varying_descriptor(ctx, total_count);
938 }
939
940 bool msaa = ctx->rasterizer->base.multisample;
941
942 if (ctx->dirty & PAN_DIRTY_RASTERIZER) {
943 ctx->payloads[PIPE_SHADER_FRAGMENT].gl_enables = ctx->rasterizer->tiler_gl_enables;
944
945 /* TODO: Sample size */
946 SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_HAS_MSAA, msaa);
947 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !msaa);
948 }
949
950 panfrost_batch_set_requirements(batch);
951
952 if (ctx->occlusion_query) {
953 ctx->payloads[PIPE_SHADER_FRAGMENT].gl_enables |= MALI_OCCLUSION_QUERY;
954 ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.occlusion_counter = ctx->occlusion_query->bo->gpu;
955 }
956
957 panfrost_patch_shader_state(ctx, PIPE_SHADER_VERTEX);
958 panfrost_patch_shader_state(ctx, PIPE_SHADER_COMPUTE);
959
960 if (ctx->dirty & (PAN_DIRTY_RASTERIZER | PAN_DIRTY_VS)) {
961 /* Check if we need to link the gl_PointSize varying */
962 if (!panfrost_writes_point_size(ctx)) {
963 /* If the size is constant, write it out. Otherwise,
964 * don't touch primitive_size (since we would clobber
965 * the pointer there) */
966
967 ctx->payloads[PIPE_SHADER_FRAGMENT].primitive_size.constant = ctx->rasterizer->base.line_width;
968 }
969 }
970
971 /* TODO: Maybe dirty track FS, maybe not. For now, it's transient. */
972 if (ctx->shader[PIPE_SHADER_FRAGMENT])
973 ctx->dirty |= PAN_DIRTY_FS;
974
975 if (ctx->dirty & PAN_DIRTY_FS) {
976 assert(ctx->shader[PIPE_SHADER_FRAGMENT]);
977 struct panfrost_shader_state *variant = &ctx->shader[PIPE_SHADER_FRAGMENT]->variants[ctx->shader[PIPE_SHADER_FRAGMENT]->active_variant];
978
979 panfrost_patch_shader_state(ctx, PIPE_SHADER_FRAGMENT);
980
981 #define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name
982
983 COPY(shader);
984 COPY(attribute_count);
985 COPY(varying_count);
986 COPY(texture_count);
987 COPY(sampler_count);
988 COPY(midgard1.uniform_count);
989 COPY(midgard1.uniform_buffer_count);
990 COPY(midgard1.work_count);
991 COPY(midgard1.flags);
992 COPY(midgard1.unknown2);
993
994 #undef COPY
995
996 /* Get blending setup */
997 unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
998
999 struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
1000
1001 for (unsigned c = 0; c < rt_count; ++c)
1002 blend[c] = panfrost_get_blend_for_context(ctx, c);
1003
1004 /* If there is a blend shader, work registers are shared. XXX: opt */
1005
1006 for (unsigned c = 0; c < rt_count; ++c) {
1007 if (blend[c].is_shader)
1008 ctx->fragment_shader_core.midgard1.work_count = 16;
1009 }
1010
 1011                 /* Depending on whether it's legal in the given shader, we
1012 * try to enable early-z testing (or forward-pixel kill?) */
1013
1014 SET_BIT(ctx->fragment_shader_core.midgard1.flags, MALI_EARLY_Z, !variant->can_discard);
1015
1016 /* Any time texturing is used, derivatives are implicitly
1017 * calculated, so we need to enable helper invocations */
1018
1019 SET_BIT(ctx->fragment_shader_core.midgard1.flags, MALI_HELPER_INVOCATIONS, variant->helper_invocations);
1020
1021 /* Assign the stencil refs late */
1022
1023 unsigned front_ref = ctx->stencil_ref.ref_value[0];
1024 unsigned back_ref = ctx->stencil_ref.ref_value[1];
1025 bool back_enab = ctx->depth_stencil->stencil[1].enabled;
1026
1027 ctx->fragment_shader_core.stencil_front.ref = front_ref;
1028 ctx->fragment_shader_core.stencil_back.ref = back_enab ? back_ref : front_ref;
1029
1030 /* CAN_DISCARD should be set if the fragment shader possibly
1031 * contains a 'discard' instruction. It is likely this is
1032 * related to optimizations related to forward-pixel kill, as
1033 * per "Mali Performance 3: Is EGL_BUFFER_PRESERVED a good
1034 * thing?" by Peter Harris
1035 */
1036
1037 SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_CAN_DISCARD, variant->can_discard);
1038 SET_BIT(ctx->fragment_shader_core.midgard1.flags, 0x400, variant->can_discard);
1039
1040 /* Even on MFBD, the shader descriptor gets blend shaders. It's
1041 * *also* copied to the blend_meta appended (by convention),
1042 * but this is the field actually read by the hardware. (Or
1043 * maybe both are read...?) */
1044
1045 if (blend[0].is_shader) {
1046 ctx->fragment_shader_core.blend.shader =
1047 blend[0].shader.bo->gpu | blend[0].shader.first_tag;
1048 } else {
1049 ctx->fragment_shader_core.blend.shader = 0;
1050 }
1051
1052 if (screen->quirks & MIDGARD_SFBD) {
 1053                         /* On platforms with only a single render target (SFBD), the
 1054                          * blend information is inside the shader meta itself. We
1055 * additionally need to signal CAN_DISCARD for nontrivial blend
1056 * modes (so we're able to read back the destination buffer) */
1057
1058 SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_HAS_BLEND_SHADER, blend[0].is_shader);
1059
1060 if (!blend[0].is_shader) {
1061 ctx->fragment_shader_core.blend.equation =
1062 *blend[0].equation.equation;
1063 ctx->fragment_shader_core.blend.constant =
1064 blend[0].equation.constant;
1065 }
1066
1067 SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_CAN_DISCARD, !blend[0].no_blending);
1068 }
1069
1070 size_t size = sizeof(struct mali_shader_meta) + (sizeof(struct midgard_blend_rt) * rt_count);
1071 struct panfrost_transfer transfer = panfrost_allocate_transient(batch, size);
1072 memcpy(transfer.cpu, &ctx->fragment_shader_core, sizeof(struct mali_shader_meta));
1073
1074 ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.shader = transfer.gpu;
1075
1076 if (!(screen->quirks & MIDGARD_SFBD)) {
1077 /* Additional blend descriptor tacked on for jobs using MFBD */
1078
1079 struct midgard_blend_rt rts[4];
1080
1081 for (unsigned i = 0; i < rt_count; ++i) {
1082 rts[i].flags = 0x200;
1083
1084 bool is_srgb =
1085 (ctx->pipe_framebuffer.nr_cbufs > i) &&
1086 (ctx->pipe_framebuffer.cbufs[i]) &&
1087 util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
1088
1089 SET_BIT(rts[i].flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
1090 SET_BIT(rts[i].flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
1091 SET_BIT(rts[i].flags, MALI_BLEND_SRGB, is_srgb);
1092 SET_BIT(rts[i].flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
1093
1094 /* TODO: sRGB in blend shaders is currently
1095 * unimplemented. Contact me (Alyssa) if you're
1096 * interested in working on this. We have
1097 * native Midgard ops for helping here, but
1098 * they're not well-understood yet. */
1099
1100 assert(!(is_srgb && blend[i].is_shader));
1101
1102 if (blend[i].is_shader) {
1103 rts[i].blend.shader = blend[i].shader.bo->gpu | blend[i].shader.first_tag;
1104 } else {
1105 rts[i].blend.equation = *blend[i].equation.equation;
1106 rts[i].blend.constant = blend[i].equation.constant;
1107 }
1108 }
1109
1110 memcpy(transfer.cpu + sizeof(struct mali_shader_meta), rts, sizeof(rts[0]) * rt_count);
1111 }
1112 }
1113
1114 /* We stage to transient, so always dirty.. */
1115 if (ctx->vertex)
1116 panfrost_stage_attributes(ctx);
1117
1118 if (ctx->dirty & PAN_DIRTY_SAMPLERS)
1119 panfrost_upload_sampler_descriptors(ctx);
1120
1121 if (ctx->dirty & PAN_DIRTY_TEXTURES)
1122 panfrost_upload_texture_descriptors(ctx);
1123
1124 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
1125
1126 for (int i = 0; i < PIPE_SHADER_TYPES; ++i) {
1127 struct panfrost_shader_variants *all = ctx->shader[i];
1128
1129 if (!all)
1130 continue;
1131
1132 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[i];
1133
1134 struct panfrost_shader_state *ss = &all->variants[all->active_variant];
1135
1136 /* Uniforms are implicitly UBO #0 */
1137 bool has_uniforms = buf->enabled_mask & (1 << 0);
1138
1139 /* Allocate room for the sysval and the uniforms */
1140 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1141 size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
1142 size_t size = sys_size + uniform_size;
1143 struct panfrost_transfer transfer = panfrost_allocate_transient(batch, size);
1144
1145 /* Upload sysvals requested by the shader */
1146 panfrost_upload_sysvals(ctx, transfer.cpu, ss, i);
1147
1148 /* Upload uniforms */
1149 if (has_uniforms) {
1150 const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
1151 memcpy(transfer.cpu + sys_size, cpu, uniform_size);
1152 }
1153
1154 int uniform_count =
1155 ctx->shader[i]->variants[ctx->shader[i]->active_variant].uniform_count;
1156
1157 struct mali_vertex_tiler_postfix *postfix =
1158 &ctx->payloads[i].postfix;
1159
1160 /* Next up, attach UBOs. UBO #0 is the uniforms we just
1161 * uploaded */
1162
1163 unsigned ubo_count = panfrost_ubo_count(ctx, i);
1164 assert(ubo_count >= 1);
1165
1166 size_t sz = sizeof(struct mali_uniform_buffer_meta) * ubo_count;
1167 struct mali_uniform_buffer_meta ubos[PAN_MAX_CONST_BUFFERS];
1168
1169 /* Upload uniforms as a UBO */
1170 ubos[0].size = MALI_POSITIVE((2 + uniform_count));
1171 ubos[0].ptr = transfer.gpu >> 2;
1172
1173 /* The rest are honest-to-goodness UBOs */
1174
1175 for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
1176 size_t usz = buf->cb[ubo].buffer_size;
1177
1178 bool enabled = buf->enabled_mask & (1 << ubo);
1179 bool empty = usz == 0;
1180
1181 if (!enabled || empty) {
1182 /* Stub out disabled UBOs to catch accesses */
1183
1184 ubos[ubo].size = 0;
1185 ubos[ubo].ptr = 0xDEAD0000;
1186 continue;
1187 }
1188
1189 mali_ptr gpu = panfrost_map_constant_buffer_gpu(ctx, i, buf, ubo);
1190
1191 unsigned bytes_per_field = 16;
1192 unsigned aligned = ALIGN_POT(usz, bytes_per_field);
1193 unsigned fields = aligned / bytes_per_field;
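                        /* e.g. (illustrative) a 100-byte UBO rounds up to 112
                         * bytes = 7 vec4 fields; assuming MALI_POSITIVE(n)
                         * encodes n - 1, the size written below becomes 6 */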
1194
1195 ubos[ubo].size = MALI_POSITIVE(fields);
1196 ubos[ubo].ptr = gpu >> 2;
1197 }
1198
1199 mali_ptr ubufs = panfrost_upload_transient(batch, ubos, sz);
1200 postfix->uniforms = transfer.gpu;
1201 postfix->uniform_buffers = ubufs;
1202
1203 buf->dirty_mask = 0;
1204 }
1205
1206 /* TODO: Upload the viewport somewhere more appropriate */
1207
1208 /* Clip bounds are encoded as floats. The viewport itself is encoded as
1209 * (somewhat) asymmetric ints. */
1210 const struct pipe_scissor_state *ss = &ctx->scissor;
1211
1212 struct mali_viewport view = {
1213 /* By default, do no viewport clipping, i.e. clip to (-inf,
1214 * inf) in each direction. Clipping to the viewport in theory
1215 * should work, but in practice causes issues when we're not
1216 * explicitly trying to scissor */
1217
1218 .clip_minx = -INFINITY,
1219 .clip_miny = -INFINITY,
1220 .clip_maxx = INFINITY,
1221 .clip_maxy = INFINITY,
1222 };
1223
1224 /* Always scissor to the viewport by default. */
1225 float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
1226 float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
1227
1228 float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
1229 float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
1230
1231 float minz = (vp->translate[2] - fabsf(vp->scale[2]));
1232 float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
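        /* Illustrative example: an 800x600 viewport with no flip has
         * scale = (400, 300, ...) and translate = (400, 300, ...), giving
         * vp_minx = 0, vp_maxx = 800, vp_miny = 0 and vp_maxy = 600. */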
1233
1234 /* Apply the scissor test */
1235
1236 unsigned minx, miny, maxx, maxy;
1237
1238 if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
1239 minx = MAX2(ss->minx, vp_minx);
1240 miny = MAX2(ss->miny, vp_miny);
1241 maxx = MIN2(ss->maxx, vp_maxx);
1242 maxy = MIN2(ss->maxy, vp_maxy);
1243 } else {
1244 minx = vp_minx;
1245 miny = vp_miny;
1246 maxx = vp_maxx;
1247 maxy = vp_maxy;
1248 }
1249
1250 /* Hardware needs the min/max to be strictly ordered, so flip if we
1251 * need to. The viewport transformation in the vertex shader will
1252 * handle the negatives if we don't */
1253
1254 if (miny > maxy) {
1255 unsigned temp = miny;
1256 miny = maxy;
1257 maxy = temp;
1258 }
1259
1260 if (minx > maxx) {
1261 unsigned temp = minx;
1262 minx = maxx;
1263 maxx = temp;
1264 }
1265
1266 if (minz > maxz) {
1267 float temp = minz;
1268 minz = maxz;
1269 maxz = temp;
1270 }
1271
1272 /* Clamp to the framebuffer size as a last check */
1273
1274 minx = MIN2(ctx->pipe_framebuffer.width, minx);
1275 maxx = MIN2(ctx->pipe_framebuffer.width, maxx);
1276
1277 miny = MIN2(ctx->pipe_framebuffer.height, miny);
1278 maxy = MIN2(ctx->pipe_framebuffer.height, maxy);
1279
1280 /* Update the job, unless we're doing wallpapering (whose lack of
1281 * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
1282 * just... be faster :) */
1283
1284 if (!ctx->wallpaper_batch)
1285 panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
1286
1287 /* Upload */
1288
1289 view.viewport0[0] = minx;
1290 view.viewport1[0] = MALI_POSITIVE(maxx);
1291
1292 view.viewport0[1] = miny;
1293 view.viewport1[1] = MALI_POSITIVE(maxy);
1294
1295 view.clip_minz = minz;
1296 view.clip_maxz = maxz;
1297
1298 ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.viewport =
1299 panfrost_upload_transient(batch,
1300 &view,
1301 sizeof(struct mali_viewport));
1302
1303 ctx->dirty = 0;
1304 }
1305
1306 /* Corresponds to exactly one draw, but does not submit anything */
1307
1308 static void
1309 panfrost_queue_draw(struct panfrost_context *ctx)
1310 {
1311 /* Handle dirty flags now */
1312 panfrost_emit_for_draw(ctx, true);
1313
 1314         /* If rasterizer discard is enabled, only submit the vertex job */
1315
1316 bool rasterizer_discard = ctx->rasterizer
1317 && ctx->rasterizer->base.rasterizer_discard;
1318
1319 struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false);
1320 struct panfrost_transfer tiler;
1321
1322 if (!rasterizer_discard)
1323 tiler = panfrost_vertex_tiler_job(ctx, true);
1324
1325 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
1326
1327 if (rasterizer_discard)
1328 panfrost_scoreboard_queue_vertex_job(batch, vertex, FALSE);
1329 else if (ctx->wallpaper_batch && batch->first_tiler.gpu)
1330 panfrost_scoreboard_queue_fused_job_prepend(batch, vertex, tiler);
1331 else
1332 panfrost_scoreboard_queue_fused_job(batch, vertex, tiler);
1333 }
1334
1335 /* The entire frame is in memory -- send it off to the kernel! */
1336
1337 void
1338 panfrost_flush(
1339 struct pipe_context *pipe,
1340 struct pipe_fence_handle **fence,
1341 unsigned flags)
1342 {
1343 struct panfrost_context *ctx = pan_context(pipe);
1344 struct util_dynarray fences;
1345
1346 /* We must collect the fences before the flush is done, otherwise we'll
1347 * lose track of them.
1348 */
1349 if (fence) {
1350 util_dynarray_init(&fences, NULL);
1351 hash_table_foreach(ctx->batches, hentry) {
1352 struct panfrost_batch *batch = hentry->data;
1353
1354 panfrost_batch_fence_reference(batch->out_sync);
1355 util_dynarray_append(&fences,
1356 struct panfrost_batch_fence *,
1357 batch->out_sync);
1358 }
1359 }
1360
1361 /* Submit all pending jobs */
1362 panfrost_flush_all_batches(ctx, false);
1363
1364 if (fence) {
1365 struct panfrost_fence *f = panfrost_fence_create(ctx, &fences);
1366 pipe->screen->fence_reference(pipe->screen, fence, NULL);
1367 *fence = (struct pipe_fence_handle *)f;
1368
1369 util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence)
1370 panfrost_batch_fence_unreference(*fence);
1371
1372 util_dynarray_fini(&fences);
1373 }
1374 }
1375
1376 #define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c;
1377
1378 static int
1379 g2m_draw_mode(enum pipe_prim_type mode)
1380 {
1381 switch (mode) {
1382 DEFINE_CASE(POINTS);
1383 DEFINE_CASE(LINES);
1384 DEFINE_CASE(LINE_LOOP);
1385 DEFINE_CASE(LINE_STRIP);
1386 DEFINE_CASE(TRIANGLES);
1387 DEFINE_CASE(TRIANGLE_STRIP);
1388 DEFINE_CASE(TRIANGLE_FAN);
1389 DEFINE_CASE(QUADS);
1390 DEFINE_CASE(QUAD_STRIP);
1391 DEFINE_CASE(POLYGON);
1392
1393 default:
1394 unreachable("Invalid draw mode");
1395 }
1396 }
1397
1398 #undef DEFINE_CASE
1399
1400 static unsigned
1401 panfrost_translate_index_size(unsigned size)
1402 {
1403 switch (size) {
1404 case 1:
1405 return MALI_DRAW_INDEXED_UINT8;
1406
1407 case 2:
1408 return MALI_DRAW_INDEXED_UINT16;
1409
1410 case 4:
1411 return MALI_DRAW_INDEXED_UINT32;
1412
1413 default:
1414 unreachable("Invalid index size");
1415 }
1416 }
1417
 1418 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
 1419  * good for the duration of the draw (transient), though it could last longer */
1420
1421 static mali_ptr
1422 panfrost_get_index_buffer_mapped(struct panfrost_context *ctx, const struct pipe_draw_info *info)
1423 {
1424 struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);
1425
1426 off_t offset = info->start * info->index_size;
1427 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
1428
1429 if (!info->has_user_indices) {
1430 /* Only resources can be directly mapped */
1431 panfrost_batch_add_bo(batch, rsrc->bo,
1432 PAN_BO_ACCESS_SHARED |
1433 PAN_BO_ACCESS_READ |
1434 PAN_BO_ACCESS_VERTEX_TILER);
1435 return rsrc->bo->gpu + offset;
1436 } else {
1437 /* Otherwise, we need to upload to transient memory */
1438 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
1439 return panfrost_upload_transient(batch, ibuf8 + offset, info->count * info->index_size);
1440 }
1441 }
1442
1443 static bool
1444 panfrost_scissor_culls_everything(struct panfrost_context *ctx)
1445 {
1446 const struct pipe_scissor_state *ss = &ctx->scissor;
1447
1448 /* Check if we're scissoring at all */
1449
1450 if (!(ctx->rasterizer && ctx->rasterizer->base.scissor))
1451 return false;
1452
1453 return (ss->minx == ss->maxx) || (ss->miny == ss->maxy);
1454 }
1455
1456 /* Count generated primitives (when there is no geom/tess shaders) for
1457 * transform feedback */
1458
1459 static void
1460 panfrost_statistics_record(
1461 struct panfrost_context *ctx,
1462 const struct pipe_draw_info *info)
1463 {
1464 if (!ctx->active_queries)
1465 return;
1466
1467 uint32_t prims = u_prims_for_vertices(info->mode, info->count);
1468 ctx->prims_generated += prims;
1469
1470 if (!ctx->streamout.num_targets)
1471 return;
1472
1473 ctx->tf_prims_generated += prims;
1474 }
1475
1476 static void
1477 panfrost_draw_vbo(
1478 struct pipe_context *pipe,
1479 const struct pipe_draw_info *info)
1480 {
1481 struct panfrost_context *ctx = pan_context(pipe);
1482
1483 /* First of all, check the scissor to see if anything is drawn at all.
1484 * If it's not, we drop the draw (mostly a conformance issue;
1485 * well-behaved apps shouldn't hit this) */
1486
1487 if (panfrost_scissor_culls_everything(ctx))
1488 return;
1489
1490 int mode = info->mode;
1491
 1492         /* Fall back if the restart index is unsupported */
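        /* e.g. (illustrative) the fixed restart index is 0xff for 8-bit
         * indices and 0xffff for 16-bit indices */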
1493 unsigned primitive_index = (1 << (info->index_size * 8)) - 1;
1494
1495 if (info->primitive_restart && info->index_size
1496 && info->restart_index != primitive_index) {
1497 util_draw_vbo_without_prim_restart(pipe, info);
1498 return;
1499 }
1500
1501 /* Fallback for unsupported modes */
1502
1503 assert(ctx->rasterizer != NULL);
1504
1505 if (!(ctx->draw_modes & (1 << mode))) {
1506 if (mode == PIPE_PRIM_QUADS && info->count == 4 && !ctx->rasterizer->base.flatshade) {
1507 mode = PIPE_PRIM_TRIANGLE_FAN;
1508 } else {
1509 if (info->count < 4) {
1510 /* Degenerate case? */
1511 return;
1512 }
1513
1514 util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
1515 util_primconvert_draw_vbo(ctx->primconvert, info);
1516 return;
1517 }
1518 }
1519
1520 ctx->payloads[PIPE_SHADER_VERTEX].offset_start = info->start;
1521 ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = info->start;
1522
1523 /* Now that we have a guaranteed terminating path, find the job.
1524 * Assignment commented out to prevent unused warning */
1525
1526 /* struct panfrost_batch *batch = */ panfrost_get_batch_for_fbo(ctx);
1527
1528 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode = g2m_draw_mode(mode);
1529
1530 /* Take into account a negative bias */
1531 ctx->vertex_count = info->count + abs(info->index_bias);
1532 ctx->instance_count = info->instance_count;
1533 ctx->active_prim = info->mode;
1534
1535 /* For non-indexed draws, they're the same */
1536 unsigned vertex_count = ctx->vertex_count;
1537
1538 unsigned draw_flags = 0;
1539
 1540         /* The draw flags control how the primitive size is interpreted */
1541
1542 if (panfrost_writes_point_size(ctx))
1543 draw_flags |= MALI_DRAW_VARYING_SIZE;
1544
1545 if (info->primitive_restart)
1546 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
1547
 1548         /* These don't make much sense */
1549
1550 draw_flags |= 0x3000;
1551
1552 if (mode == PIPE_PRIM_LINE_STRIP) {
1553 draw_flags |= 0x800;
1554 }
1555
1556 panfrost_statistics_record(ctx, info);
1557
1558 if (info->index_size) {
1559 /* Calculate the min/max index used so we can figure out how
1560 * many times to invoke the vertex shader */
1561
1562 /* Fetch / calculate index bounds */
1563 unsigned min_index = 0, max_index = 0;
1564
1565 if (info->max_index == ~0u) {
1566 u_vbuf_get_minmax_index(pipe, info, &min_index, &max_index);
1567 } else {
1568 min_index = info->min_index;
1569 max_index = info->max_index;
1570 }
1571
1572 /* Use the corresponding values */
1573 vertex_count = max_index - min_index + 1;
1574 ctx->payloads[PIPE_SHADER_VERTEX].offset_start = min_index + info->index_bias;
1575 ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = min_index + info->index_bias;
1576
1577 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = -min_index;
1578 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(info->count);
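                /* Illustrative example: an indexed draw using indices
                 * {5, 6, 7, 6} with index_bias = 0 gives min_index = 5 and
                 * max_index = 7, so vertex_count = 3, offset_start = 5 and
                 * offset_bias_correction = -5; the vertex shader runs over
                 * 3 vertices instead of 8. */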
1579
1580 //assert(!info->restart_index); /* TODO: Research */
1581
1582 draw_flags |= panfrost_translate_index_size(info->index_size);
1583 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices = panfrost_get_index_buffer_mapped(ctx, info);
1584 } else {
1585 /* Index count == vertex count, if no indexing is applied, as
1586 * if it is internally indexed in the expected order */
1587
1588 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = 0;
1589 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(ctx->vertex_count);
1590
1591 /* Reverse index state */
1592 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices = (u64) NULL;
1593 }
1594
1595 /* Dispatch "compute jobs" for the vertex/tiler pair as (1,
1596 * vertex_count, 1) */
1597
1598 panfrost_pack_work_groups_fused(
1599 &ctx->payloads[PIPE_SHADER_VERTEX].prefix,
1600 &ctx->payloads[PIPE_SHADER_FRAGMENT].prefix,
1601 1, vertex_count, info->instance_count,
1602 1, 1, 1);
1603
1604 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.unknown_draw = draw_flags;
1605
1606 /* Encode the padded vertex count */
1607
1608 if (info->instance_count > 1) {
1609 /* Triangles have non-even vertex counts so they change how
1610 * padding works internally */
1611
1612 bool is_triangle =
1613 mode == PIPE_PRIM_TRIANGLES ||
1614 mode == PIPE_PRIM_TRIANGLE_STRIP ||
1615 mode == PIPE_PRIM_TRIANGLE_FAN;
1616
1617 struct pan_shift_odd so =
1618 panfrost_padded_vertex_count(vertex_count, !is_triangle);
1619
1620 ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = so.shift;
1621 ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = so.shift;
1622
1623 ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = so.odd;
1624 ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = so.odd;
1625
1626 ctx->padded_count = pan_expand_shift_odd(so);
1627 } else {
1628 ctx->padded_count = vertex_count;
1629
1630 /* Reset instancing state */
1631 ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = 0;
1632 ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = 0;
1633 ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = 0;
1634 ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = 0;
1635 }
1636
1637 /* Fire off the draw itself */
1638 panfrost_queue_draw(ctx);
1639
1640 /* Increment transform feedback offsets */
1641
1642 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
1643 unsigned output_count = u_stream_outputs_for_vertices(
1644 ctx->active_prim, ctx->vertex_count);
1645
1646 ctx->streamout.offsets[i] += output_count;
1647 }
1648 }
1649
1650 /* CSO state */
1651
1652 static void
1653 panfrost_generic_cso_delete(struct pipe_context *pctx, void *hwcso)
1654 {
1655 free(hwcso);
1656 }
1657
1658 static void *
1659 panfrost_create_rasterizer_state(
1660 struct pipe_context *pctx,
1661 const struct pipe_rasterizer_state *cso)
1662 {
1663 struct panfrost_rasterizer *so = CALLOC_STRUCT(panfrost_rasterizer);
1664
1665 so->base = *cso;
1666
1667 /* Bitmask, unknown meaning of the start value. 0x105 on 32-bit T6XX */
1668 so->tiler_gl_enables = 0x7;
1669
1670 if (cso->front_ccw)
1671 so->tiler_gl_enables |= MALI_FRONT_CCW_TOP;
1672
1673 if (cso->cull_face & PIPE_FACE_FRONT)
1674 so->tiler_gl_enables |= MALI_CULL_FACE_FRONT;
1675
1676 if (cso->cull_face & PIPE_FACE_BACK)
1677 so->tiler_gl_enables |= MALI_CULL_FACE_BACK;
1678
1679 return so;
1680 }
1681
1682 static void
1683 panfrost_bind_rasterizer_state(
1684 struct pipe_context *pctx,
1685 void *hwcso)
1686 {
1687 struct panfrost_context *ctx = pan_context(pctx);
1688
1689 /* TODO: Why can't rasterizer be NULL ever? Other drivers are fine.. */
1690 if (!hwcso)
1691 return;
1692
1693 ctx->rasterizer = hwcso;
1694 ctx->dirty |= PAN_DIRTY_RASTERIZER;
1695
1696 ctx->fragment_shader_core.depth_units = ctx->rasterizer->base.offset_units * 2.0f;
1697 ctx->fragment_shader_core.depth_factor = ctx->rasterizer->base.offset_scale;
1698
 1699         /* Guaranteed with the core GL call, so don't expose ARB_polygon_offset */
1700 assert(ctx->rasterizer->base.offset_clamp == 0.0);
1701
 1702         /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
1703
1704 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_DEPTH_RANGE_A, ctx->rasterizer->base.offset_tri);
1705 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_DEPTH_RANGE_B, ctx->rasterizer->base.offset_tri);
1706
1707 /* Point sprites are emulated */
1708
1709 struct panfrost_shader_state *variant =
1710 ctx->shader[PIPE_SHADER_FRAGMENT] ? &ctx->shader[PIPE_SHADER_FRAGMENT]->variants[ctx->shader[PIPE_SHADER_FRAGMENT]->active_variant] : NULL;
1711
1712 if (ctx->rasterizer->base.sprite_coord_enable || (variant && variant->point_sprite_mask))
1713 ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
1714 }
1715
1716 static void *
1717 panfrost_create_vertex_elements_state(
1718 struct pipe_context *pctx,
1719 unsigned num_elements,
1720 const struct pipe_vertex_element *elements)
1721 {
1722 struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state);
1723
1724 so->num_elements = num_elements;
1725 memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
1726
1727 for (int i = 0; i < num_elements; ++i) {
1728 so->hw[i].index = i;
1729
1730 enum pipe_format fmt = elements[i].src_format;
1731 const struct util_format_description *desc = util_format_description(fmt);
1732 so->hw[i].unknown1 = 0x2;
1733 so->hw[i].swizzle = panfrost_get_default_swizzle(desc->nr_channels);
1734
1735 so->hw[i].format = panfrost_find_format(desc);
1736
1737 /* The field itself should probably be shifted over */
1738 so->hw[i].src_offset = elements[i].src_offset;
1739 }
1740
1741 return so;
1742 }
1743
1744 static void
1745 panfrost_bind_vertex_elements_state(
1746 struct pipe_context *pctx,
1747 void *hwcso)
1748 {
1749 struct panfrost_context *ctx = pan_context(pctx);
1750
1751 ctx->vertex = hwcso;
1752 ctx->dirty |= PAN_DIRTY_VERTEX;
1753 }
1754
1755 static void *
1756 panfrost_create_shader_state(
1757 struct pipe_context *pctx,
1758 const struct pipe_shader_state *cso)
1759 {
1760 struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
1761 so->base = *cso;
1762
1763 /* Token deep copy to prevent memory corruption */
1764
1765 if (cso->type == PIPE_SHADER_IR_TGSI)
1766 so->base.tokens = tgsi_dup_tokens(so->base.tokens);
1767
1768 return so;
1769 }
1770
1771 static void
1772 panfrost_delete_shader_state(
1773 struct pipe_context *pctx,
1774 void *so)
1775 {
1776 struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;
1777
1778 if (cso->base.type == PIPE_SHADER_IR_TGSI) {
1779 DBG("Deleting TGSI shader leaks duplicated tokens\n");
1780 }
1781
1782 for (unsigned i = 0; i < cso->variant_count; ++i) {
1783 struct panfrost_shader_state *shader_state = &cso->variants[i];
1784 panfrost_bo_unreference(shader_state->bo);
1785 shader_state->bo = NULL;
1786 }
1787
1788 free(so);
1789 }
1790
1791 static void *
1792 panfrost_create_sampler_state(
1793 struct pipe_context *pctx,
1794 const struct pipe_sampler_state *cso)
1795 {
1796 struct panfrost_sampler_state *so = CALLOC_STRUCT(panfrost_sampler_state);
1797 so->base = *cso;
1798
1799 /* sampler_state corresponds to mali_sampler_descriptor, which we can generate entirely here */
1800
1801 bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
1802 bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
1803 bool mip_linear = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;
1804
1805 unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
1806 unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
1807 unsigned mip_filter = mip_linear ?
1808 (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
1809 unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;
1810
1811 struct mali_sampler_descriptor sampler_descriptor = {
1812 .filter_mode = min_filter | mag_filter | mip_filter | normalized,
1813 .wrap_s = translate_tex_wrap(cso->wrap_s),
1814 .wrap_t = translate_tex_wrap(cso->wrap_t),
1815 .wrap_r = translate_tex_wrap(cso->wrap_r),
1816 .compare_func = panfrost_translate_alt_compare_func(cso->compare_func),
1817 .border_color = {
1818 cso->border_color.f[0],
1819 cso->border_color.f[1],
1820 cso->border_color.f[2],
1821 cso->border_color.f[3]
1822 },
1823 .min_lod = FIXED_16(cso->min_lod),
1824 .max_lod = FIXED_16(cso->max_lod),
1825 .lod_bias = FIXED_16(cso->lod_bias),
1826 .seamless_cube_map = cso->seamless_cube_map,
1827 };
1828
1829 /* If necessary, we disable mipmapping in the sampler descriptor by
1830 * clamping the LOD as tight as possible (from 0 to epsilon,
1831 * essentially -- remember these are fixed point numbers, so
1832 * epsilon=1/256) */
1833
1834 if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1835 sampler_descriptor.max_lod = sampler_descriptor.min_lod;
1836
1837         /* Enforce that there is something in the middle by adding epsilon */
1838
1839 if (sampler_descriptor.min_lod == sampler_descriptor.max_lod)
1840 sampler_descriptor.max_lod++;
1841
1842 /* Sanity check */
1843 assert(sampler_descriptor.max_lod > sampler_descriptor.min_lod);
1844
1845 so->hw = sampler_descriptor;
1846
1847 return so;
1848 }
1849
1850 static void
1851 panfrost_bind_sampler_states(
1852 struct pipe_context *pctx,
1853 enum pipe_shader_type shader,
1854 unsigned start_slot, unsigned num_sampler,
1855 void **sampler)
1856 {
1857 assert(start_slot == 0);
1858
1859 struct panfrost_context *ctx = pan_context(pctx);
1860
1861 /* XXX: Should upload, not just copy? */
1862 ctx->sampler_count[shader] = num_sampler;
1863 memcpy(ctx->samplers[shader], sampler, num_sampler * sizeof (void *));
1864
1865 ctx->dirty |= PAN_DIRTY_SAMPLERS;
1866 }
1867
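/* Check whether an existing compiled variant is usable with the current
 * state; fragment variants are specialised on alpha test and point sprite
 * configuration */
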
1868 static bool
1869 panfrost_variant_matches(
1870 struct panfrost_context *ctx,
1871 struct panfrost_shader_state *variant,
1872 enum pipe_shader_type type)
1873 {
1874 struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base;
1875 struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;
1876
1877 bool is_fragment = (type == PIPE_SHADER_FRAGMENT);
1878
1879 if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) {
1880 /* Make sure enable state is at least the same */
1881 if (alpha->enabled != variant->alpha_state.enabled) {
1882 return false;
1883 }
1884
1885 /* Check that the contents of the test are the same */
1886 bool same_func = alpha->func == variant->alpha_state.func;
1887 bool same_ref = alpha->ref_value == variant->alpha_state.ref_value;
1888
1889 if (!(same_func && same_ref)) {
1890 return false;
1891 }
1892 }
1893
1894 if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable |
1895 variant->point_sprite_mask)) {
1896                 /* Ensure the same varyings are turned into point sprites */
1897 if (rasterizer->sprite_coord_enable != variant->point_sprite_mask)
1898 return false;
1899
1900 /* Ensure the orientation is correct */
1901 bool upper_left =
1902 rasterizer->sprite_coord_mode ==
1903 PIPE_SPRITE_COORD_UPPER_LEFT;
1904
1905 if (variant->point_sprite_upper_left != upper_left)
1906 return false;
1907 }
1908
1909 /* Otherwise, we're good to go */
1910 return true;
1911 }
1912
1913 /**
1914 * Fix an uncompiled shader's stream output info, and produce a bitmask
1915 * of which VARYING_SLOT_* are captured for stream output.
1916 *
1917 * Core Gallium stores output->register_index as a "slot" number, where
1918 * slots are assigned consecutively to all outputs in info->outputs_written.
1919 * This naive packing of outputs doesn't work for us - we too have slots,
1920 * but the layout is defined by the VUE map, which we won't have until we
1921 * compile a specific shader variant. So, we remap these and simply store
1922 * VARYING_SLOT_* in our copy's output->register_index fields.
1923 *
1924 * We then produce a bitmask of outputs which are used for SO.
1925 *
1926 * Implementation from iris.
1927 */
1928
1929 static uint64_t
1930 update_so_info(struct pipe_stream_output_info *so_info,
1931 uint64_t outputs_written)
1932 {
1933 uint64_t so_outputs = 0;
1934 uint8_t reverse_map[64] = {0};
1935 unsigned slot = 0;
1936
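        /* Build a reverse map from condensed slot numbers back to VARYING_SLOT_*
         * by scanning the written-outputs bitmask in ascending order */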
1937 while (outputs_written)
1938 reverse_map[slot++] = u_bit_scan64(&outputs_written);
1939
1940 for (unsigned i = 0; i < so_info->num_outputs; i++) {
1941 struct pipe_stream_output *output = &so_info->output[i];
1942
1943 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
1944 output->register_index = reverse_map[output->register_index];
1945
1946 so_outputs |= 1ull << output->register_index;
1947 }
1948
1949 return so_outputs;
1950 }
1951
1952 static void
1953 panfrost_bind_shader_state(
1954 struct pipe_context *pctx,
1955 void *hwcso,
1956 enum pipe_shader_type type)
1957 {
1958 struct panfrost_context *ctx = pan_context(pctx);
1959
1960 ctx->shader[type] = hwcso;
1961
1962 if (type == PIPE_SHADER_FRAGMENT)
1963 ctx->dirty |= PAN_DIRTY_FS;
1964 else
1965 ctx->dirty |= PAN_DIRTY_VS;
1966
1967 if (!hwcso) return;
1968
1969 /* Match the appropriate variant */
1970
1971 signed variant = -1;
1972 struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;
1973
1974 for (unsigned i = 0; i < variants->variant_count; ++i) {
1975 if (panfrost_variant_matches(ctx, &variants->variants[i], type)) {
1976 variant = i;
1977 break;
1978 }
1979 }
1980
1981 if (variant == -1) {
1982 /* No variant matched, so create a new one */
1983 variant = variants->variant_count++;
1984 assert(variants->variant_count < MAX_SHADER_VARIANTS);
1985
1986 struct panfrost_shader_state *v =
1987 &variants->variants[variant];
1988
1989 if (type == PIPE_SHADER_FRAGMENT) {
1990 v->alpha_state = ctx->depth_stencil->alpha;
1991
1992 if (ctx->rasterizer) {
1993 v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable;
1994 v->point_sprite_upper_left =
1995 ctx->rasterizer->base.sprite_coord_mode ==
1996 PIPE_SPRITE_COORD_UPPER_LEFT;
1997 }
1998 }
1999
2000 variants->variants[variant].tripipe = calloc(1, sizeof(struct mali_shader_meta));
2001
2002 }
2003
2004 /* Select this variant */
2005 variants->active_variant = variant;
2006
2007 struct panfrost_shader_state *shader_state = &variants->variants[variant];
2008 assert(panfrost_variant_matches(ctx, shader_state, type));
2009
2010 /* We finally have a variant, so compile it */
2011
2012 if (!shader_state->compiled) {
2013 uint64_t outputs_written = 0;
2014
2015 panfrost_shader_compile(ctx, shader_state->tripipe,
2016 variants->base.type,
2017 variants->base.type == PIPE_SHADER_IR_NIR ?
2018 variants->base.ir.nir :
2019 variants->base.tokens,
2020 tgsi_processor_to_shader_stage(type), shader_state,
2021 &outputs_written);
2022
2023 shader_state->compiled = true;
2024
2025                 /* Fix up the stream output information, remapping Gallium's
2026                  * condensed slot indices (see update_so_info) */
2027
2028 shader_state->stream_output = variants->base.stream_output;
2029 shader_state->so_mask =
2030 update_so_info(&shader_state->stream_output, outputs_written);
2031 }
2032 }
2033
2034 static void
2035 panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
2036 {
2037 panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
2038 }
2039
2040 static void
2041 panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
2042 {
2043 panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
2044 }
2045
2046 static void
2047 panfrost_set_vertex_buffers(
2048 struct pipe_context *pctx,
2049 unsigned start_slot,
2050 unsigned num_buffers,
2051 const struct pipe_vertex_buffer *buffers)
2052 {
2053 struct panfrost_context *ctx = pan_context(pctx);
2054
2055 util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers, start_slot, num_buffers);
2056 }
2057
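/* Constant buffers are tracked per shader stage; the enabled/dirty masks
 * record which UBO slots currently hold valid data */
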
2058 static void
2059 panfrost_set_constant_buffer(
2060 struct pipe_context *pctx,
2061 enum pipe_shader_type shader, uint index,
2062 const struct pipe_constant_buffer *buf)
2063 {
2064 struct panfrost_context *ctx = pan_context(pctx);
2065 struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];
2066
2067 util_copy_constant_buffer(&pbuf->cb[index], buf);
2068
2069 unsigned mask = (1 << index);
2070
2071 if (unlikely(!buf)) {
2072 pbuf->enabled_mask &= ~mask;
2073 pbuf->dirty_mask &= ~mask;
2074 return;
2075 }
2076
2077 pbuf->enabled_mask |= mask;
2078 pbuf->dirty_mask |= mask;
2079 }
2080
2081 static void
2082 panfrost_set_stencil_ref(
2083 struct pipe_context *pctx,
2084 const struct pipe_stencil_ref *ref)
2085 {
2086 struct panfrost_context *ctx = pan_context(pctx);
2087 ctx->stencil_ref = *ref;
2088
2089 /* Shader core dirty */
2090 ctx->dirty |= PAN_DIRTY_FS;
2091 }
2092
2093 static enum mali_texture_type
2094 panfrost_translate_texture_type(enum pipe_texture_target t)
2095 {
2096         switch (t) {
2097 case PIPE_BUFFER:
2098 case PIPE_TEXTURE_1D:
2099 case PIPE_TEXTURE_1D_ARRAY:
2100 return MALI_TEX_1D;
2101
2102 case PIPE_TEXTURE_2D:
2103 case PIPE_TEXTURE_2D_ARRAY:
2104 case PIPE_TEXTURE_RECT:
2105 return MALI_TEX_2D;
2106
2107 case PIPE_TEXTURE_3D:
2108 return MALI_TEX_3D;
2109
2110 case PIPE_TEXTURE_CUBE:
2111 case PIPE_TEXTURE_CUBE_ARRAY:
2112 return MALI_TEX_CUBE;
2113
2114 default:
2115 unreachable("Unknown target");
2116 }
2117 }
2118
2119 static struct pipe_sampler_view *
2120 panfrost_create_sampler_view(
2121 struct pipe_context *pctx,
2122 struct pipe_resource *texture,
2123 const struct pipe_sampler_view *template)
2124 {
2125 struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view);
2126 int bytes_per_pixel = util_format_get_blocksize(texture->format);
2127
2128 pipe_reference(NULL, &texture->reference);
2129
2130 struct panfrost_resource *prsrc = (struct panfrost_resource *) texture;
2131 assert(prsrc->bo);
2132
2133 so->base = *template;
2134 so->base.texture = texture;
2135 so->base.reference.count = 1;
2136 so->base.context = pctx;
2137
2138 /* sampler_views correspond to texture descriptors, minus the texture
2139 * (data) itself. So, we serialise the descriptor here and cache it for
2140 * later. */
2141
2142 const struct util_format_description *desc = util_format_description(prsrc->base.format);
2143
2144 unsigned char user_swizzle[4] = {
2145 template->swizzle_r,
2146 template->swizzle_g,
2147 template->swizzle_b,
2148 template->swizzle_a
2149 };
2150
2151 enum mali_format format = panfrost_find_format(desc);
2152
2153 /* Check if we need to set a custom stride by computing the "expected"
2154 * stride and comparing it to what the BO actually wants. Only applies
2155 * to linear textures, since tiled/compressed textures have strict
2156 * alignment requirements for their strides as it is */
2157
2158 unsigned first_level = template->u.tex.first_level;
2159 unsigned last_level = template->u.tex.last_level;
2160
2161 if (prsrc->layout == PAN_LINEAR) {
2162 for (unsigned l = first_level; l <= last_level; ++l) {
2163 unsigned actual_stride = prsrc->slices[l].stride;
2164 unsigned width = u_minify(texture->width0, l);
2165 unsigned comp_stride = width * bytes_per_pixel;
2166
2167 if (comp_stride != actual_stride) {
2168 so->manual_stride = true;
2169 break;
2170 }
2171 }
2172 }
2173
2174 /* In the hardware, array_size refers specifically to array textures,
2175 * whereas in Gallium, it also covers cubemaps */
2176
2177 unsigned array_size = texture->array_size;
2178
2179 if (template->target == PIPE_TEXTURE_CUBE) {
2180 /* TODO: Cubemap arrays */
2181 assert(array_size == 6);
2182 array_size /= 6;
2183 }
2184
2185 struct mali_texture_descriptor texture_descriptor = {
2186 .width = MALI_POSITIVE(u_minify(texture->width0, first_level)),
2187 .height = MALI_POSITIVE(u_minify(texture->height0, first_level)),
2188 .depth = MALI_POSITIVE(u_minify(texture->depth0, first_level)),
2189 .array_size = MALI_POSITIVE(array_size),
2190
2191 .format = {
2192 .swizzle = panfrost_translate_swizzle_4(desc->swizzle),
2193 .format = format,
2194 .srgb = desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB,
2195 .type = panfrost_translate_texture_type(template->target),
2196 .unknown2 = 0x1,
2197 },
2198
2199 .swizzle = panfrost_translate_swizzle_4(user_swizzle)
2200 };
2201
2202 texture_descriptor.levels = last_level - first_level;
2203
2204 so->hw = texture_descriptor;
2205
2206 return (struct pipe_sampler_view *) so;
2207 }
2208
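/* Bind the requested sampler views and release references held by any slots
 * beyond them; new_nr tracks the highest non-NULL slot */
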
2209 static void
2210 panfrost_set_sampler_views(
2211 struct pipe_context *pctx,
2212 enum pipe_shader_type shader,
2213 unsigned start_slot, unsigned num_views,
2214 struct pipe_sampler_view **views)
2215 {
2216 struct panfrost_context *ctx = pan_context(pctx);
2217 unsigned new_nr = 0;
2218 unsigned i;
2219
2220 assert(start_slot == 0);
2221
2222 for (i = 0; i < num_views; ++i) {
2223 if (views[i])
2224 new_nr = i + 1;
2225 pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
2226 views[i]);
2227 }
2228
2229 for (; i < ctx->sampler_view_count[shader]; i++) {
2230 pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
2231 NULL);
2232 }
2233 ctx->sampler_view_count[shader] = new_nr;
2234
2235 ctx->dirty |= PAN_DIRTY_TEXTURES;
2236 }
2237
2238 static void
2239 panfrost_sampler_view_destroy(
2240 struct pipe_context *pctx,
2241 struct pipe_sampler_view *view)
2242 {
2243 pipe_resource_reference(&view->texture, NULL);
2244 ralloc_free(view);
2245 }
2246
2247 static void
2248 panfrost_set_shader_buffers(
2249 struct pipe_context *pctx,
2250 enum pipe_shader_type shader,
2251 unsigned start, unsigned count,
2252 const struct pipe_shader_buffer *buffers,
2253 unsigned writable_bitmask)
2254 {
2255 struct panfrost_context *ctx = pan_context(pctx);
2256
2257 util_set_shader_buffers_mask(ctx->ssbo[shader], &ctx->ssbo_mask[shader],
2258 buffers, start, count);
2259 }
2260
2261 /* Hints that a framebuffer should use AFBC where possible */
2262
2263 static void
2264 panfrost_hint_afbc(
2265 struct panfrost_screen *screen,
2266 const struct pipe_framebuffer_state *fb)
2267 {
2268         /* AFBC implementation incomplete; hide it behind a debug flag */
2269 if (!(pan_debug & PAN_DBG_AFBC)) return;
2270
2271 /* Hint AFBC to the resources bound to each color buffer */
2272
2273 for (unsigned i = 0; i < fb->nr_cbufs; ++i) {
2274 struct pipe_surface *surf = fb->cbufs[i];
2275 struct panfrost_resource *rsrc = pan_resource(surf->texture);
2276 panfrost_resource_hint_layout(screen, rsrc, PAN_AFBC, 1);
2277 }
2278
2279 /* Also hint it to the depth buffer */
2280
2281 if (fb->zsbuf) {
2282 struct panfrost_resource *rsrc = pan_resource(fb->zsbuf->texture);
2283 panfrost_resource_hint_layout(screen, rsrc, PAN_AFBC, 1);
2284 }
2285 }
2286
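/* A new framebuffer invalidates the current batch, since batches are keyed
 * on the framebuffer state they render to */
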
2287 static void
2288 panfrost_set_framebuffer_state(struct pipe_context *pctx,
2289 const struct pipe_framebuffer_state *fb)
2290 {
2291 struct panfrost_context *ctx = pan_context(pctx);
2292
2293 panfrost_hint_afbc(pan_screen(pctx->screen), fb);
2294 util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
2295 ctx->batch = NULL;
2296 panfrost_invalidate_frame(ctx);
2297 }
2298
2299 static void *
2300 panfrost_create_depth_stencil_state(struct pipe_context *pipe,
2301 const struct pipe_depth_stencil_alpha_state *depth_stencil)
2302 {
2303 return mem_dup(depth_stencil, sizeof(*depth_stencil));
2304 }
2305
2306 static void
2307 panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
2308 void *cso)
2309 {
2310 struct panfrost_context *ctx = pan_context(pipe);
2311 struct pipe_depth_stencil_alpha_state *depth_stencil = cso;
2312 ctx->depth_stencil = depth_stencil;
2313
2314 if (!depth_stencil)
2315 return;
2316
2317         /* The alpha test does not exist in the hardware (it's not in ES3), so
2318          * it's emulated in the fragment shader */
2319
2320 if (depth_stencil->alpha.enabled) {
2321 /* We need to trigger a new shader (maybe) */
2322 ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
2323 }
2324
2325 /* Stencil state */
2326 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_STENCIL_TEST, depth_stencil->stencil[0].enabled);
2327
2328 panfrost_make_stencil_state(&depth_stencil->stencil[0], &ctx->fragment_shader_core.stencil_front);
2329 ctx->fragment_shader_core.stencil_mask_front = depth_stencil->stencil[0].writemask;
2330
2331 /* If back-stencil is not enabled, use the front values */
2332 bool back_enab = ctx->depth_stencil->stencil[1].enabled;
2333 unsigned back_index = back_enab ? 1 : 0;
2334
2335 panfrost_make_stencil_state(&depth_stencil->stencil[back_index], &ctx->fragment_shader_core.stencil_back);
2336 ctx->fragment_shader_core.stencil_mask_back = depth_stencil->stencil[back_index].writemask;
2337
2338 /* Depth state (TODO: Refactor) */
2339 SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_DEPTH_WRITEMASK,
2340 depth_stencil->depth.writemask);
2341
2342 int func = depth_stencil->depth.enabled ? depth_stencil->depth.func : PIPE_FUNC_ALWAYS;
2343
2344 ctx->fragment_shader_core.unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
2345 ctx->fragment_shader_core.unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(func));
2346
2347 /* Bounds test not implemented */
2348 assert(!depth_stencil->depth.bounds_test);
2349
2350 ctx->dirty |= PAN_DIRTY_FS;
2351 }
2352
2353 static void
2354 panfrost_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
2355 {
2356         free(depth);
2357 }
2358
2359 static void
2360 panfrost_set_sample_mask(struct pipe_context *pipe,
2361 unsigned sample_mask)
2362 {
2363 }
2364
2365 static void
2366 panfrost_set_clip_state(struct pipe_context *pipe,
2367 const struct pipe_clip_state *clip)
2368 {
2369 //struct panfrost_context *panfrost = pan_context(pipe);
2370 }
2371
2372 static void
2373 panfrost_set_viewport_states(struct pipe_context *pipe,
2374 unsigned start_slot,
2375 unsigned num_viewports,
2376 const struct pipe_viewport_state *viewports)
2377 {
2378 struct panfrost_context *ctx = pan_context(pipe);
2379
2380 assert(start_slot == 0);
2381 assert(num_viewports == 1);
2382
2383 ctx->pipe_viewport = *viewports;
2384 }
2385
2386 static void
2387 panfrost_set_scissor_states(struct pipe_context *pipe,
2388 unsigned start_slot,
2389 unsigned num_scissors,
2390 const struct pipe_scissor_state *scissors)
2391 {
2392 struct panfrost_context *ctx = pan_context(pipe);
2393
2394 assert(start_slot == 0);
2395 assert(num_scissors == 1);
2396
2397 ctx->scissor = *scissors;
2398 }
2399
2400 static void
2401 panfrost_set_polygon_stipple(struct pipe_context *pipe,
2402 const struct pipe_poly_stipple *stipple)
2403 {
2404 //struct panfrost_context *panfrost = pan_context(pipe);
2405 }
2406
2407 static void
2408 panfrost_set_active_query_state(struct pipe_context *pipe,
2409 bool enable)
2410 {
2411 struct panfrost_context *ctx = pan_context(pipe);
2412 ctx->active_queries = enable;
2413 }
2414
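/* Context teardown: destroy the blitters, drop framebuffer references and
 * the stream uploader, then free the ralloc'd context */
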
2415 static void
2416 panfrost_destroy(struct pipe_context *pipe)
2417 {
2418 struct panfrost_context *panfrost = pan_context(pipe);
2419
2420 if (panfrost->blitter)
2421 util_blitter_destroy(panfrost->blitter);
2422
2423 if (panfrost->blitter_wallpaper)
2424 util_blitter_destroy(panfrost->blitter_wallpaper);
2425
2426 util_unreference_framebuffer_state(&panfrost->pipe_framebuffer);
2427 u_upload_destroy(pipe->stream_uploader);
2428
2429 ralloc_free(pipe);
2430 }
2431
2432 static struct pipe_query *
2433 panfrost_create_query(struct pipe_context *pipe,
2434 unsigned type,
2435 unsigned index)
2436 {
2437 struct panfrost_query *q = rzalloc(pipe, struct panfrost_query);
2438
2439 q->type = type;
2440 q->index = index;
2441
2442 return (struct pipe_query *) q;
2443 }
2444
2445 static void
2446 panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
2447 {
2448 struct panfrost_query *query = (struct panfrost_query *) q;
2449
2450 if (query->bo) {
2451 panfrost_bo_unreference(query->bo);
2452 query->bo = NULL;
2453 }
2454
2455 ralloc_free(q);
2456 }
2457
2458 static bool
2459 panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
2460 {
2461 struct panfrost_context *ctx = pan_context(pipe);
2462 struct panfrost_query *query = (struct panfrost_query *) q;
2463
2464 switch (query->type) {
2465 case PIPE_QUERY_OCCLUSION_COUNTER:
2466 case PIPE_QUERY_OCCLUSION_PREDICATE:
2467 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
2468                 /* Allocate a BO to hold the query results */
2469 if (!query->bo) {
2470 query->bo = panfrost_bo_create(
2471 pan_screen(ctx->base.screen),
2472 sizeof(unsigned), 0);
2473 }
2474
2475 unsigned *result = (unsigned *)query->bo->cpu;
2476 *result = 0; /* Default to 0 if nothing at all drawn. */
2477 ctx->occlusion_query = query;
2478 break;
2479
2480         /* Geometry statistics are computed in the driver. XXX: geometry/tessellation
2481          * shaders are not handled yet */
2482
2483 case PIPE_QUERY_PRIMITIVES_GENERATED:
2484 query->start = ctx->prims_generated;
2485 break;
2486 case PIPE_QUERY_PRIMITIVES_EMITTED:
2487 query->start = ctx->tf_prims_generated;
2488 break;
2489
2490 default:
2491 fprintf(stderr, "Skipping query %u\n", query->type);
2492 break;
2493 }
2494
2495 return true;
2496 }
2497
2498 static bool
2499 panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
2500 {
2501 struct panfrost_context *ctx = pan_context(pipe);
2502 struct panfrost_query *query = (struct panfrost_query *) q;
2503
2504 switch (query->type) {
2505 case PIPE_QUERY_OCCLUSION_COUNTER:
2506 case PIPE_QUERY_OCCLUSION_PREDICATE:
2507 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
2508 ctx->occlusion_query = NULL;
2509 break;
2510 case PIPE_QUERY_PRIMITIVES_GENERATED:
2511 query->end = ctx->prims_generated;
2512 break;
2513 case PIPE_QUERY_PRIMITIVES_EMITTED:
2514 query->end = ctx->tf_prims_generated;
2515 break;
2516 }
2517
2518 return true;
2519 }
2520
2521 static bool
2522 panfrost_get_query_result(struct pipe_context *pipe,
2523 struct pipe_query *q,
2524 bool wait,
2525 union pipe_query_result *vresult)
2526 {
2527 struct panfrost_query *query = (struct panfrost_query *) q;
2528 struct panfrost_context *ctx = pan_context(pipe);
2529
2531 switch (query->type) {
2532 case PIPE_QUERY_OCCLUSION_COUNTER:
2533 case PIPE_QUERY_OCCLUSION_PREDICATE:
2534 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
2535 /* Flush first */
2536 panfrost_flush_all_batches(ctx, true);
2537
2538 /* Read back the query results */
2539 unsigned *result = (unsigned *) query->bo->cpu;
2540 unsigned passed = *result;
2541
2542 if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
2543 vresult->u64 = passed;
2544 } else {
2545 vresult->b = !!passed;
2546 }
2547
2548 break;
2549
2550 case PIPE_QUERY_PRIMITIVES_GENERATED:
2551 case PIPE_QUERY_PRIMITIVES_EMITTED:
2552 panfrost_flush_all_batches(ctx, true);
2553 vresult->u64 = query->end - query->start;
2554 break;
2555
2556 default:
2557 DBG("Skipped query get %u\n", query->type);
2558 break;
2559 }
2560
2561 return true;
2562 }
2563
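/* Stream output targets are plain Gallium objects; no hardware state is
 * baked here, the bound targets are consumed later when transform feedback
 * is emitted */
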
2564 static struct pipe_stream_output_target *
2565 panfrost_create_stream_output_target(struct pipe_context *pctx,
2566 struct pipe_resource *prsc,
2567 unsigned buffer_offset,
2568 unsigned buffer_size)
2569 {
2570 struct pipe_stream_output_target *target;
2571
2572 target = rzalloc(pctx, struct pipe_stream_output_target);
2573
2574 if (!target)
2575 return NULL;
2576
2577 pipe_reference_init(&target->reference, 1);
2578 pipe_resource_reference(&target->buffer, prsc);
2579
2580 target->context = pctx;
2581 target->buffer_offset = buffer_offset;
2582 target->buffer_size = buffer_size;
2583
2584 return target;
2585 }
2586
2587 static void
2588 panfrost_stream_output_target_destroy(struct pipe_context *pctx,
2589 struct pipe_stream_output_target *target)
2590 {
2591 pipe_resource_reference(&target->buffer, NULL);
2592 ralloc_free(target);
2593 }
2594
2595 static void
2596 panfrost_set_stream_output_targets(struct pipe_context *pctx,
2597 unsigned num_targets,
2598 struct pipe_stream_output_target **targets,
2599 const unsigned *offsets)
2600 {
2601 struct panfrost_context *ctx = pan_context(pctx);
2602 struct panfrost_streamout *so = &ctx->streamout;
2603
2604 assert(num_targets <= ARRAY_SIZE(so->targets));
2605
2606 for (unsigned i = 0; i < num_targets; i++) {
2607 if (offsets[i] != -1)
2608 so->offsets[i] = offsets[i];
2609
2610 pipe_so_target_reference(&so->targets[i], targets[i]);
2611 }
2612
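        /* Release only the slots beyond the new target count, so the targets
         * bound above keep their references */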
2613         for (unsigned i = num_targets; i < so->num_targets; i++)
2614                 pipe_so_target_reference(&so->targets[i], NULL);
2615
2616 so->num_targets = num_targets;
2617 }
2618
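/* Create the Gallium context: wire up the state/query/streamout entry points
 * and allocate the per-context helpers (uploader, primconvert, blitters) */
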
2619 struct pipe_context *
2620 panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
2621 {
2622 struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context);
2623 struct panfrost_screen *pscreen = pan_screen(screen);
2624 struct pipe_context *gallium = (struct pipe_context *) ctx;
2625
2626 gallium->screen = screen;
2627
2628 gallium->destroy = panfrost_destroy;
2629
2630 gallium->set_framebuffer_state = panfrost_set_framebuffer_state;
2631
2632 gallium->flush = panfrost_flush;
2633 gallium->clear = panfrost_clear;
2634 gallium->draw_vbo = panfrost_draw_vbo;
2635
2636 gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
2637 gallium->set_constant_buffer = panfrost_set_constant_buffer;
2638 gallium->set_shader_buffers = panfrost_set_shader_buffers;
2639
2640 gallium->set_stencil_ref = panfrost_set_stencil_ref;
2641
2642 gallium->create_sampler_view = panfrost_create_sampler_view;
2643 gallium->set_sampler_views = panfrost_set_sampler_views;
2644 gallium->sampler_view_destroy = panfrost_sampler_view_destroy;
2645
2646 gallium->create_rasterizer_state = panfrost_create_rasterizer_state;
2647 gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
2648 gallium->delete_rasterizer_state = panfrost_generic_cso_delete;
2649
2650 gallium->create_vertex_elements_state = panfrost_create_vertex_elements_state;
2651 gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
2652 gallium->delete_vertex_elements_state = panfrost_generic_cso_delete;
2653
2654 gallium->create_fs_state = panfrost_create_shader_state;
2655 gallium->delete_fs_state = panfrost_delete_shader_state;
2656 gallium->bind_fs_state = panfrost_bind_fs_state;
2657
2658 gallium->create_vs_state = panfrost_create_shader_state;
2659 gallium->delete_vs_state = panfrost_delete_shader_state;
2660 gallium->bind_vs_state = panfrost_bind_vs_state;
2661
2662 gallium->create_sampler_state = panfrost_create_sampler_state;
2663 gallium->delete_sampler_state = panfrost_generic_cso_delete;
2664 gallium->bind_sampler_states = panfrost_bind_sampler_states;
2665
2666 gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
2667 gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
2668 gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;
2669
2670 gallium->set_sample_mask = panfrost_set_sample_mask;
2671
2672 gallium->set_clip_state = panfrost_set_clip_state;
2673 gallium->set_viewport_states = panfrost_set_viewport_states;
2674 gallium->set_scissor_states = panfrost_set_scissor_states;
2675 gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
2676 gallium->set_active_query_state = panfrost_set_active_query_state;
2677
2678 gallium->create_query = panfrost_create_query;
2679 gallium->destroy_query = panfrost_destroy_query;
2680 gallium->begin_query = panfrost_begin_query;
2681 gallium->end_query = panfrost_end_query;
2682 gallium->get_query_result = panfrost_get_query_result;
2683
2684 gallium->create_stream_output_target = panfrost_create_stream_output_target;
2685 gallium->stream_output_target_destroy = panfrost_stream_output_target_destroy;
2686 gallium->set_stream_output_targets = panfrost_set_stream_output_targets;
2687
2688 panfrost_resource_context_init(gallium);
2689 panfrost_blend_context_init(gallium);
2690 panfrost_compute_context_init(gallium);
2691
2692 /* XXX: leaks */
2693 gallium->stream_uploader = u_upload_create_default(gallium);
2694 gallium->const_uploader = gallium->stream_uploader;
2695 assert(gallium->stream_uploader);
2696
2697 /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */
2698 ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1;
2699
2700 ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);
2701
2702 ctx->blitter = util_blitter_create(gallium);
2703 ctx->blitter_wallpaper = util_blitter_create(gallium);
2704
2705 assert(ctx->blitter);
2706 assert(ctx->blitter_wallpaper);
2707
2708 /* Prepare for render! */
2709
2710 panfrost_batch_init(ctx);
2711 panfrost_emit_vertex_payload(ctx);
2712 panfrost_emit_tiler_payload(ctx);
2713 panfrost_invalidate_frame(ctx);
2714 panfrost_default_shader_backend(ctx);
2715
2716 return gallium;
2717 }