panfrost: Hoist blend constant into Midgard-specific struct
[mesa.git] src/gallium/drivers/panfrost/pan_context.c
1 /*
2 * © Copyright 2018 Alyssa Rosenzweig
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 */
24
25 #include <sys/poll.h>
26 #include <errno.h>
27
28 #include "pan_context.h"
29 #include "pan_swizzle.h"
30 #include "pan_format.h"
31
32 #include "util/macros.h"
33 #include "util/u_format.h"
34 #include "util/u_inlines.h"
35 #include "util/u_upload_mgr.h"
36 #include "util/u_memory.h"
37 #include "util/u_vbuf.h"
38 #include "util/half_float.h"
39 #include "util/u_helpers.h"
41 #include "indices/u_primconvert.h"
42 #include "tgsi/tgsi_parse.h"
43 #include "util/u_math.h"
44
45 #include "pan_screen.h"
46 #include "pan_blending.h"
47 #include "pan_blend_shaders.h"
48 #include "pan_util.h"
49 #include "pan_wallpaper.h"
50
51 static int performance_counter_number = 0;
52 extern const char *pan_counters_base;
53
54 /* Do not actually send anything to the GPU; merely generate the cmdstream as fast as possible. Disables framebuffer writes */
55 //#define DRY_RUN
56
57 /* Can a given format support AFBC? Not all can. */
58
59 static bool
60 panfrost_can_afbc(enum pipe_format format)
61 {
62 const struct util_format_description *desc =
63 util_format_description(format);
64
65 if (util_format_is_rgba8_variant(desc))
66 return true;
67
68 /* TODO: AFBC of other formats */
69
70 return false;
71 }
72
73 /* AFBC is enabled on a per-resource basis (AFBC enabling is theoretically
74  * independent between color buffers and depth/stencil). To enable, we allocate
75 * the AFBC metadata buffer and mark that it is enabled. We do -not- actually
76 * edit the fragment job here. This routine should be called ONCE per
77 * AFBC-compressed buffer, rather than on every frame. */
78
79 static void
80 panfrost_enable_afbc(struct panfrost_context *ctx, struct panfrost_resource *rsrc, bool ds)
81 {
82 if (ctx->require_sfbd) {
83 DBG("AFBC not supported yet on SFBD\n");
84 assert(0);
85 }
86
87 struct pipe_context *gallium = (struct pipe_context *) ctx;
88 struct panfrost_screen *screen = pan_screen(gallium->screen);
89 /* AFBC metadata is 16 bytes per tile */
90 int tile_w = (rsrc->base.width0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
91 int tile_h = (rsrc->base.height0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
92 int bytes_per_pixel = util_format_get_blocksize(rsrc->base.format);
93 int stride = bytes_per_pixel * ALIGN(rsrc->base.width0, 16);
94
95 stride *= 2; /* TODO: Should this be carried over? */
96 int main_size = stride * rsrc->base.height0;
97 rsrc->bo->afbc_metadata_size = tile_w * tile_h * 16;
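        /* Worked example, illustrative only and assuming 16x16 tiles: a
         * 1920x1080 surface gives tile_w = (1920 + 15) >> 4 = 120 and
         * tile_h = (1080 + 15) >> 4 = 68, so the metadata occupies
         * 120 * 68 * 16 = 130560 bytes */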
98
99 /* Allocate the AFBC slab itself, large enough to hold the above */
100 screen->driver->allocate_slab(screen, &rsrc->bo->afbc_slab,
101 (rsrc->bo->afbc_metadata_size + main_size + 4095) / 4096,
102 true, 0, 0, 0);
103
104 rsrc->bo->layout = PAN_AFBC;
105
106         /* Compressed texture reads use a tagged pointer to the metadata */
107
108 rsrc->bo->gpu = rsrc->bo->afbc_slab.gpu | (ds ? 0 : 1);
109 rsrc->bo->cpu = rsrc->bo->afbc_slab.cpu;
110 rsrc->bo->gem_handle = rsrc->bo->afbc_slab.gem_handle;
111 }
112
113 static void
114 panfrost_enable_checksum(struct panfrost_context *ctx, struct panfrost_resource *rsrc)
115 {
116 struct pipe_context *gallium = (struct pipe_context *) ctx;
117 struct panfrost_screen *screen = pan_screen(gallium->screen);
118 int tile_w = (rsrc->base.width0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
119 int tile_h = (rsrc->base.height0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
120
121 /* 8 byte checksum per tile */
122 rsrc->bo->checksum_stride = tile_w * 8;
123 int pages = (((rsrc->bo->checksum_stride * tile_h) + 4095) / 4096);
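        /* Worked example, illustrative only: with 120 tiles across,
         * checksum_stride is 120 * 8 = 960 bytes; at 68 tile rows that is
         * 65280 bytes, which rounds up to 16 pages */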
124 screen->driver->allocate_slab(screen, &rsrc->bo->checksum_slab, pages, false, 0, 0, 0);
125
126 rsrc->bo->has_checksum = true;
127 }
128
129 /* Framebuffer descriptor */
130
131 static void
132 panfrost_set_framebuffer_resolution(struct mali_single_framebuffer *fb, int w, int h)
133 {
134 fb->width = MALI_POSITIVE(w);
135 fb->height = MALI_POSITIVE(h);
136
137 /* No idea why this is needed, but it's how resolution_check is
138 * calculated. It's not clear to us yet why the hardware wants this.
139 * The formula itself was discovered mostly by manual bruteforce and
140 * aggressive algebraic simplification. */
141
142 fb->resolution_check = ((w + h) / 3) << 4;
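        /* For instance, illustrative only: at 1920x1080,
         * (1920 + 1080) / 3 = 1000, and 1000 << 4 = 16000 = 0x3e80 */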
143 }
144
145 struct mali_single_framebuffer
146 panfrost_emit_sfbd(struct panfrost_context *ctx)
147 {
148 struct mali_single_framebuffer framebuffer = {
149 .unknown2 = 0x1f,
150 .format = 0x30000000,
151 .clear_flags = 0x1000,
152 .unknown_address_0 = ctx->scratchpad.gpu,
153 .unknown_address_1 = ctx->misc_0.gpu,
154 .unknown_address_2 = ctx->misc_0.gpu + 40960,
155 .tiler_flags = 0xf0,
156 .tiler_heap_free = ctx->tiler_heap.gpu,
157 .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size,
158 };
159
160 panfrost_set_framebuffer_resolution(&framebuffer, ctx->pipe_framebuffer.width, ctx->pipe_framebuffer.height);
161
162 return framebuffer;
163 }
164
165 struct bifrost_framebuffer
166 panfrost_emit_mfbd(struct panfrost_context *ctx)
167 {
168 struct bifrost_framebuffer framebuffer = {
169 /* It is not yet clear what tiler_meta means or how it's
170 * calculated, but we can tell the lower 32-bits are a
171 * (monotonically increasing?) function of tile count and
172 * geometry complexity; I suspect it defines a memory size of
173                  * some kind for the tiler? It's really unclear at the
174 * moment... but to add to the confusion, the hardware is happy
175 * enough to accept a zero in this field, so we don't even have
176 * to worry about it right now.
177 *
178 * The byte (just after the 32-bit mark) is much more
179 * interesting. The higher nibble I've only ever seen as 0xF,
180 * but the lower one I've seen as 0x0 or 0xF, and it's not
181 * obvious what the difference is. But what -is- obvious is
182 * that when the lower nibble is zero, performance is severely
183 * degraded compared to when the lower nibble is set.
184 * Evidently, that nibble enables some sort of fast path,
185 * perhaps relating to caching or tile flush? Regardless, at
186 * this point there's no clear reason not to set it, aside from
187 * substantially increased memory requirements (of the misc_0
188 * buffer) */
189
190 .tiler_meta = ((uint64_t) 0xff << 32) | 0x0,
191
192 .width1 = MALI_POSITIVE(ctx->pipe_framebuffer.width),
193 .height1 = MALI_POSITIVE(ctx->pipe_framebuffer.height),
194 .width2 = MALI_POSITIVE(ctx->pipe_framebuffer.width),
195 .height2 = MALI_POSITIVE(ctx->pipe_framebuffer.height),
196
197 .unk1 = 0x1080,
198
199 /* TODO: MRT */
200 .rt_count_1 = MALI_POSITIVE(1),
201 .rt_count_2 = 4,
202
203 .unknown2 = 0x1f,
204
205 /* Corresponds to unknown_address_X of SFBD */
206 .scratchpad = ctx->scratchpad.gpu,
207 .tiler_scratch_start = ctx->misc_0.gpu,
208
209 /* The constant added here is, like the lower word of
210 * tiler_meta, (loosely) another product of framebuffer size
211 * and geometry complexity. It must be sufficiently large for
212 * the tiler_meta fast path to work; if it's too small, there
213 * will be DATA_INVALID_FAULTs. Conversely, it must be less
214 * than the total size of misc_0, or else there's no room. It's
215 * possible this constant configures a partition between two
216 * parts of misc_0? We haven't investigated the functionality,
217 * as these buffers are internally used by the hardware
218                  * (presumably by the tiler) but seemingly not touched by the driver
219 */
220
221 .tiler_scratch_middle = ctx->misc_0.gpu + 0xf0000,
222
223 .tiler_heap_start = ctx->tiler_heap.gpu,
224 .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size,
225 };
226
227 return framebuffer;
228 }
229
230 /* Are we currently rendering to the screen (rather than an FBO)? */
231
232 bool
233 panfrost_is_scanout(struct panfrost_context *ctx)
234 {
235 /* If there is no color buffer, it's an FBO */
236 if (!ctx->pipe_framebuffer.nr_cbufs)
237 return false;
238
239         /* If we're called so early that no framebuffer has been set, assume scanout */
240 if (!ctx->pipe_framebuffer.cbufs[0])
241 return true;
242
243 return ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_DISPLAY_TARGET ||
244 ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SCANOUT ||
245 ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
246 }
247
248 static uint32_t
249 pan_pack_color(const union pipe_color_union *color, enum pipe_format format)
250 {
251 /* Alpha magicked to 1.0 if there is no alpha */
252
253 bool has_alpha = util_format_has_alpha(format);
254 float clear_alpha = has_alpha ? color->f[3] : 1.0f;
255
256 /* Packed color depends on the framebuffer format */
257
258 const struct util_format_description *desc =
259 util_format_description(format);
260
261 if (util_format_is_rgba8_variant(desc)) {
262 return (float_to_ubyte(clear_alpha) << 24) |
263 (float_to_ubyte(color->f[2]) << 16) |
264 (float_to_ubyte(color->f[1]) << 8) |
265 (float_to_ubyte(color->f[0]) << 0);
266 } else if (format == PIPE_FORMAT_B5G6R5_UNORM) {
267 /* First, we convert the components to R5, G6, B5 separately */
268 unsigned r5 = CLAMP(color->f[0], 0.0, 1.0) * 31.0;
269 unsigned g6 = CLAMP(color->f[1], 0.0, 1.0) * 63.0;
270 unsigned b5 = CLAMP(color->f[2], 0.0, 1.0) * 31.0;
271
272 /* Then we pack into a sparse u32. TODO: Why these shifts? */
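                /* Illustrative sketch of the resulting layout: b5 occupies
                 * bits [29:25], g6 bits [19:14] and r5 bits [9:5]; e.g. pure
                 * red (1, 0, 0) packs to 31 << 5 = 0x3e0 */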
273 return (b5 << 25) | (g6 << 14) | (r5 << 5);
274 } else {
275 /* Unknown format */
276 assert(0);
277 }
278
279 return 0;
280 }
281
282 static void
283 panfrost_clear(
284 struct pipe_context *pipe,
285 unsigned buffers,
286 const union pipe_color_union *color,
287 double depth, unsigned stencil)
288 {
289 struct panfrost_context *ctx = pan_context(pipe);
290 struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
291
292 if (buffers & PIPE_CLEAR_COLOR) {
293 enum pipe_format format = ctx->pipe_framebuffer.cbufs[0]->format;
294 job->clear_color = pan_pack_color(color, format);
295 }
296
297 if (buffers & PIPE_CLEAR_DEPTH) {
298 job->clear_depth = depth;
299 }
300
301 if (buffers & PIPE_CLEAR_STENCIL) {
302 job->clear_stencil = stencil;
303 }
304
305 job->clear |= buffers;
306 }
307
308 static mali_ptr
309 panfrost_attach_vt_mfbd(struct panfrost_context *ctx)
310 {
311         /* MFBD needs a sequential semi-render target upload, but what exactly this is remains unclear for now */
312 struct bifrost_render_target rts_list[] = {
313 {
314 .chunknown = {
315 .unk = 0x30005,
316 },
317 .framebuffer = ctx->misc_0.gpu,
318 .zero2 = 0x3,
319 },
320 };
321
322 /* Allocate memory for the three components */
323 int size = 1024 + sizeof(ctx->vt_framebuffer_mfbd) + sizeof(rts_list);
324 struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);
325
326 /* Opaque 1024-block */
327 rts_list[0].chunknown.pointer = transfer.gpu;
328
329 memcpy(transfer.cpu + 1024, &ctx->vt_framebuffer_mfbd, sizeof(ctx->vt_framebuffer_mfbd));
330 memcpy(transfer.cpu + 1024 + sizeof(ctx->vt_framebuffer_mfbd), rts_list, sizeof(rts_list));
331
332 return (transfer.gpu + 1024) | MALI_MFBD;
333 }
334
335 static mali_ptr
336 panfrost_attach_vt_sfbd(struct panfrost_context *ctx)
337 {
338 return panfrost_upload_transient(ctx, &ctx->vt_framebuffer_sfbd, sizeof(ctx->vt_framebuffer_sfbd)) | MALI_SFBD;
339 }
340
341 static void
342 panfrost_attach_vt_framebuffer(struct panfrost_context *ctx)
343 {
344 mali_ptr framebuffer = ctx->require_sfbd ?
345 panfrost_attach_vt_sfbd(ctx) :
346 panfrost_attach_vt_mfbd(ctx);
347
348 ctx->payload_vertex.postfix.framebuffer = framebuffer;
349 ctx->payload_tiler.postfix.framebuffer = framebuffer;
350 }
351
352 /* Reset per-frame context, called on context initialisation as well as after
353 * flushing a frame */
354
355 static void
356 panfrost_invalidate_frame(struct panfrost_context *ctx)
357 {
358 unsigned transient_count = ctx->transient_pools[ctx->cmdstream_i].entry_index*ctx->transient_pools[0].entry_size + ctx->transient_pools[ctx->cmdstream_i].entry_offset;
359 DBG("Uploaded transient %d bytes\n", transient_count);
360
361 /* Rotate cmdstream */
362 if ((++ctx->cmdstream_i) == (sizeof(ctx->transient_pools) / sizeof(ctx->transient_pools[0])))
363 ctx->cmdstream_i = 0;
364
365 if (ctx->require_sfbd)
366 ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
367 else
368 ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);
369
370 /* Reset varyings allocated */
371 ctx->varying_height = 0;
372
373 /* The transient cmdstream is dirty every frame; the only bits worth preserving
374 * (textures, shaders, etc) are in other buffers anyways */
375
376 ctx->transient_pools[ctx->cmdstream_i].entry_index = 0;
377 ctx->transient_pools[ctx->cmdstream_i].entry_offset = 0;
378
379 /* Regenerate payloads */
380 panfrost_attach_vt_framebuffer(ctx);
381
382 if (ctx->rasterizer)
383 ctx->dirty |= PAN_DIRTY_RASTERIZER;
384
385 /* XXX */
386 ctx->dirty |= PAN_DIRTY_SAMPLERS | PAN_DIRTY_TEXTURES;
387 }
388
389 /* In practice, every field of these payloads should be configurable
390  * arbitrarily, which means these functions are basically catch-alls for
391  * as-of-yet unexplained unknowns */
392
393 static void
394 panfrost_emit_vertex_payload(struct panfrost_context *ctx)
395 {
396 struct midgard_payload_vertex_tiler payload = {
397 .prefix = {
398 .workgroups_z_shift = 32,
399 .workgroups_x_shift_2 = 0x2,
400 .workgroups_x_shift_3 = 0x5,
401 },
402 .gl_enables = 0x4 | (ctx->is_t6xx ? 0 : 0x2),
403 };
404
405 memcpy(&ctx->payload_vertex, &payload, sizeof(payload));
406 }
407
408 static void
409 panfrost_emit_tiler_payload(struct panfrost_context *ctx)
410 {
411 struct midgard_payload_vertex_tiler payload = {
412 .prefix = {
413 .workgroups_z_shift = 32,
414 .workgroups_x_shift_2 = 0x2,
415 .workgroups_x_shift_3 = 0x6,
416
417 .zero1 = 0xffff, /* Why is this only seen on test-quad-textured? */
418 },
419 };
420
421 memcpy(&ctx->payload_tiler, &payload, sizeof(payload));
422 }
423
424 static unsigned
425 translate_tex_wrap(enum pipe_tex_wrap w)
426 {
427 switch (w) {
428 case PIPE_TEX_WRAP_REPEAT:
429 return MALI_WRAP_REPEAT;
430
431 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
432 return MALI_WRAP_CLAMP_TO_EDGE;
433
434 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
435 return MALI_WRAP_CLAMP_TO_BORDER;
436
437 case PIPE_TEX_WRAP_MIRROR_REPEAT:
438 return MALI_WRAP_MIRRORED_REPEAT;
439
440 default:
441 unreachable("Invalid wrap");
442 }
443 }
444
445 static unsigned
446 translate_tex_filter(enum pipe_tex_filter f)
447 {
448 switch (f) {
449 case PIPE_TEX_FILTER_NEAREST:
450 return MALI_NEAREST;
451
452 case PIPE_TEX_FILTER_LINEAR:
453 return MALI_LINEAR;
454
455 default:
456 unreachable("Invalid filter");
457 }
458 }
459
460 static unsigned
461 translate_mip_filter(enum pipe_tex_mipfilter f)
462 {
463 return (f == PIPE_TEX_MIPFILTER_LINEAR) ? MALI_MIP_LINEAR : 0;
464 }
465
466 static unsigned
467 panfrost_translate_compare_func(enum pipe_compare_func in)
468 {
469 switch (in) {
470 case PIPE_FUNC_NEVER:
471 return MALI_FUNC_NEVER;
472
473 case PIPE_FUNC_LESS:
474 return MALI_FUNC_LESS;
475
476 case PIPE_FUNC_EQUAL:
477 return MALI_FUNC_EQUAL;
478
479 case PIPE_FUNC_LEQUAL:
480 return MALI_FUNC_LEQUAL;
481
482 case PIPE_FUNC_GREATER:
483 return MALI_FUNC_GREATER;
484
485 case PIPE_FUNC_NOTEQUAL:
486 return MALI_FUNC_NOTEQUAL;
487
488 case PIPE_FUNC_GEQUAL:
489 return MALI_FUNC_GEQUAL;
490
491 case PIPE_FUNC_ALWAYS:
492 return MALI_FUNC_ALWAYS;
493
494 default:
495 unreachable("Invalid func");
496 }
497 }
498
499 static unsigned
500 panfrost_translate_alt_compare_func(enum pipe_compare_func in)
501 {
502 switch (in) {
503 case PIPE_FUNC_NEVER:
504 return MALI_ALT_FUNC_NEVER;
505
506 case PIPE_FUNC_LESS:
507 return MALI_ALT_FUNC_LESS;
508
509 case PIPE_FUNC_EQUAL:
510 return MALI_ALT_FUNC_EQUAL;
511
512 case PIPE_FUNC_LEQUAL:
513 return MALI_ALT_FUNC_LEQUAL;
514
515 case PIPE_FUNC_GREATER:
516 return MALI_ALT_FUNC_GREATER;
517
518 case PIPE_FUNC_NOTEQUAL:
519 return MALI_ALT_FUNC_NOTEQUAL;
520
521 case PIPE_FUNC_GEQUAL:
522 return MALI_ALT_FUNC_GEQUAL;
523
524 case PIPE_FUNC_ALWAYS:
525 return MALI_ALT_FUNC_ALWAYS;
526
527 default:
528 unreachable("Invalid alt func");
529 }
530 }
531
532 static unsigned
533 panfrost_translate_stencil_op(enum pipe_stencil_op in)
534 {
535 switch (in) {
536 case PIPE_STENCIL_OP_KEEP:
537 return MALI_STENCIL_KEEP;
538
539 case PIPE_STENCIL_OP_ZERO:
540 return MALI_STENCIL_ZERO;
541
542 case PIPE_STENCIL_OP_REPLACE:
543 return MALI_STENCIL_REPLACE;
544
545 case PIPE_STENCIL_OP_INCR:
546 return MALI_STENCIL_INCR;
547
548 case PIPE_STENCIL_OP_DECR:
549 return MALI_STENCIL_DECR;
550
551 case PIPE_STENCIL_OP_INCR_WRAP:
552 return MALI_STENCIL_INCR_WRAP;
553
554 case PIPE_STENCIL_OP_DECR_WRAP:
555 return MALI_STENCIL_DECR_WRAP;
556
557 case PIPE_STENCIL_OP_INVERT:
558 return MALI_STENCIL_INVERT;
559
560 default:
561 unreachable("Invalid stencil op");
562 }
563 }
564
565 static void
566 panfrost_make_stencil_state(const struct pipe_stencil_state *in, struct mali_stencil_test *out)
567 {
568 out->ref = 0; /* Gallium gets it from elsewhere */
569
570 out->mask = in->valuemask;
571 out->func = panfrost_translate_compare_func(in->func);
572 out->sfail = panfrost_translate_stencil_op(in->fail_op);
573 out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
574 out->dppass = panfrost_translate_stencil_op(in->zpass_op);
575 }
576
577 static void
578 panfrost_default_shader_backend(struct panfrost_context *ctx)
579 {
580 struct mali_shader_meta shader = {
581 .alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000),
582
583 .unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010,
584 .unknown2_4 = MALI_NO_MSAA | 0x4e0,
585 };
586
587 if (ctx->is_t6xx) {
588 shader.unknown2_4 |= 0x10;
589 }
590
591 struct pipe_stencil_state default_stencil = {
592 .enabled = 0,
593 .func = PIPE_FUNC_ALWAYS,
594                 .fail_op = PIPE_STENCIL_OP_KEEP,
595                 .zfail_op = PIPE_STENCIL_OP_KEEP,
596                 .zpass_op = PIPE_STENCIL_OP_KEEP,
597 .writemask = 0xFF,
598 .valuemask = 0xFF
599 };
600
601 panfrost_make_stencil_state(&default_stencil, &shader.stencil_front);
602 shader.stencil_mask_front = default_stencil.writemask;
603
604 panfrost_make_stencil_state(&default_stencil, &shader.stencil_back);
605 shader.stencil_mask_back = default_stencil.writemask;
606
607 if (default_stencil.enabled)
608 shader.unknown2_4 |= MALI_STENCIL_TEST;
609
610 memcpy(&ctx->fragment_shader_core, &shader, sizeof(shader));
611 }
612
613 /* Generates a vertex/tiler job. This is, in some sense, the heart of the
614  * graphics command stream. It should be called once per draw, according to
615 * presentations. Set is_tiler for "tiler" jobs (fragment shader jobs, but in
616 * Mali parlance, "fragment" refers to framebuffer writeout). Clear it for
617 * vertex jobs. */
618
619 struct panfrost_transfer
620 panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler, bool is_elided_tiler)
621 {
622 /* Each draw call corresponds to two jobs, and we want to offset to leave room for the set-value job */
623 int draw_job_index = 1 + (2 * ctx->draw_count);
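        /* So, illustratively: draw 0 emits a vertex job with index 1 and a
         * tiler job with index 2; draw 1 emits indices 3 and 4, and so on */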
624
625 struct mali_job_descriptor_header job = {
626 .job_type = is_tiler ? JOB_TYPE_TILER : JOB_TYPE_VERTEX,
627 .job_index = draw_job_index + (is_tiler ? 1 : 0),
628 #ifdef __LP64__
629 .job_descriptor_size = 1,
630 #endif
631 };
632
633 /* Only non-elided tiler jobs have dependencies which are known at this point */
634
635 if (is_tiler && !is_elided_tiler) {
636 /* Tiler jobs depend on vertex jobs */
637
638 job.job_dependency_index_1 = draw_job_index;
639
640 /* Tiler jobs also depend on the previous tiler job */
641
642 if (ctx->draw_count)
643 job.job_dependency_index_2 = draw_job_index - 1;
644 }
645
646 struct midgard_payload_vertex_tiler *payload = is_tiler ? &ctx->payload_tiler : &ctx->payload_vertex;
647
648         /* There are some padding hacks on 32-bit */
649
650 #ifdef __LP64__
651 int offset = 0;
652 #else
653 int offset = 4;
654 #endif
655 struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(*payload));
656 memcpy(transfer.cpu, &job, sizeof(job));
657 memcpy(transfer.cpu + sizeof(job) - offset, payload, sizeof(*payload));
658 return transfer;
659 }
660
661 /* Generates a set value job. It's unclear what exactly this does, why it's
662 * necessary, and when to call it. */
663
664 static void
665 panfrost_set_value_job(struct panfrost_context *ctx)
666 {
667 struct mali_job_descriptor_header job = {
668 .job_type = JOB_TYPE_SET_VALUE,
669 .job_descriptor_size = 1,
670 .job_index = 1 + (2 * ctx->draw_count),
671 };
672
673 struct mali_payload_set_value payload = {
674 .out = ctx->misc_0.gpu,
675 .unknown = 0x3,
676 };
677
678 struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(payload));
679 memcpy(transfer.cpu, &job, sizeof(job));
680 memcpy(transfer.cpu + sizeof(job), &payload, sizeof(payload));
681
682 ctx->u_set_value_job = (struct mali_job_descriptor_header *) transfer.cpu;
683 ctx->set_value_job = transfer.gpu;
684 }
685
686 static mali_ptr
687 panfrost_emit_varyings(
688 struct panfrost_context *ctx,
689 union mali_attr *slot,
690 unsigned stride,
691 unsigned count)
692 {
693 mali_ptr varying_address = ctx->varying_mem.gpu + ctx->varying_height;
694
695 /* Fill out the descriptor */
696 slot->elements = varying_address | MALI_ATTR_LINEAR;
697 slot->stride = stride;
698 slot->size = stride * count;
699
700 ctx->varying_height += ALIGN(slot->size, 64);
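        /* Illustrative example: an fp16 gl_PointSize slot (stride 2) for 100
         * invocations occupies 200 bytes and advances varying_height by
         * ALIGN(200, 64) = 256 */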
701 assert(ctx->varying_height < ctx->varying_mem.size);
702
703 return varying_address;
704 }
705
706 static void
707 panfrost_emit_point_coord(union mali_attr *slot)
708 {
709 slot->elements = MALI_VARYING_POINT_COORD | MALI_ATTR_LINEAR;
710 slot->stride = slot->size = 0;
711 }
712
713 static void
714 panfrost_emit_varying_descriptor(
715 struct panfrost_context *ctx,
716 unsigned invocation_count)
717 {
718 /* Load the shaders */
719
720 struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
721 struct panfrost_shader_state *fs = &ctx->fs->variants[ctx->fs->active_variant];
722
723 /* Allocate the varying descriptor */
724
725 size_t vs_size = sizeof(struct mali_attr_meta) * vs->tripipe->varying_count;
726 size_t fs_size = sizeof(struct mali_attr_meta) * fs->tripipe->varying_count;
727
728 struct panfrost_transfer trans = panfrost_allocate_transient(ctx,
729 vs_size + fs_size);
730
731 memcpy(trans.cpu, vs->varyings, vs_size);
732 memcpy(trans.cpu + vs_size, fs->varyings, fs_size);
733
734 ctx->payload_vertex.postfix.varying_meta = trans.gpu;
735 ctx->payload_tiler.postfix.varying_meta = trans.gpu + vs_size;
736
737 /* Buffer indices must be in this order per our convention */
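        /* That convention, as emitted below: slot 0 holds the general
         * varyings, slot 1 the fp32 vec4 gl_Position, then (when present) the
         * fp16 gl_PointSize slot, and finally the point-coord descriptor */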
738 union mali_attr varyings[PIPE_MAX_ATTRIBS];
739 unsigned idx = 0;
740
741 /* General varyings -- use the VS's, since those are more likely to be
742 * accurate on desktop */
743
744 panfrost_emit_varyings(ctx, &varyings[idx++],
745 vs->general_varying_stride, invocation_count);
746
747 /* fp32 vec4 gl_Position */
748 ctx->payload_tiler.postfix.position_varying =
749 panfrost_emit_varyings(ctx, &varyings[idx++],
750 sizeof(float) * 4, invocation_count);
751
752
753 if (vs->writes_point_size || fs->reads_point_coord) {
754 /* fp16 vec1 gl_PointSize */
755 ctx->payload_tiler.primitive_size.pointer =
756 panfrost_emit_varyings(ctx, &varyings[idx++],
757 2, invocation_count);
758 }
759
760 if (fs->reads_point_coord) {
761 /* Special descriptor */
762 panfrost_emit_point_coord(&varyings[idx++]);
763 }
764
765 mali_ptr varyings_p = panfrost_upload_transient(ctx, &varyings, idx * sizeof(union mali_attr));
766 ctx->payload_vertex.postfix.varyings = varyings_p;
767 ctx->payload_tiler.postfix.varyings = varyings_p;
768 }
769
770 static mali_ptr
771 panfrost_vertex_buffer_address(struct panfrost_context *ctx, unsigned i)
772 {
773 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
774 struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);
775
776 return rsrc->bo->gpu + buf->buffer_offset;
777 }
778
779 /* Emits attribute and varying descriptors; this should be called on every
780  * draw, except in some obscure circumstances */
781
782 static void
783 panfrost_emit_vertex_data(struct panfrost_context *ctx, struct panfrost_job *job)
784 {
785 /* Staged mali_attr, and index into them. i =/= k, depending on the
786 * vertex buffer mask */
787 union mali_attr attrs[PIPE_MAX_ATTRIBS];
788 unsigned k = 0;
789
790 unsigned invocation_count = MALI_NEGATIVE(ctx->payload_tiler.prefix.invocation_count);
791
792 for (int i = 0; i < ARRAY_SIZE(ctx->vertex_buffers); ++i) {
793 if (!(ctx->vb_mask & (1 << i))) continue;
794
795 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
796 struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);
797
798 if (!rsrc) continue;
799
800 /* Align to 64 bytes by masking off the lower bits. This
801 * will be adjusted back when we fixup the src_offset in
802 * mali_attr_meta */
803
804 mali_ptr addr = panfrost_vertex_buffer_address(ctx, i) & ~63;
805
806 /* Offset vertex count by draw_start to make sure we upload enough */
807 attrs[k].stride = buf->stride;
808 attrs[k].size = rsrc->base.width0;
809
810 panfrost_job_add_bo(job, rsrc->bo);
811 attrs[k].elements = addr | MALI_ATTR_LINEAR;
812
813 ++k;
814 }
815
816 ctx->payload_vertex.postfix.attributes = panfrost_upload_transient(ctx, attrs, k * sizeof(union mali_attr));
817
818 panfrost_emit_varying_descriptor(ctx, invocation_count);
819 }
820
821 static bool
822 panfrost_writes_point_size(struct panfrost_context *ctx)
823 {
824 assert(ctx->vs);
825 struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
826
827 return vs->writes_point_size && ctx->payload_tiler.prefix.draw_mode == MALI_POINTS;
828 }
829
830 /* Stage the attribute descriptors so we can adjust src_offset
831 * to let BOs align nicely */
832
833 static void
834 panfrost_stage_attributes(struct panfrost_context *ctx)
835 {
836 struct panfrost_vertex_state *so = ctx->vertex;
837
838 size_t sz = sizeof(struct mali_attr_meta) * so->num_elements;
839 struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sz);
840 struct mali_attr_meta *target = (struct mali_attr_meta *) transfer.cpu;
841
842 /* Copy as-is for the first pass */
843 memcpy(target, so->hw, sz);
844
845 /* Fixup offsets for the second pass. Recall that the hardware
846 * calculates attribute addresses as:
847 *
848 * addr = base + (stride * vtx) + src_offset;
849 *
850 * However, on Mali, base must be aligned to 64-bytes, so we
851 * instead let:
852 *
853 * base' = base & ~63 = base - (base & 63)
854 *
855 * To compensate when using base' (see emit_vertex_data), we have
856 * to adjust src_offset by the masked off piece:
857 *
858 * addr' = base' + (stride * vtx) + (src_offset + (base & 63))
859 * = base - (base & 63) + (stride * vtx) + src_offset + (base & 63)
860 * = base + (stride * vtx) + src_offset
861 * = addr;
862 *
863 * QED.
864 */
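        /* Concretely, illustrative only: if base = 0x10018, then
         * base' = 0x10000 and src_offset grows by 0x18, leaving the computed
         * address unchanged */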
865
866 for (unsigned i = 0; i < so->num_elements; ++i) {
867 unsigned vbi = so->pipe[i].vertex_buffer_index;
868 mali_ptr addr = panfrost_vertex_buffer_address(ctx, vbi);
869
870 /* Adjust by the masked off bits of the offset */
871 target[i].src_offset += (addr & 63);
872 }
873
874 ctx->payload_vertex.postfix.attribute_meta = transfer.gpu;
875 }
876
877 /* Go through dirty flags and actualise them in the cmdstream. */
878
879 void
880 panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
881 {
882 struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
883
884 if (with_vertex_data) {
885 panfrost_emit_vertex_data(ctx, job);
886 }
887
888 bool msaa = ctx->rasterizer->base.multisample;
889
890 if (ctx->dirty & PAN_DIRTY_RASTERIZER) {
891 ctx->payload_tiler.gl_enables = ctx->rasterizer->tiler_gl_enables;
892
893 /* TODO: Sample size */
894 SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_HAS_MSAA, msaa);
895 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !msaa);
896 }
897
898 /* Enable job requirements at draw-time */
899
900 if (msaa)
901 job->requirements |= PAN_REQ_MSAA;
902
903 if (ctx->depth_stencil->depth.writemask)
904 job->requirements |= PAN_REQ_DEPTH_WRITE;
905
906 if (ctx->occlusion_query) {
907 ctx->payload_tiler.gl_enables |= MALI_OCCLUSION_QUERY | MALI_OCCLUSION_PRECISE;
908 ctx->payload_tiler.postfix.occlusion_counter = ctx->occlusion_query->transfer.gpu;
909 }
910
911 if (ctx->dirty & PAN_DIRTY_VS) {
912 assert(ctx->vs);
913
914 struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
915
916 /* Late shader descriptor assignments */
917
918 vs->tripipe->texture_count = ctx->sampler_view_count[PIPE_SHADER_VERTEX];
919 vs->tripipe->sampler_count = ctx->sampler_count[PIPE_SHADER_VERTEX];
920
921 /* Who knows */
922 vs->tripipe->midgard1.unknown1 = 0x2201;
923
924 ctx->payload_vertex.postfix._shader_upper = vs->tripipe_gpu >> 4;
925 }
926
927 if (ctx->dirty & (PAN_DIRTY_RASTERIZER | PAN_DIRTY_VS)) {
928 /* Check if we need to link the gl_PointSize varying */
929 if (!panfrost_writes_point_size(ctx)) {
930 /* If the size is constant, write it out. Otherwise,
931 * don't touch primitive_size (since we would clobber
932 * the pointer there) */
933
934 ctx->payload_tiler.primitive_size.constant = ctx->rasterizer->base.line_width;
935 }
936 }
937
938 /* TODO: Maybe dirty track FS, maybe not. For now, it's transient. */
939 if (ctx->fs)
940 ctx->dirty |= PAN_DIRTY_FS;
941
942 if (ctx->dirty & PAN_DIRTY_FS) {
943 assert(ctx->fs);
944 struct panfrost_shader_state *variant = &ctx->fs->variants[ctx->fs->active_variant];
945
946 #define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name
947
948 COPY(shader);
949 COPY(attribute_count);
950 COPY(varying_count);
951 COPY(midgard1.uniform_count);
952 COPY(midgard1.work_count);
953 COPY(midgard1.unknown2);
954
955 #undef COPY
956 /* If there is a blend shader, work registers are shared */
957
958 if (ctx->blend->has_blend_shader)
959 ctx->fragment_shader_core.midgard1.work_count = /*MAX2(ctx->fragment_shader_core.midgard1.work_count, ctx->blend->blend_work_count)*/16;
960
961                 /* Set late, since it depends on render state */
962 /* The one at the end seems to mean "1 UBO" */
963 ctx->fragment_shader_core.midgard1.unknown1 = MALI_NO_ALPHA_TO_COVERAGE | 0x200 | 0x2201;
964
965 /* Assign texture/sample count right before upload */
966 ctx->fragment_shader_core.texture_count = ctx->sampler_view_count[PIPE_SHADER_FRAGMENT];
967 ctx->fragment_shader_core.sampler_count = ctx->sampler_count[PIPE_SHADER_FRAGMENT];
968
969 /* Assign the stencil refs late */
970 ctx->fragment_shader_core.stencil_front.ref = ctx->stencil_ref.ref_value[0];
971 ctx->fragment_shader_core.stencil_back.ref = ctx->stencil_ref.ref_value[1];
972
973 /* CAN_DISCARD should be set if the fragment shader possibly
974 * contains a 'discard' instruction. It is likely this is
975 * related to optimizations related to forward-pixel kill, as
976 * per "Mali Performance 3: Is EGL_BUFFER_PRESERVED a good
977 * thing?" by Peter Harris
978 */
979
980 if (variant->can_discard) {
981 ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
982                         /* Earlier read-modify-writes of unknown1 here were
983                          * dead stores; the observed-working value is simply
984                          * written outright */
                            ctx->fragment_shader_core.midgard1.unknown1 = 0x4200;
985 }
986
987 /* Check if we're using the default blend descriptor (fast path) */
988
989 bool no_blending =
990 !ctx->blend->has_blend_shader &&
991 (ctx->blend->equation.rgb_mode == 0x122) &&
992 (ctx->blend->equation.alpha_mode == 0x122) &&
993 (ctx->blend->equation.color_mask == 0xf);
994
995 /* Even on MFBD, the shader descriptor gets blend shaders. It's
996 * *also* copied to the blend_meta appended (by convention),
997 * but this is the field actually read by the hardware. (Or
998 * maybe both are read...?) */
999
1000 if (ctx->blend->has_blend_shader) {
1001 ctx->fragment_shader_core.blend.shader = ctx->blend->blend_shader;
1002 }
1003
1004 if (ctx->require_sfbd) {
1005                         /* On single render target (SFBD) platforms, the blend
1006                          * information lives inside the shader meta itself. We
1007 * additionally need to signal CAN_DISCARD for nontrivial blend
1008 * modes (so we're able to read back the destination buffer) */
1009
1010 if (!ctx->blend->has_blend_shader) {
1011 ctx->fragment_shader_core.blend.equation = ctx->blend->equation;
1012 ctx->fragment_shader_core.blend.constant = ctx->blend->constant;
1013 }
1014
1015 if (!no_blending) {
1016 ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
1017 }
1018 }
1019
1020 size_t size = sizeof(struct mali_shader_meta) + sizeof(struct midgard_blend_rt);
1021 struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);
1022 memcpy(transfer.cpu, &ctx->fragment_shader_core, sizeof(struct mali_shader_meta));
1023
1024 ctx->payload_tiler.postfix._shader_upper = (transfer.gpu) >> 4;
1025
1026 if (!ctx->require_sfbd) {
1027 /* Additional blend descriptor tacked on for jobs using MFBD */
1028
1029 unsigned blend_count = 0x200;
1030
1031 if (ctx->blend->has_blend_shader) {
1032 /* For a blend shader, the bottom nibble corresponds to
1033 * the number of work registers used, which signals the
1034 * -existence- of a blend shader */
1035
1036 assert(ctx->blend->blend_work_count >= 2);
1037 blend_count |= MIN2(ctx->blend->blend_work_count, 3);
1038 } else {
1039 /* Otherwise, the bottom bit simply specifies if
1040 * blending (anything other than REPLACE) is enabled */
1041
1042
1043 if (!no_blending)
1044 blend_count |= 0x1;
1045 }
1046
1047 struct midgard_blend_rt rts[4];
1048
1049 /* TODO: MRT */
1050
1051 for (unsigned i = 0; i < 1; ++i) {
1052 rts[i].flags = blend_count;
1053
1054 if (ctx->blend->has_blend_shader) {
1055 rts[i].blend.shader = ctx->blend->blend_shader;
1056 } else {
1057 rts[i].blend.equation = ctx->blend->equation;
1058 rts[i].blend.constant = ctx->blend->constant;
1059 }
1060 }
1061
1062 memcpy(transfer.cpu + sizeof(struct mali_shader_meta), rts, sizeof(rts[0]) * 1);
1063 }
1064 }
1065
1066 /* We stage to transient, so always dirty.. */
1067 panfrost_stage_attributes(ctx);
1068
1069 if (ctx->dirty & PAN_DIRTY_SAMPLERS) {
1070 /* Upload samplers back to back, no padding */
1071
1072 for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
1073 if (!ctx->sampler_count[t]) continue;
1074
1075 struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(struct mali_sampler_descriptor) * ctx->sampler_count[t]);
1076 struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *) transfer.cpu;
1077
1078 for (int i = 0; i < ctx->sampler_count[t]; ++i) {
1079 desc[i] = ctx->samplers[t][i]->hw;
1080 }
1081
1082 if (t == PIPE_SHADER_FRAGMENT)
1083 ctx->payload_tiler.postfix.sampler_descriptor = transfer.gpu;
1084 else if (t == PIPE_SHADER_VERTEX)
1085 ctx->payload_vertex.postfix.sampler_descriptor = transfer.gpu;
1086 else
1087 assert(0);
1088 }
1089 }
1090
1091 if (ctx->dirty & PAN_DIRTY_TEXTURES) {
1092 for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
1093 /* Shortcircuit */
1094 if (!ctx->sampler_view_count[t]) continue;
1095
1096 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
1097
1098 for (int i = 0; i < ctx->sampler_view_count[t]; ++i) {
1099 if (!ctx->sampler_views[t][i])
1100 continue;
1101
1102 struct pipe_resource *tex_rsrc = ctx->sampler_views[t][i]->base.texture;
1103 struct panfrost_resource *rsrc = (struct panfrost_resource *) tex_rsrc;
1104
1105 /* Inject the addresses in, interleaving cube
1106 * faces and mip levels appropriately. */
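                                /* Illustratively, for a cubemap (array_size =
                                 * 6) with two mip levels: level 0 fills slots
                                 * 0..5 and level 1 fills slots 6..11 */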
1107
1108 for (int l = 0; l <= tex_rsrc->last_level; ++l) {
1109 for (int f = 0; f < tex_rsrc->array_size; ++f) {
1110 unsigned idx = (l * tex_rsrc->array_size) + f;
1111
1112 ctx->sampler_views[t][i]->hw.swizzled_bitmaps[idx] =
1113 rsrc->bo->gpu +
1114 rsrc->bo->slices[l].offset +
1115 f * rsrc->bo->cubemap_stride;
1116 }
1117 }
1118
1119 /* Inject the strides */
1120 unsigned usage2 = ctx->sampler_views[t][i]->hw.format.usage2;
1121
1122 if (usage2 & MALI_TEX_MANUAL_STRIDE) {
1123 unsigned idx = tex_rsrc->last_level * tex_rsrc->array_size;
1124 idx += tex_rsrc->array_size;
1125
1126 ctx->sampler_views[t][i]->hw.swizzled_bitmaps[idx] =
1127 rsrc->bo->slices[0].stride;
1128 }
1129
1130 trampolines[i] = panfrost_upload_transient(ctx, &ctx->sampler_views[t][i]->hw, sizeof(struct mali_texture_descriptor));
1131 }
1132
1133 mali_ptr trampoline = panfrost_upload_transient(ctx, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]);
1134
1135 if (t == PIPE_SHADER_FRAGMENT)
1136 ctx->payload_tiler.postfix.texture_trampoline = trampoline;
1137 else if (t == PIPE_SHADER_VERTEX)
1138 ctx->payload_vertex.postfix.texture_trampoline = trampoline;
1139 else
1140 assert(0);
1141 }
1142 }
1143
1144 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
1145
1146 /* For flipped-Y buffers (signaled by negative scale), the translate is
1147 * flipped as well */
1148
1149 bool invert_y = vp->scale[1] < 0.0;
1150 float translate_y = vp->translate[1];
1151
1152 if (invert_y)
1153 translate_y = ctx->pipe_framebuffer.height - translate_y;
1154
1155 for (int i = 0; i <= PIPE_SHADER_FRAGMENT; ++i) {
1156 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[i];
1157
1158 struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
1159 struct panfrost_shader_state *fs = &ctx->fs->variants[ctx->fs->active_variant];
1160 struct panfrost_shader_state *ss = (i == PIPE_SHADER_FRAGMENT) ? fs : vs;
1161
1162 /* Allocate room for the sysval and the uniforms */
1163 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1164 size_t size = sys_size + buf->size;
1165 struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);
1166
1167 /* Upload sysvals requested by the shader */
1168 float *uniforms = (float *) transfer.cpu;
1169 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1170 int sysval = ss->sysval[i];
1171
1172 if (sysval == PAN_SYSVAL_VIEWPORT_SCALE) {
1173 uniforms[4*i + 0] = vp->scale[0];
1174 uniforms[4*i + 1] = fabsf(vp->scale[1]);
1175 uniforms[4*i + 2] = vp->scale[2];
1176 } else if (sysval == PAN_SYSVAL_VIEWPORT_OFFSET) {
1177 uniforms[4*i + 0] = vp->translate[0];
1178 uniforms[4*i + 1] = translate_y;
1179 uniforms[4*i + 2] = vp->translate[2];
1180 } else {
1181 assert(0);
1182 }
1183 }
1184
1185 /* Upload uniforms */
1186 memcpy(transfer.cpu + sys_size, buf->buffer, buf->size);
1187
1188 int uniform_count = 0;
1189
1190 struct mali_vertex_tiler_postfix *postfix;
1191
1192 switch (i) {
1193 case PIPE_SHADER_VERTEX:
1194 uniform_count = ctx->vs->variants[ctx->vs->active_variant].uniform_count;
1195 postfix = &ctx->payload_vertex.postfix;
1196 break;
1197
1198 case PIPE_SHADER_FRAGMENT:
1199 uniform_count = ctx->fs->variants[ctx->fs->active_variant].uniform_count;
1200 postfix = &ctx->payload_tiler.postfix;
1201 break;
1202
1203 default:
1204 unreachable("Invalid shader stage\n");
1205 }
1206
1207 /* Also attach the same buffer as a UBO for extended access */
1208
1209 struct mali_uniform_buffer_meta uniform_buffers[] = {
1210 {
1211 .size = MALI_POSITIVE((2 + uniform_count)),
1212 .ptr = transfer.gpu >> 2,
1213 },
1214 };
1215
1216 mali_ptr ubufs = panfrost_upload_transient(ctx, uniform_buffers, sizeof(uniform_buffers));
1217 postfix->uniforms = transfer.gpu;
1218 postfix->uniform_buffers = ubufs;
1219
1220 buf->dirty = 0;
1221 }
1222
1223 /* TODO: Upload the viewport somewhere more appropriate */
1224
1225 /* Clip bounds are encoded as floats. The viewport itself is encoded as
1226 * (somewhat) asymmetric ints. */
1227 const struct pipe_scissor_state *ss = &ctx->scissor;
1228
1229 struct mali_viewport view = {
1230 /* By default, do no viewport clipping, i.e. clip to (-inf,
1231 * inf) in each direction. Clipping to the viewport in theory
1232 * should work, but in practice causes issues when we're not
1233 * explicitly trying to scissor */
1234
1235 .clip_minx = -inff,
1236 .clip_miny = -inff,
1237 .clip_maxx = inff,
1238 .clip_maxy = inff,
1239
1240 .clip_minz = 0.0,
1241 .clip_maxz = 1.0,
1242 };
1243
1244 /* Always scissor to the viewport by default. */
1245 view.viewport0[0] = (int) (vp->translate[0] - vp->scale[0]);
1246 view.viewport1[0] = MALI_POSITIVE((int) (vp->translate[0] + vp->scale[0]));
1247
1248 view.viewport0[1] = (int) (translate_y - fabs(vp->scale[1]));
1249 view.viewport1[1] = MALI_POSITIVE((int) (translate_y + fabs(vp->scale[1])));
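        /* Illustrative example, assuming MALI_POSITIVE stores its argument
         * minus one: translate[0] = 400 and scale[0] = 400 span x in
         * [0, 800), giving viewport0[0] = 0 and viewport1[0] =
         * MALI_POSITIVE(800) */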
1250
1251 if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
1252 /* Invert scissor if needed */
1253 unsigned miny = invert_y ?
1254 ctx->pipe_framebuffer.height - ss->maxy : ss->miny;
1255
1256 unsigned maxy = invert_y ?
1257 ctx->pipe_framebuffer.height - ss->miny : ss->maxy;
1258
1259 /* Set the actual scissor */
1260 view.viewport0[0] = ss->minx;
1261 view.viewport0[1] = miny;
1262 view.viewport1[0] = MALI_POSITIVE(ss->maxx);
1263 view.viewport1[1] = MALI_POSITIVE(maxy);
1264 }
1265
1266 ctx->payload_tiler.postfix.viewport =
1267 panfrost_upload_transient(ctx,
1268 &view,
1269 sizeof(struct mali_viewport));
1270
1271 ctx->dirty = 0;
1272 }
1273
1274 /* Corresponds to exactly one draw, but does not submit anything */
1275
1276 static void
1277 panfrost_queue_draw(struct panfrost_context *ctx)
1278 {
1279 /* TODO: Expand the array? */
1280 if (ctx->draw_count >= MAX_DRAW_CALLS) {
1281 DBG("Job buffer overflow, ignoring draw\n");
1282 assert(0);
1283 }
1284
1285 /* Handle dirty flags now */
1286 panfrost_emit_for_draw(ctx, true);
1287
1288 struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false, false);
1289 struct panfrost_transfer tiler = panfrost_vertex_tiler_job(ctx, true, false);
1290
1291 ctx->u_vertex_jobs[ctx->vertex_job_count] = (struct mali_job_descriptor_header *) vertex.cpu;
1292 ctx->vertex_jobs[ctx->vertex_job_count++] = vertex.gpu;
1293
1294 ctx->u_tiler_jobs[ctx->tiler_job_count] = (struct mali_job_descriptor_header *) tiler.cpu;
1295 ctx->tiler_jobs[ctx->tiler_job_count++] = tiler.gpu;
1296
1297 ctx->draw_count++;
1298 }
1299
1300 /* At the end of the frame, the vertex and tiler jobs are linked together and
1301  * then the fragment job is plonked at the end. The set-value job comes
1302  * first, for unknown reasons. */
1303
1304 static void
1305 panfrost_link_job_pair(struct mali_job_descriptor_header *first, mali_ptr next)
1306 {
1307 if (first->job_descriptor_size)
1308 first->next_job_64 = (u64) (uintptr_t) next;
1309 else
1310 first->next_job_32 = (u32) (uintptr_t) next;
1311 }
1312
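/* A sketch of the resulting chain (illustrative):
 *
 *   set_value -> V1 -> V2 -> ... -> Vn -> T1 -> T2 -> ... -> Tn -> (end)
 */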
1313 static void
1314 panfrost_link_jobs(struct panfrost_context *ctx)
1315 {
1316 if (ctx->draw_count) {
1317 /* Generate the set_value_job */
1318 panfrost_set_value_job(ctx);
1319
1320 /* Have the first vertex job depend on the set value job */
1321 ctx->u_vertex_jobs[0]->job_dependency_index_1 = ctx->u_set_value_job->job_index;
1322
1323 /* SV -> V */
1324 panfrost_link_job_pair(ctx->u_set_value_job, ctx->vertex_jobs[0]);
1325 }
1326
1327 /* V -> V/T ; T -> T/null */
1328 for (int i = 0; i < ctx->vertex_job_count; ++i) {
1329 bool isLast = (i + 1) == ctx->vertex_job_count;
1330
1331 panfrost_link_job_pair(ctx->u_vertex_jobs[i], isLast ? ctx->tiler_jobs[0] : ctx->vertex_jobs[i + 1]);
1332 }
1333
1334 /* T -> T/null */
1335 for (int i = 0; i < ctx->tiler_job_count; ++i) {
1336 bool isLast = (i + 1) == ctx->tiler_job_count;
1337 panfrost_link_job_pair(ctx->u_tiler_jobs[i], isLast ? 0 : ctx->tiler_jobs[i + 1]);
1338 }
1339 }
1340
1341 /* The entire frame is in memory -- send it off to the kernel! */
1342
1343 static void
1344 panfrost_submit_frame(struct panfrost_context *ctx, bool flush_immediate,
1345 struct pipe_fence_handle **fence,
1346 struct panfrost_job *job)
1347 {
1348 struct pipe_context *gallium = (struct pipe_context *) ctx;
1349 struct panfrost_screen *screen = pan_screen(gallium->screen);
1350
1351 /* Edge case if screen is cleared and nothing else */
1352 bool has_draws = ctx->draw_count > 0;
1353
1354         /* Work around a bizarre lockup (a hardware erratum?) */
1355 if (!has_draws)
1356 flush_immediate = true;
1357
1358 /* A number of jobs are batched -- this must be linked and cleared */
1359 panfrost_link_jobs(ctx);
1360
1361 ctx->draw_count = 0;
1362 ctx->vertex_job_count = 0;
1363 ctx->tiler_job_count = 0;
1364
1365 #ifndef DRY_RUN
1366
1367 bool is_scanout = panfrost_is_scanout(ctx);
1368 screen->driver->submit_vs_fs_job(ctx, has_draws, is_scanout);
1369
1370 /* If visual, we can stall a frame */
1371
1372 if (!flush_immediate)
1373 screen->driver->force_flush_fragment(ctx, fence);
1374
1375 screen->last_fragment_flushed = false;
1376 screen->last_job = job;
1377
1378 /* If readback, flush now (hurts the pipelined performance) */
1379 if (flush_immediate)
1380 screen->driver->force_flush_fragment(ctx, fence);
1381
1382 if (screen->driver->dump_counters && pan_counters_base) {
1383 screen->driver->dump_counters(screen);
1384
1385 char filename[128];
1386 snprintf(filename, sizeof(filename), "%s/frame%d.mdgprf", pan_counters_base, ++performance_counter_number);
1387 FILE *fp = fopen(filename, "wb");
1388 fwrite(screen->perf_counters.cpu, 4096, sizeof(uint32_t), fp);
1389 fclose(fp);
1390 }
1391
1392 #endif
1393 }
1394
1395 void
1396 panfrost_flush(
1397 struct pipe_context *pipe,
1398 struct pipe_fence_handle **fence,
1399 unsigned flags)
1400 {
1401 struct panfrost_context *ctx = pan_context(pipe);
1402 struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
1403
1404 /* Nothing to do! */
1405 if (!ctx->draw_count && !job->clear) return;
1406
1407 /* Whether to stall the pipeline for immediately correct results */
1408 bool flush_immediate = flags & PIPE_FLUSH_END_OF_FRAME;
1409
1410 /* Submit the frame itself */
1411 panfrost_submit_frame(ctx, flush_immediate, fence, job);
1412
1413 /* Prepare for the next frame */
1414 panfrost_invalidate_frame(ctx);
1415 }
1416
1417 #define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c;
1418
1419 static int
1420 g2m_draw_mode(enum pipe_prim_type mode)
1421 {
1422 switch (mode) {
1423 DEFINE_CASE(POINTS);
1424 DEFINE_CASE(LINES);
1425 DEFINE_CASE(LINE_LOOP);
1426 DEFINE_CASE(LINE_STRIP);
1427 DEFINE_CASE(TRIANGLES);
1428 DEFINE_CASE(TRIANGLE_STRIP);
1429 DEFINE_CASE(TRIANGLE_FAN);
1430 DEFINE_CASE(QUADS);
1431 DEFINE_CASE(QUAD_STRIP);
1432 DEFINE_CASE(POLYGON);
1433
1434 default:
1435 unreachable("Invalid draw mode");
1436 }
1437 }
1438
1439 #undef DEFINE_CASE
1440
1441 static unsigned
1442 panfrost_translate_index_size(unsigned size)
1443 {
1444 switch (size) {
1445 case 1:
1446 return MALI_DRAW_INDEXED_UINT8;
1447
1448 case 2:
1449 return MALI_DRAW_INDEXED_UINT16;
1450
1451 case 4:
1452 return MALI_DRAW_INDEXED_UINT32;
1453
1454 default:
1455 unreachable("Invalid index size");
1456 }
1457 }
1458
1459 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
1460  * good for the duration of the draw (transient), though it could last longer */
1461
1462 static mali_ptr
1463 panfrost_get_index_buffer_mapped(struct panfrost_context *ctx, const struct pipe_draw_info *info)
1464 {
1465 struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);
1466
1467 off_t offset = info->start * info->index_size;
1468
1469 if (!info->has_user_indices) {
1470 /* Only resources can be directly mapped */
1471 return rsrc->bo->gpu + offset;
1472 } else {
1473 /* Otherwise, we need to upload to transient memory */
1474 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
1475 return panfrost_upload_transient(ctx, ibuf8 + offset, info->count * info->index_size);
1476 }
1477 }
1478
1479 static void
1480 panfrost_draw_vbo(
1481 struct pipe_context *pipe,
1482 const struct pipe_draw_info *info)
1483 {
1484 struct panfrost_context *ctx = pan_context(pipe);
1485
1486 ctx->payload_vertex.draw_start = info->start;
1487 ctx->payload_tiler.draw_start = info->start;
1488
1489 int mode = info->mode;
1490
1491 /* Fallback for unsupported modes */
1492
1493 if (!(ctx->draw_modes & (1 << mode))) {
1494 if (mode == PIPE_PRIM_QUADS && info->count == 4 && ctx->rasterizer && !ctx->rasterizer->base.flatshade) {
1495 mode = PIPE_PRIM_TRIANGLE_FAN;
1496 } else {
1497 if (info->count < 4) {
1498 /* Degenerate case? */
1499 return;
1500 }
1501
1502 util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
1503 util_primconvert_draw_vbo(ctx->primconvert, info);
1504 return;
1505 }
1506 }
1507
1508 /* Now that we have a guaranteed terminating path, find the job.
1509 * Assignment commented out to prevent unused warning */
1510
1511 /* struct panfrost_job *job = */ panfrost_get_job_for_fbo(ctx);
1512
1513 ctx->payload_tiler.prefix.draw_mode = g2m_draw_mode(mode);
1514
1515 ctx->vertex_count = info->count;
1516
1517 /* For non-indexed draws, they're the same */
1518 unsigned invocation_count = ctx->vertex_count;
1519
1520 unsigned draw_flags = 0;
1521
1522         /* The draw flags control how primitive size is interpreted */
1523
1524 if (panfrost_writes_point_size(ctx))
1525 draw_flags |= MALI_DRAW_VARYING_SIZE;
1526
1527         /* For larger vertex counts (beyond what fits in a 16-bit short), the
1528          * other value is needed; otherwise there will be bizarre
1529 * rendering artefacts. It's not clear what these values mean yet. */
1530
1531 draw_flags |= (mode == PIPE_PRIM_POINTS || ctx->vertex_count > 65535) ? 0x3000 : 0x18000;
1532
1533 if (info->index_size) {
1534 /* Calculate the min/max index used so we can figure out how
1535 * many times to invoke the vertex shader */
1536
1537 /* Fetch / calculate index bounds */
1538 unsigned min_index = 0, max_index = 0;
1539
1540 if (info->max_index == ~0u) {
1541 u_vbuf_get_minmax_index(pipe, info, &min_index, &max_index);
1542 } else {
1543 min_index = info->min_index;
1544 max_index = info->max_index;
1545 }
1546
1547 /* Use the corresponding values */
1548 invocation_count = max_index - min_index + 1;
1549 ctx->payload_vertex.draw_start = min_index;
1550 ctx->payload_tiler.draw_start = min_index;
1551
1552 ctx->payload_tiler.prefix.negative_start = -min_index;
1553 ctx->payload_tiler.prefix.index_count = MALI_POSITIVE(info->count);
1554
1555 //assert(!info->restart_index); /* TODO: Research */
1556 assert(!info->index_bias);
1557
1558 draw_flags |= panfrost_translate_index_size(info->index_size);
1559 ctx->payload_tiler.prefix.indices = panfrost_get_index_buffer_mapped(ctx, info);
1560 } else {
1561 /* Index count == vertex count, if no indexing is applied, as
1562 * if it is internally indexed in the expected order */
1563
1564 ctx->payload_tiler.prefix.negative_start = 0;
1565 ctx->payload_tiler.prefix.index_count = MALI_POSITIVE(ctx->vertex_count);
1566
1567 /* Reverse index state */
1568 ctx->payload_tiler.prefix.indices = (uintptr_t) NULL;
1569 }
1570
1571 ctx->payload_vertex.prefix.invocation_count = MALI_POSITIVE(invocation_count);
1572 ctx->payload_tiler.prefix.invocation_count = MALI_POSITIVE(invocation_count);
1573 ctx->payload_tiler.prefix.unknown_draw = draw_flags;
1574
1575 /* Fire off the draw itself */
1576 panfrost_queue_draw(ctx);
1577 }
1578
1579 /* CSO state */
1580
1581 static void
1582 panfrost_generic_cso_delete(struct pipe_context *pctx, void *hwcso)
1583 {
1584 free(hwcso);
1585 }
1586
1587 static void *
1588 panfrost_create_rasterizer_state(
1589 struct pipe_context *pctx,
1590 const struct pipe_rasterizer_state *cso)
1591 {
1592 struct panfrost_context *ctx = pan_context(pctx);
1593 struct panfrost_rasterizer *so = CALLOC_STRUCT(panfrost_rasterizer);
1594
1595 so->base = *cso;
1596
1597         /* Bitmask; the meaning of the starting value is unknown */
1598 so->tiler_gl_enables = ctx->is_t6xx ? 0x105 : 0x7;
1599
1600 so->tiler_gl_enables |= MALI_FRONT_FACE(
1601 cso->front_ccw ? MALI_CCW : MALI_CW);
1602
1603 if (cso->cull_face & PIPE_FACE_FRONT)
1604 so->tiler_gl_enables |= MALI_CULL_FACE_FRONT;
1605
1606 if (cso->cull_face & PIPE_FACE_BACK)
1607 so->tiler_gl_enables |= MALI_CULL_FACE_BACK;
1608
1609 return so;
1610 }
1611
1612 static void
1613 panfrost_bind_rasterizer_state(
1614 struct pipe_context *pctx,
1615 void *hwcso)
1616 {
1617 struct panfrost_context *ctx = pan_context(pctx);
1618
1619 /* TODO: Why can't rasterizer be NULL ever? Other drivers are fine.. */
1620 if (!hwcso)
1621 return;
1622
1623 ctx->rasterizer = hwcso;
1624 ctx->dirty |= PAN_DIRTY_RASTERIZER;
1625 }
1626
1627 static void *
1628 panfrost_create_vertex_elements_state(
1629 struct pipe_context *pctx,
1630 unsigned num_elements,
1631 const struct pipe_vertex_element *elements)
1632 {
1633 struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state);
1634
1635 so->num_elements = num_elements;
1636 memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
1637
1638 /* XXX: What the cornball? This is totally, 100%, unapologetically
1639 * nonsense. And yet it somehow fixes a regression in -bshadow
1640 * (previously, we allocated the descriptor here... a newer commit
1641 * removed that allocation, and then memory corruption led to
1642 * shader_meta getting overwritten in bad ways and then the whole test
1643          * case falling apart). TODO: LOOK INTO PLEASE XXX XXX BAD XXX XXX XXX
1644 */
1645 panfrost_allocate_chunk(pan_context(pctx), 0, HEAP_DESCRIPTOR);
1646
1647 for (int i = 0; i < num_elements; ++i) {
1648 so->hw[i].index = elements[i].vertex_buffer_index;
1649
1650 enum pipe_format fmt = elements[i].src_format;
1651 const struct util_format_description *desc = util_format_description(fmt);
1652 so->hw[i].unknown1 = 0x2;
1653 so->hw[i].swizzle = panfrost_get_default_swizzle(desc->nr_channels);
1654
1655 so->hw[i].format = panfrost_find_format(desc);
1656
1657 /* The field itself should probably be shifted over */
1658 so->hw[i].src_offset = elements[i].src_offset;
1659 }
1660
1661 return so;
1662 }
1663
1664 static void
1665 panfrost_bind_vertex_elements_state(
1666 struct pipe_context *pctx,
1667 void *hwcso)
1668 {
1669 struct panfrost_context *ctx = pan_context(pctx);
1670
1671 ctx->vertex = hwcso;
1672 ctx->dirty |= PAN_DIRTY_VERTEX;
1673 }
1674
1675 static void *
1676 panfrost_create_shader_state(
1677 struct pipe_context *pctx,
1678 const struct pipe_shader_state *cso)
1679 {
1680 struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
1681 so->base = *cso;
1682
1683 /* Token deep copy to prevent memory corruption */
1684
1685 if (cso->type == PIPE_SHADER_IR_TGSI)
1686 so->base.tokens = tgsi_dup_tokens(so->base.tokens);
1687
1688 return so;
1689 }
1690
1691 static void
1692 panfrost_delete_shader_state(
1693 struct pipe_context *pctx,
1694 void *so)
1695 {
1696 struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;
1697
1698 if (cso->base.type == PIPE_SHADER_IR_TGSI) {
1699 DBG("Deleting TGSI shader leaks duplicated tokens\n");
1700 }
1701
1702 free(so);
1703 }
1704
1705 static void *
1706 panfrost_create_sampler_state(
1707 struct pipe_context *pctx,
1708 const struct pipe_sampler_state *cso)
1709 {
1710 struct panfrost_sampler_state *so = CALLOC_STRUCT(panfrost_sampler_state);
1711 so->base = *cso;
1712
1713 /* sampler_state corresponds to mali_sampler_descriptor, which we can generate entirely here */
1714
1715 struct mali_sampler_descriptor sampler_descriptor = {
1716 .filter_mode = MALI_TEX_MIN(translate_tex_filter(cso->min_img_filter))
1717 | MALI_TEX_MAG(translate_tex_filter(cso->mag_img_filter))
1718 | translate_mip_filter(cso->min_mip_filter)
1719 | 0x20,
1720
1721 .wrap_s = translate_tex_wrap(cso->wrap_s),
1722 .wrap_t = translate_tex_wrap(cso->wrap_t),
1723 .wrap_r = translate_tex_wrap(cso->wrap_r),
1724 .compare_func = panfrost_translate_alt_compare_func(cso->compare_func),
1725 .border_color = {
1726 cso->border_color.f[0],
1727 cso->border_color.f[1],
1728 cso->border_color.f[2],
1729 cso->border_color.f[3]
1730 },
1731 .min_lod = FIXED_16(cso->min_lod),
1732 .max_lod = FIXED_16(cso->max_lod),
1733 .unknown2 = 1,
1734 };
1735
1736 so->hw = sampler_descriptor;
1737
1738 return so;
1739 }
1740
1741 static void
1742 panfrost_bind_sampler_states(
1743 struct pipe_context *pctx,
1744 enum pipe_shader_type shader,
1745 unsigned start_slot, unsigned num_sampler,
1746 void **sampler)
1747 {
1748 assert(start_slot == 0);
1749
1750 struct panfrost_context *ctx = pan_context(pctx);
1751
1752 /* XXX: Should upload, not just copy? */
1753 ctx->sampler_count[shader] = num_sampler;
1754 memcpy(ctx->samplers[shader], sampler, num_sampler * sizeof (void *));
1755
1756 ctx->dirty |= PAN_DIRTY_SAMPLERS;
1757 }
1758
1759 static bool
1760 panfrost_variant_matches(struct panfrost_context *ctx, struct panfrost_shader_state *variant)
1761 {
1762 struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;
1763
1764 if (alpha->enabled || variant->alpha_state.enabled) {
1765 /* Make sure enable state is at least the same */
1766 if (alpha->enabled != variant->alpha_state.enabled) {
1767 return false;
1768 }
1769
1770 /* Check that the contents of the test are the same */
1771 bool same_func = alpha->func == variant->alpha_state.func;
1772 bool same_ref = alpha->ref_value == variant->alpha_state.ref_value;
1773
1774 if (!(same_func && same_ref)) {
1775 return false;
1776 }
1777 }
1778 /* Otherwise, we're good to go */
1779 return true;
1780 }
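/* Only the alpha-test state keys a variant right now. So, for example,
 * enabling the alpha test, drawing, changing the alpha func, and drawing
 * again would fail the match above and cause panfrost_bind_fs_state below
 * to create and compile a second variant of the same fragment shader. */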
1781
1782 static void
1783 panfrost_bind_fs_state(
1784 struct pipe_context *pctx,
1785 void *hwcso)
1786 {
1787 struct panfrost_context *ctx = pan_context(pctx);
1788
1789 ctx->fs = hwcso;
1790
1791 if (hwcso) {
1792 /* Match the appropriate variant */
1793
1794 signed variant = -1;
1795
1796 struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;
1797
1798 for (unsigned i = 0; i < variants->variant_count; ++i) {
1799 if (panfrost_variant_matches(ctx, &variants->variants[i])) {
1800 variant = i;
1801 break;
1802 }
1803 }
1804
1805 if (variant == -1) {
1806 /* No variant matched, so create a new one */
1807 variant = variants->variant_count++;
1808 assert(variants->variant_count < MAX_SHADER_VARIANTS);
1809
1810 variants->variants[variant].base = hwcso;
1811 variants->variants[variant].alpha_state = ctx->depth_stencil->alpha;
1812
1813 /* Allocate the mapped descriptor ahead-of-time. TODO: Use for the
1814 * VS as well as the FS (see the DRY note in panfrost_bind_vs_state).
1815 * Note ctx is already in scope; no need to re-derive it from pctx. */
struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR);
1816
1817 variants->variants[variant].tripipe = (struct mali_shader_meta *) transfer.cpu;
1818 variants->variants[variant].tripipe_gpu = transfer.gpu;
1819
1820 }
1821
1822 /* Select this variant */
1823 variants->active_variant = variant;
1824
1825 struct panfrost_shader_state *shader_state = &variants->variants[variant];
1826 assert(panfrost_variant_matches(ctx, shader_state));
1827
1828 /* Now we have a variant selected, so compile and go */
1829
1830 if (!shader_state->compiled) {
1831 panfrost_shader_compile(ctx, shader_state->tripipe, NULL, JOB_TYPE_TILER, shader_state);
1832 shader_state->compiled = true;
1833 }
1834 }
1835
1836 ctx->dirty |= PAN_DIRTY_FS;
1837 }
1838
1839 static void
1840 panfrost_bind_vs_state(
1841 struct pipe_context *pctx,
1842 void *hwcso)
1843 {
1844 struct panfrost_context *ctx = pan_context(pctx);
1845
1846 ctx->vs = hwcso;
1847
1848 if (hwcso) {
1849 if (!ctx->vs->variants[0].compiled) {
1850 ctx->vs->variants[0].base = hwcso;
1851
1852 /* TODO DRY from above */
1853 struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR);
1854 ctx->vs->variants[0].tripipe = (struct mali_shader_meta *) transfer.cpu;
1855 ctx->vs->variants[0].tripipe_gpu = transfer.gpu;
1856
1857 panfrost_shader_compile(ctx, ctx->vs->variants[0].tripipe, NULL, JOB_TYPE_VERTEX, &ctx->vs->variants[0]);
1858 ctx->vs->variants[0].compiled = true;
1859 }
1860 }
1861
1862 ctx->dirty |= PAN_DIRTY_VS;
1863 }
1864
1865 static void
1866 panfrost_set_vertex_buffers(
1867 struct pipe_context *pctx,
1868 unsigned start_slot,
1869 unsigned num_buffers,
1870 const struct pipe_vertex_buffer *buffers)
1871 {
1872 struct panfrost_context *ctx = pan_context(pctx);
1873
1874 util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers, start_slot, num_buffers);
1875 }
1876
1877 static void
1878 panfrost_set_constant_buffer(
1879 struct pipe_context *pctx,
1880 enum pipe_shader_type shader, uint index,
1881 const struct pipe_constant_buffer *buf)
1882 {
1883 struct panfrost_context *ctx = pan_context(pctx);
1884 struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];
1885
1886 size_t sz = buf ? buf->buffer_size : 0;
1887
1888 /* Free previous buffer */
1889
1890 pbuf->dirty = true;
1891 pbuf->size = sz;
1892
1893 if (pbuf->buffer) {
1894 free(pbuf->buffer);
1895 pbuf->buffer = NULL;
1896 }
1897
1898 /* If unbinding, we're done */
1899
1900 if (!buf)
1901 return;
1902
1903 /* Multiple constant buffers not yet supported */
1904 assert(index == 0);
1905
1906 const uint8_t *cpu;
1907
1908 struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer);
1909
1910 if (rsrc) {
1911 cpu = rsrc->bo->cpu;
1912 } else if (buf->user_buffer) {
1913 cpu = buf->user_buffer;
1914 } else {
1915 DBG("No constant buffer?\n");
1916 return;
1917 }
1918
1919 /* Copy the constant buffer into the driver context for later upload */
1920
1921 pbuf->buffer = malloc(sz);
1922 memcpy(pbuf->buffer, cpu + buf->buffer_offset, sz);
1923 }
1924
1925 static void
1926 panfrost_set_stencil_ref(
1927 struct pipe_context *pctx,
1928 const struct pipe_stencil_ref *ref)
1929 {
1930 struct panfrost_context *ctx = pan_context(pctx);
1931 ctx->stencil_ref = *ref;
1932
1933 /* Shader core dirty */
1934 ctx->dirty |= PAN_DIRTY_FS;
1935 }
1936
1937 static struct pipe_sampler_view *
1938 panfrost_create_sampler_view(
1939 struct pipe_context *pctx,
1940 struct pipe_resource *texture,
1941 const struct pipe_sampler_view *template)
1942 {
1943 struct panfrost_sampler_view *so = CALLOC_STRUCT(panfrost_sampler_view);
1944 int bytes_per_pixel = util_format_get_blocksize(texture->format);
1945
1946 pipe_reference(NULL, &texture->reference);
1947
1948 struct panfrost_resource *prsrc = (struct panfrost_resource *) texture;
1949 assert(prsrc->bo);
1950
1951 so->base = *template;
1952 so->base.texture = texture;
1953 so->base.reference.count = 1;
1954 so->base.context = pctx;
1955
1956 /* sampler_views correspond to texture descriptors, minus the texture
1957 * (data) itself. So, we serialise the descriptor here and cache it for
1958 * later. */
1959
1960 /* Make sure it's something with which we're familiar */
1961 assert(bytes_per_pixel >= 1 && bytes_per_pixel <= 4);
1962
1963 /* TODO: Detect from format better */
1964 const struct util_format_description *desc = util_format_description(prsrc->base.format);
1965
1966 unsigned char user_swizzle[4] = {
1967 template->swizzle_r,
1968 template->swizzle_g,
1969 template->swizzle_b,
1970 template->swizzle_a
1971 };
1972
1973 enum mali_format format = panfrost_find_format(desc);
1974
1975 bool is_depth = desc->format == PIPE_FORMAT_Z32_UNORM;
1976
1977 unsigned usage2_layout = 0x10;
1978
1979 switch (prsrc->bo->layout) {
1980 case PAN_AFBC:
1981 usage2_layout |= 0x8 | 0x4;
1982 break;
1983 case PAN_TILED:
1984 usage2_layout |= 0x1;
1985 break;
1986 case PAN_LINEAR:
1987 usage2_layout |= is_depth ? 0x1 : 0x2;
1988 break;
1989 default:
1990 assert(0);
1991 break;
1992 }
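/* The low bits of usage2 evidently select the memory layout on top of
 * the 0x10 base: 0x8 | 0x4 for AFBC, 0x1 for tiled (also used for
 * linear depth), 0x2 for linear colour. These values come from reverse
 * engineering and are not authoritative. */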
1993
1994 /* Check if we need to set a custom stride by computing the "expected"
1995 * stride and comparing it to what the BO actually wants. Only applies
1996 * to linear textures. TODO: Mipmaps? */
1997
1998 unsigned actual_stride = prsrc->bo->slices[0].stride;
1999
2000 if (prsrc->bo->layout == PAN_LINEAR &&
2001 template->u.tex.last_level == 0 &&
2002 template->u.tex.first_level == 0 &&
2003 (texture->width0 * bytes_per_pixel) != actual_stride) {
2004 usage2_layout |= MALI_TEX_MANUAL_STRIDE;
2005 }
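/* With MALI_TEX_MANUAL_STRIDE set, the texture payload presumably
 * carries an explicit stride word alongside the mip-level pointer;
 * that word is emitted where the payload is uploaded, not here. */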
2006
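/* Note the dimension fields are biased: MALI_POSITIVE(n) presumably
 * encodes n - 1, so e.g. a 256x256x1 texture stores 255/255/0. */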
2007 struct mali_texture_descriptor texture_descriptor = {
2008 .width = MALI_POSITIVE(texture->width0),
2009 .height = MALI_POSITIVE(texture->height0),
2010 .depth = MALI_POSITIVE(texture->depth0),
2011
2012 /* TODO: Decode */
2013 .format = {
2014 .swizzle = panfrost_translate_swizzle_4(desc->swizzle),
2015 .format = format,
2016
2017 .usage1 = 0x0,
2018 .is_not_cubemap = texture->target != PIPE_TEXTURE_CUBE,
2019
2020 .usage2 = usage2_layout
2021 },
2022
2023 .swizzle = panfrost_translate_swizzle_4(user_swizzle)
2024 };
2025
2026 /* TODO: Other base levels require adjusting dimensions / level numbers / etc */
2027 assert(template->u.tex.first_level == 0);
2028
2029 /* Disable mipmapping for now to avoid regressions while automipmapping
2030 * is being implemented. TODO: Remove me once automipmaps work */
2031
2032 //texture_descriptor.nr_mipmap_levels = template->u.tex.last_level - template->u.tex.first_level;
2033 texture_descriptor.nr_mipmap_levels = 0;
2034
2035 so->hw = texture_descriptor;
2036
2037 return (struct pipe_sampler_view *) so;
2038 }
2039
2040 static void
2041 panfrost_set_sampler_views(
2042 struct pipe_context *pctx,
2043 enum pipe_shader_type shader,
2044 unsigned start_slot, unsigned num_views,
2045 struct pipe_sampler_view **views)
2046 {
2047 struct panfrost_context *ctx = pan_context(pctx);
2048
2049 assert(start_slot == 0);
2050
2051 ctx->sampler_view_count[shader] = num_views;
2052 memcpy(ctx->sampler_views[shader], views, num_views * sizeof (void *));
2053
2054 ctx->dirty |= PAN_DIRTY_TEXTURES;
2055 }
2056
2057 static void
2058 panfrost_sampler_view_destroy(
2059 struct pipe_context *pctx,
2060 struct pipe_sampler_view *views)
2061 {
2062 //struct panfrost_context *ctx = pan_context(pctx);
2063
2064 /* TODO */
2065
2066 free(views);
2067 }
2068
2069 static void
2070 panfrost_set_framebuffer_state(struct pipe_context *pctx,
2071 const struct pipe_framebuffer_state *fb)
2072 {
2073 struct panfrost_context *ctx = pan_context(pctx);
2074
2075 /* Flush when switching away from an FBO */
2076
2077 if (!panfrost_is_scanout(ctx)) {
2078 panfrost_flush(pctx, NULL, 0);
2079 }
2080
2081 ctx->pipe_framebuffer.nr_cbufs = fb->nr_cbufs;
2082 ctx->pipe_framebuffer.samples = fb->samples;
2083 ctx->pipe_framebuffer.layers = fb->layers;
2084 ctx->pipe_framebuffer.width = fb->width;
2085 ctx->pipe_framebuffer.height = fb->height;
2086
2087 for (int i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
2088 struct pipe_surface *cb = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
2089
2090 /* check if changing cbuf */
2091 if (ctx->pipe_framebuffer.cbufs[i] == cb) continue;
2092
2093 if (cb && (i != 0)) {
2094 DBG("XXX: Multiple render targets not supported before t7xx!\n");
2095 assert(0);
2096 }
2097
2098 /* assign new */
2099 pipe_surface_reference(&ctx->pipe_framebuffer.cbufs[i], cb);
2100
2101 if (!cb)
2102 continue;
2103
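/* Pick the framebuffer descriptor flavour: pre-T760 GPUs (flagged by
 * require_sfbd) only have the Single Framebuffer Descriptor, while
 * newer ones take the Multiple Framebuffer Descriptor. */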
2104 if (ctx->require_sfbd)
2105 ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
2106 else
2107 ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);
2108
2109 panfrost_attach_vt_framebuffer(ctx);
2110
2111 struct panfrost_resource *tex = ((struct panfrost_resource *) ctx->pipe_framebuffer.cbufs[i]->texture);
2112 enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
2113 bool is_scanout = panfrost_is_scanout(ctx);
2114
2115 if (!is_scanout && tex->bo->layout != PAN_AFBC && panfrost_can_afbc(format)) {
2116 /* The blob is aggressive about enabling AFBC. As such,
2117 * it's pretty much necessary to use it here, since we
2118 * have no traces of non-compressed FBOs. */
2119
2120 panfrost_enable_afbc(ctx, tex, false);
2121 }
2122
2123 if (!is_scanout && !tex->bo->has_checksum) {
2124 /* Enable transaction elimination if we can */
2125 panfrost_enable_checksum(ctx, tex);
2126 }
2127 }
2128
2129 {
2130 struct pipe_surface *zb = fb->zsbuf;
2131
2132 if (ctx->pipe_framebuffer.zsbuf != zb) {
2133 pipe_surface_reference(&ctx->pipe_framebuffer.zsbuf, zb);
2134
2135 if (zb) {
2136 /* FBO has depth */
2137
2138 if (ctx->require_sfbd)
2139 ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
2140 else
2141 ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);
2142
2143 panfrost_attach_vt_framebuffer(ctx);
2144
2145 /* Keep the depth FBO linear */
2146 }
2147 }
2148 }
2149 }
2150
2151 static void *
2152 panfrost_create_blend_state(struct pipe_context *pipe,
2153 const struct pipe_blend_state *blend)
2154 {
2155 struct panfrost_context *ctx = pan_context(pipe);
2156 struct panfrost_blend_state *so = CALLOC_STRUCT(panfrost_blend_state);
2157 so->base = *blend;
2158
2159 /* TODO: The following features are not yet implemented */
2160 assert(!blend->logicop_enable);
2161 assert(!blend->alpha_to_coverage);
2162 assert(!blend->alpha_to_one);
2163
2164 /* Compile the blend state, first as fixed-function if we can */
2165
2166 if (panfrost_make_fixed_blend_mode(&blend->rt[0], so, blend->rt[0].colormask, &ctx->blend_color))
2167 return so;
2168
2169 /* If we can't, compile a blend shader instead */
2170
2171 panfrost_make_blend_shader(ctx, so, &ctx->blend_color);
2172
2173 return so;
2174 }
2175
2176 static void
2177 panfrost_bind_blend_state(struct pipe_context *pipe,
2178 void *cso)
2179 {
2180 struct panfrost_context *ctx = pan_context(pipe);
2181 struct pipe_blend_state *blend = (struct pipe_blend_state *) cso;
2182 struct panfrost_blend_state *pblend = (struct panfrost_blend_state *) cso;
2183 ctx->blend = pblend;
2184
2185 if (!blend)
2186 return;
2187
2188 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_DITHER, !blend->dither);
2189
2190 /* TODO: Attach color */
2191
2192 /* Shader itself is not dirty, but the shader core is */
2193 ctx->dirty |= PAN_DIRTY_FS;
2194 }
2195
2196 static void
2197 panfrost_delete_blend_state(struct pipe_context *pipe,
2198 void *blend)
2199 {
2200 struct panfrost_blend_state *so = (struct panfrost_blend_state *) blend;
2201
2202 if (so->has_blend_shader) {
2203 DBG("Deleting blend state leaks blend shader bytecode\n");
2204 }
2205
2206 free(blend);
2207 }
2208
2209 static void
2210 panfrost_set_blend_color(struct pipe_context *pipe,
2211 const struct pipe_blend_color *blend_color)
2212 {
2213 struct panfrost_context *ctx = pan_context(pipe);
2214
2215 /* If blend_color is NULL, we're unbinding, so ctx->blend_color is now undefined -> nothing to do */
2216
2217 if (blend_color) {
2218 ctx->blend_color = *blend_color;
2219
2220 /* The blend mode depends on the blend constant color, due to the
2221 * fixed/programmable split. So, we're forced to regenerate the blend
2222 * equation */
2223
2224 /* TODO: Attach color */
2225 }
2226 }
2227
2228 static void *
2229 panfrost_create_depth_stencil_state(struct pipe_context *pipe,
2230 const struct pipe_depth_stencil_alpha_state *depth_stencil)
2231 {
2232 return mem_dup(depth_stencil, sizeof(*depth_stencil));
2233 }
2234
2235 static void
2236 panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
2237 void *cso)
2238 {
2239 struct panfrost_context *ctx = pan_context(pipe);
2240 struct pipe_depth_stencil_alpha_state *depth_stencil = cso;
2241 ctx->depth_stencil = depth_stencil;
2242
2243 if (!depth_stencil)
2244 return;
2245
2246 /* The alpha test does not exist in the hardware (it's not in ES3), so
2247 * it's emulated in the fragment shader */
2248
2249 if (depth_stencil->alpha.enabled) {
2250 /* We need to trigger a new shader (maybe) */
2251 ctx->base.bind_fs_state(&ctx->base, ctx->fs);
2252 }
2253
2254 /* Stencil state */
2255 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_STENCIL_TEST, depth_stencil->stencil[0].enabled); /* XXX: which one? */
2256
2257 panfrost_make_stencil_state(&depth_stencil->stencil[0], &ctx->fragment_shader_core.stencil_front);
2258 ctx->fragment_shader_core.stencil_mask_front = depth_stencil->stencil[0].writemask;
2259
2260 panfrost_make_stencil_state(&depth_stencil->stencil[1], &ctx->fragment_shader_core.stencil_back);
2261 ctx->fragment_shader_core.stencil_mask_back = depth_stencil->stencil[1].writemask;
2262
2263 /* Depth state (TODO: Refactor) */
2264 SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_DEPTH_TEST, depth_stencil->depth.enabled);
2265
2266 int func = depth_stencil->depth.enabled ? depth_stencil->depth.func : PIPE_FUNC_ALWAYS;
2267
2268 ctx->fragment_shader_core.unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
2269 ctx->fragment_shader_core.unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(func));
2270
2271 /* Bounds test not implemented */
2272 assert(!depth_stencil->depth.bounds_test);
2273
2274 ctx->dirty |= PAN_DIRTY_FS;
2275 }
2276
2277 static void
2278 panfrost_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
2279 {
2280 free(depth);
2281 }
2282
2283 static void
2284 panfrost_set_sample_mask(struct pipe_context *pipe,
2285 unsigned sample_mask)
2286 {
2287 }
2288
2289 static void
2290 panfrost_set_clip_state(struct pipe_context *pipe,
2291 const struct pipe_clip_state *clip)
2292 {
2293 //struct panfrost_context *panfrost = pan_context(pipe);
2294 }
2295
2296 static void
2297 panfrost_set_viewport_states(struct pipe_context *pipe,
2298 unsigned start_slot,
2299 unsigned num_viewports,
2300 const struct pipe_viewport_state *viewports)
2301 {
2302 struct panfrost_context *ctx = pan_context(pipe);
2303
2304 assert(start_slot == 0);
2305 assert(num_viewports == 1);
2306
2307 ctx->pipe_viewport = *viewports;
2308
2309 #if 0
2310 /* TODO: What if not centered? */
2311 float w = fabsf(viewports->scale[0]) * 2.0; /* fabsf: abs() would truncate to int */
2312 float h = fabsf(viewports->scale[1]) * 2.0;
2313
2314 ctx->viewport.viewport1[0] = MALI_POSITIVE((int) w);
2315 ctx->viewport.viewport1[1] = MALI_POSITIVE((int) h);
2316 #endif
2317 }
2318
2319 static void
2320 panfrost_set_scissor_states(struct pipe_context *pipe,
2321 unsigned start_slot,
2322 unsigned num_scissors,
2323 const struct pipe_scissor_state *scissors)
2324 {
2325 struct panfrost_context *ctx = pan_context(pipe);
2326
2327 assert(start_slot == 0);
2328 assert(num_scissors == 1);
2329
2330 ctx->scissor = *scissors;
2331 }
2332
2333 static void
2334 panfrost_set_polygon_stipple(struct pipe_context *pipe,
2335 const struct pipe_poly_stipple *stipple)
2336 {
2337 //struct panfrost_context *panfrost = pan_context(pipe);
2338 }
2339
2340 static void
2341 panfrost_set_active_query_state(struct pipe_context *pipe,
2342 boolean enable)
2343 {
2344 //struct panfrost_context *panfrost = pan_context(pipe);
2345 }
2346
2347 static void
2348 panfrost_destroy(struct pipe_context *pipe)
2349 {
2350 struct panfrost_context *panfrost = pan_context(pipe);
2351 struct panfrost_screen *screen = pan_screen(pipe->screen);
2352
2353 if (panfrost->blitter)
2354 util_blitter_destroy(panfrost->blitter);
2355
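/* Release the slabs allocated in panfrost_setup_hardware */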
2356 screen->driver->free_slab(screen, &panfrost->scratchpad);
2357 screen->driver->free_slab(screen, &panfrost->varying_mem);
2358 screen->driver->free_slab(screen, &panfrost->shaders);
2359 screen->driver->free_slab(screen, &panfrost->tiler_heap);
2360 screen->driver->free_slab(screen, &panfrost->misc_0);
2361 }
2362
2363 static struct pipe_query *
2364 panfrost_create_query(struct pipe_context *pipe,
2365 unsigned type,
2366 unsigned index)
2367 {
2368 struct panfrost_query *q = CALLOC_STRUCT(panfrost_query);
2369
2370 q->type = type;
2371 q->index = index;
2372
2373 return (struct pipe_query *) q;
2374 }
2375
2376 static void
2377 panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
2378 {
2379 FREE(q);
2380 }
2381
2382 static boolean
2383 panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
2384 {
2385 struct panfrost_context *ctx = pan_context(pipe);
2386 struct panfrost_query *query = (struct panfrost_query *) q;
2387
2388 switch (query->type) {
2389 case PIPE_QUERY_OCCLUSION_COUNTER:
2390 case PIPE_QUERY_OCCLUSION_PREDICATE:
2391 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
2392 {
2393 /* Allocate a word for the query results to be stored */
2394 query->transfer = panfrost_allocate_chunk(ctx, sizeof(unsigned), HEAP_DESCRIPTOR);
2395
2396 ctx->occlusion_query = query;
2397
2398 break;
2399 }
2400
2401 default:
2402 DBG("Skipping query %d\n", query->type);
2403 break;
2404 }
2405
2406 return true;
2407 }
2408
2409 static bool
2410 panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
2411 {
2412 struct panfrost_context *ctx = pan_context(pipe);
2413 ctx->occlusion_query = NULL;
2414 return true;
2415 }
2416
2417 static boolean
2418 panfrost_get_query_result(struct pipe_context *pipe,
2419 struct pipe_query *q,
2420 boolean wait,
2421 union pipe_query_result *vresult)
2422 {
2423 /* STUB */
2424 struct panfrost_query *query = (struct panfrost_query *) q;
2425
2426 /* We need to flush out the jobs to actually run the counter. TODO:
2427 * check the wait flag; TODO: wallpaper afterwards if needed */
2428
2429 panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);
2430
2431 switch (query->type) {
2432 case PIPE_QUERY_OCCLUSION_COUNTER:
2433 case PIPE_QUERY_OCCLUSION_PREDICATE:
2434 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
2435 /* Read back the query results */
2436 unsigned *result = (unsigned *) query->transfer.cpu;
2437 unsigned passed = *result;
2438
2439 if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
2440 vresult->u64 = passed;
2441 } else {
2442 vresult->b = !!passed;
2443 }
2444
2445 break;
2446 }
2447 default:
2448 DBG("Skipped query get %d\n", query->type);
2449 break;
2450 }
2451
2452 return true;
2453 }
2454
2455 static struct pipe_stream_output_target *
2456 panfrost_create_stream_output_target(struct pipe_context *pctx,
2457 struct pipe_resource *prsc,
2458 unsigned buffer_offset,
2459 unsigned buffer_size)
2460 {
2461 struct pipe_stream_output_target *target;
2462
2463 target = CALLOC_STRUCT(pipe_stream_output_target);
2464
2465 if (!target)
2466 return NULL;
2467
2468 pipe_reference_init(&target->reference, 1);
2469 pipe_resource_reference(&target->buffer, prsc);
2470
2471 target->context = pctx;
2472 target->buffer_offset = buffer_offset;
2473 target->buffer_size = buffer_size;
2474
2475 return target;
2476 }
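/* Stream output isn't wired up to the hardware yet: targets are only
 * recorded CPU-side so state tracking doesn't fall over (note the STUB
 * in panfrost_set_stream_output_targets below). */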
2477
2478 static void
2479 panfrost_stream_output_target_destroy(struct pipe_context *pctx,
2480 struct pipe_stream_output_target *target)
2481 {
2482 pipe_resource_reference(&target->buffer, NULL);
2483 free(target);
2484 }
2485
2486 static void
2487 panfrost_set_stream_output_targets(struct pipe_context *pctx,
2488 unsigned num_targets,
2489 struct pipe_stream_output_target **targets,
2490 const unsigned *offsets)
2491 {
2492 /* STUB */
2493 }
2494
2495 static void
2496 panfrost_setup_hardware(struct panfrost_context *ctx)
2497 {
2498 struct pipe_context *gallium = (struct pipe_context *) ctx;
2499 struct panfrost_screen *screen = pan_screen(gallium->screen);
2500
2501 for (int i = 0; i < ARRAY_SIZE(ctx->transient_pools); ++i) {
2502 /* Allocate the beginning of the transient pool */
2503 int entry_size = (1 << 22); /* 4MB */
2504
2505 ctx->transient_pools[i].entry_size = entry_size;
2506 ctx->transient_pools[i].entry_count = 1;
2507
2508 ctx->transient_pools[i].entries[0] = (struct panfrost_memory_entry *) pb_slab_alloc(&screen->slabs, entry_size, HEAP_TRANSIENT);
2509 }
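/* Each pool starts with a single 4MB slab; further entries are
 * presumably appended on demand when transient allocations overflow
 * the current slab mid-frame. */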
2510
2511 screen->driver->allocate_slab(screen, &ctx->scratchpad, 64, false, 0, 0, 0);
2512 screen->driver->allocate_slab(screen, &ctx->varying_mem, 16384, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_COHERENT_LOCAL, 0, 0);
2513 screen->driver->allocate_slab(screen, &ctx->shaders, 4096, true, PAN_ALLOCATE_EXECUTE, 0, 0);
2514 screen->driver->allocate_slab(screen, &ctx->tiler_heap, 32768, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
2515 screen->driver->allocate_slab(screen, &ctx->misc_0, 128*128, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
2516
2517 }
2518
2519 /* New context creation, which also does hardware initialisation, since I
2520 * don't know a better way to structure this :smirk: */
2521
2522 struct pipe_context *
2523 panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
2524 {
2525 struct panfrost_context *ctx = CALLOC_STRUCT(panfrost_context);
2526 struct panfrost_screen *pscreen = pan_screen(screen);
2527 /* No memset needed: CALLOC_STRUCT already zero-initialises *ctx */
2528 struct pipe_context *gallium = (struct pipe_context *) ctx;
2529 unsigned gpu_id;
2530
2531 gpu_id = pscreen->driver->query_gpu_version(pscreen);
2532
2533 ctx->is_t6xx = gpu_id <= 0x0750; /* For now, this flag means T760 or less */
2534 ctx->require_sfbd = gpu_id < 0x0750; /* T760 is the first to support MFBD */
2535
2536 gallium->screen = screen;
2537
2538 gallium->destroy = panfrost_destroy;
2539
2540 gallium->set_framebuffer_state = panfrost_set_framebuffer_state;
2541
2542 gallium->flush = panfrost_flush;
2543 gallium->clear = panfrost_clear;
2544 gallium->draw_vbo = panfrost_draw_vbo;
2545
2546 gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
2547 gallium->set_constant_buffer = panfrost_set_constant_buffer;
2548
2549 gallium->set_stencil_ref = panfrost_set_stencil_ref;
2550
2551 gallium->create_sampler_view = panfrost_create_sampler_view;
2552 gallium->set_sampler_views = panfrost_set_sampler_views;
2553 gallium->sampler_view_destroy = panfrost_sampler_view_destroy;
2554
2555 gallium->create_rasterizer_state = panfrost_create_rasterizer_state;
2556 gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
2557 gallium->delete_rasterizer_state = panfrost_generic_cso_delete;
2558
2559 gallium->create_vertex_elements_state = panfrost_create_vertex_elements_state;
2560 gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
2561 gallium->delete_vertex_elements_state = panfrost_generic_cso_delete;
2562
2563 gallium->create_fs_state = panfrost_create_shader_state;
2564 gallium->delete_fs_state = panfrost_delete_shader_state;
2565 gallium->bind_fs_state = panfrost_bind_fs_state;
2566
2567 gallium->create_vs_state = panfrost_create_shader_state;
2568 gallium->delete_vs_state = panfrost_delete_shader_state;
2569 gallium->bind_vs_state = panfrost_bind_vs_state;
2570
2571 gallium->create_sampler_state = panfrost_create_sampler_state;
2572 gallium->delete_sampler_state = panfrost_generic_cso_delete;
2573 gallium->bind_sampler_states = panfrost_bind_sampler_states;
2574
2575 gallium->create_blend_state = panfrost_create_blend_state;
2576 gallium->bind_blend_state = panfrost_bind_blend_state;
2577 gallium->delete_blend_state = panfrost_delete_blend_state;
2578
2579 gallium->set_blend_color = panfrost_set_blend_color;
2580
2581 gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
2582 gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
2583 gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;
2584
2585 gallium->set_sample_mask = panfrost_set_sample_mask;
2586
2587 gallium->set_clip_state = panfrost_set_clip_state;
2588 gallium->set_viewport_states = panfrost_set_viewport_states;
2589 gallium->set_scissor_states = panfrost_set_scissor_states;
2590 gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
2591 gallium->set_active_query_state = panfrost_set_active_query_state;
2592
2593 gallium->create_query = panfrost_create_query;
2594 gallium->destroy_query = panfrost_destroy_query;
2595 gallium->begin_query = panfrost_begin_query;
2596 gallium->end_query = panfrost_end_query;
2597 gallium->get_query_result = panfrost_get_query_result;
2598
2599 gallium->create_stream_output_target = panfrost_create_stream_output_target;
2600 gallium->stream_output_target_destroy = panfrost_stream_output_target_destroy;
2601 gallium->set_stream_output_targets = panfrost_set_stream_output_targets;
2602
2603 panfrost_resource_context_init(gallium);
2604
2605 pscreen->driver->init_context(ctx);
2606
2607 panfrost_setup_hardware(ctx);
2608
2609 /* XXX: leaks */
2610 gallium->stream_uploader = u_upload_create_default(gallium);
2611 gallium->const_uploader = gallium->stream_uploader;
2612 assert(gallium->stream_uploader);
2613
2614 /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */
2615 ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1;
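/* i.e. one bit per primitive type from PIPE_PRIM_POINTS (bit 0) up
 * through PIPE_PRIM_POLYGON; types outside the mask are lowered by the
 * u_primconvert module created below. */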
2616
2617 ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);
2618
2619 ctx->blitter = util_blitter_create(gallium);
2620 assert(ctx->blitter);
2621
2622 /* Prepare for render! */
2623
2624 panfrost_job_init(ctx);
2625 panfrost_emit_vertex_payload(ctx);
2626 panfrost_emit_tiler_payload(ctx);
2627 panfrost_invalidate_frame(ctx);
2628 panfrost_default_shader_backend(ctx);
2629 panfrost_generate_space_filler_indices();
2630
2631 return gallium;
2632 }