panfrost: Disable pipelining temporarily
[mesa.git] / src / gallium / drivers / panfrost / pan_context.c
1 /*
2 * © Copyright 2018 Alyssa Rosenzweig
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 */
24
25 #include <sys/poll.h>
26 #include <errno.h>
27
28 #include "pan_context.h"
29 #include "pan_swizzle.h"
30 #include "pan_format.h"
31
32 #include "util/macros.h"
33 #include "util/u_format.h"
34 #include "util/u_inlines.h"
35 #include "util/u_upload_mgr.h"
36 #include "util/u_memory.h"
37 #include "util/u_vbuf.h"
38 #include "util/half_float.h"
39 #include "util/u_helpers.h"
40 #include "util/u_format.h"
41 #include "indices/u_primconvert.h"
42 #include "tgsi/tgsi_parse.h"
43 #include "util/u_math.h"
44
45 #include "pan_screen.h"
46 #include "pan_blending.h"
47 #include "pan_blend_shaders.h"
48 #include "pan_util.h"
49 #include "pan_tiler.h"
50
51 static int performance_counter_number = 0;
52 extern const char *pan_counters_base;
53
54 /* Do not actually send anything to the GPU; merely generate the cmdstream as fast as possible. Disables framebuffer writes */
55 //#define DRY_RUN
56
57 static enum mali_job_type
58 panfrost_job_type_for_pipe(enum pipe_shader_type type)
59 {
60 switch (type) {
61 case PIPE_SHADER_VERTEX:
62 return JOB_TYPE_VERTEX;
63
64 case PIPE_SHADER_FRAGMENT:
65 /* Note: JOB_TYPE_FRAGMENT is different.
66 * JOB_TYPE_FRAGMENT actually executes the
67 * fragment shader, but JOB_TYPE_TILER is how you
68 * specify it */
69 return JOB_TYPE_TILER;
70
71 case PIPE_SHADER_GEOMETRY:
72 return JOB_TYPE_GEOMETRY;
73
74 case PIPE_SHADER_COMPUTE:
75 return JOB_TYPE_COMPUTE;
76
77 default:
78 unreachable("Unsupported shader stage");
79 }
80 }
81
82 static void
83 panfrost_enable_checksum(struct panfrost_context *ctx, struct panfrost_resource *rsrc)
84 {
85 struct pipe_context *gallium = (struct pipe_context *) ctx;
86 struct panfrost_screen *screen = pan_screen(gallium->screen);
87 int tile_w = (rsrc->base.width0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
88 int tile_h = (rsrc->base.height0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
89
90 /* 8 byte checksum per tile */
91 rsrc->bo->checksum_stride = tile_w * 8;
92 int pages = (((rsrc->bo->checksum_stride * tile_h) + 4095) / 4096);
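/* Worked example (assuming MALI_TILE_LENGTH == 16, i.e. MALI_TILE_SHIFT == 4):
 * a 1920x1080 surface gives tile_w = 120 and tile_h = 68, so checksum_stride =
 * 960 bytes and 960 * 68 = 65280 bytes of checksums, rounding up to 16 pages */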
93 screen->driver->allocate_slab(screen, &rsrc->bo->checksum_slab, pages, false, 0, 0, 0);
94
95 rsrc->bo->has_checksum = true;
96 }
97
98 /* Framebuffer descriptor */
99
100 static void
101 panfrost_set_framebuffer_resolution(struct mali_single_framebuffer *fb, int w, int h)
102 {
103 fb->width = MALI_POSITIVE(w);
104 fb->height = MALI_POSITIVE(h);
105
106 /* No idea why this is needed, but it's how resolution_check is
107 * calculated. It's not clear to us yet why the hardware wants this.
108 * The formula itself was discovered mostly by manual bruteforce and
109 * aggressive algebraic simplification. */
110
111 fb->tiler_resolution_check = ((w + h) / 3) << 4;
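/* e.g. a 1920x1080 framebuffer gives ((1920 + 1080) / 3) << 4 = 1000 << 4
 * = 16000 = 0x3e80 */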
112 }
113
114 struct mali_single_framebuffer
115 panfrost_emit_sfbd(struct panfrost_context *ctx, unsigned vertex_count)
116 {
117 struct mali_single_framebuffer framebuffer = {
118 .unknown2 = 0x1f,
119 .format = 0x30000000,
120 .clear_flags = 0x1000,
121 .unknown_address_0 = ctx->scratchpad.gpu,
122 .tiler_polygon_list = ctx->tiler_polygon_list.gpu,
123 .tiler_polygon_list_body = ctx->tiler_polygon_list.gpu + 40960,
124 .tiler_hierarchy_mask = 0xF0,
125 .tiler_flags = 0x0,
126 .tiler_heap_free = ctx->tiler_heap.gpu,
127 .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size,
128 };
129
130 panfrost_set_framebuffer_resolution(&framebuffer, ctx->pipe_framebuffer.width, ctx->pipe_framebuffer.height);
131
132 return framebuffer;
133 }
134
135 struct bifrost_framebuffer
136 panfrost_emit_mfbd(struct panfrost_context *ctx, unsigned vertex_count)
137 {
138 unsigned width = ctx->pipe_framebuffer.width;
139 unsigned height = ctx->pipe_framebuffer.height;
140
141 struct bifrost_framebuffer framebuffer = {
142 .width1 = MALI_POSITIVE(width),
143 .height1 = MALI_POSITIVE(height),
144 .width2 = MALI_POSITIVE(width),
145 .height2 = MALI_POSITIVE(height),
146
147 .unk1 = 0x1080,
148
149 /* TODO: MRT */
150 .rt_count_1 = MALI_POSITIVE(1),
151 .rt_count_2 = 4,
152
153 .unknown2 = 0x1f,
154
155 .scratchpad = ctx->scratchpad.gpu,
156 };
157
158 framebuffer.tiler_hierarchy_mask =
159 panfrost_choose_hierarchy_mask(width, height, vertex_count);
160
161 /* Compute the polygon header size and use that to offset the body */
162
163 unsigned header_size = panfrost_tiler_header_size(
164 width, height, framebuffer.tiler_hierarchy_mask);
165
166 unsigned body_size = panfrost_tiler_body_size(
167 width, height, framebuffer.tiler_hierarchy_mask);
168
169 /* Sanity check */
170
171 unsigned total_size = header_size + body_size;
172
173 if (framebuffer.tiler_hierarchy_mask) {
174 assert(ctx->tiler_polygon_list.size >= total_size);
175
176 /* Specify allocated tiler structures */
177 framebuffer.tiler_polygon_list = ctx->tiler_polygon_list.gpu;
178
179 /* Allow the entire tiler heap */
180 framebuffer.tiler_heap_start = ctx->tiler_heap.gpu;
181 framebuffer.tiler_heap_end =
182 ctx->tiler_heap.gpu + ctx->tiler_heap.size;
183 } else {
184 /* The tiler is disabled, so don't allow the tiler heap */
185 framebuffer.tiler_heap_start = ctx->tiler_heap.gpu;
186 framebuffer.tiler_heap_end = framebuffer.tiler_heap_start;
187
188 /* Use a dummy polygon list */
189 framebuffer.tiler_polygon_list = ctx->tiler_dummy.gpu;
190
191 /* Also, set a "tiler disabled?" flag? */
192 framebuffer.tiler_hierarchy_mask |= 0x1000;
193 }
194
195 framebuffer.tiler_polygon_list_body =
196 framebuffer.tiler_polygon_list + header_size;
197
198 framebuffer.tiler_polygon_list_size =
199 header_size + body_size;
200
203 return framebuffer;
204 }
205
206 /* Are we currently rendering to the screen (rather than an FBO)? */
207
208 bool
209 panfrost_is_scanout(struct panfrost_context *ctx)
210 {
211 /* If there is no color buffer, it's an FBO */
212 if (!ctx->pipe_framebuffer.nr_cbufs)
213 return false;
214
215 /* If we're so early that no framebuffer has been set yet, assume scanout */
216 if (!ctx->pipe_framebuffer.cbufs[0])
217 return true;
218
219 return ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_DISPLAY_TARGET ||
220 ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SCANOUT ||
221 ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
222 }
223
224 static uint32_t
225 pan_pack_color(const union pipe_color_union *color, enum pipe_format format)
226 {
227 /* Alpha magicked to 1.0 if there is no alpha */
228
229 bool has_alpha = util_format_has_alpha(format);
230 float clear_alpha = has_alpha ? color->f[3] : 1.0f;
231
232 /* Packed color depends on the framebuffer format */
233
234 const struct util_format_description *desc =
235 util_format_description(format);
236
237 if (util_format_is_rgba8_variant(desc)) {
238 return (float_to_ubyte(clear_alpha) << 24) |
239 (float_to_ubyte(color->f[2]) << 16) |
240 (float_to_ubyte(color->f[1]) << 8) |
241 (float_to_ubyte(color->f[0]) << 0);
242 } else if (format == PIPE_FORMAT_B5G6R5_UNORM) {
243 /* First, we convert the components to R5, G6, B5 separately */
244 unsigned r5 = CLAMP(color->f[0], 0.0, 1.0) * 31.0;
245 unsigned g6 = CLAMP(color->f[1], 0.0, 1.0) * 63.0;
246 unsigned b5 = CLAMP(color->f[2], 0.0, 1.0) * 31.0;
247
248 /* Then we pack into a sparse u32. TODO: Why these shifts? */
249 return (b5 << 25) | (g6 << 14) | (r5 << 5);
250 } else {
251 /* Unknown format */
252 assert(0);
253 }
254
255 return 0;
256 }
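/* For instance, clearing an RGBA8 target to opaque red (1.0, 0.0, 0.0, 1.0)
 * packs as (0xff << 24) | (0x00 << 16) | (0x00 << 8) | (0xff << 0)
 * = 0xff0000ff */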
257
258 static void
259 panfrost_clear(
260 struct pipe_context *pipe,
261 unsigned buffers,
262 const union pipe_color_union *color,
263 double depth, unsigned stencil)
264 {
265 struct panfrost_context *ctx = pan_context(pipe);
266 struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
267
268 if (buffers & PIPE_CLEAR_COLOR) {
269 enum pipe_format format = ctx->pipe_framebuffer.cbufs[0]->format;
270 job->clear_color = pan_pack_color(color, format);
271 }
272
273 if (buffers & PIPE_CLEAR_DEPTH) {
274 job->clear_depth = depth;
275 }
276
277 if (buffers & PIPE_CLEAR_STENCIL) {
278 job->clear_stencil = stencil;
279 }
280
281 job->clear |= buffers;
282 }
283
284 static mali_ptr
285 panfrost_attach_vt_mfbd(struct panfrost_context *ctx)
286 {
287 return panfrost_upload_transient(ctx, &ctx->vt_framebuffer_mfbd, sizeof(ctx->vt_framebuffer_mfbd)) | MALI_MFBD;
288 }
289
290 static mali_ptr
291 panfrost_attach_vt_sfbd(struct panfrost_context *ctx)
292 {
293 return panfrost_upload_transient(ctx, &ctx->vt_framebuffer_sfbd, sizeof(ctx->vt_framebuffer_sfbd)) | MALI_SFBD;
294 }
295
296 static void
297 panfrost_attach_vt_framebuffer(struct panfrost_context *ctx)
298 {
299 mali_ptr framebuffer = ctx->require_sfbd ?
300 panfrost_attach_vt_sfbd(ctx) :
301 panfrost_attach_vt_mfbd(ctx);
302
303 ctx->payload_vertex.postfix.framebuffer = framebuffer;
304 ctx->payload_tiler.postfix.framebuffer = framebuffer;
305 }
306
307 /* Reset per-frame context, called on context initialisation as well as after
308 * flushing a frame */
309
310 static void
311 panfrost_invalidate_frame(struct panfrost_context *ctx)
312 {
313 unsigned transient_count = ctx->transient_pools[ctx->cmdstream_i].entry_index*ctx->transient_pools[0].entry_size + ctx->transient_pools[ctx->cmdstream_i].entry_offset;
314 DBG("Uploaded transient %d bytes\n", transient_count);
315
316 /* Rotate cmdstream */
317 if ((++ctx->cmdstream_i) == (sizeof(ctx->transient_pools) / sizeof(ctx->transient_pools[0])))
318 ctx->cmdstream_i = 0;
319
320 if (ctx->require_sfbd)
321 ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
322 else
323 ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);
324
325 /* Reset varyings allocated */
326 ctx->varying_height = 0;
327
328 /* The transient cmdstream is dirty every frame; the only bits worth preserving
329 * (textures, shaders, etc.) live in other buffers anyway */
330
331 ctx->transient_pools[ctx->cmdstream_i].entry_index = 0;
332 ctx->transient_pools[ctx->cmdstream_i].entry_offset = 0;
333
334 /* Regenerate payloads */
335 panfrost_attach_vt_framebuffer(ctx);
336
337 if (ctx->rasterizer)
338 ctx->dirty |= PAN_DIRTY_RASTERIZER;
339
340 /* XXX */
341 ctx->dirty |= PAN_DIRTY_SAMPLERS | PAN_DIRTY_TEXTURES;
342
343 /* Reset job counters */
344 ctx->draw_count = 0;
345 ctx->vertex_job_count = 0;
346 ctx->tiler_job_count = 0;
347 }
348
349 /* In practice, every field of these payloads should be configurable
350 * arbitrarily, which means these functions are basically catch-alls for
351 * as-yet-unexplained unknowns */
352
353 static void
354 panfrost_emit_vertex_payload(struct panfrost_context *ctx)
355 {
356 struct midgard_payload_vertex_tiler payload = {
357 .prefix = {
358 .workgroups_z_shift = 32,
359 .workgroups_x_shift_2 = 0x2,
360 .workgroups_x_shift_3 = 0x5,
361 },
362 .gl_enables = 0x4 | (ctx->is_t6xx ? 0 : 0x2),
363 };
364
365 memcpy(&ctx->payload_vertex, &payload, sizeof(payload));
366 }
367
368 static void
369 panfrost_emit_tiler_payload(struct panfrost_context *ctx)
370 {
371 struct midgard_payload_vertex_tiler payload = {
372 .prefix = {
373 .workgroups_z_shift = 32,
374 .workgroups_x_shift_2 = 0x2,
375 .workgroups_x_shift_3 = 0x6,
376
377 .zero1 = 0xffff, /* Why is this only seen on test-quad-textured? */
378 },
379 };
380
381 memcpy(&ctx->payload_tiler, &payload, sizeof(payload));
382 }
383
384 static unsigned
385 translate_tex_wrap(enum pipe_tex_wrap w)
386 {
387 switch (w) {
388 case PIPE_TEX_WRAP_REPEAT:
389 return MALI_WRAP_REPEAT;
390
391 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
392 return MALI_WRAP_CLAMP_TO_EDGE;
393
394 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
395 return MALI_WRAP_CLAMP_TO_BORDER;
396
397 case PIPE_TEX_WRAP_MIRROR_REPEAT:
398 return MALI_WRAP_MIRRORED_REPEAT;
399
400 default:
401 unreachable("Invalid wrap");
402 }
403 }
404
405 static unsigned
406 translate_tex_filter(enum pipe_tex_filter f)
407 {
408 switch (f) {
409 case PIPE_TEX_FILTER_NEAREST:
410 return MALI_NEAREST;
411
412 case PIPE_TEX_FILTER_LINEAR:
413 return MALI_LINEAR;
414
415 default:
416 unreachable("Invalid filter");
417 }
418 }
419
420 static unsigned
421 translate_mip_filter(enum pipe_tex_mipfilter f)
422 {
423 return (f == PIPE_TEX_MIPFILTER_LINEAR) ? MALI_MIP_LINEAR : 0;
424 }
425
426 static unsigned
427 panfrost_translate_compare_func(enum pipe_compare_func in)
428 {
429 switch (in) {
430 case PIPE_FUNC_NEVER:
431 return MALI_FUNC_NEVER;
432
433 case PIPE_FUNC_LESS:
434 return MALI_FUNC_LESS;
435
436 case PIPE_FUNC_EQUAL:
437 return MALI_FUNC_EQUAL;
438
439 case PIPE_FUNC_LEQUAL:
440 return MALI_FUNC_LEQUAL;
441
442 case PIPE_FUNC_GREATER:
443 return MALI_FUNC_GREATER;
444
445 case PIPE_FUNC_NOTEQUAL:
446 return MALI_FUNC_NOTEQUAL;
447
448 case PIPE_FUNC_GEQUAL:
449 return MALI_FUNC_GEQUAL;
450
451 case PIPE_FUNC_ALWAYS:
452 return MALI_FUNC_ALWAYS;
453
454 default:
455 unreachable("Invalid func");
456 }
457 }
458
459 static unsigned
460 panfrost_translate_alt_compare_func(enum pipe_compare_func in)
461 {
462 switch (in) {
463 case PIPE_FUNC_NEVER:
464 return MALI_ALT_FUNC_NEVER;
465
466 case PIPE_FUNC_LESS:
467 return MALI_ALT_FUNC_LESS;
468
469 case PIPE_FUNC_EQUAL:
470 return MALI_ALT_FUNC_EQUAL;
471
472 case PIPE_FUNC_LEQUAL:
473 return MALI_ALT_FUNC_LEQUAL;
474
475 case PIPE_FUNC_GREATER:
476 return MALI_ALT_FUNC_GREATER;
477
478 case PIPE_FUNC_NOTEQUAL:
479 return MALI_ALT_FUNC_NOTEQUAL;
480
481 case PIPE_FUNC_GEQUAL:
482 return MALI_ALT_FUNC_GEQUAL;
483
484 case PIPE_FUNC_ALWAYS:
485 return MALI_ALT_FUNC_ALWAYS;
486
487 default:
488 unreachable("Invalid alt func");
489 }
490 }
491
492 static unsigned
493 panfrost_translate_stencil_op(enum pipe_stencil_op in)
494 {
495 switch (in) {
496 case PIPE_STENCIL_OP_KEEP:
497 return MALI_STENCIL_KEEP;
498
499 case PIPE_STENCIL_OP_ZERO:
500 return MALI_STENCIL_ZERO;
501
502 case PIPE_STENCIL_OP_REPLACE:
503 return MALI_STENCIL_REPLACE;
504
505 case PIPE_STENCIL_OP_INCR:
506 return MALI_STENCIL_INCR;
507
508 case PIPE_STENCIL_OP_DECR:
509 return MALI_STENCIL_DECR;
510
511 case PIPE_STENCIL_OP_INCR_WRAP:
512 return MALI_STENCIL_INCR_WRAP;
513
514 case PIPE_STENCIL_OP_DECR_WRAP:
515 return MALI_STENCIL_DECR_WRAP;
516
517 case PIPE_STENCIL_OP_INVERT:
518 return MALI_STENCIL_INVERT;
519
520 default:
521 unreachable("Invalid stencil op");
522 }
523 }
524
525 static void
526 panfrost_make_stencil_state(const struct pipe_stencil_state *in, struct mali_stencil_test *out)
527 {
528 out->ref = 0; /* Gallium gets it from elsewhere */
529
530 out->mask = in->valuemask;
531 out->func = panfrost_translate_compare_func(in->func);
532 out->sfail = panfrost_translate_stencil_op(in->fail_op);
533 out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
534 out->dppass = panfrost_translate_stencil_op(in->zpass_op);
535 }
536
537 static void
538 panfrost_default_shader_backend(struct panfrost_context *ctx)
539 {
540 struct mali_shader_meta shader = {
541 .alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000),
542
543 .unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010,
544 .unknown2_4 = MALI_NO_MSAA | 0x4e0,
545 };
546
547 if (ctx->is_t6xx) {
548 shader.unknown2_4 |= 0x10;
549 }
550
551 struct pipe_stencil_state default_stencil = {
552 .enabled = 0,
553 .func = PIPE_FUNC_ALWAYS,
554 .fail_op = PIPE_STENCIL_OP_KEEP,
555 .zfail_op = PIPE_STENCIL_OP_KEEP,
556 .zpass_op = PIPE_STENCIL_OP_KEEP,
557 .writemask = 0xFF,
558 .valuemask = 0xFF
559 };
560
561 panfrost_make_stencil_state(&default_stencil, &shader.stencil_front);
562 shader.stencil_mask_front = default_stencil.writemask;
563
564 panfrost_make_stencil_state(&default_stencil, &shader.stencil_back);
565 shader.stencil_mask_back = default_stencil.writemask;
566
567 if (default_stencil.enabled)
568 shader.unknown2_4 |= MALI_STENCIL_TEST;
569
570 memcpy(&ctx->fragment_shader_core, &shader, sizeof(shader));
571 }
572
573 static void
574 panfrost_link_job_pair(struct mali_job_descriptor_header *first, mali_ptr next)
575 {
576 if (first->job_descriptor_size)
577 first->next_job_64 = (u64) (uintptr_t) next;
578 else
579 first->next_job_32 = (u32) (uintptr_t) next;
580 }
581
582 /* Generates a vertex/tiler job. This is, in some sense, the heart of the
583 * graphics command stream. It should be called once per draw, according to
584 * presentations. Set is_tiler for "tiler" jobs (fragment shader jobs, but in
585 * Mali parlance, "fragment" refers to framebuffer writeout). Clear it for
586 * vertex jobs. */
587
588 struct panfrost_transfer
589 panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler)
590 {
591 /* Each draw call corresponds to two jobs, and the set-value job is first */
592 int draw_job_index = 1 + (2 * ctx->draw_count) + 1;
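/* e.g. with the set-value job at index 1, draw #0 emits vertex job index 2
 * and tiler job index 3; draw #1 emits indices 4 and 5, and so on */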
593
594 struct mali_job_descriptor_header job = {
595 .job_type = is_tiler ? JOB_TYPE_TILER : JOB_TYPE_VERTEX,
596 .job_index = draw_job_index + (is_tiler ? 1 : 0),
597 #ifdef __LP64__
598 .job_descriptor_size = 1,
599 #endif
600 };
601
602 struct midgard_payload_vertex_tiler *payload = is_tiler ? &ctx->payload_tiler : &ctx->payload_vertex;
603
604 /* There are some padding hacks on 32-bit */
605
606 #ifdef __LP64__
607 int offset = 0;
608 #else
609 int offset = 4;
610 #endif
611 struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(*payload));
612
613 if (is_tiler) {
614 /* Tiler jobs depend on vertex jobs */
615
616 job.job_dependency_index_1 = draw_job_index;
617
618 /* Tiler jobs also depend on the previous tiler job */
619
620 if (ctx->draw_count) {
621 job.job_dependency_index_2 = draw_job_index - 1;
622 /* Previous tiler job points to this tiler job */
623 panfrost_link_job_pair(ctx->u_tiler_jobs[ctx->draw_count - 1], transfer.gpu);
624 } else {
625 /* The only vertex job so far points to first tiler job */
626 panfrost_link_job_pair(ctx->u_vertex_jobs[0], transfer.gpu);
627 }
628 } else {
629 if (ctx->draw_count) {
630 /* Previous vertex job points to this vertex job */
631 panfrost_link_job_pair(ctx->u_vertex_jobs[ctx->draw_count - 1], transfer.gpu);
632
633 /* Last vertex job points to first tiler job */
634 panfrost_link_job_pair(&job, ctx->tiler_jobs[0]);
635 } else {
636 /* Have the first vertex job depend on the set value job */
637 job.job_dependency_index_1 = ctx->u_set_value_job->job_index;
638 panfrost_link_job_pair(ctx->u_set_value_job, transfer.gpu);
639 }
640 }
641
642 memcpy(transfer.cpu, &job, sizeof(job));
643 memcpy(transfer.cpu + sizeof(job) - offset, payload, sizeof(*payload));
644 return transfer;
645 }
646
647 /* Generates a set value job. It's unclear what exactly this does, why it's
648 * necessary, and when to call it. */
649
650 static void
651 panfrost_set_value_job(struct panfrost_context *ctx)
652 {
653 struct mali_job_descriptor_header job = {
654 .job_type = JOB_TYPE_SET_VALUE,
655 .job_descriptor_size = 1,
656 .job_index = 1,
657 };
658
659 struct mali_payload_set_value payload = {
660 .out = ctx->tiler_polygon_list.gpu,
661 .unknown = 0x3,
662 };
663
664 struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(payload));
665 memcpy(transfer.cpu, &job, sizeof(job));
666 memcpy(transfer.cpu + sizeof(job), &payload, sizeof(payload));
667
668 ctx->u_set_value_job = (struct mali_job_descriptor_header *) transfer.cpu;
669 ctx->set_value_job = transfer.gpu;
670 }
671
672 static mali_ptr
673 panfrost_emit_varyings(
674 struct panfrost_context *ctx,
675 union mali_attr *slot,
676 unsigned stride,
677 unsigned count)
678 {
679 mali_ptr varying_address = ctx->varying_mem.gpu + ctx->varying_height;
680
681 /* Fill out the descriptor */
682 slot->elements = varying_address | MALI_ATTR_LINEAR;
683 slot->stride = stride;
684 slot->size = stride * count;
685
686 ctx->varying_height += ALIGN(slot->size, 64);
687 assert(ctx->varying_height < ctx->varying_mem.size);
688
689 return varying_address;
690 }
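/* e.g. a fp32 vec4 varying for 100 invocations gets stride = 16 and size =
 * 1600, and varying_height advances by ALIGN(1600, 64) = 1600 */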
691
692 static void
693 panfrost_emit_point_coord(union mali_attr *slot)
694 {
695 slot->elements = MALI_VARYING_POINT_COORD | MALI_ATTR_LINEAR;
696 slot->stride = slot->size = 0;
697 }
698
699 static void
700 panfrost_emit_varying_descriptor(
701 struct panfrost_context *ctx,
702 unsigned invocation_count)
703 {
704 /* Load the shaders */
705
706 struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
707 struct panfrost_shader_state *fs = &ctx->fs->variants[ctx->fs->active_variant];
708 unsigned int num_gen_varyings = 0;
709
710 /* Allocate the varying descriptor */
711
712 size_t vs_size = sizeof(struct mali_attr_meta) * vs->tripipe->varying_count;
713 size_t fs_size = sizeof(struct mali_attr_meta) * fs->tripipe->varying_count;
714
715 struct panfrost_transfer trans = panfrost_allocate_transient(ctx,
716 vs_size + fs_size);
717
718 /*
719 * Assign ->src_offset now that we know about all the general purpose
720 * varyings that will be used by the fragment and vertex shaders.
721 */
722 for (unsigned i = 0; i < vs->tripipe->varying_count; i++) {
723 /*
724 * General purpose varyings have ->index set to 0, skip other
725 * entries.
726 */
727 if (vs->varyings[i].index)
728 continue;
729
730 vs->varyings[i].src_offset = 16 * (num_gen_varyings++);
731 }
732
733 for (unsigned i = 0; i < fs->tripipe->varying_count; i++) {
734 unsigned j;
735
736 if (fs->varyings[i].index)
737 continue;
738
739 /*
740 * Re-use the VS general purpose varying pos if it exists,
741 * create a new one otherwise.
742 */
743 for (j = 0; j < vs->tripipe->varying_count; j++) {
744 if (fs->varyings_loc[i] == vs->varyings_loc[j])
745 break;
746 }
747
748 if (j < vs->tripipe->varying_count)
749 fs->varyings[i].src_offset = vs->varyings[j].src_offset;
750 else
751 fs->varyings[i].src_offset = 16 * (num_gen_varyings++);
752 }
753
754 memcpy(trans.cpu, vs->varyings, vs_size);
755 memcpy(trans.cpu + vs_size, fs->varyings, fs_size);
756
757 ctx->payload_vertex.postfix.varying_meta = trans.gpu;
758 ctx->payload_tiler.postfix.varying_meta = trans.gpu + vs_size;
759
760 /* Buffer indices must be in this order per our convention */
761 union mali_attr varyings[PIPE_MAX_ATTRIBS];
762 unsigned idx = 0;
763
764 panfrost_emit_varyings(ctx, &varyings[idx++], num_gen_varyings * 16,
765 invocation_count);
766
767 /* fp32 vec4 gl_Position */
768 ctx->payload_tiler.postfix.position_varying =
769 panfrost_emit_varyings(ctx, &varyings[idx++],
770 sizeof(float) * 4, invocation_count);
771
772
773 if (vs->writes_point_size || fs->reads_point_coord) {
774 /* fp16 vec1 gl_PointSize */
775 ctx->payload_tiler.primitive_size.pointer =
776 panfrost_emit_varyings(ctx, &varyings[idx++],
777 2, invocation_count);
778 }
779
780 if (fs->reads_point_coord) {
781 /* Special descriptor */
782 panfrost_emit_point_coord(&varyings[idx++]);
783 }
784
785 mali_ptr varyings_p = panfrost_upload_transient(ctx, &varyings, idx * sizeof(union mali_attr));
786 ctx->payload_vertex.postfix.varyings = varyings_p;
787 ctx->payload_tiler.postfix.varyings = varyings_p;
788 }
789
790 static mali_ptr
791 panfrost_vertex_buffer_address(struct panfrost_context *ctx, unsigned i)
792 {
793 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
794 struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);
795
796 return rsrc->bo->gpu + buf->buffer_offset;
797 }
798
799 /* Emits attribute and varying descriptors; this should be called once per
800 * draw, except under some obscure circumstances */
801
802 static void
803 panfrost_emit_vertex_data(struct panfrost_context *ctx, struct panfrost_job *job)
804 {
805 /* Staged mali_attr, and index into them. i =/= k, depending on the
806 * vertex buffer mask */
807 union mali_attr attrs[PIPE_MAX_ATTRIBS];
808 unsigned k = 0;
809
810 unsigned invocation_count = MALI_NEGATIVE(ctx->payload_tiler.prefix.invocation_count);
811
812 for (int i = 0; i < ARRAY_SIZE(ctx->vertex_buffers); ++i) {
813 if (!(ctx->vb_mask & (1 << i))) continue;
814
815 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
816 struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);
817
818 if (!rsrc) continue;
819
820 /* Align to 64 bytes by masking off the lower bits. This
821 * will be adjusted back when we fixup the src_offset in
822 * mali_attr_meta */
823
824 mali_ptr addr = panfrost_vertex_buffer_address(ctx, i) & ~63;
825
826 /* Offset vertex count by draw_start to make sure we upload enough */
827 attrs[k].stride = buf->stride;
828 attrs[k].size = rsrc->base.width0;
829
830 panfrost_job_add_bo(job, rsrc->bo);
831 attrs[k].elements = addr | MALI_ATTR_LINEAR;
832
833 ++k;
834 }
835
836 ctx->payload_vertex.postfix.attributes = panfrost_upload_transient(ctx, attrs, k * sizeof(union mali_attr));
837
838 panfrost_emit_varying_descriptor(ctx, invocation_count);
839 }
840
841 static bool
842 panfrost_writes_point_size(struct panfrost_context *ctx)
843 {
844 assert(ctx->vs);
845 struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
846
847 return vs->writes_point_size && ctx->payload_tiler.prefix.draw_mode == MALI_POINTS;
848 }
849
850 /* Stage the attribute descriptors so we can adjust src_offset
851 * to let BOs align nicely */
852
853 static void
854 panfrost_stage_attributes(struct panfrost_context *ctx)
855 {
856 struct panfrost_vertex_state *so = ctx->vertex;
857
858 size_t sz = sizeof(struct mali_attr_meta) * so->num_elements;
859 struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sz);
860 struct mali_attr_meta *target = (struct mali_attr_meta *) transfer.cpu;
861
862 /* Copy as-is for the first pass */
863 memcpy(target, so->hw, sz);
864
865 /* Fixup offsets for the second pass. Recall that the hardware
866 * calculates attribute addresses as:
867 *
868 * addr = base + (stride * vtx) + src_offset;
869 *
870 * However, on Mali, base must be aligned to 64-bytes, so we
871 * instead let:
872 *
873 * base' = base & ~63 = base - (base & 63)
874 *
875 * To compensate when using base' (see emit_vertex_data), we have
876 * to adjust src_offset by the masked off piece:
877 *
878 * addr' = base' + (stride * vtx) + (src_offset + (base & 63))
879 * = base - (base & 63) + (stride * vtx) + src_offset + (base & 63)
880 * = base + (stride * vtx) + src_offset
881 * = addr;
882 *
883 * QED.
884 */
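/* Concretely, if base = 0x10070 then base' = 0x10040, and every attribute
 * sourced from that buffer has src_offset bumped by 0x30 */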
885
886 for (unsigned i = 0; i < so->num_elements; ++i) {
887 unsigned vbi = so->pipe[i].vertex_buffer_index;
888 mali_ptr addr = panfrost_vertex_buffer_address(ctx, vbi);
889
890 /* Adjust by the masked off bits of the offset */
891 target[i].src_offset += (addr & 63);
892 }
893
894 ctx->payload_vertex.postfix.attribute_meta = transfer.gpu;
895 }
896
897 static void
898 panfrost_upload_sampler_descriptors(struct panfrost_context *ctx)
899 {
900 size_t desc_size = sizeof(struct mali_sampler_descriptor);
901
902 for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
903 mali_ptr upload = 0;
904
905 if (ctx->sampler_count[t] && ctx->sampler_view_count[t]) {
906 size_t transfer_size = desc_size * ctx->sampler_count[t];
907
908 struct panfrost_transfer transfer =
909 panfrost_allocate_transient(ctx, transfer_size);
910
911 struct mali_sampler_descriptor *desc =
912 (struct mali_sampler_descriptor *) transfer.cpu;
913
914 for (int i = 0; i < ctx->sampler_count[t]; ++i)
915 desc[i] = ctx->samplers[t][i]->hw;
916
917 upload = transfer.gpu;
918 }
919
920 if (t == PIPE_SHADER_FRAGMENT)
921 ctx->payload_tiler.postfix.sampler_descriptor = upload;
922 else if (t == PIPE_SHADER_VERTEX)
923 ctx->payload_vertex.postfix.sampler_descriptor = upload;
924 else
925 assert(0);
926 }
927 }
928
929 /* Computes the address of a texture at a particular slice */
930
931 static mali_ptr
932 panfrost_get_texture_address(
933 struct panfrost_resource *rsrc,
934 unsigned level, unsigned face)
935 {
936 unsigned level_offset = rsrc->bo->slices[level].offset;
937 unsigned face_offset = face * rsrc->bo->cubemap_stride;
938
939 return rsrc->bo->gpu + level_offset + face_offset;
940
941 }
942
943 static mali_ptr
944 panfrost_upload_tex(
945 struct panfrost_context *ctx,
946 struct panfrost_sampler_view *view)
947 {
948 if (!view)
949 return (mali_ptr) NULL;
950
951 struct pipe_sampler_view *pview = &view->base;
952 struct panfrost_resource *rsrc = pan_resource(pview->texture);
953
954 /* Do we interleave an explicit stride with every element? */
955
956 bool has_manual_stride =
957 view->hw.format.usage2 & MALI_TEX_MANUAL_STRIDE;
958
959 /* For easy access */
960
961 assert(pview->target != PIPE_BUFFER);
962 unsigned first_level = pview->u.tex.first_level;
963 unsigned last_level = pview->u.tex.last_level;
964
965 /* Inject the addresses in, interleaving mip levels, cube faces, and
966 * strides in that order */
967
968 unsigned idx = 0;
969
970 for (unsigned l = first_level; l <= last_level; ++l) {
971 for (unsigned f = 0; f < pview->texture->array_size; ++f) {
972 view->hw.payload[idx++] =
973 panfrost_get_texture_address(rsrc, l, f);
974
975 if (has_manual_stride) {
976 view->hw.payload[idx++] =
977 rsrc->bo->slices[l].stride;
978 }
979 }
980 }
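/* e.g. a 2-level non-array texture with manual striding lays the payload out
 * as { level 0 address, level 0 stride, level 1 address, level 1 stride } */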
981
982 return panfrost_upload_transient(ctx, &view->hw,
983 sizeof(struct mali_texture_descriptor));
984 }
985
986 static void
987 panfrost_upload_texture_descriptors(struct panfrost_context *ctx)
988 {
989 for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
990 mali_ptr trampoline = 0;
991
992 if (ctx->sampler_view_count[t]) {
993 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
994
995 for (int i = 0; i < ctx->sampler_view_count[t]; ++i)
996 trampolines[i] =
997 panfrost_upload_tex(ctx, ctx->sampler_views[t][i]);
998
999 trampoline = panfrost_upload_transient(ctx, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]);
1000 }
1001
1002 if (t == PIPE_SHADER_FRAGMENT)
1003 ctx->payload_tiler.postfix.texture_trampoline = trampoline;
1004 else if (t == PIPE_SHADER_VERTEX)
1005 ctx->payload_vertex.postfix.texture_trampoline = trampoline;
1006 else
1007 assert(0);
1008 }
1009 }
1010
1011 /* Go through dirty flags and actualise them in the cmdstream. */
1012
1013 void
1014 panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
1015 {
1016 struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
1017
1018 if (with_vertex_data) {
1019 panfrost_emit_vertex_data(ctx, job);
1020 }
1021
1022 bool msaa = ctx->rasterizer->base.multisample;
1023
1024 if (ctx->dirty & PAN_DIRTY_RASTERIZER) {
1025 ctx->payload_tiler.gl_enables = ctx->rasterizer->tiler_gl_enables;
1026
1027 /* TODO: Sample size */
1028 SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_HAS_MSAA, msaa);
1029 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !msaa);
1030 }
1031
1032 /* Enable job requirements at draw-time */
1033
1034 if (msaa)
1035 job->requirements |= PAN_REQ_MSAA;
1036
1037 if (ctx->depth_stencil->depth.writemask)
1038 job->requirements |= PAN_REQ_DEPTH_WRITE;
1039
1040 if (ctx->occlusion_query) {
1041 ctx->payload_tiler.gl_enables |= MALI_OCCLUSION_QUERY | MALI_OCCLUSION_PRECISE;
1042 ctx->payload_tiler.postfix.occlusion_counter = ctx->occlusion_query->transfer.gpu;
1043 }
1044
1045 if (ctx->dirty & PAN_DIRTY_VS) {
1046 assert(ctx->vs);
1047
1048 struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
1049
1050 /* Late shader descriptor assignments */
1051
1052 vs->tripipe->texture_count = ctx->sampler_view_count[PIPE_SHADER_VERTEX];
1053 vs->tripipe->sampler_count = ctx->sampler_count[PIPE_SHADER_VERTEX];
1054
1055 /* Who knows */
1056 vs->tripipe->midgard1.unknown1 = 0x2201;
1057
1058 ctx->payload_vertex.postfix._shader_upper = vs->tripipe_gpu >> 4;
1059 }
1060
1061 if (ctx->dirty & (PAN_DIRTY_RASTERIZER | PAN_DIRTY_VS)) {
1062 /* Check if we need to link the gl_PointSize varying */
1063 if (!panfrost_writes_point_size(ctx)) {
1064 /* If the size is constant, write it out. Otherwise,
1065 * don't touch primitive_size (since we would clobber
1066 * the pointer there) */
1067
1068 ctx->payload_tiler.primitive_size.constant = ctx->rasterizer->base.line_width;
1069 }
1070 }
1071
1072 /* TODO: Maybe dirty track FS, maybe not. For now, it's transient. */
1073 if (ctx->fs)
1074 ctx->dirty |= PAN_DIRTY_FS;
1075
1076 if (ctx->dirty & PAN_DIRTY_FS) {
1077 assert(ctx->fs);
1078 struct panfrost_shader_state *variant = &ctx->fs->variants[ctx->fs->active_variant];
1079
1080 #define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name
1081
1082 COPY(shader);
1083 COPY(attribute_count);
1084 COPY(varying_count);
1085 COPY(midgard1.uniform_count);
1086 COPY(midgard1.work_count);
1087 COPY(midgard1.unknown2);
1088
1089 #undef COPY
1090 /* If there is a blend shader, work registers are shared */
1091
1092 if (ctx->blend->has_blend_shader)
1093 ctx->fragment_shader_core.midgard1.work_count = /*MAX2(ctx->fragment_shader_core.midgard1.work_count, ctx->blend->blend_work_count)*/16;
1094
1095 /* Set late due to depending on render state */
1096 /* The one at the end seems to mean "1 UBO" */
1097 unsigned flags = MALI_EARLY_Z | 0x200 | 0x2000 | 0x1;
1098
1099 /* Any time texturing is used, derivatives are implicitly
1100 * calculated, so we need to enable helper invocations */
1101
1102 if (ctx->sampler_view_count[PIPE_SHADER_FRAGMENT])
1103 flags |= MALI_HELPER_INVOCATIONS;
1104
1105 ctx->fragment_shader_core.midgard1.unknown1 = flags;
1106
1107 /* Assign texture/sample count right before upload */
1108 ctx->fragment_shader_core.texture_count = ctx->sampler_view_count[PIPE_SHADER_FRAGMENT];
1109 ctx->fragment_shader_core.sampler_count = ctx->sampler_count[PIPE_SHADER_FRAGMENT];
1110
1111 /* Assign the stencil refs late */
1112 ctx->fragment_shader_core.stencil_front.ref = ctx->stencil_ref.ref_value[0];
1113 ctx->fragment_shader_core.stencil_back.ref = ctx->stencil_ref.ref_value[1];
1114
1115 /* CAN_DISCARD should be set if the fragment shader possibly
1116 * contains a 'discard' instruction. It is likely this is
1117 * related to optimizations related to forward-pixel kill, as
1118 * per "Mali Performance 3: Is EGL_BUFFER_PRESERVED a good
1119 * thing?" by Peter Harris
1120 */
1121
1122 if (variant->can_discard) {
1123 ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
1124 ctx->fragment_shader_core.midgard1.unknown1 &= ~MALI_EARLY_Z;
1125 ctx->fragment_shader_core.midgard1.unknown1 |= 0x4000;
1126 ctx->fragment_shader_core.midgard1.unknown1 = 0x4200;
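/* XXX: the direct assignment above supersedes the EARLY_Z clear and
 * the 0x4000 OR preceding it */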
1127 }
1128
1129 /* Check if we're using the default blend descriptor (fast path) */
1130
1131 bool no_blending =
1132 !ctx->blend->has_blend_shader &&
1133 (ctx->blend->equation.rgb_mode == 0x122) &&
1134 (ctx->blend->equation.alpha_mode == 0x122) &&
1135 (ctx->blend->equation.color_mask == 0xf);
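/* The 0x122 modes appear to encode plain REPLACE; combined with a full
 * colour mask, that amounts to blending being disabled */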
1136
1137 /* Even on MFBD, the shader descriptor gets blend shaders. It's
1138 * *also* copied to the blend_meta appended (by convention),
1139 * but this is the field actually read by the hardware. (Or
1140 * maybe both are read...?) */
1141
1142 if (ctx->blend->has_blend_shader) {
1143 ctx->fragment_shader_core.blend.shader = ctx->blend->blend_shader;
1144 }
1145
1146 if (ctx->require_sfbd) {
1147 /* When only a single render target platform is used, the blend
1148 * information is inside the shader meta itself. We
1149 * additionally need to signal CAN_DISCARD for nontrivial blend
1150 * modes (so we're able to read back the destination buffer) */
1151
1152 if (!ctx->blend->has_blend_shader) {
1153 ctx->fragment_shader_core.blend.equation = ctx->blend->equation;
1154 ctx->fragment_shader_core.blend.constant = ctx->blend->constant;
1155 }
1156
1157 if (!no_blending) {
1158 ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
1159 }
1160 }
1161
1162 size_t size = sizeof(struct mali_shader_meta) + sizeof(struct midgard_blend_rt);
1163 struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);
1164 memcpy(transfer.cpu, &ctx->fragment_shader_core, sizeof(struct mali_shader_meta));
1165
1166 ctx->payload_tiler.postfix._shader_upper = (transfer.gpu) >> 4;
1167
1168 if (!ctx->require_sfbd) {
1169 /* Additional blend descriptor tacked on for jobs using MFBD */
1170
1171 unsigned blend_count = 0x200;
1172
1173 if (ctx->blend->has_blend_shader) {
1174 /* For a blend shader, the bottom nibble corresponds to
1175 * the number of work registers used, which signals the
1176 * -existence- of a blend shader */
1177
1178 assert(ctx->blend->blend_work_count >= 2);
1179 blend_count |= MIN2(ctx->blend->blend_work_count, 3);
1180 } else {
1181 /* Otherwise, the bottom bit simply specifies if
1182 * blending (anything other than REPLACE) is enabled */
1183
1184
1185 if (!no_blending)
1186 blend_count |= 0x1;
1187 }
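/* So, e.g., a blend shader using 2 work registers yields flags = 0x202,
 * fixed-function blending yields 0x201, and plain REPLACE stays 0x200 */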
1188
1189 struct midgard_blend_rt rts[4];
1190
1191 /* TODO: MRT */
1192
1193 for (unsigned i = 0; i < 1; ++i) {
1194 rts[i].flags = blend_count;
1195
1196 if (ctx->blend->has_blend_shader) {
1197 rts[i].blend.shader = ctx->blend->blend_shader;
1198 } else {
1199 rts[i].blend.equation = ctx->blend->equation;
1200 rts[i].blend.constant = ctx->blend->constant;
1201 }
1202 }
1203
1204 memcpy(transfer.cpu + sizeof(struct mali_shader_meta), rts, sizeof(rts[0]) * 1);
1205 }
1206 }
1207
1208 /* We stage to transient, so it's always dirty */
1209 panfrost_stage_attributes(ctx);
1210
1211 if (ctx->dirty & PAN_DIRTY_SAMPLERS)
1212 panfrost_upload_sampler_descriptors(ctx);
1213
1214 if (ctx->dirty & PAN_DIRTY_TEXTURES)
1215 panfrost_upload_texture_descriptors(ctx);
1216
1217 const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
1218
1219 for (int i = 0; i <= PIPE_SHADER_FRAGMENT; ++i) {
1220 struct panfrost_constant_buffer *buf = &ctx->constant_buffer[i];
1221
1222 struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
1223 struct panfrost_shader_state *fs = &ctx->fs->variants[ctx->fs->active_variant];
1224 struct panfrost_shader_state *ss = (i == PIPE_SHADER_FRAGMENT) ? fs : vs;
1225
1226 /* Allocate room for the sysval and the uniforms */
1227 size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
1228 size_t size = sys_size + buf->size;
1229 struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);
1230
1231 /* Upload sysvals requested by the shader */
1232 float *uniforms = (float *) transfer.cpu;
1233 for (unsigned i = 0; i < ss->sysval_count; ++i) {
1234 int sysval = ss->sysval[i];
1235
1236 if (sysval == PAN_SYSVAL_VIEWPORT_SCALE) {
1237 uniforms[4*i + 0] = vp->scale[0];
1238 uniforms[4*i + 1] = vp->scale[1];
1239 uniforms[4*i + 2] = vp->scale[2];
1240 } else if (sysval == PAN_SYSVAL_VIEWPORT_OFFSET) {
1241 uniforms[4*i + 0] = vp->translate[0];
1242 uniforms[4*i + 1] = vp->translate[1];
1243 uniforms[4*i + 2] = vp->translate[2];
1244 } else {
1245 assert(0);
1246 }
1247 }
1248
1249 /* Upload uniforms */
1250 memcpy(transfer.cpu + sys_size, buf->buffer, buf->size);
1251
1252 int uniform_count = 0;
1253
1254 struct mali_vertex_tiler_postfix *postfix;
1255
1256 switch (i) {
1257 case PIPE_SHADER_VERTEX:
1258 uniform_count = ctx->vs->variants[ctx->vs->active_variant].uniform_count;
1259 postfix = &ctx->payload_vertex.postfix;
1260 break;
1261
1262 case PIPE_SHADER_FRAGMENT:
1263 uniform_count = ctx->fs->variants[ctx->fs->active_variant].uniform_count;
1264 postfix = &ctx->payload_tiler.postfix;
1265 break;
1266
1267 default:
1268 unreachable("Invalid shader stage\n");
1269 }
1270
1271 /* Also attach the same buffer as a UBO for extended access */
1272
1273 struct mali_uniform_buffer_meta uniform_buffers[] = {
1274 {
1275 .size = MALI_POSITIVE((2 + uniform_count)),
1276 .ptr = transfer.gpu >> 2,
1277 },
1278 };
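/* The pointer is stored in 4-byte units (hence the >> 2), and .size uses
 * the same minus-one encoding as MALI_POSITIVE elsewhere */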
1279
1280 mali_ptr ubufs = panfrost_upload_transient(ctx, uniform_buffers, sizeof(uniform_buffers));
1281 postfix->uniforms = transfer.gpu;
1282 postfix->uniform_buffers = ubufs;
1283
1284 buf->dirty = 0;
1285 }
1286
1287 /* TODO: Upload the viewport somewhere more appropriate */
1288
1289 /* Clip bounds are encoded as floats. The viewport itself is encoded as
1290 * (somewhat) asymmetric ints. */
1291 const struct pipe_scissor_state *ss = &ctx->scissor;
1292
1293 struct mali_viewport view = {
1294 /* By default, do no viewport clipping, i.e. clip to (-inf,
1295 * inf) in each direction. Clipping to the viewport in theory
1296 * should work, but in practice causes issues when we're not
1297 * explicitly trying to scissor */
1298
1299 .clip_minx = -inff,
1300 .clip_miny = -inff,
1301 .clip_maxx = inff,
1302 .clip_maxy = inff,
1303
1304 .clip_minz = 0.0,
1305 .clip_maxz = 1.0,
1306 };
1307
1308 /* Always scissor to the viewport by default. */
1309 int minx = (int) (vp->translate[0] - vp->scale[0]);
1310 int maxx = (int) (vp->translate[0] + vp->scale[0]);
1311
1312 int miny = (int) (vp->translate[1] - vp->scale[1]);
1313 int maxy = (int) (vp->translate[1] + vp->scale[1]);
1314
1315 /* Apply the scissor test */
1316
1317 if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
1318 minx = ss->minx;
1319 maxx = ss->maxx;
1320 miny = ss->miny;
1321 maxy = ss->maxy;
1322 }
1323
1324 /* Hardware needs the min/max to be strictly ordered, so flip if we
1325 * need to. The viewport transformation in the vertex shader will
1326 * handle the negatives if we don't */
1327
1328 if (miny > maxy) {
1329 int temp = miny;
1330 miny = maxy;
1331 maxy = temp;
1332 }
1333
1334 if (minx > maxx) {
1335 int temp = minx;
1336 minx = maxx;
1337 maxx = temp;
1338 }
1339
1340 /* Clamp everything positive, just in case */
1341
1342 maxx = MAX2(0, maxx);
1343 maxy = MAX2(0, maxy);
1344 minx = MAX2(0, minx);
1345 miny = MAX2(0, miny);
1346
1347 /* Clamp to the framebuffer size as a last check */
1348
1349 minx = MIN2(ctx->pipe_framebuffer.width, minx);
1350 maxx = MIN2(ctx->pipe_framebuffer.width, maxx);
1351
1352 miny = MIN2(ctx->pipe_framebuffer.height, miny);
1353 maxy = MIN2(ctx->pipe_framebuffer.height, maxy);
1354
1355 /* Upload */
1356
1357 view.viewport0[0] = minx;
1358 view.viewport1[0] = MALI_POSITIVE(maxx);
1359
1360 view.viewport0[1] = miny;
1361 view.viewport1[1] = MALI_POSITIVE(maxy);
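/* e.g. an 800x600 viewport at the origin has translate = (400, 300) and
 * scale = (400, 300), giving minx/miny = 0 and maxx/maxy = 800/600, stored
 * minus-one as 799 and 599 */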
1362
1363 ctx->payload_tiler.postfix.viewport =
1364 panfrost_upload_transient(ctx,
1365 &view,
1366 sizeof(struct mali_viewport));
1367
1368 ctx->dirty = 0;
1369 }
1370
1371 /* Corresponds to exactly one draw, but does not submit anything */
1372
1373 static void
1374 panfrost_queue_draw(struct panfrost_context *ctx)
1375 {
1376 /* TODO: Expand the array? */
1377 if (ctx->draw_count >= MAX_DRAW_CALLS) {
1378 DBG("Job buffer overflow, ignoring draw\n");
1379 assert(0);
1380 }
1381
1382 /* Handle dirty flags now */
1383 panfrost_emit_for_draw(ctx, true);
1384
1385 /* We need a set_value job before any other draw jobs */
1386 if (ctx->draw_count == 0)
1387 panfrost_set_value_job(ctx);
1388
1389 struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false);
1390 ctx->u_vertex_jobs[ctx->vertex_job_count] = (struct mali_job_descriptor_header *) vertex.cpu;
1391 ctx->vertex_jobs[ctx->vertex_job_count++] = vertex.gpu;
1392
1393 struct panfrost_transfer tiler = panfrost_vertex_tiler_job(ctx, true);
1394 ctx->u_tiler_jobs[ctx->tiler_job_count] = (struct mali_job_descriptor_header *) tiler.cpu;
1395 ctx->tiler_jobs[ctx->tiler_job_count++] = tiler.gpu;
1396
1397 ctx->draw_count++;
1398 }
1399
1400 /* The entire frame is in memory -- send it off to the kernel! */
1401
1402 static void
1403 panfrost_submit_frame(struct panfrost_context *ctx, bool flush_immediate,
1404 struct pipe_fence_handle **fence,
1405 struct panfrost_job *job)
1406 {
1407 struct pipe_context *gallium = (struct pipe_context *) ctx;
1408 struct panfrost_screen *screen = pan_screen(gallium->screen);
1409
1410 /* Edge case if screen is cleared and nothing else */
1411 bool has_draws = ctx->draw_count > 0;
1412
1413 #ifndef DRY_RUN
1414
1415 bool is_scanout = panfrost_is_scanout(ctx);
1416 screen->driver->submit_vs_fs_job(ctx, has_draws, is_scanout);
1417
1418 /* If the frame is visual (on-screen), we can afford to stall by a frame */
1419
1420 if (!flush_immediate)
1421 screen->driver->force_flush_fragment(ctx, fence);
1422
1423 screen->last_fragment_flushed = false;
1424 screen->last_job = job;
1425
1426 /* If readback, flush now (hurts the pipelined performance) */
1427 if (flush_immediate)
1428 screen->driver->force_flush_fragment(ctx, fence);
1429
1430 if (screen->driver->dump_counters && pan_counters_base) {
1431 screen->driver->dump_counters(screen);
1432
1433 char filename[128];
1434 snprintf(filename, sizeof(filename), "%s/frame%d.mdgprf", pan_counters_base, ++performance_counter_number);
1435 FILE *fp = fopen(filename, "wb");
1436 fwrite(screen->perf_counters.cpu, 4096, sizeof(uint32_t), fp);
1437 fclose(fp);
1438 }
1439
1440 #endif
1441 }
1442
1443 static void
1444 panfrost_draw_wallpaper(struct pipe_context *pipe)
1445 {
1446 struct panfrost_context *ctx = pan_context(pipe);
1447
1448 /* Nothing to reload? */
1449 if (ctx->pipe_framebuffer.cbufs[0] == NULL)
1450 return;
1451
1452 /* Blit the wallpaper in */
1453 panfrost_blit_wallpaper(ctx);
1454
1455 /* We are flushing all queued draws and we know that no more jobs will
1456 * be added until the next frame.
1457 * We also know that the last jobs are the wallpaper jobs, and they
1458 * need to be linked so they execute right after the set_value job.
1459 */
1460
1461 /* set_value job to wallpaper vertex job */
1462 panfrost_link_job_pair(ctx->u_set_value_job, ctx->vertex_jobs[ctx->vertex_job_count - 1]);
1463 ctx->u_vertex_jobs[ctx->vertex_job_count - 1]->job_dependency_index_1 = ctx->u_set_value_job->job_index;
1464
1465 /* wallpaper vertex job to first vertex job */
1466 panfrost_link_job_pair(ctx->u_vertex_jobs[ctx->vertex_job_count - 1], ctx->vertex_jobs[0]);
1467 ctx->u_vertex_jobs[0]->job_dependency_index_1 = ctx->u_set_value_job->job_index;
1468
1469 /* last vertex job to wallpaper tiler job */
1470 panfrost_link_job_pair(ctx->u_vertex_jobs[ctx->vertex_job_count - 2], ctx->tiler_jobs[ctx->tiler_job_count - 1]);
1471 ctx->u_tiler_jobs[ctx->tiler_job_count - 1]->job_dependency_index_1 = ctx->u_vertex_jobs[ctx->vertex_job_count - 1]->job_index;
1472 ctx->u_tiler_jobs[ctx->tiler_job_count - 1]->job_dependency_index_2 = 0;
1473
1474 /* wallpaper tiler job to first tiler job */
1475 panfrost_link_job_pair(ctx->u_tiler_jobs[ctx->tiler_job_count - 1], ctx->tiler_jobs[0]);
1476 ctx->u_tiler_jobs[0]->job_dependency_index_1 = ctx->u_vertex_jobs[0]->job_index;
1477 ctx->u_tiler_jobs[0]->job_dependency_index_2 = ctx->u_tiler_jobs[ctx->tiler_job_count - 1]->job_index;
1478
1479 /* last tiler job to NULL */
1480 panfrost_link_job_pair(ctx->u_tiler_jobs[ctx->tiler_job_count - 2], 0);
1481 }
1482
1483 void
1484 panfrost_flush(
1485 struct pipe_context *pipe,
1486 struct pipe_fence_handle **fence,
1487 unsigned flags)
1488 {
1489 struct panfrost_context *ctx = pan_context(pipe);
1490 struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
1491
1492 /* Nothing to do! */
1493 if (!ctx->draw_count && !job->clear) return;
1494
1495 if (!job->clear)
1496 panfrost_draw_wallpaper(&ctx->base);
1497
1498 /* Whether to stall the pipeline for immediately correct results. Since
1499 * pipelined rendering is quite broken right now (to be fixed by the
1500 * panfrost_job refactor), just take the perf hit for correctness */
1501 bool flush_immediate = /*flags & PIPE_FLUSH_END_OF_FRAME*/true;
1502
1503 /* Submit the frame itself */
1504 panfrost_submit_frame(ctx, flush_immediate, fence, job);
1505
1506 /* Prepare for the next frame */
1507 panfrost_invalidate_frame(ctx);
1508 }
1509
1510 #define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c;
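/* e.g. DEFINE_CASE(POINTS) expands to: case PIPE_PRIM_POINTS: return MALI_POINTS; */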
1511
1512 static int
1513 g2m_draw_mode(enum pipe_prim_type mode)
1514 {
1515 switch (mode) {
1516 DEFINE_CASE(POINTS);
1517 DEFINE_CASE(LINES);
1518 DEFINE_CASE(LINE_LOOP);
1519 DEFINE_CASE(LINE_STRIP);
1520 DEFINE_CASE(TRIANGLES);
1521 DEFINE_CASE(TRIANGLE_STRIP);
1522 DEFINE_CASE(TRIANGLE_FAN);
1523 DEFINE_CASE(QUADS);
1524 DEFINE_CASE(QUAD_STRIP);
1525 DEFINE_CASE(POLYGON);
1526
1527 default:
1528 unreachable("Invalid draw mode");
1529 }
1530 }
1531
1532 #undef DEFINE_CASE
1533
1534 static unsigned
1535 panfrost_translate_index_size(unsigned size)
1536 {
1537 switch (size) {
1538 case 1:
1539 return MALI_DRAW_INDEXED_UINT8;
1540
1541 case 2:
1542 return MALI_DRAW_INDEXED_UINT16;
1543
1544 case 4:
1545 return MALI_DRAW_INDEXED_UINT32;
1546
1547 default:
1548 unreachable("Invalid index size");
1549 }
1550 }
1551
1552 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
1553 * good for the duration of the draw (transient), though it could last longer */
1554
1555 static mali_ptr
1556 panfrost_get_index_buffer_mapped(struct panfrost_context *ctx, const struct pipe_draw_info *info)
1557 {
1558 struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);
1559
1560 off_t offset = info->start * info->index_size;
1561
1562 if (!info->has_user_indices) {
1563 /* Only resources can be directly mapped */
1564 return rsrc->bo->gpu + offset;
1565 } else {
1566 /* Otherwise, we need to upload to transient memory */
1567 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
1568 return panfrost_upload_transient(ctx, ibuf8 + offset, info->count * info->index_size);
1569 }
1570 }
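/* e.g. a resource-backed uint16 index buffer drawn with info->start = 10
 * maps directly at bo->gpu + 20, whereas user index arrays must be copied
 * into transient memory for the draw */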
1571
1572 static void
1573 panfrost_draw_vbo(
1574 struct pipe_context *pipe,
1575 const struct pipe_draw_info *info)
1576 {
1577 struct panfrost_context *ctx = pan_context(pipe);
1578
1579 ctx->payload_vertex.draw_start = info->start;
1580 ctx->payload_tiler.draw_start = info->start;
1581
1582 int mode = info->mode;
1583
1584 /* Fallback for unsupported modes */
1585
1586 if (!(ctx->draw_modes & (1 << mode))) {
1587 if (mode == PIPE_PRIM_QUADS && info->count == 4 && ctx->rasterizer && !ctx->rasterizer->base.flatshade) {
1588 mode = PIPE_PRIM_TRIANGLE_FAN;
1589 } else {
1590 if (info->count < 4) {
1591 /* Degenerate case? */
1592 return;
1593 }
1594
1595 util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
1596 util_primconvert_draw_vbo(ctx->primconvert, info);
1597 return;
1598 }
1599 }
1600
1601 /* Now that we have a guaranteed terminating path, find the job.
1602 * Assignment commented out to prevent unused warning */
1603
1604 /* struct panfrost_job *job = */ panfrost_get_job_for_fbo(ctx);
1605
1606 ctx->payload_tiler.prefix.draw_mode = g2m_draw_mode(mode);
1607
1608 ctx->vertex_count = info->count;
1609
1610 /* For non-indexed draws, they're the same */
1611 unsigned invocation_count = ctx->vertex_count;
1612
1613 unsigned draw_flags = 0;
1614
1615 /* The draw flags control how primitive size is interpreted */
1616
1617 if (panfrost_writes_point_size(ctx))
1618 draw_flags |= MALI_DRAW_VARYING_SIZE;
1619
1620 /* For larger vertex counts (greater than what fits in a 16-bit
1621 * short), the other value is needed, otherwise there will be bizarre
1622 * rendering artefacts. It's not clear what these values mean yet. */
1623
1624 draw_flags |= (mode == PIPE_PRIM_POINTS || ctx->vertex_count > 65535) ? 0x3000 : 0x18000;
1625
1626 if (info->index_size) {
1627 /* Calculate the min/max index used so we can figure out how
1628 * many times to invoke the vertex shader */
1629
1630 /* Fetch / calculate index bounds */
1631 unsigned min_index = 0, max_index = 0;
1632
1633 if (info->max_index == ~0u) {
1634 u_vbuf_get_minmax_index(pipe, info, &min_index, &max_index);
1635 } else {
1636 min_index = info->min_index;
1637 max_index = info->max_index;
1638 }
1639
1640 /* Use the corresponding values */
1641 invocation_count = max_index - min_index + 1;
1642 ctx->payload_vertex.draw_start = min_index;
1643 ctx->payload_tiler.draw_start = min_index;
1644
1645 ctx->payload_tiler.prefix.negative_start = -min_index;
1646 ctx->payload_tiler.prefix.index_count = MALI_POSITIVE(info->count);
1647
1648 //assert(!info->restart_index); /* TODO: Research */
1649 assert(!info->index_bias);
1650
1651 draw_flags |= panfrost_translate_index_size(info->index_size);
1652 ctx->payload_tiler.prefix.indices = panfrost_get_index_buffer_mapped(ctx, info);
1653 } else {
1654 /* Index count == vertex count, if no indexing is applied, as
1655 * if it is internally indexed in the expected order */
1656
1657 ctx->payload_tiler.prefix.negative_start = 0;
1658 ctx->payload_tiler.prefix.index_count = MALI_POSITIVE(ctx->vertex_count);
1659
1660 /* Reverse index state */
1661 ctx->payload_tiler.prefix.indices = (uintptr_t) NULL;
1662 }
1663
1664 ctx->payload_vertex.prefix.invocation_count = MALI_POSITIVE(invocation_count);
1665 ctx->payload_tiler.prefix.invocation_count = MALI_POSITIVE(invocation_count);
1666 ctx->payload_tiler.prefix.unknown_draw = draw_flags;
1667
1668 /* Fire off the draw itself */
1669 panfrost_queue_draw(ctx);
1670 }
1671
1672 /* CSO state */
1673
1674 static void
1675 panfrost_generic_cso_delete(struct pipe_context *pctx, void *hwcso)
1676 {
1677 free(hwcso);
1678 }
1679
1680 static void *
1681 panfrost_create_rasterizer_state(
1682 struct pipe_context *pctx,
1683 const struct pipe_rasterizer_state *cso)
1684 {
1685 struct panfrost_context *ctx = pan_context(pctx);
1686 struct panfrost_rasterizer *so = CALLOC_STRUCT(panfrost_rasterizer);
1687
1688 so->base = *cso;
1689
1690 /* Bitmask, unknown meaning of the start value */
1691 so->tiler_gl_enables = ctx->is_t6xx ? 0x105 : 0x7;
1692
1693 if (cso->front_ccw)
1694 so->tiler_gl_enables |= MALI_FRONT_CCW_TOP;
1695
1696 if (cso->cull_face & PIPE_FACE_FRONT)
1697 so->tiler_gl_enables |= MALI_CULL_FACE_FRONT;
1698
1699 if (cso->cull_face & PIPE_FACE_BACK)
1700 so->tiler_gl_enables |= MALI_CULL_FACE_BACK;
1701
1702 return so;
1703 }
1704
1705 static void
1706 panfrost_bind_rasterizer_state(
1707 struct pipe_context *pctx,
1708 void *hwcso)
1709 {
1710 struct panfrost_context *ctx = pan_context(pctx);
1711
1712 /* TODO: Why can't rasterizer ever be NULL? Other drivers are fine with it */
1713 if (!hwcso)
1714 return;
1715
1716 ctx->rasterizer = hwcso;
1717 ctx->dirty |= PAN_DIRTY_RASTERIZER;
1718 }
1719
1720 static void *
1721 panfrost_create_vertex_elements_state(
1722 struct pipe_context *pctx,
1723 unsigned num_elements,
1724 const struct pipe_vertex_element *elements)
1725 {
1726 struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state);
1727
1728 so->num_elements = num_elements;
1729 memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
1730
1731 /* XXX: What the cornball? This is totally, 100%, unapologetically
1732 * nonsense. And yet it somehow fixes a regression in -bshadow
1733 * (previously, we allocated the descriptor here... a newer commit
1734 * removed that allocation, and then memory corruption led to
1735 * shader_meta getting overwritten in bad ways and then the whole test
1736 * case falling apart). TODO: LOOK INTO PLEASE XXX XXX BAD XXX XXX XXX
1737 */
1738 panfrost_allocate_chunk(pan_context(pctx), 0, HEAP_DESCRIPTOR);
1739
1740 for (int i = 0; i < num_elements; ++i) {
1741 so->hw[i].index = elements[i].vertex_buffer_index;
1742
1743 enum pipe_format fmt = elements[i].src_format;
1744 const struct util_format_description *desc = util_format_description(fmt);
1745 so->hw[i].unknown1 = 0x2;
1746 so->hw[i].swizzle = panfrost_get_default_swizzle(desc->nr_channels);
1747
1748 so->hw[i].format = panfrost_find_format(desc);
1749
1750 /* The field itself should probably be shifted over */
1751 so->hw[i].src_offset = elements[i].src_offset;
1752 }
1753
1754 return so;
1755 }
1756
1757 static void
1758 panfrost_bind_vertex_elements_state(
1759 struct pipe_context *pctx,
1760 void *hwcso)
1761 {
1762 struct panfrost_context *ctx = pan_context(pctx);
1763
1764 ctx->vertex = hwcso;
1765 ctx->dirty |= PAN_DIRTY_VERTEX;
1766 }
1767
1768 static void *
1769 panfrost_create_shader_state(
1770 struct pipe_context *pctx,
1771 const struct pipe_shader_state *cso)
1772 {
1773 struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
1774 so->base = *cso;
1775
1776 /* Token deep copy to prevent memory corruption */
1777
1778 if (cso->type == PIPE_SHADER_IR_TGSI)
1779 so->base.tokens = tgsi_dup_tokens(so->base.tokens);
1780
1781 return so;
1782 }
1783
1784 static void
1785 panfrost_delete_shader_state(
1786 struct pipe_context *pctx,
1787 void *so)
1788 {
1789 struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;
1790
1791 if (cso->base.type == PIPE_SHADER_IR_TGSI) {
1792 DBG("Deleting TGSI shader leaks duplicated tokens\n");
1793 }
1794
1795 free(so);
1796 }
1797
1798 static void *
1799 panfrost_create_sampler_state(
1800 struct pipe_context *pctx,
1801 const struct pipe_sampler_state *cso)
1802 {
1803 struct panfrost_sampler_state *so = CALLOC_STRUCT(panfrost_sampler_state);
1804 so->base = *cso;
1805
1806 /* sampler_state corresponds to mali_sampler_descriptor, which we can generate entirely here */
1807
1808 struct mali_sampler_descriptor sampler_descriptor = {
1809 .filter_mode = MALI_TEX_MIN(translate_tex_filter(cso->min_img_filter))
1810 | MALI_TEX_MAG(translate_tex_filter(cso->mag_img_filter))
1811 | translate_mip_filter(cso->min_mip_filter)
1812 | 0x20,
1813
1814 .wrap_s = translate_tex_wrap(cso->wrap_s),
1815 .wrap_t = translate_tex_wrap(cso->wrap_t),
1816 .wrap_r = translate_tex_wrap(cso->wrap_r),
1817 .compare_func = panfrost_translate_alt_compare_func(cso->compare_func),
1818 .border_color = {
1819 cso->border_color.f[0],
1820 cso->border_color.f[1],
1821 cso->border_color.f[2],
1822 cso->border_color.f[3]
1823 },
1824 .min_lod = FIXED_16(cso->min_lod),
1825 .max_lod = FIXED_16(cso->max_lod),
1826 .unknown2 = 1,
1827 };
1828
1829 so->hw = sampler_descriptor;
1830
1831 return so;
1832 }
1833
1834 static void
1835 panfrost_bind_sampler_states(
1836 struct pipe_context *pctx,
1837 enum pipe_shader_type shader,
1838 unsigned start_slot, unsigned num_sampler,
1839 void **sampler)
1840 {
1841 assert(start_slot == 0);
1842
1843 struct panfrost_context *ctx = pan_context(pctx);
1844
1845 /* XXX: Should upload, not just copy? */
1846 ctx->sampler_count[shader] = num_sampler;
1847 memcpy(ctx->samplers[shader], sampler, num_sampler * sizeof (void *));
1848
1849 ctx->dirty |= PAN_DIRTY_SAMPLERS;
1850 }
1851
1852 static bool
1853 panfrost_variant_matches(
1854 struct panfrost_context *ctx,
1855 struct panfrost_shader_state *variant,
1856 enum pipe_shader_type type)
1857 {
1858 struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;
1859
1860 bool is_fragment = (type == PIPE_SHADER_FRAGMENT);
1861
1862 if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) {
1863 /* First make sure the enable state matches */
1864 if (alpha->enabled != variant->alpha_state.enabled) {
1865 return false;
1866 }
1867
1868 /* Check that the contents of the test are the same */
1869 bool same_func = alpha->func == variant->alpha_state.func;
1870 bool same_ref = alpha->ref_value == variant->alpha_state.ref_value;
1871
1872 if (!(same_func && same_ref)) {
1873 return false;
1874 }
1875 }
1876 /* Otherwise, we're good to go */
1877 return true;
1878 }
1879
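/* Note that fragment variants are keyed only on the alpha-test state
 * checked above; e.g. binding the same shader under two different
 * alpha funcs yields two distinct variants, while a vertex shader
 * only ever gets one */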
1880 static void
1881 panfrost_bind_shader_state(
1882 struct pipe_context *pctx,
1883 void *hwcso,
1884 enum pipe_shader_type type)
1885 {
1886 struct panfrost_context *ctx = pan_context(pctx);
1887
1888 if (type == PIPE_SHADER_FRAGMENT) {
1889 ctx->fs = hwcso;
1890 ctx->dirty |= PAN_DIRTY_FS;
1891 } else {
1892 assert(type == PIPE_SHADER_VERTEX);
1893 ctx->vs = hwcso;
1894 ctx->dirty |= PAN_DIRTY_VS;
1895 }
1896
1897 if (!hwcso) return;
1898
1899 /* Match the appropriate variant */
1900
1901 signed variant = -1;
1902 struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;
1903
1904 for (unsigned i = 0; i < variants->variant_count; ++i) {
1905 if (panfrost_variant_matches(ctx, &variants->variants[i], type)) {
1906 variant = i;
1907 break;
1908 }
1909 }
1910
1911 if (variant == -1) {
1912 /* No variant matched, so create a new one */
1913 variant = variants->variant_count++;
1914 assert(variants->variant_count < MAX_SHADER_VARIANTS);
1915
1916 variants->variants[variant].base = hwcso;
1917
1918 if (type == PIPE_SHADER_FRAGMENT)
1919 variants->variants[variant].alpha_state = ctx->depth_stencil->alpha;
1920
1921 /* Allocate the mapped descriptor ahead-of-time. */
1923 struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR);
1924
1925 variants->variants[variant].tripipe = (struct mali_shader_meta *) transfer.cpu;
1926 variants->variants[variant].tripipe_gpu = transfer.gpu;
1927
1928 }
1929
1930 /* Select this variant */
1931 variants->active_variant = variant;
1932
1933 struct panfrost_shader_state *shader_state = &variants->variants[variant];
1934 assert(panfrost_variant_matches(ctx, shader_state, type));
1935
1936 /* We finally have a variant, so compile it */
1937
1938 if (!shader_state->compiled) {
1939 panfrost_shader_compile(ctx, shader_state->tripipe, NULL,
1940 panfrost_job_type_for_pipe(type), shader_state);
1941
1942 shader_state->compiled = true;
1943 }
1944 }
1945
1946 static void
1947 panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
1948 {
1949 panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
1950 }
1951
1952 static void
1953 panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
1954 {
1955 panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
1956 }
1957
1958 static void
1959 panfrost_set_vertex_buffers(
1960 struct pipe_context *pctx,
1961 unsigned start_slot,
1962 unsigned num_buffers,
1963 const struct pipe_vertex_buffer *buffers)
1964 {
1965 struct panfrost_context *ctx = pan_context(pctx);
1966
1967 util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers, start_slot, num_buffers);
1968 }
1969
1970 static void
1971 panfrost_set_constant_buffer(
1972 struct pipe_context *pctx,
1973 enum pipe_shader_type shader, uint index,
1974 const struct pipe_constant_buffer *buf)
1975 {
1976 struct panfrost_context *ctx = pan_context(pctx);
1977 struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];
1978
1979 size_t sz = buf ? buf->buffer_size : 0;
1980
1981 /* Free previous buffer */
1982
1983 pbuf->dirty = true;
1984 pbuf->size = sz;
1985
1986 if (pbuf->buffer) {
1987 free(pbuf->buffer);
1988 pbuf->buffer = NULL;
1989 }
1990
1991 /* If unbinding, we're done */
1992
1993 if (!buf)
1994 return;
1995
1996 /* Multiple constant buffers not yet supported */
1997 assert(index == 0);
1998
1999 const uint8_t *cpu;
2000
2001 struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer);
2002
2003 if (rsrc) {
2004 cpu = rsrc->bo->cpu;
2005 } else if (buf->user_buffer) {
2006 cpu = buf->user_buffer;
2007 } else {
2008 DBG("No constant buffer?\n");
2009 return;
2010 }
2011
2012 /* Copy the constant buffer into the driver context for later upload */
2013
2014 pbuf->buffer = malloc(sz);
2015 memcpy(pbuf->buffer, cpu + buf->buffer_offset, sz);
2016 }
2017
2018 static void
2019 panfrost_set_stencil_ref(
2020 struct pipe_context *pctx,
2021 const struct pipe_stencil_ref *ref)
2022 {
2023 struct panfrost_context *ctx = pan_context(pctx);
2024 ctx->stencil_ref = *ref;
2025
2026 /* Shader core dirty */
2027 ctx->dirty |= PAN_DIRTY_FS;
2028 }
2029
2030 static struct pipe_sampler_view *
2031 panfrost_create_sampler_view(
2032 struct pipe_context *pctx,
2033 struct pipe_resource *texture,
2034 const struct pipe_sampler_view *template)
2035 {
2036 struct panfrost_sampler_view *so = CALLOC_STRUCT(panfrost_sampler_view);
2037 int bytes_per_pixel = util_format_get_blocksize(texture->format);
2038
2039 pipe_reference(NULL, &texture->reference);
2040
2041 struct panfrost_resource *prsrc = (struct panfrost_resource *) texture;
2042 assert(prsrc->bo);
2043
2044 so->base = *template;
2045 so->base.texture = texture;
2046 so->base.reference.count = 1;
2047 so->base.context = pctx;
2048
2049 /* sampler_views correspond to texture descriptors, minus the texture
2050 * (data) itself. So, we serialise the descriptor here and cache it for
2051 * later. */
2052
2053 /* Make sure it's something with which we're familiar */
2054 assert(bytes_per_pixel >= 1 && bytes_per_pixel <= 4);
2055
2056 /* TODO: Detect from format better */
2057 const struct util_format_description *desc = util_format_description(prsrc->base.format);
2058
2059 unsigned char user_swizzle[4] = {
2060 template->swizzle_r,
2061 template->swizzle_g,
2062 template->swizzle_b,
2063 template->swizzle_a
2064 };
2065
2066 enum mali_format format = panfrost_find_format(desc);
2067
2068 bool is_depth = desc->format == PIPE_FORMAT_Z32_UNORM;
2069
2070 unsigned usage2_layout = 0x10;
2071
2072 switch (prsrc->bo->layout) {
2073 case PAN_AFBC:
2074 usage2_layout |= 0x8 | 0x4;
2075 break;
2076 case PAN_TILED:
2077 usage2_layout |= 0x1;
2078 break;
2079 case PAN_LINEAR:
2080 usage2_layout |= is_depth ? 0x1 : 0x2;
2081 break;
2082 default:
2083 assert(0);
2084 break;
2085 }
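/* Given the base of 0x10, that works out to usage2 = 0x1C for AFBC,
 * 0x11 for tiled, and 0x12 (0x11 for depth) for linear */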
2086
2087 /* Check if we need to set a custom stride by computing the "expected"
2088 * stride and comparing it to what the BO actually wants. This only
2089 * applies to linear textures, since tiled/compressed textures already
2090 * have strict alignment requirements for their strides */
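/* e.g. a 100-texel-wide RGBA8 level is expected at a stride of
 * 100 * 4 = 400 bytes; if the BO instead reports, say, 512 bytes,
 * we must set MALI_TEX_MANUAL_STRIDE so the hardware is told the
 * real stride explicitly */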
2091
2092 unsigned first_level = template->u.tex.first_level;
2093 unsigned last_level = template->u.tex.last_level;
2094
2095 if (prsrc->bo->layout == PAN_LINEAR) {
2096 for (unsigned l = first_level; l <= last_level; ++l) {
2097 unsigned actual_stride = prsrc->bo->slices[l].stride;
2098 unsigned width = u_minify(texture->width0, l);
2099 unsigned comp_stride = width * bytes_per_pixel;
2100
2101 if (comp_stride != actual_stride) {
2102 usage2_layout |= MALI_TEX_MANUAL_STRIDE;
2103 break;
2104 }
2105 }
2106 }
2107
2108 /* In the hardware, array_size refers specifically to array textures,
2109 * whereas in Gallium, it also covers cubemaps */
2110
2111 unsigned array_size = texture->array_size;
2112
2113 if (texture->target == PIPE_TEXTURE_CUBE) {
2114 /* TODO: Cubemap arrays */
2115 assert(array_size == 6);
2116 }
2117
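/* MALI_POSITIVE presumably encodes a dimension n as (n - 1), so e.g.
 * a 1024x1024 base level is written as 1023x1023 below */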
2118 struct mali_texture_descriptor texture_descriptor = {
2119 .width = MALI_POSITIVE(u_minify(texture->width0, first_level)),
2120 .height = MALI_POSITIVE(u_minify(texture->height0, first_level)),
2121 .depth = MALI_POSITIVE(u_minify(texture->depth0, first_level)),
2122 .array_size = MALI_POSITIVE(array_size),
2123
2124 /* TODO: Decode */
2125 .format = {
2126 .swizzle = panfrost_translate_swizzle_4(desc->swizzle),
2127 .format = format,
2128
2129 .usage1 = (texture->target == PIPE_TEXTURE_3D) ? MALI_TEX_3D : 0,
2130 .is_not_cubemap = texture->target != PIPE_TEXTURE_CUBE,
2131
2132 .usage2 = usage2_layout
2133 },
2134
2135 .swizzle = panfrost_translate_swizzle_4(user_swizzle)
2136 };
2137
2138 //texture_descriptor.nr_mipmap_levels = last_level - first_level;
2139
2140 so->hw = texture_descriptor;
2141
2142 return (struct pipe_sampler_view *) so;
2143 }
2144
2145 static void
2146 panfrost_set_sampler_views(
2147 struct pipe_context *pctx,
2148 enum pipe_shader_type shader,
2149 unsigned start_slot, unsigned num_views,
2150 struct pipe_sampler_view **views)
2151 {
2152 struct panfrost_context *ctx = pan_context(pctx);
2153
2154 assert(start_slot == 0);
2155
2156 unsigned new_nr = 0;
2157 for (unsigned i = 0; i < num_views; ++i) {
2158 if (views[i])
2159 new_nr = i + 1;
2160 }
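/* new_nr is one past the last non-NULL view; e.g. binding
 * { view, NULL, view } counts three views, while trailing NULLs
 * shrink the count */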
2161
2162 ctx->sampler_view_count[shader] = new_nr;
2163 memcpy(ctx->sampler_views[shader], views, num_views * sizeof (void *));
2164
2165 ctx->dirty |= PAN_DIRTY_TEXTURES;
2166 }
2167
2168 static void
2169 panfrost_sampler_view_destroy(
2170 struct pipe_context *pctx,
2171 struct pipe_sampler_view *view)
2172 {
2173 pipe_resource_reference(&view->texture, NULL);
2174 free(view);
2175 }
2176
2177 static void
2178 panfrost_set_framebuffer_state(struct pipe_context *pctx,
2179 const struct pipe_framebuffer_state *fb)
2180 {
2181 struct panfrost_context *ctx = pan_context(pctx);
2182
2183 /* Flush when switching framebuffers, but not if the framebuffer
2184 * state is being restored by u_blitter
2185 */
2186
2187 bool is_scanout = panfrost_is_scanout(ctx);
2188 bool has_draws = ctx->draw_count > 0;
2189
2190 if (!ctx->blitter->running && (!is_scanout || has_draws)) {
2191 panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
2192 }
2193
2194 ctx->pipe_framebuffer.nr_cbufs = fb->nr_cbufs;
2195 ctx->pipe_framebuffer.samples = fb->samples;
2196 ctx->pipe_framebuffer.layers = fb->layers;
2197 ctx->pipe_framebuffer.width = fb->width;
2198 ctx->pipe_framebuffer.height = fb->height;
2199
2200 for (int i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
2201 struct pipe_surface *cb = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
2202
2203 /* check if changing cbuf */
2204 if (ctx->pipe_framebuffer.cbufs[i] == cb) continue;
2205
2206 if (cb && (i != 0)) {
2207 DBG("XXX: Multiple render targets not supported before t7xx!\n");
2208 assert(0);
2209 }
2210
2211 /* assign new */
2212 pipe_surface_reference(&ctx->pipe_framebuffer.cbufs[i], cb);
2213
2214 if (!cb)
2215 continue;
2216
2217 if (ctx->require_sfbd)
2218 ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
2219 else
2220 ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);
2221
2222 panfrost_attach_vt_framebuffer(ctx);
2223
2224 struct panfrost_resource *tex = ((struct panfrost_resource *) ctx->pipe_framebuffer.cbufs[i]->texture);
2225 enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
2226
2227 bool can_afbc = panfrost_format_supports_afbc(format);
2228 bool is_scanout = panfrost_is_scanout(ctx);
2229
2230 if (!is_scanout && tex->bo->layout != PAN_AFBC && can_afbc)
2231 panfrost_enable_afbc(ctx, tex, false);
2232
2233 if (!is_scanout && !tex->bo->has_checksum)
2234 panfrost_enable_checksum(ctx, tex);
2235 }
2236
2237 {
2238 struct pipe_surface *zb = fb->zsbuf;
2239
2240 if (ctx->pipe_framebuffer.zsbuf != zb) {
2241 pipe_surface_reference(&ctx->pipe_framebuffer.zsbuf, zb);
2242
2243 if (zb) {
2244 if (ctx->require_sfbd)
2245 ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
2246 else
2247 ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);
2248
2249 panfrost_attach_vt_framebuffer(ctx);
2250
2251 struct panfrost_resource *tex = pan_resource(zb->texture);
2252 bool can_afbc = panfrost_format_supports_afbc(zb->format);
2253 bool is_scanout = panfrost_is_scanout(ctx);
2254
2255 if (!is_scanout && tex->bo->layout != PAN_AFBC && can_afbc)
2256 panfrost_enable_afbc(ctx, tex, true);
2257 }
2258 }
2259 }
2260 }
2261
2262 static void *
2263 panfrost_create_blend_state(struct pipe_context *pipe,
2264 const struct pipe_blend_state *blend)
2265 {
2266 struct panfrost_context *ctx = pan_context(pipe);
2267 struct panfrost_blend_state *so = CALLOC_STRUCT(panfrost_blend_state);
2268 so->base = *blend;
2269
2270 /* TODO: The following features are not yet implemented */
2271 assert(!blend->logicop_enable);
2272 assert(!blend->alpha_to_coverage);
2273 assert(!blend->alpha_to_one);
2274
2275 /* Compile the blend state, first as fixed-function if we can */
2276
2277 if (panfrost_make_fixed_blend_mode(&blend->rt[0], so, blend->rt[0].colormask, &ctx->blend_color))
2278 return so;
2279
2280 /* If we can't, compile a blend shader instead */
2281
2282 panfrost_make_blend_shader(ctx, so, &ctx->blend_color);
2283
2284 return so;
2285 }
2286
2287 static void
2288 panfrost_bind_blend_state(struct pipe_context *pipe,
2289 void *cso)
2290 {
2291 struct panfrost_context *ctx = pan_context(pipe);
2292 struct pipe_blend_state *blend = (struct pipe_blend_state *) cso;
2293 struct panfrost_blend_state *pblend = (struct panfrost_blend_state *) cso;
2294 ctx->blend = pblend;
2295
2296 if (!blend)
2297 return;
2298
2299 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_DITHER, !blend->dither);
2300
2301 /* TODO: Attach color */
2302
2303 /* Shader itself is not dirty, but the shader core is */
2304 ctx->dirty |= PAN_DIRTY_FS;
2305 }
2306
2307 static void
2308 panfrost_delete_blend_state(struct pipe_context *pipe,
2309 void *blend)
2310 {
2311 struct panfrost_blend_state *so = (struct panfrost_blend_state *) blend;
2312
2313 if (so->has_blend_shader) {
2314 DBG("Deleting blend state leak blend shaders bytecode\n");
2315 }
2316
2317 free(blend);
2318 }
2319
2320 static void
2321 panfrost_set_blend_color(struct pipe_context *pipe,
2322 const struct pipe_blend_color *blend_color)
2323 {
2324 struct panfrost_context *ctx = pan_context(pipe);
2325
2326 /* If blend_color is NULL, we're unbinding, so ctx->blend_color is now undefined -> nothing to do */
2327
2328 if (blend_color) {
2329 ctx->blend_color = *blend_color;
2330
2331 /* The blend mode depends on the blend constant color, due to the
2332 * fixed/programmable split. So, we're forced to regenerate the blend
2333 * equation */
2334
2335 /* TODO: Attach color */
2336 }
2337 }
2338
2339 static void *
2340 panfrost_create_depth_stencil_state(struct pipe_context *pipe,
2341 const struct pipe_depth_stencil_alpha_state *depth_stencil)
2342 {
2343 return mem_dup(depth_stencil, sizeof(*depth_stencil));
2344 }
2345
2346 static void
2347 panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
2348 void *cso)
2349 {
2350 struct panfrost_context *ctx = pan_context(pipe);
2351 struct pipe_depth_stencil_alpha_state *depth_stencil = cso;
2352 ctx->depth_stencil = depth_stencil;
2353
2354 if (!depth_stencil)
2355 return;
2356
2357 /* The alpha test does not exist in the hardware (it was dropped from
2358 * ES 3), so it's emulated in the fragment shader */
2359
2360 if (depth_stencil->alpha.enabled) {
2361 /* We need to trigger a new shader (maybe) */
2362 ctx->base.bind_fs_state(&ctx->base, ctx->fs);
2363 }
2364
2365 /* Stencil state */
2366 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_STENCIL_TEST, depth_stencil->stencil[0].enabled); /* XXX: which one? */
2367
2368 panfrost_make_stencil_state(&depth_stencil->stencil[0], &ctx->fragment_shader_core.stencil_front);
2369 ctx->fragment_shader_core.stencil_mask_front = depth_stencil->stencil[0].writemask;
2370
2371 panfrost_make_stencil_state(&depth_stencil->stencil[1], &ctx->fragment_shader_core.stencil_back);
2372 ctx->fragment_shader_core.stencil_mask_back = depth_stencil->stencil[1].writemask;
2373
2374 /* Depth state (TODO: Refactor) */
2375 SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_DEPTH_TEST, depth_stencil->depth.enabled);
2376
2377 int func = depth_stencil->depth.enabled ? depth_stencil->depth.func : PIPE_FUNC_ALWAYS;
2378
2379 ctx->fragment_shader_core.unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
2380 ctx->fragment_shader_core.unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(func));
2381
2382 /* Bounds test not implemented */
2383 assert(!depth_stencil->depth.bounds_test);
2384
2385 ctx->dirty |= PAN_DIRTY_FS;
2386 }
2387
2388 static void
2389 panfrost_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
2390 {
2391 free( depth );
2392 }
2393
2394 static void
2395 panfrost_set_sample_mask(struct pipe_context *pipe,
2396 unsigned sample_mask)
2397 {
2398 }
2399
2400 static void
2401 panfrost_set_clip_state(struct pipe_context *pipe,
2402 const struct pipe_clip_state *clip)
2403 {
2404 //struct panfrost_context *panfrost = pan_context(pipe);
2405 }
2406
2407 static void
2408 panfrost_set_viewport_states(struct pipe_context *pipe,
2409 unsigned start_slot,
2410 unsigned num_viewports,
2411 const struct pipe_viewport_state *viewports)
2412 {
2413 struct panfrost_context *ctx = pan_context(pipe);
2414
2415 assert(start_slot == 0);
2416 assert(num_viewports == 1);
2417
2418 ctx->pipe_viewport = *viewports;
2419 }
2420
2421 static void
2422 panfrost_set_scissor_states(struct pipe_context *pipe,
2423 unsigned start_slot,
2424 unsigned num_scissors,
2425 const struct pipe_scissor_state *scissors)
2426 {
2427 struct panfrost_context *ctx = pan_context(pipe);
2428
2429 assert(start_slot == 0);
2430 assert(num_scissors == 1);
2431
2432 ctx->scissor = *scissors;
2433 }
2434
2435 static void
2436 panfrost_set_polygon_stipple(struct pipe_context *pipe,
2437 const struct pipe_poly_stipple *stipple)
2438 {
2439 //struct panfrost_context *panfrost = pan_context(pipe);
2440 }
2441
2442 static void
2443 panfrost_set_active_query_state(struct pipe_context *pipe,
2444 boolean enable)
2445 {
2446 //struct panfrost_context *panfrost = pan_context(pipe);
2447 }
2448
2449 static void
2450 panfrost_destroy(struct pipe_context *pipe)
2451 {
2452 struct panfrost_context *panfrost = pan_context(pipe);
2453 struct panfrost_screen *screen = pan_screen(pipe->screen);
2454
2455 if (panfrost->blitter)
2456 util_blitter_destroy(panfrost->blitter);
2457
2458 screen->driver->free_slab(screen, &panfrost->scratchpad);
2459 screen->driver->free_slab(screen, &panfrost->varying_mem);
2460 screen->driver->free_slab(screen, &panfrost->shaders);
2461 screen->driver->free_slab(screen, &panfrost->tiler_heap);
2462 screen->driver->free_slab(screen, &panfrost->tiler_polygon_list);
2463 }
2464
2465 static struct pipe_query *
2466 panfrost_create_query(struct pipe_context *pipe,
2467 unsigned type,
2468 unsigned index)
2469 {
2470 struct panfrost_query *q = CALLOC_STRUCT(panfrost_query);
2471
2472 q->type = type;
2473 q->index = index;
2474
2475 return (struct pipe_query *) q;
2476 }
2477
2478 static void
2479 panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
2480 {
2481 FREE(q);
2482 }
2483
2484 static boolean
2485 panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
2486 {
2487 struct panfrost_context *ctx = pan_context(pipe);
2488 struct panfrost_query *query = (struct panfrost_query *) q;
2489
2490 switch (query->type) {
2491 case PIPE_QUERY_OCCLUSION_COUNTER:
2492 case PIPE_QUERY_OCCLUSION_PREDICATE:
2493 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
2494 {
2495 /* Allocate a word for the query results to be stored */
2496 query->transfer = panfrost_allocate_chunk(ctx, sizeof(unsigned), HEAP_DESCRIPTOR);
2497
2498 ctx->occlusion_query = query;
2499
2500 break;
2501 }
2502
2503 default:
2504 DBG("Skipping query %d\n", query->type);
2505 break;
2506 }
2507
2508 return true;
2509 }
2510
2511 static bool
2512 panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
2513 {
2514 struct panfrost_context *ctx = pan_context(pipe);
2515 ctx->occlusion_query = NULL;
2516 return true;
2517 }
2518
2519 static boolean
2520 panfrost_get_query_result(struct pipe_context *pipe,
2521 struct pipe_query *q,
2522 boolean wait,
2523 union pipe_query_result *vresult)
2524 {
2525 /* STUB */
2526 struct panfrost_query *query = (struct panfrost_query *) q;
2527
2528 /* We need to flush out the jobs to actually run the counter. TODO:
2529 * honour the wait flag; TODO: wallpaper afterwards if needed */
2530
2531 panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);
2532
2533 switch (query->type) {
2534 case PIPE_QUERY_OCCLUSION_COUNTER:
2535 case PIPE_QUERY_OCCLUSION_PREDICATE:
2536 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
2537 /* Read back the query results */
2538 unsigned *result = (unsigned *) query->transfer.cpu;
2539 unsigned passed = *result;
2540
2541 if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
2542 vresult->u64 = passed;
2543 } else {
2544 vresult->b = !!passed;
2545 }
2546
2547 break;
2548 }
2549 default:
2550 DBG("Skipped query get %d\n", query->type);
2551 break;
2552 }
2553
2554 return true;
2555 }
2556
2557 static struct pipe_stream_output_target *
2558 panfrost_create_stream_output_target(struct pipe_context *pctx,
2559 struct pipe_resource *prsc,
2560 unsigned buffer_offset,
2561 unsigned buffer_size)
2562 {
2563 struct pipe_stream_output_target *target;
2564
2565 target = CALLOC_STRUCT(pipe_stream_output_target);
2566
2567 if (!target)
2568 return NULL;
2569
2570 pipe_reference_init(&target->reference, 1);
2571 pipe_resource_reference(&target->buffer, prsc);
2572
2573 target->context = pctx;
2574 target->buffer_offset = buffer_offset;
2575 target->buffer_size = buffer_size;
2576
2577 return target;
2578 }
2579
2580 static void
2581 panfrost_stream_output_target_destroy(struct pipe_context *pctx,
2582 struct pipe_stream_output_target *target)
2583 {
2584 pipe_resource_reference(&target->buffer, NULL);
2585 free(target);
2586 }
2587
2588 static void
2589 panfrost_set_stream_output_targets(struct pipe_context *pctx,
2590 unsigned num_targets,
2591 struct pipe_stream_output_target **targets,
2592 const unsigned *offsets)
2593 {
2594 /* STUB */
2595 }
2596
2597 static void
2598 panfrost_setup_hardware(struct panfrost_context *ctx)
2599 {
2600 struct pipe_context *gallium = (struct pipe_context *) ctx;
2601 struct panfrost_screen *screen = pan_screen(gallium->screen);
2602
2603 for (int i = 0; i < ARRAY_SIZE(ctx->transient_pools); ++i) {
2604 /* Allocate the beginning of the transient pool */
2605 int entry_size = (1 << 22); /* 4MB */
2606
2607 ctx->transient_pools[i].entry_size = entry_size;
2608 ctx->transient_pools[i].entry_count = 1;
2609
2610 ctx->transient_pools[i].entries[0] = (struct panfrost_memory_entry *) pb_slab_alloc(&screen->slabs, entry_size, HEAP_TRANSIENT);
2611 }
2612
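/* Assuming the slab allocator works in 4 KiB pages, the allocations
 * below work out to roughly: a 256 KiB scratchpad, 64 MiB of varying
 * memory, 16 MiB of executable shader memory, a 128 MiB growable
 * tiler heap, a 64 MiB growable polygon list, and one dummy page */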
2613 screen->driver->allocate_slab(screen, &ctx->scratchpad, 64, false, 0, 0, 0);
2614 screen->driver->allocate_slab(screen, &ctx->varying_mem, 16384, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_COHERENT_LOCAL, 0, 0);
2615 screen->driver->allocate_slab(screen, &ctx->shaders, 4096, true, PAN_ALLOCATE_EXECUTE, 0, 0);
2616 screen->driver->allocate_slab(screen, &ctx->tiler_heap, 32768, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
2617 screen->driver->allocate_slab(screen, &ctx->tiler_polygon_list, 128*128, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
2618 screen->driver->allocate_slab(screen, &ctx->tiler_dummy, 1, false, PAN_ALLOCATE_INVISIBLE, 0, 0);
2619
2620 }
2621
2622 /* New context creation, which also does hardware initialisation, since
2623 * I don't know a better way to structure this :smirk: */
2624
2625 struct pipe_context *
2626 panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
2627 {
2628 struct panfrost_context *ctx = CALLOC_STRUCT(panfrost_context);
2629 struct panfrost_screen *pscreen = pan_screen(screen);
2631 struct pipe_context *gallium = (struct pipe_context *) ctx;
2632 unsigned gpu_id;
2633
2634 gpu_id = pscreen->driver->query_gpu_version(pscreen);
2635
2636 ctx->is_t6xx = gpu_id <= 0x0750; /* For now, this flag means T760 or less */
2637 ctx->require_sfbd = gpu_id < 0x0750; /* T760 is the first to support MFBD */
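/* e.g. gpu_id 0x0620 (T620) sets both flags, 0x0750 (T760) still
 * counts as "t6xx" here but takes the MFBD path, and 0x0860 (T860)
 * clears both */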
2638
2639 gallium->screen = screen;
2640
2641 gallium->destroy = panfrost_destroy;
2642
2643 gallium->set_framebuffer_state = panfrost_set_framebuffer_state;
2644
2645 gallium->flush = panfrost_flush;
2646 gallium->clear = panfrost_clear;
2647 gallium->draw_vbo = panfrost_draw_vbo;
2648
2649 gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
2650 gallium->set_constant_buffer = panfrost_set_constant_buffer;
2651
2652 gallium->set_stencil_ref = panfrost_set_stencil_ref;
2653
2654 gallium->create_sampler_view = panfrost_create_sampler_view;
2655 gallium->set_sampler_views = panfrost_set_sampler_views;
2656 gallium->sampler_view_destroy = panfrost_sampler_view_destroy;
2657
2658 gallium->create_rasterizer_state = panfrost_create_rasterizer_state;
2659 gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
2660 gallium->delete_rasterizer_state = panfrost_generic_cso_delete;
2661
2662 gallium->create_vertex_elements_state = panfrost_create_vertex_elements_state;
2663 gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
2664 gallium->delete_vertex_elements_state = panfrost_generic_cso_delete;
2665
2666 gallium->create_fs_state = panfrost_create_shader_state;
2667 gallium->delete_fs_state = panfrost_delete_shader_state;
2668 gallium->bind_fs_state = panfrost_bind_fs_state;
2669
2670 gallium->create_vs_state = panfrost_create_shader_state;
2671 gallium->delete_vs_state = panfrost_delete_shader_state;
2672 gallium->bind_vs_state = panfrost_bind_vs_state;
2673
2674 gallium->create_sampler_state = panfrost_create_sampler_state;
2675 gallium->delete_sampler_state = panfrost_generic_cso_delete;
2676 gallium->bind_sampler_states = panfrost_bind_sampler_states;
2677
2678 gallium->create_blend_state = panfrost_create_blend_state;
2679 gallium->bind_blend_state = panfrost_bind_blend_state;
2680 gallium->delete_blend_state = panfrost_delete_blend_state;
2681
2682 gallium->set_blend_color = panfrost_set_blend_color;
2683
2684 gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
2685 gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
2686 gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;
2687
2688 gallium->set_sample_mask = panfrost_set_sample_mask;
2689
2690 gallium->set_clip_state = panfrost_set_clip_state;
2691 gallium->set_viewport_states = panfrost_set_viewport_states;
2692 gallium->set_scissor_states = panfrost_set_scissor_states;
2693 gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
2694 gallium->set_active_query_state = panfrost_set_active_query_state;
2695
2696 gallium->create_query = panfrost_create_query;
2697 gallium->destroy_query = panfrost_destroy_query;
2698 gallium->begin_query = panfrost_begin_query;
2699 gallium->end_query = panfrost_end_query;
2700 gallium->get_query_result = panfrost_get_query_result;
2701
2702 gallium->create_stream_output_target = panfrost_create_stream_output_target;
2703 gallium->stream_output_target_destroy = panfrost_stream_output_target_destroy;
2704 gallium->set_stream_output_targets = panfrost_set_stream_output_targets;
2705
2706 panfrost_resource_context_init(gallium);
2707
2708 pscreen->driver->init_context(ctx);
2709
2710 panfrost_setup_hardware(ctx);
2711
2712 /* XXX: leaks */
2713 gallium->stream_uploader = u_upload_create_default(gallium);
2714 gallium->const_uploader = gallium->stream_uploader;
2715 assert(gallium->stream_uploader);
2716
2717 /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */
2718 ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1;
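/* i.e. one bit for each primitive type from PIPE_PRIM_POINTS up to
 * and including PIPE_PRIM_POLYGON; everything else is lowered by
 * primconvert below */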
2719
2720 ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);
2721
2722 ctx->blitter = util_blitter_create(gallium);
2723 assert(ctx->blitter);
2724
2725 /* Prepare for render! */
2726
2727 panfrost_job_init(ctx);
2728 panfrost_emit_vertex_payload(ctx);
2729 panfrost_emit_tiler_payload(ctx);
2730 panfrost_invalidate_frame(ctx);
2731 panfrost_default_shader_backend(ctx);
2732 panfrost_generate_space_filler_indices();
2733
2734 return gallium;
2735 }