panfrost: Add a panfrost_sampler_desc_init() helper
[mesa.git] / src / gallium / drivers / panfrost / pan_context.c
1 /*
2 * © Copyright 2018 Alyssa Rosenzweig
3 * Copyright © 2014-2017 Broadcom
4 * Copyright (C) 2017 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 */
26
27 #include <sys/poll.h>
28 #include <errno.h>
29
30 #include "pan_bo.h"
31 #include "pan_context.h"
32 #include "pan_minmax_cache.h"
33 #include "panfrost-quirks.h"
34
35 #include "util/macros.h"
36 #include "util/format/u_format.h"
37 #include "util/u_inlines.h"
38 #include "util/u_upload_mgr.h"
39 #include "util/u_memory.h"
40 #include "util/u_vbuf.h"
41 #include "util/half_float.h"
42 #include "util/u_helpers.h"
43 #include "util/format/u_format.h"
44 #include "util/u_prim.h"
45 #include "util/u_prim_restart.h"
46 #include "indices/u_primconvert.h"
47 #include "tgsi/tgsi_parse.h"
48 #include "tgsi/tgsi_from_mesa.h"
49 #include "util/u_math.h"
50
51 #include "pan_screen.h"
52 #include "pan_blending.h"
53 #include "pan_blend_shaders.h"
54 #include "pan_cmdstream.h"
55 #include "pan_util.h"
56 #include "pandecode/decode.h"
57
58 struct midgard_tiler_descriptor
59 panfrost_emit_midg_tiler(struct panfrost_batch *batch, unsigned vertex_count)
60 {
61 struct panfrost_screen *screen = pan_screen(batch->ctx->base.screen);
62 bool hierarchy = !(screen->quirks & MIDGARD_NO_HIER_TILING);
63 struct midgard_tiler_descriptor t = {0};
64 unsigned height = batch->key.height;
65 unsigned width = batch->key.width;
66
67 t.hierarchy_mask =
68 panfrost_choose_hierarchy_mask(width, height, vertex_count, hierarchy);
69
70 /* Compute the polygon header size and use that to offset the body */
71
72 unsigned header_size = panfrost_tiler_header_size(
73 width, height, t.hierarchy_mask, hierarchy);
74
75 t.polygon_list_size = panfrost_tiler_full_size(
76 width, height, t.hierarchy_mask, hierarchy);
77
78 /* If there is geometry, allocate a real polygon list and tiler heap; otherwise fall back to dummies and disable the tiler */
79
80 if (vertex_count) {
81 struct panfrost_bo *tiler_heap;
82
83 tiler_heap = panfrost_batch_get_tiler_heap(batch);
84 t.polygon_list = panfrost_batch_get_polygon_list(batch,
85 header_size +
86 t.polygon_list_size);
87
88
89 /* Allow the entire tiler heap */
90 t.heap_start = tiler_heap->gpu;
91 t.heap_end = tiler_heap->gpu + tiler_heap->size;
92 } else {
93 struct panfrost_bo *tiler_dummy;
94
95 tiler_dummy = panfrost_batch_get_tiler_dummy(batch);
96 header_size = MALI_TILER_MINIMUM_HEADER_SIZE;
97
98 /* The tiler is disabled, so don't allow the tiler heap */
99 t.heap_start = tiler_dummy->gpu;
100 t.heap_end = t.heap_start;
101
102 /* Use a dummy polygon list */
103 t.polygon_list = tiler_dummy->gpu;
104
105 /* Disable the tiler */
106 if (hierarchy)
107 t.hierarchy_mask |= MALI_TILER_DISABLED;
108 else {
109 t.hierarchy_mask = MALI_TILER_USER;
110 t.polygon_list_size = MALI_TILER_MINIMUM_HEADER_SIZE + 4;
111
112 /* We don't have a WRITE_VALUE job, so write the polygon list manually */
113 uint32_t *polygon_list_body = (uint32_t *) (tiler_dummy->cpu + header_size);
114 polygon_list_body[0] = 0xa0000000; /* TODO: Just that? */
115 }
116 }
117
118 t.polygon_list_body =
119 t.polygon_list + header_size;
120
121 return t;
122 }
123
124 static void
125 panfrost_clear(
126 struct pipe_context *pipe,
127 unsigned buffers,
128 const union pipe_color_union *color,
129 double depth, unsigned stencil)
130 {
131 struct panfrost_context *ctx = pan_context(pipe);
132
133 /* TODO: panfrost_get_fresh_batch_for_fbo() instantiates a new batch if
134 * the existing batch targeting this FBO has draws. We could probably
135 * avoid that by replacing plain clears by quad-draws with a specific
136 * color/depth/stencil value, thus avoiding the generation of extra
137 * fragment jobs.
138 */
139 struct panfrost_batch *batch = panfrost_get_fresh_batch_for_fbo(ctx);
140
141 panfrost_batch_add_fbo_bos(batch);
142 panfrost_batch_clear(batch, buffers, color, depth, stencil);
143 }
144
145 /* Reset per-frame context, called on context initialisation as well as after
146 * flushing a frame */
147
148 void
149 panfrost_invalidate_frame(struct panfrost_context *ctx)
150 {
151 for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i)
152 ctx->payloads[i].postfix.shared_memory = 0;
153
154 /* TODO: When does this need to be handled? */
155 ctx->active_queries = true;
156 }
157
158 /* In practice, every field of these payloads should be configurable
159 * arbitrarily, which means these functions are basically catch-alls for
160 * as-of-yet unwavering unknowns */
161
162 static void
163 panfrost_emit_vertex_payload(struct panfrost_context *ctx)
164 {
165 /* 0x2 bit clear on 32-bit T6XX */
166
167 struct midgard_payload_vertex_tiler payload = {
168 .gl_enables = 0x4 | 0x2,
169 };
170
171 /* Vertex and compute are closely coupled, so share a payload */
172
173 memcpy(&ctx->payloads[PIPE_SHADER_VERTEX], &payload, sizeof(payload));
174 memcpy(&ctx->payloads[PIPE_SHADER_COMPUTE], &payload, sizeof(payload));
175 }
176
177 bool
178 panfrost_writes_point_size(struct panfrost_context *ctx)
179 {
180 assert(ctx->shader[PIPE_SHADER_VERTEX]);
181 struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
182
183 return vs->writes_point_size && ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode == MALI_POINTS;
184 }
185
186 /* Stage the attribute descriptors so we can adjust src_offset
187 * to let BOs align nicely */
188
189 static void
190 panfrost_stage_attributes(struct panfrost_context *ctx)
191 {
192 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
193 struct panfrost_vertex_state *so = ctx->vertex;
194
195 size_t sz = sizeof(struct mali_attr_meta) * PAN_MAX_ATTRIBUTE;
196 struct panfrost_transfer transfer = panfrost_allocate_transient(batch, sz);
197 struct mali_attr_meta *target = (struct mali_attr_meta *) transfer.cpu;
198
199 /* Copy as-is for the first pass */
200 memcpy(target, so->hw, sz);
201
202 /* Fixup offsets for the second pass. Recall that the hardware
203 * calculates attribute addresses as:
204 *
205 * addr = base + (stride * vtx) + src_offset;
206 *
207 * However, on Mali, base must be aligned to 64-bytes, so we
208 * instead let:
209 *
210 * base' = base & ~63 = base - (base & 63)
211 *
212 * To compensate when using base' (see emit_vertex_data), we have
213 * to adjust src_offset by the masked off piece:
214 *
215 * addr' = base' + (stride * vtx) + (src_offset + (base & 63))
216 * = base - (base & 63) + (stride * vtx) + src_offset + (base & 63)
217 * = base + (stride * vtx) + src_offset
218 * = addr;
219 *
220 * QED.
221 */
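/* Worked example (hypothetical numbers): base = 0x1007 and src_offset = 4
 * give base' = 0x1000 and src_offset' = 4 + (0x1007 & 63) = 11, so
 * base' + src_offset' = 0x100b = base + src_offset, as required. */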
222
223 unsigned start = ctx->payloads[PIPE_SHADER_VERTEX].offset_start;
224
225 for (unsigned i = 0; i < so->num_elements; ++i) {
226 unsigned vbi = so->pipe[i].vertex_buffer_index;
227 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
228 struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);
229 mali_ptr addr = rsrc->bo->gpu + buf->buffer_offset;
230
231 /* Adjust by the masked off bits of the offset. Make sure we
232 * read src_offset from so->hw (which is not GPU visible)
233 * rather than target (which is) due to caching effects */
234
235 unsigned src_offset = so->hw[i].src_offset;
236 src_offset += (addr & 63);
237
238 /* Also, somewhat obscurely, per-instance data needs to be
239 * offset in response to a delayed start in an indexed draw */
240
241 if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
242 src_offset -= buf->stride * start;
243
244 target[i].src_offset = src_offset;
245 }
246
247 /* Let's also include vertex builtins */
248
249 struct mali_attr_meta builtin = {
250 .format = MALI_R32UI,
251 .swizzle = panfrost_get_default_swizzle(1)
252 };
253
254 /* See mali_attr_meta specification for the magic number */
255
256 builtin.index = so->vertexid_index;
257 memcpy(&target[PAN_VERTEX_ID], &builtin, 4);
258
259 builtin.index = so->vertexid_index + 1;
260 memcpy(&target[PAN_INSTANCE_ID], &builtin, 4);
261
262 ctx->payloads[PIPE_SHADER_VERTEX].postfix.attribute_meta = transfer.gpu;
263 }
264
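/* Upload each stage's sampler descriptors into transient memory and point
 * that stage's payload at the uploaded array (or at 0 if no samplers are
 * bound) */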
265 static void
266 panfrost_upload_sampler_descriptors(struct panfrost_context *ctx)
267 {
268 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
269 size_t desc_size = sizeof(struct mali_sampler_descriptor);
270
271 for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
272 mali_ptr upload = 0;
273
274 if (ctx->sampler_count[t]) {
275 size_t transfer_size = desc_size * ctx->sampler_count[t];
276
277 struct panfrost_transfer transfer =
278 panfrost_allocate_transient(batch, transfer_size);
279
280 struct mali_sampler_descriptor *desc =
281 (struct mali_sampler_descriptor *) transfer.cpu;
282
283 for (int i = 0; i < ctx->sampler_count[t]; ++i)
284 desc[i] = ctx->samplers[t][i]->hw;
285
286 upload = transfer.gpu;
287 }
288
289 ctx->payloads[t].postfix.sampler_descriptor = upload;
290 }
291 }
292
293 static mali_ptr
294 panfrost_upload_tex(
295 struct panfrost_context *ctx,
296 enum pipe_shader_type st,
297 struct panfrost_sampler_view *view)
298 {
299 if (!view)
300 return (mali_ptr) 0;
301
302 struct pipe_sampler_view *pview = &view->base;
303 struct panfrost_resource *rsrc = pan_resource(pview->texture);
304
305 /* Add the BO to the job so it's retained until the job is done. */
306 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
307
308 panfrost_batch_add_bo(batch, rsrc->bo,
309 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
310 panfrost_bo_access_for_stage(st));
311
312 panfrost_batch_add_bo(batch, view->bo,
313 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
314 panfrost_bo_access_for_stage(st));
315
316 return view->bo->gpu;
317 }
318
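/* For each stage, upload an array of pointers to the per-view texture
 * descriptors (the "trampoline") and record it in the stage's payload */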
319 static void
320 panfrost_upload_texture_descriptors(struct panfrost_context *ctx)
321 {
322 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
323
324 for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
325 mali_ptr trampoline = 0;
326
327 if (ctx->sampler_view_count[t]) {
328 uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
329
330 for (int i = 0; i < ctx->sampler_view_count[t]; ++i)
331 trampolines[i] =
332 panfrost_upload_tex(ctx, t, ctx->sampler_views[t][i]);
333
334 trampoline = panfrost_upload_transient(batch, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]);
335 }
336
337 ctx->payloads[t].postfix.texture_trampoline = trampoline;
338 }
339 }
340
341 /* Compute number of UBOs active (more specifically, compute the highest UBO
342 * number addressable -- if there are gaps, include them in the count anyway).
343 * We always include UBO #0 in the count, since we *need* uniforms enabled for
344 * sysvals. */
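/* For instance, an enabled_mask of 0b1010 becomes 0b1011 after OR-ing in UBO
 * #0, giving 32 - clz(0b1011) = 4: UBOs #0..#3, including the gap at #2 */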
345
346 unsigned
347 panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage)
348 {
349 unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1;
350 return 32 - __builtin_clz(mask);
351 }
352
353 /* Go through dirty flags and actualise them in the cmdstream. */
354
355 static void
356 panfrost_emit_for_draw(struct panfrost_context *ctx)
357 {
358 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
359
360 panfrost_batch_add_fbo_bos(batch);
361
362 for (int i = 0; i <= PIPE_SHADER_FRAGMENT; ++i)
363 panfrost_vt_attach_framebuffer(ctx, &ctx->payloads[i]);
364
365 panfrost_emit_vertex_data(batch);
366
367 /* Varyings emitted for -all- geometry */
368 unsigned total_count = ctx->padded_count * ctx->instance_count;
369 panfrost_emit_varying_descriptor(ctx, total_count);
370
371 panfrost_batch_set_requirements(batch);
372
373 panfrost_vt_update_rasterizer(ctx, &ctx->payloads[PIPE_SHADER_FRAGMENT]);
374 panfrost_vt_update_occlusion_query(ctx, &ctx->payloads[PIPE_SHADER_FRAGMENT]);
375
376 panfrost_emit_shader_meta(batch, PIPE_SHADER_VERTEX,
377 &ctx->payloads[PIPE_SHADER_VERTEX]);
378 panfrost_emit_shader_meta(batch, PIPE_SHADER_FRAGMENT,
379 &ctx->payloads[PIPE_SHADER_FRAGMENT]);
380
381 /* We stage to transient, so always dirty.. */
382 if (ctx->vertex)
383 panfrost_stage_attributes(ctx);
384
385 panfrost_upload_sampler_descriptors(ctx);
386 panfrost_upload_texture_descriptors(ctx);
387
388 for (int i = 0; i <= PIPE_SHADER_FRAGMENT; ++i)
389 panfrost_emit_const_buf(batch, i, &ctx->payloads[i]);
390
391 /* TODO: Upload the viewport somewhere more appropriate */
392
393 panfrost_emit_viewport(batch, &ctx->payloads[PIPE_SHADER_FRAGMENT]);
394 }
395
396 /* Corresponds to exactly one draw, but does not submit anything */
397
398 static void
399 panfrost_queue_draw(struct panfrost_context *ctx)
400 {
401 /* Handle dirty flags now */
402 panfrost_emit_for_draw(ctx);
403
404 /* If rasterizer discard is enabled, only submit the vertex job */
405
406 bool rasterizer_discard = ctx->rasterizer
407 && ctx->rasterizer->base.rasterizer_discard;
408
409
410 struct midgard_payload_vertex_tiler *vertex_payload = &ctx->payloads[PIPE_SHADER_VERTEX];
411 struct midgard_payload_vertex_tiler *tiler_payload = &ctx->payloads[PIPE_SHADER_FRAGMENT];
412
413 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
414 bool wallpapering = ctx->wallpaper_batch && batch->tiler_dep;
415
416 if (wallpapering) {
417 /* Inject in reverse order, with "predicted" job indices. THIS IS A HACK XXX */
418 panfrost_new_job(batch, JOB_TYPE_TILER, false, batch->job_index + 2, tiler_payload, sizeof(*tiler_payload), true);
419 panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0, vertex_payload, sizeof(*vertex_payload), true);
420 } else {
421 unsigned vertex = panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0, vertex_payload, sizeof(*vertex_payload), false);
422
423 if (!rasterizer_discard)
424 panfrost_new_job(batch, JOB_TYPE_TILER, false, vertex, tiler_payload, sizeof(*tiler_payload), false);
425 }
426
427 panfrost_batch_adjust_stack_size(batch);
428 }
429
430 /* The entire frame is in memory -- send it off to the kernel! */
431
432 void
433 panfrost_flush(
434 struct pipe_context *pipe,
435 struct pipe_fence_handle **fence,
436 unsigned flags)
437 {
438 struct panfrost_context *ctx = pan_context(pipe);
439 struct util_dynarray fences;
440
441 /* We must collect the fences before the flush is done, otherwise we'll
442 * lose track of them.
443 */
444 if (fence) {
445 util_dynarray_init(&fences, NULL);
446 hash_table_foreach(ctx->batches, hentry) {
447 struct panfrost_batch *batch = hentry->data;
448
449 panfrost_batch_fence_reference(batch->out_sync);
450 util_dynarray_append(&fences,
451 struct panfrost_batch_fence *,
452 batch->out_sync);
453 }
454 }
455
456 /* Submit all pending jobs */
457 panfrost_flush_all_batches(ctx, false);
458
459 if (fence) {
460 struct panfrost_fence *f = panfrost_fence_create(ctx, &fences);
461 pipe->screen->fence_reference(pipe->screen, fence, NULL);
462 *fence = (struct pipe_fence_handle *)f;
463
464 util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence)
465 panfrost_batch_fence_unreference(*fence);
466
467 util_dynarray_fini(&fences);
468 }
469
470 if (pan_debug & PAN_DBG_TRACE)
471 pandecode_next_frame();
472 }
473
474 #define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c;
475
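/* Translate a Gallium primitive topology into the corresponding Mali draw
 * mode; the supported modes correspond by name, so a case table suffices */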
476 static int
477 g2m_draw_mode(enum pipe_prim_type mode)
478 {
479 switch (mode) {
480 DEFINE_CASE(POINTS);
481 DEFINE_CASE(LINES);
482 DEFINE_CASE(LINE_LOOP);
483 DEFINE_CASE(LINE_STRIP);
484 DEFINE_CASE(TRIANGLES);
485 DEFINE_CASE(TRIANGLE_STRIP);
486 DEFINE_CASE(TRIANGLE_FAN);
487 DEFINE_CASE(QUADS);
488 DEFINE_CASE(QUAD_STRIP);
489 DEFINE_CASE(POLYGON);
490
491 default:
492 unreachable("Invalid draw mode");
493 }
494 }
495
496 #undef DEFINE_CASE
497
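/* Map an index size in bytes (1, 2 or 4) to the matching MALI_DRAW_INDEXED_*
 * draw flag */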
498 static unsigned
499 panfrost_translate_index_size(unsigned size)
500 {
501 switch (size) {
502 case 1:
503 return MALI_DRAW_INDEXED_UINT8;
504
505 case 2:
506 return MALI_DRAW_INDEXED_UINT16;
507
508 case 4:
509 return MALI_DRAW_INDEXED_UINT32;
510
511 default:
512 unreachable("Invalid index size");
513 }
514 }
515
516 /* Gets a GPU address for the associated index buffer. Only guaranteed to be
517 * good for the duration of the draw (transient), though it could last longer. Also get
518 * the bounds on the index buffer for the range accessed by the draw. We do
519 * these operations together because there are natural optimizations which
520 * require them to be together. */
521
522 static mali_ptr
523 panfrost_get_index_buffer_bounded(struct panfrost_context *ctx, const struct pipe_draw_info *info, unsigned *min_index, unsigned *max_index)
524 {
525 struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);
526
527 off_t offset = info->start * info->index_size;
528 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
529 mali_ptr out = 0;
530
531 bool needs_indices = true;
532
533 if (info->max_index != ~0u) {
534 *min_index = info->min_index;
535 *max_index = info->max_index;
536 needs_indices = false;
537 }
538
539 if (!info->has_user_indices) {
540 /* Only resources can be directly mapped */
541 panfrost_batch_add_bo(batch, rsrc->bo,
542 PAN_BO_ACCESS_SHARED |
543 PAN_BO_ACCESS_READ |
544 PAN_BO_ACCESS_VERTEX_TILER);
545 out = rsrc->bo->gpu + offset;
546
547 /* Check the cache */
548 needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache, info->start, info->count,
549 min_index, max_index);
550 } else {
551 /* Otherwise, we need to upload to transient memory */
552 const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
553 out = panfrost_upload_transient(batch, ibuf8 + offset, info->count * info->index_size);
554 }
555
556 if (needs_indices) {
557 /* Fallback */
558 u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
559
560 if (!info->has_user_indices) {
561 panfrost_minmax_cache_add(rsrc->index_cache, info->start, info->count,
562 *min_index, *max_index);
563 }
564 }
565
566
567 return out;
568 }
569
570 static bool
571 panfrost_scissor_culls_everything(struct panfrost_context *ctx)
572 {
573 const struct pipe_scissor_state *ss = &ctx->scissor;
574
575 /* Check if we're scissoring at all */
576
577 if (!(ctx->rasterizer && ctx->rasterizer->base.scissor))
578 return false;
579
580 return (ss->minx == ss->maxx) || (ss->miny == ss->maxy);
581 }
582
583 /* Count generated primitives (when there are no geom/tess shaders) for
584 * transform feedback */
585
586 static void
587 panfrost_statistics_record(
588 struct panfrost_context *ctx,
589 const struct pipe_draw_info *info)
590 {
591 if (!ctx->active_queries)
592 return;
593
594 uint32_t prims = u_prims_for_vertices(info->mode, info->count);
595 ctx->prims_generated += prims;
596
597 if (!ctx->streamout.num_targets)
598 return;
599
600 ctx->tf_prims_generated += prims;
601 }
602
603 static void
604 panfrost_draw_vbo(
605 struct pipe_context *pipe,
606 const struct pipe_draw_info *info)
607 {
608 struct panfrost_context *ctx = pan_context(pipe);
609
610 /* First of all, check the scissor to see if anything is drawn at all.
611 * If it's not, we drop the draw (mostly a conformance issue;
612 * well-behaved apps shouldn't hit this) */
613
614 if (panfrost_scissor_culls_everything(ctx))
615 return;
616
617 int mode = info->mode;
618
619 /* Fallback unsupported restart index */
620 unsigned primitive_index = (1 << (info->index_size * 8)) - 1;
621
622 if (info->primitive_restart && info->index_size
623 && info->restart_index != primitive_index) {
624 util_draw_vbo_without_prim_restart(pipe, info);
625 return;
626 }
627
628 /* Fallback for unsupported modes */
629
630 assert(ctx->rasterizer != NULL);
631
632 if (!(ctx->draw_modes & (1 << mode))) {
633 if (mode == PIPE_PRIM_QUADS && info->count == 4 && !ctx->rasterizer->base.flatshade) {
634 mode = PIPE_PRIM_TRIANGLE_FAN;
635 } else {
636 if (info->count < 4) {
637 /* Degenerate case? */
638 return;
639 }
640
641 util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
642 util_primconvert_draw_vbo(ctx->primconvert, info);
643 return;
644 }
645 }
646
647 ctx->payloads[PIPE_SHADER_VERTEX].offset_start = info->start;
648 ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = info->start;
649
650 /* Now that we have a guaranteed terminating path, find the job.
651 * Assignment commented out to prevent unused warning */
652
653 /* struct panfrost_batch *batch = */ panfrost_get_batch_for_fbo(ctx);
654
655 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode = g2m_draw_mode(mode);
656
657 /* Take into account a negative bias */
658 ctx->vertex_count = info->count + abs(info->index_bias);
659 ctx->instance_count = info->instance_count;
660 ctx->active_prim = info->mode;
661
662 /* For non-indexed draws, they're the same */
663 unsigned vertex_count = ctx->vertex_count;
664
665 unsigned draw_flags = 0;
666
667 /* The draw flags determine how the primitive size is interpreted */
668
669 if (panfrost_writes_point_size(ctx))
670 draw_flags |= MALI_DRAW_VARYING_SIZE;
671
672 if (info->primitive_restart)
673 draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
674
675 /* These don't make much sense */
676
677 draw_flags |= 0x3000;
678
679 if (ctx->rasterizer && ctx->rasterizer->base.flatshade_first)
680 draw_flags |= MALI_DRAW_FLATSHADE_FIRST;
681
682 panfrost_statistics_record(ctx, info);
683
684 if (info->index_size) {
685 unsigned min_index = 0, max_index = 0;
686 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices =
687 panfrost_get_index_buffer_bounded(ctx, info, &min_index, &max_index);
688
689 /* Use the corresponding values */
690 vertex_count = max_index - min_index + 1;
691 ctx->payloads[PIPE_SHADER_VERTEX].offset_start = min_index + info->index_bias;
692 ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = min_index + info->index_bias;
693
694 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = -min_index;
695 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(info->count);
696
697 draw_flags |= panfrost_translate_index_size(info->index_size);
698 } else {
699 /* With no indexing applied, the index count equals the vertex count, as
700 * if the geometry were internally indexed in the expected order */
701
702 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = 0;
703 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(ctx->vertex_count);
704
705 /* Reverse index state */
706 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices = (mali_ptr) 0;
707 }
708
709 /* Dispatch "compute jobs" for the vertex/tiler pair as (1,
710 * vertex_count, 1) */
711
712 panfrost_pack_work_groups_fused(
713 &ctx->payloads[PIPE_SHADER_VERTEX].prefix,
714 &ctx->payloads[PIPE_SHADER_FRAGMENT].prefix,
715 1, vertex_count, info->instance_count,
716 1, 1, 1);
717
718 ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.unknown_draw = draw_flags;
719
720 /* Encode the padded vertex count */
721
722 if (info->instance_count > 1) {
723 ctx->padded_count = panfrost_padded_vertex_count(vertex_count);
724
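/* The padded count decomposes as (2k + 1) << shift, which is what the
 * instance_shift/instance_odd fields encode below. For example (hypothetical
 * numbers), padded_count = 24 = 3 << 3 gives shift = 3 and k = 1, since
 * 3 = 2 * 1 + 1. */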
725 unsigned shift = __builtin_ctz(ctx->padded_count);
726 unsigned k = ctx->padded_count >> (shift + 1);
727
728 ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = shift;
729 ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = shift;
730
731 ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = k;
732 ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = k;
733 } else {
734 ctx->padded_count = vertex_count;
735
736 /* Reset instancing state */
737 ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = 0;
738 ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = 0;
739 ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = 0;
740 ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = 0;
741 }
742
743 /* Fire off the draw itself */
744 panfrost_queue_draw(ctx);
745
746 /* Increment transform feedback offsets */
747
748 for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
749 unsigned output_count = u_stream_outputs_for_vertices(
750 ctx->active_prim, ctx->vertex_count);
751
752 ctx->streamout.offsets[i] += output_count;
753 }
754 }
755
756 /* CSO state */
757
758 static void
759 panfrost_generic_cso_delete(struct pipe_context *pctx, void *hwcso)
760 {
761 free(hwcso);
762 }
763
764 static void *
765 panfrost_create_rasterizer_state(
766 struct pipe_context *pctx,
767 const struct pipe_rasterizer_state *cso)
768 {
769 struct panfrost_rasterizer *so = CALLOC_STRUCT(panfrost_rasterizer);
770
771 so->base = *cso;
772
773 return so;
774 }
775
776 static void
777 panfrost_bind_rasterizer_state(
778 struct pipe_context *pctx,
779 void *hwcso)
780 {
781 struct panfrost_context *ctx = pan_context(pctx);
782
783 ctx->rasterizer = hwcso;
784
785 if (!hwcso)
786 return;
787
788 /* Guaranteed with the core GL call, so don't expose ARB_polygon_offset */
789 assert(ctx->rasterizer->base.offset_clamp == 0.0);
790
791 /* Point sprites are emulated */
792
793 struct panfrost_shader_state *variant = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
794
795 if (ctx->rasterizer->base.sprite_coord_enable || (variant && variant->point_sprite_mask))
796 ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
797 }
798
799 static void *
800 panfrost_create_vertex_elements_state(
801 struct pipe_context *pctx,
802 unsigned num_elements,
803 const struct pipe_vertex_element *elements)
804 {
805 struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state);
806
807 so->num_elements = num_elements;
808 memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
809
810 for (int i = 0; i < num_elements; ++i) {
811 so->hw[i].index = i;
812
813 enum pipe_format fmt = elements[i].src_format;
814 const struct util_format_description *desc = util_format_description(fmt);
815 so->hw[i].unknown1 = 0x2;
816 so->hw[i].swizzle = panfrost_get_default_swizzle(desc->nr_channels);
817
818 so->hw[i].format = panfrost_find_format(desc);
819
820 /* The field itself should probably be shifted over */
821 so->hw[i].src_offset = elements[i].src_offset;
822 }
823
824 return so;
825 }
826
827 static void
828 panfrost_bind_vertex_elements_state(
829 struct pipe_context *pctx,
830 void *hwcso)
831 {
832 struct panfrost_context *ctx = pan_context(pctx);
833 ctx->vertex = hwcso;
834 }
835
836 static void *
837 panfrost_create_shader_state(
838 struct pipe_context *pctx,
839 const struct pipe_shader_state *cso,
840 enum pipe_shader_type stage)
841 {
842 struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
843 so->base = *cso;
844
845 /* Token deep copy to prevent memory corruption */
846
847 if (cso->type == PIPE_SHADER_IR_TGSI)
848 so->base.tokens = tgsi_dup_tokens(so->base.tokens);
849
850 /* Precompile for shader-db if we need to */
851 if (unlikely((pan_debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) {
852 struct panfrost_context *ctx = pan_context(pctx);
853
854 struct panfrost_shader_state state;
855 uint64_t outputs_written;
856
857 panfrost_shader_compile(ctx, PIPE_SHADER_IR_NIR,
858 so->base.ir.nir,
859 tgsi_processor_to_shader_stage(stage),
860 &state, &outputs_written);
861 }
862
863 return so;
864 }
865
866 static void
867 panfrost_delete_shader_state(
868 struct pipe_context *pctx,
869 void *so)
870 {
871 struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;
872
873 if (cso->base.type == PIPE_SHADER_IR_TGSI) {
874 DBG("Deleting TGSI shader leaks duplicated tokens\n");
875 }
876
877 for (unsigned i = 0; i < cso->variant_count; ++i) {
878 struct panfrost_shader_state *shader_state = &cso->variants[i];
879 panfrost_bo_unreference(shader_state->bo);
880 shader_state->bo = NULL;
881 }
882 free(cso->variants);
883
884 free(so);
885 }
886
887 static void *
888 panfrost_create_sampler_state(
889 struct pipe_context *pctx,
890 const struct pipe_sampler_state *cso)
891 {
892 struct panfrost_sampler_state *so = CALLOC_STRUCT(panfrost_sampler_state);
893 so->base = *cso;
894
895 panfrost_sampler_desc_init(cso, &so->hw);
896
897 return so;
898 }
899
900 static void
901 panfrost_bind_sampler_states(
902 struct pipe_context *pctx,
903 enum pipe_shader_type shader,
904 unsigned start_slot, unsigned num_sampler,
905 void **sampler)
906 {
907 assert(start_slot == 0);
908
909 struct panfrost_context *ctx = pan_context(pctx);
910
911 /* XXX: Should upload, not just copy? */
912 ctx->sampler_count[shader] = num_sampler;
913 memcpy(ctx->samplers[shader], sampler, num_sampler * sizeof (void *));
914 }
915
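/* Check whether an existing shader variant is compatible with the current
 * fragment-affecting state (alpha test and point sprite configuration) */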
916 static bool
917 panfrost_variant_matches(
918 struct panfrost_context *ctx,
919 struct panfrost_shader_state *variant,
920 enum pipe_shader_type type)
921 {
922 struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base;
923 struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;
924
925 bool is_fragment = (type == PIPE_SHADER_FRAGMENT);
926
927 if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) {
928 /* Make sure enable state is at least the same */
929 if (alpha->enabled != variant->alpha_state.enabled) {
930 return false;
931 }
932
933 /* Check that the contents of the test are the same */
934 bool same_func = alpha->func == variant->alpha_state.func;
935 bool same_ref = alpha->ref_value == variant->alpha_state.ref_value;
936
937 if (!(same_func && same_ref)) {
938 return false;
939 }
940 }
941
942 if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable |
943 variant->point_sprite_mask)) {
944 /* Ensure the same varyings are turned to point sprites */
945 if (rasterizer->sprite_coord_enable != variant->point_sprite_mask)
946 return false;
947
948 /* Ensure the orientation is correct */
949 bool upper_left =
950 rasterizer->sprite_coord_mode ==
951 PIPE_SPRITE_COORD_UPPER_LEFT;
952
953 if (variant->point_sprite_upper_left != upper_left)
954 return false;
955 }
956
957 /* Otherwise, we're good to go */
958 return true;
959 }
960
961 /**
962 * Fix an uncompiled shader's stream output info, and produce a bitmask
963 * of which VARYING_SLOT_* are captured for stream output.
964 *
965 * Core Gallium stores output->register_index as a "slot" number, where
966 * slots are assigned consecutively to all outputs in info->outputs_written.
967 * This naive packing of outputs doesn't work for us - we too have slots,
968 * but the layout is defined by the VUE map, which we won't have until we
969 * compile a specific shader variant. So, we remap these and simply store
970 * VARYING_SLOT_* in our copy's output->register_index fields.
971 *
972 * We then produce a bitmask of outputs which are used for SO.
973 *
974 * Implementation from iris.
975 */
976
977 static uint64_t
978 update_so_info(struct pipe_stream_output_info *so_info,
979 uint64_t outputs_written)
980 {
981 uint64_t so_outputs = 0;
982 uint8_t reverse_map[64] = {0};
983 unsigned slot = 0;
984
985 while (outputs_written)
986 reverse_map[slot++] = u_bit_scan64(&outputs_written);
987
988 for (unsigned i = 0; i < so_info->num_outputs; i++) {
989 struct pipe_stream_output *output = &so_info->output[i];
990
991 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
992 output->register_index = reverse_map[output->register_index];
993
994 so_outputs |= 1ull << output->register_index;
995 }
996
997 return so_outputs;
998 }
999
1000 static void
1001 panfrost_bind_shader_state(
1002 struct pipe_context *pctx,
1003 void *hwcso,
1004 enum pipe_shader_type type)
1005 {
1006 struct panfrost_context *ctx = pan_context(pctx);
1007 ctx->shader[type] = hwcso;
1008
1009 if (!hwcso) return;
1010
1011 /* Match the appropriate variant */
1012
1013 signed variant = -1;
1014 struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;
1015
1016 for (unsigned i = 0; i < variants->variant_count; ++i) {
1017 if (panfrost_variant_matches(ctx, &variants->variants[i], type)) {
1018 variant = i;
1019 break;
1020 }
1021 }
1022
1023 if (variant == -1) {
1024 /* No variant matched, so create a new one */
1025 variant = variants->variant_count++;
1026
1027 if (variants->variant_count > variants->variant_space) {
1028 unsigned old_space = variants->variant_space;
1029
1030 variants->variant_space *= 2;
1031 if (variants->variant_space == 0)
1032 variants->variant_space = 1;
1033
1034 /* Arbitrary limit to stop runaway programs from
1035 * creating an unbounded number of shader variants. */
1036 assert(variants->variant_space < 1024);
1037
1038 unsigned msize = sizeof(struct panfrost_shader_state);
1039 variants->variants = realloc(variants->variants,
1040 variants->variant_space * msize);
1041
1042 memset(&variants->variants[old_space], 0,
1043 (variants->variant_space - old_space) * msize);
1044 }
1045
1046 struct panfrost_shader_state *v =
1047 &variants->variants[variant];
1048
1049 if (type == PIPE_SHADER_FRAGMENT) {
1050 v->alpha_state = ctx->depth_stencil->alpha;
1051
1052 if (ctx->rasterizer) {
1053 v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable;
1054 v->point_sprite_upper_left =
1055 ctx->rasterizer->base.sprite_coord_mode ==
1056 PIPE_SPRITE_COORD_UPPER_LEFT;
1057 }
1058 }
1059 }
1060
1061 /* Select this variant */
1062 variants->active_variant = variant;
1063
1064 struct panfrost_shader_state *shader_state = &variants->variants[variant];
1065 assert(panfrost_variant_matches(ctx, shader_state, type));
1066
1067 /* We finally have a variant, so compile it */
1068
1069 if (!shader_state->compiled) {
1070 uint64_t outputs_written = 0;
1071
1072 panfrost_shader_compile(ctx, variants->base.type,
1073 variants->base.type == PIPE_SHADER_IR_NIR ?
1074 variants->base.ir.nir :
1075 variants->base.tokens,
1076 tgsi_processor_to_shader_stage(type),
1077 shader_state,
1078 &outputs_written);
1079
1080 shader_state->compiled = true;
1081
1082 /* Fixup the stream out information, since what Gallium returns
1083 * normally is mildly insane */
1084
1085 shader_state->stream_output = variants->base.stream_output;
1086 shader_state->so_mask =
1087 update_so_info(&shader_state->stream_output, outputs_written);
1088 }
1089 }
1090
1091 static void *
1092 panfrost_create_vs_state(struct pipe_context *pctx, const struct pipe_shader_state *hwcso)
1093 {
1094 return panfrost_create_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
1095 }
1096
1097 static void *
1098 panfrost_create_fs_state(struct pipe_context *pctx, const struct pipe_shader_state *hwcso)
1099 {
1100 return panfrost_create_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
1101 }
1102
1103 static void
1104 panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
1105 {
1106 panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
1107 }
1108
1109 static void
1110 panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
1111 {
1112 panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
1113 }
1114
1115 static void
1116 panfrost_set_vertex_buffers(
1117 struct pipe_context *pctx,
1118 unsigned start_slot,
1119 unsigned num_buffers,
1120 const struct pipe_vertex_buffer *buffers)
1121 {
1122 struct panfrost_context *ctx = pan_context(pctx);
1123
1124 util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers, start_slot, num_buffers);
1125 }
1126
1127 static void
1128 panfrost_set_constant_buffer(
1129 struct pipe_context *pctx,
1130 enum pipe_shader_type shader, uint index,
1131 const struct pipe_constant_buffer *buf)
1132 {
1133 struct panfrost_context *ctx = pan_context(pctx);
1134 struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];
1135
1136 util_copy_constant_buffer(&pbuf->cb[index], buf);
1137
1138 unsigned mask = (1 << index);
1139
1140 if (unlikely(!buf)) {
1141 pbuf->enabled_mask &= ~mask;
1142 pbuf->dirty_mask &= ~mask;
1143 return;
1144 }
1145
1146 pbuf->enabled_mask |= mask;
1147 pbuf->dirty_mask |= mask;
1148 }
1149
1150 static void
1151 panfrost_set_stencil_ref(
1152 struct pipe_context *pctx,
1153 const struct pipe_stencil_ref *ref)
1154 {
1155 struct panfrost_context *ctx = pan_context(pctx);
1156 ctx->stencil_ref = *ref;
1157 }
1158
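/* Collapse Gallium texture targets into the hardware's texture
 * dimensionalities (1D, 2D, 3D, cube) */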
1159 static enum mali_texture_type
1160 panfrost_translate_texture_type(enum pipe_texture_target t) {
1161 switch (t)
1162 {
1163 case PIPE_BUFFER:
1164 case PIPE_TEXTURE_1D:
1165 case PIPE_TEXTURE_1D_ARRAY:
1166 return MALI_TEX_1D;
1167
1168 case PIPE_TEXTURE_2D:
1169 case PIPE_TEXTURE_2D_ARRAY:
1170 case PIPE_TEXTURE_RECT:
1171 return MALI_TEX_2D;
1172
1173 case PIPE_TEXTURE_3D:
1174 return MALI_TEX_3D;
1175
1176 case PIPE_TEXTURE_CUBE:
1177 case PIPE_TEXTURE_CUBE_ARRAY:
1178 return MALI_TEX_CUBE;
1179
1180 default:
1181 unreachable("Unknown target");
1182 }
1183 }
1184
1185 static struct pipe_sampler_view *
1186 panfrost_create_sampler_view(
1187 struct pipe_context *pctx,
1188 struct pipe_resource *texture,
1189 const struct pipe_sampler_view *template)
1190 {
1191 struct panfrost_screen *screen = pan_screen(pctx->screen);
1192 struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view);
1193
1194 pipe_reference(NULL, &texture->reference);
1195
1196 struct panfrost_resource *prsrc = (struct panfrost_resource *) texture;
1197 assert(prsrc->bo);
1198
1199 so->base = *template;
1200 so->base.texture = texture;
1201 so->base.reference.count = 1;
1202 so->base.context = pctx;
1203
1204 unsigned char user_swizzle[4] = {
1205 template->swizzle_r,
1206 template->swizzle_g,
1207 template->swizzle_b,
1208 template->swizzle_a
1209 };
1210
1211 /* In the hardware, array_size refers specifically to array textures,
1212 * whereas in Gallium, it also covers cubemaps */
1213
1214 unsigned array_size = texture->array_size;
1215
1216 if (template->target == PIPE_TEXTURE_CUBE) {
1217 /* TODO: Cubemap arrays */
1218 assert(array_size == 6);
1219 array_size /= 6;
1220 }
1221
1222 enum mali_texture_type type =
1223 panfrost_translate_texture_type(template->target);
1224
1225 unsigned size = panfrost_estimate_texture_size(
1226 template->u.tex.first_level,
1227 template->u.tex.last_level,
1228 template->u.tex.first_layer,
1229 template->u.tex.last_layer,
1230 type, prsrc->layout);
1231
1232 so->bo = panfrost_bo_create(screen, size, 0);
1233
1234 panfrost_new_texture(
1235 so->bo->cpu,
1236 texture->width0, texture->height0,
1237 texture->depth0, array_size,
1238 template->format,
1239 type, prsrc->layout,
1240 template->u.tex.first_level,
1241 template->u.tex.last_level,
1242 template->u.tex.first_layer,
1243 template->u.tex.last_layer,
1244 prsrc->cubemap_stride,
1245 panfrost_translate_swizzle_4(user_swizzle),
1246 prsrc->bo->gpu,
1247 prsrc->slices);
1248
1249 return (struct pipe_sampler_view *) so;
1250 }
1251
1252 static void
1253 panfrost_set_sampler_views(
1254 struct pipe_context *pctx,
1255 enum pipe_shader_type shader,
1256 unsigned start_slot, unsigned num_views,
1257 struct pipe_sampler_view **views)
1258 {
1259 struct panfrost_context *ctx = pan_context(pctx);
1260 unsigned new_nr = 0;
1261 unsigned i;
1262
1263 assert(start_slot == 0);
1264
1265 for (i = 0; i < num_views; ++i) {
1266 if (views[i])
1267 new_nr = i + 1;
1268 pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
1269 views[i]);
1270 }
1271
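/* Unreference any previously-bound views past the new count */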
1272 for (; i < ctx->sampler_view_count[shader]; i++) {
1273 pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
1274 NULL);
1275 }
1276 ctx->sampler_view_count[shader] = new_nr;
1277 }
1278
1279 static void
1280 panfrost_sampler_view_destroy(
1281 struct pipe_context *pctx,
1282 struct pipe_sampler_view *pview)
1283 {
1284 struct panfrost_sampler_view *view = (struct panfrost_sampler_view *) pview;
1285
1286 pipe_resource_reference(&pview->texture, NULL);
1287 panfrost_bo_unreference(view->bo);
1288 ralloc_free(view);
1289 }
1290
1291 static void
1292 panfrost_set_shader_buffers(
1293 struct pipe_context *pctx,
1294 enum pipe_shader_type shader,
1295 unsigned start, unsigned count,
1296 const struct pipe_shader_buffer *buffers,
1297 unsigned writable_bitmask)
1298 {
1299 struct panfrost_context *ctx = pan_context(pctx);
1300
1301 util_set_shader_buffers_mask(ctx->ssbo[shader], &ctx->ssbo_mask[shader],
1302 buffers, start, count);
1303 }
1304
1305 /* Hints that a framebuffer should use AFBC where possible */
1306
1307 static void
1308 panfrost_hint_afbc(
1309 struct panfrost_screen *screen,
1310 const struct pipe_framebuffer_state *fb)
1311 {
1312 /* AFBC implementation incomplete; hide it */
1313 if (!(pan_debug & PAN_DBG_AFBC)) return;
1314
1315 /* Hint AFBC to the resources bound to each color buffer */
1316
1317 for (unsigned i = 0; i < fb->nr_cbufs; ++i) {
1318 struct pipe_surface *surf = fb->cbufs[i];
1319 struct panfrost_resource *rsrc = pan_resource(surf->texture);
1320 panfrost_resource_hint_layout(screen, rsrc, MALI_TEXTURE_AFBC, 1);
1321 }
1322
1323 /* Also hint it to the depth buffer */
1324
1325 if (fb->zsbuf) {
1326 struct panfrost_resource *rsrc = pan_resource(fb->zsbuf->texture);
1327 panfrost_resource_hint_layout(screen, rsrc, MALI_TEXTURE_AFBC, 1);
1328 }
1329 }
1330
1331 static void
1332 panfrost_set_framebuffer_state(struct pipe_context *pctx,
1333 const struct pipe_framebuffer_state *fb)
1334 {
1335 struct panfrost_context *ctx = pan_context(pctx);
1336
1337 panfrost_hint_afbc(pan_screen(pctx->screen), fb);
1338 util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
1339 ctx->batch = NULL;
1340 panfrost_invalidate_frame(ctx);
1341 }
1342
1343 static void *
1344 panfrost_create_depth_stencil_state(struct pipe_context *pipe,
1345 const struct pipe_depth_stencil_alpha_state *depth_stencil)
1346 {
1347 return mem_dup(depth_stencil, sizeof(*depth_stencil));
1348 }
1349
1350 static void
1351 panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
1352 void *cso)
1353 {
1354 struct panfrost_context *ctx = pan_context(pipe);
1355 struct pipe_depth_stencil_alpha_state *depth_stencil = cso;
1356 ctx->depth_stencil = depth_stencil;
1357
1358 if (!depth_stencil)
1359 return;
1360
1361 /* The alpha test does not exist in the hardware (it's not in ES3), so it's
1362 * emulated in the fragment shader */
1363
1364 if (depth_stencil->alpha.enabled) {
1365 /* We need to trigger a new shader (maybe) */
1366 ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
1367 }
1368
1369 /* Bounds test not implemented */
1370 assert(!depth_stencil->depth.bounds_test);
1371 }
1372
1373 static void
1374 panfrost_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
1375 {
1376 free( depth );
1377 }
1378
1379 static void
1380 panfrost_set_sample_mask(struct pipe_context *pipe,
1381 unsigned sample_mask)
1382 {
1383 }
1384
1385 static void
1386 panfrost_set_clip_state(struct pipe_context *pipe,
1387 const struct pipe_clip_state *clip)
1388 {
1389 //struct panfrost_context *panfrost = pan_context(pipe);
1390 }
1391
1392 static void
1393 panfrost_set_viewport_states(struct pipe_context *pipe,
1394 unsigned start_slot,
1395 unsigned num_viewports,
1396 const struct pipe_viewport_state *viewports)
1397 {
1398 struct panfrost_context *ctx = pan_context(pipe);
1399
1400 assert(start_slot == 0);
1401 assert(num_viewports == 1);
1402
1403 ctx->pipe_viewport = *viewports;
1404 }
1405
1406 static void
1407 panfrost_set_scissor_states(struct pipe_context *pipe,
1408 unsigned start_slot,
1409 unsigned num_scissors,
1410 const struct pipe_scissor_state *scissors)
1411 {
1412 struct panfrost_context *ctx = pan_context(pipe);
1413
1414 assert(start_slot == 0);
1415 assert(num_scissors == 1);
1416
1417 ctx->scissor = *scissors;
1418 }
1419
1420 static void
1421 panfrost_set_polygon_stipple(struct pipe_context *pipe,
1422 const struct pipe_poly_stipple *stipple)
1423 {
1424 //struct panfrost_context *panfrost = pan_context(pipe);
1425 }
1426
1427 static void
1428 panfrost_set_active_query_state(struct pipe_context *pipe,
1429 bool enable)
1430 {
1431 struct panfrost_context *ctx = pan_context(pipe);
1432 ctx->active_queries = enable;
1433 }
1434
1435 static void
1436 panfrost_destroy(struct pipe_context *pipe)
1437 {
1438 struct panfrost_context *panfrost = pan_context(pipe);
1439
1440 if (panfrost->blitter)
1441 util_blitter_destroy(panfrost->blitter);
1442
1443 if (panfrost->blitter_wallpaper)
1444 util_blitter_destroy(panfrost->blitter_wallpaper);
1445
1446 util_unreference_framebuffer_state(&panfrost->pipe_framebuffer);
1447 u_upload_destroy(pipe->stream_uploader);
1448
1449 ralloc_free(pipe);
1450 }
1451
1452 static struct pipe_query *
1453 panfrost_create_query(struct pipe_context *pipe,
1454 unsigned type,
1455 unsigned index)
1456 {
1457 struct panfrost_query *q = rzalloc(pipe, struct panfrost_query);
1458
1459 q->type = type;
1460 q->index = index;
1461
1462 return (struct pipe_query *) q;
1463 }
1464
1465 static void
1466 panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
1467 {
1468 struct panfrost_query *query = (struct panfrost_query *) q;
1469
1470 if (query->bo) {
1471 panfrost_bo_unreference(query->bo);
1472 query->bo = NULL;
1473 }
1474
1475 ralloc_free(q);
1476 }
1477
1478 static bool
1479 panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
1480 {
1481 struct panfrost_context *ctx = pan_context(pipe);
1482 struct panfrost_query *query = (struct panfrost_query *) q;
1483
1484 switch (query->type) {
1485 case PIPE_QUERY_OCCLUSION_COUNTER:
1486 case PIPE_QUERY_OCCLUSION_PREDICATE:
1487 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
1488 /* Allocate a bo for the query results to be stored */
1489 if (!query->bo) {
1490 query->bo = panfrost_bo_create(
1491 pan_screen(ctx->base.screen),
1492 sizeof(unsigned), 0);
1493 }
1494
1495 unsigned *result = (unsigned *)query->bo->cpu;
1496 *result = 0; /* Default to 0 if nothing at all drawn. */
1497 ctx->occlusion_query = query;
1498 break;
1499
1500 /* Geometry statistics are computed in the driver. XXX: geom/tess
1501 * shaders.. */
1502
1503 case PIPE_QUERY_PRIMITIVES_GENERATED:
1504 query->start = ctx->prims_generated;
1505 break;
1506 case PIPE_QUERY_PRIMITIVES_EMITTED:
1507 query->start = ctx->tf_prims_generated;
1508 break;
1509
1510 default:
1511 DBG("Skipping query %u\n", query->type);
1512 break;
1513 }
1514
1515 return true;
1516 }
1517
1518 static bool
1519 panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
1520 {
1521 struct panfrost_context *ctx = pan_context(pipe);
1522 struct panfrost_query *query = (struct panfrost_query *) q;
1523
1524 switch (query->type) {
1525 case PIPE_QUERY_OCCLUSION_COUNTER:
1526 case PIPE_QUERY_OCCLUSION_PREDICATE:
1527 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
1528 ctx->occlusion_query = NULL;
1529 break;
1530 case PIPE_QUERY_PRIMITIVES_GENERATED:
1531 query->end = ctx->prims_generated;
1532 break;
1533 case PIPE_QUERY_PRIMITIVES_EMITTED:
1534 query->end = ctx->tf_prims_generated;
1535 break;
1536 }
1537
1538 return true;
1539 }
1540
1541 static bool
1542 panfrost_get_query_result(struct pipe_context *pipe,
1543 struct pipe_query *q,
1544 bool wait,
1545 union pipe_query_result *vresult)
1546 {
1547 struct panfrost_query *query = (struct panfrost_query *) q;
1548 struct panfrost_context *ctx = pan_context(pipe);
1549
1550
1551 switch (query->type) {
1552 case PIPE_QUERY_OCCLUSION_COUNTER:
1553 case PIPE_QUERY_OCCLUSION_PREDICATE:
1554 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
1555 /* Flush first */
1556 panfrost_flush_all_batches(ctx, true);
1557
1558 /* Read back the query results */
1559 unsigned *result = (unsigned *) query->bo->cpu;
1560 unsigned passed = *result;
1561
1562 if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
1563 vresult->u64 = passed;
1564 } else {
1565 vresult->b = !!passed;
1566 }
1567
1568 break;
1569
1570 case PIPE_QUERY_PRIMITIVES_GENERATED:
1571 case PIPE_QUERY_PRIMITIVES_EMITTED:
1572 panfrost_flush_all_batches(ctx, true);
1573 vresult->u64 = query->end - query->start;
1574 break;
1575
1576 default:
1577 DBG("Skipped query get %u\n", query->type);
1578 break;
1579 }
1580
1581 return true;
1582 }
1583
1584 static struct pipe_stream_output_target *
1585 panfrost_create_stream_output_target(struct pipe_context *pctx,
1586 struct pipe_resource *prsc,
1587 unsigned buffer_offset,
1588 unsigned buffer_size)
1589 {
1590 struct pipe_stream_output_target *target;
1591
1592 target = rzalloc(pctx, struct pipe_stream_output_target);
1593
1594 if (!target)
1595 return NULL;
1596
1597 pipe_reference_init(&target->reference, 1);
1598 pipe_resource_reference(&target->buffer, prsc);
1599
1600 target->context = pctx;
1601 target->buffer_offset = buffer_offset;
1602 target->buffer_size = buffer_size;
1603
1604 return target;
1605 }
1606
1607 static void
1608 panfrost_stream_output_target_destroy(struct pipe_context *pctx,
1609 struct pipe_stream_output_target *target)
1610 {
1611 pipe_resource_reference(&target->buffer, NULL);
1612 ralloc_free(target);
1613 }
1614
1615 static void
1616 panfrost_set_stream_output_targets(struct pipe_context *pctx,
1617 unsigned num_targets,
1618 struct pipe_stream_output_target **targets,
1619 const unsigned *offsets)
1620 {
1621 struct panfrost_context *ctx = pan_context(pctx);
1622 struct panfrost_streamout *so = &ctx->streamout;
1623
1624 assert(num_targets <= ARRAY_SIZE(so->targets));
1625
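/* Per Gallium semantics, an offset of -1 means append: keep the current
 * offset rather than resetting it */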
1626 for (unsigned i = 0; i < num_targets; i++) {
1627 if (offsets[i] != -1)
1628 so->offsets[i] = offsets[i];
1629
1630 pipe_so_target_reference(&so->targets[i], targets[i]);
1631 }
1632
1633 for (unsigned i = 0; i < so->num_targets; i++)
1634 pipe_so_target_reference(&so->targets[i], NULL);
1635
1636 so->num_targets = num_targets;
1637 }
1638
1639 struct pipe_context *
1640 panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
1641 {
1642 struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context);
1643 struct pipe_context *gallium = (struct pipe_context *) ctx;
1644
1645 gallium->screen = screen;
1646
1647 gallium->destroy = panfrost_destroy;
1648
1649 gallium->set_framebuffer_state = panfrost_set_framebuffer_state;
1650
1651 gallium->flush = panfrost_flush;
1652 gallium->clear = panfrost_clear;
1653 gallium->draw_vbo = panfrost_draw_vbo;
1654
1655 gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
1656 gallium->set_constant_buffer = panfrost_set_constant_buffer;
1657 gallium->set_shader_buffers = panfrost_set_shader_buffers;
1658
1659 gallium->set_stencil_ref = panfrost_set_stencil_ref;
1660
1661 gallium->create_sampler_view = panfrost_create_sampler_view;
1662 gallium->set_sampler_views = panfrost_set_sampler_views;
1663 gallium->sampler_view_destroy = panfrost_sampler_view_destroy;
1664
1665 gallium->create_rasterizer_state = panfrost_create_rasterizer_state;
1666 gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
1667 gallium->delete_rasterizer_state = panfrost_generic_cso_delete;
1668
1669 gallium->create_vertex_elements_state = panfrost_create_vertex_elements_state;
1670 gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
1671 gallium->delete_vertex_elements_state = panfrost_generic_cso_delete;
1672
1673 gallium->create_fs_state = panfrost_create_fs_state;
1674 gallium->delete_fs_state = panfrost_delete_shader_state;
1675 gallium->bind_fs_state = panfrost_bind_fs_state;
1676
1677 gallium->create_vs_state = panfrost_create_vs_state;
1678 gallium->delete_vs_state = panfrost_delete_shader_state;
1679 gallium->bind_vs_state = panfrost_bind_vs_state;
1680
1681 gallium->create_sampler_state = panfrost_create_sampler_state;
1682 gallium->delete_sampler_state = panfrost_generic_cso_delete;
1683 gallium->bind_sampler_states = panfrost_bind_sampler_states;
1684
1685 gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
1686 gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
1687 gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;
1688
1689 gallium->set_sample_mask = panfrost_set_sample_mask;
1690
1691 gallium->set_clip_state = panfrost_set_clip_state;
1692 gallium->set_viewport_states = panfrost_set_viewport_states;
1693 gallium->set_scissor_states = panfrost_set_scissor_states;
1694 gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
1695 gallium->set_active_query_state = panfrost_set_active_query_state;
1696
1697 gallium->create_query = panfrost_create_query;
1698 gallium->destroy_query = panfrost_destroy_query;
1699 gallium->begin_query = panfrost_begin_query;
1700 gallium->end_query = panfrost_end_query;
1701 gallium->get_query_result = panfrost_get_query_result;
1702
1703 gallium->create_stream_output_target = panfrost_create_stream_output_target;
1704 gallium->stream_output_target_destroy = panfrost_stream_output_target_destroy;
1705 gallium->set_stream_output_targets = panfrost_set_stream_output_targets;
1706
1707 panfrost_resource_context_init(gallium);
1708 panfrost_blend_context_init(gallium);
1709 panfrost_compute_context_init(gallium);
1710
1711 /* XXX: leaks */
1712 gallium->stream_uploader = u_upload_create_default(gallium);
1713 gallium->const_uploader = gallium->stream_uploader;
1714 assert(gallium->stream_uploader);
1715
1716 /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */
1717 ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1;
1718
1719 ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);
1720
1721 ctx->blitter = util_blitter_create(gallium);
1722 ctx->blitter_wallpaper = util_blitter_create(gallium);
1723
1724 assert(ctx->blitter);
1725 assert(ctx->blitter_wallpaper);
1726
1727 /* Prepare for render! */
1728
1729 panfrost_batch_init(ctx);
1730 panfrost_emit_vertex_payload(ctx);
1731 panfrost_invalidate_frame(ctx);
1732
1733 return gallium;
1734 }