/*
 * © Copyright 2018 Alyssa Rosenzweig
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
31 #include "pan_context.h"
32 #include "pan_minmax_cache.h"
33 #include "panfrost-quirks.h"
35 #include "util/macros.h"
36 #include "util/format/u_format.h"
37 #include "util/u_inlines.h"
38 #include "util/u_upload_mgr.h"
39 #include "util/u_memory.h"
40 #include "util/u_vbuf.h"
41 #include "util/half_float.h"
42 #include "util/u_helpers.h"
43 #include "util/format/u_format.h"
44 #include "util/u_prim.h"
45 #include "util/u_prim_restart.h"
46 #include "indices/u_primconvert.h"
47 #include "tgsi/tgsi_parse.h"
48 #include "tgsi/tgsi_from_mesa.h"
49 #include "util/u_math.h"
51 #include "pan_screen.h"
52 #include "pan_blending.h"
53 #include "pan_blend_shaders.h"
54 #include "pan_cmdstream.h"
56 #include "pandecode/decode.h"
struct midgard_tiler_descriptor
panfrost_emit_midg_tiler(struct panfrost_batch *batch, unsigned vertex_count)
{
        struct panfrost_screen *screen = pan_screen(batch->ctx->base.screen);
        bool hierarchy = !(screen->quirks & MIDGARD_NO_HIER_TILING);
        struct midgard_tiler_descriptor t = {0};
        unsigned height = batch->key.height;
        unsigned width = batch->key.width;

        t.hierarchy_mask =
                panfrost_choose_hierarchy_mask(width, height, vertex_count, hierarchy);

        /* Compute the polygon header size and use that to offset the body */

        unsigned header_size = panfrost_tiler_header_size(
                                       width, height, t.hierarchy_mask, hierarchy);

        t.polygon_list_size = panfrost_tiler_full_size(
                                       width, height, t.hierarchy_mask, hierarchy);

        if (vertex_count) {
                struct panfrost_bo *tiler_heap;

                tiler_heap = panfrost_batch_get_tiler_heap(batch);
                t.polygon_list = panfrost_batch_get_polygon_list(batch,
                                                                 header_size + t.polygon_list_size);

                /* Allow the entire tiler heap */
                t.heap_start = tiler_heap->gpu;
                t.heap_end = tiler_heap->gpu + tiler_heap->size;
        } else {
                struct panfrost_bo *tiler_dummy;

                tiler_dummy = panfrost_batch_get_tiler_dummy(batch);
                header_size = MALI_TILER_MINIMUM_HEADER_SIZE;

                /* The tiler is disabled, so don't allow the tiler heap */
                t.heap_start = tiler_dummy->gpu;
                t.heap_end = t.heap_start;

                /* Use a dummy polygon list */
                t.polygon_list = tiler_dummy->gpu;

                /* Disable the tiler */
                if (hierarchy) {
                        t.hierarchy_mask |= MALI_TILER_DISABLED;
                } else {
                        t.hierarchy_mask = MALI_TILER_USER;
                        t.polygon_list_size = MALI_TILER_MINIMUM_HEADER_SIZE + 4;

                        /* We don't have a WRITE_VALUE job, so write the polygon list manually */
                        uint32_t *polygon_list_body = (uint32_t *) (tiler_dummy->cpu + header_size);
                        polygon_list_body[0] = 0xa0000000; /* TODO: Just that? */
                }
        }

        t.polygon_list_body =
                t.polygon_list + header_size;

        return t;
}
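
/* Illustrative sketch, not driver code: the descriptor built above always
 * describes one allocation holding the hierarchy header immediately followed
 * by the polygon list body, with well-ordered heap bounds. A hypothetical
 * consistency check (pan_tiler_descriptor_consistent is our name, not a
 * Panfrost API) would look like: */

static inline bool
pan_tiler_descriptor_consistent(const struct midgard_tiler_descriptor *t,
                                unsigned header_size)
{
        /* Body sits right after the header in the same allocation */
        return (t->polygon_list_body == t->polygon_list + header_size) &&
               (t->heap_start <= t->heap_end);
}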
static void
panfrost_clear(
        struct pipe_context *pipe,
        unsigned buffers,
        const union pipe_color_union *color,
        double depth, unsigned stencil)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* TODO: panfrost_get_fresh_batch_for_fbo() instantiates a new batch if
         * the existing batch targeting this FBO has draws. We could probably
         * avoid that by replacing plain clears by quad-draws with a specific
         * color/depth/stencil value, thus avoiding the generation of extra
         * fragment jobs. */
        struct panfrost_batch *batch = panfrost_get_fresh_batch_for_fbo(ctx);

        panfrost_batch_add_fbo_bos(batch);
        panfrost_batch_clear(batch, buffers, color, depth, stencil);
}
/* Reset per-frame context, called on context initialisation as well as after
 * flushing a frame */

void
panfrost_invalidate_frame(struct panfrost_context *ctx)
{
        for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i)
                ctx->payloads[i].postfix.shared_memory = 0;

        /* TODO: When does this need to be handled? */
        ctx->active_queries = true;
}
/* In practice, every field of these payloads should be configurable
 * arbitrarily, which means these functions are basically catch-alls for
 * as-of-yet unwavering unknowns */

static void
panfrost_emit_vertex_payload(struct panfrost_context *ctx)
{
        /* 0x2 bit clear on 32-bit T6XX */

        struct midgard_payload_vertex_tiler payload = {
                .gl_enables = 0x4 | 0x2,
        };

        /* Vertex and compute are closely coupled, so share a payload */

        memcpy(&ctx->payloads[PIPE_SHADER_VERTEX], &payload, sizeof(payload));
        memcpy(&ctx->payloads[PIPE_SHADER_COMPUTE], &payload, sizeof(payload));
}
bool
panfrost_writes_point_size(struct panfrost_context *ctx)
{
        assert(ctx->shader[PIPE_SHADER_VERTEX]);
        struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);

        return vs->writes_point_size && ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode == MALI_POINTS;
}
/* Stage the attribute descriptors so we can adjust src_offset
 * to let BOs align nicely */

static void
panfrost_stage_attributes(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        struct panfrost_vertex_state *so = ctx->vertex;

        size_t sz = sizeof(struct mali_attr_meta) * PAN_MAX_ATTRIBUTE;
        struct panfrost_transfer transfer = panfrost_allocate_transient(batch, sz);
        struct mali_attr_meta *target = (struct mali_attr_meta *) transfer.cpu;

        /* Copy as-is for the first pass */
        memcpy(target, so->hw, sz);

        /* Fixup offsets for the second pass. Recall that the hardware
         * calculates attribute addresses as:
         *
         *      addr = base + (stride * vtx) + src_offset;
         *
         * However, on Mali, base must be aligned to 64-bytes, so we
         * instead let:
         *
         *      base' = base & ~63 = base - (base & 63)
         *
         * To compensate when using base' (see emit_vertex_data), we have
         * to adjust src_offset by the masked off piece:
         *
         *      addr' = base' + (stride * vtx) + (src_offset + (base & 63))
         *            = base - (base & 63) + (stride * vtx) + src_offset + (base & 63)
         *            = base + (stride * vtx) + src_offset
         *            = addr
         */

        unsigned start = ctx->payloads[PIPE_SHADER_VERTEX].offset_start;

        for (unsigned i = 0; i < so->num_elements; ++i) {
                unsigned vbi = so->pipe[i].vertex_buffer_index;
                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
                struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);
                mali_ptr addr = rsrc->bo->gpu + buf->buffer_offset;

                /* Adjust by the masked off bits of the offset. Make sure we
                 * read src_offset from so->hw (which is not GPU visible)
                 * rather than target (which is) due to caching effects */

                unsigned src_offset = so->hw[i].src_offset;
                src_offset += (addr & 63);

                /* Also, somewhat obscurely, per-instance data needs to be
                 * offset in response to a delayed start in an indexed draw */

                if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
                        src_offset -= buf->stride * start;

                target[i].src_offset = src_offset;
        }

        /* Let's also include vertex builtins */

        struct mali_attr_meta builtin = {
                .format = MALI_R32UI,
                .swizzle = panfrost_get_default_swizzle(1)
        };

        /* See mali_attr_meta specification for the magic number */

        builtin.index = so->vertexid_index;
        memcpy(&target[PAN_VERTEX_ID], &builtin, 4);

        builtin.index = so->vertexid_index + 1;
        memcpy(&target[PAN_INSTANCE_ID], &builtin, 4);

        ctx->payloads[PIPE_SHADER_VERTEX].postfix.attribute_meta = transfer.gpu;
}
static void
panfrost_upload_sampler_descriptors(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        size_t desc_size = sizeof(struct mali_sampler_descriptor);

        for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
                mali_ptr upload = 0;

                if (ctx->sampler_count[t]) {
                        size_t transfer_size = desc_size * ctx->sampler_count[t];

                        struct panfrost_transfer transfer =
                                panfrost_allocate_transient(batch, transfer_size);

                        struct mali_sampler_descriptor *desc =
                                (struct mali_sampler_descriptor *) transfer.cpu;

                        for (int i = 0; i < ctx->sampler_count[t]; ++i)
                                desc[i] = ctx->samplers[t][i]->hw;

                        upload = transfer.gpu;
                }

                ctx->payloads[t].postfix.sampler_descriptor = upload;
        }
}
static mali_ptr
panfrost_upload_tex(
        struct panfrost_context *ctx,
        enum pipe_shader_type st,
        struct panfrost_sampler_view *view)
{
        if (!view)
                return (mali_ptr) 0;

        struct pipe_sampler_view *pview = &view->base;
        struct panfrost_resource *rsrc = pan_resource(pview->texture);

        /* Add the BO to the job so it's retained until the job is done. */
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        panfrost_batch_add_bo(batch, rsrc->bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        panfrost_batch_add_bo(batch, view->bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        return view->bo->gpu;
}
static void
panfrost_upload_texture_descriptors(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
                mali_ptr trampoline = 0;

                if (ctx->sampler_view_count[t]) {
                        uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];

                        for (int i = 0; i < ctx->sampler_view_count[t]; ++i)
                                trampolines[i] =
                                        panfrost_upload_tex(ctx, t, ctx->sampler_views[t][i]);

                        trampoline = panfrost_upload_transient(batch, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]);
                }

                ctx->payloads[t].postfix.texture_trampoline = trampoline;
        }
}
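
/* Resulting layout (illustrative): for N bound sampler views, the trampoline
 * is a transient array of N 64-bit texture descriptor pointers, and the
 * payload's texture_trampoline field points at that array:
 *
 *      texture_trampoline -> [ tex0 descriptor GPU address,
 *                              tex1 descriptor GPU address, ... ]
 */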
/* Compute number of UBOs active (more specifically, compute the highest UBO
 * number addressable -- if there are gaps, include them in the count anyway).
 * We always include UBO #0 in the count, since we *need* uniforms enabled for
 * sysvals */

unsigned
panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage)
{
        unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1;
        return 32 - __builtin_clz(mask);
}
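
/* Example (illustrative): with UBOs 0 and 3 enabled, mask = 0b1001,
 * __builtin_clz(0b1001) = 28 for a 32-bit int, and the function returns
 * 32 - 28 = 4 -- the gaps at indices 1 and 2 are counted, exactly as the
 * comment above describes. */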
/* Go through dirty flags and actualise them in the cmdstream. */

static void
panfrost_emit_for_draw(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        panfrost_batch_add_fbo_bos(batch);

        for (int i = 0; i <= PIPE_SHADER_FRAGMENT; ++i)
                panfrost_vt_attach_framebuffer(ctx, &ctx->payloads[i]);

        panfrost_emit_vertex_data(batch);

        /* Varyings emitted for -all- geometry */
        unsigned total_count = ctx->padded_count * ctx->instance_count;
        panfrost_emit_varying_descriptor(ctx, total_count);

        panfrost_batch_set_requirements(batch);

        panfrost_vt_update_rasterizer(ctx, &ctx->payloads[PIPE_SHADER_FRAGMENT]);
        panfrost_vt_update_occlusion_query(ctx, &ctx->payloads[PIPE_SHADER_FRAGMENT]);

        panfrost_emit_shader_meta(batch, PIPE_SHADER_VERTEX,
                                  &ctx->payloads[PIPE_SHADER_VERTEX]);
        panfrost_emit_shader_meta(batch, PIPE_SHADER_FRAGMENT,
                                  &ctx->payloads[PIPE_SHADER_FRAGMENT]);

        /* We stage to transient, so always dirty.. */
        panfrost_stage_attributes(ctx);

        panfrost_upload_sampler_descriptors(ctx);
        panfrost_upload_texture_descriptors(ctx);

        for (int i = 0; i <= PIPE_SHADER_FRAGMENT; ++i)
                panfrost_emit_const_buf(batch, i, &ctx->payloads[i]);

        /* TODO: Upload the viewport somewhere more appropriate */

        panfrost_emit_viewport(batch, &ctx->payloads[PIPE_SHADER_FRAGMENT]);
}
/* Corresponds to exactly one draw, but does not submit anything */

static void
panfrost_queue_draw(struct panfrost_context *ctx)
{
        /* Handle dirty flags now */
        panfrost_emit_for_draw(ctx);

        /* If rasterizer discard is enabled, only submit the vertex job */

        bool rasterizer_discard = ctx->rasterizer
                                  && ctx->rasterizer->base.rasterizer_discard;

        struct midgard_payload_vertex_tiler *vertex_payload = &ctx->payloads[PIPE_SHADER_VERTEX];
        struct midgard_payload_vertex_tiler *tiler_payload = &ctx->payloads[PIPE_SHADER_FRAGMENT];

        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        bool wallpapering = ctx->wallpaper_batch && batch->tiler_dep;

        if (wallpapering) {
                /* Inject in reverse order, with "predicted" job indices. THIS IS A HACK XXX */
                panfrost_new_job(batch, JOB_TYPE_TILER, false, batch->job_index + 2, tiler_payload, sizeof(*tiler_payload), true);
                panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0, vertex_payload, sizeof(*vertex_payload), true);
        } else {
                unsigned vertex = panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0, vertex_payload, sizeof(*vertex_payload), false);

                if (!rasterizer_discard)
                        panfrost_new_job(batch, JOB_TYPE_TILER, false, vertex, tiler_payload, sizeof(*tiler_payload), false);
        }

        panfrost_batch_adjust_stack_size(batch);
}
/* The entire frame is in memory -- send it off to the kernel! */

static void
panfrost_flush(
        struct pipe_context *pipe,
        struct pipe_fence_handle **fence,
        unsigned flags)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct util_dynarray fences;

        /* We must collect the fences before the flush is done, otherwise we'll
         * lose track of them.
         */
        if (fence) {
                util_dynarray_init(&fences, NULL);
                hash_table_foreach(ctx->batches, hentry) {
                        struct panfrost_batch *batch = hentry->data;

                        panfrost_batch_fence_reference(batch->out_sync);
                        util_dynarray_append(&fences,
                                             struct panfrost_batch_fence *,
                                             batch->out_sync);
                }
        }

        /* Submit all pending jobs */
        panfrost_flush_all_batches(ctx, false);

        if (fence) {
                struct panfrost_fence *f = panfrost_fence_create(ctx, &fences);
                pipe->screen->fence_reference(pipe->screen, fence, NULL);
                *fence = (struct pipe_fence_handle *)f;

                util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence)
                        panfrost_batch_fence_unreference(*fence);

                util_dynarray_fini(&fences);
        }

        if (pan_debug & PAN_DBG_TRACE)
                pandecode_next_frame();
}
#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c;

static int
g2m_draw_mode(enum pipe_prim_type mode)
{
        switch (mode) {
                DEFINE_CASE(POINTS);
                DEFINE_CASE(LINES);
                DEFINE_CASE(LINE_LOOP);
                DEFINE_CASE(LINE_STRIP);
                DEFINE_CASE(TRIANGLES);
                DEFINE_CASE(TRIANGLE_STRIP);
                DEFINE_CASE(TRIANGLE_FAN);
                DEFINE_CASE(QUADS);
                DEFINE_CASE(QUAD_STRIP);
                DEFINE_CASE(POLYGON);

        default:
                unreachable("Invalid draw mode");
        }
}

#undef DEFINE_CASE
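
/* For reference, each DEFINE_CASE above expands to a one-line mapping from
 * the Gallium primitive to the Mali draw mode, e.g.:
 *
 *      DEFINE_CASE(TRIANGLES) => case PIPE_PRIM_TRIANGLES: return MALI_TRIANGLES;
 */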
static unsigned
panfrost_translate_index_size(unsigned size)
{
        switch (size) {
        case 1:
                return MALI_DRAW_INDEXED_UINT8;

        case 2:
                return MALI_DRAW_INDEXED_UINT16;

        case 4:
                return MALI_DRAW_INDEXED_UINT32;

        default:
                unreachable("Invalid index size");
        }
}
/* Gets a GPU address for the associated index buffer. Only guaranteed to be
 * good for the duration of the draw (transient), could last longer. Also gets
 * the bounds on the index buffer for the range accessed by the draw. We do
 * these operations together because there are natural optimizations which
 * require them to be together. */

static mali_ptr
panfrost_get_index_buffer_bounded(struct panfrost_context *ctx, const struct pipe_draw_info *info, unsigned *min_index, unsigned *max_index)
{
        struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);

        off_t offset = info->start * info->index_size;
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        mali_ptr out = 0;

        bool needs_indices = true;

        if (info->max_index != ~0u) {
                *min_index = info->min_index;
                *max_index = info->max_index;
                needs_indices = false;
        }

        if (!info->has_user_indices) {
                /* Only resources can be directly mapped */
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      PAN_BO_ACCESS_VERTEX_TILER);
                out = rsrc->bo->gpu + offset;

                /* Check the cache */
                needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache, info->start, info->count,
                                                           min_index, max_index);
        } else {
                /* Otherwise, we need to upload to transient memory */
                const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
                out = panfrost_upload_transient(batch, ibuf8 + offset, info->count * info->index_size);
        }

        if (needs_indices) {
                /* Fallback */
                u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);

                if (!info->has_user_indices) {
                        panfrost_minmax_cache_add(rsrc->index_cache, info->start, info->count,
                                                  *min_index, *max_index);
                }
        }

        return out;
}
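
/* Worked example (illustrative): an indexed draw whose indices are
 * { 5, 6, 8, 7 } returns *min_index = 5 and *max_index = 8, so the caller
 * only needs vertices [5, 8]: a vertex count of 8 - 5 + 1 = 4, with
 * addresses biased by -min_index (see offset_bias_correction in
 * panfrost_draw_vbo below). */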
static bool
panfrost_scissor_culls_everything(struct panfrost_context *ctx)
{
        const struct pipe_scissor_state *ss = &ctx->scissor;

        /* Check if we're scissoring at all */

        if (!(ctx->rasterizer && ctx->rasterizer->base.scissor))
                return false;

        return (ss->minx == ss->maxx) || (ss->miny == ss->maxy);
}
/* Count generated primitives (when there are no geom/tess shaders) for
 * transform feedback */

static void
panfrost_statistics_record(
        struct panfrost_context *ctx,
        const struct pipe_draw_info *info)
{
        if (!ctx->active_queries)
                return;

        uint32_t prims = u_prims_for_vertices(info->mode, info->count);
        ctx->prims_generated += prims;

        if (!ctx->streamout.num_targets)
                return;

        ctx->tf_prims_generated += prims;
}
static void
panfrost_draw_vbo(
        struct pipe_context *pipe,
        const struct pipe_draw_info *info)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* First of all, check the scissor to see if anything is drawn at all.
         * If it's not, we drop the draw (mostly a conformance issue;
         * well-behaved apps shouldn't hit this) */

        if (panfrost_scissor_culls_everything(ctx))
                return;

        int mode = info->mode;

        /* Fallback unsupported restart index */
        unsigned primitive_index = (1 << (info->index_size * 8)) - 1;

        if (info->primitive_restart && info->index_size
            && info->restart_index != primitive_index) {
                util_draw_vbo_without_prim_restart(pipe, info);
                return;
        }

        /* Fallback for unsupported modes */

        assert(ctx->rasterizer != NULL);

        if (!(ctx->draw_modes & (1 << mode))) {
                if (mode == PIPE_PRIM_QUADS && info->count == 4 && !ctx->rasterizer->base.flatshade) {
                        mode = PIPE_PRIM_TRIANGLE_FAN;
                } else {
                        if (info->count < 4) {
                                /* Degenerate case? */
                                return;
                        }

                        util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
                        util_primconvert_draw_vbo(ctx->primconvert, info);
                        return;
                }
        }

        ctx->payloads[PIPE_SHADER_VERTEX].offset_start = info->start;
        ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = info->start;

        /* Now that we have a guaranteed terminating path, find the job.
         * Assignment commented out to prevent unused warning */

        /* struct panfrost_batch *batch = */ panfrost_get_batch_for_fbo(ctx);

        ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode = g2m_draw_mode(mode);

        /* Take into account a negative bias */
        ctx->vertex_count = info->count + abs(info->index_bias);
        ctx->instance_count = info->instance_count;
        ctx->active_prim = info->mode;

        /* For non-indexed draws, they're the same */
        unsigned vertex_count = ctx->vertex_count;

        unsigned draw_flags = 0;

        /* The draw flags determine how primitive size is interpreted */

        if (panfrost_writes_point_size(ctx))
                draw_flags |= MALI_DRAW_VARYING_SIZE;

        if (info->primitive_restart)
                draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;

        /* These don't make much sense */

        draw_flags |= 0x3000;

        if (ctx->rasterizer && ctx->rasterizer->base.flatshade_first)
                draw_flags |= MALI_DRAW_FLATSHADE_FIRST;

        panfrost_statistics_record(ctx, info);

        if (info->index_size) {
                unsigned min_index = 0, max_index = 0;
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices =
                        panfrost_get_index_buffer_bounded(ctx, info, &min_index, &max_index);

                /* Use the corresponding values */
                vertex_count = max_index - min_index + 1;
                ctx->payloads[PIPE_SHADER_VERTEX].offset_start = min_index + info->index_bias;
                ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = min_index + info->index_bias;

                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = -min_index;
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(info->count);

                draw_flags |= panfrost_translate_index_size(info->index_size);
        } else {
                /* Index count == vertex count, if no indexing is applied, as
                 * if it is internally indexed in the expected order */

                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = 0;
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(ctx->vertex_count);

                /* Reverse index state */
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices = (mali_ptr) 0;
        }

        /* Dispatch "compute jobs" for the vertex/tiler pair as (1,
         * vertex_count, 1) */

        panfrost_pack_work_groups_fused(
                &ctx->payloads[PIPE_SHADER_VERTEX].prefix,
                &ctx->payloads[PIPE_SHADER_FRAGMENT].prefix,
                1, vertex_count, info->instance_count,
                1, 1, 1);

        ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.unknown_draw = draw_flags;

        /* Encode the padded vertex count */

        if (info->instance_count > 1) {
                ctx->padded_count = panfrost_padded_vertex_count(vertex_count);

                unsigned shift = __builtin_ctz(ctx->padded_count);
                unsigned k = ctx->padded_count >> (shift + 1);

                ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = shift;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = shift;

                ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = k;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = k;
        } else {
                ctx->padded_count = vertex_count;

                /* Reset instancing state */
                ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = 0;
                ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = 0;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = 0;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = 0;
        }

        /* Fire off the draw itself */
        panfrost_queue_draw(ctx);

        /* Increment transform feedback offsets */

        for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
                unsigned output_count = u_stream_outputs_for_vertices(
                                                ctx->active_prim, ctx->vertex_count);

                ctx->streamout.offsets[i] += output_count;
        }
}
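
/* Worked example of the instancing encoding above (illustrative): if
 * panfrost_padded_vertex_count() returns padded_count = 12 (binary 1100),
 * then shift = __builtin_ctz(12) = 2 and k = 12 >> 3 = 1, so the padded
 * count is representable as (2k + 1) << shift = 3 << 2 = 12. */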
/* CSO state objects for the driver */

static void
panfrost_generic_cso_delete(struct pipe_context *pctx, void *hwcso)
{
        free(hwcso);
}
static void *
panfrost_create_rasterizer_state(
        struct pipe_context *pctx,
        const struct pipe_rasterizer_state *cso)
{
        struct panfrost_rasterizer *so = CALLOC_STRUCT(panfrost_rasterizer);

        so->base = *cso;

        return so;
}
static void
panfrost_bind_rasterizer_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        ctx->rasterizer = hwcso;

        if (!hwcso)
                return;

        /* Guaranteed with the core GL call, so don't expose ARB_polygon_offset */
        assert(ctx->rasterizer->base.offset_clamp == 0.0);

        /* Point sprites are emulated */

        struct panfrost_shader_state *variant = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);

        if (ctx->rasterizer->base.sprite_coord_enable || (variant && variant->point_sprite_mask))
                ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
}
static void *
panfrost_create_vertex_elements_state(
        struct pipe_context *pctx,
        unsigned num_elements,
        const struct pipe_vertex_element *elements)
{
        struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state);

        so->num_elements = num_elements;
        memcpy(so->pipe, elements, sizeof(*elements) * num_elements);

        for (int i = 0; i < num_elements; ++i) {
                enum pipe_format fmt = elements[i].src_format;
                const struct util_format_description *desc = util_format_description(fmt);
                so->hw[i].unknown1 = 0x2;
                so->hw[i].swizzle = panfrost_get_default_swizzle(desc->nr_channels);

                so->hw[i].format = panfrost_find_format(desc);

                /* The field itself should probably be shifted over */
                so->hw[i].src_offset = elements[i].src_offset;
        }

        return so;
}
static void
panfrost_bind_vertex_elements_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->vertex = hwcso;
}
static void *
panfrost_create_shader_state(
        struct pipe_context *pctx,
        const struct pipe_shader_state *cso,
        enum pipe_shader_type stage)
{
        struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
        so->base = *cso;

        /* Token deep copy to prevent memory corruption */

        if (cso->type == PIPE_SHADER_IR_TGSI)
                so->base.tokens = tgsi_dup_tokens(so->base.tokens);

        /* Precompile for shader-db if we need to */
        if (unlikely((pan_debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) {
                struct panfrost_context *ctx = pan_context(pctx);

                struct panfrost_shader_state state;
                uint64_t outputs_written;

                panfrost_shader_compile(ctx, PIPE_SHADER_IR_NIR,
                                        cso->ir.nir,
                                        tgsi_processor_to_shader_stage(stage),
                                        &state, &outputs_written);
        }

        return so;
}
static void
panfrost_delete_shader_state(
        struct pipe_context *pctx,
        void *so)
{
        struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;

        if (cso->base.type == PIPE_SHADER_IR_TGSI) {
                DBG("Deleting TGSI shader leaks duplicated tokens\n");
        }

        for (unsigned i = 0; i < cso->variant_count; ++i) {
                struct panfrost_shader_state *shader_state = &cso->variants[i];
                panfrost_bo_unreference(shader_state->bo);
                shader_state->bo = NULL;
        }
        free(cso->variants);

        free(so);
}
static void *
panfrost_create_sampler_state(
        struct pipe_context *pctx,
        const struct pipe_sampler_state *cso)
{
        struct panfrost_sampler_state *so = CALLOC_STRUCT(panfrost_sampler_state);
        so->base = *cso;

        panfrost_sampler_desc_init(cso, &so->hw);

        return so;
}
static void
panfrost_bind_sampler_states(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_sampler,
        void **sampler)
{
        assert(start_slot == 0);

        struct panfrost_context *ctx = pan_context(pctx);

        /* XXX: Should upload, not just copy? */
        ctx->sampler_count[shader] = num_sampler;
        memcpy(ctx->samplers[shader], sampler, num_sampler * sizeof (void *));
}
static bool
panfrost_variant_matches(
        struct panfrost_context *ctx,
        struct panfrost_shader_state *variant,
        enum pipe_shader_type type)
{
        struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base;
        struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;

        bool is_fragment = (type == PIPE_SHADER_FRAGMENT);

        if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) {
                /* Make sure enable state is at least the same */
                if (alpha->enabled != variant->alpha_state.enabled) {
                        return false;
                }

                /* Check that the contents of the test are the same */
                bool same_func = alpha->func == variant->alpha_state.func;
                bool same_ref = alpha->ref_value == variant->alpha_state.ref_value;

                if (!(same_func && same_ref)) {
                        return false;
                }
        }

        if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable |
                                          variant->point_sprite_mask)) {
                /* Ensure the same varyings are turned to point sprites */
                if (rasterizer->sprite_coord_enable != variant->point_sprite_mask)
                        return false;

                /* Ensure the orientation is correct */
                bool upper_left =
                        rasterizer->sprite_coord_mode ==
                        PIPE_SPRITE_COORD_UPPER_LEFT;

                if (variant->point_sprite_upper_left != upper_left)
                        return false;
        }

        /* Otherwise, we're good to go */
        return true;
}
/**
 * Fix an uncompiled shader's stream output info, and produce a bitmask
 * of which VARYING_SLOT_* are captured for stream output.
 *
 * Core Gallium stores output->register_index as a "slot" number, where
 * slots are assigned consecutively to all outputs in info->outputs_written.
 * This naive packing of outputs doesn't work for us - we too have slots,
 * but the layout is defined by the VUE map, which we won't have until we
 * compile a specific shader variant. So, we remap these and simply store
 * VARYING_SLOT_* in our copy's output->register_index fields.
 *
 * We then produce a bitmask of outputs which are used for SO.
 *
 * Implementation from iris.
 */

static uint64_t
update_so_info(struct pipe_stream_output_info *so_info,
               uint64_t outputs_written)
{
        uint64_t so_outputs = 0;
        uint8_t reverse_map[64] = {0};
        unsigned slot = 0;

        while (outputs_written)
                reverse_map[slot++] = u_bit_scan64(&outputs_written);

        for (unsigned i = 0; i < so_info->num_outputs; i++) {
                struct pipe_stream_output *output = &so_info->output[i];

                /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
                output->register_index = reverse_map[output->register_index];

                so_outputs |= 1ull << output->register_index;
        }

        return so_outputs;
}
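
/* Worked example (illustrative): if outputs_written = 0b10001 (bits 0 and 4
 * set), the loop above builds reverse_map = { 0, 4, ... }. A stream output
 * that Gallium recorded against condensed slot 1 is therefore remapped to
 * register_index = 4, and bit 4 is set in the returned mask. */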
static void
panfrost_bind_shader_state(
        struct pipe_context *pctx,
        void *hwcso,
        enum pipe_shader_type type)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->shader[type] = hwcso;

        if (!hwcso)
                return;

        /* Match the appropriate variant */

        signed variant = -1;
        struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;

        for (unsigned i = 0; i < variants->variant_count; ++i) {
                if (panfrost_variant_matches(ctx, &variants->variants[i], type)) {
                        variant = i;
                        break;
                }
        }

        if (variant == -1) {
                /* No variant matched, so create a new one */
                variant = variants->variant_count++;

                if (variants->variant_count > variants->variant_space) {
                        unsigned old_space = variants->variant_space;

                        variants->variant_space *= 2;
                        if (variants->variant_space == 0)
                                variants->variant_space = 1;

                        /* Arbitrary limit to stop runaway programs from
                         * creating an unbounded number of shader variants. */
                        assert(variants->variant_space < 1024);

                        unsigned msize = sizeof(struct panfrost_shader_state);
                        variants->variants = realloc(variants->variants,
                                                     variants->variant_space * msize);

                        memset(&variants->variants[old_space], 0,
                               (variants->variant_space - old_space) * msize);
                }

                struct panfrost_shader_state *v =
                        &variants->variants[variant];

                if (type == PIPE_SHADER_FRAGMENT) {
                        v->alpha_state = ctx->depth_stencil->alpha;

                        if (ctx->rasterizer) {
                                v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable;
                                v->point_sprite_upper_left =
                                        ctx->rasterizer->base.sprite_coord_mode ==
                                        PIPE_SPRITE_COORD_UPPER_LEFT;
                        }
                }
        }

        /* Select this variant */
        variants->active_variant = variant;

        struct panfrost_shader_state *shader_state = &variants->variants[variant];
        assert(panfrost_variant_matches(ctx, shader_state, type));

        /* We finally have a variant, so compile it */

        if (!shader_state->compiled) {
                uint64_t outputs_written = 0;

                panfrost_shader_compile(ctx, variants->base.type,
                                        variants->base.type == PIPE_SHADER_IR_NIR ?
                                        variants->base.ir.nir :
                                        variants->base.tokens,
                                        tgsi_processor_to_shader_stage(type),
                                        shader_state,
                                        &outputs_written);

                shader_state->compiled = true;

                /* Fixup the stream out information, since what Gallium returns
                 * normally is mildly insane */

                shader_state->stream_output = variants->base.stream_output;
                shader_state->so_mask =
                        update_so_info(&shader_state->stream_output, outputs_written);
        }
}
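
/* Illustrative note on the growth policy above: variant_space doubles on
 * demand (0 -> 1 -> 2 -> 4 -> ...), so rebinding a shader under
 * fixed-function state it has already seen selects a cached variant and
 * compiles nothing; only a state combination never seen before (e.g. a new
 * alpha-test function) triggers panfrost_shader_compile(). */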
static void *
panfrost_create_vs_state(struct pipe_context *pctx, const struct pipe_shader_state *hwcso)
{
        return panfrost_create_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
}

static void *
panfrost_create_fs_state(struct pipe_context *pctx, const struct pipe_shader_state *hwcso)
{
        return panfrost_create_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
}

static void
panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
{
        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
}

static void
panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
{
        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
}
static void
panfrost_set_vertex_buffers(
        struct pipe_context *pctx,
        unsigned start_slot,
        unsigned num_buffers,
        const struct pipe_vertex_buffer *buffers)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers, start_slot, num_buffers);
}
static void
panfrost_set_constant_buffer(
        struct pipe_context *pctx,
        enum pipe_shader_type shader, uint index,
        const struct pipe_constant_buffer *buf)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];

        util_copy_constant_buffer(&pbuf->cb[index], buf);

        unsigned mask = (1 << index);

        if (unlikely(!buf)) {
                pbuf->enabled_mask &= ~mask;
                pbuf->dirty_mask &= ~mask;
                return;
        }

        pbuf->enabled_mask |= mask;
        pbuf->dirty_mask |= mask;
}
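
/* Example (illustrative): binding a constant buffer at index 2 sets
 * mask = 0b100, so enabled_mask gains bit 2 and panfrost_ubo_count() above
 * will report at least 3 addressable UBOs for this stage. */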
static void
panfrost_set_stencil_ref(
        struct pipe_context *pctx,
        const struct pipe_stencil_ref *ref)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->stencil_ref = *ref;
}
static enum mali_texture_type
panfrost_translate_texture_type(enum pipe_texture_target t) {
        switch (t) {
        case PIPE_BUFFER:
        case PIPE_TEXTURE_1D:
        case PIPE_TEXTURE_1D_ARRAY:
                return MALI_TEX_1D;

        case PIPE_TEXTURE_2D:
        case PIPE_TEXTURE_2D_ARRAY:
        case PIPE_TEXTURE_RECT:
                return MALI_TEX_2D;

        case PIPE_TEXTURE_3D:
                return MALI_TEX_3D;

        case PIPE_TEXTURE_CUBE:
        case PIPE_TEXTURE_CUBE_ARRAY:
                return MALI_TEX_CUBE;

        default:
                unreachable("Unknown target");
        }
}
static struct pipe_sampler_view *
panfrost_create_sampler_view(
        struct pipe_context *pctx,
        struct pipe_resource *texture,
        const struct pipe_sampler_view *template)
{
        struct panfrost_screen *screen = pan_screen(pctx->screen);
        struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view);

        pipe_reference(NULL, &texture->reference);

        struct panfrost_resource *prsrc = (struct panfrost_resource *) texture;
        assert(prsrc->bo);

        so->base = *template;
        so->base.texture = texture;
        so->base.reference.count = 1;
        so->base.context = pctx;

        unsigned char user_swizzle[4] = {
                template->swizzle_r,
                template->swizzle_g,
                template->swizzle_b,
                template->swizzle_a
        };

        /* In the hardware, array_size refers specifically to array textures,
         * whereas in Gallium, it also covers cubemaps */

        unsigned array_size = texture->array_size;

        if (template->target == PIPE_TEXTURE_CUBE) {
                /* TODO: Cubemap arrays */
                assert(array_size == 6);
                array_size /= 6;
        }

        enum mali_texture_type type =
                panfrost_translate_texture_type(template->target);

        unsigned size = panfrost_estimate_texture_size(
                                template->u.tex.first_level,
                                template->u.tex.last_level,
                                template->u.tex.first_layer,
                                template->u.tex.last_layer,
                                type, prsrc->layout);

        so->bo = panfrost_bo_create(screen, size, 0);

        panfrost_new_texture(
                so->bo->cpu,
                texture->width0, texture->height0,
                texture->depth0, array_size,
                texture->format,
                type, prsrc->layout,
                template->u.tex.first_level,
                template->u.tex.last_level,
                template->u.tex.first_layer,
                template->u.tex.last_layer,
                prsrc->cubemap_stride,
                panfrost_translate_swizzle_4(user_swizzle),
                prsrc->bo->gpu,
                prsrc->slices);

        return (struct pipe_sampler_view *) so;
}
static void
panfrost_set_sampler_views(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_views,
        struct pipe_sampler_view **views)
{
        struct panfrost_context *ctx = pan_context(pctx);
        unsigned new_nr = 0;
        unsigned i;

        assert(start_slot == 0);

        for (i = 0; i < num_views; ++i) {
                if (views[i])
                        new_nr = i + 1;
                pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
                                            views[i]);
        }

        for (; i < ctx->sampler_view_count[shader]; i++) {
                pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
                                            NULL);
        }
        ctx->sampler_view_count[shader] = new_nr;
}
static void
panfrost_sampler_view_destroy(
        struct pipe_context *pctx,
        struct pipe_sampler_view *pview)
{
        struct panfrost_sampler_view *view = (struct panfrost_sampler_view *) pview;

        pipe_resource_reference(&pview->texture, NULL);
        panfrost_bo_unreference(view->bo);
        ralloc_free(view);
}
static void
panfrost_set_shader_buffers(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start, unsigned count,
        const struct pipe_shader_buffer *buffers,
        unsigned writable_bitmask)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_set_shader_buffers_mask(ctx->ssbo[shader], &ctx->ssbo_mask[shader],
                                     buffers, start, count);
}
/* Hints that a framebuffer should use AFBC where possible */

static void
panfrost_hint_afbc(
        struct panfrost_screen *screen,
        const struct pipe_framebuffer_state *fb)
{
        /* AFBC implementation incomplete; hide it */
        if (!(pan_debug & PAN_DBG_AFBC)) return;

        /* Hint AFBC to the resources bound to each color buffer */

        for (unsigned i = 0; i < fb->nr_cbufs; ++i) {
                struct pipe_surface *surf = fb->cbufs[i];
                struct panfrost_resource *rsrc = pan_resource(surf->texture);
                panfrost_resource_hint_layout(screen, rsrc, MALI_TEXTURE_AFBC, 1);
        }

        /* Also hint it to the depth buffer */

        if (fb->zsbuf) {
                struct panfrost_resource *rsrc = pan_resource(fb->zsbuf->texture);
                panfrost_resource_hint_layout(screen, rsrc, MALI_TEXTURE_AFBC, 1);
        }
}
static void
panfrost_set_framebuffer_state(struct pipe_context *pctx,
                               const struct pipe_framebuffer_state *fb)
{
        struct panfrost_context *ctx = pan_context(pctx);

        panfrost_hint_afbc(pan_screen(pctx->screen), fb);
        util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
        panfrost_invalidate_frame(ctx);
}
static void *
panfrost_create_depth_stencil_state(struct pipe_context *pipe,
                                    const struct pipe_depth_stencil_alpha_state *depth_stencil)
{
        return mem_dup(depth_stencil, sizeof(*depth_stencil));
}
static void
panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
                                  void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct pipe_depth_stencil_alpha_state *depth_stencil = cso;
        ctx->depth_stencil = depth_stencil;

        if (!depth_stencil)
                return;

        /* Alpha does not exist in the hardware (it's not in ES3), so it's
         * emulated in the fragment shader */

        if (depth_stencil->alpha.enabled) {
                /* We need to trigger a new shader (maybe) */
                ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
        }

        /* Bounds test not implemented */
        assert(!depth_stencil->depth.bounds_test);
}
static void
panfrost_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
{
        free(depth);
}
static void
panfrost_set_sample_mask(struct pipe_context *pipe,
                         unsigned sample_mask)
{
}
static void
panfrost_set_clip_state(struct pipe_context *pipe,
                        const struct pipe_clip_state *clip)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}
static void
panfrost_set_viewport_states(struct pipe_context *pipe,
                             unsigned start_slot,
                             unsigned num_viewports,
                             const struct pipe_viewport_state *viewports)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_viewports == 1);

        ctx->pipe_viewport = *viewports;
}
static void
panfrost_set_scissor_states(struct pipe_context *pipe,
                            unsigned start_slot,
                            unsigned num_scissors,
                            const struct pipe_scissor_state *scissors)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_scissors == 1);

        ctx->scissor = *scissors;
}
static void
panfrost_set_polygon_stipple(struct pipe_context *pipe,
                             const struct pipe_poly_stipple *stipple)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}
static void
panfrost_set_active_query_state(struct pipe_context *pipe,
                                bool enable)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->active_queries = enable;
}
static void
panfrost_destroy(struct pipe_context *pipe)
{
        struct panfrost_context *panfrost = pan_context(pipe);

        if (panfrost->blitter)
                util_blitter_destroy(panfrost->blitter);

        if (panfrost->blitter_wallpaper)
                util_blitter_destroy(panfrost->blitter_wallpaper);

        util_unreference_framebuffer_state(&panfrost->pipe_framebuffer);
        u_upload_destroy(pipe->stream_uploader);

        ralloc_free(pipe);
}
static struct pipe_query *
panfrost_create_query(struct pipe_context *pipe,
                      unsigned type,
                      unsigned index)
{
        struct panfrost_query *q = rzalloc(pipe, struct panfrost_query);

        q->type = type;
        q->index = index;

        return (struct pipe_query *) q;
}
static void
panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_query *query = (struct panfrost_query *) q;

        if (query->bo) {
                panfrost_bo_unreference(query->bo);
                query->bo = NULL;
        }

        ralloc_free(q);
}
static bool
panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
                /* Allocate a bo for the query results to be stored */
                if (!query->bo) {
                        query->bo = panfrost_bo_create(
                                            pan_screen(ctx->base.screen),
                                            sizeof(unsigned), 0);
                }

                unsigned *result = (unsigned *)query->bo->cpu;
                *result = 0; /* Default to 0 if nothing at all drawn. */
                ctx->occlusion_query = query;
                break;

        /* Geometry statistics are computed in the driver. XXX: geom/tess
         * shaders.. */

        case PIPE_QUERY_PRIMITIVES_GENERATED:
                query->start = ctx->prims_generated;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                query->start = ctx->tf_prims_generated;
                break;

        default:
                DBG("Skipping query %u\n", query->type);
                break;
        }

        return true;
}
static bool
panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
                ctx->occlusion_query = NULL;
                break;
        case PIPE_QUERY_PRIMITIVES_GENERATED:
                query->end = ctx->prims_generated;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                query->end = ctx->tf_prims_generated;
                break;
        }

        return true;
}
static bool
panfrost_get_query_result(struct pipe_context *pipe,
                          struct pipe_query *q,
                          bool wait,
                          union pipe_query_result *vresult)
{
        struct panfrost_query *query = (struct panfrost_query *) q;
        struct panfrost_context *ctx = pan_context(pipe);

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
                /* Flush first */
                panfrost_flush_all_batches(ctx, true);

                /* Read back the query results */
                unsigned *result = (unsigned *) query->bo->cpu;
                unsigned passed = *result;

                if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
                        vresult->u64 = passed;
                } else {
                        vresult->b = !!passed;
                }

                break;

        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                panfrost_flush_all_batches(ctx, true);
                vresult->u64 = query->end - query->start;
                break;

        default:
                DBG("Skipped query get %u\n", query->type);
                break;
        }

        return true;
}
static struct pipe_stream_output_target *
panfrost_create_stream_output_target(struct pipe_context *pctx,
                                     struct pipe_resource *prsc,
                                     unsigned buffer_offset,
                                     unsigned buffer_size)
{
        struct pipe_stream_output_target *target;

        target = rzalloc(pctx, struct pipe_stream_output_target);

        if (!target)
                return NULL;

        pipe_reference_init(&target->reference, 1);
        pipe_resource_reference(&target->buffer, prsc);

        target->context = pctx;
        target->buffer_offset = buffer_offset;
        target->buffer_size = buffer_size;

        return target;
}
static void
panfrost_stream_output_target_destroy(struct pipe_context *pctx,
                                      struct pipe_stream_output_target *target)
{
        pipe_resource_reference(&target->buffer, NULL);
        ralloc_free(target);
}
static void
panfrost_set_stream_output_targets(struct pipe_context *pctx,
                                   unsigned num_targets,
                                   struct pipe_stream_output_target **targets,
                                   const unsigned *offsets)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_streamout *so = &ctx->streamout;

        assert(num_targets <= ARRAY_SIZE(so->targets));

        for (unsigned i = 0; i < num_targets; i++) {
                if (offsets[i] != -1)
                        so->offsets[i] = offsets[i];

                pipe_so_target_reference(&so->targets[i], targets[i]);
        }

        /* Release only the targets beyond the new set, so the references
         * taken above are kept */
        for (unsigned i = num_targets; i < so->num_targets; i++)
                pipe_so_target_reference(&so->targets[i], NULL);

        so->num_targets = num_targets;
}
struct pipe_context *
panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
{
        struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context);
        struct pipe_context *gallium = (struct pipe_context *) ctx;

        gallium->screen = screen;

        gallium->destroy = panfrost_destroy;

        gallium->set_framebuffer_state = panfrost_set_framebuffer_state;

        gallium->flush = panfrost_flush;
        gallium->clear = panfrost_clear;
        gallium->draw_vbo = panfrost_draw_vbo;

        gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
        gallium->set_constant_buffer = panfrost_set_constant_buffer;
        gallium->set_shader_buffers = panfrost_set_shader_buffers;

        gallium->set_stencil_ref = panfrost_set_stencil_ref;

        gallium->create_sampler_view = panfrost_create_sampler_view;
        gallium->set_sampler_views = panfrost_set_sampler_views;
        gallium->sampler_view_destroy = panfrost_sampler_view_destroy;

        gallium->create_rasterizer_state = panfrost_create_rasterizer_state;
        gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
        gallium->delete_rasterizer_state = panfrost_generic_cso_delete;

        gallium->create_vertex_elements_state = panfrost_create_vertex_elements_state;
        gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
        gallium->delete_vertex_elements_state = panfrost_generic_cso_delete;

        gallium->create_fs_state = panfrost_create_fs_state;
        gallium->delete_fs_state = panfrost_delete_shader_state;
        gallium->bind_fs_state = panfrost_bind_fs_state;

        gallium->create_vs_state = panfrost_create_vs_state;
        gallium->delete_vs_state = panfrost_delete_shader_state;
        gallium->bind_vs_state = panfrost_bind_vs_state;

        gallium->create_sampler_state = panfrost_create_sampler_state;
        gallium->delete_sampler_state = panfrost_generic_cso_delete;
        gallium->bind_sampler_states = panfrost_bind_sampler_states;

        gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
        gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
        gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;

        gallium->set_sample_mask = panfrost_set_sample_mask;

        gallium->set_clip_state = panfrost_set_clip_state;
        gallium->set_viewport_states = panfrost_set_viewport_states;
        gallium->set_scissor_states = panfrost_set_scissor_states;
        gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
        gallium->set_active_query_state = panfrost_set_active_query_state;

        gallium->create_query = panfrost_create_query;
        gallium->destroy_query = panfrost_destroy_query;
        gallium->begin_query = panfrost_begin_query;
        gallium->end_query = panfrost_end_query;
        gallium->get_query_result = panfrost_get_query_result;

        gallium->create_stream_output_target = panfrost_create_stream_output_target;
        gallium->stream_output_target_destroy = panfrost_stream_output_target_destroy;
        gallium->set_stream_output_targets = panfrost_set_stream_output_targets;

        panfrost_resource_context_init(gallium);
        panfrost_blend_context_init(gallium);
        panfrost_compute_context_init(gallium);

        gallium->stream_uploader = u_upload_create_default(gallium);
        gallium->const_uploader = gallium->stream_uploader;
        assert(gallium->stream_uploader);

        /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */
        ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1;

        ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);

        ctx->blitter = util_blitter_create(gallium);
        ctx->blitter_wallpaper = util_blitter_create(gallium);

        assert(ctx->blitter);
        assert(ctx->blitter_wallpaper);

        /* Prepare for render! */

        panfrost_batch_init(ctx);
        panfrost_emit_vertex_payload(ctx);
        panfrost_invalidate_frame(ctx);

        return gallium;
}