/*
 * © Copyright 2018 Alyssa Rosenzweig
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "pan_context.h"
#include "pan_minmax_cache.h"
#include "panfrost-quirks.h"

#include "util/macros.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "util/u_memory.h"
#include "util/u_vbuf.h"
#include "util/half_float.h"
#include "util/u_helpers.h"
#include "util/u_prim.h"
#include "util/u_prim_restart.h"
#include "indices/u_primconvert.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_from_mesa.h"
#include "util/u_math.h"

#include "pan_screen.h"
#include "pan_blending.h"
#include "pan_blend_shaders.h"
#include "pan_cmdstream.h"
#include "pandecode/decode.h"
#include "util/pan_lower_framebuffer.h"
struct midgard_tiler_descriptor
panfrost_emit_midg_tiler(struct panfrost_batch *batch, unsigned vertex_count)
{
        struct panfrost_device *device = pan_device(batch->ctx->base.screen);
        bool hierarchy = !(device->quirks & MIDGARD_NO_HIER_TILING);
        struct midgard_tiler_descriptor t = {0};
        unsigned height = batch->key.height;
        unsigned width = batch->key.width;

        t.hierarchy_mask =
                panfrost_choose_hierarchy_mask(width, height, vertex_count, hierarchy);

        /* Compute the polygon header size and use that to offset the body */

        unsigned header_size = panfrost_tiler_header_size(
                                       width, height, t.hierarchy_mask, hierarchy);

        t.polygon_list_size = panfrost_tiler_full_size(
                                      width, height, t.hierarchy_mask, hierarchy);

        if (vertex_count) {
                struct panfrost_bo *tiler_heap;

                tiler_heap = panfrost_batch_get_tiler_heap(batch);
                t.polygon_list = panfrost_batch_get_polygon_list(batch,
                                 header_size + t.polygon_list_size);

                /* Allow the entire tiler heap */
                t.heap_start = tiler_heap->gpu;
                t.heap_end = tiler_heap->gpu + tiler_heap->size;
        } else {
                struct panfrost_bo *tiler_dummy;

                tiler_dummy = panfrost_batch_get_tiler_dummy(batch);
                header_size = MALI_TILER_MINIMUM_HEADER_SIZE;

                /* The tiler is disabled, so don't allow the tiler heap */
                t.heap_start = tiler_dummy->gpu;
                t.heap_end = t.heap_start;

                /* Use a dummy polygon list */
                t.polygon_list = tiler_dummy->gpu;

                /* Disable the tiler */
                if (hierarchy)
                        t.hierarchy_mask |= MALI_TILER_DISABLED;
                else {
                        t.hierarchy_mask = MALI_TILER_USER;
                        t.polygon_list_size = MALI_TILER_MINIMUM_HEADER_SIZE + 4;

                        /* We don't have a WRITE_VALUE job, so write the polygon list manually */
                        uint32_t *polygon_list_body = (uint32_t *) (tiler_dummy->cpu + header_size);
                        polygon_list_body[0] = 0xa0000000; /* TODO: Just that? */
                }
        }

        t.polygon_list_body =
                t.polygon_list + header_size;

        return t;
}
static void
panfrost_clear(
        struct pipe_context *pipe,
        unsigned buffers,
        const struct pipe_scissor_state *scissor_state,
        const union pipe_color_union *color,
        double depth, unsigned stencil)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* TODO: panfrost_get_fresh_batch_for_fbo() instantiates a new batch if
         * the existing batch targeting this FBO has draws. We could probably
         * avoid that by replacing plain clears by quad-draws with a specific
         * color/depth/stencil value, thus avoiding the generation of extra
         * fragment jobs. */
        struct panfrost_batch *batch = panfrost_get_fresh_batch_for_fbo(ctx);

        panfrost_batch_add_fbo_bos(batch);
        panfrost_batch_clear(batch, buffers, color, depth, stencil);
}
/* Reset per-frame context, called on context initialisation as well as after
 * flushing a frame */

void
panfrost_invalidate_frame(struct panfrost_context *ctx)
{
        /* TODO: When does this need to be handled? */
        ctx->active_queries = true;
}
bool
panfrost_writes_point_size(struct panfrost_context *ctx)
{
        assert(ctx->shader[PIPE_SHADER_VERTEX]);
        struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);

        return vs->writes_point_size && ctx->active_prim == PIPE_PRIM_POINTS;
}
void
panfrost_vertex_state_upd_attr_offs(struct panfrost_context *ctx,
                                    struct mali_vertex_tiler_postfix *vertex_postfix)
{
        if (!ctx->vertex)
                return;

        struct panfrost_vertex_state *so = ctx->vertex;

        /* Fixup offsets for the second pass. Recall that the hardware
         * calculates attribute addresses as:
         *
         *      addr = base + (stride * vtx) + src_offset;
         *
         * However, on Mali, base must be aligned to 64 bytes, so we
         * instead let:
         *
         *      base' = base & ~63 = base - (base & 63)
         *
         * To compensate when using base' (see emit_vertex_data), we have
         * to adjust src_offset by the masked off piece:
         *
         *      addr' = base' + (stride * vtx) + (src_offset + (base & 63))
         *            = base - (base & 63) + (stride * vtx) + src_offset + (base & 63)
         *            = base + (stride * vtx) + src_offset
         */
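        /* Worked example: for base = 0x1007, base' = 0x1000 and
         * (base & 63) = 7, so src_offset = 4 is stored as 11. The hardware
         * then computes 0x1000 + (stride * vtx) + 11, which equals the
         * intended 0x1007 + (stride * vtx) + 4. */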
        unsigned start = vertex_postfix->offset_start;

        for (unsigned i = 0; i < so->num_elements; ++i) {
                unsigned vbi = so->pipe[i].vertex_buffer_index;
                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];

                /* Adjust by the masked off bits of the offset. Make sure we
                 * read src_offset from so->hw (which is not GPU visible)
                 * rather than target (which is) due to caching effects */

                unsigned src_offset = so->pipe[i].src_offset;

                /* BOs are aligned to 4k, so guaranteed aligned to 64 */
                src_offset += (buf->buffer_offset & 63);

                /* Also, somewhat obscurely, per-instance data needs to be
                 * offset in response to a delayed start in an indexed draw */

                if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
                        src_offset -= buf->stride * start;

                so->hw[i].src_offset = src_offset;
        }
}
/* Compute number of UBOs active (more specifically, compute the highest UBO
 * number addressable -- if there are gaps, include them in the count anyway).
 * We always include UBO #0 in the count, since we *need* uniforms enabled for
 * sysvals. */

unsigned
panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage)
{
        unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1;
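        /* e.g. enabled_mask = 0x8 (only UBO 3 bound) gives mask = 0x9, so
         * we return 32 - clz(0x9) = 32 - 28 = 4, counting UBOs 0-3 with
         * the gaps included. */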
        return 32 - __builtin_clz(mask);
}
/* The entire frame is in memory -- send it off to the kernel! */

static void
panfrost_flush(
        struct pipe_context *pipe,
        struct pipe_fence_handle **fence,
        unsigned flags)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_device *dev = pan_device(pipe->screen);
        struct util_dynarray fences;

        /* We must collect the fences before the flush is done, otherwise we'll
         * lose track of them. */
        if (fence) {
                util_dynarray_init(&fences, NULL);
                hash_table_foreach(ctx->batches, hentry) {
                        struct panfrost_batch *batch = hentry->data;

                        panfrost_batch_fence_reference(batch->out_sync);
                        util_dynarray_append(&fences,
                                             struct panfrost_batch_fence *,
                                             batch->out_sync);
                }
        }

        /* Submit all pending jobs */
        panfrost_flush_all_batches(ctx);

        if (fence) {
                struct panfrost_fence *f = panfrost_fence_create(ctx, &fences);
                pipe->screen->fence_reference(pipe->screen, fence, NULL);
                *fence = (struct pipe_fence_handle *)f;

                util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence)
                        panfrost_batch_fence_unreference(*fence);

                util_dynarray_fini(&fences);
        }

        if (dev->debug & PAN_DBG_TRACE)
                pandecode_next_frame();
}
static void
panfrost_texture_barrier(struct pipe_context *pipe, unsigned flags)
{
        struct panfrost_context *ctx = pan_context(pipe);
        panfrost_flush_all_batches(ctx);
}
#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c;

static int
g2m_draw_mode(enum pipe_prim_type mode)
{
        switch (mode) {
                DEFINE_CASE(POINTS);
                DEFINE_CASE(LINES);
                DEFINE_CASE(LINE_LOOP);
                DEFINE_CASE(LINE_STRIP);
                DEFINE_CASE(TRIANGLES);
                DEFINE_CASE(TRIANGLE_STRIP);
                DEFINE_CASE(TRIANGLE_FAN);
                DEFINE_CASE(QUADS);
                DEFINE_CASE(QUAD_STRIP);
                DEFINE_CASE(POLYGON);

        default:
                unreachable("Invalid draw mode");
        }
}

#undef DEFINE_CASE
static bool
panfrost_scissor_culls_everything(struct panfrost_context *ctx)
{
        const struct pipe_scissor_state *ss = &ctx->scissor;

        /* Check if we're scissoring at all */

        if (!(ctx->rasterizer && ctx->rasterizer->base.scissor))
                return false;

        return (ss->minx == ss->maxx) || (ss->miny == ss->maxy);
}
/* Count generated primitives (when there are no geom/tess shaders) for
 * transform feedback */

static void
panfrost_statistics_record(
        struct panfrost_context *ctx,
        const struct pipe_draw_info *info)
{
        if (!ctx->active_queries)
                return;

        uint32_t prims = u_prims_for_vertices(info->mode, info->count);
        ctx->prims_generated += prims;

        if (!ctx->streamout.num_targets)
                return;

        ctx->tf_prims_generated += prims;
}
static void
panfrost_update_streamout_offsets(struct panfrost_context *ctx)
{
        for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
                unsigned count;

                count = u_stream_outputs_for_vertices(ctx->active_prim,
                                                      ctx->vertex_count);
                ctx->streamout.offsets[i] += count;
        }
}
static void
panfrost_draw_vbo(
        struct pipe_context *pipe,
        const struct pipe_draw_info *info)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* First of all, check the scissor to see if anything is drawn at all.
         * If it's not, we drop the draw (mostly a conformance issue;
         * well-behaved apps shouldn't hit this) */

        if (panfrost_scissor_culls_everything(ctx))
                return;

        int mode = info->mode;

        /* Fall back for an unsupported restart index */
        unsigned primitive_index = (1 << (info->index_size * 8)) - 1;
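        /* e.g. a 16-bit index buffer (index_size = 2) gives a hardware
         * restart index of 0xFFFF; any other restart index requested by
         * the state tracker takes the unrolled fallback below. */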
        if (info->primitive_restart && info->index_size
            && info->restart_index != primitive_index) {
                util_draw_vbo_without_prim_restart(pipe, info);
                return;
        }

        /* Fallback for unsupported modes */

        assert(ctx->rasterizer != NULL);

        if (!(ctx->draw_modes & (1 << mode))) {
                if (mode == PIPE_PRIM_QUADS && info->count == 4 && !ctx->rasterizer->base.flatshade) {
                        mode = PIPE_PRIM_TRIANGLE_FAN;
                } else {
                        if (info->count < 4) {
                                /* Degenerate case? */
                                return;
                        }

                        util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
                        util_primconvert_draw_vbo(ctx->primconvert, info);
                        return;
                }
        }

        /* Now that we have a guaranteed terminating path, find the job */

        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        panfrost_batch_add_fbo_bos(batch);
        panfrost_batch_set_requirements(batch);

        /* Take into account a negative bias */
        ctx->vertex_count = info->count + abs(info->index_bias);
        ctx->instance_count = info->instance_count;
        ctx->active_prim = info->mode;

        struct mali_vertex_tiler_prefix vertex_prefix, tiler_prefix;
        struct mali_vertex_tiler_postfix vertex_postfix, tiler_postfix;
        union midgard_primitive_size primitive_size;
        unsigned vertex_count;

        panfrost_vt_init(ctx, PIPE_SHADER_VERTEX, &vertex_prefix, &vertex_postfix);
        panfrost_vt_init(ctx, PIPE_SHADER_FRAGMENT, &tiler_prefix, &tiler_postfix);

        panfrost_vt_set_draw_info(ctx, info, g2m_draw_mode(mode),
                                  &vertex_postfix, &tiler_prefix,
                                  &tiler_postfix, &vertex_count,
                                  &ctx->padded_count);

        panfrost_statistics_record(ctx, info);

        /* Dispatch "compute jobs" for the vertex/tiler pair as (1,
         * vertex_count, 1) */

        panfrost_pack_work_groups_fused(&vertex_prefix, &tiler_prefix,
                                        1, vertex_count, info->instance_count,
                                        1, 1, 1);

        /* Emit all sorts of descriptors. */
        panfrost_emit_vertex_data(batch, &vertex_postfix);
        panfrost_emit_varying_descriptor(batch,
                                         ctx->padded_count *
                                         ctx->instance_count,
                                         &vertex_postfix, &tiler_postfix,
                                         &primitive_size);
        panfrost_emit_shader_meta(batch, PIPE_SHADER_VERTEX, &vertex_postfix);
        panfrost_emit_shader_meta(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix);
        panfrost_emit_vertex_attr_meta(batch, &vertex_postfix);
        panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_VERTEX, &vertex_postfix);
        panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix);
        panfrost_emit_texture_descriptors(batch, PIPE_SHADER_VERTEX, &vertex_postfix);
        panfrost_emit_texture_descriptors(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix);
        panfrost_emit_const_buf(batch, PIPE_SHADER_VERTEX, &vertex_postfix);
        panfrost_emit_const_buf(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix);
        panfrost_emit_viewport(batch, &tiler_postfix);

        panfrost_vt_update_primitive_size(ctx, &tiler_prefix, &primitive_size);

        /* Fire off the draw itself */
        panfrost_emit_vertex_tiler_jobs(batch, &vertex_prefix, &vertex_postfix,
                                        &tiler_prefix, &tiler_postfix,
                                        &primitive_size);

        /* Adjust the batch stack size based on the new shader stack sizes. */
        panfrost_batch_adjust_stack_size(batch);

        /* Increment transform feedback offsets */
        panfrost_update_streamout_offsets(ctx);
}
static void
panfrost_generic_cso_delete(struct pipe_context *pctx, void *hwcso)
{
        free(hwcso);
}
static void *
panfrost_create_rasterizer_state(
        struct pipe_context *pctx,
        const struct pipe_rasterizer_state *cso)
{
        struct panfrost_rasterizer *so = CALLOC_STRUCT(panfrost_rasterizer);

        so->base = *cso;

        return so;
}
static void
panfrost_bind_rasterizer_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        ctx->rasterizer = hwcso;

        if (!hwcso)
                return;

        /* Guaranteed with the core GL call, so don't expose ARB_polygon_offset */
        assert(ctx->rasterizer->base.offset_clamp == 0.0);

        /* Point sprites are emulated */

        struct panfrost_shader_state *variant = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);

        if (ctx->rasterizer->base.sprite_coord_enable || (variant && variant->point_sprite_mask))
                ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
}
static void *
panfrost_create_vertex_elements_state(
        struct pipe_context *pctx,
        unsigned num_elements,
        const struct pipe_vertex_element *elements)
{
        struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state);
        struct panfrost_device *dev = pan_device(pctx->screen);

        so->num_elements = num_elements;
        memcpy(so->pipe, elements, sizeof(*elements) * num_elements);

        for (int i = 0; i < num_elements; ++i) {
                enum pipe_format fmt = elements[i].src_format;
                const struct util_format_description *desc = util_format_description(fmt);
                so->hw[i].unknown1 = 0x2;

                if (dev->quirks & HAS_SWIZZLES)
                        so->hw[i].swizzle = panfrost_translate_swizzle_4(desc->swizzle);
                else
                        so->hw[i].swizzle = panfrost_bifrost_swizzle(desc->nr_channels);

                enum mali_format hw_format = panfrost_pipe_format_table[desc->format].hw;
                so->hw[i].format = hw_format;
                assert(hw_format);
        }

        /* Let's also prepare vertex builtins */
        so->hw[PAN_VERTEX_ID].format = MALI_R32UI;
        if (dev->quirks & HAS_SWIZZLES)
                so->hw[PAN_VERTEX_ID].swizzle = panfrost_get_default_swizzle(1);
        else
                so->hw[PAN_VERTEX_ID].swizzle = panfrost_bifrost_swizzle(1);

        so->hw[PAN_INSTANCE_ID].format = MALI_R32UI;
        if (dev->quirks & HAS_SWIZZLES)
                so->hw[PAN_INSTANCE_ID].swizzle = panfrost_get_default_swizzle(1);
        else
                so->hw[PAN_INSTANCE_ID].swizzle = panfrost_bifrost_swizzle(1);

        return so;
}
static void
panfrost_bind_vertex_elements_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->vertex = hwcso;
}
static void *
panfrost_create_shader_state(
        struct pipe_context *pctx,
        const struct pipe_shader_state *cso,
        enum pipe_shader_type stage)
{
        struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
        struct panfrost_device *dev = pan_device(pctx->screen);
        so->base = *cso;

        /* Token deep copy to prevent memory corruption */

        if (cso->type == PIPE_SHADER_IR_TGSI)
                so->base.tokens = tgsi_dup_tokens(so->base.tokens);

        /* Precompile for shader-db if we need to */
        if (unlikely((dev->debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) {
                struct panfrost_context *ctx = pan_context(pctx);

                struct panfrost_shader_state state;
                uint64_t outputs_written;

                panfrost_shader_compile(ctx, PIPE_SHADER_IR_NIR,
                                        so->base.ir.nir,
                                        tgsi_processor_to_shader_stage(stage),
                                        &state, &outputs_written);
        }

        return so;
}
static void
panfrost_delete_shader_state(
        struct pipe_context *pctx,
        void *so)
{
        struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;

        if (cso->base.type == PIPE_SHADER_IR_TGSI) {
                /* TODO: leaks TGSI tokens! */
        }

        for (unsigned i = 0; i < cso->variant_count; ++i) {
                struct panfrost_shader_state *shader_state = &cso->variants[i];
                panfrost_bo_unreference(shader_state->bo);
                shader_state->bo = NULL;
        }
        free(cso->variants);

        free(so);
}
static void *
panfrost_create_sampler_state(
        struct pipe_context *pctx,
        const struct pipe_sampler_state *cso)
{
        struct panfrost_sampler_state *so = CALLOC_STRUCT(panfrost_sampler_state);
        struct panfrost_device *device = pan_device(pctx->screen);

        so->base = *cso;

        if (device->quirks & IS_BIFROST)
                panfrost_sampler_desc_init_bifrost(cso, &so->bifrost_hw);
        else
                panfrost_sampler_desc_init(cso, &so->midgard_hw);

        return so;
}
static void
panfrost_bind_sampler_states(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_sampler,
        void **sampler)
{
        assert(start_slot == 0);

        struct panfrost_context *ctx = pan_context(pctx);

        /* XXX: Should upload, not just copy? */
        ctx->sampler_count[shader] = num_sampler;
        memcpy(ctx->samplers[shader], sampler, num_sampler * sizeof (void *));
}
static bool
panfrost_variant_matches(
        struct panfrost_context *ctx,
        struct panfrost_shader_state *variant,
        enum pipe_shader_type type)
{
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base;
        struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;

        bool is_fragment = (type == PIPE_SHADER_FRAGMENT);

        if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) {
                /* Make sure enable state is at least the same */
                if (alpha->enabled != variant->alpha_state.enabled) {
                        return false;
                }

                /* Check that the contents of the test are the same */
                bool same_func = alpha->func == variant->alpha_state.func;
                bool same_ref = alpha->ref_value == variant->alpha_state.ref_value;

                if (!(same_func && same_ref)) {
                        return false;
                }
        }

        if (variant->outputs_read) {
                struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;

                unsigned i;
                BITSET_FOREACH_SET(i, &variant->outputs_read, 8) {
                        enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM;

                        if ((fb->nr_cbufs > i) && fb->cbufs[i])
                                fmt = fb->cbufs[i]->format;

                        const struct util_format_description *desc =
                                util_format_description(fmt);

                        if (pan_format_class_load(desc, dev->quirks) == PAN_FORMAT_NATIVE)
                                fmt = PIPE_FORMAT_NONE;

                        if (variant->rt_formats[i] != fmt)
                                return false;
                }
        }

        /* Point sprites are TODO on Bifrost, so always pass there */
        if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable |
                                          variant->point_sprite_mask)
            && !(dev->quirks & IS_BIFROST)) {
                /* Ensure the same varyings are turned to point sprites */
                if (rasterizer->sprite_coord_enable != variant->point_sprite_mask)
                        return false;

                /* Ensure the orientation is correct */
                bool upper_left =
                        rasterizer->sprite_coord_mode ==
                        PIPE_SPRITE_COORD_UPPER_LEFT;

                if (variant->point_sprite_upper_left != upper_left)
                        return false;
        }

        /* Otherwise, we're good to go */
        return true;
}
/*
 * Fix an uncompiled shader's stream output info, and produce a bitmask
 * of which VARYING_SLOT_* are captured for stream output.
 *
 * Core Gallium stores output->register_index as a "slot" number, where
 * slots are assigned consecutively to all outputs in info->outputs_written.
 * This naive packing of outputs doesn't work for us - we too have slots,
 * but the layout is defined by the VUE map, which we won't have until we
 * compile a specific shader variant. So, we remap these and simply store
 * VARYING_SLOT_* in our copy's output->register_index fields.
 *
 * We then produce a bitmask of outputs which are used for SO.
 *
 * Implementation from iris.
 */

static uint64_t
update_so_info(struct pipe_stream_output_info *so_info,
               uint64_t outputs_written)
{
        uint64_t so_outputs = 0;
        uint8_t reverse_map[64] = {0};
        unsigned slot = 0;

        while (outputs_written)
                reverse_map[slot++] = u_bit_scan64(&outputs_written);
        for (unsigned i = 0; i < so_info->num_outputs; i++) {
                struct pipe_stream_output *output = &so_info->output[i];

                /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
                output->register_index = reverse_map[output->register_index];

                so_outputs |= 1ull << output->register_index;
        }

        return so_outputs;
}
static void
panfrost_bind_shader_state(
        struct pipe_context *pctx,
        void *hwcso,
        enum pipe_shader_type type)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        ctx->shader[type] = hwcso;

        if (!hwcso)
                return;

        /* Match the appropriate variant */

        signed variant = -1;
        struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;

        for (unsigned i = 0; i < variants->variant_count; ++i) {
                if (panfrost_variant_matches(ctx, &variants->variants[i], type)) {
                        variant = i;
                        break;
                }
        }

        if (variant == -1) {
                /* No variant matched, so create a new one */
                variant = variants->variant_count++;

                if (variants->variant_count > variants->variant_space) {
                        unsigned old_space = variants->variant_space;

                        variants->variant_space *= 2;
                        if (variants->variant_space == 0)
                                variants->variant_space = 1;

                        /* Arbitrary limit to stop runaway programs from
                         * creating an unbounded number of shader variants. */
                        assert(variants->variant_space < 1024);

                        unsigned msize = sizeof(struct panfrost_shader_state);
                        variants->variants = realloc(variants->variants,
                                                     variants->variant_space * msize);

                        memset(&variants->variants[old_space], 0,
                               (variants->variant_space - old_space) * msize);
                }

                struct panfrost_shader_state *v =
                        &variants->variants[variant];

                if (type == PIPE_SHADER_FRAGMENT) {
                        v->alpha_state = ctx->depth_stencil->alpha;

                        struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
                        for (unsigned i = 0; i < fb->nr_cbufs; ++i) {
                                enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM;

                                if ((fb->nr_cbufs > i) && fb->cbufs[i])
                                        fmt = fb->cbufs[i]->format;

                                const struct util_format_description *desc =
                                        util_format_description(fmt);

                                if (pan_format_class_load(desc, dev->quirks) == PAN_FORMAT_NATIVE)
                                        fmt = PIPE_FORMAT_NONE;

                                v->rt_formats[i] = fmt;
                        }

                        /* Point sprites are TODO on Bifrost */
                        if (ctx->rasterizer && !(dev->quirks & IS_BIFROST)) {
                                v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable;
                                v->point_sprite_upper_left =
                                        ctx->rasterizer->base.sprite_coord_mode ==
                                        PIPE_SPRITE_COORD_UPPER_LEFT;
                        }
                }
        }

        /* Select this variant */
        variants->active_variant = variant;

        struct panfrost_shader_state *shader_state = &variants->variants[variant];
        assert(panfrost_variant_matches(ctx, shader_state, type));

        /* We finally have a variant, so compile it */

        if (!shader_state->compiled) {
                uint64_t outputs_written = 0;

                panfrost_shader_compile(ctx, variants->base.type,
                                        variants->base.type == PIPE_SHADER_IR_NIR ?
                                        variants->base.ir.nir :
                                        variants->base.tokens,
                                        tgsi_processor_to_shader_stage(type),
                                        shader_state, &outputs_written);

                shader_state->compiled = true;

                /* Fixup the stream out information, since what Gallium returns
                 * normally is mildly insane */

                shader_state->stream_output = variants->base.stream_output;
                shader_state->so_mask =
                        update_so_info(&shader_state->stream_output, outputs_written);
        }
}
static void *
panfrost_create_vs_state(struct pipe_context *pctx, const struct pipe_shader_state *hwcso)
{
        return panfrost_create_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
}

static void *
panfrost_create_fs_state(struct pipe_context *pctx, const struct pipe_shader_state *hwcso)
{
        return panfrost_create_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
}
static void
panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
{
        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
}

static void
panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
{
        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
}
static void
panfrost_set_vertex_buffers(
        struct pipe_context *pctx,
        unsigned start_slot,
        unsigned num_buffers,
        const struct pipe_vertex_buffer *buffers)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers, start_slot, num_buffers);
}
static void
panfrost_set_constant_buffer(
        struct pipe_context *pctx,
        enum pipe_shader_type shader, uint index,
        const struct pipe_constant_buffer *buf)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];

        util_copy_constant_buffer(&pbuf->cb[index], buf);

        unsigned mask = (1 << index);

        if (unlikely(!buf)) {
                pbuf->enabled_mask &= ~mask;
                pbuf->dirty_mask &= ~mask;
                return;
        }

        pbuf->enabled_mask |= mask;
        pbuf->dirty_mask |= mask;
}
static void
panfrost_set_stencil_ref(
        struct pipe_context *pctx,
        const struct pipe_stencil_ref *ref)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->stencil_ref = *ref;
}
static void
panfrost_create_sampler_view_bo(struct panfrost_sampler_view *so,
                                struct pipe_context *pctx,
                                struct pipe_resource *texture)
{
        struct panfrost_device *device = pan_device(pctx->screen);
        struct panfrost_resource *prsrc = (struct panfrost_resource *)texture;
        enum pipe_format format = so->base.format;
        assert(prsrc->bo);

        /* Format to access the stencil portion of a Z32_S8 texture */
        if (format == PIPE_FORMAT_X32_S8X24_UINT) {
                assert(prsrc->separate_stencil);
                texture = &prsrc->separate_stencil->base;
                prsrc = (struct panfrost_resource *)texture;
                format = texture->format;
        }

        const struct util_format_description *desc = util_format_description(format);

        bool fake_rgtc = !panfrost_supports_compressed_format(device, MALI_BC4_UNORM);

        if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC && fake_rgtc) {
                if (desc->is_snorm)
                        format = PIPE_FORMAT_R8G8B8A8_SNORM;
                else
                        format = PIPE_FORMAT_R8G8B8A8_UNORM;
                desc = util_format_description(format);
        }

        so->texture_bo = prsrc->bo->gpu;
        so->layout = prsrc->layout;

        unsigned char user_swizzle[4] = {
                so->base.swizzle_r,
                so->base.swizzle_g,
                so->base.swizzle_b,
                so->base.swizzle_a
        };

        /* In the hardware, array_size refers specifically to array textures,
         * whereas in Gallium, it also covers cubemaps */

        unsigned array_size = texture->array_size;
        unsigned depth = texture->depth0;

        if (so->base.target == PIPE_TEXTURE_CUBE) {
                /* TODO: Cubemap arrays */
                assert(array_size == 6);
                array_size /= 6;
        }

        /* MSAA only supported for 2D textures (and 2D texture arrays via an
         * extension currently unimplemented) */

        if (so->base.target == PIPE_TEXTURE_2D) {
                assert(depth == 1);
                depth = texture->nr_samples;
        } else {
                /* MSAA only supported for 2D textures */
                assert(texture->nr_samples <= 1);
        }

        enum mali_texture_type type =
                panfrost_translate_texture_type(so->base.target);

        if (device->quirks & IS_BIFROST) {
                unsigned char composed_swizzle[4];
                util_format_compose_swizzles(desc->swizzle, user_swizzle, composed_swizzle);

                unsigned size = panfrost_estimate_texture_payload_size(
                                        so->base.u.tex.first_level,
                                        so->base.u.tex.last_level,
                                        so->base.u.tex.first_layer,
                                        so->base.u.tex.last_layer,
                                        texture->nr_samples,
                                        type, prsrc->layout);

                so->bo = panfrost_bo_create(device, size, 0);

                so->bifrost_descriptor = rzalloc(pctx, struct bifrost_texture_descriptor);
                panfrost_new_texture_bifrost(
                        so->bifrost_descriptor,
                        texture->width0, texture->height0,
                        depth, array_size,
                        format,
                        type, prsrc->layout,
                        so->base.u.tex.first_level,
                        so->base.u.tex.last_level,
                        so->base.u.tex.first_layer,
                        so->base.u.tex.last_layer,
                        texture->nr_samples,
                        prsrc->cubemap_stride,
                        panfrost_translate_swizzle_4(composed_swizzle),
                        prsrc->bo->gpu,
                        prsrc->slices,
                        so->bo);
        } else {
                unsigned size = panfrost_estimate_texture_payload_size(
                                        so->base.u.tex.first_level,
                                        so->base.u.tex.last_level,
                                        so->base.u.tex.first_layer,
                                        so->base.u.tex.last_layer,
                                        texture->nr_samples,
                                        type, prsrc->layout);
                size += sizeof(struct mali_texture_descriptor);

                so->bo = panfrost_bo_create(device, size, 0);

                panfrost_new_texture(
                        so->bo->cpu,
                        texture->width0, texture->height0,
                        depth, array_size,
                        format,
                        type, prsrc->layout,
                        so->base.u.tex.first_level,
                        so->base.u.tex.last_level,
                        so->base.u.tex.first_layer,
                        so->base.u.tex.last_layer,
                        texture->nr_samples,
                        prsrc->cubemap_stride,
                        panfrost_translate_swizzle_4(user_swizzle),
                        prsrc->bo->gpu,
                        prsrc->slices);
        }
}
static struct pipe_sampler_view *
panfrost_create_sampler_view(
        struct pipe_context *pctx,
        struct pipe_resource *texture,
        const struct pipe_sampler_view *template)
{
        struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view);

        pipe_reference(NULL, &texture->reference);

        so->base = *template;
        so->base.texture = texture;
        so->base.reference.count = 1;
        so->base.context = pctx;

        panfrost_create_sampler_view_bo(so, pctx, texture);

        return (struct pipe_sampler_view *) so;
}
static void
panfrost_set_sampler_views(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_views,
        struct pipe_sampler_view **views)
{
        struct panfrost_context *ctx = pan_context(pctx);
        unsigned new_nr = 0;
        unsigned i;

        assert(start_slot == 0);

        for (i = 0; i < num_views; ++i) {
                if (views[i])
                        new_nr = i + 1;
                pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
                                            views[i]);
        }

        for (; i < ctx->sampler_view_count[shader]; i++) {
                pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
                                            NULL);
        }
        ctx->sampler_view_count[shader] = new_nr;
}
static void
panfrost_sampler_view_destroy(
        struct pipe_context *pctx,
        struct pipe_sampler_view *pview)
{
        struct panfrost_sampler_view *view = (struct panfrost_sampler_view *) pview;

        pipe_resource_reference(&pview->texture, NULL);
        panfrost_bo_unreference(view->bo);
        if (view->bifrost_descriptor)
                ralloc_free(view->bifrost_descriptor);
        ralloc_free(view);
}
static void
panfrost_set_shader_buffers(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start, unsigned count,
        const struct pipe_shader_buffer *buffers,
        unsigned writable_bitmask)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_set_shader_buffers_mask(ctx->ssbo[shader], &ctx->ssbo_mask[shader],
                                     buffers, start, count);
}
/* Hints that a framebuffer should use AFBC where possible */

static void
panfrost_hint_afbc(
        struct panfrost_device *device,
        const struct pipe_framebuffer_state *fb)
{
        /* AFBC implementation incomplete; hide it */
        if (!(device->debug & PAN_DBG_AFBC)) return;

        /* Hint AFBC to the resources bound to each color buffer */

        for (unsigned i = 0; i < fb->nr_cbufs; ++i) {
                struct pipe_surface *surf = fb->cbufs[i];
                struct panfrost_resource *rsrc = pan_resource(surf->texture);
                panfrost_resource_hint_layout(device, rsrc, MALI_TEXTURE_AFBC, 1);
        }

        /* Also hint it to the depth buffer */

        if (fb->zsbuf) {
                struct panfrost_resource *rsrc = pan_resource(fb->zsbuf->texture);
                panfrost_resource_hint_layout(device, rsrc, MALI_TEXTURE_AFBC, 1);
        }
}
static void
panfrost_set_framebuffer_state(struct pipe_context *pctx,
                               const struct pipe_framebuffer_state *fb)
{
        struct panfrost_context *ctx = pan_context(pctx);

        panfrost_hint_afbc(pan_device(pctx->screen), fb);
        util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
        ctx->batch = NULL;
        panfrost_invalidate_frame(ctx);

        /* We may need to generate a new variant if the fragment shader is
         * keyed to the framebuffer format (due to EXT_framebuffer_fetch) */
        struct panfrost_shader_variants *fs = ctx->shader[PIPE_SHADER_FRAGMENT];

        if (fs && fs->variant_count && fs->variants[fs->active_variant].outputs_read)
                ctx->base.bind_fs_state(&ctx->base, fs);
}
static void *
panfrost_create_depth_stencil_state(struct pipe_context *pipe,
                                    const struct pipe_depth_stencil_alpha_state *depth_stencil)
{
        return mem_dup(depth_stencil, sizeof(*depth_stencil));
}
static void
panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
                                  void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct pipe_depth_stencil_alpha_state *depth_stencil = cso;
        ctx->depth_stencil = depth_stencil;

        if (!depth_stencil)
                return;

        /* Alpha does not exist in the hardware (it's not in ES3), so it's
         * emulated in the fragment shader */

        if (depth_stencil->alpha.enabled) {
                /* We need to trigger a new shader (maybe) */
                ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
        }

        /* Bounds test not implemented */
        assert(!depth_stencil->depth.bounds_test);
}
static void
panfrost_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
{
        free(depth);
}
static void
panfrost_set_sample_mask(struct pipe_context *pipe,
                         unsigned sample_mask)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->sample_mask = sample_mask;
}

static void
panfrost_set_min_samples(struct pipe_context *pipe,
                         unsigned min_samples)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->min_samples = min_samples;
}
static void
panfrost_set_clip_state(struct pipe_context *pipe,
                        const struct pipe_clip_state *clip)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}
static void
panfrost_set_viewport_states(struct pipe_context *pipe,
                             unsigned start_slot,
                             unsigned num_viewports,
                             const struct pipe_viewport_state *viewports)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_viewports == 1);

        ctx->pipe_viewport = *viewports;
}
static void
panfrost_set_scissor_states(struct pipe_context *pipe,
                            unsigned start_slot,
                            unsigned num_scissors,
                            const struct pipe_scissor_state *scissors)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_scissors == 1);

        ctx->scissor = *scissors;
}
static void
panfrost_set_polygon_stipple(struct pipe_context *pipe,
                             const struct pipe_poly_stipple *stipple)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}
static void
panfrost_set_active_query_state(struct pipe_context *pipe,
                                bool enable)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->active_queries = enable;
}
static void
panfrost_destroy(struct pipe_context *pipe)
{
        struct panfrost_context *panfrost = pan_context(pipe);

        if (panfrost->blitter)
                util_blitter_destroy(panfrost->blitter);

        if (panfrost->blitter_wallpaper)
                util_blitter_destroy(panfrost->blitter_wallpaper);

        util_unreference_framebuffer_state(&panfrost->pipe_framebuffer);
        u_upload_destroy(pipe->stream_uploader);

        ralloc_free(pipe);
}
static struct pipe_query *
panfrost_create_query(struct pipe_context *pipe,
                      unsigned type,
                      unsigned index)
{
        struct panfrost_query *q = rzalloc(pipe, struct panfrost_query);

        q->type = type;
        q->index = index;

        return (struct pipe_query *) q;
}
static void
panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_query *query = (struct panfrost_query *) q;

        if (query->bo) {
                panfrost_bo_unreference(query->bo);
                query->bo = NULL;
        }

        ralloc_free(q);
}
static bool
panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
                /* Allocate a bo for the query results to be stored */
                if (!query->bo) {
                        query->bo = panfrost_bo_create(
                                            pan_device(ctx->base.screen),
                                            sizeof(unsigned), 0);
                }

                unsigned *result = (unsigned *)query->bo->cpu;
                *result = 0; /* Default to 0 if nothing at all drawn. */
                ctx->occlusion_query = query;
                break;
        }

        /* Geometry statistics are computed in the driver. XXX: geom/tess
         * shaders.. */

        case PIPE_QUERY_PRIMITIVES_GENERATED:
                query->start = ctx->prims_generated;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                query->start = ctx->tf_prims_generated;
                break;

        default:
                /* TODO: timestamp queries, etc? */
                break;
        }

        return true;
}
static bool
panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
                ctx->occlusion_query = NULL;
                break;
        case PIPE_QUERY_PRIMITIVES_GENERATED:
                query->end = ctx->prims_generated;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                query->end = ctx->tf_prims_generated;
                break;
        }

        return true;
}
static bool
panfrost_get_query_result(struct pipe_context *pipe,
                          struct pipe_query *q,
                          bool wait,
                          union pipe_query_result *vresult)
{
        struct panfrost_query *query = (struct panfrost_query *) q;
        struct panfrost_context *ctx = pan_context(pipe);

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
                panfrost_flush_batches_accessing_bo(ctx, query->bo, false);
                panfrost_bo_wait(query->bo, INT64_MAX, false);

                /* Read back the query results */
                unsigned *result = (unsigned *) query->bo->cpu;
                unsigned passed = *result;

                if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
                        vresult->u64 = passed;
                } else {
                        vresult->b = !!passed;
                }

                break;
        }

        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                panfrost_flush_all_batches(ctx);
                vresult->u64 = query->end - query->start;
                break;

        default:
                /* TODO: more queries */
                break;
        }

        return true;
}
static struct pipe_stream_output_target *
panfrost_create_stream_output_target(struct pipe_context *pctx,
                                     struct pipe_resource *prsc,
                                     unsigned buffer_offset,
                                     unsigned buffer_size)
{
        struct pipe_stream_output_target *target;

        target = rzalloc(pctx, struct pipe_stream_output_target);

        if (!target)
                return NULL;

        pipe_reference_init(&target->reference, 1);
        pipe_resource_reference(&target->buffer, prsc);

        target->context = pctx;
        target->buffer_offset = buffer_offset;
        target->buffer_size = buffer_size;

        return target;
}
static void
panfrost_stream_output_target_destroy(struct pipe_context *pctx,
                                      struct pipe_stream_output_target *target)
{
        pipe_resource_reference(&target->buffer, NULL);
        ralloc_free(target);
}
static void
panfrost_set_stream_output_targets(struct pipe_context *pctx,
                                   unsigned num_targets,
                                   struct pipe_stream_output_target **targets,
                                   const unsigned *offsets)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_streamout *so = &ctx->streamout;

        assert(num_targets <= ARRAY_SIZE(so->targets));

        for (unsigned i = 0; i < num_targets; i++) {
                if (offsets[i] != -1)
                        so->offsets[i] = offsets[i];

                pipe_so_target_reference(&so->targets[i], targets[i]);
        }

        /* Release only the leftover targets beyond the new count; starting
         * this loop at zero would immediately drop the references taken
         * just above */
        for (unsigned i = num_targets; i < so->num_targets; i++)
                pipe_so_target_reference(&so->targets[i], NULL);

        so->num_targets = num_targets;
}
struct pipe_context *
panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
{
        struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context);
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_device *dev = pan_device(screen);

        gallium->screen = screen;

        gallium->destroy = panfrost_destroy;

        gallium->set_framebuffer_state = panfrost_set_framebuffer_state;

        gallium->flush = panfrost_flush;
        gallium->clear = panfrost_clear;
        gallium->draw_vbo = panfrost_draw_vbo;
        gallium->texture_barrier = panfrost_texture_barrier;

        gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
        gallium->set_constant_buffer = panfrost_set_constant_buffer;
        gallium->set_shader_buffers = panfrost_set_shader_buffers;

        gallium->set_stencil_ref = panfrost_set_stencil_ref;

        gallium->create_sampler_view = panfrost_create_sampler_view;
        gallium->set_sampler_views = panfrost_set_sampler_views;
        gallium->sampler_view_destroy = panfrost_sampler_view_destroy;

        gallium->create_rasterizer_state = panfrost_create_rasterizer_state;
        gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
        gallium->delete_rasterizer_state = panfrost_generic_cso_delete;

        gallium->create_vertex_elements_state = panfrost_create_vertex_elements_state;
        gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
        gallium->delete_vertex_elements_state = panfrost_generic_cso_delete;

        gallium->create_fs_state = panfrost_create_fs_state;
        gallium->delete_fs_state = panfrost_delete_shader_state;
        gallium->bind_fs_state = panfrost_bind_fs_state;

        gallium->create_vs_state = panfrost_create_vs_state;
        gallium->delete_vs_state = panfrost_delete_shader_state;
        gallium->bind_vs_state = panfrost_bind_vs_state;

        gallium->create_sampler_state = panfrost_create_sampler_state;
        gallium->delete_sampler_state = panfrost_generic_cso_delete;
        gallium->bind_sampler_states = panfrost_bind_sampler_states;

        gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
        gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
        gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;

        gallium->set_sample_mask = panfrost_set_sample_mask;
        gallium->set_min_samples = panfrost_set_min_samples;

        gallium->set_clip_state = panfrost_set_clip_state;
        gallium->set_viewport_states = panfrost_set_viewport_states;
        gallium->set_scissor_states = panfrost_set_scissor_states;
        gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
        gallium->set_active_query_state = panfrost_set_active_query_state;

        gallium->create_query = panfrost_create_query;
        gallium->destroy_query = panfrost_destroy_query;
        gallium->begin_query = panfrost_begin_query;
        gallium->end_query = panfrost_end_query;
        gallium->get_query_result = panfrost_get_query_result;

        gallium->create_stream_output_target = panfrost_create_stream_output_target;
        gallium->stream_output_target_destroy = panfrost_stream_output_target_destroy;
        gallium->set_stream_output_targets = panfrost_set_stream_output_targets;

        panfrost_resource_context_init(gallium);
        panfrost_blend_context_init(gallium);
        panfrost_compute_context_init(gallium);

        gallium->stream_uploader = u_upload_create_default(gallium);
        gallium->const_uploader = gallium->stream_uploader;
        assert(gallium->stream_uploader);

        /* All of our GPUs support ES mode. Midgard supports additionally
         * QUADS/QUAD_STRIPS/POLYGON. Bifrost supports just QUADS. */

        ctx->draw_modes = (1 << (PIPE_PRIM_QUADS + 1)) - 1;
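        /* PIPE_PRIM_QUADS is the eighth pipe_prim_type value, so this sets
         * bits 0-7: every mode from POINTS up to and including QUADS. */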
        if (!(dev->quirks & IS_BIFROST)) {
                ctx->draw_modes |= (1 << PIPE_PRIM_QUAD_STRIP);
                ctx->draw_modes |= (1 << PIPE_PRIM_POLYGON);
        }

        ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);

        ctx->blitter = util_blitter_create(gallium);
        ctx->blitter_wallpaper = util_blitter_create(gallium);

        assert(ctx->blitter);
        assert(ctx->blitter_wallpaper);

        /* Prepare for render! */

        panfrost_batch_init(ctx);
        panfrost_invalidate_frame(ctx);

        if (!(dev->quirks & IS_BIFROST)) {
                for (unsigned c = 0; c < PIPE_MAX_COLOR_BUFS; ++c)
                        ctx->blit_blend.rt[c].shaders = _mesa_hash_table_u64_create(ctx);
        }

        /* By default mask everything on */
        ctx->sample_mask = ~0;

        return gallium;
}