/*
 * © Copyright 2018 Alyssa Rosenzweig
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include "pan_context.h"
#include "pan_swizzle.h"
#include "pan_format.h"

#include "util/macros.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "util/u_memory.h"
#include "util/u_vbuf.h"
#include "util/half_float.h"
#include "util/u_helpers.h"
#include "indices/u_primconvert.h"
#include "tgsi/tgsi_parse.h"
#include "util/u_math.h"

#include "pan_screen.h"
#include "pan_blending.h"
#include "pan_blend_shaders.h"
#include "pan_tiler.h"
static int performance_counter_number = 0;
extern const char *pan_counters_base;

/* Do not actually send anything to the GPU; merely generate the cmdstream as
 * fast as possible. Disables framebuffer writes */
//#define DRY_RUN
static enum mali_job_type
panfrost_job_type_for_pipe(enum pipe_shader_type type)
{
        switch (type) {
        case PIPE_SHADER_VERTEX:
                return JOB_TYPE_VERTEX;

        case PIPE_SHADER_FRAGMENT:
                /* Note: JOB_TYPE_FRAGMENT is different.
                 * JOB_TYPE_FRAGMENT actually executes the
                 * fragment shader, but JOB_TYPE_TILER is how you
                 * specify a fragment shader */
                return JOB_TYPE_TILER;

        case PIPE_SHADER_GEOMETRY:
                return JOB_TYPE_GEOMETRY;

        case PIPE_SHADER_COMPUTE:
                return JOB_TYPE_COMPUTE;

        default:
                unreachable("Unsupported shader stage");
        }
}
static void
panfrost_enable_checksum(struct panfrost_context *ctx, struct panfrost_resource *rsrc)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);
        int tile_w = (rsrc->base.width0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
        int tile_h = (rsrc->base.height0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;

        /* 8 byte checksum per tile */
        rsrc->bo->checksum_stride = tile_w * 8;
        int pages = (((rsrc->bo->checksum_stride * tile_h) + 4095) / 4096);
        screen->driver->allocate_slab(screen, &rsrc->bo->checksum_slab, pages, false, 0, 0, 0);

        rsrc->bo->has_checksum = true;
}
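
/* Worked example (illustrative, not from the original source): assuming
 * MALI_TILE_LENGTH is 16 (so MALI_TILE_SHIFT is 4), a 1920x1080 surface
 * gives tile_w = (1920 + 15) >> 4 = 120 and tile_h = (1080 + 15) >> 4 = 68,
 * hence checksum_stride = 120 * 8 = 960 bytes and a slab of
 * (960 * 68 + 4095) / 4096 = 16 pages. */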
/* Framebuffer descriptor */

static void
panfrost_set_framebuffer_resolution(struct mali_single_framebuffer *fb, int w, int h)
{
        fb->width = MALI_POSITIVE(w);
        fb->height = MALI_POSITIVE(h);

        /* No idea why this is needed, but it's how resolution_check is
         * calculated. It's not clear to us yet why the hardware wants this.
         * The formula itself was discovered mostly by manual bruteforce and
         * aggressive algebraic simplification. */

        fb->tiler_resolution_check = ((w + h) / 3) << 4;
}
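
/* Worked example (illustrative): for a 600x600 framebuffer, the formula
 * above yields tiler_resolution_check = ((600 + 600) / 3) << 4
 * = 400 << 4 = 6400 (0x1900). */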
struct mali_single_framebuffer
panfrost_emit_sfbd(struct panfrost_context *ctx, unsigned vertex_count)
{
        struct mali_single_framebuffer framebuffer = {
                .format = 0x30000000,
                .clear_flags = 0x1000,
                .unknown_address_0 = ctx->scratchpad.gpu,
                .tiler_polygon_list = ctx->tiler_polygon_list.gpu,
                .tiler_polygon_list_body = ctx->tiler_polygon_list.gpu + 40960,
                .tiler_hierarchy_mask = 0xF0,
                .tiler_heap_free = ctx->tiler_heap.gpu,
                .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size,
        };

        panfrost_set_framebuffer_resolution(&framebuffer, ctx->pipe_framebuffer.width, ctx->pipe_framebuffer.height);

        return framebuffer;
}
struct bifrost_framebuffer
panfrost_emit_mfbd(struct panfrost_context *ctx, unsigned vertex_count)
{
        unsigned width = ctx->pipe_framebuffer.width;
        unsigned height = ctx->pipe_framebuffer.height;

        struct bifrost_framebuffer framebuffer = {
                .width1 = MALI_POSITIVE(width),
                .height1 = MALI_POSITIVE(height),
                .width2 = MALI_POSITIVE(width),
                .height2 = MALI_POSITIVE(height),

                .rt_count_1 = MALI_POSITIVE(1),

                .scratchpad = ctx->scratchpad.gpu,
        };

        framebuffer.tiler_hierarchy_mask =
                panfrost_choose_hierarchy_mask(width, height, vertex_count);

        /* Compute the polygon header size and use that to offset the body */

        unsigned header_size = panfrost_tiler_header_size(
                                       width, height, framebuffer.tiler_hierarchy_mask);

        unsigned body_size = panfrost_tiler_body_size(
                                     width, height, framebuffer.tiler_hierarchy_mask);

        unsigned total_size = header_size + body_size;

        if (framebuffer.tiler_hierarchy_mask) {
                assert(ctx->tiler_polygon_list.size >= total_size);

                /* Specify allocated tiler structures */
                framebuffer.tiler_polygon_list = ctx->tiler_polygon_list.gpu;

                /* Allow the entire tiler heap */
                framebuffer.tiler_heap_start = ctx->tiler_heap.gpu;
                framebuffer.tiler_heap_end =
                        ctx->tiler_heap.gpu + ctx->tiler_heap.size;
        } else {
                /* The tiler is disabled, so don't allow the tiler heap */
                framebuffer.tiler_heap_start = ctx->tiler_heap.gpu;
                framebuffer.tiler_heap_end = framebuffer.tiler_heap_start;

                /* Use a dummy polygon list */
                framebuffer.tiler_polygon_list = ctx->tiler_dummy.gpu;

                /* Also, set a "tiler disabled?" flag? */
                framebuffer.tiler_hierarchy_mask |= 0x1000;
        }

        framebuffer.tiler_polygon_list_body =
                framebuffer.tiler_polygon_list + header_size;

        framebuffer.tiler_polygon_list_size =
                header_size + body_size;

        return framebuffer;
}
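
/* Illustrative sketch (not from the original source): the polygon list is
 * one allocation laid out as [header | body]. With a hypothetical
 * header_size of 0x200 and a list at GPU address 0x10000, the body pointer
 * would be 0x10200 and tiler_polygon_list_size would be 0x200 + body_size. */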
/* Are we currently rendering to the screen (rather than an FBO)? */

static bool
panfrost_is_scanout(struct panfrost_context *ctx)
{
        /* If there is no color buffer, it's an FBO */
        if (!ctx->pipe_framebuffer.nr_cbufs)
                return false;

        /* If we're too early that no framebuffer was sent, it's scanout */
        if (!ctx->pipe_framebuffer.cbufs[0])
                return true;

        return ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_DISPLAY_TARGET ||
               ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SCANOUT ||
               ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
}
static uint32_t
pan_pack_color(const union pipe_color_union *color, enum pipe_format format)
{
        /* Alpha magicked to 1.0 if there is no alpha */

        bool has_alpha = util_format_has_alpha(format);
        float clear_alpha = has_alpha ? color->f[3] : 1.0f;

        /* Packed color depends on the framebuffer format */

        const struct util_format_description *desc =
                util_format_description(format);

        if (util_format_is_rgba8_variant(desc)) {
                return (float_to_ubyte(clear_alpha) << 24) |
                       (float_to_ubyte(color->f[2]) << 16) |
                       (float_to_ubyte(color->f[1]) << 8) |
                       (float_to_ubyte(color->f[0]) << 0);
        } else if (format == PIPE_FORMAT_B5G6R5_UNORM) {
                /* First, we convert the components to R5, G6, B5 separately */
                unsigned r5 = CLAMP(color->f[0], 0.0, 1.0) * 31.0;
                unsigned g6 = CLAMP(color->f[1], 0.0, 1.0) * 63.0;
                unsigned b5 = CLAMP(color->f[2], 0.0, 1.0) * 31.0;

                /* Then we pack into a sparse u32. TODO: Why these shifts? */
                return (b5 << 25) | (g6 << 14) | (r5 << 5);
        }

        /* TODO: handle other formats */
        return 0;
}
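
/* Worked example (illustrative): clearing to opaque red (1.0, 0.0, 0.0) in
 * B5G6R5 gives r5 = 31, g6 = 0, b5 = 0, so the sparse packing above returns
 * (0 << 25) | (0 << 14) | (31 << 5) = 0x3E0. */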
static void
panfrost_clear(
        struct pipe_context *pipe,
        unsigned buffers,
        const union pipe_color_union *color,
        double depth, unsigned stencil)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);

        if (buffers & PIPE_CLEAR_COLOR) {
                enum pipe_format format = ctx->pipe_framebuffer.cbufs[0]->format;
                job->clear_color = pan_pack_color(color, format);
        }

        if (buffers & PIPE_CLEAR_DEPTH) {
                job->clear_depth = depth;
        }

        if (buffers & PIPE_CLEAR_STENCIL) {
                job->clear_stencil = stencil;
        }

        job->clear |= buffers;
}
static mali_ptr
panfrost_attach_vt_mfbd(struct panfrost_context *ctx)
{
        return panfrost_upload_transient(ctx, &ctx->vt_framebuffer_mfbd, sizeof(ctx->vt_framebuffer_mfbd)) | MALI_MFBD;
}

static mali_ptr
panfrost_attach_vt_sfbd(struct panfrost_context *ctx)
{
        return panfrost_upload_transient(ctx, &ctx->vt_framebuffer_sfbd, sizeof(ctx->vt_framebuffer_sfbd)) | MALI_SFBD;
}

static void
panfrost_attach_vt_framebuffer(struct panfrost_context *ctx)
{
        mali_ptr framebuffer = ctx->require_sfbd ?
                               panfrost_attach_vt_sfbd(ctx) :
                               panfrost_attach_vt_mfbd(ctx);

        ctx->payload_vertex.postfix.framebuffer = framebuffer;
        ctx->payload_tiler.postfix.framebuffer = framebuffer;
}
/* Reset per-frame context, called on context initialisation as well as after
 * flushing a frame */

static void
panfrost_invalidate_frame(struct panfrost_context *ctx)
{
        unsigned transient_count = ctx->transient_pools[ctx->cmdstream_i].entry_index * ctx->transient_pools[0].entry_size + ctx->transient_pools[ctx->cmdstream_i].entry_offset;
        DBG("Uploaded transient %d bytes\n", transient_count);

        /* Rotate cmdstream */
        if ((++ctx->cmdstream_i) == (sizeof(ctx->transient_pools) / sizeof(ctx->transient_pools[0])))
                ctx->cmdstream_i = 0;

        if (ctx->require_sfbd)
                ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
        else
                ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);

        /* Reset varyings allocated */
        ctx->varying_height = 0;

        /* The transient cmdstream is dirty every frame; the only bits worth preserving
         * (textures, shaders, etc) are in other buffers anyways */

        ctx->transient_pools[ctx->cmdstream_i].entry_index = 0;
        ctx->transient_pools[ctx->cmdstream_i].entry_offset = 0;

        /* Regenerate payloads */
        panfrost_attach_vt_framebuffer(ctx);

        if (ctx->rasterizer)
                ctx->dirty |= PAN_DIRTY_RASTERIZER;

        ctx->dirty |= PAN_DIRTY_SAMPLERS | PAN_DIRTY_TEXTURES;

        /* Reset job counters */
        ctx->draw_count = 0;
        ctx->vertex_job_count = 0;
        ctx->tiler_job_count = 0;
}
/* In practice, every field of these payloads should be configurable
 * arbitrarily, which means these functions are basically catch-all's for
 * as-of-yet unwavering unknowns */

static void
panfrost_emit_vertex_payload(struct panfrost_context *ctx)
{
        struct midgard_payload_vertex_tiler payload = {
                .prefix = {
                        .workgroups_z_shift = 32,
                        .workgroups_x_shift_2 = 0x2,
                        .workgroups_x_shift_3 = 0x5,
                },
                .gl_enables = 0x4 | (ctx->is_t6xx ? 0 : 0x2),
        };

        memcpy(&ctx->payload_vertex, &payload, sizeof(payload));
}

static void
panfrost_emit_tiler_payload(struct panfrost_context *ctx)
{
        struct midgard_payload_vertex_tiler payload = {
                .prefix = {
                        .workgroups_z_shift = 32,
                        .workgroups_x_shift_2 = 0x2,
                        .workgroups_x_shift_3 = 0x6,
                },
                .zero1 = 0xffff, /* Why is this only seen on test-quad-textured? */
        };

        memcpy(&ctx->payload_tiler, &payload, sizeof(payload));
}
static unsigned
translate_tex_wrap(enum pipe_tex_wrap w)
{
        switch (w) {
        case PIPE_TEX_WRAP_REPEAT:
                return MALI_WRAP_REPEAT;

        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return MALI_WRAP_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return MALI_WRAP_CLAMP_TO_BORDER;

        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return MALI_WRAP_MIRRORED_REPEAT;

        default:
                unreachable("Invalid wrap");
        }
}

static unsigned
translate_tex_filter(enum pipe_tex_filter f)
{
        switch (f) {
        case PIPE_TEX_FILTER_NEAREST:
                return MALI_NEAREST;

        case PIPE_TEX_FILTER_LINEAR:
                return MALI_LINEAR;

        default:
                unreachable("Invalid filter");
        }
}

static unsigned
translate_mip_filter(enum pipe_tex_mipfilter f)
{
        return (f == PIPE_TEX_MIPFILTER_LINEAR) ? MALI_MIP_LINEAR : 0;
}
static unsigned
panfrost_translate_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_FUNC_ALWAYS;

        default:
                unreachable("Invalid func");
        }
}
static unsigned
panfrost_translate_alt_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_ALT_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_ALT_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_ALT_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_ALT_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_ALT_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_ALT_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_ALT_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_ALT_FUNC_ALWAYS;

        default:
                unreachable("Invalid alt func");
        }
}
static unsigned
panfrost_translate_stencil_op(enum pipe_stencil_op in)
{
        switch (in) {
        case PIPE_STENCIL_OP_KEEP:
                return MALI_STENCIL_KEEP;

        case PIPE_STENCIL_OP_ZERO:
                return MALI_STENCIL_ZERO;

        case PIPE_STENCIL_OP_REPLACE:
                return MALI_STENCIL_REPLACE;

        case PIPE_STENCIL_OP_INCR:
                return MALI_STENCIL_INCR;

        case PIPE_STENCIL_OP_DECR:
                return MALI_STENCIL_DECR;

        case PIPE_STENCIL_OP_INCR_WRAP:
                return MALI_STENCIL_INCR_WRAP;

        case PIPE_STENCIL_OP_DECR_WRAP:
                return MALI_STENCIL_DECR_WRAP;

        case PIPE_STENCIL_OP_INVERT:
                return MALI_STENCIL_INVERT;

        default:
                unreachable("Invalid stencil op");
        }
}
static void
panfrost_make_stencil_state(const struct pipe_stencil_state *in, struct mali_stencil_test *out)
{
        out->ref = 0; /* Gallium gets it from elsewhere */

        out->mask = in->valuemask;
        out->func = panfrost_translate_compare_func(in->func);
        out->sfail = panfrost_translate_stencil_op(in->fail_op);
        out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
        out->dppass = panfrost_translate_stencil_op(in->zpass_op);
}
static void
panfrost_default_shader_backend(struct panfrost_context *ctx)
{
        struct mali_shader_meta shader = {
                .alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000),

                .unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010,
                .unknown2_4 = MALI_NO_MSAA | 0x4e0,
        };

        if (ctx->is_t6xx) {
                shader.unknown2_4 |= 0x10;
        }

        struct pipe_stencil_state default_stencil = {
                .enabled = 0,
                .func = PIPE_FUNC_ALWAYS,
                .fail_op = MALI_STENCIL_KEEP,
                .zfail_op = MALI_STENCIL_KEEP,
                .zpass_op = MALI_STENCIL_KEEP,
                .writemask = 0xFF,
                .valuemask = 0xFF
        };

        panfrost_make_stencil_state(&default_stencil, &shader.stencil_front);
        shader.stencil_mask_front = default_stencil.writemask;

        panfrost_make_stencil_state(&default_stencil, &shader.stencil_back);
        shader.stencil_mask_back = default_stencil.writemask;

        if (default_stencil.enabled)
                shader.unknown2_4 |= MALI_STENCIL_TEST;

        memcpy(&ctx->fragment_shader_core, &shader, sizeof(shader));
}
static void
panfrost_link_job_pair(struct mali_job_descriptor_header *first, mali_ptr next)
{
        if (first->job_descriptor_size)
                first->next_job_64 = (u64) (uintptr_t) next;
        else
                first->next_job_32 = (u32) (uintptr_t) next;
}
/* Generates a vertex/tiler job. This is, in some sense, the heart of the
 * graphics command stream. It should be called once per draw, according to
 * presentations. Set is_tiler for "tiler" jobs (fragment shader jobs, but in
 * Mali parlance, "fragment" refers to framebuffer writeout). Clear it for
 * vertex jobs. */

struct panfrost_transfer
panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler)
{
        /* Each draw call corresponds to two jobs, and the set-value job is first */
        int draw_job_index = 1 + (2 * ctx->draw_count) + 1;

        struct mali_job_descriptor_header job = {
                .job_type = is_tiler ? JOB_TYPE_TILER : JOB_TYPE_VERTEX,
                .job_index = draw_job_index + (is_tiler ? 1 : 0),
#ifdef __LP64__
                .job_descriptor_size = 1,
#endif
        };

        struct midgard_payload_vertex_tiler *payload = is_tiler ? &ctx->payload_tiler : &ctx->payload_vertex;

        /* There's some padding hacks on 32-bit */

#ifdef __LP64__
        int offset = 0;
#else
        int offset = 4;
#endif

        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(*payload));

        if (is_tiler) {
                /* Tiler jobs depend on vertex jobs */

                job.job_dependency_index_1 = draw_job_index;

                /* Tiler jobs also depend on the previous tiler job */

                if (ctx->draw_count) {
                        job.job_dependency_index_2 = draw_job_index - 1;
                        /* Previous tiler job points to this tiler job */
                        panfrost_link_job_pair(ctx->u_tiler_jobs[ctx->draw_count - 1], transfer.gpu);
                } else {
                        /* The only vertex job so far points to first tiler job */
                        panfrost_link_job_pair(ctx->u_vertex_jobs[0], transfer.gpu);
                }
        } else {
                if (ctx->draw_count) {
                        /* Previous vertex job points to this vertex job */
                        panfrost_link_job_pair(ctx->u_vertex_jobs[ctx->draw_count - 1], transfer.gpu);

                        /* Last vertex job points to first tiler job */
                        panfrost_link_job_pair(&job, ctx->tiler_jobs[0]);
                } else {
                        /* Have the first vertex job depend on the set value job */
                        job.job_dependency_index_1 = ctx->u_set_value_job->job_index;
                        panfrost_link_job_pair(ctx->u_set_value_job, transfer.gpu);
                }
        }

        memcpy(transfer.cpu, &job, sizeof(job));
        memcpy(transfer.cpu + sizeof(job) - offset, payload, sizeof(*payload));
        return transfer;
}
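
/* Worked example (illustrative): for the first draw (draw_count == 0),
 * draw_job_index = 1 + 0 + 1 = 2, so the vertex job takes job_index 2 and
 * the tiler job 3, after the set-value job at index 1. The second draw uses
 * indices 4 and 5, its tiler job depending on index 4 (its vertex job) and
 * index 3 (the previous tiler job). */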
/* Generates a set value job. It's unclear what exactly this does, why it's
 * necessary, and when to call it. */

static void
panfrost_set_value_job(struct panfrost_context *ctx)
{
        struct mali_job_descriptor_header job = {
                .job_type = JOB_TYPE_SET_VALUE,
                .job_descriptor_size = 1,
                .job_index = 1,
        };

        struct mali_payload_set_value payload = {
                .out = ctx->tiler_polygon_list.gpu,
                .unknown = 0x3,
        };

        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(payload));
        memcpy(transfer.cpu, &job, sizeof(job));
        memcpy(transfer.cpu + sizeof(job), &payload, sizeof(payload));

        ctx->u_set_value_job = (struct mali_job_descriptor_header *) transfer.cpu;
        ctx->set_value_job = transfer.gpu;
}
static mali_ptr
panfrost_emit_varyings(
        struct panfrost_context *ctx,
        union mali_attr *slot,
        unsigned stride,
        unsigned count)
{
        mali_ptr varying_address = ctx->varying_mem.gpu + ctx->varying_height;

        /* Fill out the descriptor */
        slot->elements = varying_address | MALI_ATTR_LINEAR;
        slot->stride = stride;
        slot->size = stride * count;

        ctx->varying_height += ALIGN(slot->size, 64);
        assert(ctx->varying_height < ctx->varying_mem.size);

        return varying_address;
}
static void
panfrost_emit_point_coord(union mali_attr *slot)
{
        slot->elements = MALI_VARYING_POINT_COORD | MALI_ATTR_LINEAR;
        slot->stride = slot->size = 0;
}
static void
panfrost_emit_varying_descriptor(
        struct panfrost_context *ctx,
        unsigned invocation_count)
{
        /* Load the shaders */

        struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
        struct panfrost_shader_state *fs = &ctx->fs->variants[ctx->fs->active_variant];
        unsigned int num_gen_varyings = 0;

        /* Allocate the varying descriptor */

        size_t vs_size = sizeof(struct mali_attr_meta) * vs->tripipe->varying_count;
        size_t fs_size = sizeof(struct mali_attr_meta) * fs->tripipe->varying_count;

        struct panfrost_transfer trans = panfrost_allocate_transient(ctx,
                                         vs_size + fs_size);

        /*
         * Assign ->src_offset now that we know about all the general purpose
         * varyings that will be used by the fragment and vertex shaders.
         */
        for (unsigned i = 0; i < vs->tripipe->varying_count; i++) {
                /*
                 * General purpose varyings have ->index set to 0, skip other
                 * entries.
                 */
                if (vs->varyings[i].index)
                        continue;

                vs->varyings[i].src_offset = 16 * (num_gen_varyings++);
        }

        for (unsigned i = 0; i < fs->tripipe->varying_count; i++) {
                unsigned j;

                if (fs->varyings[i].index)
                        continue;

                /*
                 * Re-use the VS general purpose varying pos if it exists,
                 * create a new one otherwise.
                 */
                for (j = 0; j < vs->tripipe->varying_count; j++) {
                        if (fs->varyings_loc[i] == vs->varyings_loc[j])
                                break;
                }

                if (j < vs->tripipe->varying_count)
                        fs->varyings[i].src_offset = vs->varyings[j].src_offset;
                else
                        fs->varyings[i].src_offset = 16 * (num_gen_varyings++);
        }

        memcpy(trans.cpu, vs->varyings, vs_size);
        memcpy(trans.cpu + vs_size, fs->varyings, fs_size);

        ctx->payload_vertex.postfix.varying_meta = trans.gpu;
        ctx->payload_tiler.postfix.varying_meta = trans.gpu + vs_size;

        /* Buffer indices must be in this order per our convention */
        union mali_attr varyings[PIPE_MAX_ATTRIBS];
        unsigned idx = 0;

        panfrost_emit_varyings(ctx, &varyings[idx++], num_gen_varyings * 16,
                               invocation_count);

        /* fp32 vec4 gl_Position */
        ctx->payload_tiler.postfix.position_varying =
                panfrost_emit_varyings(ctx, &varyings[idx++],
                                       sizeof(float) * 4, invocation_count);

        if (vs->writes_point_size || fs->reads_point_coord) {
                /* fp16 vec1 gl_PointSize */
                ctx->payload_tiler.primitive_size.pointer =
                        panfrost_emit_varyings(ctx, &varyings[idx++],
                                               2, invocation_count);
        }

        if (fs->reads_point_coord) {
                /* Special descriptor */
                panfrost_emit_point_coord(&varyings[idx++]);
        }

        mali_ptr varyings_p = panfrost_upload_transient(ctx, &varyings, idx * sizeof(union mali_attr));
        ctx->payload_vertex.postfix.varyings = varyings_p;
        ctx->payload_tiler.postfix.varyings = varyings_p;
}
static mali_ptr
panfrost_vertex_buffer_address(struct panfrost_context *ctx, unsigned i)
{
        struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
        struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);

        return rsrc->bo->gpu + buf->buffer_offset;
}
/* Emits attributes and varying descriptors, which should be called every draw,
 * excepting some obscure circumstances */

static void
panfrost_emit_vertex_data(struct panfrost_context *ctx, struct panfrost_job *job)
{
        /* Staged mali_attr, and index into them. i =/= k, depending on the
         * vertex buffer mask */
        union mali_attr attrs[PIPE_MAX_ATTRIBS];
        unsigned k = 0;

        unsigned invocation_count = MALI_NEGATIVE(ctx->payload_tiler.prefix.invocation_count);

        for (int i = 0; i < ARRAY_SIZE(ctx->vertex_buffers); ++i) {
                if (!(ctx->vb_mask & (1 << i))) continue;

                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
                struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);

                if (!rsrc) continue;

                /* Align to 64 bytes by masking off the lower bits. This
                 * will be adjusted back when we fixup the src_offset in
                 * mali_attr_meta */

                mali_ptr addr = panfrost_vertex_buffer_address(ctx, i) & ~63;

                /* Offset vertex count by draw_start to make sure we upload enough */
                attrs[k].stride = buf->stride;
                attrs[k].size = rsrc->base.width0;

                panfrost_job_add_bo(job, rsrc->bo);
                attrs[k].elements = addr | MALI_ATTR_LINEAR;

                ++k;
        }

        ctx->payload_vertex.postfix.attributes = panfrost_upload_transient(ctx, attrs, k * sizeof(union mali_attr));

        panfrost_emit_varying_descriptor(ctx, invocation_count);
}
static bool
panfrost_writes_point_size(struct panfrost_context *ctx)
{
        assert(ctx->vs);
        struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];

        return vs->writes_point_size && ctx->payload_tiler.prefix.draw_mode == MALI_POINTS;
}
/* Stage the attribute descriptors so we can adjust src_offset
 * to let BOs align nicely */

static void
panfrost_stage_attributes(struct panfrost_context *ctx)
{
        struct panfrost_vertex_state *so = ctx->vertex;

        size_t sz = sizeof(struct mali_attr_meta) * so->num_elements;
        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sz);
        struct mali_attr_meta *target = (struct mali_attr_meta *) transfer.cpu;

        /* Copy as-is for the first pass */
        memcpy(target, so->hw, sz);

        /* Fixup offsets for the second pass. Recall that the hardware
         * calculates attribute addresses as:
         *
         *      addr = base + (stride * vtx) + src_offset;
         *
         * However, on Mali, base must be aligned to 64-bytes, so we
         * instead let:
         *
         *      base' = base & ~63 = base - (base & 63)
         *
         * To compensate when using base' (see emit_vertex_data), we have
         * to adjust src_offset by the masked off piece:
         *
         *      addr' = base' + (stride * vtx) + (src_offset + (base & 63))
         *            = base - (base & 63) + (stride * vtx) + src_offset + (base & 63)
         *            = base + (stride * vtx) + src_offset
         *            = addr
         */

        for (unsigned i = 0; i < so->num_elements; ++i) {
                unsigned vbi = so->pipe[i].vertex_buffer_index;
                mali_ptr addr = panfrost_vertex_buffer_address(ctx, vbi);

                /* Adjust by the masked off bits of the offset */
                target[i].src_offset += (addr & 63);
        }

        ctx->payload_vertex.postfix.attribute_meta = transfer.gpu;
}
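
/* Worked example (illustrative): a vertex buffer at GPU address 0x1007C is
 * emitted with base' = 0x1007C & ~63 = 0x10040, and each element's
 * src_offset is bumped by 0x1007C & 63 = 0x3C here, so the hardware's
 * base' + src_offset' sum lands back on the original address. */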
static void
panfrost_upload_sampler_descriptors(struct panfrost_context *ctx)
{
        size_t desc_size = sizeof(struct mali_sampler_descriptor);

        for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
                mali_ptr upload = 0;

                if (ctx->sampler_count[t] && ctx->sampler_view_count[t]) {
                        size_t transfer_size = desc_size * ctx->sampler_count[t];

                        struct panfrost_transfer transfer =
                                panfrost_allocate_transient(ctx, transfer_size);

                        struct mali_sampler_descriptor *desc =
                                (struct mali_sampler_descriptor *) transfer.cpu;

                        for (int i = 0; i < ctx->sampler_count[t]; ++i)
                                desc[i] = ctx->samplers[t][i]->hw;

                        upload = transfer.gpu;
                }

                if (t == PIPE_SHADER_FRAGMENT)
                        ctx->payload_tiler.postfix.sampler_descriptor = upload;
                else if (t == PIPE_SHADER_VERTEX)
                        ctx->payload_vertex.postfix.sampler_descriptor = upload;
                else
                        assert(0);
        }
}
/* Computes the address to a texture at a particular slice */

static mali_ptr
panfrost_get_texture_address(
        struct panfrost_resource *rsrc,
        unsigned level, unsigned face)
{
        unsigned level_offset = rsrc->bo->slices[level].offset;
        unsigned face_offset = face * rsrc->bo->cubemap_stride;

        return rsrc->bo->gpu + level_offset + face_offset;
}
static mali_ptr
panfrost_upload_tex(
        struct panfrost_context *ctx,
        struct panfrost_sampler_view *view)
{
        if (!view)
                return (mali_ptr) NULL;

        struct pipe_sampler_view *pview = &view->base;
        struct panfrost_resource *rsrc = pan_resource(pview->texture);

        /* Do we interleave an explicit stride with every element? */

        bool has_manual_stride =
                view->hw.format.usage2 & MALI_TEX_MANUAL_STRIDE;

        /* For easy access */

        assert(pview->target != PIPE_BUFFER);
        unsigned first_level = pview->u.tex.first_level;
        unsigned last_level = pview->u.tex.last_level;

        /* Inject the addresses in, interleaving mip levels, cube faces, and
         * strides in that order */

        unsigned idx = 0;

        for (unsigned l = first_level; l <= last_level; ++l) {
                for (unsigned f = 0; f < pview->texture->array_size; ++f) {
                        view->hw.payload[idx++] =
                                panfrost_get_texture_address(rsrc, l, f);

                        if (has_manual_stride) {
                                view->hw.payload[idx++] =
                                        rsrc->bo->slices[l].stride;
                        }
                }
        }

        return panfrost_upload_transient(ctx, &view->hw,
                                         sizeof(struct mali_texture_descriptor));
}
static void
panfrost_upload_texture_descriptors(struct panfrost_context *ctx)
{
        for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
                mali_ptr trampoline = 0;

                if (ctx->sampler_view_count[t]) {
                        uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];

                        for (int i = 0; i < ctx->sampler_view_count[t]; ++i)
                                trampolines[i] =
                                        panfrost_upload_tex(ctx, ctx->sampler_views[t][i]);

                        trampoline = panfrost_upload_transient(ctx, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]);
                }

                if (t == PIPE_SHADER_FRAGMENT)
                        ctx->payload_tiler.postfix.texture_trampoline = trampoline;
                else if (t == PIPE_SHADER_VERTEX)
                        ctx->payload_vertex.postfix.texture_trampoline = trampoline;
        }
}
struct sysval_uniform {
        union {
                float f[4];
                int32_t i[4];
                uint32_t u[4];
        };
};
static void panfrost_upload_viewport_scale_sysval(struct panfrost_context *ctx,
                struct sysval_uniform *uniform)
{
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->scale[0];
        uniform->f[1] = vp->scale[1];
        uniform->f[2] = vp->scale[2];
}

static void panfrost_upload_viewport_offset_sysval(struct panfrost_context *ctx,
                struct sysval_uniform *uniform)
{
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->translate[0];
        uniform->f[1] = vp->translate[1];
        uniform->f[2] = vp->translate[2];
}
static void panfrost_upload_sysvals(struct panfrost_context *ctx, void *buf,
                                    struct panfrost_shader_state *ss,
                                    enum pipe_shader_type st)
{
        struct sysval_uniform *uniforms = (void *)buf;

        for (unsigned i = 0; i < ss->sysval_count; ++i) {
                int sysval = ss->sysval[i];

                switch (PAN_SYSVAL_TYPE(sysval)) {
                case PAN_SYSVAL_VIEWPORT_SCALE:
                        panfrost_upload_viewport_scale_sysval(ctx, &uniforms[i]);
                        break;
                case PAN_SYSVAL_VIEWPORT_OFFSET:
                        panfrost_upload_viewport_offset_sysval(ctx, &uniforms[i]);
                        break;
                default:
                        assert(0);
                }
        }
}
/* Go through dirty flags and actualise them in the cmdstream. */

static void
panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
{
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);

        if (with_vertex_data) {
                panfrost_emit_vertex_data(ctx, job);
        }

        bool msaa = ctx->rasterizer->base.multisample;

        if (ctx->dirty & PAN_DIRTY_RASTERIZER) {
                ctx->payload_tiler.gl_enables = ctx->rasterizer->tiler_gl_enables;

                /* TODO: Sample size */
                SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_HAS_MSAA, msaa);
                SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !msaa);
        }

        /* Enable job requirements at draw-time */

        if (msaa)
                job->requirements |= PAN_REQ_MSAA;

        if (ctx->depth_stencil->depth.writemask)
                job->requirements |= PAN_REQ_DEPTH_WRITE;

        if (ctx->occlusion_query) {
                ctx->payload_tiler.gl_enables |= MALI_OCCLUSION_QUERY | MALI_OCCLUSION_PRECISE;
                ctx->payload_tiler.postfix.occlusion_counter = ctx->occlusion_query->transfer.gpu;
        }
        if (ctx->dirty & PAN_DIRTY_VS) {
                assert(ctx->vs);

                struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];

                /* Late shader descriptor assignments */

                vs->tripipe->texture_count = ctx->sampler_view_count[PIPE_SHADER_VERTEX];
                vs->tripipe->sampler_count = ctx->sampler_count[PIPE_SHADER_VERTEX];

                /* Who knows */
                vs->tripipe->midgard1.unknown1 = 0x2201;

                ctx->payload_vertex.postfix._shader_upper = vs->tripipe_gpu >> 4;
        }

        if (ctx->dirty & (PAN_DIRTY_RASTERIZER | PAN_DIRTY_VS)) {
                /* Check if we need to link the gl_PointSize varying */
                if (!panfrost_writes_point_size(ctx)) {
                        /* If the size is constant, write it out. Otherwise,
                         * don't touch primitive_size (since we would clobber
                         * the pointer there) */

                        ctx->payload_tiler.primitive_size.constant = ctx->rasterizer->base.line_width;
                }
        }

        /* TODO: Maybe dirty track FS, maybe not. For now, it's transient. */
        if (ctx->fs)
                ctx->dirty |= PAN_DIRTY_FS;
        if (ctx->dirty & PAN_DIRTY_FS) {
                assert(ctx->fs);
                struct panfrost_shader_state *variant = &ctx->fs->variants[ctx->fs->active_variant];

#define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name

                COPY(shader);
                COPY(attribute_count);
                COPY(varying_count);
                COPY(midgard1.uniform_count);
                COPY(midgard1.work_count);
                COPY(midgard1.unknown2);

#undef COPY

                /* If there is a blend shader, work registers are shared */

                if (ctx->blend->has_blend_shader)
                        ctx->fragment_shader_core.midgard1.work_count = /*MAX2(ctx->fragment_shader_core.midgard1.work_count, ctx->blend->blend_work_count)*/16;

                /* Set late due to depending on render state */
                /* The one at the end seems to mean "1 UBO" */
                unsigned flags = MALI_EARLY_Z | 0x200 | 0x2000 | 0x1;

                /* Any time texturing is used, derivatives are implicitly
                 * calculated, so we need to enable helper invocations */

                if (ctx->sampler_view_count[PIPE_SHADER_FRAGMENT])
                        flags |= MALI_HELPER_INVOCATIONS;

                ctx->fragment_shader_core.midgard1.unknown1 = flags;

                /* Assign texture/sample count right before upload */
                ctx->fragment_shader_core.texture_count = ctx->sampler_view_count[PIPE_SHADER_FRAGMENT];
                ctx->fragment_shader_core.sampler_count = ctx->sampler_count[PIPE_SHADER_FRAGMENT];

                /* Assign the stencil refs late */
                ctx->fragment_shader_core.stencil_front.ref = ctx->stencil_ref.ref_value[0];
                ctx->fragment_shader_core.stencil_back.ref = ctx->stencil_ref.ref_value[1];

                /* CAN_DISCARD should be set if the fragment shader possibly
                 * contains a 'discard' instruction. It is likely this is
                 * related to optimizations related to forward-pixel kill, as
                 * per "Mali Performance 3: Is EGL_BUFFER_PRESERVED a good
                 * thing?" by Peter Harris
                 */

                if (variant->can_discard) {
                        ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
                        ctx->fragment_shader_core.midgard1.unknown1 &= ~MALI_EARLY_Z;
                        ctx->fragment_shader_core.midgard1.unknown1 |= 0x4000;
                        ctx->fragment_shader_core.midgard1.unknown1 = 0x4200;
                }
                /* Check if we're using the default blend descriptor (fast path) */

                bool no_blending =
                        !ctx->blend->has_blend_shader &&
                        (ctx->blend->equation.rgb_mode == 0x122) &&
                        (ctx->blend->equation.alpha_mode == 0x122) &&
                        (ctx->blend->equation.color_mask == 0xf);

                /* Even on MFBD, the shader descriptor gets blend shaders. It's
                 * *also* copied to the blend_meta appended (by convention),
                 * but this is the field actually read by the hardware. (Or
                 * maybe both are read...?) */

                if (ctx->blend->has_blend_shader) {
                        ctx->fragment_shader_core.blend.shader = ctx->blend->blend_shader;
                } else {
                        ctx->fragment_shader_core.blend.shader = 0;
                }

                if (ctx->require_sfbd) {
                        /* When only a single render target platform is used, the blend
                         * information is inside the shader meta itself. We
                         * additionally need to signal CAN_DISCARD for nontrivial blend
                         * modes (so we're able to read back the destination buffer) */

                        if (!ctx->blend->has_blend_shader) {
                                ctx->fragment_shader_core.blend.equation = ctx->blend->equation;
                                ctx->fragment_shader_core.blend.constant = ctx->blend->constant;
                        }

                        if (!no_blending)
                                ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
                }

                size_t size = sizeof(struct mali_shader_meta) + sizeof(struct midgard_blend_rt);
                struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);
                memcpy(transfer.cpu, &ctx->fragment_shader_core, sizeof(struct mali_shader_meta));

                ctx->payload_tiler.postfix._shader_upper = (transfer.gpu) >> 4;

                if (!ctx->require_sfbd) {
                        /* Additional blend descriptor tacked on for jobs using MFBD */

                        unsigned blend_count = 0x200;

                        if (ctx->blend->has_blend_shader) {
                                /* For a blend shader, the bottom nibble corresponds to
                                 * the number of work registers used, which signals the
                                 * -existence- of a blend shader */

                                assert(ctx->blend->blend_work_count >= 2);
                                blend_count |= MIN2(ctx->blend->blend_work_count, 3);
                        } else {
                                /* Otherwise, the bottom bit simply specifies if
                                 * blending (anything other than REPLACE) is enabled */

                                if (!no_blending)
                                        blend_count |= 0x1;
                        }

                        struct midgard_blend_rt rts[4];

                        for (unsigned i = 0; i < 1; ++i) {
                                rts[i].flags = blend_count;

                                if (ctx->blend->has_blend_shader) {
                                        rts[i].blend.shader = ctx->blend->blend_shader;
                                } else {
                                        rts[i].blend.equation = ctx->blend->equation;
                                        rts[i].blend.constant = ctx->blend->constant;
                                }
                        }

                        memcpy(transfer.cpu + sizeof(struct mali_shader_meta), rts, sizeof(rts[0]) * 1);
                }
        }
        /* We stage to transient, so always dirty.. */
        panfrost_stage_attributes(ctx);

        if (ctx->dirty & PAN_DIRTY_SAMPLERS)
                panfrost_upload_sampler_descriptors(ctx);

        if (ctx->dirty & PAN_DIRTY_TEXTURES)
                panfrost_upload_texture_descriptors(ctx);

        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
        for (int i = 0; i <= PIPE_SHADER_FRAGMENT; ++i) {
                struct panfrost_constant_buffer *buf = &ctx->constant_buffer[i];

                struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
                struct panfrost_shader_state *fs = &ctx->fs->variants[ctx->fs->active_variant];
                struct panfrost_shader_state *ss = (i == PIPE_SHADER_FRAGMENT) ? fs : vs;

                /* Allocate room for the sysval and the uniforms */
                size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
                size_t size = sys_size + buf->size;
                struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);

                /* Upload sysvals requested by the shader */
                panfrost_upload_sysvals(ctx, transfer.cpu, ss, i);

                /* Upload uniforms */
                memcpy(transfer.cpu + sys_size, buf->buffer, buf->size);

                int uniform_count = 0;

                struct mali_vertex_tiler_postfix *postfix;

                switch (i) {
                case PIPE_SHADER_VERTEX:
                        uniform_count = ctx->vs->variants[ctx->vs->active_variant].uniform_count;
                        postfix = &ctx->payload_vertex.postfix;
                        break;

                case PIPE_SHADER_FRAGMENT:
                        uniform_count = ctx->fs->variants[ctx->fs->active_variant].uniform_count;
                        postfix = &ctx->payload_tiler.postfix;
                        break;

                default:
                        unreachable("Invalid shader stage\n");
                }

                /* Also attach the same buffer as a UBO for extended access */

                struct mali_uniform_buffer_meta uniform_buffers[] = {
                        {
                                .size = MALI_POSITIVE((2 + uniform_count)),
                                .ptr = transfer.gpu >> 2,
                        },
                };

                mali_ptr ubufs = panfrost_upload_transient(ctx, uniform_buffers, sizeof(uniform_buffers));
                postfix->uniforms = transfer.gpu;
                postfix->uniform_buffers = ubufs;

                buf->dirty = 0;
        }
        /* TODO: Upload the viewport somewhere more appropriate */

        /* Clip bounds are encoded as floats. The viewport itself is encoded as
         * (somewhat) asymmetric ints. */
        const struct pipe_scissor_state *ss = &ctx->scissor;

        struct mali_viewport view = {
                /* By default, do no viewport clipping, i.e. clip to (-inf,
                 * inf) in each direction. Clipping to the viewport in theory
                 * should work, but in practice causes issues when we're not
                 * explicitly trying to scissor */

                .clip_minx = -INFINITY,
                .clip_miny = -INFINITY,
                .clip_maxx = INFINITY,
                .clip_maxy = INFINITY,

                .clip_minz = 0.0,
                .clip_maxz = 1.0,
        };

        /* Always scissor to the viewport by default. */
        int minx = (int) (vp->translate[0] - vp->scale[0]);
        int maxx = (int) (vp->translate[0] + vp->scale[0]);

        int miny = (int) (vp->translate[1] - vp->scale[1]);
        int maxy = (int) (vp->translate[1] + vp->scale[1]);

        /* Apply the scissor test */

        if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
                minx = ss->minx;
                maxx = ss->maxx;
                miny = ss->miny;
                maxy = ss->maxy;
        }

        /* Hardware needs the min/max to be strictly ordered, so flip if we
         * need to. The viewport transformation in the vertex shader will
         * handle the negatives if we don't */

        if (miny > maxy) {
                int temp = miny;
                miny = maxy;
                maxy = temp;
        }

        if (minx > maxx) {
                int temp = minx;
                minx = maxx;
                maxx = temp;
        }

        /* Clamp everything positive, just in case */

        maxx = MAX2(0, maxx);
        maxy = MAX2(0, maxy);
        minx = MAX2(0, minx);
        miny = MAX2(0, miny);

        /* Clamp to the framebuffer size as a last check */

        minx = MIN2(ctx->pipe_framebuffer.width, minx);
        maxx = MIN2(ctx->pipe_framebuffer.width, maxx);

        miny = MIN2(ctx->pipe_framebuffer.height, miny);
        maxy = MIN2(ctx->pipe_framebuffer.height, maxy);

        /* Upload */

        view.viewport0[0] = minx;
        view.viewport1[0] = MALI_POSITIVE(maxx);

        view.viewport0[1] = miny;
        view.viewport1[1] = MALI_POSITIVE(maxy);

        ctx->payload_tiler.postfix.viewport =
                panfrost_upload_transient(ctx,
                                          &view,
                                          sizeof(struct mali_viewport));

        ctx->dirty = 0;
}
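
/* Worked example (illustrative, assuming MALI_POSITIVE encodes n as n - 1):
 * an unscissored 1920x1080 framebuffer produces viewport0 = { 0, 0 } and
 * viewport1 = { 1919, 1079 }. */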
/* Corresponds to exactly one draw, but does not submit anything */

static void
panfrost_queue_draw(struct panfrost_context *ctx)
{
        /* TODO: Expand the array? */
        if (ctx->draw_count >= MAX_DRAW_CALLS) {
                DBG("Job buffer overflow, ignoring draw\n");
                assert(0);
        }

        /* Handle dirty flags now */
        panfrost_emit_for_draw(ctx, true);

        /* We need a set_value job before any other draw jobs */
        if (ctx->draw_count == 0)
                panfrost_set_value_job(ctx);

        struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false);
        ctx->u_vertex_jobs[ctx->vertex_job_count] = (struct mali_job_descriptor_header *) vertex.cpu;
        ctx->vertex_jobs[ctx->vertex_job_count++] = vertex.gpu;

        struct panfrost_transfer tiler = panfrost_vertex_tiler_job(ctx, true);
        ctx->u_tiler_jobs[ctx->tiler_job_count] = (struct mali_job_descriptor_header *) tiler.cpu;
        ctx->tiler_jobs[ctx->tiler_job_count++] = tiler.gpu;

        ctx->draw_count++;
}
/* The entire frame is in memory -- send it off to the kernel! */

static void
panfrost_submit_frame(struct panfrost_context *ctx, bool flush_immediate,
                      struct pipe_fence_handle **fence,
                      struct panfrost_job *job)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);

        /* Edge case if screen is cleared and nothing else */
        bool has_draws = ctx->draw_count > 0;

        bool is_scanout = panfrost_is_scanout(ctx);
        screen->driver->submit_vs_fs_job(ctx, has_draws, is_scanout);

        /* If visual, we can stall a frame */

        if (!flush_immediate)
                screen->driver->force_flush_fragment(ctx, fence);

        screen->last_fragment_flushed = false;
        screen->last_job = job;

        /* If readback, flush now (hurts the pipelined performance) */
        if (flush_immediate)
                screen->driver->force_flush_fragment(ctx, fence);

        if (screen->driver->dump_counters && pan_counters_base) {
                screen->driver->dump_counters(screen);

                char filename[128];
                snprintf(filename, sizeof(filename), "%s/frame%d.mdgprf", pan_counters_base, ++performance_counter_number);
                FILE *fp = fopen(filename, "wb");
                fwrite(screen->perf_counters.cpu, 4096, sizeof(uint32_t), fp);
                fclose(fp);
        }
}
static void
panfrost_draw_wallpaper(struct pipe_context *pipe)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* Nothing to reload? */
        if (ctx->pipe_framebuffer.cbufs[0] == NULL)
                return;

        /* Blit the wallpaper in */
        panfrost_blit_wallpaper(ctx);

        /* We are flushing all queued draws and we know that no more jobs will
         * be added until the next frame.
         * We also know that the last jobs are the wallpaper jobs, and they
         * need to be linked so they execute right after the set_value job.
         */

        /* set_value job to wallpaper vertex job */
        panfrost_link_job_pair(ctx->u_set_value_job, ctx->vertex_jobs[ctx->vertex_job_count - 1]);
        ctx->u_vertex_jobs[ctx->vertex_job_count - 1]->job_dependency_index_1 = ctx->u_set_value_job->job_index;

        /* wallpaper vertex job to first vertex job */
        panfrost_link_job_pair(ctx->u_vertex_jobs[ctx->vertex_job_count - 1], ctx->vertex_jobs[0]);
        ctx->u_vertex_jobs[0]->job_dependency_index_1 = ctx->u_set_value_job->job_index;

        /* last vertex job to wallpaper tiler job */
        panfrost_link_job_pair(ctx->u_vertex_jobs[ctx->vertex_job_count - 2], ctx->tiler_jobs[ctx->tiler_job_count - 1]);
        ctx->u_tiler_jobs[ctx->tiler_job_count - 1]->job_dependency_index_1 = ctx->u_vertex_jobs[ctx->vertex_job_count - 1]->job_index;
        ctx->u_tiler_jobs[ctx->tiler_job_count - 1]->job_dependency_index_2 = 0;

        /* wallpaper tiler job to first tiler job */
        panfrost_link_job_pair(ctx->u_tiler_jobs[ctx->tiler_job_count - 1], ctx->tiler_jobs[0]);
        ctx->u_tiler_jobs[0]->job_dependency_index_1 = ctx->u_vertex_jobs[0]->job_index;
        ctx->u_tiler_jobs[0]->job_dependency_index_2 = ctx->u_tiler_jobs[ctx->tiler_job_count - 1]->job_index;

        /* last tiler job to NULL */
        panfrost_link_job_pair(ctx->u_tiler_jobs[ctx->tiler_job_count - 2], 0);
}
static void
panfrost_flush(
        struct pipe_context *pipe,
        struct pipe_fence_handle **fence,
        unsigned flags)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);

        /* Nothing to do! */
        if (!ctx->draw_count && !job->clear) return;

        if (!job->clear)
                panfrost_draw_wallpaper(&ctx->base);

        /* Whether to stall the pipeline for immediately correct results. Since
         * pipelined rendering is quite broken right now (to be fixed by the
         * panfrost_job refactor, just take the perf hit for correctness) */
        bool flush_immediate = /*flags & PIPE_FLUSH_END_OF_FRAME*/true;

        /* Submit the frame itself */
        panfrost_submit_frame(ctx, flush_immediate, fence, job);

        /* Prepare for the next frame */
        panfrost_invalidate_frame(ctx);
}
#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c;

static int
g2m_draw_mode(enum pipe_prim_type mode)
{
        switch (mode) {
                DEFINE_CASE(POINTS);
                DEFINE_CASE(LINES);
                DEFINE_CASE(LINE_LOOP);
                DEFINE_CASE(LINE_STRIP);
                DEFINE_CASE(TRIANGLES);
                DEFINE_CASE(TRIANGLE_STRIP);
                DEFINE_CASE(TRIANGLE_FAN);
                DEFINE_CASE(QUADS);
                DEFINE_CASE(QUAD_STRIP);
                DEFINE_CASE(POLYGON);

        default:
                unreachable("Invalid draw mode");
        }
}

#undef DEFINE_CASE
static unsigned
panfrost_translate_index_size(unsigned size)
{
        switch (size) {
        case 1:
                return MALI_DRAW_INDEXED_UINT8;

        case 2:
                return MALI_DRAW_INDEXED_UINT16;

        case 4:
                return MALI_DRAW_INDEXED_UINT32;

        default:
                unreachable("Invalid index size");
        }
}
/* Gets a GPU address for the associated index buffer. Only guaranteed to be
 * good for the duration of the draw (transient), could last longer */

static mali_ptr
panfrost_get_index_buffer_mapped(struct panfrost_context *ctx, const struct pipe_draw_info *info)
{
        struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);

        off_t offset = info->start * info->index_size;

        if (!info->has_user_indices) {
                /* Only resources can be directly mapped */
                return rsrc->bo->gpu + offset;
        } else {
                /* Otherwise, we need to upload to transient memory */
                const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
                return panfrost_upload_transient(ctx, ibuf8 + offset, info->count * info->index_size);
        }
}
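
/* Worked example (illustrative): a draw with 16-bit indices and
 * info->start = 100 reads from byte offset 100 * 2 = 200; for user-supplied
 * indices, info->count * 2 bytes from that offset are uploaded to transient
 * memory. */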
static bool
panfrost_scissor_culls_everything(struct panfrost_context *ctx)
{
        const struct pipe_scissor_state *ss = &ctx->scissor;

        /* Check if we're scissoring at all */

        if (!(ss && ctx->rasterizer && ctx->rasterizer->base.scissor))
                return false;

        return (ss->minx == ss->maxx) && (ss->miny == ss->maxy);
}
static void
panfrost_draw_vbo(
        struct pipe_context *pipe,
        const struct pipe_draw_info *info)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* First of all, check the scissor to see if anything is drawn at all.
         * If it's not, we drop the draw (mostly a conformance issue;
         * well-behaved apps shouldn't hit this) */

        if (panfrost_scissor_culls_everything(ctx))
                return;

        ctx->payload_vertex.draw_start = info->start;
        ctx->payload_tiler.draw_start = info->start;

        int mode = info->mode;

        /* Fallback for unsupported modes */

        if (!(ctx->draw_modes & (1 << mode))) {
                if (mode == PIPE_PRIM_QUADS && info->count == 4 && ctx->rasterizer && !ctx->rasterizer->base.flatshade) {
                        mode = PIPE_PRIM_TRIANGLE_FAN;
                } else {
                        if (info->count < 4) {
                                /* Degenerate case? */
                                return;
                        }

                        util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
                        util_primconvert_draw_vbo(ctx->primconvert, info);
                        return;
                }
        }

        /* Now that we have a guaranteed terminating path, find the job.
         * Assignment commented out to prevent unused warning */

        /* struct panfrost_job *job = */ panfrost_get_job_for_fbo(ctx);

        ctx->payload_tiler.prefix.draw_mode = g2m_draw_mode(mode);

        ctx->vertex_count = info->count;

        /* For non-indexed draws, they're the same */
        unsigned invocation_count = ctx->vertex_count;

        unsigned draw_flags = 0;

        /* The draw flags interpret how primitive size is interpreted */

        if (panfrost_writes_point_size(ctx))
                draw_flags |= MALI_DRAW_VARYING_SIZE;

        /* For higher amounts of vertices (greater than what fits in a 16-bit
         * short), the other value is needed, otherwise there will be bizarre
         * rendering artefacts. It's not clear what these values mean yet. */

        draw_flags |= (mode == PIPE_PRIM_POINTS || ctx->vertex_count > 65535) ? 0x3000 : 0x18000;

        if (info->index_size) {
                /* Calculate the min/max index used so we can figure out how
                 * many times to invoke the vertex shader */

                /* Fetch / calculate index bounds */
                unsigned min_index = 0, max_index = 0;

                if (info->max_index == ~0u) {
                        u_vbuf_get_minmax_index(pipe, info, &min_index, &max_index);
                } else {
                        min_index = info->min_index;
                        max_index = info->max_index;
                }

                /* Use the corresponding values */
                invocation_count = max_index - min_index + 1;
                ctx->payload_vertex.draw_start = min_index;
                ctx->payload_tiler.draw_start = min_index;

                ctx->payload_tiler.prefix.negative_start = -min_index;
                ctx->payload_tiler.prefix.index_count = MALI_POSITIVE(info->count);

                //assert(!info->restart_index); /* TODO: Research */
                assert(!info->index_bias);

                draw_flags |= panfrost_translate_index_size(info->index_size);
                ctx->payload_tiler.prefix.indices = panfrost_get_index_buffer_mapped(ctx, info);
        } else {
                /* Index count == vertex count, if no indexing is applied, as
                 * if it is internally indexed in the expected order */

                ctx->payload_tiler.prefix.negative_start = 0;
                ctx->payload_tiler.prefix.index_count = MALI_POSITIVE(ctx->vertex_count);

                /* Reverse index state */
                ctx->payload_tiler.prefix.indices = (uintptr_t) NULL;
        }

        ctx->payload_vertex.prefix.invocation_count = MALI_POSITIVE(invocation_count);
        ctx->payload_tiler.prefix.invocation_count = MALI_POSITIVE(invocation_count);
        ctx->payload_tiler.prefix.unknown_draw = draw_flags;

        /* Fire off the draw itself */
        panfrost_queue_draw(ctx);
}
static void
panfrost_generic_cso_delete(struct pipe_context *pctx, void *hwcso)
{
        free(hwcso);
}
static void *
panfrost_create_rasterizer_state(
        struct pipe_context *pctx,
        const struct pipe_rasterizer_state *cso)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_rasterizer *so = CALLOC_STRUCT(panfrost_rasterizer);

        so->base = *cso;

        /* Bitmask, unknown meaning of the start value */
        so->tiler_gl_enables = ctx->is_t6xx ? 0x105 : 0x7;

        if (cso->front_ccw)
                so->tiler_gl_enables |= MALI_FRONT_CCW_TOP;

        if (cso->cull_face & PIPE_FACE_FRONT)
                so->tiler_gl_enables |= MALI_CULL_FACE_FRONT;

        if (cso->cull_face & PIPE_FACE_BACK)
                so->tiler_gl_enables |= MALI_CULL_FACE_BACK;

        return so;
}
static void
panfrost_bind_rasterizer_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        /* TODO: Why can't rasterizer be NULL ever? Other drivers are fine.. */
        if (!hwcso)
                return;

        ctx->rasterizer = hwcso;
        ctx->dirty |= PAN_DIRTY_RASTERIZER;
}
static void *
panfrost_create_vertex_elements_state(
        struct pipe_context *pctx,
        unsigned num_elements,
        const struct pipe_vertex_element *elements)
{
        struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state);

        so->num_elements = num_elements;
        memcpy(so->pipe, elements, sizeof(*elements) * num_elements);

        /* XXX: What the cornball? This is totally, 100%, unapologetically
         * nonsense. And yet it somehow fixes a regression in -bshadow
         * (previously, we allocated the descriptor here... a newer commit
         * removed that allocation, and then memory corruption led to
         * shader_meta getting overwritten in bad ways and then the whole test
         * case falling apart). TODO: LOOK INTO PLEASE XXX XXX BAD XXX XXX XXX
         */
        panfrost_allocate_chunk(pan_context(pctx), 0, HEAP_DESCRIPTOR);

        for (int i = 0; i < num_elements; ++i) {
                so->hw[i].index = elements[i].vertex_buffer_index;

                enum pipe_format fmt = elements[i].src_format;
                const struct util_format_description *desc = util_format_description(fmt);
                so->hw[i].unknown1 = 0x2;
                so->hw[i].swizzle = panfrost_get_default_swizzle(desc->nr_channels);

                so->hw[i].format = panfrost_find_format(desc);

                /* The field itself should probably be shifted over */
                so->hw[i].src_offset = elements[i].src_offset;
        }

        return so;
}
static void
panfrost_bind_vertex_elements_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        ctx->vertex = hwcso;
        ctx->dirty |= PAN_DIRTY_VERTEX;
}
static void *
panfrost_create_shader_state(
        struct pipe_context *pctx,
        const struct pipe_shader_state *cso)
{
        struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
        so->base = *cso;

        /* Token deep copy to prevent memory corruption */

        if (cso->type == PIPE_SHADER_IR_TGSI)
                so->base.tokens = tgsi_dup_tokens(so->base.tokens);

        return so;
}
static void
panfrost_delete_shader_state(
        struct pipe_context *pctx,
        void *so)
{
        struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;

        if (cso->base.type == PIPE_SHADER_IR_TGSI) {
                DBG("Deleting TGSI shader leaks duplicated tokens\n");
        }

        free(so);
}
static void *
panfrost_create_sampler_state(
        struct pipe_context *pctx,
        const struct pipe_sampler_state *cso)
{
        struct panfrost_sampler_state *so = CALLOC_STRUCT(panfrost_sampler_state);
        so->base = *cso;

        /* sampler_state corresponds to mali_sampler_descriptor, which we can generate entirely here */

        struct mali_sampler_descriptor sampler_descriptor = {
                .filter_mode = MALI_TEX_MIN(translate_tex_filter(cso->min_img_filter))
                               | MALI_TEX_MAG(translate_tex_filter(cso->mag_img_filter))
                               | translate_mip_filter(cso->min_mip_filter)
                               | 0x20,

                .wrap_s = translate_tex_wrap(cso->wrap_s),
                .wrap_t = translate_tex_wrap(cso->wrap_t),
                .wrap_r = translate_tex_wrap(cso->wrap_r),
                .compare_func = panfrost_translate_alt_compare_func(cso->compare_func),
                .border_color = {
                        cso->border_color.f[0],
                        cso->border_color.f[1],
                        cso->border_color.f[2],
                        cso->border_color.f[3]
                },
                .min_lod = FIXED_16(cso->min_lod),
                .max_lod = FIXED_16(cso->max_lod),
                .unknown2 = 1,
        };

        so->hw = sampler_descriptor;

        return so;
}
static void
panfrost_bind_sampler_states(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_sampler,
        void **sampler)
{
        assert(start_slot == 0);

        struct panfrost_context *ctx = pan_context(pctx);

        /* XXX: Should upload, not just copy? */
        ctx->sampler_count[shader] = num_sampler;
        memcpy(ctx->samplers[shader], sampler, num_sampler * sizeof (void *));

        ctx->dirty |= PAN_DIRTY_SAMPLERS;
}
static bool
panfrost_variant_matches(
        struct panfrost_context *ctx,
        struct panfrost_shader_state *variant,
        enum pipe_shader_type type)
{
        struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;

        bool is_fragment = (type == PIPE_SHADER_FRAGMENT);

        if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) {
                /* Make sure enable state is at least the same */
                if (alpha->enabled != variant->alpha_state.enabled) {
                        return false;
                }

                /* Check that the contents of the test are the same */
                bool same_func = alpha->func == variant->alpha_state.func;
                bool same_ref = alpha->ref_value == variant->alpha_state.ref_value;

                if (!(same_func && same_ref)) {
                        return false;
                }
        }

        /* Otherwise, we're good to go */
        return true;
}
static void
panfrost_bind_shader_state(
        struct pipe_context *pctx,
        void *hwcso,
        enum pipe_shader_type type)
{
        struct panfrost_context *ctx = pan_context(pctx);

        if (type == PIPE_SHADER_FRAGMENT) {
                ctx->fs = hwcso;
                ctx->dirty |= PAN_DIRTY_FS;
        } else {
                assert(type == PIPE_SHADER_VERTEX);
                ctx->vs = hwcso;
                ctx->dirty |= PAN_DIRTY_VS;
        }

        if (!hwcso) return;

        /* Match the appropriate variant */

        signed variant = -1;
        struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;

        for (unsigned i = 0; i < variants->variant_count; ++i) {
                if (panfrost_variant_matches(ctx, &variants->variants[i], type)) {
                        variant = i;
                        break;
                }
        }

        if (variant == -1) {
                /* No variant matched, so create a new one */
                variant = variants->variant_count++;
                assert(variants->variant_count < MAX_SHADER_VARIANTS);

                variants->variants[variant].base = hwcso;

                if (type == PIPE_SHADER_FRAGMENT)
                        variants->variants[variant].alpha_state = ctx->depth_stencil->alpha;

                /* Allocate the mapped descriptor ahead-of-time. */
                struct panfrost_context *ctx = pan_context(pctx);
                struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR);

                variants->variants[variant].tripipe = (struct mali_shader_meta *) transfer.cpu;
                variants->variants[variant].tripipe_gpu = transfer.gpu;
        }

        /* Select this variant */
        variants->active_variant = variant;

        struct panfrost_shader_state *shader_state = &variants->variants[variant];
        assert(panfrost_variant_matches(ctx, shader_state, type));

        /* We finally have a variant, so compile it */

        if (!shader_state->compiled) {
                panfrost_shader_compile(ctx, shader_state->tripipe, NULL,
                                        panfrost_job_type_for_pipe(type), shader_state);

                shader_state->compiled = true;
        }
}
static void
panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
{
        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
}

static void
panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
{
        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
}
static void
panfrost_set_vertex_buffers(
        struct pipe_context *pctx,
        unsigned start_slot,
        unsigned num_buffers,
        const struct pipe_vertex_buffer *buffers)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers, start_slot, num_buffers);
}
static void
panfrost_set_constant_buffer(
        struct pipe_context *pctx,
        enum pipe_shader_type shader, uint index,
        const struct pipe_constant_buffer *buf)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];

        size_t sz = buf ? buf->buffer_size : 0;

        /* Free previous buffer */

        pbuf->dirty = true;
        pbuf->size = sz;

        if (pbuf->buffer) {
                free(pbuf->buffer);
                pbuf->buffer = NULL;
        }

        /* If unbinding, we're done */
        if (!buf)
                return;

        /* Multiple constant buffers not yet supported */
        assert(index == 0);

        const uint8_t *cpu;

        struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer);

        if (rsrc) {
                cpu = rsrc->bo->cpu;
        } else if (buf->user_buffer) {
                cpu = buf->user_buffer;
        } else {
                DBG("No constant buffer?\n");
                return;
        }

        /* Copy the constant buffer into the driver context for later upload */

        pbuf->buffer = malloc(sz);
        memcpy(pbuf->buffer, cpu + buf->buffer_offset, sz);
}
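
/* Note the CPU-side copy above: uniform contents are snapshotted at bind
 * time and only uploaded to GPU-visible memory when a draw actually consumes
 * them, keeping redundant state changes cheap (see the XXX above about
 * uploading directly instead). */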
static void
panfrost_set_stencil_ref(
        struct pipe_context *pctx,
        const struct pipe_stencil_ref *ref)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->stencil_ref = *ref;

        /* Shader core dirty */
        ctx->dirty |= PAN_DIRTY_FS;
}
static struct pipe_sampler_view *
panfrost_create_sampler_view(
        struct pipe_context *pctx,
        struct pipe_resource *texture,
        const struct pipe_sampler_view *template)
{
        struct panfrost_sampler_view *so = CALLOC_STRUCT(panfrost_sampler_view);
        int bytes_per_pixel = util_format_get_blocksize(texture->format);

        pipe_reference(NULL, &texture->reference);

        struct panfrost_resource *prsrc = (struct panfrost_resource *) texture;

        so->base = *template;
        so->base.texture = texture;
        so->base.reference.count = 1;
        so->base.context = pctx;

        /* sampler_views correspond to texture descriptors, minus the texture
         * (data) itself. So, we serialise the descriptor here and cache it for
         * later. */

        /* Make sure it's something with which we're familiar */
        assert(bytes_per_pixel >= 1 && bytes_per_pixel <= 4);

        /* TODO: Detect from format better */
        const struct util_format_description *desc = util_format_description(prsrc->base.format);

        unsigned char user_swizzle[4] = {
                template->swizzle_r,
                template->swizzle_g,
                template->swizzle_b,
                template->swizzle_a
        };

        enum mali_format format = panfrost_find_format(desc);

        bool is_depth = desc->format == PIPE_FORMAT_Z32_UNORM;
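
        /* The usage2 bits below come from reverse engineering: 0x10 appears
         * to be always set, while the low bits appear to select the memory
         * layout (AFBC, tiled, or linear); their exact semantics are not yet
         * understood. */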
        unsigned usage2_layout = 0x10;

        switch (prsrc->bo->layout) {
        case PAN_AFBC:
                usage2_layout |= 0x8 | 0x4;
                break;
        case PAN_TILED:
                usage2_layout |= 0x1;
                break;
        case PAN_LINEAR:
                usage2_layout |= is_depth ? 0x1 : 0x2;
                break;
        }

        /* Check if we need to set a custom stride by computing the "expected"
         * stride and comparing it to what the BO actually wants. Only applies
         * to linear textures, since tiled/compressed textures have strict
         * alignment requirements for their strides as it is */

        unsigned first_level = template->u.tex.first_level;
        unsigned last_level = template->u.tex.last_level;

        if (prsrc->bo->layout == PAN_LINEAR) {
                for (unsigned l = first_level; l <= last_level; ++l) {
                        unsigned actual_stride = prsrc->bo->slices[l].stride;
                        unsigned width = u_minify(texture->width0, l);
                        unsigned comp_stride = width * bytes_per_pixel;

                        if (comp_stride != actual_stride) {
                                usage2_layout |= MALI_TEX_MANUAL_STRIDE;
                                break;
                        }
                }
        }

        /* In the hardware, array_size refers specifically to array textures,
         * whereas in Gallium, it also covers cubemaps */

        unsigned array_size = texture->array_size;

        if (texture->target == PIPE_TEXTURE_CUBE) {
                /* TODO: Cubemap arrays */
                assert(array_size == 6);
                array_size /= 6;
        }

        struct mali_texture_descriptor texture_descriptor = {
                .width = MALI_POSITIVE(u_minify(texture->width0, first_level)),
                .height = MALI_POSITIVE(u_minify(texture->height0, first_level)),
                .depth = MALI_POSITIVE(u_minify(texture->depth0, first_level)),
                .array_size = MALI_POSITIVE(array_size),

                .format = {
                        .swizzle = panfrost_translate_swizzle_4(desc->swizzle),
                        .format = format,

                        .usage1 = (texture->target == PIPE_TEXTURE_3D) ? MALI_TEX_3D : 0,
                        .is_not_cubemap = texture->target != PIPE_TEXTURE_CUBE,

                        .usage2 = usage2_layout
                },

                .swizzle = panfrost_translate_swizzle_4(user_swizzle)
        };

        //texture_descriptor.nr_mipmap_levels = last_level - first_level;

        so->hw = texture_descriptor;

        return (struct pipe_sampler_view *) so;
}
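
/* Gallium hands us a full view array; trim trailing NULLs so the recorded
 * view count reflects the highest bound slot rather than the raw array
 * length. */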
static void
panfrost_set_sampler_views(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_views,
        struct pipe_sampler_view **views)
{
        struct panfrost_context *ctx = pan_context(pctx);

        assert(start_slot == 0);

        unsigned new_nr = 0;
        for (unsigned i = 0; i < num_views; ++i) {
                if (views[i])
                        new_nr = i + 1;
        }

        ctx->sampler_view_count[shader] = new_nr;
        memcpy(ctx->sampler_views[shader], views, num_views * sizeof (void *));

        ctx->dirty |= PAN_DIRTY_TEXTURES;
}
static void
panfrost_sampler_view_destroy(
        struct pipe_context *pctx,
        struct pipe_sampler_view *view)
{
        pipe_resource_reference(&view->texture, NULL);
        free(view);
}
static void
panfrost_set_framebuffer_state(struct pipe_context *pctx,
                               const struct pipe_framebuffer_state *fb)
{
        struct panfrost_context *ctx = pan_context(pctx);

        /* Flush when switching framebuffers, but not if the framebuffer
         * state is being restored by u_blitter
         */

        bool is_scanout = panfrost_is_scanout(ctx);
        bool has_draws = ctx->draw_count > 0;

        if (!ctx->blitter->running && (!is_scanout || has_draws)) {
                panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
        }

        ctx->pipe_framebuffer.nr_cbufs = fb->nr_cbufs;
        ctx->pipe_framebuffer.samples = fb->samples;
        ctx->pipe_framebuffer.layers = fb->layers;
        ctx->pipe_framebuffer.width = fb->width;
        ctx->pipe_framebuffer.height = fb->height;

        for (int i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
                struct pipe_surface *cb = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;

                /* check if changing cbuf */
                if (ctx->pipe_framebuffer.cbufs[i] == cb) continue;

                if (cb && (i != 0)) {
                        DBG("XXX: Multiple render targets not supported before t7xx!\n");
                        assert(0);
                }

                pipe_surface_reference(&ctx->pipe_framebuffer.cbufs[i], cb);

                if (!cb)
                        continue;

                if (ctx->require_sfbd)
                        ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
                else
                        ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);

                panfrost_attach_vt_framebuffer(ctx);

                struct panfrost_resource *tex = ((struct panfrost_resource *) ctx->pipe_framebuffer.cbufs[i]->texture);
                enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;

                bool can_afbc = panfrost_format_supports_afbc(format);
                bool is_scanout = panfrost_is_scanout(ctx);

                if (!is_scanout && tex->bo->layout != PAN_AFBC && can_afbc)
                        panfrost_enable_afbc(ctx, tex, false);

                if (!is_scanout && !tex->bo->has_checksum)
                        panfrost_enable_checksum(ctx, tex);
        }

        {
                struct pipe_surface *zb = fb->zsbuf;

                if (ctx->pipe_framebuffer.zsbuf != zb) {
                        pipe_surface_reference(&ctx->pipe_framebuffer.zsbuf, zb);

                        if (zb) {
                                if (ctx->require_sfbd)
                                        ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
                                else
                                        ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);

                                panfrost_attach_vt_framebuffer(ctx);

                                struct panfrost_resource *tex = pan_resource(zb->texture);
                                bool can_afbc = panfrost_format_supports_afbc(zb->format);
                                bool is_scanout = panfrost_is_scanout(ctx);

                                if (!is_scanout && tex->bo->layout != PAN_AFBC && can_afbc)
                                        panfrost_enable_afbc(ctx, tex, true);
                        }
                }
        }
}
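
/* Blend CSOs are compiled eagerly at creation: first try to lower the state
 * to Mali fixed-function blending, and only fall back to generating a blend
 * shader when fixed-function hardware can't express it. */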
static void *
panfrost_create_blend_state(struct pipe_context *pipe,
                            const struct pipe_blend_state *blend)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_blend_state *so = CALLOC_STRUCT(panfrost_blend_state);
        so->base = *blend;

        /* TODO: The following features are not yet implemented */
        assert(!blend->logicop_enable);
        assert(!blend->alpha_to_coverage);
        assert(!blend->alpha_to_one);

        /* Compile the blend state, first as fixed-function if we can */

        if (panfrost_make_fixed_blend_mode(&blend->rt[0], so, blend->rt[0].colormask, &ctx->blend_color))
                return so;

        /* If we can't, compile a blend shader instead */

        panfrost_make_blend_shader(ctx, so, &ctx->blend_color);

        return so;
}
static void
panfrost_bind_blend_state(struct pipe_context *pipe,
                          void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct pipe_blend_state *blend = (struct pipe_blend_state *) cso;
        struct panfrost_blend_state *pblend = (struct panfrost_blend_state *) cso;
        ctx->blend = pblend;

        if (!blend)
                return;

        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_DITHER, !blend->dither);

        /* TODO: Attach color */

        /* Shader itself is not dirty, but the shader core is */
        ctx->dirty |= PAN_DIRTY_FS;
}
static void
panfrost_delete_blend_state(struct pipe_context *pipe,
                            void *blend)
{
        struct panfrost_blend_state *so = (struct panfrost_blend_state *) blend;

        if (so->has_blend_shader) {
                DBG("Deleting blend state leaks blend shader bytecode\n");
        }

        free(blend);
}
static void
panfrost_set_blend_color(struct pipe_context *pipe,
                         const struct pipe_blend_color *blend_color)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* If blend_color is NULL we're unbinding, so ctx->blend_color is now undefined -> nothing to do */
        if (!blend_color)
                return;

        ctx->blend_color = *blend_color;

        /* The blend mode depends on the blend constant color, due to the
         * fixed/programmable split. So, we're forced to regenerate the blend
         * equation */

        /* TODO: Attach color */
}
static void *
panfrost_create_depth_stencil_state(struct pipe_context *pipe,
                                    const struct pipe_depth_stencil_alpha_state *depth_stencil)
{
        return mem_dup(depth_stencil, sizeof(*depth_stencil));
}
static void
panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
                                  void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct pipe_depth_stencil_alpha_state *depth_stencil = cso;
        ctx->depth_stencil = depth_stencil;

        if (!depth_stencil)
                return;

        /* Alpha does not exist in the hardware (it's not in ES3), so it's
         * emulated in the fragment shader */

        if (depth_stencil->alpha.enabled) {
                /* We need to trigger a new shader (maybe) */
                ctx->base.bind_fs_state(&ctx->base, ctx->fs);
        }

        /* Stencil state */
        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_STENCIL_TEST, depth_stencil->stencil[0].enabled); /* XXX: which one? */

        panfrost_make_stencil_state(&depth_stencil->stencil[0], &ctx->fragment_shader_core.stencil_front);
        ctx->fragment_shader_core.stencil_mask_front = depth_stencil->stencil[0].writemask;

        panfrost_make_stencil_state(&depth_stencil->stencil[1], &ctx->fragment_shader_core.stencil_back);
        ctx->fragment_shader_core.stencil_mask_back = depth_stencil->stencil[1].writemask;

        /* Depth state (TODO: Refactor) */
        SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_DEPTH_TEST, depth_stencil->depth.enabled);

        int func = depth_stencil->depth.enabled ? depth_stencil->depth.func : PIPE_FUNC_ALWAYS;

        ctx->fragment_shader_core.unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
        ctx->fragment_shader_core.unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(func));

        /* Bounds test not implemented */
        assert(!depth_stencil->depth.bounds_test);

        ctx->dirty |= PAN_DIRTY_FS;
}
static void
panfrost_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
{
        free(depth);
}

static void
panfrost_set_sample_mask(struct pipe_context *pipe,
                         unsigned sample_mask)
{
}
static void
panfrost_set_clip_state(struct pipe_context *pipe,
                        const struct pipe_clip_state *clip)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}

static void
panfrost_set_viewport_states(struct pipe_context *pipe,
                             unsigned start_slot,
                             unsigned num_viewports,
                             const struct pipe_viewport_state *viewports)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_viewports == 1);

        ctx->pipe_viewport = *viewports;
}

static void
panfrost_set_scissor_states(struct pipe_context *pipe,
                            unsigned start_slot,
                            unsigned num_scissors,
                            const struct pipe_scissor_state *scissors)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_scissors == 1);

        ctx->scissor = *scissors;
}

static void
panfrost_set_polygon_stipple(struct pipe_context *pipe,
                             const struct pipe_poly_stipple *stipple)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}

static void
panfrost_set_active_query_state(struct pipe_context *pipe,
                                bool enable)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}
static void
panfrost_destroy(struct pipe_context *pipe)
{
        struct panfrost_context *panfrost = pan_context(pipe);
        struct panfrost_screen *screen = pan_screen(pipe->screen);

        if (panfrost->blitter)
                util_blitter_destroy(panfrost->blitter);

        screen->driver->free_slab(screen, &panfrost->scratchpad);
        screen->driver->free_slab(screen, &panfrost->varying_mem);
        screen->driver->free_slab(screen, &panfrost->shaders);
        screen->driver->free_slab(screen, &panfrost->tiler_heap);
        screen->driver->free_slab(screen, &panfrost->tiler_polygon_list);
}
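
/* Only occlusion queries are wired up so far: begin_query allocates a word
 * in the descriptor heap for the counter, and get_query_result flushes the
 * pending jobs before reading it back. Other query types are skipped with a
 * debug message. */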
static struct pipe_query *
panfrost_create_query(struct pipe_context *pipe,
                      unsigned type,
                      unsigned index)
{
        struct panfrost_query *q = CALLOC_STRUCT(panfrost_query);

        q->type = type;
        q->index = index;

        return (struct pipe_query *) q;
}

static void
panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
        FREE(q);
}
static bool
panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
                /* Allocate a word for the query results to be stored */
                query->transfer = panfrost_allocate_chunk(ctx, sizeof(unsigned), HEAP_DESCRIPTOR);

                ctx->occlusion_query = query;

                break;
        }

        default:
                DBG("Skipping query %d\n", query->type);
                break;
        }

        return true;
}
static bool
panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->occlusion_query = NULL;
        return true;
}
static bool
panfrost_get_query_result(struct pipe_context *pipe,
                          struct pipe_query *q,
                          bool wait,
                          union pipe_query_result *vresult)
{
        struct panfrost_query *query = (struct panfrost_query *) q;

        /* We need to flush out the jobs to actually run the counter, TODO
         * check wait, TODO wallpaper after if needed */

        panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
                /* Read back the query results */
                unsigned *result = (unsigned *) query->transfer.cpu;
                unsigned passed = *result;

                if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
                        vresult->u64 = passed;
                } else {
                        vresult->b = !!passed;
                }

                break;
        }

        default:
                DBG("Skipped query get %d\n", query->type);
                break;
        }

        return true;
}
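
/* Stream output is only plumbed through far enough to create and destroy
 * targets; setting targets is still a stub, so transform feedback does not
 * actually record anything yet. */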
static struct pipe_stream_output_target *
panfrost_create_stream_output_target(struct pipe_context *pctx,
                                     struct pipe_resource *prsc,
                                     unsigned buffer_offset,
                                     unsigned buffer_size)
{
        struct pipe_stream_output_target *target;

        target = CALLOC_STRUCT(pipe_stream_output_target);

        if (!target)
                return NULL;

        pipe_reference_init(&target->reference, 1);
        pipe_resource_reference(&target->buffer, prsc);

        target->context = pctx;
        target->buffer_offset = buffer_offset;
        target->buffer_size = buffer_size;

        return target;
}

static void
panfrost_stream_output_target_destroy(struct pipe_context *pctx,
                                      struct pipe_stream_output_target *target)
{
        pipe_resource_reference(&target->buffer, NULL);
        FREE(target);
}
static void
panfrost_set_stream_output_targets(struct pipe_context *pctx,
                                   unsigned num_targets,
                                   struct pipe_stream_output_target **targets,
                                   const unsigned *offsets)
{
        /* STUB */
}
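
/* One-time GPU memory setup: per-frame transient pools for the command
 * stream, plus fixed slabs for scratch space, varyings, shader binaries, and
 * the tiler's heap and polygon list. The sizes below are driver-chosen
 * defaults, not hardware requirements. */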
static void
panfrost_setup_hardware(struct panfrost_context *ctx)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);

        for (int i = 0; i < ARRAY_SIZE(ctx->transient_pools); ++i) {
                /* Allocate the beginning of the transient pool */
                int entry_size = (1 << 22); /* 4MB */

                ctx->transient_pools[i].entry_size = entry_size;
                ctx->transient_pools[i].entry_count = 1;

                ctx->transient_pools[i].entries[0] = (struct panfrost_memory_entry *) pb_slab_alloc(&screen->slabs, entry_size, HEAP_TRANSIENT);
        }

        screen->driver->allocate_slab(screen, &ctx->scratchpad, 64, false, 0, 0, 0);
        screen->driver->allocate_slab(screen, &ctx->varying_mem, 16384, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_COHERENT_LOCAL, 0, 0);
        screen->driver->allocate_slab(screen, &ctx->shaders, 4096, true, PAN_ALLOCATE_EXECUTE, 0, 0);
        screen->driver->allocate_slab(screen, &ctx->tiler_heap, 32768, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
        screen->driver->allocate_slab(screen, &ctx->tiler_polygon_list, 128*128, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
        screen->driver->allocate_slab(screen, &ctx->tiler_dummy, 1, false, PAN_ALLOCATE_INVISIBLE, 0, 0);
}
/* New context creation, which also does hardware initialisation since I don't
 * know a better way to structure this :smirk: */

struct pipe_context *
panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
{
        struct panfrost_context *ctx = CALLOC_STRUCT(panfrost_context);
        struct panfrost_screen *pscreen = pan_screen(screen);
        memset(ctx, 0, sizeof(*ctx));
        struct pipe_context *gallium = (struct pipe_context *) ctx;

        unsigned gpu_id;

        gpu_id = pscreen->driver->query_gpu_version(pscreen);

        ctx->is_t6xx = gpu_id <= 0x0750; /* For now, this flag means T760 or less */
        ctx->require_sfbd = gpu_id < 0x0750; /* T760 is the first to support MFBD */

        gallium->screen = screen;

        gallium->destroy = panfrost_destroy;

        gallium->set_framebuffer_state = panfrost_set_framebuffer_state;

        gallium->flush = panfrost_flush;
        gallium->clear = panfrost_clear;
        gallium->draw_vbo = panfrost_draw_vbo;

        gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
        gallium->set_constant_buffer = panfrost_set_constant_buffer;

        gallium->set_stencil_ref = panfrost_set_stencil_ref;

        gallium->create_sampler_view = panfrost_create_sampler_view;
        gallium->set_sampler_views = panfrost_set_sampler_views;
        gallium->sampler_view_destroy = panfrost_sampler_view_destroy;

        gallium->create_rasterizer_state = panfrost_create_rasterizer_state;
        gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
        gallium->delete_rasterizer_state = panfrost_generic_cso_delete;

        gallium->create_vertex_elements_state = panfrost_create_vertex_elements_state;
        gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
        gallium->delete_vertex_elements_state = panfrost_generic_cso_delete;

        gallium->create_fs_state = panfrost_create_shader_state;
        gallium->delete_fs_state = panfrost_delete_shader_state;
        gallium->bind_fs_state = panfrost_bind_fs_state;

        gallium->create_vs_state = panfrost_create_shader_state;
        gallium->delete_vs_state = panfrost_delete_shader_state;
        gallium->bind_vs_state = panfrost_bind_vs_state;

        gallium->create_sampler_state = panfrost_create_sampler_state;
        gallium->delete_sampler_state = panfrost_generic_cso_delete;
        gallium->bind_sampler_states = panfrost_bind_sampler_states;

        gallium->create_blend_state = panfrost_create_blend_state;
        gallium->bind_blend_state = panfrost_bind_blend_state;
        gallium->delete_blend_state = panfrost_delete_blend_state;

        gallium->set_blend_color = panfrost_set_blend_color;

        gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
        gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
        gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;

        gallium->set_sample_mask = panfrost_set_sample_mask;

        gallium->set_clip_state = panfrost_set_clip_state;
        gallium->set_viewport_states = panfrost_set_viewport_states;
        gallium->set_scissor_states = panfrost_set_scissor_states;
        gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
        gallium->set_active_query_state = panfrost_set_active_query_state;

        gallium->create_query = panfrost_create_query;
        gallium->destroy_query = panfrost_destroy_query;
        gallium->begin_query = panfrost_begin_query;
        gallium->end_query = panfrost_end_query;
        gallium->get_query_result = panfrost_get_query_result;

        gallium->create_stream_output_target = panfrost_create_stream_output_target;
        gallium->stream_output_target_destroy = panfrost_stream_output_target_destroy;
        gallium->set_stream_output_targets = panfrost_set_stream_output_targets;
        panfrost_resource_context_init(gallium);

        pscreen->driver->init_context(ctx);

        panfrost_setup_hardware(ctx);

        gallium->stream_uploader = u_upload_create_default(gallium);
        gallium->const_uploader = gallium->stream_uploader;
        assert(gallium->stream_uploader);

        /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */
        ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1;

        ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);

        ctx->blitter = util_blitter_create(gallium);
        assert(ctx->blitter);

        /* Prepare for render! */

        panfrost_job_init(ctx);
        panfrost_emit_vertex_payload(ctx);
        panfrost_emit_tiler_payload(ctx);
        panfrost_invalidate_frame(ctx);
        panfrost_default_shader_backend(ctx);
        panfrost_generate_space_filler_indices();