/*
 * © Copyright 2018 Alyssa Rosenzweig
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "pan_context.h"
#include "pan_format.h"

#include "util/macros.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "util/u_memory.h"
#include "util/u_vbuf.h"
#include "util/half_float.h"
#include "util/u_helpers.h"
#include "util/u_prim.h"
#include "util/u_prim_restart.h"
#include "indices/u_primconvert.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_from_mesa.h"
#include "util/u_math.h"

#include "pan_screen.h"
#include "pan_blending.h"
#include "pan_blend_shaders.h"
#include "pan_tiler.h"
/* Do not actually send anything to the GPU; merely generate the cmdstream as
 * fast as possible. Disables framebuffer writes */

/* Framebuffer descriptor */
static struct midgard_tiler_descriptor
panfrost_emit_midg_tiler(
        struct panfrost_context *ctx,
        unsigned width,
        unsigned height,
        unsigned vertex_count)
{
        struct midgard_tiler_descriptor t = {};
        struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);

        t.hierarchy_mask =
                panfrost_choose_hierarchy_mask(width, height, vertex_count);

        /* Compute the polygon header size and use that to offset the body */

        unsigned header_size = panfrost_tiler_header_size(
                width, height, t.hierarchy_mask);

        unsigned body_size = panfrost_tiler_body_size(
                width, height, t.hierarchy_mask);

        if (t.hierarchy_mask) {
                t.polygon_list = panfrost_job_get_polygon_list(batch,
                                header_size + body_size);

                /* Allow the entire tiler heap */
                t.heap_start = ctx->tiler_heap.bo->gpu;
                t.heap_end =
                        ctx->tiler_heap.bo->gpu + ctx->tiler_heap.bo->size;
        } else {
                /* The tiler is disabled, so don't allow the tiler heap */
                t.heap_start = ctx->tiler_heap.bo->gpu;
                t.heap_end = t.heap_start;

                /* Use a dummy polygon list */
                t.polygon_list = ctx->tiler_dummy.bo->gpu;

                /* Also, set a "tiler disabled?" flag? */
                t.hierarchy_mask |= 0x1000;
        }

        t.polygon_list_body =
                t.polygon_list + header_size;

        t.polygon_list_size =
                header_size + body_size;

        return t;
}
struct mali_single_framebuffer
panfrost_emit_sfbd(struct panfrost_context *ctx, unsigned vertex_count)
{
        unsigned width = ctx->pipe_framebuffer.width;
        unsigned height = ctx->pipe_framebuffer.height;

        struct mali_single_framebuffer framebuffer = {
                .width = MALI_POSITIVE(width),
                .height = MALI_POSITIVE(height),
                .format = 0x30000000,
                .clear_flags = 0x1000,
                .unknown_address_0 = ctx->scratchpad.bo->gpu,
                .tiler = panfrost_emit_midg_tiler(ctx,
                                width, height, vertex_count),
        };

        return framebuffer;
}
struct bifrost_framebuffer
panfrost_emit_mfbd(struct panfrost_context *ctx, unsigned vertex_count)
{
        unsigned width = ctx->pipe_framebuffer.width;
        unsigned height = ctx->pipe_framebuffer.height;

        struct bifrost_framebuffer framebuffer = {
                .unk0 = 0x1e5, /* 1e4 if no spill */
                .width1 = MALI_POSITIVE(width),
                .height1 = MALI_POSITIVE(height),
                .width2 = MALI_POSITIVE(width),
                .height2 = MALI_POSITIVE(height),

                .rt_count_1 = MALI_POSITIVE(ctx->pipe_framebuffer.nr_cbufs),

                .scratchpad = ctx->scratchpad.bo->gpu,
                .tiler = panfrost_emit_midg_tiler(ctx,
                                width, height, vertex_count)
        };

        return framebuffer;
}
/* Are we currently rendering to the screen (rather than an FBO)? */

bool
panfrost_is_scanout(struct panfrost_context *ctx)
{
        /* If there is no color buffer, it's an FBO */
        if (ctx->pipe_framebuffer.nr_cbufs != 1)
                return false;

        /* If we're too early and no framebuffer has been set, assume scanout */
        if (!ctx->pipe_framebuffer.cbufs[0])
                return true;

        return ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_DISPLAY_TARGET ||
               ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SCANOUT ||
               ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
}
static void
panfrost_clear(
        struct pipe_context *pipe,
        unsigned buffers,
        const union pipe_color_union *color,
        double depth, unsigned stencil)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);

        panfrost_job_clear(ctx, job, buffers, color, depth, stencil);
}
static mali_ptr
panfrost_attach_vt_mfbd(struct panfrost_context *ctx)
{
        struct bifrost_framebuffer mfbd = panfrost_emit_mfbd(ctx, ~0);

        return panfrost_upload_transient(ctx, &mfbd, sizeof(mfbd)) | MALI_MFBD;
}

static mali_ptr
panfrost_attach_vt_sfbd(struct panfrost_context *ctx)
{
        struct mali_single_framebuffer sfbd = panfrost_emit_sfbd(ctx, ~0);

        return panfrost_upload_transient(ctx, &sfbd, sizeof(sfbd)) | MALI_SFBD;
}
static void
panfrost_attach_vt_framebuffer(struct panfrost_context *ctx)
{
        /* Skip the attach if we can */

        if (ctx->payloads[PIPE_SHADER_VERTEX].postfix.framebuffer) {
                assert(ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.framebuffer);
                return;
        }

        struct panfrost_screen *screen = pan_screen(ctx->base.screen);
        mali_ptr framebuffer = screen->require_sfbd ?
                               panfrost_attach_vt_sfbd(ctx) :
                               panfrost_attach_vt_mfbd(ctx);

        for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i)
                ctx->payloads[i].postfix.framebuffer = framebuffer;
}
/* Reset per-frame context, called on context initialisation as well as after
 * flushing a frame */

static void
panfrost_invalidate_frame(struct panfrost_context *ctx)
{
        for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i)
                ctx->payloads[i].postfix.framebuffer = 0;

        ctx->dirty |= PAN_DIRTY_RASTERIZER;
        ctx->dirty |= PAN_DIRTY_SAMPLERS | PAN_DIRTY_TEXTURES;

        /* TODO: When does this need to be handled? */
        ctx->active_queries = true;
}
/* In practice, every field of these payloads should be configurable
 * arbitrarily, which means these functions are basically catch-alls for
 * as-of-yet unwavering unknowns */

static void
panfrost_emit_vertex_payload(struct panfrost_context *ctx)
{
        /* 0x2 bit clear on 32-bit T6XX */

        struct midgard_payload_vertex_tiler payload = {
                .gl_enables = 0x4 | 0x2,
        };

        /* Vertex and compute are closely coupled, so share a payload */

        memcpy(&ctx->payloads[PIPE_SHADER_VERTEX], &payload, sizeof(payload));
        memcpy(&ctx->payloads[PIPE_SHADER_COMPUTE], &payload, sizeof(payload));
}
static void
panfrost_emit_tiler_payload(struct panfrost_context *ctx)
{
        struct midgard_payload_vertex_tiler payload = {
                .prefix = {
                        .zero1 = 0xffff, /* Why is this only seen on test-quad-textured? */
                },
        };

        memcpy(&ctx->payloads[PIPE_SHADER_FRAGMENT], &payload, sizeof(payload));
}
static unsigned
translate_tex_wrap(enum pipe_tex_wrap w)
{
        switch (w) {
        case PIPE_TEX_WRAP_REPEAT:
                return MALI_WRAP_REPEAT;

        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return MALI_WRAP_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return MALI_WRAP_CLAMP_TO_BORDER;

        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return MALI_WRAP_MIRRORED_REPEAT;

        default:
                unreachable("Invalid wrap");
        }
}
static unsigned
panfrost_translate_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_FUNC_ALWAYS;

        default:
                unreachable("Invalid func");
        }
}
static unsigned
panfrost_translate_alt_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_ALT_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_ALT_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_ALT_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_ALT_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_ALT_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_ALT_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_ALT_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_ALT_FUNC_ALWAYS;

        default:
                unreachable("Invalid alt func");
        }
}
static unsigned
panfrost_translate_stencil_op(enum pipe_stencil_op in)
{
        switch (in) {
        case PIPE_STENCIL_OP_KEEP:
                return MALI_STENCIL_KEEP;

        case PIPE_STENCIL_OP_ZERO:
                return MALI_STENCIL_ZERO;

        case PIPE_STENCIL_OP_REPLACE:
                return MALI_STENCIL_REPLACE;

        case PIPE_STENCIL_OP_INCR:
                return MALI_STENCIL_INCR;

        case PIPE_STENCIL_OP_DECR:
                return MALI_STENCIL_DECR;

        case PIPE_STENCIL_OP_INCR_WRAP:
                return MALI_STENCIL_INCR_WRAP;

        case PIPE_STENCIL_OP_DECR_WRAP:
                return MALI_STENCIL_DECR_WRAP;

        case PIPE_STENCIL_OP_INVERT:
                return MALI_STENCIL_INVERT;

        default:
                unreachable("Invalid stencil op");
        }
}
static void
panfrost_make_stencil_state(const struct pipe_stencil_state *in, struct mali_stencil_test *out)
{
        out->ref = 0; /* Gallium gets it from elsewhere */

        out->mask = in->valuemask;
        out->func = panfrost_translate_compare_func(in->func);
        out->sfail = panfrost_translate_stencil_op(in->fail_op);
        out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
        out->dppass = panfrost_translate_stencil_op(in->zpass_op);
}
static void
panfrost_default_shader_backend(struct panfrost_context *ctx)
{
        struct mali_shader_meta shader = {
                .alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000),

                .unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010,
                .unknown2_4 = MALI_NO_MSAA | 0x4e0,
        };

        /* unknown2_4 has 0x10 bit set on T6XX. We don't know why this is
         * required (independent of 32-bit/64-bit descriptors), or why it's not
         * used on later GPU revisions. Otherwise, all shader jobs fault on
         * these earlier chips (perhaps this is a chicken bit of some kind).
         * More investigation is needed. */

        if (ctx->is_t6xx)
                shader.unknown2_4 |= 0x10;

        struct pipe_stencil_state default_stencil = {
                .func = PIPE_FUNC_ALWAYS,
                .fail_op = MALI_STENCIL_KEEP,
                .zfail_op = MALI_STENCIL_KEEP,
                .zpass_op = MALI_STENCIL_KEEP,
        };

        panfrost_make_stencil_state(&default_stencil, &shader.stencil_front);
        shader.stencil_mask_front = default_stencil.writemask;

        panfrost_make_stencil_state(&default_stencil, &shader.stencil_back);
        shader.stencil_mask_back = default_stencil.writemask;

        if (default_stencil.enabled)
                shader.unknown2_4 |= MALI_STENCIL_TEST;

        memcpy(&ctx->fragment_shader_core, &shader, sizeof(shader));
}
/* Generates a vertex/tiler job. This is, in some sense, the heart of the
 * graphics command stream. It should be called once per draw, according to
 * presentations. Set is_tiler for "tiler" jobs (fragment shader jobs, but in
 * Mali parlance, "fragment" refers to framebuffer writeout). Clear it for
 * vertex jobs. */

struct panfrost_transfer
panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler)
{
        struct mali_job_descriptor_header job = {
                .job_type = is_tiler ? JOB_TYPE_TILER : JOB_TYPE_VERTEX,
                .job_descriptor_size = 1,
        };

        struct midgard_payload_vertex_tiler *payload = is_tiler ? &ctx->payloads[PIPE_SHADER_FRAGMENT] : &ctx->payloads[PIPE_SHADER_VERTEX];

        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(*payload));
        memcpy(transfer.cpu, &job, sizeof(job));
        memcpy(transfer.cpu + sizeof(job), payload, sizeof(*payload));
        return transfer;
}
static mali_ptr
panfrost_vertex_buffer_address(struct panfrost_context *ctx, unsigned i)
{
        struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
        struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);

        return rsrc->bo->gpu + buf->buffer_offset;
}
static bool
panfrost_writes_point_size(struct panfrost_context *ctx)
{
        assert(ctx->shader[PIPE_SHADER_VERTEX]);
        struct panfrost_shader_state *vs = &ctx->shader[PIPE_SHADER_VERTEX]->variants[ctx->shader[PIPE_SHADER_VERTEX]->active_variant];

        return vs->writes_point_size && ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode == MALI_POINTS;
}
/* Stage the attribute descriptors so we can adjust src_offset
 * to let BOs align nicely */

static void
panfrost_stage_attributes(struct panfrost_context *ctx)
{
        struct panfrost_vertex_state *so = ctx->vertex;

        size_t sz = sizeof(struct mali_attr_meta) * so->num_elements;
        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sz);
        struct mali_attr_meta *target = (struct mali_attr_meta *) transfer.cpu;

        /* Copy as-is for the first pass */
        memcpy(target, so->hw, sz);

        /* Fixup offsets for the second pass. Recall that the hardware
         * calculates attribute addresses as:
         *
         *      addr = base + (stride * vtx) + src_offset;
         *
         * However, on Mali, base must be aligned to 64 bytes, so we instead
         * let:
         *
         *      base' = base & ~63 = base - (base & 63)
         *
         * To compensate when using base' (see emit_vertex_data), we have
         * to adjust src_offset by the masked off piece:
         *
         *      addr' = base' + (stride * vtx) + (src_offset + (base & 63))
         *            = base - (base & 63) + (stride * vtx) + src_offset + (base & 63)
         *            = base + (stride * vtx) + src_offset
         */
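
        /* Illustrative worked example (not from the original source): if
         * base = 0x10009 and src_offset = 4, we upload base' = 0x10000 and
         * stage src_offset' = 4 + (0x10009 & 63) = 13, so the hardware
         * computes base' + src_offset' = 0x1000D = base + src_offset. */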

        unsigned start = ctx->payloads[PIPE_SHADER_VERTEX].offset_start;

        for (unsigned i = 0; i < so->num_elements; ++i) {
                unsigned vbi = so->pipe[i].vertex_buffer_index;
                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
                mali_ptr addr = panfrost_vertex_buffer_address(ctx, vbi);

                /* Adjust by the masked off bits of the offset */
                target[i].src_offset += (addr & 63);

                /* Also, somewhat obscurely per-instance data needs to be
                 * offset in response to a delayed start in an indexed draw */

                if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start) {
                        target[i].src_offset -= buf->stride * start;
                }
        }

        ctx->payloads[PIPE_SHADER_VERTEX].postfix.attribute_meta = transfer.gpu;
}
static void
panfrost_upload_sampler_descriptors(struct panfrost_context *ctx)
{
        size_t desc_size = sizeof(struct mali_sampler_descriptor);

        for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
                mali_ptr upload = 0;

                if (ctx->sampler_count[t] && ctx->sampler_view_count[t]) {
                        size_t transfer_size = desc_size * ctx->sampler_count[t];

                        struct panfrost_transfer transfer =
                                panfrost_allocate_transient(ctx, transfer_size);

                        struct mali_sampler_descriptor *desc =
                                (struct mali_sampler_descriptor *) transfer.cpu;

                        for (int i = 0; i < ctx->sampler_count[t]; ++i)
                                desc[i] = ctx->samplers[t][i]->hw;

                        upload = transfer.gpu;
                }

                ctx->payloads[t].postfix.sampler_descriptor = upload;
        }
}
static unsigned
panfrost_layout_for_texture(struct panfrost_resource *rsrc, bool manual_stride)
{
        /* TODO: other linear depth textures */
        bool is_depth = rsrc->base.format == PIPE_FORMAT_Z32_UNORM;

        unsigned usage2_layout = 0x10;

        switch (rsrc->layout) {
        case PAN_AFBC:
                usage2_layout |= 0x8 | 0x4;
                break;
        case PAN_TILED:
                usage2_layout |= 0x1;
                break;
        case PAN_LINEAR:
                usage2_layout |= is_depth ? 0x1 : 0x2;
                break;
        default:
                assert(0);
                break;
        }

        if (manual_stride)
                usage2_layout |= MALI_TEX_MANUAL_STRIDE;

        return usage2_layout;
}
static mali_ptr
panfrost_upload_tex(
        struct panfrost_context *ctx,
        struct panfrost_sampler_view *view)
{
        struct pipe_sampler_view *pview = &view->base;
        struct panfrost_resource *rsrc = pan_resource(pview->texture);

        /* Do we interleave an explicit stride with every element? */

        bool has_manual_stride = view->manual_stride;

        /* For easy access */

        assert(pview->target != PIPE_BUFFER);
        unsigned first_level = pview->u.tex.first_level;
        unsigned last_level = pview->u.tex.last_level;
        unsigned first_layer = pview->u.tex.first_layer;
        unsigned last_layer = pview->u.tex.last_layer;

        /* Lower-bit is set when sampling from colour AFBC */
        bool is_afbc = rsrc->layout == PAN_AFBC;
        bool is_zs = rsrc->base.bind & PIPE_BIND_DEPTH_STENCIL;
        unsigned afbc_bit = (is_afbc && !is_zs) ? 1 : 0;

        /* Add the BO to the job so it's retained until the job is done. */
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
        panfrost_job_add_bo(job, rsrc->bo);

        /* Add the usage flags in, since they can change across the CSO
         * lifetime due to layout switches */

        view->hw.format.usage2 = panfrost_layout_for_texture(rsrc, has_manual_stride);

        /* Inject the addresses in, interleaving mip levels, cube faces, and
         * strides in that order */

        unsigned idx = 0;

        for (unsigned l = first_level; l <= last_level; ++l) {
                for (unsigned f = first_layer; f <= last_layer; ++f) {
                        view->hw.payload[idx++] =
                                panfrost_get_texture_address(rsrc, l, f) + afbc_bit;

                        if (has_manual_stride) {
                                view->hw.payload[idx++] =
                                        rsrc->slices[l].stride;
                        }
                }
        }

        return panfrost_upload_transient(ctx, &view->hw,
                        sizeof(struct mali_texture_descriptor));
}
static void
panfrost_upload_texture_descriptors(struct panfrost_context *ctx)
{
        for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
                mali_ptr trampoline = 0;

                if (ctx->sampler_view_count[t]) {
                        uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];

                        for (int i = 0; i < ctx->sampler_view_count[t]; ++i)
                                trampolines[i] =
                                        panfrost_upload_tex(ctx, ctx->sampler_views[t][i]);

                        trampoline = panfrost_upload_transient(ctx, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]);
                }

                ctx->payloads[t].postfix.texture_trampoline = trampoline;
        }
}
struct sysval_uniform {
        union {
                float f[4];
                int32_t i[4];
                uint32_t u[4];
                uint64_t du[2];
        };
};
static void panfrost_upload_viewport_scale_sysval(struct panfrost_context *ctx,
                struct sysval_uniform *uniform)
{
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->scale[0];
        uniform->f[1] = vp->scale[1];
        uniform->f[2] = vp->scale[2];
}

static void panfrost_upload_viewport_offset_sysval(struct panfrost_context *ctx,
                struct sysval_uniform *uniform)
{
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        uniform->f[0] = vp->translate[0];
        uniform->f[1] = vp->translate[1];
        uniform->f[2] = vp->translate[2];
}
static void panfrost_upload_txs_sysval(struct panfrost_context *ctx,
                enum pipe_shader_type st,
                unsigned int sysvalid,
                struct sysval_uniform *uniform)
{
        unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
        unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
        bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
        struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;

        assert(dim);
        uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);

        if (dim > 1)
                uniform->i[1] = u_minify(tex->texture->height0,
                                tex->u.tex.first_level);

        if (dim > 2)
                uniform->i[2] = u_minify(tex->texture->depth0,
                                tex->u.tex.first_level);

        if (is_array)
                uniform->i[dim] = tex->texture->array_size;
}
static void panfrost_upload_ssbo_sysval(
        struct panfrost_context *ctx,
        enum pipe_shader_type st,
        unsigned ssbo_id,
        struct sysval_uniform *uniform)
{
        assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
        struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];

        /* Compute address */
        struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
        struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;

        panfrost_job_add_bo(batch, bo);

        /* Upload address and size as sysval */
        uniform->du[0] = bo->gpu + sb.buffer_offset;
        uniform->u[2] = sb.buffer_size;
}
static void panfrost_upload_num_work_groups_sysval(struct panfrost_context *ctx,
                struct sysval_uniform *uniform)
{
        uniform->u[0] = ctx->compute_grid->grid[0];
        uniform->u[1] = ctx->compute_grid->grid[1];
        uniform->u[2] = ctx->compute_grid->grid[2];
}
static void panfrost_upload_sysvals(struct panfrost_context *ctx, void *buf,
                struct panfrost_shader_state *ss,
                enum pipe_shader_type st)
{
        struct sysval_uniform *uniforms = (void *)buf;

        for (unsigned i = 0; i < ss->sysval_count; ++i) {
                int sysval = ss->sysval[i];

                switch (PAN_SYSVAL_TYPE(sysval)) {
                case PAN_SYSVAL_VIEWPORT_SCALE:
                        panfrost_upload_viewport_scale_sysval(ctx, &uniforms[i]);
                        break;
                case PAN_SYSVAL_VIEWPORT_OFFSET:
                        panfrost_upload_viewport_offset_sysval(ctx, &uniforms[i]);
                        break;
                case PAN_SYSVAL_TEXTURE_SIZE:
                        panfrost_upload_txs_sysval(ctx, st, PAN_SYSVAL_ID(sysval),
                                        &uniforms[i]);
                        break;
                case PAN_SYSVAL_SSBO:
                        panfrost_upload_ssbo_sysval(ctx, st, PAN_SYSVAL_ID(sysval),
                                        &uniforms[i]);
                        break;
                case PAN_SYSVAL_NUM_WORK_GROUPS:
                        panfrost_upload_num_work_groups_sysval(ctx, &uniforms[i]);
                        break;
                default:
                        assert(0);
                }
        }
}
static const void *
panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf, unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);

        if (rsrc)
                return rsrc->bo->cpu;
        else if (cb->user_buffer)
                return cb->user_buffer;
        else
                unreachable("No constant buffer");
}
static mali_ptr
panfrost_map_constant_buffer_gpu(
        struct panfrost_context *ctx,
        struct panfrost_constant_buffer *buf,
        unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);

        if (rsrc)
                return rsrc->bo->gpu;
        else if (cb->user_buffer)
                return panfrost_upload_transient(ctx, cb->user_buffer, cb->buffer_size);
        else
                unreachable("No constant buffer");
}
/* Compute number of UBOs active (more specifically, compute the highest UBO
 * number addressable -- if there are gaps, include them in the count anyway).
 * We always include UBO #0 in the count, since we *need* uniforms enabled for
 * sysvals. */

static unsigned
panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage)
{
        unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1;
        return 32 - __builtin_clz(mask);
}
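
/* Illustrative example (not from the original source): enabled_mask = 0b1010
 * gives mask = 0b1011, so 32 - __builtin_clz(0b1011) = 32 - 28 = 4, i.e.
 * UBOs #0 through #3 are counted, including the gap at #2. */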
/* Fixes up a shader state with current state, returning a GPU address to the
 * patched shader */

static mali_ptr
panfrost_patch_shader_state(
        struct panfrost_context *ctx,
        struct panfrost_shader_state *ss,
        enum pipe_shader_type stage,
        bool should_upload)
{
        ss->tripipe->texture_count = ctx->sampler_view_count[stage];
        ss->tripipe->sampler_count = ctx->sampler_count[stage];

        ss->tripipe->midgard1.flags = 0x220;

        unsigned ubo_count = panfrost_ubo_count(ctx, stage);
        ss->tripipe->midgard1.uniform_buffer_count = ubo_count;

        /* We can't reuse over frames; that's not safe. The descriptor must be
         * transient uploaded */

        if (should_upload) {
                return panfrost_upload_transient(ctx,
                                ss->tripipe,
                                sizeof(struct mali_shader_meta));
        }

        /* If we don't need an upload, don't bother */
        return 0;
}
static void
panfrost_patch_shader_state_compute(
        struct panfrost_context *ctx,
        enum pipe_shader_type stage,
        bool should_upload)
{
        struct panfrost_shader_variants *all = ctx->shader[stage];

        if (!all) {
                ctx->payloads[stage].postfix._shader_upper = 0;
                return;
        }

        struct panfrost_shader_state *s = &all->variants[all->active_variant];

        ctx->payloads[stage].postfix._shader_upper =
                panfrost_patch_shader_state(ctx, s, stage, should_upload) >> 4;
}
/* Go through dirty flags and actualise them in the cmdstream. */

static void
panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
{
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
        struct panfrost_screen *screen = pan_screen(ctx->base.screen);

        panfrost_attach_vt_framebuffer(ctx);

        if (with_vertex_data) {
                panfrost_emit_vertex_data(job);

                /* Varyings emitted for -all- geometry */
                unsigned total_count = ctx->padded_count * ctx->instance_count;
                panfrost_emit_varying_descriptor(ctx, total_count);
        }

        bool msaa = ctx->rasterizer->base.multisample;

        if (ctx->dirty & PAN_DIRTY_RASTERIZER) {
                ctx->payloads[PIPE_SHADER_FRAGMENT].gl_enables = ctx->rasterizer->tiler_gl_enables;

                /* TODO: Sample size */
                SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_HAS_MSAA, msaa);
                SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !msaa);
        }

        panfrost_job_set_requirements(ctx, job);

        if (ctx->occlusion_query) {
                ctx->payloads[PIPE_SHADER_FRAGMENT].gl_enables |= MALI_OCCLUSION_QUERY | MALI_OCCLUSION_PRECISE;
                ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.occlusion_counter = ctx->occlusion_query->transfer.gpu;
        }

        panfrost_patch_shader_state_compute(ctx, PIPE_SHADER_VERTEX, true);
        panfrost_patch_shader_state_compute(ctx, PIPE_SHADER_COMPUTE, true);

        if (ctx->dirty & (PAN_DIRTY_RASTERIZER | PAN_DIRTY_VS)) {
                /* Check if we need to link the gl_PointSize varying */
                if (!panfrost_writes_point_size(ctx)) {
                        /* If the size is constant, write it out. Otherwise,
                         * don't touch primitive_size (since we would clobber
                         * the pointer there) */

                        ctx->payloads[PIPE_SHADER_FRAGMENT].primitive_size.constant = ctx->rasterizer->base.line_width;
                }
        }

        /* TODO: Maybe dirty track FS, maybe not. For now, it's transient. */
        if (ctx->shader[PIPE_SHADER_FRAGMENT])
                ctx->dirty |= PAN_DIRTY_FS;

        if (ctx->dirty & PAN_DIRTY_FS) {
                assert(ctx->shader[PIPE_SHADER_FRAGMENT]);
                struct panfrost_shader_state *variant = &ctx->shader[PIPE_SHADER_FRAGMENT]->variants[ctx->shader[PIPE_SHADER_FRAGMENT]->active_variant];

                panfrost_patch_shader_state(ctx, variant, PIPE_SHADER_FRAGMENT, false);

                panfrost_job_add_bo(job, variant->bo);

#define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name

                COPY(attribute_count);
                COPY(midgard1.uniform_count);
                COPY(midgard1.uniform_buffer_count);
                COPY(midgard1.work_count);
                COPY(midgard1.flags);
                COPY(midgard1.unknown2);

#undef COPY

                /* Get blending setup */
                struct panfrost_blend_final blend =
                        panfrost_get_blend_for_context(ctx, 0);

                /* If there is a blend shader, work registers are shared */

                if (blend.is_shader)
                        ctx->fragment_shader_core.midgard1.work_count = /*MAX2(ctx->fragment_shader_core.midgard1.work_count, ctx->blend->blend_work_count)*/ 16;

                /* Set late due to depending on render state */
                unsigned flags = ctx->fragment_shader_core.midgard1.flags;

                /* Depending on whether it's legal to do so in the given
                 * shader, we try to enable early-z testing (or forward-pixel
                 * kill?) */

                if (!variant->can_discard)
                        flags |= MALI_EARLY_Z;

                /* Any time texturing is used, derivatives are implicitly
                 * calculated, so we need to enable helper invocations */

                if (variant->helper_invocations)
                        flags |= MALI_HELPER_INVOCATIONS;

                ctx->fragment_shader_core.midgard1.flags = flags;

                /* Assign the stencil refs late */

                unsigned front_ref = ctx->stencil_ref.ref_value[0];
                unsigned back_ref = ctx->stencil_ref.ref_value[1];
                bool back_enab = ctx->depth_stencil->stencil[1].enabled;

                ctx->fragment_shader_core.stencil_front.ref = front_ref;
                ctx->fragment_shader_core.stencil_back.ref = back_enab ? back_ref : front_ref;

                /* CAN_DISCARD should be set if the fragment shader possibly
                 * contains a 'discard' instruction. It is likely this is
                 * related to optimizations related to forward-pixel kill, as
                 * per "Mali Performance 3: Is EGL_BUFFER_PRESERVED a good
                 * thing?" by Peter Harris
                 */

                if (variant->can_discard) {
                        ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
                        ctx->fragment_shader_core.midgard1.flags |= 0x400;
                }

                /* Check if we're using the default blend descriptor (fast path) */

                bool no_blending =
                        !blend.is_shader &&
                        (blend.equation.equation->rgb_mode == 0x122) &&
                        (blend.equation.equation->alpha_mode == 0x122) &&
                        (blend.equation.equation->color_mask == 0xf);

                /* Even on MFBD, the shader descriptor gets blend shaders. It's
                 * *also* copied to the blend_meta appended (by convention),
                 * but this is the field actually read by the hardware. (Or
                 * maybe both are read...?) */

                if (blend.is_shader) {
                        ctx->fragment_shader_core.blend.shader =
                                blend.shader.bo->gpu | blend.shader.first_tag;
                } else {
                        ctx->fragment_shader_core.blend.shader = 0;
                }

                if (screen->require_sfbd) {
                        /* When only a single render target platform is used, the blend
                         * information is inside the shader meta itself. We
                         * additionally need to signal CAN_DISCARD for nontrivial blend
                         * modes (so we're able to read back the destination buffer) */

                        if (!blend.is_shader) {
                                ctx->fragment_shader_core.blend.equation =
                                        *blend.equation.equation;
                                ctx->fragment_shader_core.blend.constant =
                                        blend.equation.constant;
                        }

                        if (!no_blending)
                                ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
                }

                size_t size = sizeof(struct mali_shader_meta) + sizeof(struct midgard_blend_rt);
                struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);
                memcpy(transfer.cpu, &ctx->fragment_shader_core, sizeof(struct mali_shader_meta));

                ctx->payloads[PIPE_SHADER_FRAGMENT].postfix._shader_upper = (transfer.gpu) >> 4;

                if (!screen->require_sfbd) {
                        /* Additional blend descriptor tacked on for jobs using MFBD */

                        unsigned blend_count = 0x200;

                        if (blend.is_shader) {
                                /* For a blend shader, the bottom nibble corresponds to
                                 * the number of work registers used, which signals the
                                 * -existence- of a blend shader */

                                assert(blend.shader.work_count >= 2);
                                blend_count |= MIN2(blend.shader.work_count, 3);
                        } else {
                                /* Otherwise, the bottom bit simply specifies if
                                 * blending (anything other than REPLACE) is enabled */

                                if (!no_blending)
                                        blend_count |= 0x1;
                        }

                        struct midgard_blend_rt rts[4];

                        for (unsigned i = 0; i < ctx->pipe_framebuffer.nr_cbufs; ++i) {
                                bool is_srgb =
                                        (ctx->pipe_framebuffer.nr_cbufs > i) &&
                                        (ctx->pipe_framebuffer.cbufs[i]) &&
                                        util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);

                                rts[i].flags = blend_count;

                                if (is_srgb)
                                        rts[i].flags |= MALI_BLEND_SRGB;

                                if (!ctx->blend->base.dither)
                                        rts[i].flags |= MALI_BLEND_NO_DITHER;

                                /* TODO: sRGB in blend shaders is currently
                                 * unimplemented. Contact me (Alyssa) if you're
                                 * interested in working on this. We have
                                 * native Midgard ops for helping here, but
                                 * they're not well-understood yet. */

                                assert(!(is_srgb && blend.is_shader));

                                if (blend.is_shader) {
                                        rts[i].blend.shader = blend.shader.bo->gpu | blend.shader.first_tag;
                                } else {
                                        rts[i].blend.equation = *blend.equation.equation;
                                        rts[i].blend.constant = blend.equation.constant;
                                }
                        }

                        memcpy(transfer.cpu + sizeof(struct mali_shader_meta), rts, sizeof(rts[0]) * 1);
                }
        }

        /* We stage to transient, so always dirty.. */
        if (ctx->vertex)
                panfrost_stage_attributes(ctx);

        if (ctx->dirty & PAN_DIRTY_SAMPLERS)
                panfrost_upload_sampler_descriptors(ctx);

        if (ctx->dirty & PAN_DIRTY_TEXTURES)
                panfrost_upload_texture_descriptors(ctx);

        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        for (int i = 0; i < PIPE_SHADER_TYPES; ++i) {
                struct panfrost_shader_variants *all = ctx->shader[i];

                if (!all)
                        continue;

                struct panfrost_constant_buffer *buf = &ctx->constant_buffer[i];

                struct panfrost_shader_state *ss = &all->variants[all->active_variant];

                /* Uniforms are implicitly UBO #0 */
                bool has_uniforms = buf->enabled_mask & (1 << 0);

                /* Allocate room for the sysval and the uniforms */
                size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
                size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
                size_t size = sys_size + uniform_size;
                struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);

                /* Upload sysvals requested by the shader */
                panfrost_upload_sysvals(ctx, transfer.cpu, ss, i);

                /* Upload uniforms */
                if (has_uniforms) {
                        const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
                        memcpy(transfer.cpu + sys_size, cpu, uniform_size);
                }

                int uniform_count =
                        ctx->shader[i]->variants[ctx->shader[i]->active_variant].uniform_count;

                struct mali_vertex_tiler_postfix *postfix =
                        &ctx->payloads[i].postfix;

                /* Next up, attach UBOs. UBO #0 is the uniforms we just
                 * uploaded */

                unsigned ubo_count = panfrost_ubo_count(ctx, i);
                assert(ubo_count >= 1);

                size_t sz = sizeof(struct mali_uniform_buffer_meta) * ubo_count;
                struct mali_uniform_buffer_meta ubos[PAN_MAX_CONST_BUFFERS];

                /* Upload uniforms as a UBO */
                ubos[0].size = MALI_POSITIVE((2 + uniform_count));
                ubos[0].ptr = transfer.gpu >> 2;

                /* The rest are honest-to-goodness UBOs */

                for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
                        size_t usz = buf->cb[ubo].buffer_size;

                        bool enabled = buf->enabled_mask & (1 << ubo);
                        bool empty = usz == 0;

                        if (!enabled || empty) {
                                /* Stub out disabled UBOs to catch accesses */
                                ubos[ubo].size = 0;
                                ubos[ubo].ptr = 0xDEAD0000;
                                continue;
                        }

                        mali_ptr gpu = panfrost_map_constant_buffer_gpu(ctx, buf, ubo);

                        unsigned bytes_per_field = 16;
                        unsigned aligned = ALIGN_POT(usz, bytes_per_field);
                        unsigned fields = aligned / bytes_per_field;

                        ubos[ubo].size = MALI_POSITIVE(fields);
                        ubos[ubo].ptr = gpu >> 2;
                }
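
                /* Worked example (illustrative, not from the original
                 * source): a 36-byte UBO is aligned up to 48 bytes, giving
                 * fields = 3 sixteen-byte slots, encoded off-by-one as
                 * MALI_POSITIVE(3) = 2; the address is likewise stored
                 * shifted right by 2. */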

                mali_ptr ubufs = panfrost_upload_transient(ctx, ubos, sz);
                postfix->uniforms = transfer.gpu;
                postfix->uniform_buffers = ubufs;

                buf->dirty_mask = 0;
        }

        /* TODO: Upload the viewport somewhere more appropriate */

        /* Clip bounds are encoded as floats. The viewport itself is encoded as
         * (somewhat) asymmetric ints. */
        const struct pipe_scissor_state *ss = &ctx->scissor;

        struct mali_viewport view = {
                /* By default, do no viewport clipping, i.e. clip to (-inf,
                 * inf) in each direction. Clipping to the viewport in theory
                 * should work, but in practice causes issues when we're not
                 * explicitly trying to scissor */

                .clip_minx = -INFINITY,
                .clip_miny = -INFINITY,
                .clip_maxx = INFINITY,
                .clip_maxy = INFINITY,
        };

        /* Always scissor to the viewport by default. */
        float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
        float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));

        float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
        float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));

        /* Apply the scissor test */

        unsigned minx, miny, maxx, maxy;

        if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
                minx = MAX2(ss->minx, vp_minx);
                miny = MAX2(ss->miny, vp_miny);
                maxx = MIN2(ss->maxx, vp_maxx);
                maxy = MIN2(ss->maxy, vp_maxy);
        } else {
                minx = vp_minx;
                miny = vp_miny;
                maxx = vp_maxx;
                maxy = vp_maxy;
        }

        /* Hardware needs the min/max to be strictly ordered, so flip if we
         * need to. The viewport transformation in the vertex shader will
         * handle the negatives if we don't */

        if (miny > maxy) {
                unsigned temp = miny;
                miny = maxy;
                maxy = temp;
        }

        if (minx > maxx) {
                unsigned temp = minx;
                minx = maxx;
                maxx = temp;
        }

        /* Clamp everything positive, just in case */

        maxx = MAX2(0, maxx);
        maxy = MAX2(0, maxy);
        minx = MAX2(0, minx);
        miny = MAX2(0, miny);

        /* Clamp to the framebuffer size as a last check */

        minx = MIN2(ctx->pipe_framebuffer.width, minx);
        maxx = MIN2(ctx->pipe_framebuffer.width, maxx);

        miny = MIN2(ctx->pipe_framebuffer.height, miny);
        maxy = MIN2(ctx->pipe_framebuffer.height, maxy);

        /* Update the job, unless we're doing wallpapering (whose lack of
         * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
         * just... be faster :) */

        if (!ctx->wallpaper_batch)
                panfrost_job_union_scissor(job, minx, miny, maxx, maxy);

        /* Upload */

        view.viewport0[0] = minx;
        view.viewport1[0] = MALI_POSITIVE(maxx);

        view.viewport0[1] = miny;
        view.viewport1[1] = MALI_POSITIVE(maxy);

        ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.viewport =
                panfrost_upload_transient(ctx,
                                &view,
                                sizeof(struct mali_viewport));
}
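
/* For illustration (not in the original source): a full-screen 1920x1080
 * viewport with no scissor would be encoded above as viewport0 = { 0, 0 }
 * and viewport1 = { MALI_POSITIVE(1920), MALI_POSITIVE(1080) } =
 * { 1919, 1079 }, assuming inclusive bounds biased by one. */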

/* Corresponds to exactly one draw, but does not submit anything */

static void
panfrost_queue_draw(struct panfrost_context *ctx)
{
        /* Handle dirty flags now */
        panfrost_emit_for_draw(ctx, true);

        /* If rasterizer discard is enabled, only submit the vertex job */

        bool rasterizer_discard = ctx->rasterizer
                                  && ctx->rasterizer->base.rasterizer_discard;

        struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false);
        struct panfrost_transfer tiler;

        if (!rasterizer_discard)
                tiler = panfrost_vertex_tiler_job(ctx, true);

        struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);

        if (rasterizer_discard)
                panfrost_scoreboard_queue_vertex_job(batch, vertex, FALSE);
        else if (ctx->wallpaper_batch)
                panfrost_scoreboard_queue_fused_job_prepend(batch, vertex, tiler);
        else
                panfrost_scoreboard_queue_fused_job(batch, vertex, tiler);
}

/* The entire frame is in memory -- send it off to the kernel! */

static void
panfrost_submit_frame(struct panfrost_context *ctx, bool flush_immediate,
                      struct pipe_fence_handle **fence,
                      struct panfrost_job *job)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);

        panfrost_job_submit(ctx, job);

        /* If visual, we can stall a frame */

        if (!flush_immediate)
                panfrost_drm_force_flush_fragment(ctx, fence);

        screen->last_fragment_flushed = false;
        screen->last_job = job;

        /* If readback, flush now (hurts the pipelined performance) */
        if (flush_immediate)
                panfrost_drm_force_flush_fragment(ctx, fence);
}
static void
panfrost_draw_wallpaper(struct pipe_context *pipe)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* Nothing to reload? TODO: MRT wallpapers */
        if (ctx->pipe_framebuffer.cbufs[0] == NULL)
                return;

        /* Check if the buffer has any content on it worth preserving */

        struct pipe_surface *surf = ctx->pipe_framebuffer.cbufs[0];
        struct panfrost_resource *rsrc = pan_resource(surf->texture);
        unsigned level = surf->u.tex.level;

        if (!rsrc->slices[level].initialized)
                return;

        /* Save the batch */
        struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);

        ctx->wallpaper_batch = batch;

        /* Clamp the rendering area to the damage extent. The
         * KHR_partial_update() spec states that trying to render outside of
         * the damage region is "undefined behavior", so we should be safe.
         */
        panfrost_job_intersection_scissor(batch, rsrc->damage.extent.minx,
                                          rsrc->damage.extent.miny,
                                          rsrc->damage.extent.maxx,
                                          rsrc->damage.extent.maxy);

        /* FIXME: Looks like aligning on a tile is not enough, but
         * aligning on twice the tile size seems to work. We don't
         * know exactly what happens here but this deserves extra
         * investigation to figure it out.
         */
        batch->minx = batch->minx & ~((MALI_TILE_LENGTH * 2) - 1);
        batch->miny = batch->miny & ~((MALI_TILE_LENGTH * 2) - 1);
        batch->maxx = MIN2(ALIGN_POT(batch->maxx, MALI_TILE_LENGTH * 2),
                           rsrc->base.width0);
        batch->maxy = MIN2(ALIGN_POT(batch->maxy, MALI_TILE_LENGTH * 2),
                           rsrc->base.height0);

        struct pipe_scissor_state damage;
        struct pipe_box rects[4];

        /* Clamp the damage box to the rendering area. */
        damage.minx = MAX2(batch->minx, rsrc->damage.biggest_rect.x);
        damage.miny = MAX2(batch->miny, rsrc->damage.biggest_rect.y);
        damage.maxx = MIN2(batch->maxx,
                           rsrc->damage.biggest_rect.x +
                           rsrc->damage.biggest_rect.width);
        damage.maxy = MIN2(batch->maxy,
                           rsrc->damage.biggest_rect.y +
                           rsrc->damage.biggest_rect.height);

        /* One damage rectangle means we can end up with at most 4 reload
         * regions:
         * 1: left region, only exists if damage.x > 0
         * 2: right region, only exists if damage.x + damage.width < fb->width
         * 3: top region, only exists if damage.y > 0. The intersection with
         *    the left and right regions are dropped
         * 4: bottom region, only exists if damage.y + damage.height < fb->height.
         *    The intersection with the left and right regions are dropped
         *
         *                    ____________________________
         *                    |       |     3     |      |
         *                    |       |___________|      |
         *                    |       |  damage   |      |
         *                    |   1   |   rect    |   2  |
         *                    |       |___________|      |
         *                    |       |     4     |      |
         *                    |_______|___________|______|
         */
        u_box_2d(batch->minx, batch->miny, damage.minx - batch->minx,
                 batch->maxy - batch->miny, &rects[0]);
        u_box_2d(damage.maxx, batch->miny, batch->maxx - damage.maxx,
                 batch->maxy - batch->miny, &rects[1]);
        u_box_2d(damage.minx, batch->miny, damage.maxx - damage.minx,
                 damage.miny - batch->miny, &rects[2]);
        u_box_2d(damage.minx, damage.maxy, damage.maxx - damage.minx,
                 batch->maxy - damage.maxy, &rects[3]);

        for (unsigned i = 0; i < 4; i++) {
                /* Width and height are always >= 0 even if width is declared as a
                 * signed integer: u_box_2d() helper takes unsigned args and
                 * panfrost_set_damage_region() is taking care of clamping
                 * negative values.
                 */
                if (!rects[i].width || !rects[i].height)
                        continue;

                /* Blit the wallpaper in */
                panfrost_blit_wallpaper(ctx, &rects[i]);
        }

        ctx->wallpaper_batch = NULL;
}
static void
panfrost_flush(
        struct pipe_context *pipe,
        struct pipe_fence_handle **fence,
        unsigned flags)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);

        /* Nothing to do! */
        if (!job->last_job.gpu && !job->clear) return;

        if (!job->clear && job->last_tiler.gpu)
                panfrost_draw_wallpaper(&ctx->base);

        /* Whether to stall the pipeline for immediately correct results. Since
         * pipelined rendering is quite broken right now (to be fixed by the
         * panfrost_job refactor), just take the perf hit for correctness */
        bool flush_immediate = /*flags & PIPE_FLUSH_END_OF_FRAME*/true;

        /* Submit the frame itself */
        panfrost_submit_frame(ctx, flush_immediate, fence, job);

        /* Prepare for the next frame */
        panfrost_invalidate_frame(ctx);
}
#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c;

static int
g2m_draw_mode(enum pipe_prim_type mode)
{
        switch (mode) {
                DEFINE_CASE(POINTS);
                DEFINE_CASE(LINES);
                DEFINE_CASE(LINE_LOOP);
                DEFINE_CASE(LINE_STRIP);
                DEFINE_CASE(TRIANGLES);
                DEFINE_CASE(TRIANGLE_STRIP);
                DEFINE_CASE(TRIANGLE_FAN);
                DEFINE_CASE(QUADS);
                DEFINE_CASE(QUAD_STRIP);
                DEFINE_CASE(POLYGON);

        default:
                unreachable("Invalid draw mode");
        }
}

#undef DEFINE_CASE
static unsigned
panfrost_translate_index_size(unsigned size)
{
        switch (size) {
        case 1:
                return MALI_DRAW_INDEXED_UINT8;

        case 2:
                return MALI_DRAW_INDEXED_UINT16;

        case 4:
                return MALI_DRAW_INDEXED_UINT32;

        default:
                unreachable("Invalid index size");
        }
}
/* Gets a GPU address for the associated index buffer. Only guaranteed to be
 * good for the duration of the draw (transient), could last longer */

static mali_ptr
panfrost_get_index_buffer_mapped(struct panfrost_context *ctx, const struct pipe_draw_info *info)
{
        struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);

        off_t offset = info->start * info->index_size;
        struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);

        if (!info->has_user_indices) {
                /* Only resources can be directly mapped */
                panfrost_job_add_bo(batch, rsrc->bo);
                return rsrc->bo->gpu + offset;
        } else {
                /* Otherwise, we need to upload to transient memory */
                const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
                return panfrost_upload_transient(ctx, ibuf8 + offset, info->count * info->index_size);
        }
}
static bool
panfrost_scissor_culls_everything(struct panfrost_context *ctx)
{
        const struct pipe_scissor_state *ss = &ctx->scissor;

        /* Check if we're scissoring at all */

        if (!(ctx->rasterizer && ctx->rasterizer->base.scissor))
                return false;

        return (ss->minx == ss->maxx) || (ss->miny == ss->maxy);
}
/* Count generated primitives (when there are no geom/tess shaders) for
 * transform feedback */

static void
panfrost_statistics_record(
        struct panfrost_context *ctx,
        const struct pipe_draw_info *info)
{
        if (!ctx->active_queries)
                return;

        uint32_t prims = u_prims_for_vertices(info->mode, info->count);
        ctx->prims_generated += prims;

        if (ctx->streamout.num_targets <= 0)
                return;

        ctx->tf_prims_generated += prims;
}
static void
panfrost_draw_vbo(
        struct pipe_context *pipe,
        const struct pipe_draw_info *info)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* First of all, check the scissor to see if anything is drawn at all.
         * If it's not, we drop the draw (mostly a conformance issue;
         * well-behaved apps shouldn't hit this) */

        if (panfrost_scissor_culls_everything(ctx))
                return;

        ctx->payloads[PIPE_SHADER_VERTEX].offset_start = info->start;
        ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = info->start;

        int mode = info->mode;

        /* Fallback unsupported restart index */
        unsigned primitive_index = (1 << (info->index_size * 8)) - 1;

        if (info->primitive_restart && info->index_size
            && info->restart_index != primitive_index) {
                util_draw_vbo_without_prim_restart(pipe, info);
                return;
        }

        /* Fallback for unsupported modes */

        if (!(ctx->draw_modes & (1 << mode))) {
                if (mode == PIPE_PRIM_QUADS && info->count == 4 && ctx->rasterizer && !ctx->rasterizer->base.flatshade) {
                        mode = PIPE_PRIM_TRIANGLE_FAN;
                } else {
                        if (info->count < 4) {
                                /* Degenerate case? */
                                return;
                        }

                        util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
                        util_primconvert_draw_vbo(ctx->primconvert, info);
                        return;
                }
        }

        /* Now that we have a guaranteed terminating path, find the job.
         * Assignment commented out to prevent unused warning */

        /* struct panfrost_job *job = */ panfrost_get_job_for_fbo(ctx);

        ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode = g2m_draw_mode(mode);

        /* Take into account a negative bias */
        ctx->vertex_count = info->count + abs(info->index_bias);
        ctx->instance_count = info->instance_count;
        ctx->active_prim = info->mode;

        /* For non-indexed draws, they're the same */
        unsigned vertex_count = ctx->vertex_count;

        unsigned draw_flags = 0;

        /* The draw flags interpret how primitive size is interpreted */

        if (panfrost_writes_point_size(ctx))
                draw_flags |= MALI_DRAW_VARYING_SIZE;

        if (info->primitive_restart)
                draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;

        /* For higher amounts of vertices (greater than what fits in a 16-bit
         * short), the other value is needed, otherwise there will be bizarre
         * rendering artefacts. It's not clear what these values mean yet. This
         * change is also needed for instancing and sometimes points (perhaps
         * related to dynamically setting gl_PointSize) */

        bool is_points = mode == PIPE_PRIM_POINTS;
        bool many_verts = ctx->vertex_count > 0xFFFF;
        bool instanced = ctx->instance_count > 1;

        draw_flags |= (is_points || many_verts || instanced) ? 0x3000 : 0x18000;

        /* This doesn't make much sense */
        if (mode == PIPE_PRIM_LINE_STRIP)
                draw_flags |= 0x800;

        panfrost_statistics_record(ctx, info);

        if (info->index_size) {
                /* Calculate the min/max index used so we can figure out how
                 * many times to invoke the vertex shader */

                /* Fetch / calculate index bounds */
                unsigned min_index = 0, max_index = 0;

                if (info->max_index == ~0u) {
                        u_vbuf_get_minmax_index(pipe, info, &min_index, &max_index);
                } else {
                        min_index = info->min_index;
                        max_index = info->max_index;
                }

                /* Use the corresponding values */
                vertex_count = max_index - min_index + 1;
                ctx->payloads[PIPE_SHADER_VERTEX].offset_start = min_index + info->index_bias;
                ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = min_index + info->index_bias;

                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = -min_index;
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(info->count);

                //assert(!info->restart_index); /* TODO: Research */

                draw_flags |= panfrost_translate_index_size(info->index_size);
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices = panfrost_get_index_buffer_mapped(ctx, info);
        } else {
                /* Index count == vertex count, if no indexing is applied, as
                 * if it is internally indexed in the expected order */

                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = 0;
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(ctx->vertex_count);

                /* Reverse index state */
                ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices = (u64) NULL;
        }
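
        /* Worked example (illustrative, not from the original source): an
         * indexed draw over indices { 5, 6, 8 } yields min_index = 5 and
         * max_index = 8, so the vertex shader is invoked for
         * vertex_count = 8 - 5 + 1 = 4 vertices, with
         * offset_bias_correction = -5 rebasing the indices to that range. */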

        /* Dispatch "compute jobs" for the vertex/tiler pair as (1,
         * vertex_count, 1) */

        panfrost_pack_work_groups_fused(
                &ctx->payloads[PIPE_SHADER_VERTEX].prefix,
                &ctx->payloads[PIPE_SHADER_FRAGMENT].prefix,
                1, vertex_count, info->instance_count,
                1, 1, 1);

        ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.unknown_draw = draw_flags;

        /* Encode the padded vertex count */

        if (info->instance_count > 1) {
                /* Triangles have non-even vertex counts so they change how
                 * padding works internally */

                bool is_triangle =
                        mode == PIPE_PRIM_TRIANGLES ||
                        mode == PIPE_PRIM_TRIANGLE_STRIP ||
                        mode == PIPE_PRIM_TRIANGLE_FAN;

                struct pan_shift_odd so =
                        panfrost_padded_vertex_count(vertex_count, !is_triangle);

                ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = so.shift;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = so.shift;

                ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = so.odd;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = so.odd;

                ctx->padded_count = pan_expand_shift_odd(so);
        } else {
                ctx->padded_count = ctx->vertex_count;

                /* Reset instancing state */
                ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = 0;
                ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = 0;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = 0;
                ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = 0;
        }
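
        /* Illustrative example (assuming the padded count is encoded as
         * odd << shift): vertex_count = 17 for a non-triangle primitive
         * could pad to 18 = 9 << 1, i.e. so.odd = 9 and so.shift = 1, so
         * pan_expand_shift_odd(so) recovers ctx->padded_count = 18. */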

        /* Fire off the draw itself */
        panfrost_queue_draw(ctx);

        /* Increment transform feedback offsets */

        for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
                unsigned output_count = u_stream_outputs_for_vertices(
                                ctx->active_prim, ctx->vertex_count);

                ctx->streamout.offsets[i] += output_count;
        }
}
static void
panfrost_generic_cso_delete(struct pipe_context *pctx, void *hwcso)
{
        free(hwcso);
}

static void *
panfrost_create_rasterizer_state(
        struct pipe_context *pctx,
        const struct pipe_rasterizer_state *cso)
{
        struct panfrost_rasterizer *so = CALLOC_STRUCT(panfrost_rasterizer);
        so->base = *cso;

        /* Bitmask, unknown meaning of the start value. 0x105 on 32-bit T6XX */
        so->tiler_gl_enables = 0x7;

        if (cso->front_ccw)
                so->tiler_gl_enables |= MALI_FRONT_CCW_TOP;

        if (cso->cull_face & PIPE_FACE_FRONT)
                so->tiler_gl_enables |= MALI_CULL_FACE_FRONT;

        if (cso->cull_face & PIPE_FACE_BACK)
                so->tiler_gl_enables |= MALI_CULL_FACE_BACK;

        return so;
}
static void
panfrost_bind_rasterizer_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        /* TODO: Why can't rasterizer be NULL ever? Other drivers are fine.. */
        if (!hwcso)
                return;

        ctx->rasterizer = hwcso;
        ctx->dirty |= PAN_DIRTY_RASTERIZER;

        ctx->fragment_shader_core.depth_units = ctx->rasterizer->base.offset_units;
        ctx->fragment_shader_core.depth_factor = ctx->rasterizer->base.offset_scale;

        /* Guaranteed with the core GL call, so don't expose ARB_polygon_offset */
        assert(ctx->rasterizer->base.offset_clamp == 0.0);

        /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */

        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_DEPTH_RANGE_A, ctx->rasterizer->base.offset_tri);
        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_DEPTH_RANGE_B, ctx->rasterizer->base.offset_tri);

        /* Point sprites are emulated */

        struct panfrost_shader_state *variant =
                ctx->shader[PIPE_SHADER_FRAGMENT] ? &ctx->shader[PIPE_SHADER_FRAGMENT]->variants[ctx->shader[PIPE_SHADER_FRAGMENT]->active_variant] : NULL;

        if (ctx->rasterizer->base.sprite_coord_enable || (variant && variant->point_sprite_mask))
                ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
}
static void *
panfrost_create_vertex_elements_state(
        struct pipe_context *pctx,
        unsigned num_elements,
        const struct pipe_vertex_element *elements)
{
        struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state);

        so->num_elements = num_elements;
        memcpy(so->pipe, elements, sizeof(*elements) * num_elements);

        for (int i = 0; i < num_elements; ++i) {
                so->hw[i].index = i;

                enum pipe_format fmt = elements[i].src_format;
                const struct util_format_description *desc = util_format_description(fmt);
                so->hw[i].unknown1 = 0x2;
                so->hw[i].swizzle = panfrost_get_default_swizzle(desc->nr_channels);

                so->hw[i].format = panfrost_find_format(desc);

                /* The field itself should probably be shifted over */
                so->hw[i].src_offset = elements[i].src_offset;
        }

        return so;
}
static void
panfrost_bind_vertex_elements_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        ctx->vertex = hwcso;
        ctx->dirty |= PAN_DIRTY_VERTEX;
}
static void *
panfrost_create_shader_state(
        struct pipe_context *pctx,
        const struct pipe_shader_state *cso)
{
        struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
        so->base = *cso;

        /* Token deep copy to prevent memory corruption */

        if (cso->type == PIPE_SHADER_IR_TGSI)
                so->base.tokens = tgsi_dup_tokens(so->base.tokens);

        return so;
}
static void
panfrost_delete_shader_state(
        struct pipe_context *pctx,
        void *so)
{
        struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;

        if (cso->base.type == PIPE_SHADER_IR_TGSI) {
                DBG("Deleting TGSI shader leaks duplicated tokens\n");
        }

        for (unsigned i = 0; i < cso->variant_count; ++i) {
                struct panfrost_shader_state *shader_state = &cso->variants[i];
                panfrost_bo_unreference(pctx->screen, shader_state->bo);
                shader_state->bo = NULL;
        }

        free(so);
}
static void *
panfrost_create_sampler_state(
        struct pipe_context *pctx,
        const struct pipe_sampler_state *cso)
{
        struct panfrost_sampler_state *so = CALLOC_STRUCT(panfrost_sampler_state);
        so->base = *cso;

        /* sampler_state corresponds to mali_sampler_descriptor, which we can generate entirely here */

        bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
        bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
        bool mip_linear  = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;

        unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
        unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
        unsigned mip_filter = mip_linear ?
                              (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
        unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;

        struct mali_sampler_descriptor sampler_descriptor = {
                .filter_mode = min_filter | mag_filter | mip_filter | normalized,
                .wrap_s = translate_tex_wrap(cso->wrap_s),
                .wrap_t = translate_tex_wrap(cso->wrap_t),
                .wrap_r = translate_tex_wrap(cso->wrap_r),
                .compare_func = panfrost_translate_alt_compare_func(cso->compare_func),
                .border_color = {
                        cso->border_color.f[0],
                        cso->border_color.f[1],
                        cso->border_color.f[2],
                        cso->border_color.f[3]
                },
                .min_lod = FIXED_16(cso->min_lod),
                .max_lod = FIXED_16(cso->max_lod),
                .seamless_cube_map = cso->seamless_cube_map,
        };

        /* If necessary, we disable mipmapping in the sampler descriptor by
         * clamping the LOD as tight as possible (from 0 to epsilon,
         * essentially -- remember these are fixed point numbers, so
         * epsilon = 1/256) */

        if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
                sampler_descriptor.max_lod = sampler_descriptor.min_lod;

        /* Enforce that there is something in the middle by adding epsilon */

        if (sampler_descriptor.min_lod == sampler_descriptor.max_lod)
                sampler_descriptor.max_lod++;

        /* Sanity check */
        assert(sampler_descriptor.max_lod > sampler_descriptor.min_lod);

        so->hw = sampler_descriptor;

        return so;
}
static void
panfrost_bind_sampler_states(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_sampler,
        void **sampler)
{
        assert(start_slot == 0);

        struct panfrost_context *ctx = pan_context(pctx);

        /* XXX: Should upload, not just copy? */
        ctx->sampler_count[shader] = num_sampler;
        memcpy(ctx->samplers[shader], sampler, num_sampler * sizeof (void *));

        ctx->dirty |= PAN_DIRTY_SAMPLERS;
}
static bool
panfrost_variant_matches(
        struct panfrost_context *ctx,
        struct panfrost_shader_state *variant,
        enum pipe_shader_type type)
{
        struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base;
        struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;

        bool is_fragment = (type == PIPE_SHADER_FRAGMENT);

        if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) {
                /* Make sure enable state is at least the same */
                if (alpha->enabled != variant->alpha_state.enabled) {
                        return false;
                }

                /* Check that the contents of the test are the same */
                bool same_func = alpha->func == variant->alpha_state.func;
                bool same_ref = alpha->ref_value == variant->alpha_state.ref_value;

                if (!(same_func && same_ref)) {
                        return false;
                }
        }

        if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable |
                                          variant->point_sprite_mask)) {
                /* Ensure the same varyings are turned to point sprites */
                if (rasterizer->sprite_coord_enable != variant->point_sprite_mask)
                        return false;

                /* Ensure the orientation is correct */
                bool upper_left =
                        rasterizer->sprite_coord_mode ==
                        PIPE_SPRITE_COORD_UPPER_LEFT;

                if (variant->point_sprite_upper_left != upper_left)
                        return false;
        }

        /* Otherwise, we're good to go */
        return true;
}
/* Fix an uncompiled shader's stream output info, and produce a bitmask
 * of which VARYING_SLOT_* are captured for stream output.
 *
 * Core Gallium stores output->register_index as a "slot" number, where
 * slots are assigned consecutively to all outputs in info->outputs_written.
 * This naive packing of outputs doesn't work for us - we too have slots,
 * but the layout is defined by the VUE map, which we won't have until we
 * compile a specific shader variant. So, we remap these and simply store
 * VARYING_SLOT_* in our copy's output->register_index fields.
 *
 * We then produce a bitmask of outputs which are used for SO.
 *
 * Implementation from iris.
 */
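
/* Worked example (a sketch, not part of the original source): if
 * outputs_written == (1ull << VARYING_SLOT_POS) | (1ull << VARYING_SLOT_VAR0),
 * the scan below fills reverse_map[0] = VARYING_SLOT_POS and
 * reverse_map[1] = VARYING_SLOT_VAR0, since u_bit_scan64 consumes set bits in
 * ascending order. A Gallium register_index of 1 therefore becomes
 * VARYING_SLOT_VAR0, and bit VARYING_SLOT_VAR0 is set in the returned mask. */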
static uint64_t
update_so_info(struct pipe_stream_output_info *so_info,
               uint64_t outputs_written)
{
        uint64_t so_outputs = 0;
        uint8_t reverse_map[64] = {};
        unsigned slot = 0;

        while (outputs_written)
                reverse_map[slot++] = u_bit_scan64(&outputs_written);

        for (unsigned i = 0; i < so_info->num_outputs; i++) {
                struct pipe_stream_output *output = &so_info->output[i];

                /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
                output->register_index = reverse_map[output->register_index];

                so_outputs |= 1ull << output->register_index;
        }

        return so_outputs;
}
static void
panfrost_bind_shader_state(
        struct pipe_context *pctx,
        void *hwcso,
        enum pipe_shader_type type)
{
        struct panfrost_context *ctx = pan_context(pctx);

        ctx->shader[type] = hwcso;

        if (type == PIPE_SHADER_FRAGMENT)
                ctx->dirty |= PAN_DIRTY_FS;
        else
                ctx->dirty |= PAN_DIRTY_VS;

        if (!hwcso) return;

        /* Match the appropriate variant */

        signed variant = -1;
        struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;

        for (unsigned i = 0; i < variants->variant_count; ++i) {
                if (panfrost_variant_matches(ctx, &variants->variants[i], type)) {
                        variant = i;
                        break;
                }
        }

        if (variant == -1) {
                /* No variant matched, so create a new one */
                variant = variants->variant_count++;
                assert(variants->variant_count < MAX_SHADER_VARIANTS);

                struct panfrost_shader_state *v =
                        &variants->variants[variant];

                if (type == PIPE_SHADER_FRAGMENT) {
                        v->alpha_state = ctx->depth_stencil->alpha;

                        if (ctx->rasterizer) {
                                v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable;
                                v->point_sprite_upper_left =
                                        ctx->rasterizer->base.sprite_coord_mode ==
                                        PIPE_SPRITE_COORD_UPPER_LEFT;
                        }
                }

                variants->variants[variant].tripipe = malloc(sizeof(struct mali_shader_meta));
        }

        /* Select this variant */
        variants->active_variant = variant;

        struct panfrost_shader_state *shader_state = &variants->variants[variant];
        assert(panfrost_variant_matches(ctx, shader_state, type));

        /* We finally have a variant, so compile it */

        if (!shader_state->compiled) {
                uint64_t outputs_written = 0;

                panfrost_shader_compile(ctx, shader_state->tripipe,
                                        variants->base.type,
                                        variants->base.type == PIPE_SHADER_IR_NIR ?
                                        variants->base.ir.nir :
                                        variants->base.tokens,
                                        tgsi_processor_to_shader_stage(type), shader_state,
                                        &outputs_written);

                shader_state->compiled = true;

                /* Fixup the stream out information, since what Gallium returns
                 * normally is mildly insane */

                shader_state->stream_output = variants->base.stream_output;
                shader_state->so_mask =
                        update_so_info(&shader_state->stream_output, outputs_written);
        }
}
static void
panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
{
        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
}

static void
panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
{
        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
}
static void
panfrost_set_vertex_buffers(
        struct pipe_context *pctx,
        unsigned start_slot,
        unsigned num_buffers,
        const struct pipe_vertex_buffer *buffers)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers, start_slot, num_buffers);
}
static void
panfrost_set_constant_buffer(
        struct pipe_context *pctx,
        enum pipe_shader_type shader, uint index,
        const struct pipe_constant_buffer *buf)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];

        util_copy_constant_buffer(&pbuf->cb[index], buf);

        unsigned mask = (1 << index);

        if (unlikely(!buf)) {
                pbuf->enabled_mask &= ~mask;
                pbuf->dirty_mask &= ~mask;
                return;
        }

        pbuf->enabled_mask |= mask;
        pbuf->dirty_mask |= mask;
}
static void
panfrost_set_stencil_ref(
        struct pipe_context *pctx,
        const struct pipe_stencil_ref *ref)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->stencil_ref = *ref;

        /* Shader core dirty */
        ctx->dirty |= PAN_DIRTY_FS;
}
static enum mali_texture_type
panfrost_translate_texture_type(enum pipe_texture_target t) {
        switch (t) {
        case PIPE_BUFFER:
        case PIPE_TEXTURE_1D:
        case PIPE_TEXTURE_1D_ARRAY:
                return MALI_TEX_1D;

        case PIPE_TEXTURE_2D:
        case PIPE_TEXTURE_2D_ARRAY:
        case PIPE_TEXTURE_RECT:
                return MALI_TEX_2D;

        case PIPE_TEXTURE_3D:
                return MALI_TEX_3D;

        case PIPE_TEXTURE_CUBE:
        case PIPE_TEXTURE_CUBE_ARRAY:
                return MALI_TEX_CUBE;

        default:
                unreachable("Unknown target");
        }
}
static struct pipe_sampler_view *
panfrost_create_sampler_view(
        struct pipe_context *pctx,
        struct pipe_resource *texture,
        const struct pipe_sampler_view *template)
{
        struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view);
        int bytes_per_pixel = util_format_get_blocksize(texture->format);

        pipe_reference(NULL, &texture->reference);

        struct panfrost_resource *prsrc = (struct panfrost_resource *) texture;

        so->base = *template;
        so->base.texture = texture;
        so->base.reference.count = 1;
        so->base.context = pctx;

        /* sampler_views correspond to texture descriptors, minus the texture
         * (data) itself. So, we serialise the descriptor here and cache it for
         * later. */

        /* TODO: Detect from format better */
        const struct util_format_description *desc = util_format_description(prsrc->base.format);

        unsigned char user_swizzle[4] = {
                template->swizzle_r,
                template->swizzle_g,
                template->swizzle_b,
                template->swizzle_a
        };

        enum mali_format format = panfrost_find_format(desc);
        /* Check if we need to set a custom stride by computing the "expected"
         * stride and comparing it to what the BO actually wants. Only applies
         * to linear textures, since tiled/compressed textures have strict
         * alignment requirements for their strides as it is */
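
        /* Illustrative example (hypothetical numbers, not from the original
         * source): a 100-texel-wide RGBA8 level computes an expected stride of
         * 100 * 4 = 400 bytes; if the BO was allocated with its rows padded to
         * 512 bytes, the two disagree and manual_stride must be set. */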
        unsigned first_level = template->u.tex.first_level;
        unsigned last_level = template->u.tex.last_level;

        if (prsrc->layout == PAN_LINEAR) {
                for (unsigned l = first_level; l <= last_level; ++l) {
                        unsigned actual_stride = prsrc->slices[l].stride;
                        unsigned width = u_minify(texture->width0, l);
                        unsigned comp_stride = width * bytes_per_pixel;

                        if (comp_stride != actual_stride) {
                                so->manual_stride = true;
                                break;
                        }
                }
        }
        /* In the hardware, array_size refers specifically to array textures,
         * whereas in Gallium, it also covers cubemaps */
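
        /* For example, a (non-array) cubemap reaches us from Gallium with
         * array_size == 6 (one entry per face), while the hardware expects
         * the six faces to be implicit, i.e. an array_size of 1 after the
         * divide below. */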
        unsigned array_size = texture->array_size;

        if (template->target == PIPE_TEXTURE_CUBE) {
                /* TODO: Cubemap arrays */
                assert(array_size == 6);
                array_size /= 6;
        }
        struct mali_texture_descriptor texture_descriptor = {
                .width = MALI_POSITIVE(u_minify(texture->width0, first_level)),
                .height = MALI_POSITIVE(u_minify(texture->height0, first_level)),
                .depth = MALI_POSITIVE(u_minify(texture->depth0, first_level)),
                .array_size = MALI_POSITIVE(array_size),

                .format = {
                        .swizzle = panfrost_translate_swizzle_4(desc->swizzle),
                        .format = format,

                        .srgb = desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB,
                        .type = panfrost_translate_texture_type(template->target),
                },

                .swizzle = panfrost_translate_swizzle_4(user_swizzle)
        };

        texture_descriptor.nr_mipmap_levels = last_level - first_level;

        so->hw = texture_descriptor;

        return (struct pipe_sampler_view *) so;
}
static void
panfrost_set_sampler_views(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_views,
        struct pipe_sampler_view **views)
{
        struct panfrost_context *ctx = pan_context(pctx);

        assert(start_slot == 0);

        unsigned new_nr = 0;
        for (unsigned i = 0; i < num_views; ++i) {
                if (views[i])
                        new_nr = i + 1;
        }

        ctx->sampler_view_count[shader] = new_nr;
        memcpy(ctx->sampler_views[shader], views, num_views * sizeof (void *));

        ctx->dirty |= PAN_DIRTY_TEXTURES;
}
static void
panfrost_sampler_view_destroy(
        struct pipe_context *pctx,
        struct pipe_sampler_view *view)
{
        pipe_resource_reference(&view->texture, NULL);
        ralloc_free(view);
}
static void
panfrost_set_shader_buffers(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start, unsigned count,
        const struct pipe_shader_buffer *buffers,
        unsigned writable_bitmask)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_set_shader_buffers_mask(ctx->ssbo[shader], &ctx->ssbo_mask[shader],
                                     buffers, start, count);
}
/* Hints that a framebuffer should use AFBC where possible */

static void
panfrost_hint_afbc(
        struct panfrost_screen *screen,
        const struct pipe_framebuffer_state *fb)
{
        /* AFBC implementation incomplete; hide it */
        if (!(pan_debug & PAN_DBG_AFBC)) return;

        /* Hint AFBC to the resources bound to each color buffer */

        for (unsigned i = 0; i < fb->nr_cbufs; ++i) {
                struct pipe_surface *surf = fb->cbufs[i];
                struct panfrost_resource *rsrc = pan_resource(surf->texture);
                panfrost_resource_hint_layout(screen, rsrc, PAN_AFBC, 1);
        }

        /* Also hint it to the depth buffer */

        if (fb->zsbuf) {
                struct panfrost_resource *rsrc = pan_resource(fb->zsbuf->texture);
                panfrost_resource_hint_layout(screen, rsrc, PAN_AFBC, 1);
        }
}
static void
panfrost_set_framebuffer_state(struct pipe_context *pctx,
                               const struct pipe_framebuffer_state *fb)
{
        struct panfrost_context *ctx = pan_context(pctx);

        /* Flush when switching framebuffers, but not if the framebuffer
         * state is being restored by u_blitter
         */

        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
        bool is_scanout = panfrost_is_scanout(ctx);
        bool has_draws = job->last_job.gpu;

        /* Bail out early when the current and new states are the same. */
        if (util_framebuffer_state_equal(&ctx->pipe_framebuffer, fb))
                return;

        /* The wallpaper logic sets a new FB state before doing the blit and
         * restores the old one when it's done. Those FB states are reported to
         * be different because the surfaces they point to are different,
         * but those surfaces actually point to the same cbufs/zbufs. In that
         * case we definitely don't want new FB descs to be emitted/attached
         * since the job is expected to be flushed just after the blit is done,
         * so let's just copy the new state and return here.
         */
        if (ctx->wallpaper_batch) {
                util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
                return;
        }

        if (!is_scanout || has_draws)
                panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
        else
                assert(!ctx->payloads[PIPE_SHADER_VERTEX].postfix.framebuffer &&
                       !ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.framebuffer);

        /* Invalidate the FBO job cache since we've just been assigned a new
         * FB state */
        ctx->job = NULL;

        util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);

        /* Given that we're rendering, we'd love to have compression */
        struct panfrost_screen *screen = pan_screen(ctx->base.screen);

        panfrost_hint_afbc(screen, &ctx->pipe_framebuffer);
        for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i)
                ctx->payloads[i].postfix.framebuffer = 0;
}
static void *
panfrost_create_depth_stencil_state(struct pipe_context *pipe,
                                    const struct pipe_depth_stencil_alpha_state *depth_stencil)
{
        return mem_dup(depth_stencil, sizeof(*depth_stencil));
}
static void
panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
                                  void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct pipe_depth_stencil_alpha_state *depth_stencil = cso;
        ctx->depth_stencil = depth_stencil;

        if (!depth_stencil)
                return;

        /* Alpha does not exist in the hardware (it's not in ES3), so it's
         * emulated in the fragment shader */
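
        /* (This ties into panfrost_variant_matches above: the alpha test
         * state is part of the fragment shader variant key, so changing it
         * may require selecting, or compiling, a different variant.) */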
        if (depth_stencil->alpha.enabled) {
                /* We need to trigger a new shader (maybe) */
                ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
        }

        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_STENCIL_TEST, depth_stencil->stencil[0].enabled);

        panfrost_make_stencil_state(&depth_stencil->stencil[0], &ctx->fragment_shader_core.stencil_front);
        ctx->fragment_shader_core.stencil_mask_front = depth_stencil->stencil[0].writemask;

        /* If back-stencil is not enabled, use the front values */
        bool back_enab = ctx->depth_stencil->stencil[1].enabled;
        unsigned back_index = back_enab ? 1 : 0;

        panfrost_make_stencil_state(&depth_stencil->stencil[back_index], &ctx->fragment_shader_core.stencil_back);
        ctx->fragment_shader_core.stencil_mask_back = depth_stencil->stencil[back_index].writemask;

        /* Depth state (TODO: Refactor) */
        SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_DEPTH_TEST, depth_stencil->depth.enabled);

        int func = depth_stencil->depth.enabled ? depth_stencil->depth.func : PIPE_FUNC_ALWAYS;

        ctx->fragment_shader_core.unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
        ctx->fragment_shader_core.unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(func));

        /* Bounds test not implemented */
        assert(!depth_stencil->depth.bounds_test);

        ctx->dirty |= PAN_DIRTY_FS;
}
static void
panfrost_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
{
        free(depth);
}
static void
panfrost_set_sample_mask(struct pipe_context *pipe,
                         unsigned sample_mask)
{
}
static void
panfrost_set_clip_state(struct pipe_context *pipe,
                        const struct pipe_clip_state *clip)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}
static void
panfrost_set_viewport_states(struct pipe_context *pipe,
                             unsigned start_slot,
                             unsigned num_viewports,
                             const struct pipe_viewport_state *viewports)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_viewports == 1);

        ctx->pipe_viewport = *viewports;
}
static void
panfrost_set_scissor_states(struct pipe_context *pipe,
                            unsigned start_slot,
                            unsigned num_scissors,
                            const struct pipe_scissor_state *scissors)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_scissors == 1);

        ctx->scissor = *scissors;
}
static void
panfrost_set_polygon_stipple(struct pipe_context *pipe,
                             const struct pipe_poly_stipple *stipple)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}
static void
panfrost_set_active_query_state(struct pipe_context *pipe,
                                bool enable)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->active_queries = enable;
}
static void
panfrost_destroy(struct pipe_context *pipe)
{
        struct panfrost_context *panfrost = pan_context(pipe);
        struct panfrost_screen *screen = pan_screen(pipe->screen);

        if (panfrost->blitter)
                util_blitter_destroy(panfrost->blitter);

        if (panfrost->blitter_wallpaper)
                util_blitter_destroy(panfrost->blitter_wallpaper);

        panfrost_drm_free_slab(screen, &panfrost->scratchpad);
        panfrost_drm_free_slab(screen, &panfrost->tiler_heap);
        panfrost_drm_free_slab(screen, &panfrost->tiler_dummy);

        ralloc_free(pipe);
}
static struct pipe_query *
panfrost_create_query(struct pipe_context *pipe,
                      unsigned type,
                      unsigned index)
{
        struct panfrost_query *q = rzalloc(pipe, struct panfrost_query);

        q->type = type;
        q->index = index;

        return (struct pipe_query *) q;
}
static void
panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
        ralloc_free(q);
}
static bool
panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
                /* Allocate a word for the query results to be stored */
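                /* (The GPU writes the number of passing samples into this
                 * word as rendering proceeds; panfrost_get_query_result()
                 * below flushes the frame and reads it back.) */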
                query->transfer = panfrost_allocate_transient(ctx, sizeof(unsigned));
                ctx->occlusion_query = query;
                break;

        /* Geometry statistics are computed in the driver. XXX: geom/tess
         * shaders. */

        case PIPE_QUERY_PRIMITIVES_GENERATED:
                query->start = ctx->prims_generated;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                query->start = ctx->tf_prims_generated;
                break;

        default:
                fprintf(stderr, "Skipping query %d\n", query->type);
                break;
        }

        return true;
}
static bool
panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
                ctx->occlusion_query = NULL;
                break;
        case PIPE_QUERY_PRIMITIVES_GENERATED:
                query->end = ctx->prims_generated;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                query->end = ctx->tf_prims_generated;
                break;
        }

        return true;
}
static bool
panfrost_get_query_result(struct pipe_context *pipe,
                          struct pipe_query *q,
                          bool wait,
                          union pipe_query_result *vresult)
{
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
                panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);

                /* Read back the query results */
                unsigned *result = (unsigned *) query->transfer.cpu;
                unsigned passed = *result;

                if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
                        vresult->u64 = passed;
                } else {
                        vresult->b = !!passed;
                }

                break;

        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);
                vresult->u64 = query->end - query->start;
                break;

        default:
                DBG("Skipped query get %d\n", query->type);
                break;
        }

        return true;
}
static struct pipe_stream_output_target *
panfrost_create_stream_output_target(struct pipe_context *pctx,
                                     struct pipe_resource *prsc,
                                     unsigned buffer_offset,
                                     unsigned buffer_size)
{
        struct pipe_stream_output_target *target;

        target = rzalloc(pctx, struct pipe_stream_output_target);

        if (!target)
                return NULL;

        pipe_reference_init(&target->reference, 1);
        pipe_resource_reference(&target->buffer, prsc);

        target->context = pctx;
        target->buffer_offset = buffer_offset;
        target->buffer_size = buffer_size;

        return target;
}
static void
panfrost_stream_output_target_destroy(struct pipe_context *pctx,
                                      struct pipe_stream_output_target *target)
{
        pipe_resource_reference(&target->buffer, NULL);
        ralloc_free(target);
}
static void
panfrost_set_stream_output_targets(struct pipe_context *pctx,
                                   unsigned num_targets,
                                   struct pipe_stream_output_target **targets,
                                   const unsigned *offsets)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_streamout *so = &ctx->streamout;

        assert(num_targets <= ARRAY_SIZE(so->targets));

        for (unsigned i = 0; i < num_targets; i++) {
                if (offsets[i] != -1)
                        so->offsets[i] = offsets[i];

                pipe_so_target_reference(&so->targets[i], targets[i]);
        }

        /* Release any stale references past the new target count; starting
         * at zero here would immediately drop the references taken above */
        for (unsigned i = num_targets; i < so->num_targets; i++)
                pipe_so_target_reference(&so->targets[i], NULL);

        so->num_targets = num_targets;
}
static void
panfrost_setup_hardware(struct panfrost_context *ctx)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);

        panfrost_drm_allocate_slab(screen, &ctx->scratchpad, 64*4, false, 0, 0, 0);
        panfrost_drm_allocate_slab(screen, &ctx->tiler_heap, 4096, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
        panfrost_drm_allocate_slab(screen, &ctx->tiler_dummy, 1, false, PAN_ALLOCATE_INVISIBLE, 0, 0);
}
/* New context creation, which also does hardware initialisation since I don't
 * know the better way to structure this :smirk: */

struct pipe_context *
panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
{
        struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context);
        struct panfrost_screen *pscreen = pan_screen(screen);
        memset(ctx, 0, sizeof(*ctx));
        struct pipe_context *gallium = (struct pipe_context *) ctx;

        ctx->is_t6xx = pscreen->gpu_id < 0x0700; /* Literally, "earlier than T700" */
        gallium->screen = screen;

        gallium->destroy = panfrost_destroy;

        gallium->set_framebuffer_state = panfrost_set_framebuffer_state;

        gallium->flush = panfrost_flush;
        gallium->clear = panfrost_clear;
        gallium->draw_vbo = panfrost_draw_vbo;

        gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
        gallium->set_constant_buffer = panfrost_set_constant_buffer;
        gallium->set_shader_buffers = panfrost_set_shader_buffers;

        gallium->set_stencil_ref = panfrost_set_stencil_ref;

        gallium->create_sampler_view = panfrost_create_sampler_view;
        gallium->set_sampler_views = panfrost_set_sampler_views;
        gallium->sampler_view_destroy = panfrost_sampler_view_destroy;

        gallium->create_rasterizer_state = panfrost_create_rasterizer_state;
        gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
        gallium->delete_rasterizer_state = panfrost_generic_cso_delete;

        gallium->create_vertex_elements_state = panfrost_create_vertex_elements_state;
        gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
        gallium->delete_vertex_elements_state = panfrost_generic_cso_delete;

        gallium->create_fs_state = panfrost_create_shader_state;
        gallium->delete_fs_state = panfrost_delete_shader_state;
        gallium->bind_fs_state = panfrost_bind_fs_state;

        gallium->create_vs_state = panfrost_create_shader_state;
        gallium->delete_vs_state = panfrost_delete_shader_state;
        gallium->bind_vs_state = panfrost_bind_vs_state;

        gallium->create_sampler_state = panfrost_create_sampler_state;
        gallium->delete_sampler_state = panfrost_generic_cso_delete;
        gallium->bind_sampler_states = panfrost_bind_sampler_states;

        gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
        gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
        gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;

        gallium->set_sample_mask = panfrost_set_sample_mask;

        gallium->set_clip_state = panfrost_set_clip_state;
        gallium->set_viewport_states = panfrost_set_viewport_states;
        gallium->set_scissor_states = panfrost_set_scissor_states;
        gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
        gallium->set_active_query_state = panfrost_set_active_query_state;

        gallium->create_query = panfrost_create_query;
        gallium->destroy_query = panfrost_destroy_query;
        gallium->begin_query = panfrost_begin_query;
        gallium->end_query = panfrost_end_query;
        gallium->get_query_result = panfrost_get_query_result;

        gallium->create_stream_output_target = panfrost_create_stream_output_target;
        gallium->stream_output_target_destroy = panfrost_stream_output_target_destroy;
        gallium->set_stream_output_targets = panfrost_set_stream_output_targets;
        panfrost_resource_context_init(gallium);
        panfrost_blend_context_init(gallium);
        panfrost_compute_context_init(gallium);

        panfrost_drm_init_context(ctx);

        panfrost_setup_hardware(ctx);

        gallium->stream_uploader = u_upload_create_default(gallium);
        gallium->const_uploader = gallium->stream_uploader;
        assert(gallium->stream_uploader);
        /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */
        ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1;
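
        /* (For reference: this sets one bit per pipe_prim_type from
         * PIPE_PRIM_POINTS up to and including PIPE_PRIM_POLYGON; e.g. if
         * PIPE_PRIM_POLYGON == 9, the mask is (1 << 10) - 1 == 0x3ff.) */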
        ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);

        ctx->blitter = util_blitter_create(gallium);
        ctx->blitter_wallpaper = util_blitter_create(gallium);

        assert(ctx->blitter);
        assert(ctx->blitter_wallpaper);

        /* Prepare for render! */

        panfrost_job_init(ctx);
        panfrost_emit_vertex_payload(ctx);
        panfrost_emit_tiler_payload(ctx);
        panfrost_invalidate_frame(ctx);
        panfrost_default_shader_backend(ctx);
);