#include "pan_blend_shaders.h"
#include "pan_cmdstream.h"
#include "pan_util.h"
-#include "pandecode/decode.h"
+#include "decode.h"
+#include "util/pan_lower_framebuffer.h"
struct midgard_tiler_descriptor
panfrost_emit_midg_tiler(struct panfrost_batch *batch, unsigned vertex_count)
{
- struct panfrost_screen *screen = pan_screen(batch->ctx->base.screen);
- bool hierarchy = !(screen->quirks & MIDGARD_NO_HIER_TILING);
+ struct panfrost_device *device = pan_device(batch->ctx->base.screen);
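+ /* Hierarchical tiling is a hardware quirk: GPUs without it (e.g. T720)
+ * fall back to a flat polygon list */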
+ bool hierarchy = !(device->quirks & MIDGARD_NO_HIER_TILING);
struct midgard_tiler_descriptor t = {0};
unsigned height = batch->key.height;
unsigned width = batch->key.width;
t.polygon_list_size = panfrost_tiler_full_size(
width, height, t.hierarchy_mask, hierarchy);
- /* Sanity check */
-
if (vertex_count) {
- struct panfrost_bo *tiler_heap;
-
- tiler_heap = panfrost_batch_get_tiler_heap(batch);
t.polygon_list = panfrost_batch_get_polygon_list(batch,
header_size +
t.polygon_list_size);
- /* Allow the entire tiler heap */
- t.heap_start = tiler_heap->gpu;
- t.heap_end = tiler_heap->gpu + tiler_heap->size;
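+ /* The tiler heap now lives on the device and is shared by every batch */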
+ t.heap_start = device->tiler_heap->gpu;
+ t.heap_end = device->tiler_heap->gpu + device->tiler_heap->size;
} else {
struct panfrost_bo *tiler_dummy;
panfrost_clear(
struct pipe_context *pipe,
unsigned buffers,
+ const struct pipe_scissor_state *scissor_state,
const union pipe_color_union *color,
double depth, unsigned stencil)
{
* fragment jobs.
*/
struct panfrost_batch *batch = panfrost_get_fresh_batch_for_fbo(ctx);
-
- panfrost_batch_add_fbo_bos(batch);
panfrost_batch_clear(batch, buffers, color, depth, stencil);
}
-/* Reset per-frame context, called on context initialisation as well as after
- * flushing a frame */
-
-void
-panfrost_invalidate_frame(struct panfrost_context *ctx)
-{
- for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i)
- ctx->payloads[i].postfix.shared_memory = 0;
-
- /* TODO: When does this need to be handled? */
- ctx->active_queries = true;
-}
-
-/* In practice, every field of these payloads should be configurable
- * arbitrarily, which means these functions are basically catch-all's for
- * as-of-yet unwavering unknowns */
-
-static void
-panfrost_emit_vertex_payload(struct panfrost_context *ctx)
-{
- /* 0x2 bit clear on 32-bit T6XX */
-
- struct midgard_payload_vertex_tiler payload = {
- .gl_enables = 0x4 | 0x2,
- };
-
- /* Vertex and compute are closely coupled, so share a payload */
-
- memcpy(&ctx->payloads[PIPE_SHADER_VERTEX], &payload, sizeof(payload));
- memcpy(&ctx->payloads[PIPE_SHADER_COMPUTE], &payload, sizeof(payload));
-}
-
bool
panfrost_writes_point_size(struct panfrost_context *ctx)
{
assert(ctx->shader[PIPE_SHADER_VERTEX]);
struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
- return vs->writes_point_size && ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode == MALI_POINTS;
-}
-
-/* Stage the attribute descriptors so we can adjust src_offset
- * to let BOs align nicely */
-
-static void
-panfrost_stage_attributes(struct panfrost_context *ctx)
-{
- struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
- struct panfrost_vertex_state *so = ctx->vertex;
-
- size_t sz = sizeof(struct mali_attr_meta) * PAN_MAX_ATTRIBUTE;
- struct panfrost_transfer transfer = panfrost_allocate_transient(batch, sz);
- struct mali_attr_meta *target = (struct mali_attr_meta *) transfer.cpu;
-
- /* Copy as-is for the first pass */
- memcpy(target, so->hw, sz);
-
- /* Fixup offsets for the second pass. Recall that the hardware
- * calculates attribute addresses as:
- *
- * addr = base + (stride * vtx) + src_offset;
- *
- * However, on Mali, base must be aligned to 64-bytes, so we
- * instead let:
- *
- * base' = base & ~63 = base - (base & 63)
- *
- * To compensate when using base' (see emit_vertex_data), we have
- * to adjust src_offset by the masked off piece:
- *
- * addr' = base' + (stride * vtx) + (src_offset + (base & 63))
- * = base - (base & 63) + (stride * vtx) + src_offset + (base & 63)
- * = base + (stride * vtx) + src_offset
- * = addr;
- *
- * QED.
- */
-
- unsigned start = ctx->payloads[PIPE_SHADER_VERTEX].offset_start;
-
- for (unsigned i = 0; i < so->num_elements; ++i) {
- unsigned vbi = so->pipe[i].vertex_buffer_index;
- struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
- struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);
- mali_ptr addr = rsrc->bo->gpu + buf->buffer_offset;
-
- /* Adjust by the masked off bits of the offset. Make sure we
- * read src_offset from so->hw (which is not GPU visible)
- * rather than target (which is) due to caching effects */
-
- unsigned src_offset = so->hw[i].src_offset;
- src_offset += (addr & 63);
-
- /* Also, somewhat obscurely per-instance data needs to be
- * offset in response to a delayed start in an indexed draw */
-
- if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
- src_offset -= buf->stride * start;
-
- target[i].src_offset = src_offset;
- }
-
- /* Let's also include vertex builtins */
-
- struct mali_attr_meta builtin = {
- .format = MALI_R32UI,
- .swizzle = panfrost_get_default_swizzle(1)
- };
-
- /* See mali_attr_meta specification for the magic number */
-
- builtin.index = so->vertexid_index;
- memcpy(&target[PAN_VERTEX_ID], &builtin, 4);
-
- builtin.index = so->vertexid_index + 1;
- memcpy(&target[PAN_INSTANCE_ID], &builtin, 4);
-
- ctx->payloads[PIPE_SHADER_VERTEX].postfix.attribute_meta = transfer.gpu;
-}
-
-/* Compute number of UBOs active (more specifically, compute the highest UBO
- * number addressable -- if there are gaps, include them in the count anyway).
- * We always include UBO #0 in the count, since we *need* uniforms enabled for
- * sysvals. */
-
-unsigned
-panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage)
-{
- unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1;
- return 32 - __builtin_clz(mask);
-}
-
-/* Go through dirty flags and actualise them in the cmdstream. */
-
-static void
-panfrost_emit_for_draw(struct panfrost_context *ctx)
-{
- struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
-
- panfrost_batch_add_fbo_bos(batch);
-
- for (int i = 0; i <= PIPE_SHADER_FRAGMENT; ++i)
- panfrost_vt_attach_framebuffer(ctx, &ctx->payloads[i]);
-
- panfrost_emit_vertex_data(batch);
-
- /* Varyings emitted for -all- geometry */
- unsigned total_count = ctx->padded_count * ctx->instance_count;
- panfrost_emit_varying_descriptor(ctx, total_count);
-
- panfrost_batch_set_requirements(batch);
-
- panfrost_vt_update_rasterizer(ctx, &ctx->payloads[PIPE_SHADER_FRAGMENT]);
- panfrost_vt_update_occlusion_query(ctx, &ctx->payloads[PIPE_SHADER_FRAGMENT]);
-
- panfrost_emit_shader_meta(batch, PIPE_SHADER_VERTEX,
- &ctx->payloads[PIPE_SHADER_VERTEX]);
- panfrost_emit_shader_meta(batch, PIPE_SHADER_FRAGMENT,
- &ctx->payloads[PIPE_SHADER_FRAGMENT]);
-
- /* We stage to transient, so always dirty.. */
- if (ctx->vertex)
- panfrost_stage_attributes(ctx);
-
- for (int i = 0; i <= PIPE_SHADER_FRAGMENT; ++i) {
- panfrost_emit_sampler_descriptors(batch, i, &ctx->payloads[i]);
- panfrost_emit_texture_descriptors(batch, i, &ctx->payloads[i]);
- panfrost_emit_const_buf(batch, i, &ctx->payloads[i]);
- }
-
- /* TODO: Upload the viewport somewhere more appropriate */
-
- panfrost_emit_viewport(batch, &ctx->payloads[PIPE_SHADER_FRAGMENT]);
-}
-
-/* Corresponds to exactly one draw, but does not submit anything */
-
-static void
-panfrost_queue_draw(struct panfrost_context *ctx)
-{
- /* Handle dirty flags now */
- panfrost_emit_for_draw(ctx);
-
- /* If rasterizer discard is enable, only submit the vertex */
-
- bool rasterizer_discard = ctx->rasterizer
- && ctx->rasterizer->base.rasterizer_discard;
-
-
- struct midgard_payload_vertex_tiler *vertex_payload = &ctx->payloads[PIPE_SHADER_VERTEX];
- struct midgard_payload_vertex_tiler *tiler_payload = &ctx->payloads[PIPE_SHADER_FRAGMENT];
-
- struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
- bool wallpapering = ctx->wallpaper_batch && batch->tiler_dep;
-
- if (wallpapering) {
- /* Inject in reverse order, with "predicted" job indices. THIS IS A HACK XXX */
- panfrost_new_job(batch, JOB_TYPE_TILER, false, batch->job_index + 2, tiler_payload, sizeof(*tiler_payload), true);
- panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0, vertex_payload, sizeof(*vertex_payload), true);
- } else {
- unsigned vertex = panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0, vertex_payload, sizeof(*vertex_payload), false);
-
- if (!rasterizer_discard)
- panfrost_new_job(batch, JOB_TYPE_TILER, false, vertex, tiler_payload, sizeof(*tiler_payload), false);
- }
-
- panfrost_batch_adjust_stack_size(batch);
+ return vs->writes_point_size && ctx->active_prim == PIPE_PRIM_POINTS;
}
/* The entire frame is in memory -- send it off to the kernel! */
unsigned flags)
{
struct panfrost_context *ctx = pan_context(pipe);
- struct util_dynarray fences;
+ struct panfrost_device *dev = pan_device(pipe->screen);
+ uint32_t syncobj = 0;
- /* We must collect the fences before the flush is done, otherwise we'll
- * lose track of them.
- */
- if (fence) {
- util_dynarray_init(&fences, NULL);
- hash_table_foreach(ctx->batches, hentry) {
- struct panfrost_batch *batch = hentry->data;
-
- panfrost_batch_fence_reference(batch->out_sync);
- util_dynarray_append(&fences,
- struct panfrost_batch_fence *,
- batch->out_sync);
- }
- }
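+ /* A single syncobj, signalled when the last batch finishes, replaces the
+ * old array of per-batch fences */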
+ if (fence)
+ drmSyncobjCreate(dev->fd, 0, &syncobj);
/* Submit all pending jobs */
- panfrost_flush_all_batches(ctx, false);
+ panfrost_flush_all_batches(ctx, syncobj);
if (fence) {
- struct panfrost_fence *f = panfrost_fence_create(ctx, &fences);
+ struct panfrost_fence *f = panfrost_fence_create(ctx, syncobj);
pipe->screen->fence_reference(pipe->screen, fence, NULL);
*fence = (struct pipe_fence_handle *)f;
-
- util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence)
- panfrost_batch_fence_unreference(*fence);
-
- util_dynarray_fini(&fences);
}
- if (pan_debug & PAN_DBG_TRACE)
+ if (dev->debug & PAN_DBG_TRACE)
pandecode_next_frame();
}
-#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c;
+static void
+panfrost_texture_barrier(struct pipe_context *pipe, unsigned flags)
+{
+ struct panfrost_context *ctx = pan_context(pipe);
+ panfrost_flush_all_batches(ctx, 0);
+}
+
+#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_DRAW_MODE_##c;
static int
g2m_draw_mode(enum pipe_prim_type mode)
#undef DEFINE_CASE
-static unsigned
-panfrost_translate_index_size(unsigned size)
-{
- switch (size) {
- case 1:
- return MALI_DRAW_INDEXED_UINT8;
-
- case 2:
- return MALI_DRAW_INDEXED_UINT16;
-
- case 4:
- return MALI_DRAW_INDEXED_UINT32;
-
- default:
- unreachable("Invalid index size");
- }
-}
-
-/* Gets a GPU address for the associated index buffer. Only gauranteed to be
- * good for the duration of the draw (transient), could last longer. Also get
- * the bounds on the index buffer for the range accessed by the draw. We do
- * these operations together because there are natural optimizations which
- * require them to be together. */
-
-static mali_ptr
-panfrost_get_index_buffer_bounded(struct panfrost_context *ctx, const struct pipe_draw_info *info, unsigned *min_index, unsigned *max_index)
-{
- struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);
-
- off_t offset = info->start * info->index_size;
- struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
- mali_ptr out = 0;
-
- bool needs_indices = true;
-
- if (info->max_index != ~0u) {
- *min_index = info->min_index;
- *max_index = info->max_index;
- needs_indices = false;
- }
-
- if (!info->has_user_indices) {
- /* Only resources can be directly mapped */
- panfrost_batch_add_bo(batch, rsrc->bo,
- PAN_BO_ACCESS_SHARED |
- PAN_BO_ACCESS_READ |
- PAN_BO_ACCESS_VERTEX_TILER);
- out = rsrc->bo->gpu + offset;
-
- /* Check the cache */
- needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache, info->start, info->count,
- min_index, max_index);
- } else {
- /* Otherwise, we need to upload to transient memory */
- const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
- out = panfrost_upload_transient(batch, ibuf8 + offset, info->count * info->index_size);
- }
-
- if (needs_indices) {
- /* Fallback */
- u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
-
- if (!info->has_user_indices) {
- panfrost_minmax_cache_add(rsrc->index_cache, info->start, info->count,
- *min_index, *max_index);
- }
- }
-
-
- return out;
-}
-
static bool
panfrost_scissor_culls_everything(struct panfrost_context *ctx)
{
/* Check if we're scissoring at all */
- if (!(ctx->rasterizer && ctx->rasterizer->base.scissor))
+ if (!ctx->rasterizer->base.scissor)
return false;
return (ss->minx == ss->maxx) || (ss->miny == ss->maxy);
ctx->tf_prims_generated += prims;
}
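+/* Advance each stream output offset by the number of vertices written by
+ * this draw, computed from the active primitive type */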
+static void
+panfrost_update_streamout_offsets(struct panfrost_context *ctx)
+{
+ for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
+ unsigned count;
+
+ count = u_stream_outputs_for_vertices(ctx->active_prim,
+ ctx->vertex_count);
+ ctx->streamout.offsets[i] += count;
+ }
+}
+
static void
panfrost_draw_vbo(
struct pipe_context *pipe,
assert(ctx->rasterizer != NULL);
if (!(ctx->draw_modes & (1 << mode))) {
- if (mode == PIPE_PRIM_QUADS && info->count == 4 && !ctx->rasterizer->base.flatshade) {
- mode = PIPE_PRIM_TRIANGLE_FAN;
- } else {
- if (info->count < 4) {
- /* Degenerate case? */
- return;
- }
-
- util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
- util_primconvert_draw_vbo(ctx->primconvert, info);
+ if (info->count < 4) {
+ /* Degenerate case? */
return;
}
- }
- ctx->payloads[PIPE_SHADER_VERTEX].offset_start = info->start;
- ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = info->start;
-
- /* Now that we have a guaranteed terminating path, find the job.
- * Assignment commented out to prevent unused warning */
+ util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
+ util_primconvert_draw_vbo(ctx->primconvert, info);
+ return;
+ }
- /* struct panfrost_batch *batch = */ panfrost_get_batch_for_fbo(ctx);
+ /* Now that we have a guaranteed terminating path, find the job. */
- ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode = g2m_draw_mode(mode);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
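+
+ /* Snapshot what this draw requires of the batch (MSAA, depth writes) */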
+ panfrost_batch_set_requirements(batch);
/* Take into account a negative bias */
ctx->vertex_count = info->count + abs(info->index_bias);
ctx->instance_count = info->instance_count;
ctx->active_prim = info->mode;
- /* For non-indexed draws, they're the same */
- unsigned vertex_count = ctx->vertex_count;
-
- unsigned draw_flags = 0;
+ struct mali_vertex_tiler_prefix vertex_prefix, tiler_prefix;
+ struct mali_vertex_tiler_postfix vertex_postfix, tiler_postfix;
+ union midgard_primitive_size primitive_size;
+ unsigned vertex_count;
- /* The draw flags interpret how primitive size is interpreted */
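+ /* Vertex/tiler descriptors are now assembled on-stack per draw: the
+ * prefixes carry dispatch and primitive state, the postfixes carry
+ * pointers to the descriptors emitted below */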
+ panfrost_vt_init(ctx, PIPE_SHADER_VERTEX, &vertex_prefix, &vertex_postfix);
+ panfrost_vt_init(ctx, PIPE_SHADER_FRAGMENT, &tiler_prefix, &tiler_postfix);
- if (panfrost_writes_point_size(ctx))
- draw_flags |= MALI_DRAW_VARYING_SIZE;
-
- if (info->primitive_restart)
- draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
-
- /* These doesn't make much sense */
-
- draw_flags |= 0x3000;
-
- if (ctx->rasterizer && ctx->rasterizer->base.flatshade_first)
- draw_flags |= MALI_DRAW_FLATSHADE_FIRST;
+ panfrost_vt_set_draw_info(ctx, info, g2m_draw_mode(mode),
+ &vertex_postfix, &tiler_prefix,
+ &tiler_postfix, &vertex_count,
+ &ctx->padded_count);
panfrost_statistics_record(ctx, info);
- if (info->index_size) {
- unsigned min_index = 0, max_index = 0;
- ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices =
- panfrost_get_index_buffer_bounded(ctx, info, &min_index, &max_index);
-
- /* Use the corresponding values */
- vertex_count = max_index - min_index + 1;
- ctx->payloads[PIPE_SHADER_VERTEX].offset_start = min_index + info->index_bias;
- ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = min_index + info->index_bias;
-
- ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = -min_index;
- ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(info->count);
-
- draw_flags |= panfrost_translate_index_size(info->index_size);
- } else {
- /* Index count == vertex count, if no indexing is applied, as
- * if it is internally indexed in the expected order */
-
- ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = 0;
- ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(ctx->vertex_count);
-
- /* Reverse index state */
- ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices = (mali_ptr) 0;
- }
-
- /* Dispatch "compute jobs" for the vertex/tiler pair as (1,
- * vertex_count, 1) */
-
- panfrost_pack_work_groups_fused(
- &ctx->payloads[PIPE_SHADER_VERTEX].prefix,
- &ctx->payloads[PIPE_SHADER_FRAGMENT].prefix,
- 1, vertex_count, info->instance_count,
- 1, 1, 1);
-
- ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.unknown_draw = draw_flags;
-
- /* Encode the padded vertex count */
-
- if (info->instance_count > 1) {
- ctx->padded_count = panfrost_padded_vertex_count(vertex_count);
-
- unsigned shift = __builtin_ctz(ctx->padded_count);
- unsigned k = ctx->padded_count >> (shift + 1);
-
- ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = shift;
- ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = shift;
-
- ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = k;
- ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = k;
- } else {
- ctx->padded_count = vertex_count;
-
- /* Reset instancing state */
- ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = 0;
- ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = 0;
- ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = 0;
- ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = 0;
- }
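+ /* Dispatch "compute jobs" for the vertex/tiler pair as
+ * (1, vertex_count, instance_count) */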
+ panfrost_pack_work_groups_fused(&vertex_prefix, &tiler_prefix,
+ 1, vertex_count, info->instance_count,
+ 1, 1, 1);
+
+ /* Emit all sorts of descriptors. */
+ panfrost_emit_vertex_data(batch, &vertex_postfix);
+ panfrost_emit_varying_descriptor(batch,
+ ctx->padded_count *
+ ctx->instance_count,
+ &vertex_postfix, &tiler_postfix,
+ &primitive_size);
+ panfrost_emit_shader_meta(batch, PIPE_SHADER_VERTEX, &vertex_postfix);
+ panfrost_emit_shader_meta(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix);
+ panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_VERTEX, &vertex_postfix);
+ panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix);
+ panfrost_emit_texture_descriptors(batch, PIPE_SHADER_VERTEX, &vertex_postfix);
+ panfrost_emit_texture_descriptors(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix);
+ panfrost_emit_const_buf(batch, PIPE_SHADER_VERTEX, &vertex_postfix);
+ panfrost_emit_const_buf(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix);
+ panfrost_emit_viewport(batch, &tiler_postfix);
+
+ panfrost_vt_update_primitive_size(ctx, &tiler_prefix, &primitive_size);
/* Fire off the draw itself */
- panfrost_queue_draw(ctx);
+ panfrost_emit_vertex_tiler_jobs(batch, &vertex_prefix, &vertex_postfix,
+ &tiler_prefix, &tiler_postfix,
+ &primitive_size);
- /* Increment transform feedback offsets */
-
- for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
- unsigned output_count = u_stream_outputs_for_vertices(
- ctx->active_prim, ctx->vertex_count);
+ /* Adjust the batch stack size based on the new shader stack sizes. */
+ panfrost_batch_adjust_stack_size(batch);
- ctx->streamout.offsets[i] += output_count;
- }
+ /* Increment transform feedback offsets */
+ panfrost_update_streamout_offsets(ctx);
}
/* CSO state */
so->base = *cso;
+ /* Guaranteed with the core GL call, so don't expose ARB_polygon_offset_clamp */
+ assert(cso->offset_clamp == 0.0);
+
return so;
}
if (!hwcso)
return;
- /* Gauranteed with the core GL call, so don't expose ARB_polygon_offset */
- assert(ctx->rasterizer->base.offset_clamp == 0.0);
-
/* Point sprites are emulated */
struct panfrost_shader_state *variant = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
const struct pipe_vertex_element *elements)
{
struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state);
+ struct panfrost_device *dev = pan_device(pctx->screen);
so->num_elements = num_elements;
memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
for (int i = 0; i < num_elements; ++i) {
- so->hw[i].index = i;
-
enum pipe_format fmt = elements[i].src_format;
const struct util_format_description *desc = util_format_description(fmt);
- so->hw[i].unknown1 = 0x2;
- so->hw[i].swizzle = panfrost_get_default_swizzle(desc->nr_channels);
+ unsigned swizzle = 0;
+ if (dev->quirks & HAS_SWIZZLES)
+ swizzle = panfrost_translate_swizzle_4(desc->swizzle);
+ else
+ swizzle = panfrost_bifrost_swizzle(desc->nr_channels);
+
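+ /* Attribute format words pack the swizzle in the low 12 bits with the
+ * Mali format enum above it, hence the shift below */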
+ enum mali_format hw_format = panfrost_pipe_format_table[desc->format].hw;
+ so->formats[i] = (hw_format << 12) | swizzle;
+ assert(hw_format);
+ }
- so->hw[i].format = panfrost_find_format(desc);
+ /* Let's also prepare vertex builtins */
+ if (dev->quirks & HAS_SWIZZLES)
+ so->formats[PAN_VERTEX_ID] = (MALI_R32UI << 12) | panfrost_get_default_swizzle(1);
+ else
+ so->formats[PAN_VERTEX_ID] = (MALI_R32UI << 12) | panfrost_bifrost_swizzle(1);
- /* The field itself should probably be shifted over */
- so->hw[i].src_offset = elements[i].src_offset;
- }
+ if (dev->quirks & HAS_SWIZZLES)
+ so->formats[PAN_INSTANCE_ID] = (MALI_R32UI << 12) | panfrost_get_default_swizzle(1);
+ else
+ so->formats[PAN_INSTANCE_ID] = (MALI_R32UI << 12) | panfrost_bifrost_swizzle(1);
return so;
}
enum pipe_shader_type stage)
{
struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
+ struct panfrost_device *dev = pan_device(pctx->screen);
so->base = *cso;
/* Token deep copy to prevent memory corruption */
so->base.tokens = tgsi_dup_tokens(so->base.tokens);
/* Precompile for shader-db if we need to */
- if (unlikely((pan_debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) {
+ if (unlikely((dev->debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) {
struct panfrost_context *ctx = pan_context(pctx);
struct panfrost_shader_state state;
struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;
if (cso->base.type == PIPE_SHADER_IR_TGSI) {
- DBG("Deleting TGSI shader leaks duplicated tokens\n");
+ /* TODO: leaks TGSI tokens! */
}
for (unsigned i = 0; i < cso->variant_count; ++i) {
const struct pipe_sampler_state *cso)
{
struct panfrost_sampler_state *so = CALLOC_STRUCT(panfrost_sampler_state);
+ struct panfrost_device *device = pan_device(pctx->screen);
+
so->base = *cso;
- panfrost_sampler_desc_init(cso, &so->hw);
+ if (device->quirks & IS_BIFROST)
+ panfrost_sampler_desc_init_bifrost(cso, (struct mali_bifrost_sampler_packed *) &so->hw);
+ else
+ panfrost_sampler_desc_init(cso, &so->hw);
return so;
}
struct panfrost_shader_state *variant,
enum pipe_shader_type type)
{
+ struct panfrost_device *dev = pan_device(ctx->base.screen);
struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base;
- struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;
bool is_fragment = (type == PIPE_SHADER_FRAGMENT);
- if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) {
- /* Make sure enable state is at least the same */
- if (alpha->enabled != variant->alpha_state.enabled) {
- return false;
- }
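+ /* Shaders reading back their outputs (EXT_framebuffer_fetch) are keyed
+ * to the render target formats, so those must match too */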
+ if (variant->outputs_read) {
+ struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
- /* Check that the contents of the test are the same */
- bool same_func = alpha->func == variant->alpha_state.func;
- bool same_ref = alpha->ref_value == variant->alpha_state.ref_value;
+ unsigned i;
+ BITSET_FOREACH_SET(i, &variant->outputs_read, 8) {
+ enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM;
- if (!(same_func && same_ref)) {
- return false;
+ if ((fb->nr_cbufs > i) && fb->cbufs[i])
+ fmt = fb->cbufs[i]->format;
+
+ const struct util_format_description *desc =
+ util_format_description(fmt);
+
+ if (pan_format_class_load(desc, dev->quirks) == PAN_FORMAT_NATIVE)
+ fmt = PIPE_FORMAT_NONE;
+
+ if (variant->rt_formats[i] != fmt)
+ return false;
}
}
+ /* Point sprites are TODO on Bifrost, so always pass there */
if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable |
- variant->point_sprite_mask)) {
+ variant->point_sprite_mask)
+ && !(dev->quirks & IS_BIFROST)) {
/* Ensure the same varyings are turned to point sprites */
if (rasterizer->sprite_coord_enable != variant->point_sprite_mask)
return false;
enum pipe_shader_type type)
{
struct panfrost_context *ctx = pan_context(pctx);
+ struct panfrost_device *dev = pan_device(ctx->base.screen);
ctx->shader[type] = hwcso;
if (!hwcso) return;
&variants->variants[variant];
if (type == PIPE_SHADER_FRAGMENT) {
- v->alpha_state = ctx->depth_stencil->alpha;
+ struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
+ for (unsigned i = 0; i < fb->nr_cbufs; ++i) {
+ enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM;
+
+ if ((fb->nr_cbufs > i) && fb->cbufs[i])
+ fmt = fb->cbufs[i]->format;
- if (ctx->rasterizer) {
+ const struct util_format_description *desc =
+ util_format_description(fmt);
+
+ if (pan_format_class_load(desc, dev->quirks) == PAN_FORMAT_NATIVE)
+ fmt = PIPE_FORMAT_NONE;
+
+ v->rt_formats[i] = fmt;
+ }
+
+ /* Point sprites are TODO on Bifrost */
+ if (ctx->rasterizer && !(dev->quirks & IS_BIFROST)) {
v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable;
v->point_sprite_upper_left =
ctx->rasterizer->base.sprite_coord_mode ==
ctx->stencil_ref = *ref;
}
-static enum mali_texture_type
-panfrost_translate_texture_type(enum pipe_texture_target t) {
- switch (t)
- {
- case PIPE_BUFFER:
- case PIPE_TEXTURE_1D:
- case PIPE_TEXTURE_1D_ARRAY:
- return MALI_TEX_1D;
+void
+panfrost_create_sampler_view_bo(struct panfrost_sampler_view *so,
+ struct pipe_context *pctx,
+ struct pipe_resource *texture)
+{
+ struct panfrost_device *device = pan_device(pctx->screen);
+ struct panfrost_resource *prsrc = (struct panfrost_resource *)texture;
+ enum pipe_format format = so->base.format;
+ assert(prsrc->bo);
- case PIPE_TEXTURE_2D:
- case PIPE_TEXTURE_2D_ARRAY:
- case PIPE_TEXTURE_RECT:
- return MALI_TEX_2D;
+ /* Format to access the stencil portion of a Z32_S8 texture */
+ if (format == PIPE_FORMAT_X32_S8X24_UINT) {
+ assert(prsrc->separate_stencil);
+ texture = &prsrc->separate_stencil->base;
+ prsrc = (struct panfrost_resource *)texture;
+ format = texture->format;
+ }
- case PIPE_TEXTURE_3D:
- return MALI_TEX_3D;
+ const struct util_format_description *desc = util_format_description(format);
- case PIPE_TEXTURE_CUBE:
- case PIPE_TEXTURE_CUBE_ARRAY:
- return MALI_TEX_CUBE;
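+ /* Without native BC4, RGTC is assumed to have been decompressed to plain
+ * RGBA8 on upload, so sample it with the matching format */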
+ bool fake_rgtc = !panfrost_supports_compressed_format(device, MALI_BC4_UNORM);
- default:
- unreachable("Unknown target");
+ if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC && fake_rgtc) {
+ if (desc->is_snorm)
+ format = PIPE_FORMAT_R8G8B8A8_SNORM;
+ else
+ format = PIPE_FORMAT_R8G8B8A8_UNORM;
+ desc = util_format_description(format);
+ }
+
+ so->texture_bo = prsrc->bo->gpu;
+ so->modifier = prsrc->modifier;
+
+ unsigned char user_swizzle[4] = {
+ so->base.swizzle_r,
+ so->base.swizzle_g,
+ so->base.swizzle_b,
+ so->base.swizzle_a
+ };
+
+ /* In the hardware, array_size refers specifically to array textures,
+ * whereas in Gallium, it also covers cubemaps */
+
+ unsigned array_size = texture->array_size;
+ unsigned depth = texture->depth0;
+
+ if (so->base.target == PIPE_TEXTURE_CUBE) {
+ /* TODO: Cubemap arrays */
+ assert(array_size == 6);
+ array_size /= 6;
+ }
+
+ /* MSAA is only supported for 2D textures (and for 2D texture arrays via
+ * an extension that is currently unimplemented) */
+
+ if (so->base.target == PIPE_TEXTURE_2D) {
+ assert(depth == 1);
+ depth = texture->nr_samples;
+ } else {
+ /* MSAA only supported for 2D textures */
+ assert(texture->nr_samples <= 1);
+ }
+
+ enum mali_texture_dimension type =
+ panfrost_translate_texture_dimension(so->base.target);
+
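+ /* Bifrost keeps the texture descriptor in the CSO and only the payload
+ * in the BO; Midgard writes descriptor and payload together into the BO */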
+ if (device->quirks & IS_BIFROST) {
+ unsigned char composed_swizzle[4];
+ util_format_compose_swizzles(desc->swizzle, user_swizzle, composed_swizzle);
+
+ unsigned size = panfrost_estimate_texture_payload_size(
+ so->base.u.tex.first_level,
+ so->base.u.tex.last_level,
+ so->base.u.tex.first_layer,
+ so->base.u.tex.last_layer,
+ texture->nr_samples,
+ type, prsrc->modifier);
+
+ so->bo = panfrost_bo_create(device, size, 0);
+
+ panfrost_new_texture_bifrost(
+ &so->bifrost_descriptor,
+ texture->width0, texture->height0,
+ depth, array_size,
+ format,
+ type, prsrc->modifier,
+ so->base.u.tex.first_level,
+ so->base.u.tex.last_level,
+ so->base.u.tex.first_layer,
+ so->base.u.tex.last_layer,
+ texture->nr_samples,
+ prsrc->cubemap_stride,
+ panfrost_translate_swizzle_4(composed_swizzle),
+ prsrc->bo->gpu,
+ prsrc->slices,
+ so->bo);
+ } else {
+ unsigned size = panfrost_estimate_texture_payload_size(
+ so->base.u.tex.first_level,
+ so->base.u.tex.last_level,
+ so->base.u.tex.first_layer,
+ so->base.u.tex.last_layer,
+ texture->nr_samples,
+ type, prsrc->modifier);
+ size += MALI_MIDGARD_TEXTURE_LENGTH;
+
+ so->bo = panfrost_bo_create(device, size, 0);
+
+ panfrost_new_texture(
+ so->bo->cpu,
+ texture->width0, texture->height0,
+ depth, array_size,
+ format,
+ type, prsrc->modifier,
+ so->base.u.tex.first_level,
+ so->base.u.tex.last_level,
+ so->base.u.tex.first_layer,
+ so->base.u.tex.last_layer,
+ texture->nr_samples,
+ prsrc->cubemap_stride,
+ panfrost_translate_swizzle_4(user_swizzle),
+ prsrc->bo->gpu,
+ prsrc->slices);
}
}
struct pipe_resource *texture,
const struct pipe_sampler_view *template)
{
- struct panfrost_screen *screen = pan_screen(pctx->screen);
struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view);
pipe_reference(NULL, &texture->reference);
- struct panfrost_resource *prsrc = (struct panfrost_resource *) texture;
- assert(prsrc->bo);
-
so->base = *template;
so->base.texture = texture;
so->base.reference.count = 1;
so->base.context = pctx;
- unsigned char user_swizzle[4] = {
- template->swizzle_r,
- template->swizzle_g,
- template->swizzle_b,
- template->swizzle_a
- };
-
- /* In the hardware, array_size refers specifically to array textures,
- * whereas in Gallium, it also covers cubemaps */
-
- unsigned array_size = texture->array_size;
-
- if (template->target == PIPE_TEXTURE_CUBE) {
- /* TODO: Cubemap arrays */
- assert(array_size == 6);
- array_size /= 6;
- }
-
- enum mali_texture_type type =
- panfrost_translate_texture_type(template->target);
-
- unsigned size = panfrost_estimate_texture_size(
- template->u.tex.first_level,
- template->u.tex.last_level,
- template->u.tex.first_layer,
- template->u.tex.last_layer,
- type, prsrc->layout);
-
- so->bo = panfrost_bo_create(screen, size, 0);
-
- panfrost_new_texture(
- so->bo->cpu,
- texture->width0, texture->height0,
- texture->depth0, array_size,
- template->format,
- type, prsrc->layout,
- template->u.tex.first_level,
- template->u.tex.last_level,
- template->u.tex.first_layer,
- template->u.tex.last_layer,
- prsrc->cubemap_stride,
- panfrost_translate_swizzle_4(user_swizzle),
- prsrc->bo->gpu,
- prsrc->slices);
+ panfrost_create_sampler_view_bo(so, pctx, texture);
return (struct pipe_sampler_view *) so;
}
buffers, start, count);
}
-/* Hints that a framebuffer should use AFBC where possible */
-
static void
-panfrost_hint_afbc(
- struct panfrost_screen *screen,
- const struct pipe_framebuffer_state *fb)
+panfrost_set_framebuffer_state(struct pipe_context *pctx,
+ const struct pipe_framebuffer_state *fb)
{
- /* AFBC implemenation incomplete; hide it */
- if (!(pan_debug & PAN_DBG_AFBC)) return;
+ struct panfrost_context *ctx = pan_context(pctx);
- /* Hint AFBC to the resources bound to each color buffer */
+ util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
+ ctx->batch = NULL;
- for (unsigned i = 0; i < fb->nr_cbufs; ++i) {
- struct pipe_surface *surf = fb->cbufs[i];
- struct panfrost_resource *rsrc = pan_resource(surf->texture);
- panfrost_resource_hint_layout(screen, rsrc, MALI_TEXTURE_AFBC, 1);
- }
+ /* We may need to generate a new variant if the fragment shader is
+ * keyed to the framebuffer format (due to EXT_framebuffer_fetch) */
+ struct panfrost_shader_variants *fs = ctx->shader[PIPE_SHADER_FRAGMENT];
- /* Also hint it to the depth buffer */
+ if (fs && fs->variant_count && fs->variants[fs->active_variant].outputs_read)
+ ctx->base.bind_fs_state(&ctx->base, fs);
+}
- if (fb->zsbuf) {
- struct panfrost_resource *rsrc = pan_resource(fb->zsbuf->texture);
- panfrost_resource_hint_layout(screen, rsrc, MALI_TEXTURE_AFBC, 1);
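+/* Translate a Gallium stencil op to the Mali enum; note Gallium's bare
+ * INCR/DECR are the saturating variants */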
+static inline unsigned
+pan_pipe_to_stencil_op(enum pipe_stencil_op in)
+{
+ switch (in) {
+ case PIPE_STENCIL_OP_KEEP: return MALI_STENCIL_OP_KEEP;
+ case PIPE_STENCIL_OP_ZERO: return MALI_STENCIL_OP_ZERO;
+ case PIPE_STENCIL_OP_REPLACE: return MALI_STENCIL_OP_REPLACE;
+ case PIPE_STENCIL_OP_INCR: return MALI_STENCIL_OP_INCR_SAT;
+ case PIPE_STENCIL_OP_DECR: return MALI_STENCIL_OP_DECR_SAT;
+ case PIPE_STENCIL_OP_INCR_WRAP: return MALI_STENCIL_OP_INCR_WRAP;
+ case PIPE_STENCIL_OP_DECR_WRAP: return MALI_STENCIL_OP_DECR_WRAP;
+ case PIPE_STENCIL_OP_INVERT: return MALI_STENCIL_OP_INVERT;
+ default: unreachable("Invalid stencil op");
}
}
-static void
-panfrost_set_framebuffer_state(struct pipe_context *pctx,
- const struct pipe_framebuffer_state *fb)
+static inline void
+pan_pipe_to_stencil(const struct pipe_stencil_state *in, void *out)
{
- struct panfrost_context *ctx = pan_context(pctx);
-
- panfrost_hint_afbc(pan_screen(pctx->screen), fb);
- util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
- ctx->batch = NULL;
- panfrost_invalidate_frame(ctx);
+ pan_pack(out, STENCIL, cfg) {
+ cfg.mask = in->valuemask;
+ cfg.compare_function = panfrost_translate_compare_func(in->func);
+ cfg.stencil_fail = pan_pipe_to_stencil_op(in->fail_op);
+ cfg.depth_fail = pan_pipe_to_stencil_op(in->zfail_op);
+ cfg.depth_pass = pan_pipe_to_stencil_op(in->zpass_op);
+ }
}
static void *
panfrost_create_depth_stencil_state(struct pipe_context *pipe,
- const struct pipe_depth_stencil_alpha_state *depth_stencil)
+ const struct pipe_depth_stencil_alpha_state *zsa)
{
- return mem_dup(depth_stencil, sizeof(*depth_stencil));
+ struct panfrost_zsa_state *so = CALLOC_STRUCT(panfrost_zsa_state);
+ so->base = *zsa;
+
+ pan_pipe_to_stencil(&zsa->stencil[0], &so->stencil_front);
+ pan_pipe_to_stencil(&zsa->stencil[1], &so->stencil_back);
+
+ so->stencil_mask_front = zsa->stencil[0].writemask;
+
+ if (zsa->stencil[1].enabled)
+ so->stencil_mask_back = zsa->stencil[1].writemask;
+ else
+ so->stencil_mask_back = so->stencil_mask_front;
+
+ /* The alpha test is lowered by the frontend */
+ assert(!zsa->alpha.enabled);
+
+ /* TODO: Bounds test should be easy */
+ assert(!zsa->depth.bounds_test);
+
+ return so;
}
static void
void *cso)
{
struct panfrost_context *ctx = pan_context(pipe);
- struct pipe_depth_stencil_alpha_state *depth_stencil = cso;
- ctx->depth_stencil = depth_stencil;
-
- if (!depth_stencil)
- return;
-
- /* Alpha does not exist in the hardware (it's not in ES3), so it's
- * emulated in the fragment shader */
-
- if (depth_stencil->alpha.enabled) {
- /* We need to trigger a new shader (maybe) */
- ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]);
- }
-
- /* Bounds test not implemented */
- assert(!depth_stencil->depth.bounds_test);
+ struct panfrost_zsa_state *zsa = cso;
+ ctx->depth_stencil = zsa;
}
static void
panfrost_set_sample_mask(struct pipe_context *pipe,
unsigned sample_mask)
{
+ struct panfrost_context *ctx = pan_context(pipe);
+ ctx->sample_mask = sample_mask;
+}
+
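+/* Minimum samples to shade per fragment, for sample shading */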
+static void
+panfrost_set_min_samples(struct pipe_context *pipe,
+ unsigned min_samples)
+{
+ struct panfrost_context *ctx = pan_context(pipe);
+ ctx->min_samples = min_samples;
}
+
static void
panfrost_set_clip_state(struct pipe_context *pipe,
const struct pipe_clip_state *clip)
/* Allocate a bo for the query results to be stored */
if (!query->bo) {
query->bo = panfrost_bo_create(
- pan_screen(ctx->base.screen),
+ pan_device(ctx->base.screen),
sizeof(unsigned), 0);
}
break;
default:
- DBG("Skipping query %u\n", query->type);
+ /* TODO: timestamp queries, etc? */
break;
}
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
- /* Flush first */
- panfrost_flush_all_batches(ctx, true);
+ panfrost_flush_batches_accessing_bo(ctx, query->bo, false);
+ panfrost_bo_wait(query->bo, INT64_MAX, false);
/* Read back the query results */
unsigned *result = (unsigned *) query->bo->cpu;
case PIPE_QUERY_PRIMITIVES_GENERATED:
case PIPE_QUERY_PRIMITIVES_EMITTED:
- panfrost_flush_all_batches(ctx, true);
+ panfrost_flush_all_batches(ctx, 0);
vresult->u64 = query->end - query->start;
break;
default:
- DBG("Skipped query get %u\n", query->type);
+ /* TODO: more queries */
break;
}
{
struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context);
struct pipe_context *gallium = (struct pipe_context *) ctx;
+ struct panfrost_device *dev = pan_device(screen);
gallium->screen = screen;
gallium->flush = panfrost_flush;
gallium->clear = panfrost_clear;
gallium->draw_vbo = panfrost_draw_vbo;
+ gallium->texture_barrier = panfrost_texture_barrier;
gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
gallium->set_constant_buffer = panfrost_set_constant_buffer;
gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;
gallium->set_sample_mask = panfrost_set_sample_mask;
+ gallium->set_min_samples = panfrost_set_min_samples;
gallium->set_clip_state = panfrost_set_clip_state;
gallium->set_viewport_states = panfrost_set_viewport_states;
panfrost_blend_context_init(gallium);
panfrost_compute_context_init(gallium);
- /* XXX: leaks */
gallium->stream_uploader = u_upload_create_default(gallium);
gallium->const_uploader = gallium->stream_uploader;
assert(gallium->stream_uploader);
- /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */
- ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1;
+ /* All of our GPUs support ES modes plus QUADS. Midgard additionally
+ * supports QUAD_STRIPS and POLYGON; Bifrost does not. */
+
+ ctx->draw_modes = (1 << (PIPE_PRIM_QUADS + 1)) - 1;
+
+ if (!(dev->quirks & IS_BIFROST)) {
+ ctx->draw_modes |= (1 << PIPE_PRIM_QUAD_STRIP);
+ ctx->draw_modes |= (1 << PIPE_PRIM_POLYGON);
+ }
ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);
/* Prepare for render! */
panfrost_batch_init(ctx);
- panfrost_emit_vertex_payload(ctx);
- panfrost_invalidate_frame(ctx);
+
+ if (!(dev->quirks & IS_BIFROST)) {
+ for (unsigned c = 0; c < PIPE_MAX_COLOR_BUFS; ++c)
+ ctx->blit_blend.rt[c].shaders = _mesa_hash_table_u64_create(ctx);
+ }
+
+ /* By default mask everything on */
+ ctx->sample_mask = ~0;
+ ctx->active_queries = true;
return gallium;
}