.unknown2 = 0x1f,
.format = 0x30000000,
.clear_flags = 0x1000,
- .unknown_address_0 = ctx->scratchpad.gpu,
- .tiler_polygon_list = ctx->tiler_polygon_list.gpu,
- .tiler_polygon_list_body = ctx->tiler_polygon_list.gpu + 40960,
+ .unknown_address_0 = ctx->scratchpad.bo->gpu,
+ .tiler_polygon_list = ctx->tiler_polygon_list.bo->gpu,
+ .tiler_polygon_list_body = ctx->tiler_polygon_list.bo->gpu + 40960,
.tiler_hierarchy_mask = 0xF0,
.tiler_flags = 0x0,
- .tiler_heap_free = ctx->tiler_heap.gpu,
- .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size,
+ .tiler_heap_free = ctx->tiler_heap.bo->gpu,
+ .tiler_heap_end = ctx->tiler_heap.bo->gpu + ctx->tiler_heap.bo->size,
};
panfrost_set_framebuffer_resolution(&framebuffer, ctx->pipe_framebuffer.width, ctx->pipe_framebuffer.height);
.unknown2 = 0x1f,
- .scratchpad = ctx->scratchpad.gpu,
+ .scratchpad = ctx->scratchpad.bo->gpu,
};
framebuffer.tiler_hierarchy_mask =
unsigned total_size = header_size + body_size;
if (framebuffer.tiler_hierarchy_mask) {
- assert(ctx->tiler_polygon_list.size >= total_size);
+ assert(ctx->tiler_polygon_list.bo->size >= total_size);
/* Specify allocated tiler structures */
- framebuffer.tiler_polygon_list = ctx->tiler_polygon_list.gpu;
+ framebuffer.tiler_polygon_list = ctx->tiler_polygon_list.bo->gpu;
/* Allow the entire tiler heap */
- framebuffer.tiler_heap_start = ctx->tiler_heap.gpu;
+ framebuffer.tiler_heap_start = ctx->tiler_heap.bo->gpu;
framebuffer.tiler_heap_end =
- ctx->tiler_heap.gpu + ctx->tiler_heap.size;
+ ctx->tiler_heap.bo->gpu + ctx->tiler_heap.bo->size;
} else {
/* The tiler is disabled, so don't allow the tiler heap */
- framebuffer.tiler_heap_start = ctx->tiler_heap.gpu;
+ framebuffer.tiler_heap_start = ctx->tiler_heap.bo->gpu;
framebuffer.tiler_heap_end = framebuffer.tiler_heap_start;
/* Use a dummy polygon list */
- framebuffer.tiler_polygon_list = ctx->tiler_dummy.gpu;
+ framebuffer.tiler_polygon_list = ctx->tiler_dummy.bo->gpu;
/* Also, set a "tiler disabled?" flag? */
framebuffer.tiler_hierarchy_mask |= 0x1000;
panfrost_is_scanout(struct panfrost_context *ctx)
{
- /* If there is no color buffer, it's an FBO */
- if (!ctx->pipe_framebuffer.nr_cbufs)
+ /* If there isn't exactly one color buffer, it's an FBO */
+ if (ctx->pipe_framebuffer.nr_cbufs != 1)
return false;
- /* If we're too early that no framebuffer was sent, it's scanout */
+ /* If we're so early that no framebuffer has been set, it's scanout */
unsigned stride,
unsigned count)
{
- mali_ptr varying_address = ctx->varying_mem.gpu + ctx->varying_height;
+ mali_ptr varying_address = ctx->varying_mem.bo->gpu + ctx->varying_height;
/* Fill out the descriptor */
slot->elements = varying_address | MALI_ATTR_LINEAR;
slot->stride = stride;
slot->size = stride * count;
- ctx->varying_height += ALIGN(slot->size, 64);
- assert(ctx->varying_height < ctx->varying_mem.size);
+ ctx->varying_height += ALIGN_POT(slot->size, 64);
+ assert(ctx->varying_height < ctx->varying_mem.bo->size);
return varying_address;
}
static void
panfrost_emit_varying_descriptor(
struct panfrost_context *ctx,
- unsigned invocation_count)
+ unsigned vertex_count)
{
/* Load the shaders */
unsigned idx = 0;
panfrost_emit_varyings(ctx, &varyings[idx++], num_gen_varyings * 16,
- invocation_count);
+ vertex_count);
/* fp32 vec4 gl_Position */
ctx->payload_tiler.postfix.position_varying =
panfrost_emit_varyings(ctx, &varyings[idx++],
- sizeof(float) * 4, invocation_count);
+ sizeof(float) * 4, vertex_count);
if (vs->writes_point_size || fs->reads_point_coord) {
/* fp16 vec1 gl_PointSize */
ctx->payload_tiler.primitive_size.pointer =
panfrost_emit_varyings(ctx, &varyings[idx++],
- 2, invocation_count);
+ 2, vertex_count);
}
if (fs->reads_point_coord) {
ctx->payload_tiler.postfix.varyings = varyings_p;
}
-static mali_ptr
+mali_ptr
panfrost_vertex_buffer_address(struct panfrost_context *ctx, unsigned i)
{
struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
return rsrc->bo->gpu + buf->buffer_offset;
}
-/* Emits attributes and varying descriptors, which should be called every draw,
- * excepting some obscure circumstances */
-
-static void
-panfrost_emit_vertex_data(struct panfrost_context *ctx, struct panfrost_job *job)
-{
- /* Staged mali_attr, and index into them. i =/= k, depending on the
- * vertex buffer mask */
- union mali_attr attrs[PIPE_MAX_ATTRIBS];
- unsigned k = 0;
-
- unsigned invocation_count = MALI_NEGATIVE(ctx->payload_tiler.prefix.invocation_count);
-
- for (int i = 0; i < ARRAY_SIZE(ctx->vertex_buffers); ++i) {
- if (!(ctx->vb_mask & (1 << i))) continue;
-
- struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
- struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);
-
- if (!rsrc) continue;
-
- /* Align to 64 bytes by masking off the lower bits. This
- * will be adjusted back when we fixup the src_offset in
- * mali_attr_meta */
-
- mali_ptr addr = panfrost_vertex_buffer_address(ctx, i) & ~63;
-
- /* Offset vertex count by draw_start to make sure we upload enough */
- attrs[k].stride = buf->stride;
- attrs[k].size = rsrc->base.width0;
-
- panfrost_job_add_bo(job, rsrc->bo);
- attrs[k].elements = addr | MALI_ATTR_LINEAR;
-
- ++k;
- }
-
- ctx->payload_vertex.postfix.attributes = panfrost_upload_transient(ctx, attrs, k * sizeof(union mali_attr));
-
- panfrost_emit_varying_descriptor(ctx, invocation_count);
-}
-
static bool
panfrost_writes_point_size(struct panfrost_context *ctx)
{
* QED.
*/
+ unsigned start = ctx->payload_vertex.draw_start;
+
for (unsigned i = 0; i < so->num_elements; ++i) {
unsigned vbi = so->pipe[i].vertex_buffer_index;
+ struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
mali_ptr addr = panfrost_vertex_buffer_address(ctx, vbi);
/* Adjust by the masked off bits of the offset */
target[i].src_offset += (addr & 63);
+
+ /* Also, somewhat obscurely, per-instance data needs to be
+ * offset in response to a delayed start in an indexed draw */
+
+ if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start) {
+ target[i].src_offset -= buf->stride * start;
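+ /* e.g. (illustrative numbers only) a draw_start of 8 with a
+ * 16-byte stride pulls src_offset back by 8 * 16 = 128 bytes */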
+ }
+
}
ctx->payload_vertex.postfix.attribute_meta = transfer.gpu;
unsigned last_layer = pview->u.tex.last_layer;
/* Lower-bit is set when sampling from colour AFBC */
- bool is_afbc = rsrc->bo->layout == PAN_AFBC;
+ bool is_afbc = rsrc->layout == PAN_AFBC;
bool is_zs = rsrc->base.bind & PIPE_BIND_DEPTH_STENCIL;
unsigned afbc_bit = (is_afbc && !is_zs) ? 1 : 0;
+ /* Add the BO to the job so it's retained until the job is done. */
+ struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+ panfrost_job_add_bo(job, rsrc->bo);
+
/* Inject the addresses in, interleaving mip levels, cube faces, and
* strides in that order */
if (has_manual_stride) {
view->hw.payload[idx++] =
- rsrc->bo->slices[l].stride;
+ rsrc->slices[l].stride;
}
}
}
struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
if (with_vertex_data) {
- panfrost_emit_vertex_data(ctx, job);
+ panfrost_emit_vertex_data(job);
+
+ /* Varyings emitted for -all- geometry */
+ unsigned total_count = ctx->padded_count * ctx->instance_count;
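+ /* padded_count * instance_count covers every vertex invocation,
+ * since each instance gets its own varying storage */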
+ panfrost_emit_varying_descriptor(ctx, total_count);
}
bool msaa = ctx->rasterizer->base.multisample;
COPY(midgard1.unknown2);
#undef COPY
+
+ /* Get blending setup */
+ struct panfrost_blend_final blend =
+ panfrost_get_blend_for_context(ctx, 0);
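+ /* blend.is_shader selects between a compiled blend shader
+ * (blend.shader.gpu / work_count) and a fixed-function equation
+ * (blend.equation); only render target 0 is considered here */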
+
/* If there is a blend shader, work registers are shared */
- if (ctx->blend->has_blend_shader)
+ if (blend.is_shader)
ctx->fragment_shader_core.midgard1.work_count = /*MAX2(ctx->fragment_shader_core.midgard1.work_count, ctx->blend->blend_work_count)*/16;
/* Set late due to depending on render state */
/* Check if we're using the default blend descriptor (fast path) */
bool no_blending =
- !ctx->blend->has_blend_shader &&
- (ctx->blend->equation.rgb_mode == 0x122) &&
- (ctx->blend->equation.alpha_mode == 0x122) &&
- (ctx->blend->equation.color_mask == 0xf);
+ !blend.is_shader &&
+ (blend.equation.equation->rgb_mode == 0x122) &&
+ (blend.equation.equation->alpha_mode == 0x122) &&
+ (blend.equation.equation->color_mask == 0xf);
/* Even on MFBD, the shader descriptor gets blend shaders. It's
* *also* copied to the blend_meta appended (by convention),
* but this is the field actually read by the hardware. (Or
* maybe both are read...?) */
- if (ctx->blend->has_blend_shader) {
- ctx->fragment_shader_core.blend.shader = ctx->blend->blend_shader;
+ if (blend.is_shader) {
+ ctx->fragment_shader_core.blend.shader =
+ blend.shader.gpu;
} else {
ctx->fragment_shader_core.blend.shader = 0;
}
* additionally need to signal CAN_DISCARD for nontrivial blend
* modes (so we're able to read back the destination buffer) */
- if (!ctx->blend->has_blend_shader) {
- ctx->fragment_shader_core.blend.equation = ctx->blend->equation;
- ctx->fragment_shader_core.blend.constant = ctx->blend->constant;
+ if (!blend.is_shader) {
+ ctx->fragment_shader_core.blend.equation =
+ *blend.equation.equation;
+ ctx->fragment_shader_core.blend.constant =
+ blend.equation.constant;
}
if (!no_blending) {
unsigned blend_count = 0x200;
- if (ctx->blend->has_blend_shader) {
+ if (blend.is_shader) {
/* For a blend shader, the bottom nibble corresponds to
* the number of work registers used, which signals the
* -existence- of a blend shader */
- assert(ctx->blend->blend_work_count >= 2);
- blend_count |= MIN2(ctx->blend->blend_work_count, 3);
+ assert(blend.shader.work_count >= 2);
+ blend_count |= MIN2(blend.shader.work_count, 3);
} else {
/* Otherwise, the bottom bit simply specifies if
* blending (anything other than REPLACE) is enabled */
* native Midgard ops for helping here, but
* they're not well-understood yet. */
- assert(!(is_srgb && ctx->blend->has_blend_shader));
+ assert(!(is_srgb && blend.is_shader));
- if (ctx->blend->has_blend_shader) {
- rts[i].blend.shader = ctx->blend->blend_shader;
+ if (blend.is_shader) {
+ rts[i].blend.shader = blend.shader.gpu;
} else {
- rts[i].blend.equation = ctx->blend->equation;
- rts[i].blend.constant = ctx->blend->constant;
+ rts[i].blend.equation = *blend.equation.equation;
+ rts[i].blend.constant = blend.equation.constant;
}
}
mali_ptr gpu = panfrost_map_constant_buffer_gpu(ctx, buf, ubo);
unsigned bytes_per_field = 16;
- unsigned aligned = ALIGN(sz, bytes_per_field);
+ unsigned aligned = ALIGN_POT(sz, bytes_per_field);
unsigned fields = aligned / bytes_per_field;
ubos[ubo].size = MALI_POSITIVE(fields);
* should work, but in practice causes issues when we're not
* explicitly trying to scissor */
- .clip_minx = -inff,
- .clip_miny = -inff,
- .clip_maxx = inff,
- .clip_maxy = inff,
+ .clip_minx = -INFINITY,
+ .clip_miny = -INFINITY,
+ .clip_maxx = INFINITY,
+ .clip_maxy = INFINITY,
.clip_minz = 0.0,
.clip_maxz = 1.0,
{
struct panfrost_context *ctx = pan_context(pipe);
- /* Nothing to reload? */
+ /* Nothing to reload? TODO: MRT wallpapers */
if (ctx->pipe_framebuffer.cbufs[0] == NULL)
return;
struct panfrost_resource *rsrc = pan_resource(surf->texture);
unsigned level = surf->u.tex.level;
- if (!rsrc->bo->slices[level].initialized)
+ if (!rsrc->slices[level].initialized)
return;
/* Save the batch */
struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);
off_t offset = info->start * info->index_size;
+ struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
if (!info->has_user_indices) {
/* Only resources can be directly mapped */
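+ /* The GPU reads the index BO in place, so keep it referenced
+ * until the batch completes */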
+ panfrost_job_add_bo(batch, rsrc->bo);
return rsrc->bo->gpu + offset;
} else {
/* Otherwise, we need to upload to transient memory */
ctx->payload_tiler.prefix.draw_mode = g2m_draw_mode(mode);
ctx->vertex_count = info->count;
+ ctx->instance_count = info->instance_count;
/* For non-indexed draws, they're the same */
unsigned vertex_count = ctx->vertex_count;
/* For higher amounts of vertices (greater than what fits in a 16-bit
* short), the other value is needed, otherwise there will be bizarre
- * rendering artefacts. It's not clear what these values mean yet. */
+ * rendering artefacts. It's not clear what these values mean yet. This
+ * change is also needed for instancing and sometimes points (perhaps
+ * related to dynamically setting gl_PointSize) */
+
+ bool is_points = mode == PIPE_PRIM_POINTS;
+ bool many_verts = ctx->vertex_count > 0xFFFF;
+ bool instanced = ctx->instance_count > 1;
- draw_flags |= (mode == PIPE_PRIM_POINTS || ctx->vertex_count > 65535) ? 0x3000 : 0x18000;
+ draw_flags |= (is_points || many_verts || instanced) ? 0x3000 : 0x18000;
+
+ /* This doesn't make much sense */
+ if (mode == PIPE_PRIM_LINE_STRIP) {
+ draw_flags |= 0x800;
+ }
if (info->index_size) {
/* Calculate the min/max index used so we can figure out how
panfrost_pack_work_groups_fused(
&ctx->payload_vertex.prefix,
&ctx->payload_tiler.prefix,
- 1, vertex_count, 1,
+ 1, vertex_count, info->instance_count,
1, 1, 1);
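+
+ /* presumably a 1 x vertex_count x instance_count grid of
+ * single-item work groups, with instances as their own dimension */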
ctx->payload_tiler.prefix.unknown_draw = draw_flags;
+ /* Encode the padded vertex count */
+
+ if (info->instance_count > 1) {
+ /* Triangles have non-even vertex counts so they change how
+ * padding works internally */
+
+ bool is_triangle =
+ mode == PIPE_PRIM_TRIANGLES ||
+ mode == PIPE_PRIM_TRIANGLE_STRIP ||
+ mode == PIPE_PRIM_TRIANGLE_FAN;
+
+ struct pan_shift_odd so =
+ panfrost_padded_vertex_count(vertex_count, !is_triangle);
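+ /* The {shift, odd} pair seemingly encodes the padded count as a
+ * power of two, optionally times three; pan_expand_shift_odd()
+ * recovers the plain integer */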
+
+ ctx->payload_vertex.instance_shift = so.shift;
+ ctx->payload_tiler.instance_shift = so.shift;
+
+ ctx->payload_vertex.instance_odd = so.odd;
+ ctx->payload_tiler.instance_odd = so.odd;
+
+ ctx->padded_count = pan_expand_shift_odd(so);
+ } else {
+ ctx->padded_count = ctx->vertex_count;
+
+ /* Reset instancing state */
+ ctx->payload_vertex.instance_shift = 0;
+ ctx->payload_vertex.instance_odd = 0;
+ ctx->payload_tiler.instance_shift = 0;
+ ctx->payload_tiler.instance_odd = 0;
+ }
+
/* Fire off the draw itself */
panfrost_queue_draw(ctx);
}
panfrost_allocate_chunk(pan_context(pctx), 0, HEAP_DESCRIPTOR);
for (int i = 0; i < num_elements; ++i) {
- so->hw[i].index = elements[i].vertex_buffer_index;
+ so->hw[i].index = i;
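+ /* (seemingly because attribute records are now emitted per
+ * vertex element rather than per vertex buffer) */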
enum pipe_format fmt = elements[i].src_format;
const struct util_format_description *desc = util_format_description(fmt);
unsigned usage2_layout = 0x10;
- switch (prsrc->bo->layout) {
+ switch (prsrc->layout) {
case PAN_AFBC:
usage2_layout |= 0x8 | 0x4;
break;
unsigned first_level = template->u.tex.first_level;
unsigned last_level = template->u.tex.last_level;
- if (prsrc->bo->layout == PAN_LINEAR) {
+ if (prsrc->layout == PAN_LINEAR) {
for (unsigned l = first_level; l <= last_level; ++l) {
- unsigned actual_stride = prsrc->bo->slices[l].stride;
+ unsigned actual_stride = prsrc->slices[l].stride;
unsigned width = u_minify(texture->width0, l);
unsigned comp_stride = width * bytes_per_pixel;
ctx->pipe_framebuffer.width = fb->width;
ctx->pipe_framebuffer.height = fb->height;
+ struct pipe_surface *zb = fb->zsbuf;
+ bool needs_reattach = false;
+
for (int i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
struct pipe_surface *cb = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
/* check if changing cbuf */
if (ctx->pipe_framebuffer.cbufs[i] == cb) continue;
- if (cb && (i != 0)) {
- DBG("XXX: Multiple render targets not supported before t7xx!\n");
- assert(0);
- }
-
/* assign new */
pipe_surface_reference(&ctx->pipe_framebuffer.cbufs[i], cb);
- if (!cb)
- continue;
+ needs_reattach |= (cb != NULL);
+ }
+
+ if (ctx->pipe_framebuffer.zsbuf != zb) {
+ pipe_surface_reference(&ctx->pipe_framebuffer.zsbuf, zb);
+ needs_reattach |= (zb != NULL);
+ }
+ if (needs_reattach) {
if (ctx->require_sfbd)
ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
else
ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);

panfrost_attach_vt_framebuffer(ctx);
}
-
- {
- struct pipe_surface *zb = fb->zsbuf;
-
- if (ctx->pipe_framebuffer.zsbuf != zb) {
- pipe_surface_reference(&ctx->pipe_framebuffer.zsbuf, zb);
-
- if (zb) {
- if (ctx->require_sfbd)
- ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
- else
- ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);
-
- panfrost_attach_vt_framebuffer(ctx);
- }
- }
- }
-}
-
-static void *
-panfrost_create_blend_state(struct pipe_context *pipe,
- const struct pipe_blend_state *blend)
-{
- struct panfrost_context *ctx = pan_context(pipe);
- struct panfrost_blend_state *so = rzalloc(ctx, struct panfrost_blend_state);
- so->base = *blend;
-
- /* TODO: The following features are not yet implemented */
- assert(!blend->logicop_enable);
- assert(!blend->alpha_to_coverage);
- assert(!blend->alpha_to_one);
-
- /* Compile the blend state, first as fixed-function if we can */
-
- if (panfrost_make_fixed_blend_mode(&blend->rt[0], so, blend->rt[0].colormask, &ctx->blend_color))
- return so;
-
- /* If we can't, compile a blend shader instead */
-
- panfrost_make_blend_shader(ctx, so, &ctx->blend_color);
-
- return so;
-}
-
-static void
-panfrost_bind_blend_state(struct pipe_context *pipe,
- void *cso)
-{
- struct panfrost_context *ctx = pan_context(pipe);
- struct pipe_blend_state *blend = (struct pipe_blend_state *) cso;
- struct panfrost_blend_state *pblend = (struct panfrost_blend_state *) cso;
- ctx->blend = pblend;
-
- if (!blend)
- return;
-
- SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_DITHER, !blend->dither);
-
- /* TODO: Attach color */
-
- /* Shader itself is not dirty, but the shader core is */
- ctx->dirty |= PAN_DIRTY_FS;
-}
-
-static void
-panfrost_delete_blend_state(struct pipe_context *pipe,
- void *blend)
-{
- struct panfrost_blend_state *so = (struct panfrost_blend_state *) blend;
-
- if (so->has_blend_shader) {
- DBG("Deleting blend state leak blend shaders bytecode\n");
- }
-
- ralloc_free(blend);
-}
-
-static void
-panfrost_set_blend_color(struct pipe_context *pipe,
- const struct pipe_blend_color *blend_color)
-{
- struct panfrost_context *ctx = pan_context(pipe);
-
- /* If blend_color is we're unbinding, so ctx->blend_color is now undefined -> nothing to do */
-
- if (blend_color) {
- ctx->blend_color = *blend_color;
-
- /* The blend mode depends on the blend constant color, due to the
- * fixed/programmable split. So, we're forced to regenerate the blend
- * equation */
-
- /* TODO: Attach color */
- }
}
static void *
gallium->delete_sampler_state = panfrost_generic_cso_delete;
gallium->bind_sampler_states = panfrost_bind_sampler_states;
- gallium->create_blend_state = panfrost_create_blend_state;
- gallium->bind_blend_state = panfrost_bind_blend_state;
- gallium->delete_blend_state = panfrost_delete_blend_state;
-
- gallium->set_blend_color = panfrost_set_blend_color;
-
gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;
gallium->set_stream_output_targets = panfrost_set_stream_output_targets;
panfrost_resource_context_init(gallium);
+ panfrost_blend_context_init(gallium);
panfrost_drm_init_context(ctx);