assert(pool->entry_index < PANFROST_MAX_TRANSIENT_ENTRIES);
/* Check if this entry exists */
-
+
if (pool->entry_index >= pool->entry_count) {
/* Don't overflow the pool -- allocate a new one */
struct pipe_context *gallium = (struct pipe_context *) ctx;
struct panfrost_blend_rt {
/* If has_fixed_function is set, equation is the
* fixed-function configuration for this blend state */
-
+
bool has_fixed_function;
struct mali_blend_equation equation;
static struct panfrost_blend_shader *
panfrost_get_blend_shader(
- struct panfrost_context *ctx,
- struct panfrost_blend_state *blend,
- enum pipe_format fmt,
- unsigned rt)
+ struct panfrost_context *ctx,
+ struct panfrost_blend_state *blend,
+ enum pipe_format fmt,
+ unsigned rt)
{
/* Prevent NULL collision issues */
assert(fmt != 0);
rt->has_fixed_function =
panfrost_make_fixed_blend_mode(
- &blend->rt[c],
- &rt->equation,
- &rt->constant_mask,
- blend->rt[c].colormask);
+ &blend->rt[c],
+ &rt->equation,
+ &rt->constant_mask,
+ blend->rt[c].colormask);
/* Regardless of whether that works, we also need to initialize
* the blend shaders */
/* First, we'll try a fixed function path */
if (rt->has_fixed_function && panfrost_can_fixed_blend(fmt)) {
if (panfrost_blend_constant(
- &final.equation.constant,
- ctx->blend_color.color,
- rt->constant_mask))
- {
+ &final.equation.constant,
+ ctx->blend_color.color,
+ rt->constant_mask)) {
/* There's an equation and a suitable constant, so we're good to go */
final.is_shader = false;
final.equation.equation = &rt->equation;
/* We have to specialize the blend shader to use constants, so
* patch in the current constants and upload to transient
* memory */
-
+
float *patch = (float *) (shader->shader.cpu + shader->patch_index);
memcpy(patch, ctx->blend_color.color, sizeof(float) * 4);
final.shader.gpu = panfrost_upload_transient(
- ctx, shader->shader.cpu, shader->size);
+ ctx, shader->shader.cpu, shader->size);
} else {
/* No need to specialize further, use the preuploaded shader */
final.shader.gpu = shader->shader.gpu;
struct panfrost_blend_shader
panfrost_compile_blend_shader(
- struct panfrost_context *ctx,
- struct pipe_blend_state *cso,
- enum pipe_format format)
+ struct panfrost_context *ctx,
+ struct pipe_blend_state *cso,
+ enum pipe_format format)
{
struct panfrost_blend_shader res;
struct panfrost_blend_shader
panfrost_compile_blend_shader(
- struct panfrost_context *ctx,
- struct pipe_blend_state *cso,
- enum pipe_format format);
+ struct panfrost_context *ctx,
+ struct pipe_blend_state *cso,
+ enum pipe_format format);
#endif
/* Certain special formats are, too */
switch (format) {
- case PIPE_FORMAT_B5G6R5_UNORM:
- case PIPE_FORMAT_R10G10B10A2_UNORM:
- case PIPE_FORMAT_B10G10R10A2_UNORM:
- case PIPE_FORMAT_R10G10B10X2_UNORM:
- case PIPE_FORMAT_B10G10R10X2_UNORM:
- case PIPE_FORMAT_B4G4R4A4_UNORM:
- case PIPE_FORMAT_B4G4R4X4_UNORM:
- case PIPE_FORMAT_A4R4_UNORM:
- case PIPE_FORMAT_R4A4_UNORM:
- case PIPE_FORMAT_A4B4G4R4_UNORM:
- return true;
- default:
- return false;
+ case PIPE_FORMAT_B5G6R5_UNORM:
+ case PIPE_FORMAT_R10G10B10A2_UNORM:
+ case PIPE_FORMAT_B10G10R10A2_UNORM:
+ case PIPE_FORMAT_R10G10B10X2_UNORM:
+ case PIPE_FORMAT_B10G10R10X2_UNORM:
+ case PIPE_FORMAT_B4G4R4A4_UNORM:
+ case PIPE_FORMAT_B4G4R4X4_UNORM:
+ case PIPE_FORMAT_A4R4_UNORM:
+ case PIPE_FORMAT_R4A4_UNORM:
+ case PIPE_FORMAT_A4B4G4R4_UNORM:
+ return true;
+ default:
+ return false;
}
}
/* Make sure that the blend function is representable */
switch (func) {
- case PIPE_BLEND_ADD:
- break;
-
-        /* TODO: Re-enable subtraction modes when those are fixed */
- case PIPE_BLEND_SUBTRACT:
- case PIPE_BLEND_REVERSE_SUBTRACT:
- default:
- return false;
+ case PIPE_BLEND_ADD:
+ break;
+
+        /* TODO: Re-enable subtraction modes when those are fixed */
+ case PIPE_BLEND_SUBTRACT:
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ default:
+ return false;
}
part.clip_modifier = MALI_BLEND_MOD_NORMAL;
} else if (src_factor == dst_factor) {
/* XXX: Why? */
part.dominant = func == PIPE_BLEND_ADD ?
- MALI_BLEND_DOM_DESTINATION : MALI_BLEND_DOM_SOURCE;
+ MALI_BLEND_DOM_DESTINATION : MALI_BLEND_DOM_SOURCE;
part.nondominant_mode = MALI_BLEND_NON_MIRROR;
} else if (src_factor == complement_factor(dst_factor)) {
bool
panfrost_make_fixed_blend_mode(
- const struct pipe_rt_blend_state *blend,
- struct mali_blend_equation *out,
- unsigned *constant_mask,
- unsigned colormask)
+ const struct pipe_rt_blend_state *blend,
+ struct mali_blend_equation *out,
+ unsigned *constant_mask,
+ unsigned colormask)
{
/* Gallium and Mali represent colour masks identically. XXX: Add a
* static assert to future-proof this */
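/* A minimal sketch of that assert, relying on the standard Gallium
* PIPE_MASK_* bit values (R = 0x1, G = 0x2, B = 0x4, A = 0x8) matching the
* Mali mask layout (an assumption the XXX wants verified):
*
*   STATIC_ASSERT(PIPE_MASK_R == 0x1 && PIPE_MASK_G == 0x2 &&
*                 PIPE_MASK_B == 0x4 && PIPE_MASK_A == 0x8);
*/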
unsigned alpha_mode = 0;
if (!panfrost_make_fixed_blend_part(
- blend->rgb_func, blend->rgb_src_factor, blend->rgb_dst_factor,
- &rgb_mode))
+ blend->rgb_func, blend->rgb_src_factor, blend->rgb_dst_factor,
+ &rgb_mode))
return false;
if (!panfrost_make_fixed_blend_part(
- blend->alpha_func, blend->alpha_src_factor, blend->alpha_dst_factor,
- &alpha_mode))
+ blend->alpha_func, blend->alpha_src_factor, blend->alpha_dst_factor,
+ &alpha_mode))
return false;
out->rgb_mode = rgb_mode;
bool
panfrost_make_fixed_blend_mode(
- const struct pipe_rt_blend_state *blend,
- struct mali_blend_equation *out,
- unsigned *constant_mask,
- unsigned colormask);
+ const struct pipe_rt_blend_state *blend,
+ struct mali_blend_equation *out,
+ unsigned *constant_mask,
+ unsigned colormask);
bool
panfrost_can_fixed_blend(enum pipe_format format);
static void
panfrost_blitter_save(
- struct panfrost_context *ctx,
- struct blitter_context *blitter)
+ struct panfrost_context *ctx,
+ struct blitter_context *blitter)
{
util_blitter_save_vertex_buffer_slot(blitter, ctx->vertex_buffers);
util_blitter_save_blend(blitter, ctx->blend);
util_blitter_save_depth_stencil_alpha(blitter, ctx->depth_stencil);
util_blitter_save_stencil_ref(blitter, &ctx->stencil_ref);
- util_blitter_save_so_targets(blitter, 0, NULL);
+ util_blitter_save_so_targets(blitter, 0, NULL);
- /* For later */
+ /* For later */
// util_blitter_save_sample_mask(blitter, ctx->sample_mask);
util_blitter_save_framebuffer(blitter, &ctx->pipe_framebuffer);
util_blitter_save_fragment_sampler_states(blitter,
- ctx->sampler_count[PIPE_SHADER_FRAGMENT],
- (void **)(&ctx->samplers[PIPE_SHADER_FRAGMENT]));
+ ctx->sampler_count[PIPE_SHADER_FRAGMENT],
+ (void **)(&ctx->samplers[PIPE_SHADER_FRAGMENT]));
util_blitter_save_fragment_sampler_views(blitter,
- ctx->sampler_view_count[PIPE_SHADER_FRAGMENT],
- (struct pipe_sampler_view **)&ctx->sampler_views[PIPE_SHADER_FRAGMENT]);
+ ctx->sampler_view_count[PIPE_SHADER_FRAGMENT],
+ (struct pipe_sampler_view **)&ctx->sampler_views[PIPE_SHADER_FRAGMENT]);
}
static bool
panfrost_u_blitter_blit(struct pipe_context *pipe,
- const struct pipe_blit_info *info)
+ const struct pipe_blit_info *info)
{
struct panfrost_context *ctx = pan_context(pipe);
if (!util_blitter_is_blit_supported(ctx->blitter, info)) {
fprintf(stderr, "blit unsupported %s -> %s\n",
- util_format_short_name(info->src.resource->format),
- util_format_short_name(info->dst.resource->format));
+ util_format_short_name(info->src.resource->format),
+ util_format_short_name(info->dst.resource->format));
return false;
}
unsigned layer = surf->u.tex.first_layer;
assert(surf->u.tex.last_layer == layer);
- binfo.src.resource = binfo.dst.resource = ctx->pipe_framebuffer.cbufs[0]->texture;
- binfo.src.level = binfo.dst.level = level;
- binfo.src.box.x = binfo.dst.box.x = 0;
- binfo.src.box.y = binfo.dst.box.y = 0;
- binfo.src.box.z = binfo.dst.box.z = layer;
- binfo.src.box.width = binfo.dst.box.width = ctx->pipe_framebuffer.width;
- binfo.src.box.height = binfo.dst.box.height = ctx->pipe_framebuffer.height;
- binfo.src.box.depth = binfo.dst.box.depth = 1;
+ binfo.src.resource = binfo.dst.resource = ctx->pipe_framebuffer.cbufs[0]->texture;
+ binfo.src.level = binfo.dst.level = level;
+ binfo.src.box.x = binfo.dst.box.x = 0;
+ binfo.src.box.y = binfo.dst.box.y = 0;
+ binfo.src.box.z = binfo.dst.box.z = layer;
+ binfo.src.box.width = binfo.dst.box.width = ctx->pipe_framebuffer.width;
+ binfo.src.box.height = binfo.dst.box.height = ctx->pipe_framebuffer.height;
+ binfo.src.box.depth = binfo.dst.box.depth = 1;
- binfo.src.format = binfo.dst.format = ctx->pipe_framebuffer.cbufs[0]->format;
+ binfo.src.format = binfo.dst.format = ctx->pipe_framebuffer.cbufs[0]->format;
- assert(ctx->pipe_framebuffer.nr_cbufs == 1);
- binfo.mask = PIPE_MASK_RGBA;
- binfo.filter = PIPE_TEX_FILTER_LINEAR;
- binfo.scissor_enable = FALSE;
+ assert(ctx->pipe_framebuffer.nr_cbufs == 1);
+ binfo.mask = PIPE_MASK_RGBA;
+ binfo.filter = PIPE_TEX_FILTER_LINEAR;
+ binfo.scissor_enable = FALSE;
- util_blitter_blit(ctx->blitter_wallpaper, &binfo);
+ util_blitter_blit(ctx->blitter_wallpaper, &binfo);
}
//#define DRY_RUN
static enum mali_job_type
-panfrost_job_type_for_pipe(enum pipe_shader_type type)
-{
- switch (type) {
- case PIPE_SHADER_VERTEX:
- return JOB_TYPE_VERTEX;
+panfrost_job_type_for_pipe(enum pipe_shader_type type) {
+ switch (type)
+ {
+ case PIPE_SHADER_VERTEX:
+ return JOB_TYPE_VERTEX;
- case PIPE_SHADER_FRAGMENT:
- /* Note: JOB_TYPE_FRAGMENT is different.
- * JOB_TYPE_FRAGMENT actually executes the
- * fragment shader, but JOB_TYPE_TILER is how you
-         * specify it */
- return JOB_TYPE_TILER;
+ case PIPE_SHADER_FRAGMENT:
+ /* Note: JOB_TYPE_FRAGMENT is different.
+ * JOB_TYPE_FRAGMENT actually executes the
+ * fragment shader, but JOB_TYPE_TILER is how you
+         * specify it */
+ return JOB_TYPE_TILER;
- case PIPE_SHADER_GEOMETRY:
- return JOB_TYPE_GEOMETRY;
+ case PIPE_SHADER_GEOMETRY:
+ return JOB_TYPE_GEOMETRY;
- case PIPE_SHADER_COMPUTE:
- return JOB_TYPE_COMPUTE;
+ case PIPE_SHADER_COMPUTE:
+ return JOB_TYPE_COMPUTE;
- default:
- unreachable("Unsupported shader stage");
+ default:
+ unreachable("Unsupported shader stage");
}
}
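/* Usage sketch for the mapping above; the surprise is that a gallium
* fragment shader runs under a *tiler* job, not JOB_TYPE_FRAGMENT:
*
*   assert(panfrost_job_type_for_pipe(PIPE_SHADER_VERTEX) == JOB_TYPE_VERTEX);
*   assert(panfrost_job_type_for_pipe(PIPE_SHADER_FRAGMENT) == JOB_TYPE_TILER);
*/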
static struct midgard_tiler_descriptor
panfrost_emit_midg_tiler(
- struct panfrost_context *ctx,
- unsigned width,
- unsigned height,
- unsigned vertex_count)
+ struct panfrost_context *ctx,
+ unsigned width,
+ unsigned height,
+ unsigned vertex_count)
{
struct midgard_tiler_descriptor t = {};
/* Compute the polygon header size and use that to offset the body */
unsigned header_size = panfrost_tiler_header_size(
- width, height, t.hierarchy_mask);
+ width, height, t.hierarchy_mask);
unsigned body_size = panfrost_tiler_body_size(
- width, height, t.hierarchy_mask);
+ width, height, t.hierarchy_mask);
/* Sanity check */
unsigned total_size = header_size + body_size;
if (t.hierarchy_mask) {
- assert(ctx->tiler_polygon_list.bo->size >= total_size);
+ assert(ctx->tiler_polygon_list.bo->size >= total_size);
/* Specify allocated tiler structures */
t.polygon_list = ctx->tiler_polygon_list.bo->gpu;
.clear_flags = 0x1000,
.unknown_address_0 = ctx->scratchpad.bo->gpu,
.tiler = panfrost_emit_midg_tiler(ctx,
- width, height, vertex_count),
+ width, height, vertex_count),
};
return framebuffer;
.scratchpad = ctx->scratchpad.bo->gpu,
.tiler = panfrost_emit_midg_tiler(ctx,
- width, height, vertex_count)
+ width, height, vertex_count)
};
return framebuffer;
panfrost_attach_vt_framebuffer(struct panfrost_context *ctx)
{
mali_ptr framebuffer = ctx->require_sfbd ?
- panfrost_attach_vt_sfbd(ctx) :
- panfrost_attach_vt_mfbd(ctx);
+ panfrost_attach_vt_sfbd(ctx) :
+ panfrost_attach_vt_mfbd(ctx);
ctx->payload_vertex.postfix.framebuffer = framebuffer;
ctx->payload_tiler.postfix.framebuffer = framebuffer;
panfrost_invalidate_frame(struct panfrost_context *ctx)
{
unsigned transient_count =
        ctx->transient_pools[ctx->cmdstream_i].entry_index *
        ctx->transient_pools[0].entry_size +
        ctx->transient_pools[ctx->cmdstream_i].entry_offset;
- DBG("Uploaded transient %d bytes\n", transient_count);
+ DBG("Uploaded transient %d bytes\n", transient_count);
/* Rotate cmdstream */
if ((++ctx->cmdstream_i) == (sizeof(ctx->transient_pools) / sizeof(ctx->transient_pools[0])))
panfrost_emit_vertex_payload(struct panfrost_context *ctx)
{
struct midgard_payload_vertex_tiler payload = {
- .gl_enables = 0x4 | (ctx->is_t6xx ? 0 : 0x2),
+ .gl_enables = 0x4 | (ctx->is_t6xx ? 0 : 0x2),
};
memcpy(&ctx->payload_vertex, &payload, sizeof(payload));
.unknown2_4 = MALI_NO_MSAA | 0x4e0,
};
- if (ctx->is_t6xx) {
+ if (ctx->is_t6xx) {
shader.unknown2_4 |= 0x10;
- }
+ }
struct pipe_stencil_state default_stencil = {
.enabled = 0,
static mali_ptr
panfrost_emit_varyings(
- struct panfrost_context *ctx,
- union mali_attr *slot,
- unsigned stride,
- unsigned count)
+ struct panfrost_context *ctx,
+ union mali_attr *slot,
+ unsigned stride,
+ unsigned count)
{
mali_ptr varying_address = ctx->varying_mem.bo->gpu + ctx->varying_height;
static void
panfrost_emit_varying_descriptor(
- struct panfrost_context *ctx,
- unsigned vertex_count)
+ struct panfrost_context *ctx,
+ unsigned vertex_count)
{
/* Load the shaders */
size_t fs_size = sizeof(struct mali_attr_meta) * fs->tripipe->varying_count;
struct panfrost_transfer trans = panfrost_allocate_transient(ctx,
- vs_size + fs_size);
+ vs_size + fs_size);
/*
* Assign ->src_offset now that we know about all the general purpose
/* fp32 vec4 gl_Position */
ctx->payload_tiler.postfix.position_varying =
panfrost_emit_varyings(ctx, &varyings[idx++],
- sizeof(float) * 4, vertex_count);
+ sizeof(float) * 4, vertex_count);
if (vs->writes_point_size || fs->reads_point_coord) {
/* fp16 vec1 gl_PointSize */
ctx->payload_tiler.primitive_size.pointer =
panfrost_emit_varyings(ctx, &varyings[idx++],
- 2, vertex_count);
+ 2, vertex_count);
}
if (fs->reads_point_coord) {
* instead let:
*
* base' = base & ~63 = base - (base & 63)
- *
+ *
* To compensate when using base' (see emit_vertex_data), we have
* to adjust src_offset by the masked off piece:
*
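/* Worked example (addresses assumed for illustration): base = 0x1037
* gives base' = 0x1037 & ~63 = 0x1000, the masked-off piece is
* 0x1037 & 63 = 0x37, and src_offset' = src_offset + 0x37, so that
* base' + src_offset' == base + src_offset as required. */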
static mali_ptr
panfrost_upload_tex(
- struct panfrost_context *ctx,
- struct panfrost_sampler_view *view)
+ struct panfrost_context *ctx,
+ struct panfrost_sampler_view *view)
{
if (!view)
return (mali_ptr) NULL;
bool is_zs = rsrc->base.bind & PIPE_BIND_DEPTH_STENCIL;
unsigned afbc_bit = (is_afbc && !is_zs) ? 1 : 0;
- /* Add the BO to the job so it's retained until the job is done. */
+ /* Add the BO to the job so it's retained until the job is done. */
struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
panfrost_job_add_bo(job, rsrc->bo);
}
return panfrost_upload_transient(ctx, &view->hw,
- sizeof(struct mali_texture_descriptor));
+ sizeof(struct mali_texture_descriptor));
}
static void
};
static void panfrost_upload_viewport_scale_sysval(struct panfrost_context *ctx,
- struct sysval_uniform *uniform)
+ struct sysval_uniform *uniform)
{
const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
}
static void panfrost_upload_viewport_offset_sysval(struct panfrost_context *ctx,
- struct sysval_uniform *uniform)
+ struct sysval_uniform *uniform)
{
const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
static mali_ptr
panfrost_map_constant_buffer_gpu(
- struct panfrost_context *ctx,
- struct panfrost_constant_buffer *buf,
- unsigned index)
+ struct panfrost_context *ctx,
+ struct panfrost_constant_buffer *buf,
+ unsigned index)
{
struct pipe_constant_buffer *cb = &buf->cb[index];
struct panfrost_resource *rsrc = pan_resource(cb->buffer);
static mali_ptr
panfrost_patch_shader_state(
- struct panfrost_context *ctx,
- struct panfrost_shader_state *ss,
- enum pipe_shader_type stage)
+ struct panfrost_context *ctx,
+ struct panfrost_shader_state *ss,
+ enum pipe_shader_type stage)
{
ss->tripipe->texture_count = ctx->sampler_view_count[stage];
ss->tripipe->sampler_count = ctx->sampler_count[stage];
SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !msaa);
}
- panfrost_job_set_requirements(ctx, job);
+ panfrost_job_set_requirements(ctx, job);
if (ctx->occlusion_query) {
ctx->payload_tiler.gl_enables |= MALI_OCCLUSION_QUERY | MALI_OCCLUSION_PRECISE;
ctx->fragment_shader_core.midgard1.flags |= 0x400;
}
- /* Check if we're using the default blend descriptor (fast path) */
+ /* Check if we're using the default blend descriptor (fast path) */
- bool no_blending =
- !blend.is_shader &&
- (blend.equation.equation->rgb_mode == 0x122) &&
- (blend.equation.equation->alpha_mode == 0x122) &&
- (blend.equation.equation->color_mask == 0xf);
+ bool no_blending =
+ !blend.is_shader &&
+ (blend.equation.equation->rgb_mode == 0x122) &&
+ (blend.equation.equation->alpha_mode == 0x122) &&
+ (blend.equation.equation->color_mask == 0xf);
/* Even on MFBD, the shader descriptor gets blend shaders. It's
* *also* copied to the blend_meta appended (by convention),
maxx = ss->maxx;
miny = ss->miny;
maxy = ss->maxy;
- }
+ }
/* Hardware needs the min/max to be strictly ordered, so flip if we
* need to. The viewport transformation in the vertex shader will
ctx->payload_tiler.postfix.viewport =
panfrost_upload_transient(ctx,
- &view,
- sizeof(struct mali_viewport));
+ &view,
+ sizeof(struct mali_viewport));
ctx->dirty = 0;
}
/* If rasterizer discard is enabled, only submit the vertex job */
bool rasterizer_discard = ctx->rasterizer
- && ctx->rasterizer->base.rasterizer_discard;
+ && ctx->rasterizer->base.rasterizer_discard;
struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false);
struct panfrost_transfer tiler;
static void
panfrost_submit_frame(struct panfrost_context *ctx, bool flush_immediate,
- struct pipe_fence_handle **fence,
+ struct pipe_fence_handle **fence,
struct panfrost_job *job)
{
struct pipe_context *gallium = (struct pipe_context *) ctx;
struct panfrost_screen *screen = pan_screen(gallium->screen);
#ifndef DRY_RUN
-
+
panfrost_job_submit(ctx, job);
/* If visual, we can stall a frame */
static void
panfrost_draw_wallpaper(struct pipe_context *pipe)
{
- struct panfrost_context *ctx = pan_context(pipe);
+ struct panfrost_context *ctx = pan_context(pipe);
- /* Nothing to reload? TODO: MRT wallpapers */
- if (ctx->pipe_framebuffer.cbufs[0] == NULL)
- return;
+ /* Nothing to reload? TODO: MRT wallpapers */
+ if (ctx->pipe_framebuffer.cbufs[0] == NULL)
+ return;
/* Check if the buffer has any content on it worth preserving */
/* Nothing to do! */
if (!job->last_job.gpu && !job->clear) return;
- if (!job->clear)
- panfrost_draw_wallpaper(&ctx->base);
+ if (!job->clear)
+ panfrost_draw_wallpaper(&ctx->base);
/* Whether to stall the pipeline for immediately correct results. Since
* pipelined rendering is quite broken right now (to be fixed by the
unsigned primitive_index = (1 << (info->index_size * 8)) - 1;
if (info->primitive_restart && info->index_size
- && info->restart_index != primitive_index) {
+ && info->restart_index != primitive_index) {
util_draw_vbo_without_prim_restart(pipe, info);
return;
}
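/* Worked example: a 16-bit index buffer (info->index_size == 2) has
* primitive_index = (1 << 16) - 1 = 0xFFFF. Restarting on 0xFFFF uses the
* native path; any other restart_index falls back to the unrolled
* util_draw_vbo_without_prim_restart() call above. */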
* vertex_count, 1) */
panfrost_pack_work_groups_fused(
- &ctx->payload_vertex.prefix,
- &ctx->payload_tiler.prefix,
- 1, vertex_count, info->instance_count,
- 1, 1, 1);
+ &ctx->payload_vertex.prefix,
+ &ctx->payload_tiler.prefix,
+ 1, vertex_count, info->instance_count,
+ 1, 1, 1);
ctx->payload_tiler.prefix.unknown_draw = draw_flags;
/* Point sprites are emulated */
struct panfrost_shader_state *variant =
- ctx->fs ? &ctx->fs->variants[ctx->fs->active_variant] : NULL;
+ ctx->fs ? &ctx->fs->variants[ctx->fs->active_variant] : NULL;
if (ctx->rasterizer->base.sprite_coord_enable || (variant && variant->point_sprite_mask))
ctx->base.bind_fs_state(&ctx->base, ctx->fs);
static bool
panfrost_variant_matches(
- struct panfrost_context *ctx,
- struct panfrost_shader_state *variant,
- enum pipe_shader_type type)
+ struct panfrost_context *ctx,
+ struct panfrost_shader_state *variant,
+ enum pipe_shader_type type)
{
struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base;
struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;
}
if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable |
- variant->point_sprite_mask)) {
+ variant->point_sprite_mask)) {
/* Ensure the same varyings are turned to point sprites */
if (rasterizer->sprite_coord_enable != variant->point_sprite_mask)
return false;
assert(variants->variant_count < MAX_SHADER_VARIANTS);
struct panfrost_shader_state *v =
- &variants->variants[variant];
+ &variants->variants[variant];
v->base = hwcso;
if (!shader_state->compiled) {
panfrost_shader_compile(ctx, shader_state->tripipe, NULL,
- panfrost_job_type_for_pipe(type), shader_state);
+ panfrost_job_type_for_pipe(type), shader_state);
shader_state->compiled = true;
}
}
static enum mali_texture_type
-panfrost_translate_texture_type(enum pipe_texture_target t)
-{
- switch (t) {
- case PIPE_BUFFER:
- case PIPE_TEXTURE_1D:
- case PIPE_TEXTURE_1D_ARRAY:
- return MALI_TEX_1D;
-
- case PIPE_TEXTURE_2D:
- case PIPE_TEXTURE_2D_ARRAY:
- case PIPE_TEXTURE_RECT:
- return MALI_TEX_2D;
-
- case PIPE_TEXTURE_3D:
- return MALI_TEX_3D;
-
- case PIPE_TEXTURE_CUBE:
- case PIPE_TEXTURE_CUBE_ARRAY:
- return MALI_TEX_CUBE;
+panfrost_translate_texture_type(enum pipe_texture_target t) {
+ switch (t)
+ {
+ case PIPE_BUFFER:
+ case PIPE_TEXTURE_1D:
+ case PIPE_TEXTURE_1D_ARRAY:
+ return MALI_TEX_1D;
+
+ case PIPE_TEXTURE_2D:
+ case PIPE_TEXTURE_2D_ARRAY:
+ case PIPE_TEXTURE_RECT:
+ return MALI_TEX_2D;
+
+ case PIPE_TEXTURE_3D:
+ return MALI_TEX_3D;
+
+ case PIPE_TEXTURE_CUBE:
+ case PIPE_TEXTURE_CUBE_ARRAY:
+ return MALI_TEX_CUBE;
- default:
- unreachable("Unknown target");
+ default:
+ unreachable("Unknown target");
}
}
unsigned usage2_layout = 0x10;
switch (prsrc->layout) {
- case PAN_AFBC:
- usage2_layout |= 0x8 | 0x4;
- break;
- case PAN_TILED:
- usage2_layout |= 0x1;
- break;
- case PAN_LINEAR:
- usage2_layout |= is_depth ? 0x1 : 0x2;
- break;
- default:
- assert(0);
- break;
+ case PAN_AFBC:
+ usage2_layout |= 0x8 | 0x4;
+ break;
+ case PAN_TILED:
+ usage2_layout |= 0x1;
+ break;
+ case PAN_LINEAR:
+ usage2_layout |= is_depth ? 0x1 : 0x2;
+ break;
+ default:
+ assert(0);
+ break;
}
/* Check if we need to set a custom stride by computing the "expected"
}
static struct pipe_query *
-panfrost_create_query(struct pipe_context *pipe,
- unsigned type,
- unsigned index)
+panfrost_create_query(struct pipe_context *pipe,
+ unsigned type,
+ unsigned index)
{
struct panfrost_query *q = rzalloc(pipe, struct panfrost_query);
struct panfrost_query *query = (struct panfrost_query *) q;
switch (query->type) {
- case PIPE_QUERY_OCCLUSION_COUNTER:
- case PIPE_QUERY_OCCLUSION_PREDICATE:
- case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
- {
- /* Allocate a word for the query results to be stored */
- query->transfer = panfrost_allocate_chunk(ctx, sizeof(unsigned), HEAP_DESCRIPTOR);
+ case PIPE_QUERY_OCCLUSION_COUNTER:
+ case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
+ /* Allocate a word for the query results to be stored */
+ query->transfer = panfrost_allocate_chunk(ctx, sizeof(unsigned), HEAP_DESCRIPTOR);
- ctx->occlusion_query = query;
+ ctx->occlusion_query = query;
- break;
- }
+ break;
+ }
- default:
- DBG("Skipping query %d\n", query->type);
- break;
+ default:
+ DBG("Skipping query %d\n", query->type);
+ break;
}
return true;
}
static boolean
-panfrost_get_query_result(struct pipe_context *pipe,
+panfrost_get_query_result(struct pipe_context *pipe,
struct pipe_query *q,
boolean wait,
union pipe_query_result *vresult)
panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);
switch (query->type) {
- case PIPE_QUERY_OCCLUSION_COUNTER:
- case PIPE_QUERY_OCCLUSION_PREDICATE:
- case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
- /* Read back the query results */
- unsigned *result = (unsigned *) query->transfer.cpu;
- unsigned passed = *result;
-
- if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
- vresult->u64 = passed;
- } else {
- vresult->b = !!passed;
- }
-
- break;
+ case PIPE_QUERY_OCCLUSION_COUNTER:
+ case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
+ /* Read back the query results */
+ unsigned *result = (unsigned *) query->transfer.cpu;
+ unsigned passed = *result;
+
+ if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
+ vresult->u64 = passed;
+ } else {
+ vresult->b = !!passed;
}
- default:
- DBG("Skipped query get %d\n", query->type);
- break;
+
+ break;
+ }
+ default:
+ DBG("Skipped query get %d\n", query->type);
+ break;
}
return true;
static struct pipe_stream_output_target *
panfrost_create_stream_output_target(struct pipe_context *pctx,
- struct pipe_resource *prsc,
- unsigned buffer_offset,
- unsigned buffer_size)
+ struct pipe_resource *prsc,
+ unsigned buffer_offset,
+ unsigned buffer_size)
{
struct pipe_stream_output_target *target;
static void
panfrost_stream_output_target_destroy(struct pipe_context *pctx,
- struct pipe_stream_output_target *target)
+ struct pipe_stream_output_target *target)
{
pipe_resource_reference(&target->buffer, NULL);
ralloc_free(target);
static void
panfrost_set_stream_output_targets(struct pipe_context *pctx,
- unsigned num_targets,
- struct pipe_stream_output_target **targets,
- const unsigned *offsets)
+ unsigned num_targets,
+ struct pipe_stream_output_target **targets,
+ const unsigned *offsets)
{
/* STUB */
}
* there's no real advantage to doing so */
bool require_sfbd;
- uint32_t out_sync;
+ uint32_t out_sync;
};
/* Corresponds to the CSO */
void
panfrost_pack_work_groups_compute(
- struct mali_vertex_tiler_prefix *out,
- unsigned num_x,
- unsigned num_y,
- unsigned num_z,
- unsigned size_x,
- unsigned size_y,
- unsigned size_z);
+ struct mali_vertex_tiler_prefix *out,
+ unsigned num_x,
+ unsigned num_y,
+ unsigned num_z,
+ unsigned size_x,
+ unsigned size_y,
+ unsigned size_z);
void
panfrost_pack_work_groups_fused(
- struct mali_vertex_tiler_prefix *vertex,
- struct mali_vertex_tiler_prefix *tiler,
- unsigned num_x,
- unsigned num_y,
- unsigned num_z,
- unsigned size_x,
- unsigned size_y,
- unsigned size_z);
+ struct mali_vertex_tiler_prefix *vertex,
+ struct mali_vertex_tiler_prefix *tiler,
+ unsigned num_x,
+ unsigned num_y,
+ unsigned num_z,
+ unsigned size_x,
+ unsigned size_y,
+ unsigned size_z);
/* Instancing */
struct pan_shift_odd
panfrost_padded_vertex_count(
- unsigned vertex_count,
- bool primitive_pot);
+ unsigned vertex_count,
+ bool primitive_pot);
unsigned
void
panfrost_drm_allocate_slab(struct panfrost_screen *screen,
- struct panfrost_memory *mem,
- size_t pages,
- bool same_va,
- int extra_flags,
- int commit_count,
- int extent)
+ struct panfrost_memory *mem,
+ size_t pages,
+ bool same_va,
+ int extra_flags,
+ int commit_count,
+ int extent)
{
// TODO cache allocations
// TODO properly handle errors
struct panfrost_bo *
panfrost_drm_import_bo(struct panfrost_screen *screen, int fd)
{
- struct panfrost_bo *bo = rzalloc(screen, struct panfrost_bo);
+ struct panfrost_bo *bo = rzalloc(screen, struct panfrost_bo);
struct drm_panfrost_get_bo_offset get_bo_offset = {0,};
int ret;
unsigned gem_handle;
- ret = drmPrimeFDToHandle(screen->fd, fd, &gem_handle);
- assert(!ret);
+ ret = drmPrimeFDToHandle(screen->fd, fd, &gem_handle);
+ assert(!ret);
- get_bo_offset.handle = gem_handle;
+ get_bo_offset.handle = gem_handle;
ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_BO_OFFSET, &get_bo_offset);
assert(!ret);
- bo->gem_handle = gem_handle;
+ bo->gem_handle = gem_handle;
bo->gpu = (mali_ptr) get_bo_offset.offset;
bo->size = lseek(fd, 0, SEEK_END);
assert(bo->size > 0);
submit.out_sync = ctx->out_sync;
- submit.jc = job_desc;
- submit.requirements = reqs;
+ submit.jc = job_desc;
+ submit.requirements = reqs;
- bo_handles = calloc(job->bos->entries, sizeof(*bo_handles));
- assert(bo_handles);
+ bo_handles = calloc(job->bos->entries, sizeof(*bo_handles));
+ assert(bo_handles);
- set_foreach(job->bos, entry) {
- struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
- assert(bo->gem_handle > 0);
- bo_handles[submit.bo_handle_count++] = bo->gem_handle;
- }
+ set_foreach(job->bos, entry) {
+ struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
+ assert(bo->gem_handle > 0);
+ bo_handles[submit.bo_handle_count++] = bo->gem_handle;
+ }
- submit.bo_handles = (u64) (uintptr_t) bo_handles;
- ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
- free(bo_handles);
- if (ret) {
- fprintf(stderr, "Error submitting: %m\n");
- return errno;
- }
+ submit.bo_handles = (u64) (uintptr_t) bo_handles;
+ ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
+ free(bo_handles);
+ if (ret) {
+ fprintf(stderr, "Error submitting: %m\n");
+ return errno;
+ }
/* Trace the job if we're doing that */
if (pan_debug & PAN_DBG_TRACE) {
pandecode_replay_jc(submit.jc, FALSE);
}
- return 0;
+ return 0;
}
int
panfrost_drm_submit_vs_fs_job(struct panfrost_context *ctx, bool has_draws, bool is_scanout)
{
- int ret;
+ int ret;
struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
void
panfrost_drm_force_flush_fragment(struct panfrost_context *ctx,
- struct pipe_fence_handle **fence)
+ struct pipe_fence_handle **fence)
{
struct pipe_context *gallium = (struct pipe_context *) ctx;
struct panfrost_screen *screen = pan_screen(gallium->screen);
if (!screen->last_fragment_flushed) {
- drmSyncobjWait(screen->fd, &ctx->out_sync, 1, INT64_MAX, 0, NULL);
+ drmSyncobjWait(screen->fd, &ctx->out_sync, 1, INT64_MAX, 0, NULL);
screen->last_fragment_flushed = true;
/* The job finished up, so we're safe to clean it up now */
panfrost_free_job(ctx, screen->last_job);
- }
+ }
if (fence) {
struct panfrost_fence *f = panfrost_fence_create(ctx);
struct drm_panfrost_get_param get_param = {0,};
int ret;
- get_param.param = DRM_PANFROST_PARAM_GPU_PROD_ID;
+ get_param.param = DRM_PANFROST_PARAM_GPU_PROD_ID;
ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_PARAM, &get_param);
assert(!ret);
- return get_param.value;
+ return get_param.value;
}
int
void
panfrost_drm_fence_reference(struct pipe_screen *screen,
- struct pipe_fence_handle **ptr,
- struct pipe_fence_handle *fence)
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
{
struct panfrost_fence **p = (struct panfrost_fence **)ptr;
struct panfrost_fence *f = (struct panfrost_fence *)fence;
boolean
panfrost_drm_fence_finish(struct pipe_screen *pscreen,
- struct pipe_context *ctx,
- struct pipe_fence_handle *fence,
- uint64_t timeout)
+ struct pipe_context *ctx,
+ struct pipe_fence_handle *fence,
+ uint64_t timeout)
{
struct panfrost_screen *screen = pan_screen(pscreen);
struct panfrost_fence *f = (struct panfrost_fence *)fence;
panfrost_translate_channel_width(unsigned size)
{
switch (size) {
- case 4:
- return MALI_CHANNEL_4;
- case 8:
- return MALI_CHANNEL_8;
- case 16:
- return MALI_CHANNEL_16;
- case 32:
- return MALI_CHANNEL_32;
- default:
- unreachable("Invalid width");
+ case 4:
+ return MALI_CHANNEL_4;
+ case 8:
+ return MALI_CHANNEL_8;
+ case 16:
+ return MALI_CHANNEL_16;
+ case 32:
+ return MALI_CHANNEL_32;
+ default:
+ unreachable("Invalid width");
}
}
static unsigned
-panfrost_translate_channel_type(unsigned type, unsigned size, bool norm) {
+panfrost_translate_channel_type(unsigned type, unsigned size, bool norm)
+{
switch (type) {
- case UTIL_FORMAT_TYPE_UNSIGNED:
- return norm ? MALI_FORMAT_UNORM : MALI_FORMAT_UINT;
-
- case UTIL_FORMAT_TYPE_SIGNED:
- return norm ? MALI_FORMAT_SNORM : MALI_FORMAT_SINT;
-
- case UTIL_FORMAT_TYPE_FLOAT:
- if (size == 16) {
- /* With FLOAT, fp16 */
- return MALI_FORMAT_SINT;
- } else if (size == 32) {
-                        /* With FLOAT, fp32 */
- return MALI_FORMAT_UNORM;
- } else {
- assert(0);
- return 0;
- }
-
- default:
- unreachable("Invalid type");
+ case UTIL_FORMAT_TYPE_UNSIGNED:
+ return norm ? MALI_FORMAT_UNORM : MALI_FORMAT_UINT;
+
+ case UTIL_FORMAT_TYPE_SIGNED:
+ return norm ? MALI_FORMAT_SNORM : MALI_FORMAT_SINT;
+
+ case UTIL_FORMAT_TYPE_FLOAT:
+ if (size == 16) {
+ /* With FLOAT, fp16 */
+ return MALI_FORMAT_SINT;
+ } else if (size == 32) {
+                        /* With FLOAT, fp32 */
+ return MALI_FORMAT_UNORM;
+ } else {
+ assert(0);
+ return 0;
+ }
+
+ default:
+ unreachable("Invalid type");
}
}
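/* Combined with the MALI_CHANNEL_FLOAT bit the caller sets, the float
* encodings therefore appear to be (as far as is known):
*
*   fp16: MALI_CHANNEL_FLOAT | MALI_FORMAT_SINT
*   fp32: MALI_CHANNEL_FLOAT | MALI_FORMAT_UNORM
*
* i.e. the SINT/UNORM values are reused as width selectors here, not as
* channel types. */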
* description */
enum mali_format
-panfrost_find_format(const struct util_format_description *desc)
-{
+panfrost_find_format(const struct util_format_description *desc) {
/* Find first non-VOID channel */
struct util_format_channel_description chan = desc->channel[0];
- for (unsigned c = 0; c < 4; ++c) {
+ for (unsigned c = 0; c < 4; ++c)
+ {
if (desc->channel[c].type == UTIL_FORMAT_TYPE_VOID)
continue;
}
/* Check for special formats */
- switch (desc->format) {
- case PIPE_FORMAT_YV12:
- case PIPE_FORMAT_YV16:
- case PIPE_FORMAT_IYUV:
- case PIPE_FORMAT_NV21:
- fprintf(stderr, "YUV format type %s (%d) is not yet supported, but it's probably close to NV12!\n", desc->name, desc->format);
- assert(0);
- break;
-
- case PIPE_FORMAT_NV12:
- return MALI_NV12;
-
- case PIPE_FORMAT_R10G10B10X2_UNORM:
- case PIPE_FORMAT_B10G10R10X2_UNORM:
- case PIPE_FORMAT_R10G10B10A2_UNORM:
- case PIPE_FORMAT_B10G10R10A2_UNORM:
- return MALI_RGB10_A2_UNORM;
-
- case PIPE_FORMAT_R10G10B10X2_SNORM:
- case PIPE_FORMAT_R10G10B10A2_SNORM:
- case PIPE_FORMAT_B10G10R10A2_SNORM:
- return MALI_RGB10_A2_SNORM;
-
- case PIPE_FORMAT_R10G10B10A2_UINT:
- case PIPE_FORMAT_B10G10R10A2_UINT:
- return MALI_RGB10_A2UI;
-
-        /* TODO: ZS isn't really a special case */
- case PIPE_FORMAT_Z32_UNORM:
- return MALI_Z32_UNORM;
-
- case PIPE_FORMAT_B5G6R5_UNORM:
- return MALI_RGB565;
-
- case PIPE_FORMAT_B5G5R5A1_UNORM:
- return MALI_RGB5_A1_UNORM;
-
- case PIPE_FORMAT_A1B5G5R5_UNORM:
- case PIPE_FORMAT_X1B5G5R5_UNORM:
- /* Not supported - this is backwards from OpenGL! */
- assert(0);
- break;
-
- case PIPE_FORMAT_R32_FIXED:
- return MALI_R32_FIXED;
- case PIPE_FORMAT_R32G32_FIXED:
- return MALI_RG32_FIXED;
- case PIPE_FORMAT_R32G32B32_FIXED:
- return MALI_RGB32_FIXED;
- case PIPE_FORMAT_R32G32B32A32_FIXED:
- return MALI_RGBA32_FIXED;
-
- case PIPE_FORMAT_R11G11B10_FLOAT:
- return MALI_R11F_G11F_B10F;
- case PIPE_FORMAT_R9G9B9E5_FLOAT:
- return MALI_R9F_G9F_B9F_E5F;
-
- default:
- /* Fallthrough to default */
- break;
+ switch (desc->format)
+ {
+ case PIPE_FORMAT_YV12:
+ case PIPE_FORMAT_YV16:
+ case PIPE_FORMAT_IYUV:
+ case PIPE_FORMAT_NV21:
+ fprintf(stderr, "YUV format type %s (%d) is not yet supported, but it's probably close to NV12!\n", desc->name, desc->format);
+ assert(0);
+ break;
+
+ case PIPE_FORMAT_NV12:
+ return MALI_NV12;
+
+ case PIPE_FORMAT_R10G10B10X2_UNORM:
+ case PIPE_FORMAT_B10G10R10X2_UNORM:
+ case PIPE_FORMAT_R10G10B10A2_UNORM:
+ case PIPE_FORMAT_B10G10R10A2_UNORM:
+ return MALI_RGB10_A2_UNORM;
+
+ case PIPE_FORMAT_R10G10B10X2_SNORM:
+ case PIPE_FORMAT_R10G10B10A2_SNORM:
+ case PIPE_FORMAT_B10G10R10A2_SNORM:
+ return MALI_RGB10_A2_SNORM;
+
+ case PIPE_FORMAT_R10G10B10A2_UINT:
+ case PIPE_FORMAT_B10G10R10A2_UINT:
+ return MALI_RGB10_A2UI;
+
+        /* TODO: ZS isn't really a special case */
+ case PIPE_FORMAT_Z32_UNORM:
+ return MALI_Z32_UNORM;
+
+ case PIPE_FORMAT_B5G6R5_UNORM:
+ return MALI_RGB565;
+
+ case PIPE_FORMAT_B5G5R5A1_UNORM:
+ return MALI_RGB5_A1_UNORM;
+
+ case PIPE_FORMAT_A1B5G5R5_UNORM:
+ case PIPE_FORMAT_X1B5G5R5_UNORM:
+ /* Not supported - this is backwards from OpenGL! */
+ assert(0);
+ break;
+
+ case PIPE_FORMAT_R32_FIXED:
+ return MALI_R32_FIXED;
+ case PIPE_FORMAT_R32G32_FIXED:
+ return MALI_RG32_FIXED;
+ case PIPE_FORMAT_R32G32B32_FIXED:
+ return MALI_RGB32_FIXED;
+ case PIPE_FORMAT_R32G32B32A32_FIXED:
+ return MALI_RGBA32_FIXED;
+
+ case PIPE_FORMAT_R11G11B10_FLOAT:
+ return MALI_R11F_G11F_B10F;
+ case PIPE_FORMAT_R9G9B9E5_FLOAT:
+ return MALI_R9F_G9F_B9F_E5F;
+
+ default:
+ /* Fallthrough to default */
+ break;
}
/* Formats must match in channel count */
assert(desc->nr_channels >= 1 && desc->nr_channels <= 4);
unsigned format = MALI_NR_CHANNELS(desc->nr_channels);
- switch (chan.type) {
- case UTIL_FORMAT_TYPE_UNSIGNED:
- case UTIL_FORMAT_TYPE_SIGNED:
- case UTIL_FORMAT_TYPE_FIXED:
- /* Channel width */
- format |= panfrost_translate_channel_width(chan.size);
+ switch (chan.type)
+ {
+ case UTIL_FORMAT_TYPE_UNSIGNED:
+ case UTIL_FORMAT_TYPE_SIGNED:
+ case UTIL_FORMAT_TYPE_FIXED:
+ /* Channel width */
+ format |= panfrost_translate_channel_width(chan.size);
- /* Channel type */
- format |= panfrost_translate_channel_type(chan.type, chan.size, chan.normalized);
- break;
+ /* Channel type */
+ format |= panfrost_translate_channel_type(chan.type, chan.size, chan.normalized);
+ break;
- case UTIL_FORMAT_TYPE_FLOAT:
- /* Float formats use a special width and encode width
- * with type mixed */
+ case UTIL_FORMAT_TYPE_FLOAT:
+ /* Float formats use a special width and encode width
+ * with type mixed */
- format |= MALI_CHANNEL_FLOAT;
- format |= panfrost_translate_channel_type(chan.type, chan.size, chan.normalized);
- break;
+ format |= MALI_CHANNEL_FLOAT;
+ format |= panfrost_translate_channel_type(chan.type, chan.size, chan.normalized);
+ break;
- default:
- printf("%s\n", util_format_name(desc->format));
- unreachable("Invalid format type");
+ default:
+ printf("%s\n", util_format_name(desc->format));
+ unreachable("Invalid format type");
}
return (enum mali_format) format;
panfrost_fragment_job(struct panfrost_context *ctx, bool has_draws)
{
mali_ptr framebuffer = ctx->require_sfbd ?
- panfrost_sfbd_fragment(ctx, has_draws) :
- panfrost_mfbd_fragment(ctx, has_draws);
+ panfrost_sfbd_fragment(ctx, has_draws) :
+ panfrost_mfbd_fragment(ctx, has_draws);
/* Mark the affected buffers as initialized, since we're writing to them */
struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
panfrost_small_padded_vertex_count(unsigned idx)
{
return pan_factored(
- small_lut[idx].pot,
- small_lut[idx].odd);
+ small_lut[idx].pot,
+ small_lut[idx].odd);
}
static struct pan_shift_odd
struct pan_shift_odd
panfrost_padded_vertex_count(
- unsigned vertex_count,
- bool pot)
+ unsigned vertex_count,
+ bool pot)
{
assert(vertex_count > 0);
if (vertex_count < 20) {
/* Add an off-by-one if it won't align naturally (quirk of the hardware) */
//if (!pot)
- // vertex_count++;
+ // vertex_count++;
return panfrost_small_padded_vertex_count(vertex_count);
} else
static unsigned
panfrost_vertex_instanced(
- struct panfrost_job *batch,
- struct panfrost_resource *rsrc,
- unsigned divisor,
- union mali_attr *attrs,
- mali_ptr addr,
- unsigned vertex_count,
- unsigned instance_count)
+ struct panfrost_job *batch,
+ struct panfrost_resource *rsrc,
+ unsigned divisor,
+ union mali_attr *attrs,
+ mali_ptr addr,
+ unsigned vertex_count,
+ unsigned instance_count)
{
/* First, grab the padded vertex count */
} else if (util_is_power_of_two_or_zero(hw_divisor)) {
/* If there is a divisor but the hardware divisor works out to
* a power of two (not terribly exceptional), we can use an
- * easy path (just shifting) */
+ * easy path (just shifting) */
attrs->elements |= MALI_ATTR_POT_DIVIDE;
attrs->shift = __builtin_ctz(hw_divisor);
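/* Example with an assumed hw_divisor of 8: __builtin_ctz(8) == 3, so the
* hardware evaluates (instance / 8) as (instance >> 3). */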
attrs[k++].elements |= MALI_ATTR_LINEAR;
} else {
k += panfrost_vertex_instanced(
- batch, rsrc, divisor, &attrs[k], addr, vertex_count, instanced_count);
+ batch, rsrc, divisor, &attrs[k], addr, vertex_count, instanced_count);
}
}
void
panfrost_pack_work_groups_compute(
- struct mali_vertex_tiler_prefix *out,
- unsigned num_x,
- unsigned num_y,
- unsigned num_z,
- unsigned size_x,
- unsigned size_y,
- unsigned size_z)
+ struct mali_vertex_tiler_prefix *out,
+ unsigned num_x,
+ unsigned num_y,
+ unsigned num_z,
+ unsigned size_x,
+ unsigned size_y,
+ unsigned size_z)
{
/* First of all, all 6 values are off-by-one (strictly positive).
* Account for that, first by ensuring all values are strictly positive
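/* Sketch of the bias with assumed counts: a 1 x 800 x 1 grid is encoded
* as (0, 799, 0), each field storing count - 1, to be decoded back with a
* +1 on the hardware side. */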
/* Packs vertex/tiler descriptors simultaneously */
void
panfrost_pack_work_groups_fused(
- struct mali_vertex_tiler_prefix *vertex,
- struct mali_vertex_tiler_prefix *tiler,
- unsigned num_x,
- unsigned num_y,
- unsigned num_z,
- unsigned size_x,
- unsigned size_y,
- unsigned size_z)
+ struct mali_vertex_tiler_prefix *vertex,
+ struct mali_vertex_tiler_prefix *tiler,
+ unsigned num_x,
+ unsigned num_y,
+ unsigned num_z,
+ unsigned size_x,
+ unsigned size_y,
+ unsigned size_z)
{
panfrost_pack_work_groups_compute(vertex, num_x, num_y, num_z, size_x, size_y, size_z);
util_dynarray_init(&job->headers, job);
util_dynarray_init(&job->gpu_headers, job);
-
+
return job;
}
struct panfrost_job *
panfrost_get_job(struct panfrost_context *ctx,
- struct pipe_surface **cbufs, struct pipe_surface *zsbuf)
+ struct pipe_surface **cbufs, struct pipe_surface *zsbuf)
{
/* Lookup the job first */
},
.zsbuf = zsbuf
};
-
+
struct hash_entry *entry = _mesa_hash_table_search(ctx->jobs, &key);
if (entry)
void
panfrost_flush_jobs_writing_resource(struct panfrost_context *panfrost,
- struct pipe_resource *prsc)
+ struct pipe_resource *prsc)
{
#if 0
struct hash_entry *entry = _mesa_hash_table_search(panfrost->write_jobs,
- prsc);
+ prsc);
if (entry) {
struct panfrost_job *job = entry->data;
panfrost_job_submit(panfrost, job);
void
panfrost_job_set_requirements(struct panfrost_context *ctx,
- struct panfrost_job *job)
+ struct panfrost_job *job)
{
if (ctx->rasterizer && ctx->rasterizer->base.multisample)
job->requirements |= PAN_REQ_MSAA;
if (util_format_is_rgba8_variant(desc)) {
pan_pack_color_32(packed,
- (float_to_ubyte(clear_alpha) << 24) |
- (float_to_ubyte(color->f[2]) << 16) |
- (float_to_ubyte(color->f[1]) << 8) |
- (float_to_ubyte(color->f[0]) << 0));
+ (float_to_ubyte(clear_alpha) << 24) |
+ (float_to_ubyte(color->f[2]) << 16) |
+ (float_to_ubyte(color->f[1]) << 8) |
+ (float_to_ubyte(color->f[0]) << 0));
} else if (format == PIPE_FORMAT_B5G6R5_UNORM) {
/* First, we convert the components to R5, G6, B5 separately */
unsigned r5 = CLAMP(color->f[0], 0.0, 1.0) * 31.0;
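/* Continuing the conversion for an assumed clear colour (0.0, 0.5, 1.0):
* r5 = 0, g6 = (unsigned) (0.5 * 63.0) = 31 and b5 = 31; each channel is
* scaled to its own bit width before being packed together. */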
void
panfrost_job_clear(struct panfrost_context *ctx,
- struct panfrost_job *job,
- unsigned buffers,
- const union pipe_color_union *color,
- double depth, unsigned stencil)
+ struct panfrost_job *job,
+ unsigned buffers,
+ const union pipe_color_union *color,
+ double depth, unsigned stencil)
{
if (buffers & PIPE_CLEAR_COLOR) {
* would emit a quad instead and we wouldn't go down this code path) */
panfrost_job_union_scissor(job, 0, 0,
- ctx->pipe_framebuffer.width,
- ctx->pipe_framebuffer.height);
+ ctx->pipe_framebuffer.width,
+ ctx->pipe_framebuffer.height);
}
void
panfrost_flush_jobs_reading_resource(struct panfrost_context *panfrost,
- struct pipe_resource *prsc)
+ struct pipe_resource *prsc)
{
struct panfrost_resource *rsc = pan_resource(prsc);
void
panfrost_job_union_scissor(struct panfrost_job *job,
- unsigned minx, unsigned miny,
- unsigned maxx, unsigned maxy)
+ unsigned minx, unsigned miny,
+ unsigned maxx, unsigned maxy)
{
job->minx = MIN2(job->minx, minx);
job->miny = MIN2(job->miny, miny);
panfrost_job_compare);
ctx->write_jobs = _mesa_hash_table_create(ctx,
- _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
}
struct panfrost_job *
panfrost_get_job(struct panfrost_context *ctx,
- struct pipe_surface **cbufs, struct pipe_surface *zsbuf);
+ struct pipe_surface **cbufs, struct pipe_surface *zsbuf);
struct panfrost_job *
panfrost_get_job_for_fbo(struct panfrost_context *ctx);
void
panfrost_flush_jobs_writing_resource(struct panfrost_context *panfrost,
- struct pipe_resource *prsc);
+ struct pipe_resource *prsc);
void
panfrost_flush_jobs_reading_resource(struct panfrost_context *panfrost,
- struct pipe_resource *prsc);
+ struct pipe_resource *prsc);
void
panfrost_job_submit(struct panfrost_context *ctx, struct panfrost_job *job);
void
panfrost_job_set_requirements(struct panfrost_context *ctx,
- struct panfrost_job *job);
+ struct panfrost_job *job);
void
panfrost_job_clear(struct panfrost_context *ctx,
- struct panfrost_job *job,
- unsigned buffers,
- const union pipe_color_union *color,
- double depth, unsigned stencil);
+ struct panfrost_job *job,
+ unsigned buffers,
+ const union pipe_color_union *color,
+ double depth, unsigned stencil);
void
panfrost_job_union_scissor(struct panfrost_job *job,
- unsigned minx, unsigned miny,
- unsigned maxx, unsigned maxy);
+ unsigned minx, unsigned miny,
+ unsigned maxx, unsigned maxy);
/* Scoreboarding */
void
panfrost_scoreboard_queue_compute_job(
- struct panfrost_job *batch,
- struct panfrost_transfer job);
+ struct panfrost_job *batch,
+ struct panfrost_transfer job);
void
panfrost_scoreboard_queue_vertex_job(
- struct panfrost_job *batch,
- struct panfrost_transfer vertex,
- bool requires_tiling);
+ struct panfrost_job *batch,
+ struct panfrost_transfer vertex,
+ bool requires_tiling);
void
panfrost_scoreboard_queue_tiler_job(
- struct panfrost_job *batch,
- struct panfrost_transfer tiler);
+ struct panfrost_job *batch,
+ struct panfrost_transfer tiler);
void
panfrost_scoreboard_queue_fused_job(
- struct panfrost_job *batch,
- struct panfrost_transfer vertex,
- struct panfrost_transfer tiler);
+ struct panfrost_job *batch,
+ struct panfrost_transfer vertex,
+ struct panfrost_transfer tiler);
void
panfrost_scoreboard_queue_fused_job_prepend(
- struct panfrost_job *batch,
- struct panfrost_transfer vertex,
- struct panfrost_transfer tiler);
+ struct panfrost_job *batch,
+ struct panfrost_transfer vertex,
+ struct panfrost_transfer tiler);
void
panfrost_scoreboard_link_batch(struct panfrost_job *batch);
return fmt;
/* Set flags for alternative formats */
-
+
switch (linearized) {
- case PIPE_FORMAT_B5G6R5_UNORM:
- fmt.unk1 = 0x14000000;
- fmt.nr_channels = MALI_POSITIVE(2);
- fmt.unk3 |= 0x1;
- break;
-
- case PIPE_FORMAT_A4B4G4R4_UNORM:
- case PIPE_FORMAT_B4G4R4A4_UNORM:
- fmt.unk1 = 0x10000000;
- fmt.unk3 = 0x5;
- fmt.nr_channels = MALI_POSITIVE(1);
- break;
-
- case PIPE_FORMAT_R10G10B10A2_UNORM:
- case PIPE_FORMAT_B10G10R10A2_UNORM:
- case PIPE_FORMAT_R10G10B10X2_UNORM:
- case PIPE_FORMAT_B10G10R10X2_UNORM:
- fmt.unk1 = 0x08000000;
- fmt.unk3 = 0x6;
- fmt.nr_channels = MALI_POSITIVE(1);
- break;
-
- /* Generic 8-bit */
- case PIPE_FORMAT_R8_UINT:
- case PIPE_FORMAT_R8_SINT:
- fmt.unk1 = 0x80000000;
- fmt.unk3 = 0x0;
- fmt.nr_channels = MALI_POSITIVE(1);
- break;
-
- /* Generic 32-bit */
- case PIPE_FORMAT_R11G11B10_FLOAT:
- case PIPE_FORMAT_R8G8B8A8_UINT:
- case PIPE_FORMAT_R8G8B8A8_SINT:
- case PIPE_FORMAT_R16G16_FLOAT:
- case PIPE_FORMAT_R16G16_UINT:
- case PIPE_FORMAT_R16G16_SINT:
- case PIPE_FORMAT_R32_FLOAT:
- case PIPE_FORMAT_R32_UINT:
- case PIPE_FORMAT_R32_SINT:
- case PIPE_FORMAT_R10G10B10A2_UINT:
- fmt.unk1 = 0x88000000;
- fmt.unk3 = 0x0;
- fmt.nr_channels = MALI_POSITIVE(4);
- break;
-
- /* Generic 16-bit */
- case PIPE_FORMAT_R8G8_UINT:
- case PIPE_FORMAT_R8G8_SINT:
- case PIPE_FORMAT_R16_FLOAT:
- case PIPE_FORMAT_R16_UINT:
- case PIPE_FORMAT_R16_SINT:
- case PIPE_FORMAT_B5G5R5A1_UNORM:
- fmt.unk1 = 0x84000000;
- fmt.unk3 = 0x0;
- fmt.nr_channels = MALI_POSITIVE(2);
- break;
-
- /* Generic 64-bit */
- case PIPE_FORMAT_R32G32_FLOAT:
- case PIPE_FORMAT_R32G32_SINT:
- case PIPE_FORMAT_R32G32_UINT:
- case PIPE_FORMAT_R16G16B16A16_FLOAT:
- case PIPE_FORMAT_R16G16B16A16_SINT:
- case PIPE_FORMAT_R16G16B16A16_UINT:
- fmt.unk1 = 0x8c000000;
- fmt.unk3 = 0x1;
- fmt.nr_channels = MALI_POSITIVE(2);
- break;
-
- /* Generic 128-bit */
- case PIPE_FORMAT_R32G32B32A32_FLOAT:
- case PIPE_FORMAT_R32G32B32A32_SINT:
- case PIPE_FORMAT_R32G32B32A32_UINT:
- fmt.unk1 = 0x90000000;
- fmt.unk3 = 0x1;
- fmt.nr_channels = MALI_POSITIVE(4);
- break;
-
- default:
-                unreachable("Invalid format for rendering");
+ case PIPE_FORMAT_B5G6R5_UNORM:
+ fmt.unk1 = 0x14000000;
+ fmt.nr_channels = MALI_POSITIVE(2);
+ fmt.unk3 |= 0x1;
+ break;
+
+ case PIPE_FORMAT_A4B4G4R4_UNORM:
+ case PIPE_FORMAT_B4G4R4A4_UNORM:
+ fmt.unk1 = 0x10000000;
+ fmt.unk3 = 0x5;
+ fmt.nr_channels = MALI_POSITIVE(1);
+ break;
+
+ case PIPE_FORMAT_R10G10B10A2_UNORM:
+ case PIPE_FORMAT_B10G10R10A2_UNORM:
+ case PIPE_FORMAT_R10G10B10X2_UNORM:
+ case PIPE_FORMAT_B10G10R10X2_UNORM:
+ fmt.unk1 = 0x08000000;
+ fmt.unk3 = 0x6;
+ fmt.nr_channels = MALI_POSITIVE(1);
+ break;
+
+ /* Generic 8-bit */
+ case PIPE_FORMAT_R8_UINT:
+ case PIPE_FORMAT_R8_SINT:
+ fmt.unk1 = 0x80000000;
+ fmt.unk3 = 0x0;
+ fmt.nr_channels = MALI_POSITIVE(1);
+ break;
+
+ /* Generic 32-bit */
+ case PIPE_FORMAT_R11G11B10_FLOAT:
+ case PIPE_FORMAT_R8G8B8A8_UINT:
+ case PIPE_FORMAT_R8G8B8A8_SINT:
+ case PIPE_FORMAT_R16G16_FLOAT:
+ case PIPE_FORMAT_R16G16_UINT:
+ case PIPE_FORMAT_R16G16_SINT:
+ case PIPE_FORMAT_R32_FLOAT:
+ case PIPE_FORMAT_R32_UINT:
+ case PIPE_FORMAT_R32_SINT:
+ case PIPE_FORMAT_R10G10B10A2_UINT:
+ fmt.unk1 = 0x88000000;
+ fmt.unk3 = 0x0;
+ fmt.nr_channels = MALI_POSITIVE(4);
+ break;
+
+ /* Generic 16-bit */
+ case PIPE_FORMAT_R8G8_UINT:
+ case PIPE_FORMAT_R8G8_SINT:
+ case PIPE_FORMAT_R16_FLOAT:
+ case PIPE_FORMAT_R16_UINT:
+ case PIPE_FORMAT_R16_SINT:
+ case PIPE_FORMAT_B5G5R5A1_UNORM:
+ fmt.unk1 = 0x84000000;
+ fmt.unk3 = 0x0;
+ fmt.nr_channels = MALI_POSITIVE(2);
+ break;
+
+ /* Generic 64-bit */
+ case PIPE_FORMAT_R32G32_FLOAT:
+ case PIPE_FORMAT_R32G32_SINT:
+ case PIPE_FORMAT_R32G32_UINT:
+ case PIPE_FORMAT_R16G16B16A16_FLOAT:
+ case PIPE_FORMAT_R16G16B16A16_SINT:
+ case PIPE_FORMAT_R16G16B16A16_UINT:
+ fmt.unk1 = 0x8c000000;
+ fmt.unk3 = 0x1;
+ fmt.nr_channels = MALI_POSITIVE(2);
+ break;
+
+ /* Generic 128-bit */
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ case PIPE_FORMAT_R32G32B32A32_SINT:
+ case PIPE_FORMAT_R32G32B32A32_UINT:
+ fmt.unk1 = 0x90000000;
+ fmt.unk3 = 0x1;
+ fmt.nr_channels = MALI_POSITIVE(4);
+ break;
+
+ default:
+                unreachable("Invalid format for rendering");
}
return fmt;
static void
panfrost_mfbd_clear(
- struct panfrost_job *job,
- struct bifrost_framebuffer *fb,
- struct bifrost_fb_extra *fbx,
- struct bifrost_render_target *rts,
- unsigned rt_count)
+ struct panfrost_job *job,
+ struct bifrost_framebuffer *fb,
+ struct bifrost_fb_extra *fbx,
+ struct bifrost_render_target *rts,
+ unsigned rt_count)
{
for (unsigned i = 0; i < rt_count; ++i) {
if (!(job->clear & (PIPE_CLEAR_COLOR0 << i)))
static void
panfrost_mfbd_set_cbuf(
- struct bifrost_render_target *rt,
- struct pipe_surface *surf)
+ struct bifrost_render_target *rt,
+ struct pipe_surface *surf)
{
struct panfrost_resource *rsrc = pan_resource(surf->texture);
static void
panfrost_mfbd_set_zsbuf(
- struct bifrost_framebuffer *fb,
- struct bifrost_fb_extra *fbx,
- struct pipe_surface *surf)
+ struct bifrost_framebuffer *fb,
+ struct bifrost_fb_extra *fbx,
+ struct pipe_surface *surf)
{
struct panfrost_resource *rsrc = pan_resource(surf->texture);
static mali_ptr
panfrost_mfbd_upload(
- struct panfrost_context *ctx,
- struct bifrost_framebuffer *fb,
- struct bifrost_fb_extra *fbx,
- struct bifrost_render_target *rts,
- unsigned cbufs)
+ struct panfrost_context *ctx,
+ struct bifrost_framebuffer *fb,
+ struct bifrost_fb_extra *fbx,
+ struct bifrost_render_target *rts,
+ unsigned cbufs)
{
off_t offset = 0;
#define DEFINE_CASE(name) case MALI_## name: return "MALI_" #name
char *pandecode_format_name(enum mali_format format)
{
- static char unk_format_str[5];
-
- switch (format) {
- DEFINE_CASE(RGB565);
- DEFINE_CASE(RGB5_A1_UNORM);
- DEFINE_CASE(RGB10_A2_UNORM);
- DEFINE_CASE(RGB10_A2_SNORM);
- DEFINE_CASE(RGB10_A2UI);
- DEFINE_CASE(RGB10_A2I);
- DEFINE_CASE(NV12);
- DEFINE_CASE(Z32_UNORM);
- DEFINE_CASE(R32_FIXED);
- DEFINE_CASE(RG32_FIXED);
- DEFINE_CASE(RGB32_FIXED);
- DEFINE_CASE(RGBA32_FIXED);
- DEFINE_CASE(R11F_G11F_B10F);
- DEFINE_CASE(R9F_G9F_B9F_E5F);
- DEFINE_CASE(VARYING_POS);
- DEFINE_CASE(VARYING_DISCARD);
-
- DEFINE_CASE(R8_SNORM);
- DEFINE_CASE(R16_SNORM);
- DEFINE_CASE(R32_SNORM);
- DEFINE_CASE(RG8_SNORM);
- DEFINE_CASE(RG16_SNORM);
- DEFINE_CASE(RG32_SNORM);
- DEFINE_CASE(RGB8_SNORM);
- DEFINE_CASE(RGB16_SNORM);
- DEFINE_CASE(RGB32_SNORM);
- DEFINE_CASE(RGBA8_SNORM);
- DEFINE_CASE(RGBA16_SNORM);
- DEFINE_CASE(RGBA32_SNORM);
-
- DEFINE_CASE(R8UI);
- DEFINE_CASE(R16UI);
- DEFINE_CASE(R32UI);
- DEFINE_CASE(RG8UI);
- DEFINE_CASE(RG16UI);
- DEFINE_CASE(RG32UI);
- DEFINE_CASE(RGB8UI);
- DEFINE_CASE(RGB16UI);
- DEFINE_CASE(RGB32UI);
- DEFINE_CASE(RGBA8UI);
- DEFINE_CASE(RGBA16UI);
- DEFINE_CASE(RGBA32UI);
-
- DEFINE_CASE(R8_UNORM);
- DEFINE_CASE(R16_UNORM);
- DEFINE_CASE(R32_UNORM);
- DEFINE_CASE(R32F);
- DEFINE_CASE(RG8_UNORM);
- DEFINE_CASE(RG16_UNORM);
- DEFINE_CASE(RG32_UNORM);
- DEFINE_CASE(RG32F);
- DEFINE_CASE(RGB8_UNORM);
- DEFINE_CASE(RGB16_UNORM);
- DEFINE_CASE(RGB32_UNORM);
- DEFINE_CASE(RGB32F);
- DEFINE_CASE(RGBA4_UNORM);
- DEFINE_CASE(RGBA8_UNORM);
- DEFINE_CASE(RGBA16_UNORM);
- DEFINE_CASE(RGBA32_UNORM);
- DEFINE_CASE(RGBA32F);
-
- DEFINE_CASE(R8I);
- DEFINE_CASE(R16I);
- DEFINE_CASE(R32I);
- DEFINE_CASE(RG8I);
- DEFINE_CASE(R16F);
- DEFINE_CASE(RG16I);
- DEFINE_CASE(RG32I);
- DEFINE_CASE(RG16F);
- DEFINE_CASE(RGB8I);
- DEFINE_CASE(RGB16I);
- DEFINE_CASE(RGB32I);
- DEFINE_CASE(RGB16F);
- DEFINE_CASE(RGBA8I);
- DEFINE_CASE(RGBA16I);
- DEFINE_CASE(RGBA32I);
- DEFINE_CASE(RGBA16F);
-
- DEFINE_CASE(RGBA4);
- DEFINE_CASE(RGBA8_2);
- DEFINE_CASE(RGB10_A2_2);
- default:
- snprintf(unk_format_str, sizeof(unk_format_str), "0x%02x", format);
- return unk_format_str;
- }
+ static char unk_format_str[5];
+
+ switch (format) {
+ DEFINE_CASE(RGB565);
+ DEFINE_CASE(RGB5_A1_UNORM);
+ DEFINE_CASE(RGB10_A2_UNORM);
+ DEFINE_CASE(RGB10_A2_SNORM);
+ DEFINE_CASE(RGB10_A2UI);
+ DEFINE_CASE(RGB10_A2I);
+ DEFINE_CASE(NV12);
+ DEFINE_CASE(Z32_UNORM);
+ DEFINE_CASE(R32_FIXED);
+ DEFINE_CASE(RG32_FIXED);
+ DEFINE_CASE(RGB32_FIXED);
+ DEFINE_CASE(RGBA32_FIXED);
+ DEFINE_CASE(R11F_G11F_B10F);
+ DEFINE_CASE(R9F_G9F_B9F_E5F);
+ DEFINE_CASE(VARYING_POS);
+ DEFINE_CASE(VARYING_DISCARD);
+
+ DEFINE_CASE(R8_SNORM);
+ DEFINE_CASE(R16_SNORM);
+ DEFINE_CASE(R32_SNORM);
+ DEFINE_CASE(RG8_SNORM);
+ DEFINE_CASE(RG16_SNORM);
+ DEFINE_CASE(RG32_SNORM);
+ DEFINE_CASE(RGB8_SNORM);
+ DEFINE_CASE(RGB16_SNORM);
+ DEFINE_CASE(RGB32_SNORM);
+ DEFINE_CASE(RGBA8_SNORM);
+ DEFINE_CASE(RGBA16_SNORM);
+ DEFINE_CASE(RGBA32_SNORM);
+
+ DEFINE_CASE(R8UI);
+ DEFINE_CASE(R16UI);
+ DEFINE_CASE(R32UI);
+ DEFINE_CASE(RG8UI);
+ DEFINE_CASE(RG16UI);
+ DEFINE_CASE(RG32UI);
+ DEFINE_CASE(RGB8UI);
+ DEFINE_CASE(RGB16UI);
+ DEFINE_CASE(RGB32UI);
+ DEFINE_CASE(RGBA8UI);
+ DEFINE_CASE(RGBA16UI);
+ DEFINE_CASE(RGBA32UI);
+
+ DEFINE_CASE(R8_UNORM);
+ DEFINE_CASE(R16_UNORM);
+ DEFINE_CASE(R32_UNORM);
+ DEFINE_CASE(R32F);
+ DEFINE_CASE(RG8_UNORM);
+ DEFINE_CASE(RG16_UNORM);
+ DEFINE_CASE(RG32_UNORM);
+ DEFINE_CASE(RG32F);
+ DEFINE_CASE(RGB8_UNORM);
+ DEFINE_CASE(RGB16_UNORM);
+ DEFINE_CASE(RGB32_UNORM);
+ DEFINE_CASE(RGB32F);
+ DEFINE_CASE(RGBA4_UNORM);
+ DEFINE_CASE(RGBA8_UNORM);
+ DEFINE_CASE(RGBA16_UNORM);
+ DEFINE_CASE(RGBA32_UNORM);
+ DEFINE_CASE(RGBA32F);
+
+ DEFINE_CASE(R8I);
+ DEFINE_CASE(R16I);
+ DEFINE_CASE(R32I);
+ DEFINE_CASE(RG8I);
+ DEFINE_CASE(R16F);
+ DEFINE_CASE(RG16I);
+ DEFINE_CASE(RG32I);
+ DEFINE_CASE(RG16F);
+ DEFINE_CASE(RGB8I);
+ DEFINE_CASE(RGB16I);
+ DEFINE_CASE(RGB32I);
+ DEFINE_CASE(RGB16F);
+ DEFINE_CASE(RGBA8I);
+ DEFINE_CASE(RGBA16I);
+ DEFINE_CASE(RGBA32I);
+ DEFINE_CASE(RGBA16F);
+
+ DEFINE_CASE(RGBA4);
+ DEFINE_CASE(RGBA8_2);
+ DEFINE_CASE(RGB10_A2_2);
+ default:
+ snprintf(unk_format_str, sizeof(unk_format_str), "0x%02x", format);
+ return unk_format_str;
+ }
}
#undef DEFINE_CASE
pipe_reference_init(&prsc->reference, 1);
prsc->screen = pscreen;
- rsc->bo = panfrost_drm_import_bo(screen, whandle->handle);
- rsc->slices[0].stride = whandle->stride;
- rsc->slices[0].initialized = true;
-
- if (screen->ro) {
- rsc->scanout =
- renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
-                /* failure is expected in some cases */
- }
+ rsc->bo = panfrost_drm_import_bo(screen, whandle->handle);
+ rsc->slices[0].stride = whandle->stride;
+ rsc->slices[0].initialized = true;
+
+ if (screen->ro) {
+ rsc->scanout =
+ renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
+                /* failure is expected in some cases */
+ }
return prsc;
}
handle->modifier = DRM_FORMAT_MOD_INVALID;
- if (handle->type == WINSYS_HANDLE_TYPE_SHARED) {
- return FALSE;
- } else if (handle->type == WINSYS_HANDLE_TYPE_KMS) {
- if (renderonly_get_handle(scanout, handle))
- return TRUE;
+ if (handle->type == WINSYS_HANDLE_TYPE_SHARED) {
+ return FALSE;
+ } else if (handle->type == WINSYS_HANDLE_TYPE_KMS) {
+ if (renderonly_get_handle(scanout, handle))
+ return TRUE;
- handle->handle = rsrc->bo->gem_handle;
- handle->stride = rsrc->slices[0].stride;
- return TRUE;
- } else if (handle->type == WINSYS_HANDLE_TYPE_FD) {
+ handle->handle = rsrc->bo->gem_handle;
+ handle->stride = rsrc->slices[0].stride;
+ return TRUE;
+ } else if (handle->type == WINSYS_HANDLE_TYPE_FD) {
if (scanout) {
struct drm_prime_handle args = {
.handle = scanout->handle,
handle->handle = fd;
handle->stride = rsrc->slices[0].stride;
return TRUE;
- }
- }
+ }
+ }
- return FALSE;
+ return FALSE;
}
static void
struct pipe_resource *res;
scanout = renderonly_scanout_for_resource(&scanout_templat,
- pscreen->ro, &handle);
+ pscreen->ro, &handle);
if (!scanout)
return NULL;
static unsigned
panfrost_compute_checksum_sizes(
- struct panfrost_slice *slice,
- unsigned width,
- unsigned height)
+ struct panfrost_slice *slice,
+ unsigned width,
+ unsigned height)
{
unsigned aligned_width = ALIGN_POT(width, CHECKSUM_TILE_WIDTH);
unsigned aligned_height = ALIGN_POT(height, CHECKSUM_TILE_HEIGHT);
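/* Example, assuming 16x16 checksum tiles (CHECKSUM_TILE_WIDTH/HEIGHT): a
* 100x60 miplevel aligns up to 112x64, i.e. a grid of 7 x 4 tiles carrying
* the checksum data. */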
* makes code a lot simpler */
bool renderable = res->bind &
- (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL);
+ (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL);
bool afbc = pres->layout == PAN_AFBC;
bool tiled = pres->layout == PAN_TILED;
bool should_align = renderable || tiled;
slice->checksum_offset = offset;
unsigned size = panfrost_compute_checksum_sizes(
- slice, width, height);
+ slice, width, height);
offset += size;
}
static void
panfrost_resource_create_bo(struct panfrost_screen *screen, struct panfrost_resource *pres)
{
- struct pipe_resource *res = &pres->base;
+ struct pipe_resource *res = &pres->base;
/* Based on the usage, figure out what storage will be used. There are
* various tradeoffs:
*
* Tiled: Not compressed, but cache-optimized. Expensive to write into
* (due to software tiling), but cheap to sample from. Ideal for most
- * textures.
+ * textures.
*
* AFBC: Compressed and renderable (so always desirable for non-scanout
* rendertargets). Cheap to sample from. The format is a black box, so we
{
/* Make sure we're familiar */
switch (template->target) {
- case PIPE_BUFFER:
- case PIPE_TEXTURE_1D:
- case PIPE_TEXTURE_2D:
- case PIPE_TEXTURE_3D:
- case PIPE_TEXTURE_CUBE:
- case PIPE_TEXTURE_RECT:
- case PIPE_TEXTURE_2D_ARRAY:
- break;
- default:
- DBG("Unknown texture target %d\n", template->target);
- assert(0);
+ case PIPE_BUFFER:
+ case PIPE_TEXTURE_1D:
+ case PIPE_TEXTURE_2D:
+ case PIPE_TEXTURE_3D:
+ case PIPE_TEXTURE_CUBE:
+ case PIPE_TEXTURE_RECT:
+ case PIPE_TEXTURE_2D_ARRAY:
+ break;
+ default:
+ DBG("Unknown texture target %d\n", template->target);
+ assert(0);
}
if (template->bind &
struct panfrost_screen *pscreen = pan_screen(screen);
struct panfrost_resource *rsrc = (struct panfrost_resource *) pt;
- if (rsrc->scanout)
- renderonly_scanout_destroy(rsrc->scanout, pscreen->ro);
+ if (rsrc->scanout)
+ renderonly_scanout_destroy(rsrc->scanout, pscreen->ro);
- if (rsrc->bo)
+ if (rsrc->bo)
panfrost_bo_unreference(screen, rsrc->bo);
util_range_destroy(&rsrc->valid_buffer_range);
- ralloc_free(rsrc);
+ ralloc_free(rsrc);
}
static void *
/* TODO: reallocate */
//printf("debug: Missed reallocate\n");
} else if ((usage & PIPE_TRANSFER_WRITE)
- && resource->target == PIPE_BUFFER
- && !util_ranges_intersect(&rsrc->valid_buffer_range, box->x, box->x + box->width)) {
+ && resource->target == PIPE_BUFFER
+ && !util_ranges_intersect(&rsrc->valid_buffer_range, box->x, box->x + box->width)) {
/* No flush for writes to uninitialized */
} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
if (usage & PIPE_TRANSFER_WRITE) {
DBG("Unimplemented: reads from AFBC");
} else if (rsrc->layout == PAN_TILED) {
panfrost_load_tiled_image(
- transfer->map,
- bo->cpu + rsrc->slices[level].offset,
- box,
- transfer->base.stride,
- rsrc->slices[level].stride,
- util_format_get_blocksize(resource->format));
+ transfer->map,
+ bo->cpu + rsrc->slices[level].offset,
+ box,
+ transfer->base.stride,
+ rsrc->slices[level].stride,
+ util_format_get_blocksize(resource->format));
}
}
rsrc->slices[level].initialized = true;
return bo->cpu
- + rsrc->slices[level].offset
- + transfer->base.box.z * rsrc->cubemap_stride
- + transfer->base.box.y * rsrc->slices[level].stride
- + transfer->base.box.x * bytes_per_pixel;
+ + rsrc->slices[level].offset
+ + transfer->base.box.z * rsrc->cubemap_stride
+ + transfer->base.box.y * rsrc->slices[level].stride
+ + transfer->base.box.x * bytes_per_pixel;
}
}
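/* Worked example of the linear addressing above, with hypothetical
 * numbers: for an RGBA8 map (4 bytes per pixel) of a 2D level with
 * offset 0 and stride 1024, a box at (x=3, y=2, z=0) maps to
 *   cpu + 0 + 0 * cubemap_stride + 2 * 1024 + 3 * 4 == cpu + 2060. */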
assert(transfer->box.depth == 1);
panfrost_store_tiled_image(
- bo->cpu + prsrc->slices[level].offset,
- trans->map,
- &transfer->box,
- prsrc->slices[level].stride,
- transfer->stride,
- util_format_get_blocksize(prsrc->base.format));
+ bo->cpu + prsrc->slices[level].offset,
+ trans->map,
+ &transfer->box,
+ prsrc->slices[level].stride,
+ transfer->stride,
+ util_format_get_blocksize(prsrc->base.format));
}
}
}
- util_range_add(&prsrc->valid_buffer_range,
- transfer->box.x,
- transfer->box.x + transfer->box.width);
+ util_range_add(&prsrc->valid_buffer_range,
+ transfer->box.x,
+ transfer->box.x + transfer->box.width);
/* Dereference the resource */
pipe_resource_reference(&transfer->resource, NULL);
static void
panfrost_transfer_flush_region(struct pipe_context *pctx,
- struct pipe_transfer *transfer,
- const struct pipe_box *box)
+ struct pipe_transfer *transfer,
+ const struct pipe_box *box)
{
- struct panfrost_resource *rsc = pan_resource(transfer->resource);
+ struct panfrost_resource *rsc = pan_resource(transfer->resource);
- if (transfer->resource->target == PIPE_BUFFER) {
- util_range_add(&rsc->valid_buffer_range,
- transfer->box.x + box->x,
- transfer->box.x + box->x + box->width);
+ if (transfer->resource->target == PIPE_BUFFER) {
+ util_range_add(&rsc->valid_buffer_range,
+ transfer->box.x + box->x,
+ transfer->box.x + box->x + box->width);
}
}
}
static enum pipe_format
panfrost_resource_get_internal_format(struct pipe_resource *prsrc)
{
return prsrc->format;
}
static boolean
panfrost_generate_mipmap(
- struct pipe_context *pctx,
- struct pipe_resource *prsrc,
- enum pipe_format format,
- unsigned base_level,
- unsigned last_level,
- unsigned first_layer,
- unsigned last_layer)
+ struct pipe_context *pctx,
+ struct pipe_resource *prsrc,
+ enum pipe_format format,
+ unsigned base_level,
+ unsigned last_level,
+ unsigned first_layer,
+ unsigned last_layer)
{
struct panfrost_context *ctx = pan_context(pctx);
struct panfrost_resource *rsrc = pan_resource(prsrc);
/* We've flushed the original buffer if needed, now trigger a blit */
bool blit_res = util_gen_mipmap(
- pctx, prsrc, format,
- base_level, last_level,
- first_layer, last_layer,
- PIPE_TEX_FILTER_LINEAR);
+ pctx, prsrc, format,
+ base_level, last_level,
+ first_layer, last_layer,
+ PIPE_TEX_FILTER_LINEAR);
/* If the blit was successful, flush once more. If it wasn't, well, let
* the state tracker deal with it. */
mali_ptr
panfrost_get_texture_address(
- struct panfrost_resource *rsrc,
- unsigned level, unsigned face)
+ struct panfrost_resource *rsrc,
+ unsigned level, unsigned face)
{
unsigned level_offset = rsrc->slices[level].offset;
unsigned face_offset = face * rsrc->cubemap_stride;
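/* Presumed remainder of the function: the returned GPU address is the
 * BO's GPU base plus the two offsets computed above (sketch, assuming
 * the BO exposes a gpu field alongside cpu as elsewhere):
 */

return rsrc->bo->gpu + level_offset + face_offset;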
pscreen->base.resource_from_handle = panfrost_resource_from_handle;
pscreen->base.resource_get_handle = panfrost_resource_get_handle;
pscreen->base.transfer_helper = u_transfer_helper_create(&transfer_vtbl,
- true, false,
- true, true);
+ true, false,
+ true, true);
pb_slabs_init(&pscreen->slabs,
- MIN_SLAB_ENTRY_SIZE,
- MAX_SLAB_ENTRY_SIZE,
+ MIN_SLAB_ENTRY_SIZE,
+ MAX_SLAB_ENTRY_SIZE,
- 3, /* Number of heaps */
+ 3, /* Number of heaps */
- pscreen,
+ pscreen,
- panfrost_slab_can_reclaim,
- panfrost_slab_alloc,
- panfrost_slab_free);
+ panfrost_slab_can_reclaim,
+ panfrost_slab_alloc,
+ panfrost_slab_free);
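/* The three callbacks follow pb_slab.h's contracts; their expected
 * shapes are roughly the following (prototypes only -- the driver's
 * real implementations are not part of this hunk):
 *
 * static bool
 * panfrost_slab_can_reclaim(void *priv, struct pb_slab_entry *entry);
 *
 * static struct pb_slab *
 * panfrost_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
 *                     unsigned group_index);
 *
 * static void
 * panfrost_slab_free(void *priv, struct pb_slab *slab);
 */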
}
void
static inline struct panfrost_resource *
pan_resource(struct pipe_resource *p)
{
- return (struct panfrost_resource *)p;
+ return (struct panfrost_resource *)p;
}
struct panfrost_gtransfer {
static inline struct panfrost_gtransfer *
pan_transfer(struct pipe_transfer *p)
{
- return (struct panfrost_gtransfer *)p;
+ return (struct panfrost_gtransfer *)p;
}
mali_ptr
panfrost_get_texture_address(
- struct panfrost_resource *rsrc,
- unsigned level, unsigned face);
+ struct panfrost_resource *rsrc,
+ unsigned level, unsigned face);
void panfrost_resource_screen_init(struct panfrost_screen *screen);
void panfrost_resource_screen_deinit(struct panfrost_screen *screen);
static void
panfrost_assign_index(
- struct panfrost_job *job,
- struct panfrost_transfer transfer)
+ struct panfrost_job *job,
+ struct panfrost_transfer transfer)
{
/* Assign the index */
unsigned index = ++job->job_index;
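/* Note the pre-increment: indices start at 1, presumably because 0 in a
 * Mali job header's dependency slots means "no dependency". */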
static void
panfrost_add_dependency(
- struct panfrost_transfer depender,
- struct panfrost_transfer dependent)
+ struct panfrost_transfer depender,
+ struct panfrost_transfer dependent)
{
struct mali_job_descriptor_header *first =
static void
panfrost_scoreboard_queue_job_internal(
- struct panfrost_job *batch,
- struct panfrost_transfer job)
+ struct panfrost_job *batch,
+ struct panfrost_transfer job)
{
panfrost_assign_index(batch, job);
void
panfrost_scoreboard_queue_compute_job(
- struct panfrost_job *batch,
- struct panfrost_transfer job)
+ struct panfrost_job *batch,
+ struct panfrost_transfer job)
{
panfrost_scoreboard_queue_job_internal(batch, job);
void
panfrost_scoreboard_queue_vertex_job(
- struct panfrost_job *batch,
- struct panfrost_transfer vertex,
- bool requires_tiling)
+ struct panfrost_job *batch,
+ struct panfrost_transfer vertex,
+ bool requires_tiling)
{
panfrost_scoreboard_queue_compute_job(batch, vertex);
void
panfrost_scoreboard_queue_tiler_job(
- struct panfrost_job *batch,
- struct panfrost_transfer tiler)
+ struct panfrost_job *batch,
+ struct panfrost_transfer tiler)
{
panfrost_scoreboard_queue_compute_job(batch, tiler);
void
panfrost_scoreboard_queue_fused_job(
- struct panfrost_job *batch,
- struct panfrost_transfer vertex,
- struct panfrost_transfer tiler)
+ struct panfrost_job *batch,
+ struct panfrost_transfer vertex,
+ struct panfrost_transfer tiler)
{
panfrost_scoreboard_queue_vertex_job(batch, vertex, true);
panfrost_scoreboard_queue_tiler_job(batch, tiler);
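/* Hypothetical call site for the fused path: a draw builds one vertex
 * job and one tiler job, and fusing them orders the tiler after the
 * vertex job whose output it consumes (helper name illustrative, not
 * from this patch):
 *
 * struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false);
 * struct panfrost_transfer tiler = panfrost_vertex_tiler_job(ctx, true);
 * panfrost_scoreboard_queue_fused_job(batch, vertex, tiler);
 */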
void
panfrost_scoreboard_queue_fused_job_prepend(
- struct panfrost_job *batch,
- struct panfrost_transfer vertex,
- struct panfrost_transfer tiler)
+ struct panfrost_job *batch,
+ struct panfrost_transfer vertex,
+ struct panfrost_transfer tiler)
{
/* Sanity check */
assert(batch->last_tiler.gpu);
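/* The prepend variant presumably serves work that must run before the
 * frame's existing tiler jobs, such as a framebuffer-reload draw. */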
unsigned arr_size = BITSET_WORDS(node_count);
for (unsigned node_n_1 = __bitset_ffs(no_incoming, arr_size);
- (node_n_1 != 0);
- node_n_1 = __bitset_ffs(no_incoming, arr_size)) {
+ (node_n_1 != 0);
+ node_n_1 = __bitset_ffs(no_incoming, arr_size)) {
unsigned node_n = node_n_1 - 1;
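/* The loop above is Kahn's topological sort driven by a bitset: nodes
 * with no unsatisfied dependencies are repeatedly popped and emitted.
 * A self-contained sketch of the same pattern over a plain adjacency
 * matrix (generic illustration, not the driver's job types):
 */

#include <stdbool.h>
#include <stdio.h>

#define NODE_COUNT 4

static void
kahn_sort(bool dep[NODE_COUNT][NODE_COUNT])
{
        unsigned in_degree[NODE_COUNT] = { 0 };
        bool emitted[NODE_COUNT] = { false };

        for (unsigned i = 0; i < NODE_COUNT; ++i)
                for (unsigned j = 0; j < NODE_COUNT; ++j)
                        if (dep[i][j])
                                in_degree[j]++;

        for (unsigned done = 0; done < NODE_COUNT; ++done) {
                unsigned n = 0;

                /* Find a not-yet-emitted node with no incoming edges */
                while (n < NODE_COUNT && (emitted[n] || in_degree[n] > 0))
                        ++n;

                if (n == NODE_COUNT)
                        return; /* Cycle: nothing left to schedule */

                printf("emit node %u\n", n);
                emitted[n] = true;

                /* Retire the emitted node's outgoing edges */
                for (unsigned j = 0; j < NODE_COUNT; ++j)
                        if (dep[n][j])
                                in_degree[j]--;
        }
}

int
main(void)
{
        /* Diamond: 0 -> {1, 2}, {1, 2} -> 3 */
        bool dep[NODE_COUNT][NODE_COUNT] = {{ false }};

        dep[0][1] = dep[0][2] = true;
        dep[1][3] = dep[2][3] = true;

        kahn_sort(dep);
        return 0;
}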
#include "midgard/midgard_compile.h"
static const struct debug_named_value debug_options[] = {
- {"msgs", PAN_DBG_MSGS, "Print debug messages"},
- {"trace", PAN_DBG_TRACE, "Trace the command stream"},
- {"deqp", PAN_DBG_DEQP, "Hacks for dEQP"},
- /* ^^ If Rob can do it, so can I */
- DEBUG_NAMED_VALUE_END
+ {"msgs", PAN_DBG_MSGS, "Print debug messages"},
+ {"trace", PAN_DBG_TRACE, "Trace the command stream"},
+ {"deqp", PAN_DBG_DEQP, "Hacks for dEQP"},
+ /* ^^ If Rob can do it, so can I */
+ DEBUG_NAMED_VALUE_END
};
DEBUG_GET_ONCE_FLAGS_OPTION(pan_debug, "PAN_MESA_DEBUG", debug_options, 0)
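/* DEBUG_GET_ONCE_FLAGS_OPTION comes from Mesa's u_debug.h; conceptually
 * it generates a cached getter along these lines (sketch of the idea,
 * not the literal macro expansion):
 */

static uint64_t
debug_get_option_pan_debug(void)
{
        static bool initialized = false;
        static uint64_t flags;

        if (!initialized) {
                flags = debug_get_flags_option("PAN_MESA_DEBUG",
                                               debug_options, 0);
                initialized = true;
        }

        return flags;
}

/* So e.g. PAN_MESA_DEBUG=trace,msgs sets both bits, which the
 * screen-creation hunk below reads into pan_debug. */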
enum pipe_shader_cap param)
{
if (shader != PIPE_SHADER_VERTEX &&
- shader != PIPE_SHADER_FRAGMENT) {
+ shader != PIPE_SHADER_FRAGMENT) {
return 0;
}
return FALSE;
if (format_desc->layout != UTIL_FORMAT_LAYOUT_PLAIN &&
- format_desc->layout != UTIL_FORMAT_LAYOUT_OTHER) {
+ format_desc->layout != UTIL_FORMAT_LAYOUT_OTHER) {
/* Compressed formats not yet hooked up. */
return FALSE;
}
{
struct panfrost_screen *screen = rzalloc(NULL, struct panfrost_screen);
- pan_debug = debug_get_option_pan_debug();
+ pan_debug = debug_get_option_pan_debug();
if (!screen)
return NULL;
switch (screen->gpu_id) {
#ifdef __LP64__
- case 0x820: /* T820 */
- case 0x860: /* T860 */
- break;
+ case 0x820: /* T820 */
+ case 0x860: /* T860 */
+ break;
#else
- case 0x750: /* T760 */
- break;
+ case 0x750: /* T760 */
+ break;
#endif
- default:
- /* Fail to load against untested models */
- debug_printf("panfrost: Unsupported model %X",
- screen->gpu_id);
- return NULL;
+ default:
+ /* Fail to load against untested models */
+ debug_printf("panfrost: Unsupported model %X",
+ screen->gpu_id);
+ return NULL;
}
if (pan_debug & PAN_DBG_TRACE)
screen->base.fence_reference = panfrost_fence_reference;
screen->base.fence_finish = panfrost_fence_finish;
- screen->last_fragment_flushed = true;
+ screen->last_fragment_flushed = true;
screen->last_job = NULL;
panfrost_resource_screen_init(screen);
/* Memory management is based on subdividing slabs with AMD's allocator */
struct pb_slabs slabs;
-
+
/* TODO: Where? */
struct panfrost_resource *display_target;
/* While we're busy building up the job for frame N, the GPU is
* still busy executing frame N-1. So hold a reference to
* yesterjob */
- int last_fragment_flushed;
+ int last_fragment_flushed;
struct panfrost_job *last_job;
};
static inline struct panfrost_screen *
pan_screen(struct pipe_screen *p)
{
- return (struct panfrost_screen *)p;
+ return (struct panfrost_screen *)p;
}
void
static void
panfrost_sfbd_clear(
- struct panfrost_job *job,
- struct mali_single_framebuffer *sfbd)
+ struct panfrost_job *job,
+ struct mali_single_framebuffer *sfbd)
{
struct panfrost_context *ctx = job->ctx;
static void
panfrost_sfbd_set_cbuf(
- struct mali_single_framebuffer *fb,
- struct pipe_surface *surf)
+ struct mali_single_framebuffer *fb,
+ struct pipe_surface *surf)
{
struct panfrost_resource *rsrc = pan_resource(surf->texture);
unsigned
panfrost_choose_hierarchy_mask(
- unsigned width, unsigned height,
- unsigned vertex_count)
+ unsigned width, unsigned height,
+ unsigned vertex_count)
{
/* If there is no geometry, we don't bother enabling anything */
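/* The fragment ends at the comment; the early-out it describes is
 * presumably just: */

if (!vertex_count)
        return 0x00;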
unsigned
panfrost_choose_hierarchy_mask(
- unsigned width, unsigned height,
- unsigned vertex_count);
+ unsigned width, unsigned height,
+ unsigned vertex_count);
#endif