etna_clear_blit_init(struct pipe_context *pctx)
{
struct etna_context *ctx = etna_context(pctx);
+ struct etna_screen *screen = ctx->screen;
pctx->clear_render_target = etna_clear_render_target;
pctx->clear_depth_stencil = etna_clear_depth_stencil;
pctx->resource_copy_region = etna_resource_copy_region;
pctx->flush_resource = etna_flush_resource;
- if (ctx->specs.use_blt)
+ if (screen->specs.use_blt)
etna_clear_blit_blt_init(pctx);
else
etna_clear_blit_rs_init(pctx);
etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
struct etna_context *ctx = etna_context(pctx);
+ struct etna_screen *screen = ctx->screen;
struct pipe_framebuffer_state *pfb = &ctx->framebuffer_s;
uint32_t draw_mode;
unsigned i;
/* First, sync state, then emit DRAW_PRIMITIVES or DRAW_INDEXED_PRIMITIVES */
etna_emit_state(ctx);
- if (ctx->specs.halti >= 2) {
+ if (screen->specs.halti >= 2) {
/* On HALTI2+ (GC3000 and higher) only use instanced drawing commands, as the blob does */
etna_draw_instanced(ctx->stream, info->index_size, draw_mode, info->instance_count,
info->count, info->index_size ? info->index_bias : info->start);
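/* The instanced command also covers plain draws: gallium guarantees
 * instance_count >= 1, so a non-instanced draw is simply one instance. */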
etna_reset_gpu_state(struct etna_context *ctx)
{
struct etna_cmd_stream *stream = ctx->stream;
+ struct etna_screen *screen = ctx->screen;
etna_set_state(stream, VIVS_GL_API_MODE, VIVS_GL_API_MODE_OPENGL);
etna_set_state(stream, VIVS_GL_VERTEX_ELEMENT_CONFIG, 0x00000001);
etna_set_state(stream, VIVS_PS_CONTROL_EXT, 0x00000000);
/* There is no HALTI0 specific state */
- if (ctx->specs.halti >= 1) { /* Only on HALTI1+ */
+ if (screen->specs.halti >= 1) { /* Only on HALTI1+ */
etna_set_state(stream, VIVS_VS_HALTI1_UNK00884, 0x00000808);
}
- if (ctx->specs.halti >= 2) { /* Only on HALTI2+ */
+ if (screen->specs.halti >= 2) { /* Only on HALTI2+ */
etna_set_state(stream, VIVS_RA_UNK00E0C, 0x00000000);
}
- if (ctx->specs.halti >= 3) { /* Only on HALTI3+ */
+ if (screen->specs.halti >= 3) { /* Only on HALTI3+ */
etna_set_state(stream, VIVS_PS_HALTI3_UNK0103C, 0x76543210);
}
- if (ctx->specs.halti >= 4) { /* Only on HALTI4+ */
+ if (screen->specs.halti >= 4) { /* Only on HALTI4+ */
etna_set_state(stream, VIVS_PS_MSAA_CONFIG, 0x6fffffff & 0xf70fffff & 0xfff6ffff &
0xffff6fff & 0xfffff6ff & 0xffffff7f);
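/* Assuming plain bitwise AND of the literals above, the expression folds to
 * the single constant 0x6706667f; presumably each mask documents one
 * per-sample configuration field being cleared. */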
etna_set_state(stream, VIVS_PE_HALTI4_UNK014C0, 0x00000000);
}
- if (ctx->specs.halti >= 5) { /* Only on HALTI5+ */
+ if (screen->specs.halti >= 5) { /* Only on HALTI5+ */
etna_set_state(stream, VIVS_NTE_DESCRIPTOR_UNK14C40, 0x00000001);
etna_set_state(stream, VIVS_FE_HALTI5_UNK007D8, 0x00000002);
etna_set_state(stream, VIVS_FE_HALTI5_ID_CONFIG, 0x00000000);
etna_set_state(stream, VIVS_GL_UNK03854, 0x00000000);
}
- if (!ctx->specs.use_blt) {
+ if (!screen->specs.use_blt) {
/* Enable SINGLE_BUFFER for resolve, if supported */
- etna_set_state(stream, VIVS_RS_SINGLE_BUFFER, COND(ctx->specs.single_buffer, VIVS_RS_SINGLE_BUFFER_ENABLE));
+ etna_set_state(stream, VIVS_RS_SINGLE_BUFFER, COND(screen->specs.single_buffer, VIVS_RS_SINGLE_BUFFER_ENABLE));
}
- if (ctx->specs.halti >= 5) {
+ if (screen->specs.halti >= 5) {
/* TXDESC cache flush - do this once at the beginning, as texture
* descriptors are only written by the CPU once, then patched by the kernel
* before command stream submission. It does not need flushing if the
mtx_init(&ctx->lock, mtx_recursive);
/* context state setup */
- ctx->specs = screen->specs;
ctx->screen = screen;
/* need some sane default in case state tracker doesn't set some state: */
ctx->sample_mask = 0xffff;
/* Get sampler TS pointer for sampler view */
struct etna_sampler_ts *(*ts_for_sampler_view)(struct pipe_sampler_view *pview);
- struct etna_specs specs;
struct etna_screen *screen;
struct etna_cmd_stream *stream;
etna_emit_state(struct etna_context *ctx)
{
struct etna_cmd_stream *stream = ctx->stream;
+ struct etna_screen *screen = ctx->screen;
unsigned ccw = ctx->rasterizer->front_ccw;
* a) the number of vertex elements written matters, so write only active ones
* b) the vertex element states must all be written: do not skip entries that stay the same */
if (dirty & (ETNA_DIRTY_VERTEX_ELEMENTS)) {
- if (ctx->specs.halti >= 5) {
+ if (screen->specs.halti >= 5) {
/*17800*/ etna_set_state_multi(stream, VIVS_NFE_GENERIC_ATTRIB_CONFIG0(0),
ctx->vertex_elements->num_elements,
ctx->vertex_elements->NFE_GENERIC_ATTRIB_CONFIG0);
/*00600*/ etna_set_state_multi(stream, VIVS_FE_VERTEX_ELEMENT_CONFIG(0),
ctx->vertex_elements->num_elements,
ctx->vertex_elements->FE_VERTEX_ELEMENT_CONFIG);
- if (ctx->specs.halti >= 2) {
+ if (screen->specs.halti >= 2) {
/*00780*/ etna_set_state_multi(stream, VIVS_FE_GENERIC_ATTRIB_SCALE(0),
ctx->vertex_elements->num_elements,
ctx->vertex_elements->NFE_GENERIC_ATTRIB_SCALE);
/*00674*/ EMIT_STATE(FE_PRIMITIVE_RESTART_INDEX, ctx->index_buffer.FE_PRIMITIVE_RESTART_INDEX);
}
if (likely(dirty & (ETNA_DIRTY_VERTEX_BUFFERS))) {
- if (ctx->specs.halti >= 2) { /* HALTI2+: NFE_VERTEX_STREAMS */
+ if (screen->specs.halti >= 2) { /* HALTI2+: NFE_VERTEX_STREAMS */
for (int x = 0; x < ctx->vertex_buffer.count; ++x) {
/*14600*/ EMIT_STATE_RELOC(NFE_VERTEX_STREAMS_BASE_ADDR(x), &ctx->vertex_buffer.cvb[x].FE_VERTEX_STREAM_BASE_ADDR);
}
/*14640*/ EMIT_STATE(NFE_VERTEX_STREAMS_CONTROL(x), ctx->vertex_buffer.cvb[x].FE_VERTEX_STREAM_CONTROL);
}
}
- } else if(ctx->specs.stream_count > 1) { /* hw w/ multiple vertex streams */
+ } else if (screen->specs.stream_count > 1) { /* hw w/ multiple vertex streams */
for (int x = 0; x < ctx->vertex_buffer.count; ++x) {
/*00680*/ EMIT_STATE_RELOC(FE_VERTEX_STREAMS_BASE_ADDR(x), &ctx->vertex_buffer.cvb[x].FE_VERTEX_STREAM_BASE_ADDR);
}
}
}
/* gallium has instance divisor as part of elements state */
- if ((dirty & (ETNA_DIRTY_VERTEX_ELEMENTS)) && ctx->specs.halti >= 2) {
+ if ((dirty & (ETNA_DIRTY_VERTEX_ELEMENTS)) && screen->specs.halti >= 2) {
for (int x = 0; x < ctx->vertex_elements->num_buffers; ++x) {
/*14680*/ EMIT_STATE(NFE_VERTEX_STREAMS_VERTEX_DIVISOR(x), ctx->vertex_elements->NFE_VERTEX_STREAMS_VERTEX_DIVISOR[x]);
}
if (unlikely(dirty & (ETNA_DIRTY_FRAMEBUFFER))) {
/*0140C*/ EMIT_STATE(PE_DEPTH_NORMALIZE, ctx->framebuffer.PE_DEPTH_NORMALIZE);
- if (ctx->specs.pixel_pipes == 1) {
+ if (screen->specs.pixel_pipes == 1) {
/*01410*/ EMIT_STATE_RELOC(PE_DEPTH_ADDR, &ctx->framebuffer.PE_DEPTH_ADDR);
}
/*0142C*/ EMIT_STATE(PE_COLOR_FORMAT, val);
}
if (unlikely(dirty & (ETNA_DIRTY_FRAMEBUFFER))) {
- if (ctx->specs.pixel_pipes == 1) {
+ if (screen->specs.pixel_pipes == 1) {
/*01430*/ EMIT_STATE_RELOC(PE_COLOR_ADDR, &ctx->framebuffer.PE_COLOR_ADDR);
/*01434*/ EMIT_STATE(PE_COLOR_STRIDE, ctx->framebuffer.PE_COLOR_STRIDE);
/*01454*/ EMIT_STATE(PE_HDEPTH_CONTROL, ctx->framebuffer.PE_HDEPTH_CONTROL);
- } else if (ctx->specs.pixel_pipes == 2) {
+ } else if (screen->specs.pixel_pipes == 2) {
/*01434*/ EMIT_STATE(PE_COLOR_STRIDE, ctx->framebuffer.PE_COLOR_STRIDE);
/*01454*/ EMIT_STATE(PE_HDEPTH_CONTROL, ctx->framebuffer.PE_HDEPTH_CONTROL);
/*01460*/ EMIT_STATE_RELOC(PE_PIPE_COLOR_ADDR(0), &ctx->framebuffer.PE_PIPE_COLOR_ADDR[0]);
if (unlikely(dirty & (ETNA_DIRTY_ZSA | ETNA_DIRTY_RASTERIZER))) {
/*014B8*/ EMIT_STATE(PE_STENCIL_CONFIG_EXT2, etna_zsa_state(ctx->zsa)->PE_STENCIL_CONFIG_EXT2[ccw]);
}
- if (unlikely(dirty & (ETNA_DIRTY_FRAMEBUFFER)) && ctx->specs.halti >= 3)
+ if (unlikely(dirty & (ETNA_DIRTY_FRAMEBUFFER)) && screen->specs.halti >= 3)
/*014BC*/ EMIT_STATE(PE_MEM_CONFIG, ctx->framebuffer.PE_MEM_CONFIG);
if (unlikely(dirty & (ETNA_DIRTY_FRAMEBUFFER | ETNA_DIRTY_TS))) {
/*01654*/ EMIT_STATE(TS_MEM_CONFIG, ctx->framebuffer.TS_MEM_CONFIG);
/* end only EMIT_STATE */
/* Emit strongly architecture-specific state */
- if (ctx->specs.halti >= 5)
+ if (screen->specs.halti >= 5)
emit_halti5_only_state(ctx, vs_output_count);
else
emit_pre_halti5_state(ctx);
* I surmise that this is because the "new" locations at 0xc000 are not
* properly protected against updates as other states seem to be. Hence,
* we detect the "new" vertex shader instruction offset to apply this. */
- if (ctx->dirty & (ETNA_DIRTY_SHADER | ETNA_DIRTY_CONSTBUF) && ctx->specs.vs_offset > 0x4000)
+ if (ctx->dirty & (ETNA_DIRTY_SHADER | ETNA_DIRTY_CONSTBUF) && screen->specs.vs_offset > 0x4000)
etna_stall(ctx->stream, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
/* We need to update the uniform cache only if one of the following bits is
ETNA_DIRTY_SHADER | ETNA_DIRTY_CONSTBUF;
/**** Large dynamically-sized state ****/
- bool do_uniform_flush = ctx->specs.halti < 5;
+ bool do_uniform_flush = screen->specs.halti < 5;
if (dirty & (ETNA_DIRTY_SHADER)) {
/* Special case: a new shader was loaded; simply re-load all uniforms and
* shader code at once */
state can legitimately be programmed multiple times.
*/
- if (ctx->specs.halti >= 5) { /* ICACHE (HALTI5) */
+ if (screen->specs.halti >= 5) { /* ICACHE (HALTI5) */
assert(ctx->shader_state.VS_INST_ADDR.bo && ctx->shader_state.PS_INST_ADDR.bo);
/* Set icache (VS) */
etna_set_state(stream, VIVS_VS_NEWRANGE_LOW, 0);
} else if (ctx->shader_state.VS_INST_ADDR.bo || ctx->shader_state.PS_INST_ADDR.bo) {
/* ICACHE (pre-HALTI5) */
- assert(ctx->specs.has_icache && ctx->specs.has_shader_range_registers);
+ assert(screen->specs.has_icache && screen->specs.has_shader_range_registers);
/* Set icache (VS) */
etna_set_state(stream, VIVS_VS_RANGE, (ctx->shader_state.vs_inst_mem_size / 4 - 1) << 16);
etna_set_state(stream, VIVS_VS_ICACHE_CONTROL,
} else {
/* Upload shader directly, first flushing and disabling icache if
* supported on this hw */
- if (ctx->specs.has_icache) {
+ if (screen->specs.has_icache) {
etna_set_state(stream, VIVS_VS_ICACHE_CONTROL,
VIVS_VS_ICACHE_CONTROL_FLUSH_PS |
VIVS_VS_ICACHE_CONTROL_FLUSH_VS);
}
- if (ctx->specs.has_shader_range_registers) {
+ if (screen->specs.has_shader_range_registers) {
etna_set_state(stream, VIVS_VS_RANGE, (ctx->shader_state.vs_inst_mem_size / 4 - 1) << 16);
etna_set_state(stream, VIVS_PS_RANGE, ((ctx->shader_state.ps_inst_mem_size / 4 - 1 + 0x100) << 16) |
0x100);
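/* Each shader instruction is four 32-bit words, so size / 4 - 1 is
 * presumably the index of the last instruction; the PS window follows the
 * VS window at instruction 0x100, hence that offset in both fields. */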
}
- etna_set_state_multi(stream, ctx->specs.vs_offset,
+ etna_set_state_multi(stream, screen->specs.vs_offset,
ctx->shader_state.vs_inst_mem_size,
ctx->shader_state.VS_INST_MEM);
- etna_set_state_multi(stream, ctx->specs.ps_offset,
+ etna_set_state_multi(stream, screen->specs.ps_offset,
ctx->shader_state.ps_inst_mem_size,
ctx->shader_state.PS_INST_MEM);
}
- if (ctx->specs.has_unified_uniforms) {
+ if (screen->specs.has_unified_uniforms) {
etna_set_state(stream, VIVS_VS_UNIFORM_BASE, 0);
- etna_set_state(stream, VIVS_PS_UNIFORM_BASE, ctx->specs.max_vs_uniforms);
+ etna_set_state(stream, VIVS_PS_UNIFORM_BASE, screen->specs.max_vs_uniforms);
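/* With unified uniform memory the PS uniforms simply follow the VS ones,
 * which is why the PS base above is the maximum number of VS uniforms. */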
}
if (do_uniform_flush)
etna_uniforms_write(ctx, ctx->shader.fs, ctx->constant_buffer[PIPE_SHADER_FRAGMENT].cb);
- if (ctx->specs.halti >= 5) {
+ if (screen->specs.halti >= 5) {
/* HALTI5 needs to be prompted to pre-fetch shaders */
etna_set_state(stream, VIVS_VS_ICACHE_PREFETCH, 0x00000000);
etna_set_state(stream, VIVS_PS_ICACHE_PREFETCH, 0x00000000);
etna_compile_rs_state(struct etna_context *ctx, struct compiled_rs_state *cs,
const struct rs_state *rs)
{
+ struct etna_screen *screen = ctx->screen;
+
memset(cs, 0, sizeof(*cs));
/* TILED and SUPERTILED layout have their strides multiplied by 4 in RS */
* destination buffer respectively. This will be overridden below as
* necessary for the multi-pipe, multi-tiled case.
*/
- for (unsigned pipe = 0; pipe < ctx->specs.pixel_pipes; ++pipe) {
+ for (unsigned pipe = 0; pipe < screen->specs.pixel_pipes; ++pipe) {
cs->source[pipe].bo = rs->source;
cs->source[pipe].offset = rs->source_offset;
cs->source[pipe].flags = ETNA_RELOC_READ;
VIVS_RS_WINDOW_SIZE_HEIGHT(rs->height);
/* use dual pipe mode when required */
- if (!ctx->specs.single_buffer && ctx->specs.pixel_pipes == 2 && !(rs->height & 7)) {
+ if (!screen->specs.single_buffer && screen->specs.pixel_pipes == 2 && !(rs->height & 7)) {
cs->RS_WINDOW_SIZE = VIVS_RS_WINDOW_SIZE_WIDTH(rs->width) |
VIVS_RS_WINDOW_SIZE_HEIGHT(rs->height / 2);
cs->RS_PIPE_OFFSET[1] = VIVS_RS_PIPE_OFFSET_X(0) | VIVS_RS_PIPE_OFFSET_Y(rs->height / 2);
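/* height & 7 == 0 means the height is a multiple of 8, so it splits evenly
 * between the two pixel pipes: the second pipe starts at height / 2. */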
/* If the source is the same as the destination, and the hardware supports this,
* do an in-place resolve to fill in unrendered tiles.
*/
- if (ctx->specs.single_buffer && rs->source == rs->dest &&
+ if (screen->specs.single_buffer && rs->source == rs->dest &&
rs->source_offset == rs->dest_offset &&
rs->source_format == rs->dest_format &&
rs->source_tiling == rs->dest_tiling &&
etna_rs_gen_clear_surface(struct etna_context *ctx, struct etna_surface *surf,
uint64_t clear_value)
{
+ struct etna_screen *screen = ctx->screen;
struct etna_resource *dst = etna_resource(surf->base.texture);
uint32_t format;
format = RS_FORMAT_A8R8G8B8;
break;
case 64:
- assert(ctx->specs.halti >= 2);
+ assert(screen->specs.halti >= 2);
format = RS_FORMAT_64BPP_CLEAR;
break;
default:
const enum etna_surface_layout layout,
unsigned int *width_mask, unsigned int *height_mask)
{
+ struct etna_screen *screen = ctx->screen;
unsigned int h_align, w_align;
if (layout & ETNA_LAYOUT_BIT_SUPER) {
w_align = 64;
- h_align = 64 * ctx->specs.pixel_pipes;
+ h_align = 64 * screen->specs.pixel_pipes;
} else {
w_align = ETNA_RS_WIDTH_MASK + 1;
h_align = ETNA_RS_HEIGHT_MASK + 1;
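/* The RS width/height masks are inclusive alignment masks; adding 1 turns
 * them into the block size the resolve engine operates on, i.e. the
 * required width/height alignment for non-supertiled layouts. */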
const struct pipe_shader_state *pss)
{
struct etna_context *ctx = etna_context(pctx);
+ struct etna_screen *screen = ctx->screen;
struct etna_shader *shader = CALLOC_STRUCT(etna_shader);
if (!shader)
static uint32_t id;
shader->id = id++;
- shader->specs = &ctx->specs;
+ shader->specs = &screen->specs;
if (DBG_ENABLED(ETNA_DBG_NIR))
shader->nir = (pss->type == PIPE_SHADER_IR_NIR) ? pss->ir.nir :
#include "etnaviv_clear_blit.h"
#include "etnaviv_context.h"
#include "etnaviv_format.h"
+#include "etnaviv_screen.h"
#include "etnaviv_shader.h"
#include "etnaviv_surface.h"
#include "etnaviv_translate.h"
const struct pipe_framebuffer_state *fb)
{
struct etna_context *ctx = etna_context(pctx);
+ struct etna_screen *screen = ctx->screen;
struct compiled_framebuffer_state *cs = &ctx->framebuffer;
int nr_samples_color = -1;
int nr_samples_depth = -1;
VIVS_PE_COLOR_FORMAT_COMPONENTS__MASK |
VIVS_PE_COLOR_FORMAT_OVERWRITE |
COND(color_supertiled, VIVS_PE_COLOR_FORMAT_SUPER_TILED) |
- COND(color_supertiled && ctx->specs.halti >= 5, VIVS_PE_COLOR_FORMAT_SUPER_TILED_NEW);
+ COND(color_supertiled && screen->specs.halti >= 5, VIVS_PE_COLOR_FORMAT_SUPER_TILED_NEW);
/* VIVS_PE_COLOR_FORMAT_COMPONENTS() and
* VIVS_PE_COLOR_FORMAT_OVERWRITE comes from blend_state
* but only if we set the bits above. */
cbuf->surf.offset, cbuf->surf.stride * 4);
}
- if (ctx->specs.pixel_pipes == 1) {
+ if (screen->specs.pixel_pipes == 1) {
cs->PE_COLOR_ADDR = cbuf->reloc[0];
cs->PE_COLOR_ADDR.flags = ETNA_RELOC_READ | ETNA_RELOC_WRITE;
} else {
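/* With more than one pixel pipe, each pipe renders part of the surface, so
 * the layout must provide a region per pipe (multi-tiled) unless the
 * hardware supports single-buffer mode. */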
/* Rendered textures must always be multi-tiled, or single-buffer mode must be supported */
- assert((res->layout & ETNA_LAYOUT_BIT_MULTI) || ctx->specs.single_buffer);
- for (int i = 0; i < ctx->specs.pixel_pipes; i++) {
+ assert((res->layout & ETNA_LAYOUT_BIT_MULTI) || screen->specs.single_buffer);
+ for (int i = 0; i < screen->specs.pixel_pipes; i++) {
cs->PE_PIPE_COLOR_ADDR[i] = cbuf->reloc[i];
cs->PE_PIPE_COLOR_ADDR[i].flags = ETNA_RELOC_READ | ETNA_RELOC_WRITE;
}
if (cbuf->level->ts_compress_fmt >= 0) {
/* overwrite bit breaks v1/v2 compression */
- if (!ctx->specs.v4_compression)
+ if (!screen->specs.v4_compression)
cs->PE_COLOR_FORMAT &= ~VIVS_PE_COLOR_FORMAT_OVERWRITE;
ts_mem_config |=
cs->PS_CONTROL = COND(util_format_is_unorm(cbuf->base.format), VIVS_PS_CONTROL_SATURATE_RT0);
cs->PS_CONTROL_EXT =
- VIVS_PS_CONTROL_EXT_OUTPUT_MODE0(translate_output_mode(cbuf->base.format, ctx->specs.halti >= 5));
+ VIVS_PS_CONTROL_EXT_OUTPUT_MODE0(translate_output_mode(cbuf->base.format, screen->specs.halti >= 5));
} else {
/* Clearing VIVS_PE_COLOR_FORMAT_COMPONENTS__MASK and
* VIVS_PE_COLOR_FORMAT_OVERWRITE prevents us from overwriting the
cs->TS_COLOR_SURFACE_BASE.bo = NULL;
cs->PE_COLOR_ADDR = ctx->dummy_rt_reloc;
- for (int i = 0; i < ctx->specs.pixel_pipes; i++)
+ for (int i = 0; i < screen->specs.pixel_pipes; i++)
cs->PE_PIPE_COLOR_ADDR[i] = ctx->dummy_rt_reloc;
}
COND(depth_supertiled, VIVS_PE_DEPTH_CONFIG_SUPER_TILED) |
VIVS_PE_DEPTH_CONFIG_DEPTH_MODE_Z |
VIVS_PE_DEPTH_CONFIG_UNK18 | /* something to do with clipping? */
- COND(ctx->specs.halti >= 5, VIVS_PE_DEPTH_CONFIG_DISABLE_ZS) /* Needs to be enabled on GC7000, otherwise depth writes hang w/ TS - apparently it does something else now */
+ COND(screen->specs.halti >= 5, VIVS_PE_DEPTH_CONFIG_DISABLE_ZS) /* Needs to be enabled on GC7000, otherwise depth writes hang w/ TS - apparently it does something else now */
;
/* VIVS_PE_DEPTH_CONFIG_ONLY_DEPTH */
/* merged with depth_stencil_alpha */
- if (ctx->specs.pixel_pipes == 1) {
+ if (screen->specs.pixel_pipes == 1) {
cs->PE_DEPTH_ADDR = zsbuf->reloc[0];
cs->PE_DEPTH_ADDR.flags = ETNA_RELOC_READ | ETNA_RELOC_WRITE;
} else {
- for (int i = 0; i < ctx->specs.pixel_pipes; i++) {
+ for (int i = 0; i < screen->specs.pixel_pipes; i++) {
cs->PE_PIPE_DEPTH_ADDR[i] = zsbuf->reloc[i];
cs->PE_PIPE_DEPTH_ADDR[i].flags = ETNA_RELOC_READ | ETNA_RELOC_WRITE;
}
* single buffer when this feature is available.
* note: the blob will use 2 in some situations; figure out why.
*/
- pe_logic_op |= VIVS_PE_LOGIC_OP_SINGLE_BUFFER(ctx->specs.single_buffer ? 3 : 0);
+ pe_logic_op |= VIVS_PE_LOGIC_OP_SINGLE_BUFFER(screen->specs.single_buffer ? 3 : 0);
cs->PE_LOGIC_OP = pe_logic_op;
/* keep copy of original structure */
unsigned num_elements, const struct pipe_vertex_element *elements)
{
struct etna_context *ctx = etna_context(pctx);
+ struct etna_screen *screen = ctx->screen;
struct compiled_vertex_elements_state *cs = CALLOC_STRUCT(compiled_vertex_elements_state);
if (!cs)
return NULL;
- if (num_elements > ctx->specs.vertex_max_elements) {
+ if (num_elements > screen->specs.vertex_max_elements) {
BUG("number of elements (%u) exceeds chip maximum (%u)", num_elements,
- ctx->specs.vertex_max_elements);
+ screen->specs.vertex_max_elements);
return NULL;
}
start_offset = elements[idx].src_offset;
/* guaranteed by PIPE_CAP_MAX_VERTEX_BUFFERS */
- assert(buffer_idx < ctx->specs.stream_count);
+ assert(buffer_idx < screen->specs.stream_count);
/* maximum vertex size is 256 bytes */
assert(element_size != 0 && (end_offset - start_offset) < 256);
assert(format_type != ETNA_NO_MATCH);
assert(normalize != ETNA_NO_MATCH);
- if (ctx->specs.halti < 5) {
+ if (screen->specs.halti < 5) {
cs->FE_VERTEX_ELEMENT_CONFIG[idx] =
COND(nonconsecutive, VIVS_FE_VERTEX_ELEMENT_CONFIG_NONCONSECUTIVE) |
format_type |
etna_render_handle_incompatible(struct pipe_context *pctx, struct pipe_resource *prsc)
{
struct etna_context *ctx = etna_context(pctx);
+ struct etna_screen *screen = ctx->screen;
struct etna_resource *res = etna_resource(prsc);
- bool need_multitiled = ctx->specs.pixel_pipes > 1 && !ctx->specs.single_buffer;
- bool want_supertiled = ctx->specs.can_supertile;
+ bool need_multitiled = screen->specs.pixel_pipes > 1 && !screen->specs.single_buffer;
+ bool want_supertiled = screen->specs.can_supertile;
/* Resource is compatible if it is tiled and has multi tiling when required
* TODO: LINEAR_PE feature means render to linear is possible?
const struct pipe_surface *templat)
{
struct etna_context *ctx = etna_context(pctx);
+ struct etna_screen *screen = ctx->screen;
struct etna_resource *rsc = etna_render_handle_incompatible(pctx, prsc);
struct etna_surface *surf = CALLOC_STRUCT(etna_surface);
* indicate the tile status module bypasses the memory
* offset and MMU. */
- if (VIV_FEATURE(ctx->screen, chipFeatures, FAST_CLEAR) &&
- VIV_FEATURE(ctx->screen, chipMinorFeatures0, MC20) &&
+ if (VIV_FEATURE(screen, chipFeatures, FAST_CLEAR) &&
+ VIV_FEATURE(screen, chipMinorFeatures0, MC20) &&
!rsc->ts_bo &&
/* needs to be RS/BLT compatible for transfer_map/unmap */
(rsc->levels[level].padded_width & ETNA_RS_WIDTH_MASK) == 0 &&
(rsc->levels[level].padded_height & ETNA_RS_HEIGHT_MASK) == 0 &&
- etna_resource_hw_tileable(ctx->specs.use_blt, prsc)) {
+ etna_resource_hw_tileable(screen->specs.use_blt, prsc)) {
etna_screen_resource_alloc_ts(pctx->screen, rsc);
}
struct etna_resource_level *lev = &rsc->levels[level];
/* Setup template relocations for this surface */
- for (unsigned pipe = 0; pipe < ctx->specs.pixel_pipes; ++pipe) {
+ for (unsigned pipe = 0; pipe < screen->specs.pixel_pipes; ++pipe) {
surf->reloc[pipe].bo = rsc->bo;
surf->reloc[pipe].offset = surf->surf.offset;
surf->reloc[pipe].flags = 0;
surf->ts_reloc.offset = surf->surf.ts_offset;
surf->ts_reloc.flags = 0;
- if (!ctx->specs.use_blt) {
+ if (!screen->specs.use_blt) {
/* This (ab)uses the RS as a plain buffer memset().
* Currently uses a fixed row size of 64 bytes. Some benchmarking with
* different sizes may be in order. */
.dither = {0xffffffff, 0xffffffff},
.width = 16,
.height = etna_align_up(surf->surf.ts_size / 0x40, 4),
- .clear_value = {ctx->specs.ts_clear_value},
+ .clear_value = {screen->specs.ts_clear_value},
.clear_mode = VIVS_RS_CLEAR_CONTROL_MODE_ENABLED1,
.clear_bits = 0xffff
});
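/* With the fixed 64-byte rows mentioned above, ts_size / 0x40 rows cover
 * the whole TS buffer; etna_align_up rounds the height up to a multiple of
 * 4 rows. */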
}
} else {
- if (!ctx->specs.use_blt)
+ if (!screen->specs.use_blt)
etna_rs_gen_clear_surface(ctx, surf, surf->level->clear_value);
}
{
/* bind fragment sampler */
struct etna_context *ctx = etna_context(pctx);
+ struct etna_screen *screen = ctx->screen;
int offset;
switch (shader) {
ctx->num_fragment_samplers = num_samplers;
break;
case PIPE_SHADER_VERTEX:
- offset = ctx->specs.vertex_sampler_offset;
+ offset = screen->specs.vertex_sampler_offset;
break;
default:
assert(!"Invalid shader");
etna_fragtex_set_sampler_views(struct etna_context *ctx, unsigned nr,
struct pipe_sampler_view **views)
{
+ struct etna_screen *screen = ctx->screen;
unsigned start = 0;
- unsigned end = start + ctx->specs.fragment_sampler_count;
+ unsigned end = start + screen->specs.fragment_sampler_count;
set_sampler_views(ctx, start, end, nr, views);
ctx->num_fragment_sampler_views = nr;
etna_vertex_set_sampler_views(struct etna_context *ctx, unsigned nr,
struct pipe_sampler_view **views)
{
- unsigned start = ctx->specs.vertex_sampler_offset;
- unsigned end = start + ctx->specs.vertex_sampler_count;
+ struct etna_screen *screen = ctx->screen;
+ unsigned start = screen->specs.vertex_sampler_offset;
+ unsigned end = start + screen->specs.vertex_sampler_count;
set_sampler_views(ctx, start, end, nr, views);
}
etna_texture_init(struct pipe_context *pctx)
{
struct etna_context *ctx = etna_context(pctx);
+ struct etna_screen *screen = ctx->screen;
pctx->bind_sampler_states = etna_bind_sampler_states;
pctx->set_sampler_views = etna_set_sampler_views;
pctx->texture_barrier = etna_texture_barrier;
- if (ctx->specs.halti >= 5)
+ if (screen->specs.halti >= 5)
etna_texture_desc_init(pctx);
else
etna_texture_state_init(pctx);
{
struct etna_sampler_view *sv = CALLOC_STRUCT(etna_sampler_view);
struct etna_context *ctx = etna_context(pctx);
+ struct etna_screen *screen = ctx->screen;
const uint32_t format = translate_texture_format(so->format);
const bool ext = !!(format & EXT_FORMAT);
const bool astc = !!(format & ASTC_FORMAT);
/* Workaround for npot textures -- it appears that only CLAMP_TO_EDGE is
* supported when the appropriate capability is not set. */
- if (!ctx->specs.npot_tex_any_wrap &&
+ if (!screen->specs.npot_tex_any_wrap &&
(!util_is_power_of_two_or_zero(res->base.width0) ||
!util_is_power_of_two_or_zero(res->base.height0))) {
sv->TE_SAMPLER_CONFIG0_MASK = ~(VIVS_TE_SAMPLER_CONFIG0_UWRAP__MASK |
etna_emit_texture_state(struct etna_context *ctx)
{
struct etna_cmd_stream *stream = ctx->stream;
+ struct etna_screen *screen = ctx->screen;
uint32_t active_samplers = active_samplers_bits(ctx);
uint32_t dirty = ctx->dirty;
struct etna_coalesce coalesce;
}
}
}
- if (unlikely(ctx->specs.tex_astc && (dirty & (ETNA_DIRTY_SAMPLER_VIEWS)))) {
+ if (unlikely(screen->specs.tex_astc && (dirty & (ETNA_DIRTY_SAMPLER_VIEWS)))) {
for (int x = 0; x < VIVS_TE_SAMPLER__LEN; ++x) {
if ((1 << x) & active_samplers) {
struct etna_sampler_view *sv = etna_sampler_view(ctx->sampler_view[x]);
}
}
}
- if (unlikely(ctx->specs.halti >= 1 && (dirty & (ETNA_DIRTY_SAMPLER_VIEWS)))) {
+ if (unlikely(screen->specs.halti >= 1 && (dirty & (ETNA_DIRTY_SAMPLER_VIEWS)))) {
for (int x = 0; x < VIVS_TE_SAMPLER__LEN; ++x) {
if ((1 << x) & active_samplers) {
struct etna_sampler_state *ss = etna_sampler_state(ctx->sampler[x]);
struct pipe_transfer **out_transfer)
{
struct etna_context *ctx = etna_context(pctx);
+ struct etna_screen *screen = ctx->screen;
struct etna_resource *rsc = etna_resource(prsc);
struct etna_transfer *trans;
struct pipe_transfer *ptrans;
rsc = etna_resource(rsc->texture);
} else if (rsc->ts_bo ||
(rsc->layout != ETNA_LAYOUT_LINEAR &&
- etna_resource_hw_tileable(ctx->specs.use_blt, prsc) &&
+ etna_resource_hw_tileable(screen->specs.use_blt, prsc) &&
/* HALIGN 4 resources are incompatible with the resolve engine,
* so fall back to using software to detile this resource. */
rsc->halign != TEXTURE_HALIGN_FOUR)) {
return NULL;
}
- if (!ctx->specs.use_blt) {
+ if (!screen->specs.use_blt) {
/* Need to align the transfer region to satisfy RS restrictions, as we
* really want to hit the RS blit path here.
*/
static unsigned
get_const_idx(const struct etna_context *ctx, bool frag, unsigned samp_id)
{
+ struct etna_screen *screen = ctx->screen;
+
if (frag)
return samp_id;
- return samp_id + ctx->specs.vertex_sampler_offset;
+ return samp_id + screen->specs.vertex_sampler_offset;
}
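/* For illustration: with a hypothetical vertex_sampler_offset of 8,
 * fragment sampler 2 maps to constant index 2, vertex sampler 2 to 10. */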
static uint32_t
const struct etna_shader_variant *sobj,
struct pipe_constant_buffer *cb)
{
+ struct etna_screen *screen = ctx->screen;
struct etna_cmd_stream *stream = ctx->stream;
const struct etna_shader_uniform_info *uinfo = &sobj->uniforms;
bool frag = (sobj == ctx->shader.fs);
- uint32_t base = frag ? ctx->specs.ps_uniforms_offset : ctx->specs.vs_uniforms_offset;
+ uint32_t base = frag ? screen->specs.ps_uniforms_offset : screen->specs.vs_uniforms_offset;
unsigned idx;
if (!uinfo->imm_count)
COND(so->depth.writemask, VIVS_PE_DEPTH_CONFIG_WRITE_ENABLE) |
COND(early_z, VIVS_PE_DEPTH_CONFIG_EARLY_Z) |
/* this bit changed meaning with HALTI5: */
- COND(disable_zs && ctx->specs.halti < 5, VIVS_PE_DEPTH_CONFIG_DISABLE_ZS);
+ COND(disable_zs && screen->specs.halti < 5, VIVS_PE_DEPTH_CONFIG_DISABLE_ZS);
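/* On HALTI5+ the same bit is instead driven unconditionally from the
 * framebuffer state (see the PE_DEPTH_CONFIG setup above). */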
cs->PE_ALPHA_OP =
COND(so->alpha.enabled, VIVS_PE_ALPHA_OP_ALPHA_TEST) |
VIVS_PE_ALPHA_OP_ALPHA_FUNC(so->alpha.func) |