void
iris_init_batch(struct iris_batch *batch,
struct iris_screen *screen,
- struct iris_vtable *vtbl,
struct pipe_debug_callback *dbg,
struct pipe_device_reset_callback *reset,
struct hash_table_u64 *state_sizes,
                struct iris_batch *all_batches,
                enum iris_batch_name name,
                int priority)
{
batch->screen = screen;
- batch->vtbl = vtbl;
batch->dbg = dbg;
batch->reset = reset;
batch->state_sizes = state_sizes;
struct iris_batch {
struct iris_screen *screen;
- struct iris_vtable *vtbl;
struct pipe_debug_callback *dbg;
struct pipe_device_reset_callback *reset;
void iris_init_batch(struct iris_batch *batch,
                     struct iris_screen *screen,
-                    struct iris_vtable *vtbl,
                     struct pipe_debug_callback *dbg,
                     struct pipe_device_reset_callback *reset,
                     struct hash_table_u64 *state_sizes,
                     struct iris_batch *all_batches,
                     enum iris_batch_name name,
                     int priority);
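
/*
 * Illustrative sketch, not part of the patch: with the vtable moved out of
 * iris_context/iris_batch and into iris_screen, generation-specific calls
 * go through the screen pointer that every batch already carries. The
 * helper name below is hypothetical.
 */
static inline void
example_copy_mem_mem(struct iris_batch *batch,
                     struct iris_bo *dst, struct iris_bo *src, unsigned bytes)
{
   /* Before this patch: ice->vtbl.copy_mem_mem(...) or batch->vtbl->... */
   batch->screen->vtbl.copy_mem_mem(batch, dst, 0, src, 0, bytes);
}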
void
-iris_blorp_surf_for_resource(struct iris_vtable *vtbl,
- struct isl_device *isl_dev,
+iris_blorp_surf_for_resource(struct isl_device *isl_dev,
struct blorp_surf *surf,
struct pipe_resource *p_res,
enum isl_aux_usage aux_usage,
bool dst_clear_supported = isl_aux_usage_has_fast_clears(dst_aux_usage);
struct blorp_surf src_surf, dst_surf;
- iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &src_surf,
+ iris_blorp_surf_for_resource(&screen->isl_dev, &src_surf,
info->src.resource, src_aux_usage,
info->src.level, false);
- iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &dst_surf,
+ iris_blorp_surf_for_resource(&screen->isl_dev, &dst_surf,
info->dst.resource, dst_aux_usage,
info->dst.level, true);
iris_resource_prepare_access(ice, batch, stc_dst, info->dst.level, 1,
info->dst.box.z, info->dst.box.depth,
stc_dst_aux_usage, false);
- iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &src_surf,
+ iris_blorp_surf_for_resource(&screen->isl_dev, &src_surf,
&src_res->base, stc_src_aux_usage,
info->src.level, false);
- iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &dst_surf,
+ iris_blorp_surf_for_resource(&screen->isl_dev, &dst_surf,
&stc_dst->base, stc_dst_aux_usage,
info->dst.level, true);
// XXX: what about one surface being a buffer and not the other?
struct blorp_surf src_surf, dst_surf;
- iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &src_surf,
+ iris_blorp_surf_for_resource(&screen->isl_dev, &src_surf,
src, src_aux_usage, src_level, false);
- iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &dst_surf,
+ iris_blorp_surf_for_resource(&screen->isl_dev, &dst_surf,
dst, dst_aux_usage, dst_level, true);
iris_resource_prepare_access(ice, batch, src_res, src_level, 1,
iris_emit_pipe_control_flush(batch,
"stall for MI_COPY_MEM_MEM copy_region",
PIPE_CONTROL_CS_STALL);
- ice->vtbl.copy_mem_mem(batch, dst_bo, dstx, iris_resource_bo(src),
+ batch->screen->vtbl.copy_mem_mem(batch, dst_bo, dstx, iris_resource_bo(src),
src_box->x, src_box->width);
return;
}
iris_use_pinned_bo(batch, binder->bo, false);
- ice->vtbl.update_surface_base_address(batch, binder);
+ batch->screen->vtbl.update_surface_base_address(batch, binder);
}
static void *
* is not something that should happen often, we stall on the CPU here
* to resolve the predication, and then proceed.
*/
- ice->vtbl.resolve_conditional_render(ice);
+ batch->screen->vtbl.resolve_conditional_render(ice);
if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
return;
blorp_batch_init(&ice->blorp, &blorp_batch, batch, blorp_flags);
struct blorp_surf surf;
- iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
+ iris_blorp_surf_for_resource(&batch->screen->isl_dev, &surf,
p_res, res->aux.usage, level, true);
/* In newer gens (> 9), the hardware will do a linear -> sRGB conversion of
box->z, box->depth, aux_usage);
struct blorp_surf surf;
- iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
+ iris_blorp_surf_for_resource(&batch->screen->isl_dev, &surf,
p_res, aux_usage, level, true);
struct blorp_batch blorp_batch;
* even more complex, so the easiest thing to do when the fast clear
* depth is changing is to stall on the CPU and resolve the predication.
*/
- ice->vtbl.resolve_conditional_render(ice);
+ batch->screen->vtbl.resolve_conditional_render(ice);
if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
return;
if (clear_depth && z_res) {
iris_resource_prepare_depth(ice, batch, z_res, level, box->z, box->depth);
- iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev,
+ iris_blorp_surf_for_resource(&batch->screen->isl_dev,
&z_surf, &z_res->base, z_res->aux.usage,
level, true);
}
if (stencil_mask) {
iris_resource_prepare_access(ice, batch, stencil_res, level, 1, box->z,
box->depth, stencil_res->aux.usage, false);
- iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev,
+ iris_blorp_surf_for_resource(&batch->screen->isl_dev,
&stencil_surf, &stencil_res->base,
stencil_res->aux.usage, level, true);
}
ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]);
assert(&ice->batches[IRIS_BATCH_RENDER] == batch);
- ice->vtbl.init_render_context(batch);
+ batch->screen->vtbl.init_render_context(batch);
} else if (batch->name == IRIS_BATCH_COMPUTE) {
ice = container_of(batch, ice, batches[IRIS_BATCH_COMPUTE]);
assert(&ice->batches[IRIS_BATCH_COMPUTE] == batch);
- ice->vtbl.init_compute_context(batch);
+ batch->screen->vtbl.init_compute_context(batch);
} else {
unreachable("unhandled batch reset");
}
memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
batch->last_surface_base_address = ~0ull;
batch->last_aux_map_state = 0;
- ice->vtbl.lost_genx_state(ice, batch);
+ batch->screen->vtbl.lost_genx_state(ice, batch);
}
static enum pipe_reset_status
iris_destroy_context(struct pipe_context *ctx)
{
struct iris_context *ice = (struct iris_context *)ctx;
+ struct iris_screen *screen = (struct iris_screen *)ctx->screen;
if (ctx->stream_uploader)
u_upload_destroy(ctx->stream_uploader);
- ice->vtbl.destroy_state(ice);
+ screen->vtbl.destroy_state(ice);
iris_destroy_program_cache(ice);
iris_destroy_border_color_pool(ice);
u_upload_destroy(ice->state.surface_uploader);
ice->state.sizes = _mesa_hash_table_u64_create(ice);
for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
- iris_init_batch(&ice->batches[i], screen, &ice->vtbl, &ice->dbg,
+ iris_init_batch(&ice->batches[i], screen, &ice->dbg,
&ice->reset, ice->state.sizes,
ice->batches, (enum iris_batch_name) i, priority);
}
- ice->vtbl.init_render_context(&ice->batches[IRIS_BATCH_RENDER]);
- ice->vtbl.init_compute_context(&ice->batches[IRIS_BATCH_COMPUTE]);
+ screen->vtbl.init_render_context(&ice->batches[IRIS_BATCH_RENDER]);
+ screen->vtbl.init_compute_context(&ice->batches[IRIS_BATCH_COMPUTE]);
return ctx;
}
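
/*
 * Sketch of the recurring pattern in this patch: code that used to read
 * ice->vtbl now recovers the iris_screen, either from batch->screen or by
 * downcasting the context's pipe_screen. The patch open-codes this cast at
 * each call site; the helper name here is hypothetical.
 */
static inline struct iris_screen *
example_screen_from_context(struct iris_context *ice)
{
   return (struct iris_screen *)ice->ctx.screen;
}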
#include "pipe/p_context.h"
#include "pipe/p_state.h"
+#include "util/slab.h"
#include "util/u_debug.h"
#include "intel/blorp/blorp.h"
#include "intel/dev/gen_debug.h"
bool zeroed;
};
-/**
- * Virtual table for generation-specific (genxml) function calls.
- */
-struct iris_vtable {
- void (*destroy_state)(struct iris_context *ice);
- void (*init_render_context)(struct iris_batch *batch);
- void (*init_compute_context)(struct iris_batch *batch);
- void (*upload_render_state)(struct iris_context *ice,
- struct iris_batch *batch,
- const struct pipe_draw_info *draw);
- void (*update_surface_base_address)(struct iris_batch *batch,
- struct iris_binder *binder);
- void (*upload_compute_state)(struct iris_context *ice,
- struct iris_batch *batch,
- const struct pipe_grid_info *grid);
- void (*rebind_buffer)(struct iris_context *ice,
- struct iris_resource *res);
- void (*resolve_conditional_render)(struct iris_context *ice);
- void (*load_register_reg32)(struct iris_batch *batch, uint32_t dst,
- uint32_t src);
- void (*load_register_reg64)(struct iris_batch *batch, uint32_t dst,
- uint32_t src);
- void (*load_register_imm32)(struct iris_batch *batch, uint32_t reg,
- uint32_t val);
- void (*load_register_imm64)(struct iris_batch *batch, uint32_t reg,
- uint64_t val);
- void (*load_register_mem32)(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset);
- void (*load_register_mem64)(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset);
- void (*store_register_mem32)(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset,
- bool predicated);
- void (*store_register_mem64)(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset,
- bool predicated);
- void (*store_data_imm32)(struct iris_batch *batch,
- struct iris_bo *bo, uint32_t offset,
- uint32_t value);
- void (*store_data_imm64)(struct iris_batch *batch,
- struct iris_bo *bo, uint32_t offset,
- uint64_t value);
- void (*copy_mem_mem)(struct iris_batch *batch,
- struct iris_bo *dst_bo, uint32_t dst_offset,
- struct iris_bo *src_bo, uint32_t src_offset,
- unsigned bytes);
- void (*emit_raw_pipe_control)(struct iris_batch *batch,
- const char *reason, uint32_t flags,
- struct iris_bo *bo, uint32_t offset,
- uint64_t imm);
-
- void (*emit_mi_report_perf_count)(struct iris_batch *batch,
- struct iris_bo *bo,
- uint32_t offset_in_bytes,
- uint32_t report_id);
-
- unsigned (*derived_program_state_size)(enum iris_program_cache_id id);
- void (*store_derived_program_state)(struct iris_context *ice,
- enum iris_program_cache_id cache_id,
- struct iris_compiled_shader *shader);
- uint32_t *(*create_so_decl_list)(const struct pipe_stream_output_info *sol,
- const struct brw_vue_map *vue_map);
- void (*populate_vs_key)(const struct iris_context *ice,
- const struct shader_info *info,
- gl_shader_stage last_stage,
- struct iris_vs_prog_key *key);
- void (*populate_tcs_key)(const struct iris_context *ice,
- struct iris_tcs_prog_key *key);
- void (*populate_tes_key)(const struct iris_context *ice,
- const struct shader_info *info,
- gl_shader_stage last_stage,
- struct iris_tes_prog_key *key);
- void (*populate_gs_key)(const struct iris_context *ice,
- const struct shader_info *info,
- gl_shader_stage last_stage,
- struct iris_gs_prog_key *key);
- void (*populate_fs_key)(const struct iris_context *ice,
- const struct shader_info *info,
- struct iris_fs_prog_key *key);
- void (*populate_cs_key)(const struct iris_context *ice,
- struct iris_cs_prog_key *key);
- void (*lost_genx_state)(struct iris_context *ice, struct iris_batch *batch);
-};
-
/**
* A pool containing SAMPLER_BORDER_COLOR_STATE entries.
*
/** Slab allocator for iris_transfer_map objects. */
struct slab_child_pool transfer_pool;
- struct iris_vtable vtbl;
-
struct blorp_context blorp;
struct iris_batch batches[IRIS_BATCH_COUNT];
/* iris_blit.c */
-void iris_blorp_surf_for_resource(struct iris_vtable *vtbl,
- struct isl_device *isl_dev,
+void iris_blorp_surf_for_resource(struct isl_device *isl_dev,
struct blorp_surf *surf,
struct pipe_resource *p_res,
enum isl_aux_usage aux_usage,
struct iris_batch *batch,
const struct pipe_draw_info *draw);
+
+
#ifdef genX
# include "iris_genx_protos.h"
#else
stage == MESA_SHADER_TESS_EVAL ||
stage == MESA_SHADER_GEOMETRY) {
struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
- so_decls = ice->vtbl.create_so_decl_list(&ish->stream_output,
+ so_decls = screen->vtbl.create_so_decl_list(&ish->stream_output,
&vue_prog_data->vue_map);
}
if (info.indirect->indirect_draw_count &&
ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
/* Upload MI_PREDICATE_RESULT to GPR15. */
- ice->vtbl.load_register_reg64(batch, CS_GPR(15), MI_PREDICATE_RESULT);
+ batch->screen->vtbl.load_register_reg64(batch, CS_GPR(15), MI_PREDICATE_RESULT);
}
uint64_t orig_dirty = ice->state.dirty;
iris_update_draw_parameters(ice, &info);
- ice->vtbl.upload_render_state(ice, batch, &info);
+ batch->screen->vtbl.upload_render_state(ice, batch, &info);
ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_RENDER;
if (info.indirect->indirect_draw_count &&
ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
/* Restore MI_PREDICATE_RESULT. */
- ice->vtbl.load_register_reg64(batch, MI_PREDICATE_RESULT, CS_GPR(15));
+ batch->screen->vtbl.load_register_reg64(batch, MI_PREDICATE_RESULT, CS_GPR(15));
}
/* Put this back for post-draw resolves, we'll clear it again after. */
iris_update_draw_parameters(ice, draw);
- ice->vtbl.upload_render_state(ice, batch, draw);
+ batch->screen->vtbl.upload_render_state(ice, batch, draw);
}
/**
iris_binder_reserve_3d(ice);
- ice->vtbl.update_surface_base_address(batch, &ice->state.binder);
+ batch->screen->vtbl.update_surface_base_address(batch, &ice->state.binder);
iris_handle_always_flush_cache(batch);
iris_update_grid_size_resource(ice, grid);
iris_binder_reserve_compute(ice);
- ice->vtbl.update_surface_base_address(batch, &ice->state.binder);
+ batch->screen->vtbl.update_surface_base_address(batch, &ice->state.binder);
if (ice->state.compute_predicate) {
- ice->vtbl.load_register_mem64(batch, MI_PREDICATE_RESULT,
+ batch->screen->vtbl.load_register_mem64(batch, MI_PREDICATE_RESULT,
ice->state.compute_predicate, 0);
ice->state.compute_predicate = NULL;
}
iris_handle_always_flush_cache(batch);
- ice->vtbl.upload_compute_state(ice, batch, grid);
+ batch->screen->vtbl.upload_compute_state(ice, batch, grid);
iris_handle_always_flush_cache(batch);
{
struct iris_context *ice = c;
struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
- ice->vtbl.emit_mi_report_perf_count(batch, bo, offset_in_bytes, report_id);
+ batch->screen->vtbl.emit_mi_report_perf_count(batch, bo, offset_in_bytes, report_id);
}
static void
struct iris_context *ice = ctx;
struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
if (reg_size == 8) {
- ice->vtbl.store_register_mem64(batch, reg, bo, offset, false);
+ batch->screen->vtbl.store_register_mem64(batch, reg, bo, offset, false);
} else {
assert(reg_size == 4);
- ice->vtbl.store_register_mem32(batch, reg, bo, offset, false);
+ batch->screen->vtbl.store_register_mem32(batch, reg, bo, offset, false);
}
}
flags &= ~(PIPE_CONTROL_CACHE_FLUSH_BITS | PIPE_CONTROL_CS_STALL);
}
- batch->vtbl->emit_raw_pipe_control(batch, reason, flags, NULL, 0, 0);
+ batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, NULL, 0, 0);
}
/**
struct iris_bo *bo, uint32_t offset,
uint64_t imm)
{
- batch->vtbl->emit_raw_pipe_control(batch, reason, flags, bo, offset, imm);
+ batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, bo, offset, imm);
}
/*
}
uint32_t *so_decls =
- ice->vtbl.create_so_decl_list(&ish->stream_output,
+ screen->vtbl.create_so_decl_list(&ish->stream_output,
&vue_prog_data->vue_map);
struct iris_compiled_shader *shader =
static void
iris_update_compiled_vs(struct iris_context *ice)
{
+ struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
struct iris_uncompiled_shader *ish =
ice->shaders.uncompiled[MESA_SHADER_VERTEX];
struct iris_vs_prog_key key = { KEY_ID(vue.base) };
- ice->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
+ screen->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
struct iris_compiled_shader *shader =
};
get_unified_tess_slots(ice, &key.outputs_written,
&key.patch_outputs_written);
- ice->vtbl.populate_tcs_key(ice, &key);
+ screen->vtbl.populate_tcs_key(ice, &key);
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
struct iris_compiled_shader *shader =
}
uint32_t *so_decls =
- ice->vtbl.create_so_decl_list(&ish->stream_output,
+ screen->vtbl.create_so_decl_list(&ish->stream_output,
&vue_prog_data->vue_map);
static void
iris_update_compiled_tes(struct iris_context *ice)
{
+ struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
struct iris_uncompiled_shader *ish =
ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
struct iris_tes_prog_key key = { KEY_ID(vue.base) };
get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
- ice->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
+ screen->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
struct iris_compiled_shader *shader =
}
uint32_t *so_decls =
- ice->vtbl.create_so_decl_list(&ish->stream_output,
+ screen->vtbl.create_so_decl_list(&ish->stream_output,
&vue_prog_data->vue_map);
struct iris_compiled_shader *shader =
ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
struct iris_compiled_shader *shader = NULL;
+ struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
if (ish) {
struct iris_gs_prog_key key = { KEY_ID(vue.base) };
- ice->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
+ screen->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
shader =
iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);
struct iris_uncompiled_shader *ish =
ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
struct iris_fs_prog_key key = { KEY_ID(base) };
- ice->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
+ struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
+ screen->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
struct iris_cs_prog_key key = { KEY_ID(base) };
- ice->vtbl.populate_cs_key(ice, &key);
+ struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
+ screen->vtbl.populate_cs_key(ice, &key);
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
struct iris_compiled_shader *shader =
const struct iris_binding_table *bt)
{
struct hash_table *cache = ice->shaders.cache;
+ struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
struct iris_compiled_shader *shader =
rzalloc_size(cache, sizeof(struct iris_compiled_shader) +
- ice->vtbl.derived_program_state_size(cache_id));
+ screen->vtbl.derived_program_state_size(cache_id));
const struct iris_compiled_shader *existing =
find_existing_assembly(cache, assembly, prog_data->program_size);
ralloc_steal(shader, shader->system_values);
/* Store the 3DSTATE shader packets and other derived state. */
- ice->vtbl.store_derived_program_state(ice, cache_id, shader);
+ screen->vtbl.store_derived_program_state(ice, cache_id, shader);
struct keybox *keybox = make_keybox(shader, cache_id, key, key_size);
_mesa_hash_table_insert(ice->shaders.cache, keybox, shader);
offset += q->query_state_ref.offset;
if (!iris_is_query_pipelined(q)) {
- ice->vtbl.store_data_imm64(batch, bo, offset, true);
+ batch->screen->vtbl.store_data_imm64(batch, bo, offset, true);
} else {
/* Order available *after* the query results. */
flags |= PIPE_CONTROL_FLUSH_ENABLE;
offset);
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
- ice->vtbl.store_register_mem64(batch,
+ batch->screen->vtbl.store_register_mem64(batch,
q->index == 0 ?
GENX(CL_INVOCATION_COUNT_num) :
SO_PRIM_STORAGE_NEEDED(q->index),
bo, offset, false);
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
- ice->vtbl.store_register_mem64(batch,
+ batch->screen->vtbl.store_register_mem64(batch,
SO_NUM_PRIMS_WRITTEN(q->index),
bo, offset, false);
break;
};
const uint32_t reg = index_to_reg[q->index];
- ice->vtbl.store_register_mem64(batch, reg, bo, offset, false);
+ batch->screen->vtbl.store_register_mem64(batch, reg, bo, offset, false);
break;
}
default:
stream[s].num_prims[end]);
int w_idx = offset + offsetof(struct iris_query_so_overflow,
stream[s].prim_storage_needed[end]);
- ice->vtbl.store_register_mem64(batch, SO_NUM_PRIMS_WRITTEN(s),
+ batch->screen->vtbl.store_register_mem64(batch, SO_NUM_PRIMS_WRITTEN(s),
bo, g_idx, false);
- ice->vtbl.store_register_mem64(batch, SO_PRIM_STORAGE_NEEDED(s),
+ batch->screen->vtbl.store_register_mem64(batch, SO_PRIM_STORAGE_NEEDED(s),
bo, w_idx, false);
}
}
if (q->syncpt == iris_batch_get_signal_syncpt(batch))
iris_batch_flush(batch);
- ice->vtbl.copy_mem_mem(batch, dst_bo, offset,
+ batch->screen->vtbl.copy_mem_mem(batch, dst_bo, offset,
query_bo, snapshots_landed_offset,
result_type <= PIPE_QUERY_TYPE_U32 ? 4 : 8);
return;
if (q->ready) {
/* We happen to have the result on the CPU, so just copy it. */
if (result_type <= PIPE_QUERY_TYPE_U32) {
- ice->vtbl.store_data_imm32(batch, dst_bo, offset, q->result);
+ batch->screen->vtbl.store_data_imm32(batch, dst_bo, offset, q->result);
} else {
- ice->vtbl.store_data_imm64(batch, dst_bo, offset, q->result);
+ batch->screen->vtbl.store_data_imm64(batch, dst_bo, offset, q->result);
}
/* Make sure the result lands before they bind the QBO elsewhere
genX(init_query)(struct iris_context *ice)
{
struct pipe_context *ctx = &ice->ctx;
+ struct iris_screen *screen = (struct iris_screen *)ctx->screen;
ctx->create_query = iris_create_query;
ctx->create_batch_query = iris_create_batch_query;
ctx->set_active_query_state = iris_set_active_query_state;
ctx->render_condition = iris_render_condition;
- ice->vtbl.resolve_conditional_render = iris_resolve_conditional_render;
+ screen->vtbl.resolve_conditional_render = iris_resolve_conditional_render;
}
//DBG("%s to mt %p level %u layer %u\n", __FUNCTION__, mt, level, layer);
struct blorp_surf surf;
- iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
+ iris_blorp_surf_for_resource(&batch->screen->isl_dev, &surf,
&res->base, res->aux.usage, level, true);
iris_batch_maybe_flush(batch, 1500);
assert(isl_aux_usage_has_mcs(res->aux.usage));
struct blorp_surf surf;
- iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
+ iris_blorp_surf_for_resource(&batch->screen->isl_dev, &surf,
&res->base, res->aux.usage, 0, true);
struct blorp_batch blorp_batch;
iris_batch_maybe_flush(batch, 1500);
struct blorp_surf surf;
- iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
+ iris_blorp_surf_for_resource(&batch->screen->isl_dev, &surf,
&res->base, res->aux.usage, level, true);
struct blorp_batch blorp_batch;
/* Rebind the buffer, replacing any state referring to the old BO's
* address, and marking state dirty so it's reemitted.
*/
- ice->vtbl.rebind_buffer(ice, res);
+ screen->vtbl.rebind_buffer(ice, res);
util_range_set_empty(&res->valid_buffer_range);
#include "intel/dev/gen_device_info.h"
#include "intel/isl/isl.h"
#include "iris_bufmgr.h"
+#include "iris_binder.h"
+#include "iris_resource.h"
-struct iris_bo;
-struct iris_monitor_config;
struct gen_l3_config;
+struct brw_vue_map;
+struct iris_monitor_config;
+struct iris_vs_prog_key;
+struct iris_tcs_prog_key;
+struct iris_tes_prog_key;
+struct iris_gs_prog_key;
+struct iris_fs_prog_key;
+struct iris_cs_prog_key;
+enum iris_program_cache_id;
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) *(volatile __typeof__(x) *)&(x) = (v)
#define IRIS_MAX_SOL_BUFFERS 4
#define IRIS_MAP_BUFFER_ALIGNMENT 64
+/**
+ * Virtual table for generation-specific (genxml) function calls.
+ */
+struct iris_vtable {
+ void (*destroy_state)(struct iris_context *ice);
+ void (*init_render_context)(struct iris_batch *batch);
+ void (*init_compute_context)(struct iris_batch *batch);
+ void (*upload_render_state)(struct iris_context *ice,
+ struct iris_batch *batch,
+ const struct pipe_draw_info *draw);
+ void (*update_surface_base_address)(struct iris_batch *batch,
+ struct iris_binder *binder);
+ void (*upload_compute_state)(struct iris_context *ice,
+ struct iris_batch *batch,
+ const struct pipe_grid_info *grid);
+ void (*rebind_buffer)(struct iris_context *ice,
+ struct iris_resource *res);
+ void (*resolve_conditional_render)(struct iris_context *ice);
+ void (*load_register_reg32)(struct iris_batch *batch, uint32_t dst,
+ uint32_t src);
+ void (*load_register_reg64)(struct iris_batch *batch, uint32_t dst,
+ uint32_t src);
+ void (*load_register_imm32)(struct iris_batch *batch, uint32_t reg,
+ uint32_t val);
+ void (*load_register_imm64)(struct iris_batch *batch, uint32_t reg,
+ uint64_t val);
+ void (*load_register_mem32)(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset);
+ void (*load_register_mem64)(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset);
+ void (*store_register_mem32)(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset,
+ bool predicated);
+ void (*store_register_mem64)(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset,
+ bool predicated);
+ void (*store_data_imm32)(struct iris_batch *batch,
+ struct iris_bo *bo, uint32_t offset,
+ uint32_t value);
+ void (*store_data_imm64)(struct iris_batch *batch,
+ struct iris_bo *bo, uint32_t offset,
+ uint64_t value);
+ void (*copy_mem_mem)(struct iris_batch *batch,
+ struct iris_bo *dst_bo, uint32_t dst_offset,
+ struct iris_bo *src_bo, uint32_t src_offset,
+ unsigned bytes);
+ void (*emit_raw_pipe_control)(struct iris_batch *batch,
+ const char *reason, uint32_t flags,
+ struct iris_bo *bo, uint32_t offset,
+ uint64_t imm);
+
+ void (*emit_mi_report_perf_count)(struct iris_batch *batch,
+ struct iris_bo *bo,
+ uint32_t offset_in_bytes,
+ uint32_t report_id);
+
+ unsigned (*derived_program_state_size)(enum iris_program_cache_id id);
+ void (*store_derived_program_state)(struct iris_context *ice,
+ enum iris_program_cache_id cache_id,
+ struct iris_compiled_shader *shader);
+ uint32_t *(*create_so_decl_list)(const struct pipe_stream_output_info *sol,
+ const struct brw_vue_map *vue_map);
+ void (*populate_vs_key)(const struct iris_context *ice,
+ const struct shader_info *info,
+ gl_shader_stage last_stage,
+ struct iris_vs_prog_key *key);
+ void (*populate_tcs_key)(const struct iris_context *ice,
+ struct iris_tcs_prog_key *key);
+ void (*populate_tes_key)(const struct iris_context *ice,
+ const struct shader_info *info,
+ gl_shader_stage last_stage,
+ struct iris_tes_prog_key *key);
+ void (*populate_gs_key)(const struct iris_context *ice,
+ const struct shader_info *info,
+ gl_shader_stage last_stage,
+ struct iris_gs_prog_key *key);
+ void (*populate_fs_key)(const struct iris_context *ice,
+ const struct shader_info *info,
+ struct iris_fs_prog_key *key);
+ void (*populate_cs_key)(const struct iris_context *ice,
+ struct iris_cs_prog_key *key);
+ void (*lost_genx_state)(struct iris_context *ice, struct iris_batch *batch);
+};
+
struct iris_screen {
struct pipe_screen base;
bool no_hw;
+ struct iris_vtable vtbl;
+
/** Global program_string_id counter (see get_program_string_id()) */
unsigned program_id;
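
/*
 * Usage sketch (assumed, not from the patch): because the vtable now lives
 * in the screen, any code holding only a batch can reach the genxml
 * entrypoints, e.g. an unpredicated 64-bit register store. The function
 * name is hypothetical; the call matches the vtbl signature above.
 */
static void
example_store_register(struct iris_batch *batch, uint32_t reg,
                       struct iris_bo *bo, uint32_t offset)
{
   batch->screen->vtbl.store_register_mem64(batch, reg, bo, offset,
                                            false /* predicated */);
}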
ctx->set_stream_output_targets = iris_set_stream_output_targets;
ctx->set_frontend_noop = iris_set_frontend_noop;
- ice->vtbl.destroy_state = iris_destroy_state;
- ice->vtbl.init_render_context = iris_init_render_context;
- ice->vtbl.init_compute_context = iris_init_compute_context;
- ice->vtbl.upload_render_state = iris_upload_render_state;
- ice->vtbl.update_surface_base_address = iris_update_surface_base_address;
- ice->vtbl.upload_compute_state = iris_upload_compute_state;
- ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
- ice->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
- ice->vtbl.rebind_buffer = iris_rebind_buffer;
- ice->vtbl.load_register_reg32 = iris_load_register_reg32;
- ice->vtbl.load_register_reg64 = iris_load_register_reg64;
- ice->vtbl.load_register_imm32 = iris_load_register_imm32;
- ice->vtbl.load_register_imm64 = iris_load_register_imm64;
- ice->vtbl.load_register_mem32 = iris_load_register_mem32;
- ice->vtbl.load_register_mem64 = iris_load_register_mem64;
- ice->vtbl.store_register_mem32 = iris_store_register_mem32;
- ice->vtbl.store_register_mem64 = iris_store_register_mem64;
- ice->vtbl.store_data_imm32 = iris_store_data_imm32;
- ice->vtbl.store_data_imm64 = iris_store_data_imm64;
- ice->vtbl.copy_mem_mem = iris_copy_mem_mem;
- ice->vtbl.derived_program_state_size = iris_derived_program_state_size;
- ice->vtbl.store_derived_program_state = iris_store_derived_program_state;
- ice->vtbl.create_so_decl_list = iris_create_so_decl_list;
- ice->vtbl.populate_vs_key = iris_populate_vs_key;
- ice->vtbl.populate_tcs_key = iris_populate_tcs_key;
- ice->vtbl.populate_tes_key = iris_populate_tes_key;
- ice->vtbl.populate_gs_key = iris_populate_gs_key;
- ice->vtbl.populate_fs_key = iris_populate_fs_key;
- ice->vtbl.populate_cs_key = iris_populate_cs_key;
- ice->vtbl.lost_genx_state = iris_lost_genx_state;
+ screen->vtbl.destroy_state = iris_destroy_state;
+ screen->vtbl.init_render_context = iris_init_render_context;
+ screen->vtbl.init_compute_context = iris_init_compute_context;
+ screen->vtbl.upload_render_state = iris_upload_render_state;
+ screen->vtbl.update_surface_base_address = iris_update_surface_base_address;
+ screen->vtbl.upload_compute_state = iris_upload_compute_state;
+ screen->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
+ screen->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
+ screen->vtbl.rebind_buffer = iris_rebind_buffer;
+ screen->vtbl.load_register_reg32 = iris_load_register_reg32;
+ screen->vtbl.load_register_reg64 = iris_load_register_reg64;
+ screen->vtbl.load_register_imm32 = iris_load_register_imm32;
+ screen->vtbl.load_register_imm64 = iris_load_register_imm64;
+ screen->vtbl.load_register_mem32 = iris_load_register_mem32;
+ screen->vtbl.load_register_mem64 = iris_load_register_mem64;
+ screen->vtbl.store_register_mem32 = iris_store_register_mem32;
+ screen->vtbl.store_register_mem64 = iris_store_register_mem64;
+ screen->vtbl.store_data_imm32 = iris_store_data_imm32;
+ screen->vtbl.store_data_imm64 = iris_store_data_imm64;
+ screen->vtbl.copy_mem_mem = iris_copy_mem_mem;
+ screen->vtbl.derived_program_state_size = iris_derived_program_state_size;
+ screen->vtbl.store_derived_program_state = iris_store_derived_program_state;
+ screen->vtbl.create_so_decl_list = iris_create_so_decl_list;
+ screen->vtbl.populate_vs_key = iris_populate_vs_key;
+ screen->vtbl.populate_tcs_key = iris_populate_tcs_key;
+ screen->vtbl.populate_tes_key = iris_populate_tes_key;
+ screen->vtbl.populate_gs_key = iris_populate_gs_key;
+ screen->vtbl.populate_fs_key = iris_populate_fs_key;
+ screen->vtbl.populate_cs_key = iris_populate_cs_key;
+ screen->vtbl.lost_genx_state = iris_lost_genx_state;
ice->state.dirty = ~0ull;
#include <fcntl.h>
#include "iris_drm_public.h"
-#include "iris/iris_screen.h"
+extern struct pipe_screen *iris_screen_create(int fd, const struct pipe_screen_config *config);
struct pipe_screen *
iris_drm_screen_create(int fd, const struct pipe_screen_config *config)
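{
   /* Body sketch (assumption, based on how this winsys forwards to the
    * driver): only iris_screen_create is needed here, so the bare prototype
    * above replaces the iris/iris_screen.h include, which would now drag in
    * the driver-internal headers (iris_binder.h, iris_resource.h) added by
    * this patch. The fcntl.h include above supports duplicating the fd. */
   return iris_screen_create(fcntl(fd, F_DUPFD_CLOEXEC, 3), config);
}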