X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Firis%2Firis_state.c;h=bfc29d71496f001b62d7465501b06105d1338f12;hb=de2efd5ea7fafba09a596beba68436c33ebe912e;hp=60b30739df8ff7e52d98fce9bf743b90994945cf;hpb=00b5bf2d729f6c23525c4496552036c71d05479e;p=mesa.git diff --git a/src/gallium/drivers/iris/iris_state.c b/src/gallium/drivers/iris/iris_state.c index 60b30739df8..bfc29d71496 100644 --- a/src/gallium/drivers/iris/iris_state.c +++ b/src/gallium/drivers/iris/iris_state.c @@ -608,6 +608,85 @@ iris_enable_obj_preemption(struct iris_batch *batch, bool enable) } #endif +#if GEN_GEN == 11 +static void +iris_upload_slice_hashing_state(struct iris_batch *batch) +{ + const struct gen_device_info *devinfo = &batch->screen->devinfo; + int subslices_delta = + devinfo->ppipe_subslices[0] - devinfo->ppipe_subslices[1]; + if (subslices_delta == 0) + return; + + struct iris_context *ice = NULL; + ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]); + assert(&ice->batches[IRIS_BATCH_RENDER] == batch); + + unsigned size = GENX(SLICE_HASH_TABLE_length) * 4; + uint32_t hash_address; + struct pipe_resource *tmp = NULL; + uint32_t *map = + stream_state(batch, ice->state.dynamic_uploader, &tmp, + size, 64, &hash_address); + pipe_resource_reference(&tmp, NULL); + + struct GENX(SLICE_HASH_TABLE) table0 = { + .Entry = { + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }, + { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 }, + { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 }, + { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 } + } + }; + + struct GENX(SLICE_HASH_TABLE) table1 = { + .Entry = { + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }, + { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 }, + { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 } + } + }; + + const struct GENX(SLICE_HASH_TABLE) *table = + subslices_delta < 0 ? 
&table0 : &table1; + GENX(SLICE_HASH_TABLE_pack)(NULL, map, table); + + iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) { + ptr.SliceHashStatePointerValid = true; + ptr.SliceHashTableStatePointer = hash_address; + } + + iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) { + mode.SliceHashingTableEnable = true; + } +} +#endif + /** * Upload the initial GPU state for a render context. * @@ -687,7 +766,7 @@ iris_init_render_context(struct iris_screen *screen, iris_emit_lri(batch, CACHE_MODE_0, reg_val); } - // XXX: 3D_MODE? + iris_upload_slice_hashing_state(batch); #endif /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid @@ -1204,7 +1283,6 @@ iris_create_rasterizer_state(struct pipe_context *ctx, iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) { sf.StatisticsEnable = true; - sf.ViewportTransformEnable = true; sf.AALineDistanceMode = AALINEDISTANCE_TRUE; sf.LineEndCapAntialiasingRegionWidth = state->line_smooth ? _10pixels : _05pixels; @@ -1671,6 +1749,8 @@ fill_surface_state(struct isl_device *isl_dev, .address = res->bo->gtt_offset + res->offset, }; + assert(!iris_resource_unfinished_aux_import(res)); + if (aux_usage != ISL_AUX_USAGE_NONE) { f.aux_surf = &res->aux.surf; f.aux_usage = aux_usage; @@ -1761,6 +1841,9 @@ iris_create_sampler_view(struct pipe_context *ctx, isv->view.array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1; + if (iris_resource_unfinished_aux_import(isv->res)) + iris_resource_finish_aux_import(&screen->base, isv->res); + unsigned aux_modes = isv->res->aux.sampler_usages; while (aux_modes) { enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes); @@ -1871,6 +1954,9 @@ iris_create_surface(struct pipe_context *ctx, return NULL; if (!isl_format_is_compressed(res->surf.format)) { + if (iris_resource_unfinished_aux_import(res)) + iris_resource_finish_aux_import(&screen->base, res); + /* This is a normal surface. Fill out a SURFACE_STATE for each possible * auxiliary surface mode and return the pipe_surface. 
*/ @@ -2181,12 +2267,15 @@ iris_set_clip_state(struct pipe_context *ctx, struct iris_context *ice = (struct iris_context *) ctx; struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX]; struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY]; + struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL]; memcpy(&ice->state.clip_planes, state, sizeof(*state)); - ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS | IRIS_DIRTY_CONSTANTS_GS; + ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS | IRIS_DIRTY_CONSTANTS_GS | + IRIS_DIRTY_CONSTANTS_TES; shs->sysvals_need_upload = true; gshs->sysvals_need_upload = true; + tshs->sysvals_need_upload = true; } /** @@ -2380,7 +2469,7 @@ iris_set_framebuffer_state(struct pipe_context *ctx, if (iris_resource_level_has_hiz(zres, view.base_level)) { info.hiz_usage = ISL_AUX_USAGE_HIZ; info.hiz_surf = &zres->aux.surf; - info.hiz_address = zres->aux.bo->gtt_offset; + info.hiz_address = zres->aux.bo->gtt_offset + zres->aux.offset; } } @@ -3363,8 +3452,16 @@ iris_populate_tcs_key(const struct iris_context *ice, */ static void iris_populate_tes_key(const struct iris_context *ice, + const struct shader_info *info, + gl_shader_stage last_stage, struct brw_tes_prog_key *key) { + const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast; + + if (info->clip_distance_array_size == 0 && + (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) && + last_stage == MESA_SHADER_TESS_EVAL) + key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts; } /** @@ -3634,23 +3731,6 @@ iris_store_fs_state(struct iris_context *ice, */ ps.PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE; - ps._8PixelDispatchEnable = wm_prog_data->dispatch_8; - ps._16PixelDispatchEnable = wm_prog_data->dispatch_16; - /* ps._32PixelDispatchEnable is filled in at draw time. 
*/ - - ps.DispatchGRFStartRegisterForConstantSetupData0 = - brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0); - ps.DispatchGRFStartRegisterForConstantSetupData1 = - brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1); - ps.DispatchGRFStartRegisterForConstantSetupData2 = - brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2); - - ps.KernelStartPointer0 = - KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0); - ps.KernelStartPointer1 = - KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1); - ps.KernelStartPointer2 = - KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2); if (prog_data->total_scratch) { struct iris_bo *bo = @@ -3675,8 +3755,6 @@ iris_store_fs_state(struct iris_context *ice, #if GEN_GEN >= 9 psx.PixelShaderPullsBary = wm_prog_data->pulls_bary; psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil; -#else - psx.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask; #endif } } @@ -4343,6 +4421,18 @@ iris_update_surface_base_address(struct iris_batch *batch, batch->last_surface_base_address = binder->bo->gtt_offset; } +static inline void +iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz, + bool window_space_position, float *zmin, float *zmax) +{ + if (window_space_position) { + *zmin = 0.f; + *zmax = 1.f; + return; + } + util_viewport_zmin_zmax(vp, halfz, zmin, zmax); +} + static void iris_upload_dirty_render_state(struct iris_context *ice, struct iris_batch *batch, @@ -4370,8 +4460,9 @@ iris_upload_dirty_render_state(struct iris_context *ice, GENX(CC_VIEWPORT_length), 32, &cc_vp_address); for (int i = 0; i < ice->state.num_viewports; i++) { float zmin, zmax; - util_viewport_zmin_zmax(&ice->state.viewports[i], - cso_rast->clip_halfz, &zmin, &zmax); + iris_viewport_zmin_zmax(&ice->state.viewports[i], cso_rast->clip_halfz, + ice->state.window_space_position, + &zmin, &zmax); if (cso_rast->depth_clip_near) zmin = 0.0; if (cso_rast->depth_clip_far) @@ -4644,47 +4735,72 @@ iris_upload_dirty_render_state(struct iris_context *ice, iris_get_scratch_space(ice, prog_data->total_scratch, stage); iris_use_pinned_bo(batch, bo, true); } -#if GEN_GEN >= 9 - if (stage == MESA_SHADER_FRAGMENT && wm_prog_data->uses_sample_mask) { - uint32_t *shader_ps = (uint32_t *) shader->derived_data; - uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length); - uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0}; - uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0}; - struct iris_rasterizer_state *cso = ice->state.cso_rast; + + if (stage == MESA_SHADER_FRAGMENT) { + UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast; struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer; - /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say: - * - * "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16, - * SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch - * mode." - * - * 16x MSAA only exists on Gen9+, so we can skip this on Gen8. 
- */ - iris_pack_command(GENX(3DSTATE_PS), &ps_state, ps) { - ps._32PixelDispatchEnable = wm_prog_data->dispatch_32 && - (cso_fb->samples != 16 || wm_prog_data->persample_dispatch); + uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0}; + iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) { + ps._8PixelDispatchEnable = wm_prog_data->dispatch_8; + ps._16PixelDispatchEnable = wm_prog_data->dispatch_16; + ps._32PixelDispatchEnable = wm_prog_data->dispatch_32; + + /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say: + * + * "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16, + * SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch + * mode." + * + * 16x MSAA only exists on Gen9+, so we can skip this on Gen8. + */ + if (GEN_GEN >= 9 && cso_fb->samples == 16 && + !wm_prog_data->persample_dispatch) { + assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable); + ps._32PixelDispatchEnable = false; + } + + ps.DispatchGRFStartRegisterForConstantSetupData0 = + brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0); + ps.DispatchGRFStartRegisterForConstantSetupData1 = + brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1); + ps.DispatchGRFStartRegisterForConstantSetupData2 = + brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2); + + ps.KernelStartPointer0 = KSP(shader) + + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0); + ps.KernelStartPointer1 = KSP(shader) + + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1); + ps.KernelStartPointer2 = KSP(shader) + + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2); } - iris_pack_command(GENX(3DSTATE_PS_EXTRA), &psx_state, psx) { + uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0}; + iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) { +#if GEN_GEN >= 9 if (wm_prog_data->post_depth_coverage) psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE; - else if (wm_prog_data->inner_coverage && cso->conservative_rasterization) + else if (wm_prog_data->inner_coverage && + cso->conservative_rasterization) psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE; else psx.InputCoverageMaskState = ICMS_NORMAL; +#else + psx.PixelShaderUsesInputCoverageMask = + wm_prog_data->uses_sample_mask; +#endif } + uint32_t *shader_ps = (uint32_t *) shader->derived_data; + uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length); iris_emit_merge(batch, shader_ps, ps_state, GENX(3DSTATE_PS_length)); - iris_emit_merge(batch, - shader_psx, - psx_state, + iris_emit_merge(batch, shader_psx, psx_state, GENX(3DSTATE_PS_EXTRA_length)); - } else -#endif + } else { iris_batch_emit(batch, shader->derived_data, iris_derived_program_state_size(stage)); + } } else { if (stage == MESA_SHADER_TESS_EVAL) { iris_emit_cmd(batch, GENX(3DSTATE_HS), hs); @@ -4756,8 +4872,14 @@ iris_upload_dirty_render_state(struct iris_context *ice, uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)]; iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) { cl.StatisticsEnable = ice->state.statistics_counters_enabled; - cl.ClipMode = cso_rast->rasterizer_discard ? 
CLIPMODE_REJECT_ALL - : CLIPMODE_NORMAL; + if (cso_rast->rasterizer_discard) + cl.ClipMode = CLIPMODE_REJECT_ALL; + else if (ice->state.window_space_position) + cl.ClipMode = CLIPMODE_ACCEPT_ALL; + else + cl.ClipMode = CLIPMODE_NORMAL; + + cl.PerspectiveDivideDisable = ice->state.window_space_position; cl.ViewportXYClipTestEnable = !points_or_lines; if (wm_prog_data->barycentric_interp_modes & @@ -4774,8 +4896,13 @@ iris_upload_dirty_render_state(struct iris_context *ice, if (dirty & IRIS_DIRTY_RASTER) { struct iris_rasterizer_state *cso = ice->state.cso_rast; iris_batch_emit(batch, cso->raster, sizeof(cso->raster)); - iris_batch_emit(batch, cso->sf, sizeof(cso->sf)); + uint32_t dynamic_sf[GENX(3DSTATE_SF_length)]; + iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) { + sf.ViewportTransformEnable = !ice->state.window_space_position; + } + iris_emit_merge(batch, cso->sf, dynamic_sf, + ARRAY_SIZE(dynamic_sf)); } if (dirty & IRIS_DIRTY_WM) { @@ -5152,6 +5279,9 @@ iris_upload_dirty_render_state(struct iris_context *ice, } } + if (ice->state.current_hash_scale != 1) + genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1); + /* TODO: Gen8 PMA fix */ } @@ -6410,6 +6540,111 @@ iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch) memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer)); } +static void +iris_emit_mi_report_perf_count(struct iris_batch *batch, + struct iris_bo *bo, + uint32_t offset_in_bytes, + uint32_t report_id) +{ + iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) { + mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes); + mi_rpc.ReportID = report_id; + } +} + +/** + * Update the pixel hashing modes that determine the balancing of PS threads + * across subslices and slices. + * + * \param width Width bound of the rendering area (already scaled down if \p + * scale is greater than 1). + * \param height Height bound of the rendering area (already scaled down if \p + * scale is greater than 1). + * \param scale The number of framebuffer samples that could potentially be + * affected by an individual channel of the PS thread. This is + * typically one for single-sampled rendering, but for operations + * like CCS resolves and fast clears a single PS invocation may + * update a huge number of pixels, in which case a finer + * balancing is desirable in order to maximally utilize the + * bandwidth available. UINT_MAX can be used as shorthand for + * "finest hashing mode available". + */ +void +genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch, + unsigned width, unsigned height, unsigned scale) +{ +#if GEN_GEN == 9 + const struct gen_device_info *devinfo = &batch->screen->devinfo; + const unsigned slice_hashing[] = { + /* Because all Gen9 platforms with more than one slice require + * three-way subslice hashing, a single "normal" 16x16 slice hashing + * block is guaranteed to suffer from substantial imbalance, with one + * subslice receiving twice as much work as the other two in the + * slice. 
+ * + * The performance impact of that would be particularly severe when + * three-way hashing is also in use for slice balancing (which is the + * case for all Gen9 GT4 platforms), because one of the slices + * receives one every three 16x16 blocks in either direction, which + * is roughly the periodicity of the underlying subslice imbalance + * pattern ("roughly" because in reality the hardware's + * implementation of three-way hashing doesn't do exact modulo 3 + * arithmetic, which somewhat decreases the magnitude of this effect + * in practice). This leads to a systematic subslice imbalance + * within that slice regardless of the size of the primitive. The + * 32x32 hashing mode guarantees that the subslice imbalance within a + * single slice hashing block is minimal, largely eliminating this + * effect. + */ + _32x32, + /* Finest slice hashing mode available. */ + NORMAL + }; + const unsigned subslice_hashing[] = { + /* 16x16 would provide a slight cache locality benefit especially + * visible in the sampler L1 cache efficiency of low-bandwidth + * non-LLC platforms, but it comes at the cost of greater subslice + * imbalance for primitives of dimensions approximately intermediate + * between 16x4 and 16x16. + */ + _16x4, + /* Finest subslice hashing mode available. */ + _8x4 + }; + /* Dimensions of the smallest hashing block of a given hashing mode. If + * the rendering area is smaller than this there can't possibly be any + * benefit from switching to this mode, so we optimize out the + * transition. + */ + const unsigned min_size[][2] = { + { 16, 4 }, + { 8, 4 } + }; + const unsigned idx = scale > 1; + + if (width > min_size[idx][0] || height > min_size[idx][1]) { + uint32_t gt_mode; + + iris_pack_state(GENX(GT_MODE), >_mode, reg) { + reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0); + reg.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0); + reg.SubsliceHashing = subslice_hashing[idx]; + reg.SubsliceHashingMask = -1; + }; + + iris_emit_raw_pipe_control(batch, + "workaround: CS stall before GT_MODE LRI", + PIPE_CONTROL_STALL_AT_SCOREBOARD | + PIPE_CONTROL_CS_STALL, + NULL, 0, 0); + + iris_emit_lri(batch, GT_MODE, gt_mode); + + ice->state.current_hash_scale = scale; + } +#endif +} + void genX(init_state)(struct iris_context *ice) { @@ -6462,6 +6697,7 @@ genX(init_state)(struct iris_context *ice) ice->vtbl.update_surface_base_address = iris_update_surface_base_address; ice->vtbl.upload_compute_state = iris_upload_compute_state; ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control; + ice->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count; ice->vtbl.rebind_buffer = iris_rebind_buffer; ice->vtbl.load_register_reg32 = iris_load_register_reg32; ice->vtbl.load_register_reg64 = iris_load_register_reg64;
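
---

Notes on the changes above. The C sketches below are standalone
restatements of logic visible in the diff, not part of the patch;
struct layouts, helper names, and field offsets that do not appear in
the diff itself are illustrative assumptions.

1) Gen11 slice hashing. iris_upload_slice_hashing_state() bails unless
the two pixel pipes have different subslice counts, then uploads
whichever 16x16 hash table routes more screen blocks to the bigger
pipe: table0 when subslices_delta < 0 (pipe 1 has more subslices),
table1 otherwise. Each table row repeats a period-3 pattern, so 11 of
every 16 blocks land on one pipe and 5 on the other. A standalone
check of that ratio:

    /* Counts the per-row block split encoded by table0 in the hunk
     * above: 11 of 16 blocks to pipe 1, 5 of 16 to pipe 0. */
    #include <stdio.h>

    int main(void)
    {
       const int table0_row[16] = { 1, 1, 0, 1, 1, 0, 1, 1,
                                    0, 1, 1, 0, 1, 1, 0, 1 };
       int to_pipe1 = 0;
       for (int i = 0; i < 16; i++)
          to_pipe1 += table0_row[i];
       printf("table0: %d/16 blocks to pipe 1, %d/16 to pipe 0\n",
              to_pipe1, 16 - to_pipe1);   /* 11/16 and 5/16 */
       return 0;
    }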
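2) The same function recovers its iris_context from the batch pointer
with container_of() and asserts it was handed the render batch. A
sketch of that idiom, using stand-in struct definitions rather than
the real iris ones (the macro mirrors the mesa-style container_of()
that takes a sample pointer purely for the offset arithmetic):

    #include <assert.h>

    struct batch   { int id; };
    struct context { int flags; struct batch batches[3]; };

    /* 'sample' is only used to compute the member offset */
    #define container_of(ptr, sample, member) \
       (void *)((char *)(ptr) - \
                ((char *)&(sample)->member - (char *)(sample)))

    int main(void)
    {
       struct context ctx = { .flags = 42 };
       struct batch *render = &ctx.batches[0];  /* render batch, say */

       struct context *ice = NULL;
       ice = container_of(render, ice, batches[0]);
       assert(ice == &ctx && ice->flags == 42);
       return 0;
    }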
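3) Window-space position support threads through three draw-time
paths: the CC viewport via the new iris_viewport_zmin_zmax() helper,
3DSTATE_CLIP (CLIPMODE_ACCEPT_ALL plus PerspectiveDivideDisable), and
3DSTATE_SF, whose ViewportTransformEnable moves out of the static
rasterizer CSO so it can be merged in dynamically. The depth range the
helper computes is just the viewport z mapping, window_z =
translate[2] + ndc_z * scale[2], evaluated at the two NDC endpoints;
a plain restatement, assuming util_viewport_zmin_zmax() behaves this
way:

    #include <stdbool.h>

    static void
    viewport_zmin_zmax(float scale_z, float translate_z, bool halfz,
                       bool window_space, float *zmin, float *zmax)
    {
       if (window_space) {       /* vertices already carry window z */
          *zmin = 0.0f;
          *zmax = 1.0f;
          return;
       }
       /* NDC z spans [0, 1] with clip_halfz, [-1, 1] otherwise */
       const float a = halfz ? translate_z : translate_z - scale_z;
       const float b = translate_z + scale_z;
       *zmin = a < b ? a : b;
       *zmax = a < b ? b : a;
    }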
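4) The 3DSTATE_PS kernel pointers, GRF start registers, and dispatch
enables move from iris_store_fs_state() to draw time because the
SIMD32 rule depends on the bound framebuffer: with 16x MSAA (which
only exists on Gen9+) and per-pixel dispatch, SIMD32 must be dropped,
and the assert in the hunk checks that a SIMD8 or SIMD16 kernel
remains. The decision reduces to:

    #include <stdbool.h>

    static bool
    allow_simd32(int gen, unsigned fb_samples,
                 bool dispatch_32, bool persample_dispatch)
    {
       if (!dispatch_32)
          return false;
       /* "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16,
        * SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch
        * mode."  16x MSAA only exists on Gen9+. */
       if (gen >= 9 && fb_samples == 16 && !persample_dispatch)
          return false;
       return true;
    }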
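5) genX(emit_hashing_mode)() retunes Gen9 pixel hashing by writing
GT_MODE with an LRI, preceded by the CS stall the workaround comment
calls out. GT_MODE is a masked register, which is why the hunk
programs SliceHashingMask and SubsliceHashingMask next to the values:
only fields whose mask bits are set are actually written. A sketch of
the general masked-field pattern (the shift/width parameters are
illustrative, not the real GT_MODE layout):

    #include <stdint.h>

    /* Compose a value/mask pair for a masked register: the field's
     * write-enable bits live 16 bits above the field itself. */
    static uint32_t
    masked_reg_field(uint32_t value, unsigned shift, unsigned width)
    {
       const uint32_t mask = ((1u << width) - 1) << shift;
       return ((value << shift) & mask) | (mask << 16);
    }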