diff --git a/src/gallium/drivers/iris/iris_state.c b/src/gallium/drivers/iris/iris_state.c
index 09e8b2fb5e5..2d02f631d00 100644
--- a/src/gallium/drivers/iris/iris_state.c
+++ b/src/gallium/drivers/iris/iris_state.c
@@ -77,7 +77,7 @@
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
-#ifndef NDEBUG
+#ifdef DEBUG
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#endif
#else
@@ -88,13 +88,14 @@
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
+#include "util/u_dual_blend.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "util/u_viewport.h"
-#include "i915_drm.h"
+#include "drm-uapi/i915_drm.h"
#include "nir.h"
#include "intel/compiler/brw_compiler.h"
#include "intel/common/gen_l3_config.h"
@@ -161,7 +162,19 @@ __gen_combine_address(struct iris_batch *batch, void *location,
#include "genxml/gen_macros.h"
#include "genxml/genX_bits.h"

-#define MOCS_WB (2 << 1)
+#if GEN_GEN == 8
+#define MOCS_PTE 0x18
+#define MOCS_WB 0x78
+#else
+#define MOCS_PTE (1 << 1)
+#define MOCS_WB (2 << 1)
+#endif
+
+static uint32_t
+mocs(const struct iris_bo *bo)
+{
+ return bo && bo->external ? MOCS_PTE : MOCS_WB;
+}

/**
* Statically assert that PIPE_* enums match the hardware packets.
@@ -576,15 +589,11 @@ init_state_base_address(struct iris_batch *batch)
* updated occasionally. See iris_binder.c for the details there.
*/
iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
- #if 0
- // XXX: MOCS is stupid for this.
- sba.GeneralStateMemoryObjectControlState = MOCS_WB;
- sba.StatelessDataPortAccessMemoryObjectControlState = MOCS_WB;
- sba.DynamicStateMemoryObjectControlState = MOCS_WB;
- sba.IndirectObjectMemoryObjectControlState = MOCS_WB;
- sba.InstructionMemoryObjectControlState = MOCS_WB;
- sba.BindlessSurfaceStateMemoryObjectControlState = MOCS_WB;
- #endif
+ sba.GeneralStateMOCS = MOCS_WB;
+ sba.StatelessDataPortAccessMOCS = MOCS_WB;
+ sba.DynamicStateMOCS = MOCS_WB;
+ sba.IndirectObjectMOCS = MOCS_WB;
+ sba.InstructionMOCS = MOCS_WB;

sba.GeneralStateBaseAddressModifyEnable = true;
sba.DynamicStateBaseAddressModifyEnable = true;
@@ -592,7 +601,10 @@ init_state_base_address(struct iris_batch *batch)
sba.InstructionBaseAddressModifyEnable = true;
sba.GeneralStateBufferSizeModifyEnable = true;
sba.DynamicStateBufferSizeModifyEnable = true;
+#if (GEN_GEN >= 9)
sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
+ sba.BindlessSurfaceStateMOCS = MOCS_WB;
+#endif
sba.IndirectObjectBufferSizeModifyEnable = true;
sba.InstructionBuffersizeModifyEnable = true;

@@ -606,6 +618,60 @@ init_state_base_address(struct iris_batch *batch)
}
}

+static void
+iris_emit_l3_config(struct iris_batch *batch, const struct gen_l3_config *cfg,
+ bool has_slm, bool wants_dc_cache)
+{
+ uint32_t reg_val;
+ iris_pack_state(GENX(L3CNTLREG), &reg_val, reg) {
+ reg.SLMEnable = has_slm;
+#if GEN_GEN == 11
+ /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
+ * in L3CNTLREG register. The default setting of the bit is not the
+ * desirable behavior.
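+ * (That bit is the ErrorDetectionBehaviorControl field packed just
+ * below; the packed value is then written to L3CNTLREG via
+ * iris_emit_lri().)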
+ */ + reg.ErrorDetectionBehaviorControl = true; + reg.UseFullWays = true; +#endif + reg.URBAllocation = cfg->n[GEN_L3P_URB]; + reg.ROAllocation = cfg->n[GEN_L3P_RO]; + reg.DCAllocation = cfg->n[GEN_L3P_DC]; + reg.AllAllocation = cfg->n[GEN_L3P_ALL]; + } + iris_emit_lri(batch, L3CNTLREG, reg_val); +} + +static void +iris_emit_default_l3_config(struct iris_batch *batch, + const struct gen_device_info *devinfo, + bool compute) +{ + bool wants_dc_cache = true; + bool has_slm = compute; + const struct gen_l3_weights w = + gen_get_default_l3_weights(devinfo, wants_dc_cache, has_slm); + const struct gen_l3_config *cfg = gen_get_l3_config(devinfo, w); + iris_emit_l3_config(batch, cfg, has_slm, wants_dc_cache); +} + +#if GEN_GEN == 9 || GEN_GEN == 10 +static void +iris_enable_obj_preemption(struct iris_batch *batch, bool enable) +{ + uint32_t reg_val; + + /* A fixed function pipe flush is required before modifying this field */ + iris_emit_end_of_pipe_sync(batch, PIPE_CONTROL_RENDER_TARGET_FLUSH); + + /* enable object level preemption */ + iris_pack_state(GENX(CS_CHICKEN1), ®_val, reg) { + reg.ReplayMode = enable; + reg.ReplayModeMask = true; + } + iris_emit_lri(batch, CS_CHICKEN1, reg_val); +} +#endif + /** * Upload the initial GPU state for a render context. * @@ -623,14 +689,23 @@ iris_init_render_context(struct iris_screen *screen, emit_pipeline_select(batch, _3D); + iris_emit_default_l3_config(batch, devinfo, false); + init_state_base_address(batch); - // XXX: INSTPM on Gen8 +#if GEN_GEN >= 9 iris_pack_state(GENX(CS_DEBUG_MODE2), ®_val, reg) { reg.CONSTANT_BUFFERAddressOffsetDisable = true; reg.CONSTANT_BUFFERAddressOffsetDisableMask = true; } iris_emit_lri(batch, CS_DEBUG_MODE2, reg_val); +#else + iris_pack_state(GENX(INSTPM), ®_val, reg) { + reg.CONSTANT_BUFFERAddressOffsetDisable = true; + reg.CONSTANT_BUFFERAddressOffsetDisableMask = true; + } + iris_emit_lri(batch, INSTPM, reg_val); +#endif #if GEN_GEN == 9 iris_pack_state(GENX(CACHE_MODE_1), ®_val, reg) { @@ -652,6 +727,27 @@ iris_init_render_context(struct iris_screen *screen, } iris_emit_lri(batch, SAMPLER_MODE, reg_val); + /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */ + iris_pack_state(GENX(HALF_SLICE_CHICKEN7), ®_val, reg) { + reg.EnabledTexelOffsetPrecisionFix = 1; + reg.EnabledTexelOffsetPrecisionFixMask = 1; + } + iris_emit_lri(batch, HALF_SLICE_CHICKEN7, reg_val); + + /* WA_2204188704: Pixel Shader Panic dispatch must be disabled. */ + iris_pack_state(GENX(COMMON_SLICE_CHICKEN3), ®_val, reg) { + reg.PSThreadPanicDispatch = 0x3; + reg.PSThreadPanicDispatchMask = 0x3; + } + iris_emit_lri(batch, COMMON_SLICE_CHICKEN3, reg_val); + + iris_pack_state(GENX(SLICE_COMMON_ECO_CHICKEN1), ®_val, reg) { + reg.StateCacheRedirectToCSSectionEnable = true; + reg.StateCacheRedirectToCSSectionEnableMask = true; + } + iris_emit_lri(batch, SLICE_COMMON_ECO_CHICKEN1, reg_val); + + // XXX: 3D_MODE? #endif @@ -671,7 +767,9 @@ iris_init_render_context(struct iris_screen *screen, GEN_SAMPLE_POS_2X(pat._2xSample); GEN_SAMPLE_POS_4X(pat._4xSample); GEN_SAMPLE_POS_8X(pat._8xSample); +#if GEN_GEN >= 9 GEN_SAMPLE_POS_16X(pat._16xSample); +#endif } /* Use the legacy AA line coverage computation. */ @@ -684,11 +782,11 @@ iris_init_render_context(struct iris_screen *screen, iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo); /* No polygon stippling offsets are necessary. 
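* (Emitting 3DSTATE_POLY_STIPPLE_OFFSET with an empty body leaves both
* offsets at zero, which is what origin-LL framebuffers want; see the
* TODO below about origin-UL.)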
*/ - // XXX: may need to set an offset for origin-UL framebuffers + /* TODO: may need to set an offset for origin-UL framebuffers */ iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo); /* Set a static partitioning of the push constant area. */ - // XXX: this may be a bad idea...could starve the push ringbuffers... + /* TODO: this may be a bad idea...could starve the push ringbuffers... */ for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) { iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) { alloc._3DCommandSubOpcode = 18 + i; @@ -696,6 +794,11 @@ iris_init_render_context(struct iris_screen *screen, alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6; } } + +#if GEN_GEN == 10 + /* Gen11+ is enabled for us by the kernel. */ + iris_enable_obj_preemption(batch, true); +#endif } static void @@ -708,29 +811,7 @@ iris_init_compute_context(struct iris_screen *screen, emit_pipeline_select(batch, GPGPU); - const bool has_slm = true; - const bool wants_dc_cache = true; - - const struct gen_l3_weights w = - gen_get_default_l3_weights(devinfo, wants_dc_cache, has_slm); - const struct gen_l3_config *cfg = gen_get_l3_config(devinfo, w); - - uint32_t reg_val; - iris_pack_state(GENX(L3CNTLREG), ®_val, reg) { - reg.SLMEnable = has_slm; -#if GEN_GEN == 11 - /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set - * in L3CNTLREG register. The default setting of the bit is not the - * desirable behavior. - */ - reg.ErrorDetectionBehaviorControl = true; -#endif - reg.URBAllocation = cfg->n[GEN_L3P_URB]; - reg.ROAllocation = cfg->n[GEN_L3P_RO]; - reg.DCAllocation = cfg->n[GEN_L3P_DC]; - reg.AllAllocation = cfg->n[GEN_L3P_ALL]; - } - iris_emit_lri(batch, L3CNTLREG, reg_val); + iris_emit_default_l3_config(batch, devinfo, true); init_state_base_address(batch); @@ -765,13 +846,20 @@ struct iris_depth_buffer_state { struct iris_genx_state { struct iris_vertex_buffer_state vertex_buffers[33]; - /** The number of bound vertex buffers. */ - uint64_t bound_vertex_buffers; - struct iris_depth_buffer_state depth_buffer; uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)]; - uint32_t streamout[4 * GENX(3DSTATE_STREAMOUT_length)]; + +#if GEN_GEN == 9 + /* Is object level preemption enabled? */ + bool object_preemption; +#endif + + struct { +#if GEN_GEN == 8 + struct brw_image_param image_param[PIPE_MAX_SHADER_IMAGES]; +#endif + } shaders[MESA_SHADER_STAGES]; }; /** @@ -802,6 +890,15 @@ struct iris_blend_state { BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)]; bool alpha_to_coverage; /* for shader key */ + + /** Bitfield of whether blending is enabled for RT[i] - for aux resolves */ + uint8_t blend_enables; + + /** Bitfield of whether color writes are enabled for RT[i] */ + uint8_t color_write_enables; + + /** Does RT[0] use dual color blending? 
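+ * (Set from util_blend_state_is_dual() at CSO creation time, and used
+ * to decide key->force_dual_color_blend in iris_populate_fs_key().)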
*/ + bool dual_color_blending; }; static enum pipe_blendfactor @@ -830,6 +927,10 @@ iris_create_blend_state(struct pipe_context *ctx, struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state)); uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length); + cso->blend_enables = 0; + cso->color_write_enables = 0; + STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS <= 8); + cso->alpha_to_coverage = state->alpha_to_coverage; bool indep_alpha_blend = false; @@ -851,6 +952,12 @@ iris_create_blend_state(struct pipe_context *ctx, src_rgb != src_alpha || dst_rgb != dst_alpha) indep_alpha_blend = true; + if (rt->blend_enable) + cso->blend_enables |= 1u << i; + + if (rt->colormask) + cso->color_write_enables |= 1u << i; + iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) { be.LogicOpEnable = state->logicop_enable; be.LogicOpFunction = state->logicop_func; @@ -878,13 +985,16 @@ iris_create_blend_state(struct pipe_context *ctx, } iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) { - /* pb.HasWriteableRT is filled in at draw time. */ - /* pb.AlphaTestEnable is filled in at draw time. */ + /* pb.HasWriteableRT is filled in at draw time. + * pb.AlphaTestEnable is filled in at draw time. + * + * pb.ColorBufferBlendEnable is filled in at draw time so we can avoid + * setting it when dual color blending without an appropriate shader. + */ + pb.AlphaToCoverageEnable = state->alpha_to_coverage; pb.IndependentAlphaBlendEnable = indep_alpha_blend; - pb.ColorBufferBlendEnable = state->rt[0].blend_enable; - pb.SourceBlendFactor = fix_blendfactor(state->rt[0].rgb_src_factor, state->alpha_to_one); pb.SourceAlphaBlendFactor = @@ -904,6 +1014,7 @@ iris_create_blend_state(struct pipe_context *ctx, /* bl.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */ } + cso->dual_color_blending = util_blend_state_is_dual(state, 0); return cso; } @@ -917,12 +1028,36 @@ static void iris_bind_blend_state(struct pipe_context *ctx, void *state) { struct iris_context *ice = (struct iris_context *) ctx; - ice->state.cso_blend = state; + struct iris_blend_state *cso = state; + + ice->state.cso_blend = cso; + ice->state.blend_enables = cso ? cso->blend_enables : 0; + ice->state.dirty |= IRIS_DIRTY_PS_BLEND; ice->state.dirty |= IRIS_DIRTY_BLEND_STATE; + ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES; ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_BLEND]; } +/** + * Return true if the FS writes to any color outputs which are not disabled + * via color masking. + */ +static bool +has_writeable_rt(const struct iris_blend_state *cso_blend, + const struct shader_info *fs_info) +{ + if (!fs_info) + return false; + + unsigned rt_outputs = fs_info->outputs_written >> FRAG_RESULT_DATA0; + + if (fs_info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR)) + rt_outputs = (1 << BRW_MAX_DRAW_BUFFERS) - 1; + + return cso_blend->color_write_enables & rt_outputs; +} + /** * Gallium CSO for depth, stencil, and alpha testing state. */ @@ -957,7 +1092,7 @@ iris_create_zsa_state(struct pipe_context *ctx, cso->depth_writes_enabled = state->depth.writemask; cso->stencil_writes_enabled = state->stencil[0].writemask != 0 || - (two_sided_stencil && state->stencil[1].writemask != 1); + (two_sided_stencil && state->stencil[1].writemask != 0); /* The state tracker needs to optimize away EQUAL writes for us. 
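* (A depth write that passes an EQUAL test stores back the value already
* in the buffer, so dropping the write is safe; the assert below relies
* on the state tracker having done that optimization.)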
*/ assert(!(state->depth.func == PIPE_FUNC_EQUAL && state->depth.writemask)); @@ -1013,6 +1148,9 @@ iris_bind_zsa_state(struct pipe_context *ctx, void *state) if (cso_changed(alpha.func)) ice->state.dirty |= IRIS_DIRTY_BLEND_STATE; + if (cso_changed(depth_writes_enabled)) + ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES; + ice->state.depth_writes_enabled = new_cso->depth_writes_enabled; ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled; } @@ -1047,6 +1185,8 @@ struct iris_rasterizer_state { bool poly_stipple_enable; bool multisample; bool force_persample_interp; + bool conservative_rasterization; + bool fill_mode_point_or_line; enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */ uint16_t sprite_coord_enable; }; @@ -1091,21 +1231,6 @@ iris_create_rasterizer_state(struct pipe_context *ctx, struct iris_rasterizer_state *cso = malloc(sizeof(struct iris_rasterizer_state)); -#if 0 - point_quad_rasterization -> SBE? - - not necessary? - { - poly_smooth - bottom_edge_rule - - offset_units_unscaled - cap not exposed - } - #endif - - // XXX: it may make more sense just to store the pipe_rasterizer_state, - // we're copying a lot of booleans here. But we don't need all of them... - cso->multisample = state->multisample; cso->force_persample_interp = state->force_persample_interp; cso->clip_halfz = state->clip_halfz; @@ -1121,6 +1246,14 @@ iris_create_rasterizer_state(struct pipe_context *ctx, cso->sprite_coord_enable = state->sprite_coord_enable; cso->line_stipple_enable = state->line_stipple_enable; cso->poly_stipple_enable = state->poly_stipple_enable; + cso->conservative_rasterization = + state->conservative_raster_mode == PIPE_CONSERVATIVE_RASTER_POST_SNAP; + + cso->fill_mode_point_or_line = + state->fill_front == PIPE_POLYGON_MODE_LINE || + state->fill_front == PIPE_POLYGON_MODE_POINT || + state->fill_back == PIPE_POLYGON_MODE_LINE || + state->fill_back == PIPE_POLYGON_MODE_POINT; if (state->clip_plane_enable != 0) cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1; @@ -1137,7 +1270,8 @@ iris_create_rasterizer_state(struct pipe_context *ctx, state->line_smooth ? _10pixels : _05pixels; sf.LastPixelEnable = state->line_last_pixel; sf.LineWidth = line_width; - sf.SmoothPointEnable = state->point_smooth || state->multisample; + sf.SmoothPointEnable = (state->point_smooth || state->multisample) && + !state->point_quad_rasterization; sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State; sf.PointWidth = state->point_size; @@ -1162,12 +1296,17 @@ iris_create_rasterizer_state(struct pipe_context *ctx, rr.GlobalDepthOffsetConstant = state->offset_units * 2; rr.GlobalDepthOffsetScale = state->offset_scale; rr.GlobalDepthOffsetClamp = state->offset_clamp; - rr.SmoothPointEnable = state->point_smooth || state->multisample; + rr.SmoothPointEnable = state->point_smooth; rr.AntialiasingEnable = state->line_smooth; rr.ScissorRectangleEnable = state->scissor; +#if GEN_GEN >= 9 rr.ViewportZNearClipTestEnable = state->depth_clip_near; rr.ViewportZFarClipTestEnable = state->depth_clip_far; - //rr.ConservativeRasterizationEnable = not yet supported by Gallium... + rr.ConservativeRasterizationEnable = + cso->conservative_rasterization; +#else + rr.ViewportZClipTestEnable = (state->depth_clip_near || state->depth_clip_far); +#endif } iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) { @@ -1180,7 +1319,6 @@ iris_create_rasterizer_state(struct pipe_context *ctx, cl.APIMode = state->clip_halfz ? 
APIMODE_D3D : APIMODE_OGL; cl.GuardbandClipTestEnable = true; cl.ClipEnable = true; - cl.ViewportXYClipTestEnable = state->point_tri_clip; cl.MinimumPointWidth = 0.125; cl.MaximumPointWidth = 255.875; @@ -1253,6 +1391,9 @@ iris_bind_rasterizer_state(struct pipe_context *ctx, void *state) cso_changed(sprite_coord_mode) || cso_changed(light_twoside)) ice->state.dirty |= IRIS_DIRTY_SBE; + + if (cso_changed(conservative_rasterization)) + ice->state.dirty |= IRIS_DIRTY_FS; } ice->state.cso_rast = new_cso; @@ -1377,19 +1518,7 @@ iris_create_sampler_state(struct pipe_context *ctx, /** * The pipe->bind_sampler_states() driver hook. - * - * Now that we know all the sampler states, we upload them all into a - * contiguous area of GPU memory, for 3DSTATE_SAMPLER_STATE_POINTERS_*. - * We also fill out the border color state pointers at this point. - * - * We could defer this work to draw time, but we assume that binding - * will be less frequent than drawing. */ -// XXX: this may be a bad idea, need to make sure that st/mesa calls us -// XXX: with the complete set of shaders. If it makes multiple calls to -// XXX: things one at a time, we could waste a lot of time assembling things. -// XXX: it doesn't even BUY us anything to do it here, because we only flag -// XXX: IRIS_DIRTY_SAMPLER_STATE when this is called... static void iris_bind_sampler_states(struct pipe_context *ctx, enum pipe_shader_type p_stage, @@ -1406,6 +1535,29 @@ iris_bind_sampler_states(struct pipe_context *ctx, shs->samplers[start + i] = states[i]; } + ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage; +} + +/** + * Upload the sampler states into a contiguous area of GPU memory, for + * for 3DSTATE_SAMPLER_STATE_POINTERS_*. + * + * Also fill out the border color state pointers. + */ +static void +iris_upload_sampler_states(struct iris_context *ice, gl_shader_stage stage) +{ + struct iris_shader_state *shs = &ice->state.shaders[stage]; + const struct shader_info *info = iris_get_shader_info(ice, stage); + + /* We assume the state tracker will call pipe->bind_sampler_states() + * if the program's number of textures changes. + */ + unsigned count = info ? util_last_bit(info->textures_used) : 0; + + if (!count) + return; + /* Assemble the SAMPLER_STATEs into a contiguous table that lives * in the dynamic state memory zone, so we can point to it via the * 3DSTATE_SAMPLER_STATE_POINTERS_* commands. @@ -1423,19 +1575,50 @@ iris_bind_sampler_states(struct pipe_context *ctx, /* Make sure all land in the same BO */ iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS); + ice->state.need_border_colors &= ~(1 << stage); + for (int i = 0; i < count; i++) { struct iris_sampler_state *state = shs->samplers[i]; + struct iris_sampler_view *tex = shs->textures[i]; if (!state) { memset(map, 0, 4 * GENX(SAMPLER_STATE_length)); } else if (!state->needs_border_color) { memcpy(map, state->sampler_state, 4 * GENX(SAMPLER_STATE_length)); } else { - ice->state.need_border_colors = true; + ice->state.need_border_colors |= 1 << stage; + + /* We may need to swizzle the border color for format faking. + * A/LA formats are faked as R/RG with 000R or R00G swizzles. + * This means we need to move the border color's A channel into + * the R or G channels so that those read swizzles will move it + * back into A. 
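+ * For example, an alpha-format border color (0, 0, 0, a) is stored as
+ * (a, 0, 0, 0); the 000R read swizzle on the faked R format then
+ * returns (0, 0, 0, a) as expected.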
+ */ + union pipe_color_union *color = &state->border_color; + union pipe_color_union tmp; + if (tex) { + enum pipe_format internal_format = tex->res->internal_format; + + if (util_format_is_alpha(internal_format)) { + unsigned char swz[4] = { + PIPE_SWIZZLE_W, PIPE_SWIZZLE_0, + PIPE_SWIZZLE_0, PIPE_SWIZZLE_0 + }; + util_format_apply_color_swizzle(&tmp, color, swz, true); + color = &tmp; + } else if (util_format_is_luminance_alpha(internal_format) && + internal_format != PIPE_FORMAT_L8A8_SRGB) { + unsigned char swz[4] = { + PIPE_SWIZZLE_X, PIPE_SWIZZLE_W, + PIPE_SWIZZLE_0, PIPE_SWIZZLE_0 + }; + util_format_apply_color_swizzle(&tmp, color, swz, true); + color = &tmp; + } + } /* Stream out the border color and merge the pointer. */ - uint32_t offset = - iris_upload_border_color(ice, &state->border_color); + uint32_t offset = iris_upload_border_color(ice, color); uint32_t dynamic[GENX(SAMPLER_STATE_length)]; iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) { @@ -1448,8 +1631,6 @@ iris_bind_sampler_states(struct pipe_context *ctx, map += GENX(SAMPLER_STATE_length); } - - ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage; } static enum isl_channel_select @@ -1468,14 +1649,15 @@ fmt_swizzle(const struct iris_format_info *fmt, enum pipe_swizzle swz) static void fill_buffer_surface_state(struct isl_device *isl_dev, - struct iris_bo *bo, + struct iris_resource *res, void *map, enum isl_format format, + struct isl_swizzle swizzle, unsigned offset, unsigned size) { const struct isl_format_layout *fmtl = isl_format_get_layout(format); - const unsigned cpp = fmtl->bpb / 8; + const unsigned cpp = format == ISL_FORMAT_RAW ? 1 : fmtl->bpb / 8; /* The ARB_texture_buffer_specification says: * @@ -1494,14 +1676,75 @@ fill_buffer_surface_state(struct isl_device *isl_dev, * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE. */ unsigned final_size = - MIN3(size, bo->size - offset, IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp); + MIN3(size, res->bo->size - res->offset - offset, + IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp); isl_buffer_fill_state(isl_dev, map, - .address = bo->gtt_offset + offset, + .address = res->bo->gtt_offset + res->offset + offset, .size_B = final_size, .format = format, + .swizzle = swizzle, .stride_B = cpp, - .mocs = MOCS_WB); + .mocs = mocs(res->bo)); +} + +#define SURFACE_STATE_ALIGNMENT 64 + +/** + * Allocate several contiguous SURFACE_STATE structures, one for each + * supported auxiliary surface mode. 
+ */ +static void * +alloc_surface_states(struct u_upload_mgr *mgr, + struct iris_state_ref *ref, + unsigned aux_usages) +{ + const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length); + + /* If this changes, update this to explicitly align pointers */ + STATIC_ASSERT(surf_size == SURFACE_STATE_ALIGNMENT); + + assert(aux_usages != 0); + + void *map = + upload_state(mgr, ref, util_bitcount(aux_usages) * surf_size, + SURFACE_STATE_ALIGNMENT); + + ref->offset += iris_bo_offset_from_base_address(iris_resource_bo(ref->res)); + + return map; +} + +static void +fill_surface_state(struct isl_device *isl_dev, + void *map, + struct iris_resource *res, + struct isl_view *view, + unsigned aux_usage) +{ + struct isl_surf_fill_state_info f = { + .surf = &res->surf, + .view = view, + .mocs = mocs(res->bo), + .address = res->bo->gtt_offset + res->offset, + }; + + if (aux_usage != ISL_AUX_USAGE_NONE) { + f.aux_surf = &res->aux.surf; + f.aux_usage = aux_usage; + f.aux_address = res->aux.bo->gtt_offset + res->aux.offset; + + struct iris_bo *clear_bo = NULL; + uint64_t clear_offset = 0; + f.clear_color = + iris_resource_get_clear_color(res, &clear_bo, &clear_offset); + if (clear_bo) { + f.clear_address = clear_bo->gtt_offset + clear_offset; + f.use_clear_address = isl_dev->info->gen > 9; + } + } + + isl_surf_fill_state_s(isl_dev, map, &f); } /** @@ -1527,14 +1770,6 @@ iris_create_sampler_view(struct pipe_context *ctx, pipe_reference_init(&isv->base.reference, 1); pipe_resource_reference(&isv->base.texture, tex); - void *map = upload_state(ice->state.surface_uploader, &isv->surface_state, - 4 * GENX(RENDER_SURFACE_STATE_length), 64); - if (!unlikely(map)) - return NULL; - - struct iris_bo *state_bo = iris_resource_bo(isv->surface_state.res); - isv->surface_state.offset += iris_bo_offset_from_base_address(state_bo); - if (util_format_is_depth_or_stencil(tmpl->format)) { struct iris_resource *zres, *sres; const struct util_format_description *desc = @@ -1547,6 +1782,12 @@ iris_create_sampler_view(struct pipe_context *ctx, isv->res = (struct iris_resource *) tex; + void *map = alloc_surface_states(ice->state.surface_uploader, + &isv->surface_state, + isv->res->aux.sampler_usages); + if (!unlikely(map)) + return NULL; + isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT; if (isv->base.target == PIPE_TEXTURE_CUBE || @@ -1556,6 +1797,8 @@ iris_create_sampler_view(struct pipe_context *ctx, const struct iris_format_info fmt = iris_format_for_usage(devinfo, tmpl->format, usage); + isv->clear_color = isv->res->aux.clear_color; + isv->view = (struct isl_view) { .format = fmt.fmt, .swizzle = (struct isl_swizzle) { @@ -1576,16 +1819,22 @@ iris_create_sampler_view(struct pipe_context *ctx, isv->view.array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1; - isl_surf_fill_state(&screen->isl_dev, map, - .surf = &isv->res->surf, .view = &isv->view, - .mocs = MOCS_WB, - .address = isv->res->bo->gtt_offset); - // .aux_surf = - // .clear_color = clear_color, + unsigned aux_modes = isv->res->aux.sampler_usages; + while (aux_modes) { + enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes); + + /* If we have a multisampled depth buffer, do not create a sampler + * surface state with HiZ. 
+ */ + fill_surface_state(&screen->isl_dev, map, isv->res, &isv->view, + aux_usage); + + map += SURFACE_STATE_ALIGNMENT; + } } else { - fill_buffer_surface_state(&screen->isl_dev, isv->res->bo, map, - isv->view.format, tmpl->u.buf.offset, - tmpl->u.buf.size); + fill_buffer_surface_state(&screen->isl_dev, isv->res, map, + isv->view.format, isv->view.swizzle, + tmpl->u.buf.offset, tmpl->u.buf.size); } return &isv->base; @@ -1654,7 +1903,8 @@ iris_create_surface(struct pipe_context *ctx, return NULL; } - surf->view = (struct isl_view) { + struct isl_view *view = &surf->view; + *view = (struct isl_view) { .format = fmt.fmt, .base_level = tmpl->u.tex.level, .levels = 1, @@ -1664,30 +1914,149 @@ iris_create_surface(struct pipe_context *ctx, .usage = usage, }; + surf->clear_color = res->aux.clear_color; + /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */ if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_STENCIL_BIT)) return psurf; - void *map = upload_state(ice->state.surface_uploader, &surf->surface_state, - 4 * GENX(RENDER_SURFACE_STATE_length), 64); + void *map = alloc_surface_states(ice->state.surface_uploader, + &surf->surface_state, + res->aux.possible_usages); if (!unlikely(map)) return NULL; - struct iris_bo *state_bo = iris_resource_bo(surf->surface_state.res); - surf->surface_state.offset += iris_bo_offset_from_base_address(state_bo); + if (!isl_format_is_compressed(res->surf.format)) { + /* This is a normal surface. Fill out a SURFACE_STATE for each possible + * auxiliary surface mode and return the pipe_surface. + */ + unsigned aux_modes = res->aux.possible_usages; + while (aux_modes) { + enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes); + + fill_surface_state(&screen->isl_dev, map, res, view, aux_usage); + + map += SURFACE_STATE_ALIGNMENT; + } + + return psurf; + } - isl_surf_fill_state(&screen->isl_dev, map, - .surf = &res->surf, .view = &surf->view, - .mocs = MOCS_WB, - .address = res->bo->gtt_offset); - // .aux_surf = - // .clear_color = clear_color, + /* The resource has a compressed format, which is not renderable, but we + * have a renderable view format. We must be attempting to upload blocks + * of compressed data via an uncompressed view. + * + * In this case, we can assume there are no auxiliary buffers, a single + * miplevel, and that the resource is single-sampled. Gallium may try + * and create an uncompressed view with multiple layers, however. + */ + assert(!isl_format_is_compressed(fmt.fmt)); + assert(res->aux.possible_usages == 1 << ISL_AUX_USAGE_NONE); + assert(res->surf.samples == 1); + assert(view->levels == 1); + + struct isl_surf isl_surf; + uint32_t offset_B = 0, tile_x_sa = 0, tile_y_sa = 0; + + if (view->base_level > 0) { + /* We can't rely on the hardware's miplevel selection with such + * a substantial lie about the format, so we select a single image + * using the Tile X/Y Offset fields. In this case, we can't handle + * multiple array slices. + * + * On Broadwell, HALIGN and VALIGN are specified in pixels and are + * hard-coded to align to exactly the block size of the compressed + * texture. This means that, when reinterpreted as a non-compressed + * texture, the tile offsets may be anything and we can't rely on + * X/Y Offset. + * + * Return NULL to force the state tracker to take fallback paths. + */ + if (view->array_len > 1 || GEN_GEN == 8) + return NULL; + + const bool is_3d = res->surf.dim == ISL_SURF_DIM_3D; + isl_surf_get_image_surf(&screen->isl_dev, &res->surf, + view->base_level, + is_3d ? 
0 : view->base_array_layer, + is_3d ? view->base_array_layer : 0, + &isl_surf, + &offset_B, &tile_x_sa, &tile_y_sa); + + /* We use address and tile offsets to access a single level/layer + * as a subimage, so reset level/layer so it doesn't offset again. + */ + view->base_array_layer = 0; + view->base_level = 0; + } else { + /* Level 0 doesn't require tile offsets, and the hardware can find + * array slices using QPitch even with the format override, so we + * can allow layers in this case. Copy the original ISL surface. + */ + memcpy(&isl_surf, &res->surf, sizeof(isl_surf)); + } + + /* Scale down the image dimensions by the block size. */ + const struct isl_format_layout *fmtl = + isl_format_get_layout(res->surf.format); + isl_surf.format = fmt.fmt; + isl_surf.logical_level0_px.width = + DIV_ROUND_UP(isl_surf.logical_level0_px.width, fmtl->bw); + isl_surf.logical_level0_px.height = + DIV_ROUND_UP(isl_surf.logical_level0_px.height, fmtl->bh); + isl_surf.phys_level0_sa.width /= fmtl->bw; + isl_surf.phys_level0_sa.height /= fmtl->bh; + tile_x_sa /= fmtl->bw; + tile_y_sa /= fmtl->bh; + + psurf->width = isl_surf.logical_level0_px.width; + psurf->height = isl_surf.logical_level0_px.height; + + struct isl_surf_fill_state_info f = { + .surf = &isl_surf, + .view = view, + .mocs = mocs(res->bo), + .address = res->bo->gtt_offset + offset_B, + .x_offset_sa = tile_x_sa, + .y_offset_sa = tile_y_sa, + }; + isl_surf_fill_state_s(&screen->isl_dev, map, &f); return psurf; } +#if GEN_GEN < 9 +static void +fill_default_image_param(struct brw_image_param *param) +{ + memset(param, 0, sizeof(*param)); + /* Set the swizzling shifts to all-ones to effectively disable swizzling -- + * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more + * detailed explanation of these parameters. + */ + param->swizzling[0] = 0xff; + param->swizzling[1] = 0xff; +} + +static void +fill_buffer_image_param(struct brw_image_param *param, + enum pipe_format pfmt, + unsigned size) +{ + const unsigned cpp = util_format_get_blocksize(pfmt); + + fill_default_image_param(param); + param->size[0] = size / cpp; + param->stride[0] = cpp; +} +#else +#define isl_surf_fill_image_param(x, ...) +#define fill_default_image_param(x, ...) +#define fill_buffer_image_param(x, ...) +#endif + /** * The pipe->set_shader_images() driver hook. */ @@ -1702,46 +2071,58 @@ iris_set_shader_images(struct pipe_context *ctx, const struct gen_device_info *devinfo = &screen->devinfo; gl_shader_stage stage = stage_from_pipe(p_stage); struct iris_shader_state *shs = &ice->state.shaders[stage]; +#if GEN_GEN == 8 + struct iris_genx_state *genx = ice->state.genx; + struct brw_image_param *image_params = genx->shaders[stage].image_param; +#endif shs->bound_image_views &= ~u_bit_consecutive(start_slot, count); for (unsigned i = 0; i < count; i++) { + struct iris_image_view *iv = &shs->image[start_slot + i]; + if (p_images && p_images[i].resource) { const struct pipe_image_view *img = &p_images[i]; struct iris_resource *res = (void *) img->resource; - pipe_resource_reference(&shs->image[start_slot + i].res, &res->base); - - shs->bound_image_views |= 1 << (start_slot + i); - - res->bind_history |= PIPE_BIND_SHADER_IMAGE; // XXX: these are not retained forever, use a separate uploader? 
void *map = - upload_state(ice->state.surface_uploader, - &shs->image[start_slot + i].surface_state, - 4 * GENX(RENDER_SURFACE_STATE_length), 64); - if (!unlikely(map)) { - pipe_resource_reference(&shs->image[start_slot + i].res, NULL); + alloc_surface_states(ice->state.surface_uploader, + &iv->surface_state, 1 << ISL_AUX_USAGE_NONE); + if (!unlikely(map)) return; - } - struct iris_bo *surf_state_bo = - iris_resource_bo(shs->image[start_slot + i].surface_state.res); - shs->image[start_slot + i].surface_state.offset += - iris_bo_offset_from_base_address(surf_state_bo); + iv->base = *img; + iv->base.resource = NULL; + pipe_resource_reference(&iv->base.resource, &res->base); + + shs->bound_image_views |= 1 << (start_slot + i); + + res->bind_history |= PIPE_BIND_SHADER_IMAGE; isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT; - enum isl_format isl_format = + enum isl_format isl_fmt = iris_format_for_usage(devinfo, img->format, usage).fmt; - if (img->shader_access & PIPE_IMAGE_ACCESS_READ) - isl_format = isl_lower_storage_image_format(devinfo, isl_format); + bool untyped_fallback = false; + + if (img->shader_access & PIPE_IMAGE_ACCESS_READ) { + /* On Gen8, try to use typed surfaces reads (which support a + * limited number of formats), and if not possible, fall back + * to untyped reads. + */ + untyped_fallback = GEN_GEN == 8 && + !isl_has_matching_typed_storage_image_format(devinfo, isl_fmt); - shs->image[start_slot + i].access = img->shader_access; + if (untyped_fallback) + isl_fmt = ISL_FORMAT_RAW; + else + isl_fmt = isl_lower_storage_image_format(devinfo, isl_fmt); + } if (res->base.target != PIPE_BUFFER) { struct isl_view view = { - .format = isl_format, + .format = isl_fmt, .base_level = img->u.tex.level, .levels = 1, .base_array_layer = img->u.tex.first_layer, @@ -1750,25 +2131,52 @@ iris_set_shader_images(struct pipe_context *ctx, .usage = usage, }; - isl_surf_fill_state(&screen->isl_dev, map, - .surf = &res->surf, .view = &view, - .mocs = MOCS_WB, - .address = res->bo->gtt_offset); - // .aux_surf = - // .clear_color = clear_color, + if (untyped_fallback) { + fill_buffer_surface_state(&screen->isl_dev, res, map, + isl_fmt, ISL_SWIZZLE_IDENTITY, + 0, res->bo->size); + } else { + /* Images don't support compression */ + unsigned aux_modes = 1 << ISL_AUX_USAGE_NONE; + while (aux_modes) { + enum isl_aux_usage usage = u_bit_scan(&aux_modes); + + fill_surface_state(&screen->isl_dev, map, res, &view, usage); + + map += SURFACE_STATE_ALIGNMENT; + } + } + + isl_surf_fill_image_param(&screen->isl_dev, + &image_params[start_slot + i], + &res->surf, &view); } else { - fill_buffer_surface_state(&screen->isl_dev, res->bo, map, - isl_format, img->u.buf.offset, - img->u.buf.size); + util_range_add(&res->valid_buffer_range, img->u.buf.offset, + img->u.buf.offset + img->u.buf.size); + + fill_buffer_surface_state(&screen->isl_dev, res, map, + isl_fmt, ISL_SWIZZLE_IDENTITY, + img->u.buf.offset, img->u.buf.size); + fill_buffer_image_param(&image_params[start_slot + i], + img->format, img->u.buf.size); } } else { - pipe_resource_reference(&shs->image[start_slot + i].res, NULL); - pipe_resource_reference(&shs->image[start_slot + i].surface_state.res, - NULL); + pipe_resource_reference(&iv->base.resource, NULL); + pipe_resource_reference(&iv->surface_state.res, NULL); + fill_default_image_param(&image_params[start_slot + i]); } } ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage; + ice->state.dirty |= + stage == MESA_SHADER_COMPUTE ? 
IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES + : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES; + + /* Broadwell also needs brw_image_params re-uploaded */ + if (GEN_GEN < 9) { + ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage; + shs->cbuf0_needs_upload = true; + } } @@ -1788,9 +2196,10 @@ iris_set_sampler_views(struct pipe_context *ctx, shs->bound_sampler_views &= ~u_bit_consecutive(start, count); for (unsigned i = 0; i < count; i++) { + struct pipe_sampler_view *pview = views ? views[i] : NULL; pipe_sampler_view_reference((struct pipe_sampler_view **) - &shs->textures[start + i], views[i]); - struct iris_sampler_view *view = (void *) views[i]; + &shs->textures[start + i], pview); + struct iris_sampler_view *view = (void *) pview; if (view) { view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW; shs->bound_sampler_views |= 1 << (start + i); @@ -1798,6 +2207,9 @@ iris_set_sampler_views(struct pipe_context *ctx, } ice->state.dirty |= (IRIS_DIRTY_BINDINGS_VS << stage); + ice->state.dirty |= + stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES + : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES; } /** @@ -1809,11 +2221,13 @@ iris_set_tess_state(struct pipe_context *ctx, const float default_inner_level[2]) { struct iris_context *ice = (struct iris_context *) ctx; + struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL]; memcpy(&ice->state.default_outer_level[0], &default_outer_level[0], 4 * sizeof(float)); memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float)); ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TCS; + shs->cbuf0_needs_upload = true; } static void @@ -1912,7 +2326,10 @@ iris_set_stencil_ref(struct pipe_context *ctx, { struct iris_context *ice = (struct iris_context *) ctx; memcpy(&ice->state.stencil_ref, state, sizeof(*state)); - ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL; + if (GEN_GEN == 8) + ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE; + else + ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL; } static float @@ -2043,6 +2460,7 @@ iris_set_framebuffer_state(struct pipe_context *ctx, struct iris_resource *stencil_res; unsigned samples = util_framebuffer_get_num_samples(state); + unsigned layers = util_framebuffer_get_num_layers(state); if (cso->samples != samples) { ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE; @@ -2052,7 +2470,7 @@ iris_set_framebuffer_state(struct pipe_context *ctx, ice->state.dirty |= IRIS_DIRTY_BLEND_STATE; } - if ((cso->layers == 0) != (state->layers == 0)) { + if ((cso->layers == 0) != (layers == 0)) { ice->state.dirty |= IRIS_DIRTY_CLIP; } @@ -2062,6 +2480,7 @@ iris_set_framebuffer_state(struct pipe_context *ctx, util_copy_framebuffer_state(cso, state); cso->samples = samples; + cso->layers = layers; struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer; @@ -2073,10 +2492,7 @@ iris_set_framebuffer_state(struct pipe_context *ctx, .swizzle = ISL_SWIZZLE_IDENTITY, }; - struct isl_depth_stencil_hiz_emit_info info = { - .view = &view, - .mocs = MOCS_WB, - }; + struct isl_depth_stencil_hiz_emit_info info = { .view = &view }; if (cso->zsbuf) { iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres, @@ -2091,18 +2507,26 @@ iris_set_framebuffer_state(struct pipe_context *ctx, view.usage |= ISL_SURF_USAGE_DEPTH_BIT; info.depth_surf = &zres->surf; - info.depth_address = zres->bo->gtt_offset; - info.hiz_usage = ISL_AUX_USAGE_NONE; + info.depth_address = zres->bo->gtt_offset + zres->offset; + info.mocs = mocs(zres->bo); view.format = zres->surf.format; + + if (iris_resource_level_has_hiz(zres, 
view.base_level)) { + info.hiz_usage = ISL_AUX_USAGE_HIZ; + info.hiz_surf = &zres->aux.surf; + info.hiz_address = zres->aux.bo->gtt_offset; + } } if (stencil_res) { view.usage |= ISL_SURF_USAGE_STENCIL_BIT; info.stencil_surf = &stencil_res->surf; - info.stencil_address = stencil_res->bo->gtt_offset; - if (!zres) + info.stencil_address = stencil_res->bo->gtt_offset + stencil_res->offset; + if (!zres) { view.format = stencil_res->surf.format; + info.mocs = mocs(stencil_res->bo); + } } } @@ -2124,6 +2548,8 @@ iris_set_framebuffer_state(struct pipe_context *ctx, /* Render target change */ ice->state.dirty |= IRIS_DIRTY_BINDINGS_FS; + ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES; + ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_FRAMEBUFFER]; #if GEN_GEN == 11 @@ -2146,33 +2572,37 @@ iris_set_framebuffer_state(struct pipe_context *ctx, } static void -upload_ubo_surf_state(struct iris_context *ice, - struct iris_const_buffer *cbuf, - unsigned buffer_size) +upload_ubo_ssbo_surf_state(struct iris_context *ice, + struct pipe_shader_buffer *buf, + struct iris_state_ref *surf_state, + bool ssbo) { struct pipe_context *ctx = &ice->ctx; struct iris_screen *screen = (struct iris_screen *) ctx->screen; // XXX: these are not retained forever, use a separate uploader? void *map = - upload_state(ice->state.surface_uploader, &cbuf->surface_state, + upload_state(ice->state.surface_uploader, surf_state, 4 * GENX(RENDER_SURFACE_STATE_length), 64); if (!unlikely(map)) { - pipe_resource_reference(&cbuf->data.res, NULL); + surf_state->res = NULL; return; } - struct iris_resource *res = (void *) cbuf->data.res; - struct iris_bo *surf_bo = iris_resource_bo(cbuf->surface_state.res); - cbuf->surface_state.offset += iris_bo_offset_from_base_address(surf_bo); + struct iris_resource *res = (void *) buf->buffer; + struct iris_bo *surf_bo = iris_resource_bo(surf_state->res); + surf_state->offset += iris_bo_offset_from_base_address(surf_bo); isl_buffer_fill_state(&screen->isl_dev, map, - .address = res->bo->gtt_offset + cbuf->data.offset, - .size_B = MIN2(buffer_size, - res->bo->size - cbuf->data.offset), - .format = ISL_FORMAT_R32G32B32A32_FLOAT, + .address = res->bo->gtt_offset + res->offset + + buf->buffer_offset, + .size_B = buf->buffer_size - res->offset, + .format = ssbo ? 
ISL_FORMAT_RAW + : ISL_FORMAT_R32G32B32A32_FLOAT, + .swizzle = ISL_SWIZZLE_IDENTITY, .stride_B = 1, - .mocs = MOCS_WB) + .mocs = mocs(res->bo)) + } /** @@ -2189,21 +2619,28 @@ iris_set_constant_buffer(struct pipe_context *ctx, struct iris_context *ice = (struct iris_context *) ctx; gl_shader_stage stage = stage_from_pipe(p_stage); struct iris_shader_state *shs = &ice->state.shaders[stage]; - struct iris_const_buffer *cbuf = &shs->constbuf[index]; + struct pipe_shader_buffer *cbuf = &shs->constbuf[index]; if (input && input->buffer) { + shs->bound_cbufs |= 1u << index; + assert(index > 0); - pipe_resource_reference(&cbuf->data.res, input->buffer); - cbuf->data.offset = input->buffer_offset; + pipe_resource_reference(&cbuf->buffer, input->buffer); + cbuf->buffer_offset = input->buffer_offset; + cbuf->buffer_size = + MIN2(input->buffer_size, + iris_resource_bo(input->buffer)->size - cbuf->buffer_offset); - struct iris_resource *res = (void *) cbuf->data.res; + struct iris_resource *res = (void *) cbuf->buffer; res->bind_history |= PIPE_BIND_CONSTANT_BUFFER; - upload_ubo_surf_state(ice, cbuf, input->buffer_size); + upload_ubo_ssbo_surf_state(ice, cbuf, &shs->constbuf_surf_state[index], + false); } else { - pipe_resource_reference(&cbuf->data.res, NULL); - pipe_resource_reference(&cbuf->surface_state.res, NULL); + shs->bound_cbufs &= ~(1u << index); + pipe_resource_reference(&cbuf->buffer, NULL); + pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL); } if (index == 0) { @@ -2226,8 +2663,9 @@ static void upload_uniforms(struct iris_context *ice, gl_shader_stage stage) { + UNUSED struct iris_genx_state *genx = ice->state.genx; struct iris_shader_state *shs = &ice->state.shaders[stage]; - struct iris_const_buffer *cbuf = &shs->constbuf[0]; + struct pipe_shader_buffer *cbuf = &shs->constbuf[0]; struct iris_compiled_shader *shader = ice->shaders.prog[stage]; unsigned upload_size = shader->num_system_values * sizeof(uint32_t) + @@ -2236,14 +2674,27 @@ upload_uniforms(struct iris_context *ice, if (upload_size == 0) return; - uint32_t *map = - upload_state(ice->ctx.const_uploader, &cbuf->data, upload_size, 64); + uint32_t *map = NULL; + u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64, + &cbuf->buffer_offset, &cbuf->buffer, (void **) &map); for (int i = 0; i < shader->num_system_values; i++) { uint32_t sysval = shader->system_values[i]; uint32_t value = 0; - if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) { + if (BRW_PARAM_DOMAIN(sysval) == BRW_PARAM_DOMAIN_IMAGE) { +#if GEN_GEN == 8 + unsigned img = BRW_PARAM_IMAGE_IDX(sysval); + unsigned offset = BRW_PARAM_IMAGE_OFFSET(sysval); + struct brw_image_param *param = + &genx->shaders[stage].image_param[img]; + + assert(offset < sizeof(struct brw_image_param)); + value = ((uint32_t *) param)[offset]; +#endif + } else if (sysval == BRW_PARAM_BUILTIN_ZERO) { + value = 0; + } else if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) { int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval); int comp = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval); value = fui(ice->state.clip_planes.ucp[plane][comp]); @@ -2254,10 +2705,19 @@ upload_uniforms(struct iris_context *ice, assert(stage == MESA_SHADER_TESS_EVAL); const struct shader_info *tcs_info = iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL); - assert(tcs_info); - - value = tcs_info->tess.tcs_vertices_out; + if (tcs_info) + value = tcs_info->tess.tcs_vertices_out; + else + value = ice->state.vertices_per_patch; } + } else if (sysval >= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X && + sysval <= 
BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_W) { + unsigned i = sysval - BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X; + value = fui(ice->state.default_outer_level[i]); + } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X) { + value = fui(ice->state.default_inner_level[0]); + } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y) { + value = fui(ice->state.default_inner_level[1]); } else { assert(!"unhandled system value"); } @@ -2269,7 +2729,8 @@ upload_uniforms(struct iris_context *ice, memcpy(map, shs->cbuf0.user_buffer, shs->cbuf0.buffer_size); } - upload_ubo_surf_state(ice, cbuf, upload_size); + cbuf->buffer_size = upload_size; + upload_ubo_ssbo_surf_state(ice, cbuf, &shs->constbuf_surf_state[0], false); } /** @@ -2282,48 +2743,41 @@ static void iris_set_shader_buffers(struct pipe_context *ctx, enum pipe_shader_type p_stage, unsigned start_slot, unsigned count, - const struct pipe_shader_buffer *buffers) + const struct pipe_shader_buffer *buffers, + unsigned writable_bitmask) { struct iris_context *ice = (struct iris_context *) ctx; - struct iris_screen *screen = (struct iris_screen *)ctx->screen; gl_shader_stage stage = stage_from_pipe(p_stage); struct iris_shader_state *shs = &ice->state.shaders[stage]; + unsigned modified_bits = u_bit_consecutive(start_slot, count); + + shs->bound_ssbos &= ~modified_bits; + shs->writable_ssbos &= ~modified_bits; + shs->writable_ssbos |= writable_bitmask << start_slot; + for (unsigned i = 0; i < count; i++) { if (buffers && buffers[i].buffer) { - const struct pipe_shader_buffer *buffer = &buffers[i]; - struct iris_resource *res = (void *) buffer->buffer; - pipe_resource_reference(&shs->ssbo[start_slot + i], &res->base); + struct iris_resource *res = (void *) buffers[i].buffer; + struct pipe_shader_buffer *ssbo = &shs->ssbo[start_slot + i]; + struct iris_state_ref *surf_state = + &shs->ssbo_surf_state[start_slot + i]; + pipe_resource_reference(&ssbo->buffer, &res->base); + ssbo->buffer_offset = buffers[i].buffer_offset; + ssbo->buffer_size = + MIN2(buffers[i].buffer_size, res->bo->size - ssbo->buffer_offset); + + shs->bound_ssbos |= 1 << (start_slot + i); + + upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, true); res->bind_history |= PIPE_BIND_SHADER_BUFFER; - // XXX: these are not retained forever, use a separate uploader? 
- void *map = - upload_state(ice->state.surface_uploader, - &shs->ssbo_surface_state[start_slot + i], - 4 * GENX(RENDER_SURFACE_STATE_length), 64); - if (!unlikely(map)) { - pipe_resource_reference(&shs->ssbo[start_slot + i], NULL); - return; - } - - struct iris_bo *surf_state_bo = - iris_resource_bo(shs->ssbo_surface_state[start_slot + i].res); - shs->ssbo_surface_state[start_slot + i].offset += - iris_bo_offset_from_base_address(surf_state_bo); - - isl_buffer_fill_state(&screen->isl_dev, map, - .address = - res->bo->gtt_offset + buffer->buffer_offset, - .size_B = - MIN2(buffer->buffer_size, - res->bo->size - buffer->buffer_offset), - .format = ISL_FORMAT_RAW, - .stride_B = 1, - .mocs = MOCS_WB); + util_range_add(&res->valid_buffer_range, ssbo->buffer_offset, + ssbo->buffer_offset + ssbo->buffer_size); } else { - pipe_resource_reference(&shs->ssbo[start_slot + i], NULL); - pipe_resource_reference(&shs->ssbo_surface_state[start_slot + i].res, + pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL); + pipe_resource_reference(&shs->ssbo_surf_state[start_slot + i].res, NULL); } } @@ -2362,25 +2816,26 @@ iris_set_vertex_buffers(struct pipe_context *ctx, continue; } - assert(!buffer->is_user_buffer); - - ice->state.bound_vertex_buffers |= 1ull << (start_slot + i); + /* We may see user buffers that are NULL bindings. */ + assert(!(buffer->is_user_buffer && buffer->buffer.user != NULL)); pipe_resource_reference(&state->resource, buffer->buffer.resource); struct iris_resource *res = (void *) state->resource; - if (res) + if (res) { + ice->state.bound_vertex_buffers |= 1ull << (start_slot + i); res->bind_history |= PIPE_BIND_VERTEX_BUFFER; + } iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) { vb.VertexBufferIndex = start_slot + i; - vb.MOCS = MOCS_WB; vb.AddressModifyEnable = true; vb.BufferPitch = buffer->stride; if (res) { - vb.BufferSize = res->bo->size; + vb.BufferSize = res->bo->size - (int) buffer->buffer_offset; vb.BufferStartingAddress = - ro_bo(NULL, res->bo->gtt_offset + buffer->buffer_offset); + ro_bo(NULL, res->bo->gtt_offset + (int) buffer->buffer_offset); + vb.MOCS = mocs(res->bo); } else { vb.NullVertexBuffer = true; } @@ -2396,6 +2851,8 @@ iris_set_vertex_buffers(struct pipe_context *ctx, struct iris_vertex_element_state { uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)]; uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)]; + uint32_t edgeflag_ve[GENX(VERTEX_ELEMENT_STATE_length)]; + uint32_t edgeflag_vfi[GENX(3DSTATE_VF_INSTANCING_length)]; unsigned count; }; @@ -2403,7 +2860,12 @@ struct iris_vertex_element_state { * The pipe->create_vertex_elements() driver hook. * * This translates pipe_vertex_element to our 3DSTATE_VERTEX_ELEMENTS - * and 3DSTATE_VF_INSTANCING commands. SGVs are handled at draw time. + * and 3DSTATE_VF_INSTANCING commands. The vertex_elements and vf_instancing + * arrays are ready to be emitted at draw time if no EdgeFlag or SGVs are + * needed. In these cases we will need information available at draw time. + * We setup edgeflag_ve and edgeflag_vfi as alternatives last + * 3DSTATE_VERTEX_ELEMENT and 3DSTATE_VF_INSTANCING that can be used at + * draw time if we detect that EdgeFlag is needed by the Vertex Shader. */ static void * iris_create_vertex_elements(struct pipe_context *ctx, @@ -2417,11 +2879,6 @@ iris_create_vertex_elements(struct pipe_context *ctx, cso->count = count; - /* TODO: - * - create edge flag one - * - create SGV ones - * - if those are necessary, use count + 1/2/3... 
OR in the length - */ iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) { ve.DWordLength = 1 + GENX(VERTEX_ELEMENT_STATE_length) * MAX2(count, 1) - 2; @@ -2451,15 +2908,16 @@ iris_create_vertex_elements(struct pipe_context *ctx, VFCOMP_STORE_SRC, VFCOMP_STORE_SRC }; switch (isl_format_get_num_channels(fmt.fmt)) { - case 0: comp[0] = VFCOMP_STORE_0; - case 1: comp[1] = VFCOMP_STORE_0; - case 2: comp[2] = VFCOMP_STORE_0; + case 0: comp[0] = VFCOMP_STORE_0; /* fallthrough */ + case 1: comp[1] = VFCOMP_STORE_0; /* fallthrough */ + case 2: comp[2] = VFCOMP_STORE_0; /* fallthrough */ case 3: comp[3] = isl_format_has_int_channel(fmt.fmt) ? VFCOMP_STORE_1_INT : VFCOMP_STORE_1_FP; break; } iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) { + ve.EdgeFlagEnable = false; ve.VertexBufferIndex = state[i].vertex_buffer_index; ve.Valid = true; ve.SourceElementOffset = state[i].src_offset; @@ -2480,6 +2938,33 @@ iris_create_vertex_elements(struct pipe_context *ctx, vfi_pack_dest += GENX(3DSTATE_VF_INSTANCING_length); } + /* An alternative version of the last VE and VFI is stored so it + * can be used at draw time in case Vertex Shader uses EdgeFlag + */ + if (count) { + const unsigned edgeflag_index = count - 1; + const struct iris_format_info fmt = + iris_format_for_usage(devinfo, state[edgeflag_index].src_format, 0); + iris_pack_state(GENX(VERTEX_ELEMENT_STATE), cso->edgeflag_ve, ve) { + ve.EdgeFlagEnable = true ; + ve.VertexBufferIndex = state[edgeflag_index].vertex_buffer_index; + ve.Valid = true; + ve.SourceElementOffset = state[edgeflag_index].src_offset; + ve.SourceElementFormat = fmt.fmt; + ve.Component0Control = VFCOMP_STORE_SRC; + ve.Component1Control = VFCOMP_STORE_0; + ve.Component2Control = VFCOMP_STORE_0; + ve.Component3Control = VFCOMP_STORE_0; + } + iris_pack_command(GENX(3DSTATE_VF_INSTANCING), cso->edgeflag_vfi, vi) { + /* The vi.VertexElementIndex of the EdgeFlag Vertex Element is filled + * at draw time, as it should change if SGVs are emitted. + */ + vi.InstancingEnable = state[edgeflag_index].instance_divisor > 0; + vi.InstanceDataStepRate = state[edgeflag_index].instance_divisor; + } + } + return cso; } @@ -2530,6 +3015,9 @@ iris_create_stream_output_target(struct pipe_context *ctx, cso->base.buffer_size = buffer_size; cso->base.context = ctx; + util_range_add(&res->valid_buffer_range, buffer_offset, + buffer_offset + buffer_size); + upload_state(ctx->stream_uploader, &cso->offset, sizeof(uint32_t), 4); return &cso->base; @@ -2574,8 +3062,22 @@ iris_set_stream_output_targets(struct pipe_context *ctx, * may have missed emitting it earlier, so do so now. (We're already * taking a stall to update 3DSTATE_SO_BUFFERS anyway...) 
*/ - if (active) + if (active) { ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST; + } else { + uint32_t flush = 0; + for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) { + struct iris_stream_output_target *tgt = + (void *) ice->state.so_target[i]; + if (tgt) { + struct iris_resource *res = (void *) tgt->base.buffer; + + flush |= iris_flush_bits_for_history(res); + iris_dirty_for_history(ice, res); + } + } + iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER], flush); + } } for (int i = 0; i < 4; i++) { @@ -2590,20 +3092,30 @@ iris_set_stream_output_targets(struct pipe_context *ctx, for (unsigned i = 0; i < 4; i++, so_buffers += GENX(3DSTATE_SO_BUFFER_length)) { - if (i >= num_targets || !targets[i]) { + struct iris_stream_output_target *tgt = (void *) ice->state.so_target[i]; + unsigned offset = offsets[i]; + + if (!tgt) { iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) sob.SOBufferIndex = i; continue; } - struct iris_stream_output_target *tgt = (void *) targets[i]; struct iris_resource *res = (void *) tgt->base.buffer; /* Note that offsets[i] will either be 0, causing us to zero * the value in the buffer, or 0xFFFFFFFF, which happens to mean * "continue appending at the existing offset." */ - assert(offsets[i] == 0 || offsets[i] == 0xFFFFFFFF); + assert(offset == 0 || offset == 0xFFFFFFFF); + + /* We might be called by Begin (offset = 0), Pause, then Resume + * (offset = 0xFFFFFFFF) before ever drawing (where these commands + * will actually be sent to the GPU). In this case, we don't want + * to append - we still want to do our initial zeroing. + */ + if (!tgt->zeroed) + offset = 0; iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) { sob.SurfaceBaseAddress = @@ -2611,12 +3123,12 @@ iris_set_stream_output_targets(struct pipe_context *ctx, sob.SOBufferEnable = true; sob.StreamOffsetWriteEnable = true; sob.StreamOutputBufferOffsetAddressEnable = true; - sob.MOCS = MOCS_WB; // XXX: MOCS + sob.MOCS = mocs(res->bo); sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1; sob.SOBufferIndex = i; - sob.StreamOffset = offsets[i]; + sob.StreamOffset = offset; sob.StreamOutputBufferOffsetAddress = rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset + tgt->offset.offset); @@ -2835,8 +3347,6 @@ iris_emit_sbe_swiz(struct iris_batch *batch, /* XXX: this should be generated when putting programs in place */ - // XXX: raster->sprite_coord_enable - for (int fs_attr = 0; fs_attr < VARYING_SLOT_MAX; fs_attr++) { const int input_index = wm_prog_data->urb_setup[fs_attr]; if (input_index < 0 || input_index >= 16) @@ -2970,10 +3480,11 @@ iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice) sbe.ForceVertexURBEntryReadLength = true; sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs; sbe.PointSpriteTextureCoordinateEnable = sprite_coord_overrides; - +#if GEN_GEN >= 9 for (int i = 0; i < 32; i++) { sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW; } +#endif } iris_emit_sbe_swiz(batch, ice, urb_read_offset, sprite_coord_overrides); @@ -3030,7 +3541,7 @@ static void iris_populate_fs_key(const struct iris_context *ice, struct brw_wm_prog_key *key) { - /* XXX: dirty flags? 
*/ + struct iris_screen *screen = (void *) ice->ctx.screen; const struct pipe_framebuffer_state *fb = &ice->state.framebuffer; const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa; const struct iris_rasterizer_state *rast = ice->state.cso_rast; @@ -3040,8 +3551,9 @@ iris_populate_fs_key(const struct iris_context *ice, key->clamp_fragment_color = rast->clamp_fragment_color; - key->replicate_alpha = fb->nr_cbufs > 1 && - (zsa->alpha.enabled || blend->alpha_to_coverage); + key->alpha_to_coverage = blend->alpha_to_coverage; + + key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha.enabled; /* XXX: only bother if COL0/1 are read */ key->flat_shade = rast->flatshade; @@ -3051,10 +3563,12 @@ iris_populate_fs_key(const struct iris_context *ice, key->coherent_fb_fetch = true; - // XXX: uint64_t input_slots_valid; - for >16 inputs + key->force_dual_color_blend = + screen->driconf.dual_color_blend_by_location && + (blend->blend_enables & 1) && blend->dual_color_blending; - // XXX: key->force_dual_color_blend for unigine - // XXX: respect hint for high_quality_derivatives:1; + /* TODO: support key->force_dual_color_blend for Unigine */ + /* TODO: Respect glHint for key->high_quality_derivatives */ } static void @@ -3063,13 +3577,6 @@ iris_populate_cs_key(const struct iris_context *ice, { } -#if 0 - // XXX: these need to go in INIT_THREAD_DISPATCH_FIELDS - pkt.SamplerCount = \ - DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4); \ - -#endif - static uint64_t KSP(const struct iris_compiled_shader *shader) { @@ -3077,9 +3584,12 @@ KSP(const struct iris_compiled_shader *shader) return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset; } -// Gen11 workaround table #2056 WABTPPrefetchDisable suggests to disable -// prefetching of binding tables in A0 and B0 steppings. XXX: Revisit -// this WA on C0 stepping. +/* Gen11 workaround table #2056 WABTPPrefetchDisable suggests to disable + * prefetching of binding tables in A0 and B0 steppings. XXX: Revisit + * this WA on C0 stepping. + * + * TODO: Fill out SamplerCount for prefetching? + */ #define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage) \ pkt.KernelStartPointer = KSP(shader); \ @@ -3096,8 +3606,9 @@ KSP(const struct iris_compiled_shader *shader) pkt.Enable = true; \ \ if (prog_data->total_scratch) { \ - uint32_t scratch_addr = \ + struct iris_bo *bo = \ iris_get_scratch_space(ice, prog_data->total_scratch, stage); \ + uint32_t scratch_addr = bo->gtt_offset; \ pkt.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11; \ pkt.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr); \ } @@ -3245,15 +3756,13 @@ iris_store_fs_state(struct iris_context *ice, iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) { ps.VectorMaskEnable = true; - //ps.SamplerCount = ... // XXX: WABTPPrefetchDisable, see above, drop at C0 ps.BindingTableEntryCount = GEN_GEN == 11 ? 0 : prog_data->binding_table.size_bytes / 4; ps.FloatingPointMode = prog_data->use_alt_mode; ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 
2 : 1); - ps.PushConstantEnable = shader->num_system_values > 0 || - prog_data->ubo_ranges[0].length > 0; + ps.PushConstantEnable = prog_data->ubo_ranges[0].length > 0; /* From the documentation for this packet: * "If the PS kernel does not need the Position XY Offsets to @@ -3291,9 +3800,10 @@ iris_store_fs_state(struct iris_context *ice, KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2); if (prog_data->total_scratch) { - uint32_t scratch_addr = + struct iris_bo *bo = iris_get_scratch_space(ice, prog_data->total_scratch, MESA_SHADER_FRAGMENT); + uint32_t scratch_addr = bo->gtt_offset; ps.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11; ps.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr); } @@ -3302,26 +3812,19 @@ iris_store_fs_state(struct iris_context *ice, iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) { psx.PixelShaderValid = true; psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode; - // XXX: alpha test / alpha to coverage :/ - psx.PixelShaderKillsPixel = wm_prog_data->uses_kill || - wm_prog_data->uses_omask; + psx.PixelShaderKillsPixel = wm_prog_data->uses_kill; psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0; psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth; psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w; psx.PixelShaderIsPerSample = wm_prog_data->persample_dispatch; - - if (wm_prog_data->uses_sample_mask) { - /* TODO: conservative rasterization */ - if (wm_prog_data->post_depth_coverage) - psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE; - else - psx.InputCoverageMaskState = ICMS_NORMAL; - } - psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask; + +#if GEN_GEN >= 9 psx.PixelShaderPullsBary = wm_prog_data->pulls_bary; psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil; - +#else + psx.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask; +#endif // XXX: UAV bit } } @@ -3412,47 +3915,6 @@ iris_store_derived_program_state(struct iris_context *ice, /* ------------------------------------------------------------------- */ -/** - * Configure the URB. - * - * XXX: write a real comment. 
- */ -static void -iris_upload_urb_config(struct iris_context *ice, struct iris_batch *batch) -{ - const struct gen_device_info *devinfo = &batch->screen->devinfo; - const unsigned push_size_kB = 32; - unsigned entries[4]; - unsigned start[4]; - unsigned size[4]; - - for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) { - if (!ice->shaders.prog[i]) { - size[i] = 1; - } else { - struct brw_vue_prog_data *vue_prog_data = - (void *) ice->shaders.prog[i]->prog_data; - size[i] = vue_prog_data->urb_entry_size; - } - assert(size[i] != 0); - } - - gen_get_urb_config(devinfo, 1024 * push_size_kB, - 1024 * ice->shaders.urb_size, - ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL, - ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL, - size, entries, start); - - for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) { - iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) { - urb._3DCommandSubOpcode += i; - urb.VSURBStartingAddress = start[i]; - urb.VSURBEntryAllocationSize = size[i] - 1; - urb.VSNumberofURBEntries = entries[i]; - } - } -} - static const uint32_t push_constant_opcodes[] = { [MESA_SHADER_VERTEX] = 21, [MESA_SHADER_TESS_CTRL] = 25, /* HS */ @@ -3486,6 +3948,76 @@ use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice) return ice->state.null_fb.offset; } +static uint32_t +surf_state_offset_for_aux(struct iris_resource *res, + unsigned aux_modes, + enum isl_aux_usage aux_usage) +{ + return SURFACE_STATE_ALIGNMENT * + util_bitcount(res->aux.possible_usages & ((1 << aux_usage) - 1)); +} + +static void +surf_state_update_clear_value(struct iris_batch *batch, + struct iris_resource *res, + struct iris_state_ref *state, + unsigned aux_modes, + enum isl_aux_usage aux_usage) +{ + struct isl_device *isl_dev = &batch->screen->isl_dev; + struct iris_bo *state_bo = iris_resource_bo(state->res); + uint64_t real_offset = state->offset + + IRIS_MEMZONE_BINDER_START; + uint32_t offset_into_bo = real_offset - state_bo->gtt_offset; + uint32_t clear_offset = offset_into_bo + + isl_dev->ss.clear_value_offset + + surf_state_offset_for_aux(res, aux_modes, aux_usage); + + batch->vtbl->copy_mem_mem(batch, state_bo, clear_offset, + res->aux.clear_color_bo, + res->aux.clear_color_offset, + isl_dev->ss.clear_value_size); +} + +static void +update_clear_value(struct iris_context *ice, + struct iris_batch *batch, + struct iris_resource *res, + struct iris_state_ref *state, + unsigned aux_modes, + struct isl_view *view) +{ + struct iris_screen *screen = batch->screen; + const struct gen_device_info *devinfo = &screen->devinfo; + + /* We only need to update the clear color in the surface state for gen8 and + * gen9. Newer gens can read it directly from the clear color state buffer. + */ + if (devinfo->gen > 9) + return; + + if (devinfo->gen == 9) { + /* Skip updating the ISL_AUX_USAGE_NONE surface state */ + aux_modes &= ~(1 << ISL_AUX_USAGE_NONE); + + while (aux_modes) { + enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes); + + surf_state_update_clear_value(batch, res, state, aux_modes, + aux_usage); + } + } else if (devinfo->gen == 8) { + pipe_resource_reference(&state->res, NULL); + void *map = alloc_surface_states(ice->state.surface_uploader, + state, res->aux.possible_usages); + while (aux_modes) { + enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes); + fill_surface_state(&screen->isl_dev, map, res, view, aux_usage); + map += SURFACE_STATE_ALIGNMENT; + } + } +} + /** * Add a surface to the validation list, as well as the buffer containing * the corresponding SURFACE_STATE. 
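/* Editor's note: the surf_state_offset_for_aux() / update_clear_value()
 * helpers above pack one SURFACE_STATE per possible aux usage back to back,
 * and locate the state for a given usage by counting how many enabled usages
 * sort below it.  The following is a minimal standalone sketch of that
 * indexing, not driver code: the enum values, SKETCH_SS_ALIGNMENT, and the
 * use of __builtin_popcount in place of util_bitcount are illustrative
 * assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_SS_ALIGNMENT 64  /* hypothetical SURFACE_STATE_ALIGNMENT */

enum sketch_aux_usage {          /* stand-in for enum isl_aux_usage */
   SKETCH_AUX_NONE  = 0,
   SKETCH_AUX_HIZ   = 1,
   SKETCH_AUX_MCS   = 2,
   SKETCH_AUX_CCS_E = 3,
};

/* Offset of the SURFACE_STATE for 'usage' within the packed run of states,
 * given the bitmask of usages that actually had states uploaded.
 */
static uint32_t
sketch_offset_for_aux(uint32_t possible_usages, enum sketch_aux_usage usage)
{
   return SKETCH_SS_ALIGNMENT *
          __builtin_popcount(possible_usages & ((1u << usage) - 1));
}

int main(void)
{
   /* Suppose states exist for NONE and CCS_E only: bits 0 and 3. */
   uint32_t possible = (1u << SKETCH_AUX_NONE) | (1u << SKETCH_AUX_CCS_E);

   /* NONE is the first packed state (offset 0), CCS_E the second (64). */
   printf("NONE  -> %u\n", (unsigned) sketch_offset_for_aux(possible, SKETCH_AUX_NONE));
   printf("CCS_E -> %u\n", (unsigned) sketch_offset_for_aux(possible, SKETCH_AUX_CCS_E));
   return 0;
}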
@@ -3493,51 +4025,75 @@ use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice) * Returns the binding table entry (offset to SURFACE_STATE). */ static uint32_t -use_surface(struct iris_batch *batch, +use_surface(struct iris_context *ice, + struct iris_batch *batch, struct pipe_surface *p_surf, - bool writeable) + bool writeable, + enum isl_aux_usage aux_usage) { struct iris_surface *surf = (void *) p_surf; + struct iris_resource *res = (void *) p_surf->texture; iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture), writeable); iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.res), false); - return surf->surface_state.offset; -} + if (res->aux.bo) { + iris_use_pinned_bo(batch, res->aux.bo, writeable); + if (res->aux.clear_color_bo) + iris_use_pinned_bo(batch, res->aux.clear_color_bo, false); -static uint32_t -use_sampler_view(struct iris_batch *batch, struct iris_sampler_view *isv) -{ - iris_use_pinned_bo(batch, isv->res->bo, false); - iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.res), false); + if (memcmp(&res->aux.clear_color, &surf->clear_color, + sizeof(surf->clear_color)) != 0) { + update_clear_value(ice, batch, res, &surf->surface_state, + res->aux.possible_usages, &surf->view); + surf->clear_color = res->aux.clear_color; + } + } - return isv->surface_state.offset; + return surf->surface_state.offset + + surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage); } static uint32_t -use_const_buffer(struct iris_batch *batch, - struct iris_context *ice, - struct iris_const_buffer *cbuf) +use_sampler_view(struct iris_context *ice, + struct iris_batch *batch, + struct iris_sampler_view *isv) { - if (!cbuf->surface_state.res) - return use_null_surface(batch, ice); + // XXX: ASTC hacks + enum isl_aux_usage aux_usage = + iris_resource_texture_aux_usage(ice, isv->res, isv->view.format, 0); - iris_use_pinned_bo(batch, iris_resource_bo(cbuf->data.res), false); - iris_use_pinned_bo(batch, iris_resource_bo(cbuf->surface_state.res), false); + iris_use_pinned_bo(batch, isv->res->bo, false); + iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.res), false); - return cbuf->surface_state.offset; + if (isv->res->aux.bo) { + iris_use_pinned_bo(batch, isv->res->aux.bo, false); + if (isv->res->aux.clear_color_bo) + iris_use_pinned_bo(batch, isv->res->aux.clear_color_bo, false); + if (memcmp(&isv->res->aux.clear_color, &isv->clear_color, + sizeof(isv->clear_color)) != 0) { + update_clear_value(ice, batch, isv->res, &isv->surface_state, + isv->res->aux.sampler_usages, &isv->view); + isv->clear_color = isv->res->aux.clear_color; + } + } + + return isv->surface_state.offset + + surf_state_offset_for_aux(isv->res, isv->res->aux.sampler_usages, + aux_usage); } static uint32_t -use_ssbo(struct iris_batch *batch, struct iris_context *ice, - struct iris_shader_state *shs, int i) +use_ubo_ssbo(struct iris_batch *batch, + struct iris_context *ice, + struct pipe_shader_buffer *buf, + struct iris_state_ref *surf_state, + bool writable) { - if (!shs->ssbo[i]) + if (!buf->buffer) return use_null_surface(batch, ice); - struct iris_state_ref *surf_state = &shs->ssbo_surface_state[i]; - - iris_use_pinned_bo(batch, iris_resource_bo(shs->ssbo[i]), true); + iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable); iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false); return surf_state->offset; @@ -3547,16 +4103,21 @@ static uint32_t use_image(struct iris_batch *batch, struct iris_context *ice, struct iris_shader_state *shs, int 
i) { - if (!shs->image[i].res) + struct iris_image_view *iv = &shs->image[i]; + struct iris_resource *res = (void *) iv->base.resource; + + if (!res) return use_null_surface(batch, ice); - struct iris_state_ref *surf_state = &shs->image[i].surface_state; + bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE; - iris_use_pinned_bo(batch, iris_resource_bo(shs->image[i].res), - shs->image[i].access & PIPE_IMAGE_ACCESS_WRITE); - iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false); + iris_use_pinned_bo(batch, res->bo, write); + iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.res), false); - return surf_state->offset; + if (res->aux.bo) + iris_use_pinned_bo(batch, res->aux.bo, write); + + return iv->surface_state.offset; } #define push_bt_entry(addr) \ @@ -3615,9 +4176,13 @@ iris_populate_binding_table(struct iris_context *ice, /* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */ if (cso_fb->nr_cbufs) { for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) { - uint32_t addr = - cso_fb->cbufs[i] ? use_surface(batch, cso_fb->cbufs[i], true) - : use_null_fb_surface(batch, ice); + uint32_t addr; + if (cso_fb->cbufs[i]) { + addr = use_surface(ice, batch, cso_fb->cbufs[i], true, + ice->state.draw_aux_usage[i]); + } else { + addr = use_null_fb_surface(batch, ice); + } push_bt_entry(addr); } } else { @@ -3626,11 +4191,13 @@ iris_populate_binding_table(struct iris_context *ice, } } - bt_assert(texture_start, info->num_textures > 0); + unsigned num_textures = util_last_bit(info->textures_used); - for (int i = 0; i < info->num_textures; i++) { + bt_assert(texture_start, num_textures > 0); + + for (int i = 0; i < num_textures; i++) { struct iris_sampler_view *view = shs->textures[i]; - uint32_t addr = view ? use_sampler_view(batch, view) + uint32_t addr = view ? 
use_sampler_view(ice, batch, view) : use_null_surface(batch, ice); push_bt_entry(addr); } @@ -3642,12 +4209,11 @@ iris_populate_binding_table(struct iris_context *ice, push_bt_entry(addr); } - const int num_ubos = iris_get_shader_num_ubos(ice, stage); - - bt_assert(ubo_start, num_ubos > 0); + bt_assert(ubo_start, shader->num_cbufs > 0); - for (int i = 0; i < num_ubos; i++) { - uint32_t addr = use_const_buffer(batch, ice, &shs->constbuf[i]); + for (int i = 0; i < shader->num_cbufs; i++) { + uint32_t addr = use_ubo_ssbo(batch, ice, &shs->constbuf[i], + &shs->constbuf_surf_state[i], false); push_bt_entry(addr); } @@ -3660,13 +4226,15 @@ iris_populate_binding_table(struct iris_context *ice, */ if (info->num_abos + info->num_ssbos > 0) { for (int i = 0; i < IRIS_MAX_ABOS + info->num_ssbos; i++) { - uint32_t addr = use_ssbo(batch, ice, shs, i); + uint32_t addr = + use_ubo_ssbo(batch, ice, &shs->ssbo[i], &shs->ssbo_surf_state[i], + shs->writable_ssbos & (1u << i)); push_bt_entry(addr); } } #if 0 - // XXX: not implemented yet + /* XXX: YUV surfaces not implemented yet */ bt_assert(plane_start[1], ...); bt_assert(plane_start[2], ...); #endif @@ -3683,6 +4251,30 @@ iris_use_optional_res(struct iris_batch *batch, } } +static void +pin_depth_and_stencil_buffers(struct iris_batch *batch, + struct pipe_surface *zsbuf, + struct iris_depth_stencil_alpha_state *cso_zsa) +{ + if (!zsbuf) + return; + + struct iris_resource *zres, *sres; + iris_get_depth_stencil_resources(zsbuf->texture, &zres, &sres); + + if (zres) { + iris_use_pinned_bo(batch, zres->bo, cso_zsa->depth_writes_enabled); + if (zres->aux.bo) { + iris_use_pinned_bo(batch, zres->aux.bo, + cso_zsa->depth_writes_enabled); + } + } + + if (sres) { + iris_use_pinned_bo(batch, sres->bo, cso_zsa->stencil_writes_enabled); + } +} + /* ------------------------------------------------------------------- */ /** @@ -3705,8 +4297,6 @@ iris_restore_render_saved_bos(struct iris_context *ice, { struct iris_genx_state *genx = ice->state.genx; - // XXX: whack IRIS_SHADER_DIRTY_BINDING_TABLE on new batch - const uint64_t clean = ~ice->state.dirty; if (clean & IRIS_DIRTY_CC_VIEWPORT) { @@ -3760,8 +4350,8 @@ iris_restore_render_saved_bos(struct iris_context *ice, if (range->length == 0) continue; - struct iris_const_buffer *cbuf = &shs->constbuf[range->block]; - struct iris_resource *res = (void *) cbuf->data.res; + struct pipe_shader_buffer *cbuf = &shs->constbuf[range->block]; + struct iris_resource *res = (void *) cbuf->buffer; if (res) iris_use_pinned_bo(batch, res->bo, false); @@ -3787,28 +4377,26 @@ iris_restore_render_saved_bos(struct iris_context *ice, for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) { if (clean & (IRIS_DIRTY_VS << stage)) { struct iris_compiled_shader *shader = ice->shaders.prog[stage]; + if (shader) { struct iris_bo *bo = iris_resource_bo(shader->assembly.res); iris_use_pinned_bo(batch, bo, false); - } - // XXX: scratch buffer + struct brw_stage_prog_data *prog_data = shader->prog_data; + + if (prog_data->total_scratch > 0) { + struct iris_bo *bo = + iris_get_scratch_space(ice, prog_data->total_scratch, stage); + iris_use_pinned_bo(batch, bo, true); + } + } } } - if (clean & IRIS_DIRTY_DEPTH_BUFFER) { + if ((clean & IRIS_DIRTY_DEPTH_BUFFER) && + (clean & IRIS_DIRTY_WM_DEPTH_STENCIL)) { struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer; - - if (cso_fb->zsbuf) { - struct iris_resource *zres, *sres; - iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, - &zres, &sres); - // XXX: might not be writable... 
- if (zres) - iris_use_pinned_bo(batch, zres->bo, true); - if (sres) - iris_use_pinned_bo(batch, sres->bo, true); - } + pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa); } if (draw->index_size == 0 && ice->state.last_res.index_buffer) { @@ -3847,8 +4435,8 @@ iris_restore_compute_saved_bos(struct iris_context *ice, const struct brw_ubo_range *range = &prog_data->ubo_ranges[0]; if (range->length > 0) { - struct iris_const_buffer *cbuf = &shs->constbuf[range->block]; - struct iris_resource *res = (void *) cbuf->data.res; + struct pipe_shader_buffer *cbuf = &shs->constbuf[range->block]; + struct iris_resource *res = (void *) cbuf->buffer; if (res) iris_use_pinned_bo(batch, res->bo, false); @@ -3869,12 +4457,19 @@ iris_restore_compute_saved_bos(struct iris_context *ice, if (clean & IRIS_DIRTY_CS) { struct iris_compiled_shader *shader = ice->shaders.prog[stage]; + if (shader) { struct iris_bo *bo = iris_resource_bo(shader->assembly.res); iris_use_pinned_bo(batch, bo, false); - } - // XXX: scratch buffer + struct brw_stage_prog_data *prog_data = shader->prog_data; + + if (prog_data->total_scratch > 0) { + struct iris_bo *bo = + iris_get_scratch_space(ice, prog_data->total_scratch, stage); + iris_use_pinned_bo(batch, bo, true); + } + } } } @@ -3891,7 +4486,7 @@ iris_update_surface_base_address(struct iris_batch *batch, flush_for_state_base_change(batch); iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) { - // XXX: sba.SurfaceStateMemoryObjectControlState = MOCS_WB; + sba.SurfaceStateMOCS = MOCS_WB; sba.SurfaceStateBaseAddressModifyEnable = true; sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0); } @@ -3994,11 +4589,23 @@ iris_upload_dirty_render_state(struct iris_context *ice, } } - /* XXX: L3 State */ - - // XXX: this is only flagged at setup, we assume a static configuration if (dirty & IRIS_DIRTY_URB) { - iris_upload_urb_config(ice, batch); + unsigned size[4]; + + for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) { + if (!ice->shaders.prog[i]) { + size[i] = 1; + } else { + struct brw_vue_prog_data *vue_prog_data = + (void *) ice->shaders.prog[i]->prog_data; + size[i] = vue_prog_data->urb_entry_size; + } + assert(size[i] != 0); + } + + genX(emit_urb_setup)(ice, batch, size, + ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL, + ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL); } if (dirty & IRIS_DIRTY_BLEND_STATE) { @@ -4006,7 +4613,14 @@ iris_upload_dirty_render_state(struct iris_context *ice, struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer; struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa; const int header_dwords = GENX(BLEND_STATE_length); - const int rt_dwords = cso_fb->nr_cbufs * GENX(BLEND_STATE_ENTRY_length); + + /* Always write at least one BLEND_STATE - the final RT message will + * reference BLEND_STATE[0] even if there aren't color writes. There + * may still be alpha testing, computed depth, and so on. 
+ */ + const int rt_dwords = + MAX2(cso_fb->nr_cbufs, 1) * GENX(BLEND_STATE_ENTRY_length); + uint32_t blend_offset; uint32_t *blend_map = stream_state(batch, ice->state.dynamic_uploader, @@ -4030,6 +4644,9 @@ iris_upload_dirty_render_state(struct iris_context *ice, if (dirty & IRIS_DIRTY_COLOR_CALC_STATE) { struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa; +#if GEN_GEN == 8 + struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref; +#endif uint32_t cc_offset; void *cc_map = stream_state(batch, ice->state.dynamic_uploader, @@ -4043,6 +4660,10 @@ iris_upload_dirty_render_state(struct iris_context *ice, cc.BlendConstantColorGreen = ice->state.blend_color.color[1]; cc.BlendConstantColorBlue = ice->state.blend_color.color[2]; cc.BlendConstantColorAlpha = ice->state.blend_color.color[3]; +#if GEN_GEN == 8 + cc.StencilReferenceValue = p_stencil_refs->ref_value[0]; + cc.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1]; +#endif } iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), ptr) { ptr.ColorCalcStatePointer = cc_offset; @@ -4050,43 +4671,6 @@ iris_upload_dirty_render_state(struct iris_context *ice, } } - /* Upload constants for TCS passthrough. */ - if ((dirty & IRIS_DIRTY_CONSTANTS_TCS) && - ice->shaders.prog[MESA_SHADER_TESS_CTRL] && - !ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL]) { - struct iris_compiled_shader *tes_shader = ice->shaders.prog[MESA_SHADER_TESS_EVAL]; - assert(tes_shader); - - /* Passthrough always copies 2 vec4s, so when uploading data we ensure - * it is in the right layout for TES. - */ - float hdr[8] = {}; - struct brw_tes_prog_data *tes_prog_data = (void *) tes_shader->prog_data; - switch (tes_prog_data->domain) { - case BRW_TESS_DOMAIN_QUAD: - for (int i = 0; i < 4; i++) - hdr[7 - i] = ice->state.default_outer_level[i]; - hdr[3] = ice->state.default_inner_level[0]; - hdr[2] = ice->state.default_inner_level[1]; - break; - case BRW_TESS_DOMAIN_TRI: - for (int i = 0; i < 3; i++) - hdr[7 - i] = ice->state.default_outer_level[i]; - hdr[4] = ice->state.default_inner_level[0]; - break; - case BRW_TESS_DOMAIN_ISOLINE: - hdr[7] = ice->state.default_outer_level[1]; - hdr[6] = ice->state.default_outer_level[0]; - break; - } - - struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL]; - struct iris_const_buffer *cbuf = &shs->constbuf[0]; - u_upload_data(ice->ctx.const_uploader, 0, sizeof(hdr), 32, - &hdr[0], &cbuf->data.offset, - &cbuf->data.res); - } - for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) { if (!(dirty & (IRIS_DIRTY_CONSTANTS_VS << stage))) continue; @@ -4124,14 +4708,14 @@ iris_upload_dirty_render_state(struct iris_context *ice, if (range->length == 0) continue; - struct iris_const_buffer *cbuf = &shs->constbuf[range->block]; - struct iris_resource *res = (void *) cbuf->data.res; + struct pipe_shader_buffer *cbuf = &shs->constbuf[range->block]; + struct iris_resource *res = (void *) cbuf->buffer; - assert(cbuf->data.offset % 32 == 0); + assert(cbuf->buffer_offset % 32 == 0); pkt.ConstantBody.ReadLength[n] = range->length; pkt.ConstantBody.Buffer[n] = - res ? ro_bo(res->bo, range->start * 32 + cbuf->data.offset) + res ? 
ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset) : ro_bo(batch->screen->workaround_bo, 0); n--; } @@ -4154,14 +4738,13 @@ iris_upload_dirty_render_state(struct iris_context *ice, } } - if (ice->state.need_border_colors) - iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false); - for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) { if (!(dirty & (IRIS_DIRTY_SAMPLER_STATES_VS << stage)) || !ice->shaders.prog[stage]) continue; + iris_upload_sampler_states(ice, stage); + struct iris_shader_state *shs = &ice->state.shaders[stage]; struct pipe_resource *res = shs->sampler_table.res; if (res) @@ -4173,6 +4756,9 @@ iris_upload_dirty_render_state(struct iris_context *ice, } } + if (ice->state.need_border_colors) + iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false); + if (dirty & IRIS_DIRTY_MULTISAMPLE) { iris_emit_cmd(batch, GENX(3DSTATE_MULTISAMPLE), ms) { ms.PixelLocation = @@ -4184,7 +4770,7 @@ iris_upload_dirty_render_state(struct iris_context *ice, if (dirty & IRIS_DIRTY_SAMPLE_MASK) { iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) { - ms.SampleMask = MAX2(ice->state.sample_mask, 1); + ms.SampleMask = ice->state.sample_mask; } } @@ -4195,10 +4781,41 @@ iris_upload_dirty_render_state(struct iris_context *ice, struct iris_compiled_shader *shader = ice->shaders.prog[stage]; if (shader) { + struct brw_stage_prog_data *prog_data = shader->prog_data; struct iris_resource *cache = (void *) shader->assembly.res; iris_use_pinned_bo(batch, cache->bo, false); - iris_batch_emit(batch, shader->derived_data, - iris_derived_program_state_size(stage)); + + if (prog_data->total_scratch > 0) { + struct iris_bo *bo = + iris_get_scratch_space(ice, prog_data->total_scratch, stage); + iris_use_pinned_bo(batch, bo, true); + } +#if GEN_GEN >= 9 + if (stage == MESA_SHADER_FRAGMENT && wm_prog_data->uses_sample_mask) { + uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0}; + uint32_t *shader_psx = ((uint32_t*)shader->derived_data) + + GENX(3DSTATE_PS_length); + struct iris_rasterizer_state *cso = ice->state.cso_rast; + + iris_pack_command(GENX(3DSTATE_PS_EXTRA), &psx_state, psx) { + if (wm_prog_data->post_depth_coverage) + psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE; + else if (wm_prog_data->inner_coverage && cso->conservative_rasterization) + psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE; + else + psx.InputCoverageMaskState = ICMS_NORMAL; + } + + iris_batch_emit(batch, shader->derived_data, + sizeof(uint32_t) * GENX(3DSTATE_PS_length)); + iris_emit_merge(batch, + shader_psx, + psx_state, + GENX(3DSTATE_PS_EXTRA_length)); + } else +#endif + iris_batch_emit(batch, shader->derived_data, + iris_derived_program_state_size(stage)); } else { if (stage == MESA_SHADER_TESS_EVAL) { iris_emit_cmd(batch, GENX(3DSTATE_HS), hs); @@ -4218,6 +4835,7 @@ iris_upload_dirty_render_state(struct iris_context *ice, struct iris_stream_output_target *tgt = (void *) ice->state.so_target[i]; if (tgt) { + tgt->zeroed = true; iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer), true); iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res), @@ -4260,11 +4878,19 @@ iris_upload_dirty_render_state(struct iris_context *ice, struct iris_rasterizer_state *cso_rast = ice->state.cso_rast; struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer; + bool gs_or_tes = ice->shaders.prog[MESA_SHADER_GEOMETRY] || + ice->shaders.prog[MESA_SHADER_TESS_EVAL]; + bool points_or_lines = cso_rast->fill_mode_point_or_line || + (gs_or_tes ? 
ice->shaders.output_topology_is_points_or_lines
+                 : ice->state.prim_is_points_or_lines);
+
       uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
       iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
          cl.StatisticsEnable = ice->state.statistics_counters_enabled;
          cl.ClipMode = cso_rast->rasterizer_discard ? CLIPMODE_REJECT_ALL
                                                     : CLIPMODE_NORMAL;
+         cl.ViewportXYClipTestEnable = !points_or_lines;
+
          if (wm_prog_data->barycentric_interp_modes &
              BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
             cl.NonPerspectiveBarycentricEnable = true;
@@ -4283,7 +4909,6 @@ iris_upload_dirty_render_state(struct iris_context *ice,
       }
    }
 
-   /* XXX: FS program updates needs to flag IRIS_DIRTY_WM */
    if (dirty & IRIS_DIRTY_WM) {
       struct iris_rasterizer_state *cso = ice->state.cso_rast;
       uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];
@@ -4298,6 +4923,10 @@ iris_upload_dirty_render_state(struct iris_context *ice,
             wm.EarlyDepthStencilControl = EDSC_PREPS;
          else if (wm_prog_data->has_side_effects)
             wm.EarlyDepthStencilControl = EDSC_PSEXEC;
+
+         /* We could skip this bit if color writes are enabled. */
+         if (wm_prog_data->has_side_effects || wm_prog_data->uses_kill)
+            wm.ForceThreadDispatchEnable = ForceON;
       }
       iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
    }
@@ -4309,10 +4938,21 @@ iris_upload_dirty_render_state(struct iris_context *ice,
    if (dirty & IRIS_DIRTY_PS_BLEND) {
       struct iris_blend_state *cso_blend = ice->state.cso_blend;
       struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
+      const struct shader_info *fs_info =
+         iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
+
       uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
       iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
-         pb.HasWriteableRT = true; // XXX: comes from somewhere :(
+         pb.HasWriteableRT = has_writeable_rt(cso_blend, fs_info);
          pb.AlphaTestEnable = cso_zsa->alpha.enabled;
+
+         /* The dual source blending docs caution against using SRC1 factors
+          * when the shader doesn't use a dual source render target write.
+          * Empirically, this can lead to GPU hangs, and the results are
+          * undefined anyway, so simply disable blending to avoid the hang.
+          */
+         pb.ColorBufferBlendEnable = (cso_blend->blend_enables & 1) &&
+            (!cso_blend->dual_color_blending || wm_prog_data->dual_src_blend);
       }
 
       iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
@@ -4321,14 +4961,17 @@ iris_upload_dirty_render_state(struct iris_context *ice,
 
    if (dirty & IRIS_DIRTY_WM_DEPTH_STENCIL) {
       struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
+#if GEN_GEN >= 9
       struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
-
       uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
       iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
          wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
          wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
       }
       iris_emit_merge(batch, cso->wmds, stencil_refs, ARRAY_SIZE(cso->wmds));
+#else
+      iris_batch_emit(batch, cso->wmds, sizeof(cso->wmds));
+#endif
    }
 
    if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
@@ -4345,21 +4988,38 @@ iris_upload_dirty_render_state(struct iris_context *ice,
    }
 
    if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
-      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
       struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
 
-      iris_batch_emit(batch, cso_z->packets, sizeof(cso_z->packets));
+      /* Do not emit the clear params yet. We need to update the clear value
+       * first.
+ */ + uint32_t clear_length = GENX(3DSTATE_CLEAR_PARAMS_length) * 4; + uint32_t cso_z_size = sizeof(cso_z->packets) - clear_length; + iris_batch_emit(batch, cso_z->packets, cso_z_size); + + union isl_color_value clear_value = { .f32 = { 0, } }; + struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer; if (cso_fb->zsbuf) { struct iris_resource *zres, *sres; iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, &zres, &sres); - // XXX: might not be writable... - if (zres) - iris_use_pinned_bo(batch, zres->bo, true); - if (sres) - iris_use_pinned_bo(batch, sres->bo, true); + if (zres && zres->aux.bo) + clear_value = iris_resource_get_clear_color(zres, NULL, NULL); + } + + uint32_t clear_params[GENX(3DSTATE_CLEAR_PARAMS_length)]; + iris_pack_command(GENX(3DSTATE_CLEAR_PARAMS), clear_params, clear) { + clear.DepthClearValueValid = true; + clear.DepthClearValue = clear_value.f32[0]; } + iris_batch_emit(batch, clear_params, clear_length); + } + + if (dirty & (IRIS_DIRTY_DEPTH_BUFFER | IRIS_DIRTY_WM_DEPTH_STENCIL)) { + /* Listen for buffer changes, and also write enable changes. */ + struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer; + pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa); } if (dirty & IRIS_DIRTY_POLYGON_STIPPLE) { @@ -4384,6 +5044,62 @@ iris_upload_dirty_render_state(struct iris_context *ice, if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) { int count = util_bitcount64(ice->state.bound_vertex_buffers); + int dynamic_bound = ice->state.bound_vertex_buffers; + + if (ice->state.vs_uses_draw_params) { + if (ice->draw.draw_params_offset == 0) { + u_upload_data(ice->state.dynamic_uploader, 0, sizeof(ice->draw.params), + 4, &ice->draw.params, &ice->draw.draw_params_offset, + &ice->draw.draw_params_res); + } + assert(ice->draw.draw_params_res); + + struct iris_vertex_buffer_state *state = + &(ice->state.genx->vertex_buffers[count]); + pipe_resource_reference(&state->resource, ice->draw.draw_params_res); + struct iris_resource *res = (void *) state->resource; + + iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) { + vb.VertexBufferIndex = count; + vb.AddressModifyEnable = true; + vb.BufferPitch = 0; + vb.BufferSize = res->bo->size - ice->draw.draw_params_offset; + vb.BufferStartingAddress = + ro_bo(NULL, res->bo->gtt_offset + + (int) ice->draw.draw_params_offset); + vb.MOCS = mocs(res->bo); + } + dynamic_bound |= 1ull << count; + count++; + } + + if (ice->state.vs_uses_derived_draw_params) { + u_upload_data(ice->state.dynamic_uploader, 0, + sizeof(ice->draw.derived_params), 4, + &ice->draw.derived_params, + &ice->draw.derived_draw_params_offset, + &ice->draw.derived_draw_params_res); + + struct iris_vertex_buffer_state *state = + &(ice->state.genx->vertex_buffers[count]); + pipe_resource_reference(&state->resource, + ice->draw.derived_draw_params_res); + struct iris_resource *res = (void *) ice->draw.derived_draw_params_res; + + iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) { + vb.VertexBufferIndex = count; + vb.AddressModifyEnable = true; + vb.BufferPitch = 0; + vb.BufferSize = + res->bo->size - ice->draw.derived_draw_params_offset; + vb.BufferStartingAddress = + ro_bo(NULL, res->bo->gtt_offset + + (int) ice->draw.derived_draw_params_offset); + vb.MOCS = mocs(res->bo); + } + dynamic_bound |= 1ull << count; + count++; + } if (count) { /* The VF cache designers cut corners, and made the cache key's @@ -4397,7 +5113,7 @@ iris_upload_dirty_render_state(struct iris_context *ice, */ unsigned flush_flags = 0; - uint64_t bound = 
ice->state.bound_vertex_buffers; + uint64_t bound = dynamic_bound; while (bound) { const int i = u_bit_scan64(&bound); uint16_t high_bits = 0; @@ -4409,18 +5125,10 @@ iris_upload_dirty_render_state(struct iris_context *ice, high_bits = res->bo->gtt_offset >> 32ull; if (high_bits != ice->state.last_vbo_high_bits[i]) { - flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE | + PIPE_CONTROL_CS_STALL; ice->state.last_vbo_high_bits[i] = high_bits; } - - /* If the buffer was written to by streamout, we may need - * to stall so those writes land and become visible to the - * vertex fetcher. - * - * TODO: This may stall more than necessary. - */ - if (res->bind_history & PIPE_BIND_STREAM_OUTPUT) - flush_flags |= PIPE_CONTROL_CS_STALL; } } @@ -4436,7 +5144,7 @@ iris_upload_dirty_render_state(struct iris_context *ice, } map += 1; - bound = ice->state.bound_vertex_buffers; + bound = dynamic_bound; while (bound) { const int i = u_bit_scan64(&bound); memcpy(map, genx->vertex_buffers[i].state, @@ -4449,10 +5157,90 @@ iris_upload_dirty_render_state(struct iris_context *ice, if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) { struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements; const unsigned entries = MAX2(cso->count, 1); - iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) * - (1 + entries * GENX(VERTEX_ELEMENT_STATE_length))); - iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) * - entries * GENX(3DSTATE_VF_INSTANCING_length)); + if (!(ice->state.vs_needs_sgvs_element || + ice->state.vs_uses_derived_draw_params || + ice->state.vs_needs_edge_flag)) { + iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) * + (1 + entries * GENX(VERTEX_ELEMENT_STATE_length))); + } else { + uint32_t dynamic_ves[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)]; + const unsigned dyn_count = cso->count + + ice->state.vs_needs_sgvs_element + + ice->state.vs_uses_derived_draw_params; + + iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), + &dynamic_ves, ve) { + ve.DWordLength = + 1 + GENX(VERTEX_ELEMENT_STATE_length) * dyn_count - 2; + } + memcpy(&dynamic_ves[1], &cso->vertex_elements[1], + (cso->count - ice->state.vs_needs_edge_flag) * + GENX(VERTEX_ELEMENT_STATE_length) * sizeof(uint32_t)); + uint32_t *ve_pack_dest = + &dynamic_ves[1 + (cso->count - ice->state.vs_needs_edge_flag) * + GENX(VERTEX_ELEMENT_STATE_length)]; + + if (ice->state.vs_needs_sgvs_element) { + uint32_t base_ctrl = ice->state.vs_uses_draw_params ? 
+ VFCOMP_STORE_SRC : VFCOMP_STORE_0; + iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) { + ve.Valid = true; + ve.VertexBufferIndex = + util_bitcount64(ice->state.bound_vertex_buffers); + ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT; + ve.Component0Control = base_ctrl; + ve.Component1Control = base_ctrl; + ve.Component2Control = VFCOMP_STORE_0; + ve.Component3Control = VFCOMP_STORE_0; + } + ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length); + } + if (ice->state.vs_uses_derived_draw_params) { + iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) { + ve.Valid = true; + ve.VertexBufferIndex = + util_bitcount64(ice->state.bound_vertex_buffers) + + ice->state.vs_uses_draw_params; + ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT; + ve.Component0Control = VFCOMP_STORE_SRC; + ve.Component1Control = VFCOMP_STORE_SRC; + ve.Component2Control = VFCOMP_STORE_0; + ve.Component3Control = VFCOMP_STORE_0; + } + ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length); + } + if (ice->state.vs_needs_edge_flag) { + for (int i = 0; i < GENX(VERTEX_ELEMENT_STATE_length); i++) + ve_pack_dest[i] = cso->edgeflag_ve[i]; + } + + iris_batch_emit(batch, &dynamic_ves, sizeof(uint32_t) * + (1 + dyn_count * GENX(VERTEX_ELEMENT_STATE_length))); + } + + if (!ice->state.vs_needs_edge_flag) { + iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) * + entries * GENX(3DSTATE_VF_INSTANCING_length)); + } else { + assert(cso->count > 0); + const unsigned edgeflag_index = cso->count - 1; + uint32_t dynamic_vfi[33 * GENX(3DSTATE_VF_INSTANCING_length)]; + memcpy(&dynamic_vfi[0], cso->vf_instancing, edgeflag_index * + GENX(3DSTATE_VF_INSTANCING_length) * sizeof(uint32_t)); + + uint32_t *vfi_pack_dest = &dynamic_vfi[0] + + edgeflag_index * GENX(3DSTATE_VF_INSTANCING_length); + iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) { + vi.VertexElementIndex = edgeflag_index + + ice->state.vs_needs_sgvs_element + + ice->state.vs_uses_derived_draw_params; + } + for (int i = 0; i < GENX(3DSTATE_VF_INSTANCING_length); i++) + vfi_pack_dest[i] |= cso->edgeflag_vfi[i]; + + iris_batch_emit(batch, &dynamic_vfi[0], sizeof(uint32_t) * + entries * GENX(3DSTATE_VF_INSTANCING_length)); + } } if (dirty & IRIS_DIRTY_VF_SGVS) { @@ -4464,13 +5252,15 @@ iris_upload_dirty_render_state(struct iris_context *ice, if (vs_prog_data->uses_vertexid) { sgv.VertexIDEnable = true; sgv.VertexIDComponentNumber = 2; - sgv.VertexIDElementOffset = cso->count; + sgv.VertexIDElementOffset = + cso->count - ice->state.vs_needs_edge_flag; } if (vs_prog_data->uses_instanceid) { sgv.InstanceIDEnable = true; sgv.InstanceIDComponentNumber = 3; - sgv.InstanceIDElementOffset = cso->count; + sgv.InstanceIDElementOffset = + cso->count - ice->state.vs_needs_edge_flag; } } } @@ -4484,7 +5274,13 @@ iris_upload_dirty_render_state(struct iris_context *ice, } } - // XXX: Gen8 - PMA fix + if (dirty & IRIS_DIRTY_VF_STATISTICS) { + iris_emit_cmd(batch, GENX(3DSTATE_VF_STATISTICS), vf) { + vf.StatisticsEnable = true; + } + } + + /* TODO: Gen8 PMA fix */ } static void @@ -4526,15 +5322,16 @@ iris_upload_render_state(struct iris_context *ice, iris_emit_cmd(batch, GENX(3DSTATE_INDEX_BUFFER), ib) { ib.IndexFormat = draw->index_size >> 1; - ib.MOCS = MOCS_WB; - ib.BufferSize = bo->size; + ib.MOCS = mocs(bo); + ib.BufferSize = bo->size - offset; ib.BufferStartingAddress = ro_bo(bo, offset); } /* The VF cache key only uses 32-bits, see vertex buffer comment above */ uint16_t high_bits = bo->gtt_offset >> 32ull; if (high_bits != 
ice->state.last_index_bo_high_bits) { - iris_emit_pipe_control_flush(batch, PIPE_CONTROL_VF_CACHE_INVALIDATE); + iris_emit_pipe_control_flush(batch, PIPE_CONTROL_VF_CACHE_INVALIDATE | + PIPE_CONTROL_CS_STALL); ice->state.last_index_bo_high_bits = high_bits; } } @@ -4588,7 +5385,7 @@ iris_upload_render_state(struct iris_context *ice, struct iris_stream_output_target *so = (void *) draw->count_from_stream_output; - // XXX: avoid if possible + /* XXX: Replace with actual cache tracking */ iris_emit_pipe_control_flush(batch, PIPE_CONTROL_CS_STALL); iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) { @@ -4596,6 +5393,8 @@ iris_upload_render_state(struct iris_context *ice, lrm.MemoryAddress = ro_bo(iris_resource_bo(so->offset.res), so->offset.offset); } + if (so->base.buffer_offset) + iris_math_add32_gpr0(ice, batch, -so->base.buffer_offset); iris_math_div32_gpr0(ice, batch, so->stride); _iris_emit_lrr(batch, _3DPRIM_VERTEX_COUNT, CS_GPR(0)); @@ -4646,12 +5445,22 @@ iris_upload_compute_state(struct iris_context *ice, struct brw_stage_prog_data *prog_data = shader->prog_data; struct brw_cs_prog_data *cs_prog_data = (void *) prog_data; + /* Always pin the binder. If we're emitting new binding table pointers, + * we need it. If not, we're probably inheriting old tables via the + * context, and need it anyway. Since true zero-bindings cases are + * practically non-existent, just pin it and avoid last_res tracking. + */ + iris_use_pinned_bo(batch, ice->state.binder.bo, false); + if ((dirty & IRIS_DIRTY_CONSTANTS_CS) && shs->cbuf0_needs_upload) upload_uniforms(ice, MESA_SHADER_COMPUTE); if (dirty & IRIS_DIRTY_BINDINGS_CS) iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false); + if (dirty & IRIS_DIRTY_SAMPLER_STATES_CS) + iris_upload_sampler_states(ice, MESA_SHADER_COMPUTE); + iris_use_optional_res(batch, shs->sampler_table.res, false); iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false); @@ -4671,11 +5480,11 @@ iris_upload_compute_state(struct iris_context *ice, iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) { if (prog_data->total_scratch) { - uint32_t scratch_addr = + struct iris_bo *bo = iris_get_scratch_space(ice, prog_data->total_scratch, MESA_SHADER_COMPUTE); vfe.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11; - vfe.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr); + vfe.ScratchSpaceBasePointer = rw_bo(bo, 0); } vfe.MaximumNumberofThreads = @@ -4684,22 +5493,20 @@ iris_upload_compute_state(struct iris_context *ice, vfe.ResetGatewayTimer = Resettingrelativetimerandlatchingtheglobaltimestamp; #endif - +#if GEN_GEN == 8 + vfe.BypassGatewayControl = true; +#endif vfe.NumberofURBEntries = 2; vfe.URBEntryAllocationSize = 2; - // XXX: Use Indirect Payload Storage? vfe.CURBEAllocationSize = ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads + cs_prog_data->push.cross_thread.regs, 2); } } - // XXX: hack iris_set_constant_buffers to upload these thread counts - // XXX: along with regular uniforms for compute shaders, somehow. 
- + /* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */ uint32_t curbe_data_offset = 0; - // TODO: Move subgroup-id into uniforms ubo so we can push uniforms assert(cs_prog_data->push.cross_thread.dwords == 0 && cs_prog_data->push.per_thread.dwords == 1 && cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID); @@ -4810,8 +5617,8 @@ iris_destroy_state(struct iris_context *ice) const int i = u_bit_scan64(&bound_vbs); pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL); } + free(ice->state.genx); - // XXX: unreference resources/surfaces. for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) { pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL); } @@ -4820,9 +5627,28 @@ iris_destroy_state(struct iris_context *ice) for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) { struct iris_shader_state *shs = &ice->state.shaders[stage]; pipe_resource_reference(&shs->sampler_table.res, NULL); + for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) { + pipe_resource_reference(&shs->constbuf[i].buffer, NULL); + pipe_resource_reference(&shs->constbuf_surf_state[i].res, NULL); + } + for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) { + pipe_resource_reference(&shs->image[i].base.resource, NULL); + pipe_resource_reference(&shs->image[i].surface_state.res, NULL); + } + for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) { + pipe_resource_reference(&shs->ssbo[i].buffer, NULL); + pipe_resource_reference(&shs->ssbo_surf_state[i].res, NULL); + } + for (int i = 0; i < IRIS_MAX_TEXTURE_SAMPLERS; i++) { + pipe_sampler_view_reference((struct pipe_sampler_view **) + &shs->textures[i], NULL); + } } - free(ice->state.genx); + pipe_resource_reference(&ice->state.grid_size.res, NULL); + pipe_resource_reference(&ice->state.grid_surf_state.res, NULL); + + pipe_resource_reference(&ice->state.null_fb.res, NULL); pipe_resource_reference(&ice->state.unbound_tex.res, NULL); pipe_resource_reference(&ice->state.last_res.cc_vp, NULL); @@ -4835,6 +5661,130 @@ iris_destroy_state(struct iris_context *ice) /* ------------------------------------------------------------------- */ +static void +iris_rebind_buffer(struct iris_context *ice, + struct iris_resource *res, + uint64_t old_address) +{ + struct pipe_context *ctx = &ice->ctx; + struct iris_screen *screen = (void *) ctx->screen; + struct iris_genx_state *genx = ice->state.genx; + + assert(res->base.target == PIPE_BUFFER); + + /* Buffers can't be framebuffer attachments, nor display related, + * and we don't have upstream Clover support. 
+ */ + assert(!(res->bind_history & (PIPE_BIND_DEPTH_STENCIL | + PIPE_BIND_RENDER_TARGET | + PIPE_BIND_BLENDABLE | + PIPE_BIND_DISPLAY_TARGET | + PIPE_BIND_CURSOR | + PIPE_BIND_COMPUTE_RESOURCE | + PIPE_BIND_GLOBAL))); + + if (res->bind_history & PIPE_BIND_VERTEX_BUFFER) { + uint64_t bound_vbs = ice->state.bound_vertex_buffers; + while (bound_vbs) { + const int i = u_bit_scan64(&bound_vbs); + struct iris_vertex_buffer_state *state = &genx->vertex_buffers[i]; + + /* Update the CPU struct */ + STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32); + STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64); + uint64_t *addr = (uint64_t *) &state->state[1]; + + if (*addr == old_address) { + *addr = res->bo->gtt_offset; + ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS; + } + } + } + + /* No need to handle these: + * - PIPE_BIND_INDEX_BUFFER (emitted for every indexed draw) + * - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw) + * - PIPE_BIND_QUERY_BUFFER (no persistent state references) + */ + + if (res->bind_history & PIPE_BIND_STREAM_OUTPUT) { + /* XXX: be careful about resetting vs appending... */ + assert(false); + } + + for (int s = MESA_SHADER_VERTEX; s < MESA_SHADER_STAGES; s++) { + struct iris_shader_state *shs = &ice->state.shaders[s]; + enum pipe_shader_type p_stage = stage_to_pipe(s); + + if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) { + /* Skip constant buffer 0, it's for regular uniforms, not UBOs */ + uint32_t bound_cbufs = shs->bound_cbufs & ~1u; + while (bound_cbufs) { + const int i = u_bit_scan(&bound_cbufs); + struct pipe_shader_buffer *cbuf = &shs->constbuf[i]; + struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i]; + + if (res->bo == iris_resource_bo(cbuf->buffer)) { + upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false); + ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << s; + } + } + } + + if (res->bind_history & PIPE_BIND_SHADER_BUFFER) { + uint32_t bound_ssbos = shs->bound_ssbos; + while (bound_ssbos) { + const int i = u_bit_scan(&bound_ssbos); + struct pipe_shader_buffer *ssbo = &shs->ssbo[i]; + + if (res->bo == iris_resource_bo(ssbo->buffer)) { + struct pipe_shader_buffer buf = { + .buffer = &res->base, + .buffer_offset = ssbo->buffer_offset, + .buffer_size = ssbo->buffer_size, + }; + iris_set_shader_buffers(ctx, p_stage, i, 1, &buf, + (shs->writable_ssbos >> i) & 1); + } + } + } + + if (res->bind_history & PIPE_BIND_SAMPLER_VIEW) { + uint32_t bound_sampler_views = shs->bound_sampler_views; + while (bound_sampler_views) { + const int i = u_bit_scan(&bound_sampler_views); + struct iris_sampler_view *isv = shs->textures[i]; + + if (res->bo == iris_resource_bo(isv->base.texture)) { + void *map = alloc_surface_states(ice->state.surface_uploader, + &isv->surface_state, + isv->res->aux.sampler_usages); + assert(map); + fill_buffer_surface_state(&screen->isl_dev, isv->res, map, + isv->view.format, isv->view.swizzle, + isv->base.u.buf.offset, + isv->base.u.buf.size); + ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << s; + } + } + } + + if (res->bind_history & PIPE_BIND_SHADER_IMAGE) { + uint32_t bound_image_views = shs->bound_image_views; + while (bound_image_views) { + const int i = u_bit_scan(&bound_image_views); + struct iris_image_view *iv = &shs->image[i]; + + if (res->bo == iris_resource_bo(iv->base.resource)) { + iris_set_shader_images(ctx, p_stage, i, 1, &iv->base); + } + } + } + } +} + +/* ------------------------------------------------------------------- */ + static void 
iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
                         uint32_t src)
@@ -5370,6 +6320,102 @@ iris_emit_raw_pipe_control(struct iris_batch *batch, uint32_t flags,
    }
 }
 
+void
+genX(emit_urb_setup)(struct iris_context *ice,
+                     struct iris_batch *batch,
+                     const unsigned size[4],
+                     bool tess_present, bool gs_present)
+{
+   const struct gen_device_info *devinfo = &batch->screen->devinfo;
+   const unsigned push_size_kB = 32;
+   unsigned entries[4];
+   unsigned start[4];
+
+   ice->shaders.last_vs_entry_size = size[MESA_SHADER_VERTEX];
+
+   gen_get_urb_config(devinfo, 1024 * push_size_kB,
+                      1024 * ice->shaders.urb_size,
+                      tess_present, gs_present,
+                      size, entries, start);
+
+   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
+      iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
+         urb._3DCommandSubOpcode += i;
+         urb.VSURBStartingAddress = start[i];
+         urb.VSURBEntryAllocationSize = size[i] - 1;
+         urb.VSNumberofURBEntries = entries[i];
+      }
+   }
+}
+
+#if GEN_GEN == 9
+/**
+ * Preemption on Gen9 has to be enabled or disabled in various cases.
+ *
+ * See these workarounds for preemption:
+ *  - WaDisableMidObjectPreemptionForGSLineStripAdj
+ *  - WaDisableMidObjectPreemptionForTrifanOrPolygon
+ *  - WaDisableMidObjectPreemptionForLineLoop
+ *  - WA#0798
+ *
+ * We don't put this in the vtable because it's only used on Gen9.
+ */
+void
+gen9_toggle_preemption(struct iris_context *ice,
+                       struct iris_batch *batch,
+                       const struct pipe_draw_info *draw)
+{
+   struct iris_genx_state *genx = ice->state.genx;
+   bool object_preemption = true;
+
+   /* WaDisableMidObjectPreemptionForGSLineStripAdj
+    *
+    *    "WA: Disable mid-draw preemption when draw-call is a linestrip_adj
+    *     and GS is enabled."
+    */
+   if (draw->mode == PIPE_PRIM_LINE_STRIP_ADJACENCY &&
+       ice->shaders.prog[MESA_SHADER_GEOMETRY])
+      object_preemption = false;
+
+   /* WaDisableMidObjectPreemptionForTrifanOrPolygon
+    *
+    *    "TriFan miscompare in Execlist Preemption test. Cut index that is
+    *     on a previous context. End the previous, then resume another context
+    *     with a tri-fan or polygon, and the vertex count is corrupted. If we
+    *     preempt again we will cause corruption.
+    *
+    *     WA: Disable mid-draw preemption when draw-call has a tri-fan."
+    */
+   if (draw->mode == PIPE_PRIM_TRIANGLE_FAN)
+      object_preemption = false;
+
+   /* WaDisableMidObjectPreemptionForLineLoop
+    *
+    *    "VF Stats Counters Missing a vertex when preemption enabled.
+    *
+    *     WA: Disable mid-draw preemption when the draw uses a lineloop
+    *     topology."
+    */
+   if (draw->mode == PIPE_PRIM_LINE_LOOP)
+      object_preemption = false;
+
+   /* WA#0798
+    *
+    *    "VF is corrupting GAFS data when preempted on an instance boundary
+    *     and replayed with instancing enabled.
+    *
+    *     WA: Disable preemption when using instancing."
+ */ + if (draw->instance_count > 1) + object_preemption = false; + + if (genx->object_preemption != object_preemption) { + iris_enable_obj_preemption(batch, object_preemption); + genx->object_preemption = object_preemption; + } +} +#endif + void genX(init_state)(struct iris_context *ice) { @@ -5422,6 +6468,7 @@ genX(init_state)(struct iris_context *ice) ice->vtbl.update_surface_base_address = iris_update_surface_base_address; ice->vtbl.upload_compute_state = iris_upload_compute_state; ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control; + ice->vtbl.rebind_buffer = iris_rebind_buffer; ice->vtbl.load_register_reg32 = iris_load_register_reg32; ice->vtbl.load_register_reg64 = iris_load_register_reg64; ice->vtbl.load_register_imm32 = iris_load_register_imm32; @@ -5442,6 +6489,7 @@ genX(init_state)(struct iris_context *ice) ice->vtbl.populate_gs_key = iris_populate_gs_key; ice->vtbl.populate_fs_key = iris_populate_fs_key; ice->vtbl.populate_cs_key = iris_populate_cs_key; + ice->vtbl.mocs = mocs; ice->state.dirty = ~0ull;