X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fradeonsi%2Fsi_state.c;h=da3c7debd5780519675aa70cc2279eb89c7be5e2;hb=8d8f1ef573932679145b5aa8e2bafbf4c82701ef;hp=3f471a3672e68f93510366a727fef63b7c68f802;hpb=2b7fd9df9a3b6cf254497b44ac05bf326073b782;p=mesa.git

diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c
index 3f471a3672e..da3c7debd57 100644
--- a/src/gallium/drivers/radeonsi/si_state.c
+++ b/src/gallium/drivers/radeonsi/si_state.c
@@ -74,11 +74,6 @@ static unsigned si_map_swizzle(unsigned swizzle)
 	}
 }
 
-static uint32_t S_FIXED(float value, uint32_t frac_bits)
-{
-	return value * (1 << frac_bits);
-}
-
 /* 12.4 fixed-point */
 static unsigned si_pack_float_12p4(float x)
 {
@@ -120,7 +115,7 @@ static void si_emit_cb_render_state(struct si_context *sctx, struct r600_atom *a
 	/* GFX9: Flush DFSM when CB_TARGET_MASK changes.
 	 * I think we don't have to do anything between IBs.
 	 */
-	if (sctx->b.chip_class >= GFX9 &&
+	if (sctx->screen->dfsm_allowed &&
 	    sctx->last_cb_target_mask != cb_target_mask) {
 		sctx->last_cb_target_mask = cb_target_mask;
 
@@ -446,6 +441,8 @@ static void *si_create_blend_state_mode(struct pipe_context *ctx,
 		blend->need_src_alpha_4bit |= 0xf;
 
 	blend->cb_target_mask = 0;
+	blend->cb_target_enabled_4bit = 0;
+
 	for (int i = 0; i < 8; i++) {
 		/* state->rt entries > 0 only written if independent blending */
 		const int j = state->independent_blend_enable ? i : 0;
@@ -487,6 +484,8 @@ static void *si_create_blend_state_mode(struct pipe_context *ctx,
 
 		/* cb_render_state will disable unused ones */
 		blend->cb_target_mask |= (unsigned)state->rt[j].colormask << (4 * i);
+		if (state->rt[j].colormask)
+			blend->cb_target_enabled_4bit |= 0xf << (4 * i);
 
 		if (!state->rt[j].colormask || !state->rt[j].blend_enable) {
 			si_pm4_set_reg(pm4, R_028780_CB_BLEND0_CONTROL + i * 4, blend_cntl);
@@ -603,9 +602,34 @@ static void *si_create_blend_state(struct pipe_context *ctx,
 static void si_bind_blend_state(struct pipe_context *ctx, void *state)
 {
 	struct si_context *sctx = (struct si_context *)ctx;
-	si_pm4_bind_state(sctx, blend, (struct si_state_blend *)state);
-	si_mark_atom_dirty(sctx, &sctx->cb_render_state);
-	sctx->do_update_shaders = true;
+	struct si_state_blend *old_blend = sctx->queued.named.blend;
+	struct si_state_blend *blend = (struct si_state_blend *)state;
+
+	if (!state)
+		return;
+
+	if (!old_blend ||
+	    old_blend->cb_target_mask != blend->cb_target_mask ||
+	    old_blend->dual_src_blend != blend->dual_src_blend)
+		si_mark_atom_dirty(sctx, &sctx->cb_render_state);
+
+	si_pm4_bind_state(sctx, blend, state);
+
+	if (!old_blend ||
+	    old_blend->cb_target_mask != blend->cb_target_mask ||
+	    old_blend->alpha_to_coverage != blend->alpha_to_coverage ||
+	    old_blend->alpha_to_one != blend->alpha_to_one ||
+	    old_blend->dual_src_blend != blend->dual_src_blend ||
+	    old_blend->blend_enable_4bit != blend->blend_enable_4bit ||
+	    old_blend->need_src_alpha_4bit != blend->need_src_alpha_4bit)
+		sctx->do_update_shaders = true;
+
+	if (sctx->screen->dpbb_allowed &&
+	    (!old_blend ||
+	     old_blend->alpha_to_coverage != blend->alpha_to_coverage ||
+	     old_blend->blend_enable_4bit != blend->blend_enable_4bit ||
+	     old_blend->cb_target_enabled_4bit != blend->cb_target_enabled_4bit))
+		si_mark_atom_dirty(sctx, &sctx->dpbb_state);
 }
 
 static void si_delete_blend_state(struct pipe_context *ctx, void *state)
@@ -618,8 +642,10 @@
 static void si_set_blend_color(struct pipe_context *ctx,
 			       const struct pipe_blend_color *state)
 {
 	struct si_context *sctx = (struct si_context *)ctx;
+	static const struct pipe_blend_color zeros;
 
 	sctx->blend_color.state = *state;
+	sctx->blend_color.any_nonzeros = memcmp(state, &zeros, sizeof(*state)) != 0;
 	si_mark_atom_dirty(sctx, &sctx->blend_color.atom);
 }
 
@@ -640,11 +666,13 @@ static void si_set_clip_state(struct pipe_context *ctx,
 {
 	struct si_context *sctx = (struct si_context *)ctx;
 	struct pipe_constant_buffer cb;
+	static const struct pipe_clip_state zeros;
 
 	if (memcmp(&sctx->clip_state.state, state, sizeof(*state)) == 0)
 		return;
 
 	sctx->clip_state.state = *state;
+	sctx->clip_state.any_nonzeros = memcmp(state, &zeros, sizeof(*state)) != 0;
 	si_mark_atom_dirty(sctx, &sctx->clip_state.atom);
 
 	cb.buffer = NULL;
@@ -677,7 +705,7 @@ static void si_emit_clip_regs(struct si_context *sctx, struct r600_atom *atom)
 	unsigned culldist_mask = vs_sel->culldist_mask;
 	unsigned total_mask;
 
-	if (vs->key.opt.hw_vs.clip_disable) {
+	if (vs->key.opt.clip_disable) {
 		assert(!info->culldist_writemask);
 		clipdist_mask = 0;
 		culldist_mask = 0;
@@ -702,12 +730,6 @@ static void si_emit_clip_regs(struct si_context *sctx, struct r600_atom *atom)
 		rs->pa_cl_clip_cntl |
 		ucp_mask |
 		S_028810_CLIP_DISABLE(window_space));
-
-	if (sctx->b.chip_class <= VI) {
-		/* reuse needs to be set off if we write oViewport */
-		radeon_set_context_reg(cs, R_028AB4_VGT_REUSE_OFF,
-				       S_028AB4_REUSE_OFF(info->writes_viewport_index));
-	}
 }
 
 /*
@@ -852,6 +874,15 @@ static void *si_create_rs_state(struct pipe_context *ctx,
 		S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(state->fill_front)) |
 		S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(state->fill_back)));
 
+	if (!rs->uses_poly_offset)
+		return rs;
+
+	rs->pm4_poly_offset = CALLOC(3, sizeof(struct si_pm4_state));
+	if (!rs->pm4_poly_offset) {
+		FREE(rs);
+		return NULL;
+	}
+
 	/* Precalculate polygon offset states for 16-bit, 24-bit, and 32-bit zbuffers. */
 	for (i = 0; i < 3; i++) {
 		struct si_pm4_state *pm4 = &rs->pm4_poly_offset[i];
@@ -921,19 +952,39 @@ static void si_bind_rs_state(struct pipe_context *ctx, void *state)
 	si_pm4_bind_state(sctx, rasterizer, rs);
 	si_update_poly_offset_state(sctx);
 
-	si_mark_atom_dirty(sctx, &sctx->clip_regs);
+	if (!old_rs ||
+	    old_rs->clip_plane_enable != rs->clip_plane_enable ||
+	    old_rs->pa_cl_clip_cntl != rs->pa_cl_clip_cntl)
+		si_mark_atom_dirty(sctx, &sctx->clip_regs);
+
 	sctx->ia_multi_vgt_param_key.u.line_stipple_enabled =
 		rs->line_stipple_enable;
-	sctx->do_update_shaders = true;
+
+	if (!old_rs ||
+	    old_rs->clip_plane_enable != rs->clip_plane_enable ||
+	    old_rs->rasterizer_discard != rs->rasterizer_discard ||
+	    old_rs->sprite_coord_enable != rs->sprite_coord_enable ||
+	    old_rs->flatshade != rs->flatshade ||
+	    old_rs->two_side != rs->two_side ||
+	    old_rs->multisample_enable != rs->multisample_enable ||
+	    old_rs->poly_stipple_enable != rs->poly_stipple_enable ||
+	    old_rs->poly_smooth != rs->poly_smooth ||
+	    old_rs->line_smooth != rs->line_smooth ||
+	    old_rs->clamp_fragment_color != rs->clamp_fragment_color ||
+	    old_rs->force_persample_interp != rs->force_persample_interp)
+		sctx->do_update_shaders = true;
 }
 
 static void si_delete_rs_state(struct pipe_context *ctx, void *state)
 {
 	struct si_context *sctx = (struct si_context *)ctx;
+	struct si_state_rasterizer *rs = (struct si_state_rasterizer *)state;
 
 	if (sctx->queued.named.rasterizer == state)
 		si_pm4_bind_state(sctx, poly_offset, NULL);
-	si_pm4_delete_state(sctx, rasterizer, (struct si_state_rasterizer *)state);
+
+	FREE(rs->pm4_poly_offset);
+	si_pm4_delete_state(sctx, rasterizer, rs);
 }
 
 /*
@@ -1000,6 +1051,14 @@ static uint32_t si_translate_stencil_op(int s_op)
 	return 0;
 }
 
+static bool si_dsa_writes_stencil(const struct pipe_stencil_state *s)
+{
+	return s->enabled && s->writemask &&
+	       (s->fail_op != PIPE_STENCIL_OP_KEEP ||
+		s->zfail_op != PIPE_STENCIL_OP_KEEP ||
+		s->zpass_op != PIPE_STENCIL_OP_KEEP);
+}
+
 static void *si_create_dsa_state(struct pipe_context *ctx,
 				 const struct pipe_depth_stencil_alpha_state *state)
 {
@@ -1050,18 +1109,29 @@ static void *si_create_dsa_state(struct pipe_context *ctx,
 	}
 
 	si_pm4_set_reg(pm4, R_028800_DB_DEPTH_CONTROL, db_depth_control);
-	si_pm4_set_reg(pm4, R_02842C_DB_STENCIL_CONTROL, db_stencil_control);
+	if (state->stencil[0].enabled)
+		si_pm4_set_reg(pm4, R_02842C_DB_STENCIL_CONTROL, db_stencil_control);
 	if (state->depth.bounds_test) {
 		si_pm4_set_reg(pm4, R_028020_DB_DEPTH_BOUNDS_MIN, fui(state->depth.bounds_min));
 		si_pm4_set_reg(pm4, R_028024_DB_DEPTH_BOUNDS_MAX, fui(state->depth.bounds_max));
 	}
 
+	dsa->depth_enabled = state->depth.enabled;
+	dsa->depth_write_enabled = state->depth.enabled &&
+				   state->depth.writemask;
+	dsa->stencil_enabled = state->stencil[0].enabled;
+	dsa->stencil_write_enabled = state->stencil[0].enabled &&
+				     (si_dsa_writes_stencil(&state->stencil[0]) ||
+				      si_dsa_writes_stencil(&state->stencil[1]));
+	dsa->db_can_write = dsa->depth_write_enabled ||
+			    dsa->stencil_write_enabled;
 	return dsa;
 }
 
 static void si_bind_dsa_state(struct pipe_context *ctx, void *state)
 {
 	struct si_context *sctx = (struct si_context *)ctx;
+	struct si_state_dsa *old_dsa = sctx->queued.named.dsa;
 	struct si_state_dsa *dsa = state;
 
 	if (!state)
@@ -1074,7 +1144,16 @@ static void si_bind_dsa_state(struct pipe_context *ctx, void *state)
 		sctx->stencil_ref.dsa_part = dsa->stencil_ref;
 		si_mark_atom_dirty(sctx, &sctx->stencil_ref.atom);
 	}
-	sctx->do_update_shaders = true;
+
+	if (!old_dsa || old_dsa->alpha_func != dsa->alpha_func)
+		sctx->do_update_shaders = true;
+
+	if (sctx->screen->dpbb_allowed &&
+	    (!old_dsa ||
+	     (old_dsa->depth_enabled != dsa->depth_enabled ||
+	      old_dsa->stencil_enabled != dsa->stencil_enabled ||
+	      old_dsa->db_can_write != dsa->db_can_write)))
+		si_mark_atom_dirty(sctx, &sctx->dpbb_state);
 }
 
 static void si_delete_dsa_state(struct pipe_context *ctx, void *state)
@@ -2096,36 +2175,36 @@ static void si_initialize_color_surface(struct si_context *sctx,
 	unsigned color_info, color_attrib, color_view;
 	unsigned format, swap, ntype, endian;
 	const struct util_format_description *desc;
-	int i;
+	int firstchan;
 	unsigned blend_clamp = 0, blend_bypass = 0;
 
 	color_view = S_028C6C_SLICE_START(surf->base.u.tex.first_layer) |
 		     S_028C6C_SLICE_MAX(surf->base.u.tex.last_layer);
 
 	desc = util_format_description(surf->base.format);
-	for (i = 0; i < 4; i++) {
-		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
+	for (firstchan = 0; firstchan < 4; firstchan++) {
+		if (desc->channel[firstchan].type != UTIL_FORMAT_TYPE_VOID) {
 			break;
 		}
 	}
-	if (i == 4 || desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT) {
+	if (firstchan == 4 || desc->channel[firstchan].type == UTIL_FORMAT_TYPE_FLOAT) {
 		ntype = V_028C70_NUMBER_FLOAT;
 	} else {
 		ntype = V_028C70_NUMBER_UNORM;
 		if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
 			ntype = V_028C70_NUMBER_SRGB;
-		else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
-			if (desc->channel[i].pure_integer) {
+		else if (desc->channel[firstchan].type == UTIL_FORMAT_TYPE_SIGNED) {
+			if (desc->channel[firstchan].pure_integer) {
 				ntype = V_028C70_NUMBER_SINT;
 			} else {
-				assert(desc->channel[i].normalized);
+				assert(desc->channel[firstchan].normalized);
 				ntype = V_028C70_NUMBER_SNORM;
 			}
-		} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
-			if (desc->channel[i].pure_integer) {
+		} else if (desc->channel[firstchan].type == UTIL_FORMAT_TYPE_UNSIGNED) {
+			if (desc->channel[firstchan].pure_integer) {
 				ntype = V_028C70_NUMBER_UINT;
 			} else {
-				assert(desc->channel[i].normalized);
+				assert(desc->channel[firstchan].normalized);
 				ntype = V_028C70_NUMBER_UNORM;
 			}
 		}
@@ -2248,7 +2327,7 @@ static void si_init_depth_surface(struct si_context *sctx,
 	uint32_t z_info, s_info;
 
 	format = si_translate_dbformat(rtex->db_render_format);
-	stencil_format = rtex->surface.flags & RADEON_SURF_SBUFFER ?
+	stencil_format = rtex->surface.has_stencil ?
 				 V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;
 
 	assert(format != V_028040_Z_INVALID);
@@ -2277,8 +2356,7 @@ static void si_init_depth_surface(struct si_context *sctx,
 		surf->db_depth_size = S_02801C_X_MAX(rtex->resource.b.b.width0 - 1) |
 				      S_02801C_Y_MAX(rtex->resource.b.b.height0 - 1);
 
-		/* Only use HTILE for the first level. */
-		if (rtex->htile_buffer && !level) {
+		if (r600_htile_enabled(rtex, level)) {
 			z_info |= S_028038_TILE_SURFACE_ENABLE(1) |
 				  S_028038_ALLOW_EXPCLEAR(1);
 
@@ -2294,7 +2372,7 @@ static void si_init_depth_surface(struct si_context *sctx,
 				s_info |= S_02803C_ITERATE_FLUSH(1);
 			}
 
-			if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
+			if (rtex->surface.has_stencil) {
 				/* Stencil buffer workaround ported from the SI-CI-VI code.
 				 * See that for explanation.
 				 */
@@ -2304,7 +2382,8 @@ static void si_init_depth_surface(struct si_context *sctx,
 				s_info |= S_02803C_TILE_STENCIL_DISABLE(1);
 			}
 
-			surf->db_htile_data_base = rtex->htile_buffer->gpu_address >> 8;
+			surf->db_htile_data_base = (rtex->resource.gpu_address +
+						    rtex->htile_offset) >> 8;
 			surf->db_htile_surface = S_028ABC_FULL_CACHE(1) |
 						 S_028ABC_PIPE_ALIGNED(rtex->surface.u.gfx9.htile.pipe_aligned) |
 						 S_028ABC_RB_ALIGNED(rtex->surface.u.gfx9.htile.rb_aligned);
@@ -2355,12 +2434,11 @@ static void si_init_depth_surface(struct si_context *sctx,
 		surf->db_depth_slice = S_02805C_SLICE_TILE_MAX((levelinfo->nblk_x *
 								levelinfo->nblk_y) / 64 - 1);
 
-		/* Only use HTILE for the first level. */
-		if (rtex->htile_buffer && !level) {
+		if (r600_htile_enabled(rtex, level)) {
 			z_info |= S_028040_TILE_SURFACE_ENABLE(1) |
 				  S_028040_ALLOW_EXPCLEAR(1);
 
-			if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
+			if (rtex->surface.has_stencil) {
 				/* Workaround: For a not yet understood reason, the
 				 * combination of MSAA, fast stencil clear and stencil
 				 * decompress messes with subsequent stencil buffer
@@ -2382,7 +2460,8 @@ static void si_init_depth_surface(struct si_context *sctx,
 				s_info |= S_028044_TILE_STENCIL_DISABLE(1);
 			}
 
-			surf->db_htile_data_base = rtex->htile_buffer->gpu_address >> 8;
+			surf->db_htile_data_base = (rtex->resource.gpu_address +
+						    rtex->htile_offset) >> 8;
 			surf->db_htile_surface = S_028ABC_FULL_CACHE(1);
 
 			if (rtex->tc_compatible_htile) {
@@ -2404,6 +2483,38 @@ static void si_init_depth_surface(struct si_context *sctx,
 	surf->depth_initialized = true;
 }
 
+void si_update_fb_dirtiness_after_rendering(struct si_context *sctx)
+{
+	if (sctx->decompression_enabled)
+		return;
+
+	if (sctx->framebuffer.state.zsbuf) {
+		struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
+		struct r600_texture *rtex = (struct r600_texture *)surf->texture;
+
+		rtex->dirty_level_mask |= 1 << surf->u.tex.level;
+
+		if (rtex->surface.has_stencil)
+			rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
+	}
+	if (sctx->framebuffer.compressed_cb_mask) {
+		struct pipe_surface *surf;
+		struct r600_texture *rtex;
+		unsigned mask = sctx->framebuffer.compressed_cb_mask;
+
+		do {
+			unsigned i = u_bit_scan(&mask);
+			surf = sctx->framebuffer.state.cbufs[i];
+			rtex = (struct r600_texture*)surf->texture;
+
+			if (rtex->fmask.size)
+				rtex->dirty_level_mask |= 1 << surf->u.tex.level;
+			if (rtex->dcc_gather_statistics)
+				rtex->separate_dcc_dirty = true;
+		} while (mask);
+	}
+}
+
 static void si_dec_framebuffer_counters(const struct pipe_framebuffer_state *state)
 {
 	for (int i = 0; i < state->nr_cbufs; ++i) {
@@ -2431,6 +2542,8 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
 	bool unbound = false;
 	int i;
 
+	si_update_fb_dirtiness_after_rendering(sctx);
+
 	for (i = 0; i < sctx->framebuffer.state.nr_cbufs; i++) {
 		if (!sctx->framebuffer.state.cbufs[i])
 			continue;
@@ -2472,14 +2585,44 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
 	 * the only client not using TC that can change textures is
 	 * the framebuffer.
 	 *
-	 * Flush all CB and DB caches here because all buffers can be used
-	 * for write by both TC (with shader image stores) and CB/DB.
+	 * Wait for compute shaders because of possible transitions:
+	 * - FB write -> shader read
+	 * - shader write -> FB read
+	 *
+	 * DB caches are flushed on demand (using si_decompress_textures).
+	 *
+	 * When MSAA is enabled, CB and TC caches are flushed on demand
+	 * (after FMASK decompression). Shader write -> FB read transitions
+	 * cannot happen for MSAA textures, because MSAA shader images are
+	 * not supported.
+	 *
+	 * Only flush and wait for CB if there is actually a bound color buffer.
 	 */
-	sctx->b.flags |= SI_CONTEXT_INV_VMEM_L1 |
-			 SI_CONTEXT_INV_GLOBAL_L2 |
-			 SI_CONTEXT_FLUSH_AND_INV_CB |
-			 SI_CONTEXT_FLUSH_AND_INV_DB |
-			 SI_CONTEXT_CS_PARTIAL_FLUSH;
+	if (sctx->framebuffer.nr_samples <= 1 &&
+	    sctx->framebuffer.state.nr_cbufs)
+		si_make_CB_shader_coherent(sctx, sctx->framebuffer.nr_samples,
+					   sctx->framebuffer.CB_has_shader_readable_metadata);
+
+	sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
+
+	/* u_blitter doesn't invoke depth decompression when it does multiple
+	 * blits in a row, but the only case when it matters for DB is when
+	 * doing generate_mipmap. So here we flush DB manually between
+	 * individual generate_mipmap blits.
+	 * Note that lower mipmap levels aren't compressed.
+	 */
+	if (sctx->generate_mipmap_for_depth) {
+		si_make_DB_shader_coherent(sctx, 1, false,
+					   sctx->framebuffer.DB_has_shader_readable_metadata);
+	} else if (sctx->b.chip_class == GFX9) {
+		/* It appears that DB metadata "leaks" in a sequence of:
+		 *  - depth clear
+		 *  - DCC decompress for shader image writes (with DB disabled)
+		 *  - render with DEPTH_BEFORE_SHADER=1
+		 * Flushing DB metadata works around the problem.
+		 */
+		sctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_DB_META;
+	}
 
 	/* Take the maximum of the old and new count. If the new count is lower,
 	 * dirtying is needed to disable the unbound colorbuffers.
@@ -2503,6 +2646,8 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
 	sctx->framebuffer.nr_samples = util_framebuffer_get_num_samples(state);
 	sctx->framebuffer.log_samples = util_logbase2(sctx->framebuffer.nr_samples);
 	sctx->framebuffer.any_dst_linear = false;
+	sctx->framebuffer.CB_has_shader_readable_metadata = false;
+	sctx->framebuffer.DB_has_shader_readable_metadata = false;
 
 	for (i = 0; i < state->nr_cbufs; i++) {
 		if (!state->cbufs[i])
@@ -2537,6 +2682,9 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
 		if (rtex->surface.is_linear)
 			sctx->framebuffer.any_dst_linear = true;
 
+		if (vi_dcc_enabled(rtex, surf->base.u.tex.level))
+			sctx->framebuffer.CB_has_shader_readable_metadata = true;
+
 		r600_context_add_resource_size(ctx, surf->base.texture);
 
 		p_atomic_inc(&rtex->framebuffers_bound);
@@ -2555,6 +2703,10 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
 		if (!surf->depth_initialized) {
 			si_init_depth_surface(sctx, surf);
 		}
+
+		if (vi_tc_compat_htile_enabled(rtex, surf->base.u.tex.level))
+			sctx->framebuffer.DB_has_shader_readable_metadata = true;
+
 		r600_context_add_resource_size(ctx, surf->base.texture);
 	}
 
@@ -2562,6 +2714,9 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
 	si_mark_atom_dirty(sctx, &sctx->cb_render_state);
 	si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
 
+	if (sctx->screen->dpbb_allowed)
+		si_mark_atom_dirty(sctx, &sctx->dpbb_state);
+
 	if (sctx->framebuffer.any_dst_linear != old_any_dst_linear)
 		si_mark_atom_dirty(sctx, &sctx->msaa_config);
 
@@ -2604,7 +2759,6 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
 		 * changes come from the decompression passes themselves.
 		 */
 		sctx->need_check_render_feedback = true;
-		sctx->framebuffer.do_update_surf_dirtiness = true;
 	}
 }
 
@@ -2653,13 +2807,15 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
 
 		/* Compute mutable surface parameters. */
 		cb_color_base = tex->resource.gpu_address >> 8;
-		cb_color_fmask = cb_color_base;
+		cb_color_fmask = 0;
 		cb_dcc_base = 0;
 		cb_color_info = cb->cb_color_info | tex->cb_color_info;
 		cb_color_attrib = cb->cb_color_attrib;
 
-		if (tex->fmask.size)
+		if (tex->fmask.size) {
 			cb_color_fmask = (tex->resource.gpu_address + tex->fmask.offset) >> 8;
+			cb_color_fmask |= tex->fmask.tile_swizzle;
+		}
 
 		/* Set up DCC. */
 		if (vi_dcc_enabled(tex, cb->base.u.tex.level)) {
@@ -2673,6 +2829,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
 			cb_dcc_base = ((!tex->dcc_separate_buffer ? tex->resource.gpu_address : 0) +
 				       tex->dcc_offset) >> 8;
+			cb_dcc_base |= tex->surface.tile_swizzle;
 		}
 
 		if (sctx->b.chip_class >= GFX9) {
@@ -2685,6 +2842,9 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
 			/* Set mutable surface parameters. */
 			cb_color_base += tex->surface.u.gfx9.surf_offset >> 8;
+			cb_color_base |= tex->surface.tile_swizzle;
+			if (!tex->fmask.size)
+				cb_color_fmask = cb_color_base;
 			cb_color_attrib |= S_028C74_COLOR_SW_MODE(tex->surface.u.gfx9.surf.swizzle_mode) |
 					   S_028C74_FMASK_SW_MODE(tex->surface.u.gfx9.fmask.swizzle_mode) |
 					   S_028C74_RB_ALIGNED(meta.rb_aligned) |
@@ -2717,6 +2877,12 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
 			unsigned cb_color_pitch, cb_color_slice, cb_color_fmask_slice;
 
 			cb_color_base += level_info->offset >> 8;
+			/* Only macrotiled modes can set tile swizzle. */
+			if (level_info->mode == RADEON_SURF_MODE_2D)
+				cb_color_base |= tex->surface.tile_swizzle;
+
+			if (!tex->fmask.size)
+				cb_color_fmask = cb_color_base;
 			if (cb_dcc_base)
 				cb_dcc_base += level_info->dcc_offset >> 8;
 
@@ -2777,12 +2943,6 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
 				      RADEON_PRIO_DEPTH_BUFFER_MSAA :
 				      RADEON_PRIO_DEPTH_BUFFER);
 
-		if (zb->db_htile_data_base) {
-			radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
-					rtex->htile_buffer, RADEON_USAGE_READWRITE,
-					RADEON_PRIO_HTILE);
-		}
-
 		if (sctx->b.chip_class >= GFX9) {
 			radeon_set_context_reg_seq(cs, R_028014_DB_HTILE_DATA_BASE, 3);
 			radeon_emit(cs, zb->db_htile_data_base);	/* DB_HTILE_DATA_BASE */
@@ -2842,7 +3002,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
 	radeon_set_context_reg(cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
 			       S_028208_BR_X(state->width) | S_028208_BR_Y(state->height));
 
-	if (sctx->b.chip_class >= GFX9) {
+	if (sctx->screen->dfsm_allowed) {
 		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
 		radeon_emit(cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
 	}
@@ -2870,8 +3030,7 @@ static void si_emit_msaa_sample_locs(struct si_context *sctx,
 	if (has_msaa_sample_loc_bug)
 		nr_samples = MAX2(nr_samples, 1);
 
-	if (nr_samples >= 1 &&
-	    (nr_samples != sctx->msaa_sample_locs.nr_samples)) {
+	if (nr_samples != sctx->msaa_sample_locs.nr_samples) {
 		sctx->msaa_sample_locs.nr_samples = nr_samples;
 		cayman_emit_msaa_sample_locs(cs, nr_samples);
 	}
@@ -2921,7 +3080,7 @@ static void si_emit_msaa_config(struct si_context *sctx, struct r600_atom *atom)
 				     sc_mode_cntl_1);
 
 	/* GFX9: Flush DFSM when the AA mode changes. */
-	if (sctx->b.chip_class >= GFX9) {
+	if (sctx->screen->dfsm_allowed) {
 		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
 		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
 	}
@@ -2939,6 +3098,8 @@ static void si_set_min_samples(struct pipe_context *ctx, unsigned min_samples)
 
 	if (sctx->framebuffer.nr_samples > 1)
 		si_mark_atom_dirty(sctx, &sctx->msaa_config);
+	if (sctx->screen->dpbb_allowed)
+		si_mark_atom_dirty(sctx, &sctx->dpbb_state);
 }
 
 /*
@@ -3063,14 +3224,13 @@ si_make_texture_descriptor(struct si_screen *screen,
 		   uint32_t *fmask_state)
 {
 	struct pipe_resource *res = &tex->resource.b.b;
-	const struct util_format_description *base_desc, *desc;
+	const struct util_format_description *desc;
 	unsigned char swizzle[4];
 	int first_non_void;
 	unsigned num_format, data_format, type;
 	uint64_t va;
 
 	desc = util_format_description(pipe_format);
-	base_desc = util_format_description(res->format);
 
 	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS) {
 		const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0};
@@ -3171,14 +3331,11 @@ si_make_texture_descriptor(struct si_screen *screen,
 		data_format = 0;
 	}
 
-	/* Enable clamping for UNORM depth formats promoted to Z32F. */
+	/* S8 with Z32 HTILE needs a special format. */
 	if (screen->b.chip_class >= GFX9 &&
-	    util_format_has_depth(desc) &&
-	    num_format == V_008F14_IMG_NUM_FORMAT_FLOAT &&
-	    util_get_depth_format_type(base_desc) != UTIL_FORMAT_TYPE_FLOAT) {
-		/* NUM_FORMAT=FLOAT and DATA_FORMAT=24_8 means "clamp to [0,1]". */
-		data_format = V_008F14_IMG_DATA_FORMAT_24_8;
-	}
+	    pipe_format == PIPE_FORMAT_S8_UINT &&
+	    tex->tc_compatible_htile)
+		data_format = V_008F14_IMG_DATA_FORMAT_S8_32;
 
 	if (!sampler &&
 	    (res->target == PIPE_TEXTURE_CUBE ||
@@ -3303,7 +3460,7 @@ si_make_texture_descriptor(struct si_screen *screen,
 			num_format = V_008F14_IMG_NUM_FORMAT_UINT;
 		}
 
-		fmask_state[0] = va >> 8;
+		fmask_state[0] = (va >> 8) | tex->fmask.tile_swizzle;
 		fmask_state[1] = S_008F14_BASE_ADDRESS_HI(va >> 40) |
 				 S_008F14_DATA_FORMAT_GFX6(data_format) |
 				 S_008F14_NUM_FORMAT_GFX6(num_format);
@@ -3669,7 +3826,7 @@ static void *si_create_vertex_elements(struct pipe_context *ctx,
 					    const struct pipe_vertex_element *elements)
 {
 	struct si_screen *sscreen = (struct si_screen*)ctx->screen;
-	struct si_vertex_element *v = CALLOC_STRUCT(si_vertex_element);
+	struct si_vertex_elements *v = CALLOC_STRUCT(si_vertex_elements);
 	bool used[SI_NUM_VERTEX_BUFFERS] = {};
 	int i;
 
@@ -3693,6 +3850,16 @@ static void *si_create_vertex_elements(struct pipe_context *ctx,
 			return NULL;
 		}
 
+		if (elements[i].instance_divisor) {
+			v->uses_instance_divisors = true;
+			v->instance_divisors[i] = elements[i].instance_divisor;
+
+			if (v->instance_divisors[i] == 1)
+				v->instance_divisor_is_one |= 1u << i;
+			else
+				v->instance_divisor_is_fetched |= 1u << i;
+		}
+
 		if (!used[vbo_index]) {
 			v->first_vb_use_mask |= 1 << i;
 			used[vbo_index] = true;
@@ -3706,6 +3873,8 @@ static void *si_create_vertex_elements(struct pipe_context *ctx,
 
 		memcpy(swizzle, desc->swizzle, sizeof(swizzle));
 		v->format_size[i] = desc->block.bits / 8;
+		v->src_offset[i] = elements[i].src_offset;
+		v->vertex_buffer_index[i] = vbo_index;
 
 		/* The hardware always treats the 2-bit alpha channel as
 		 * unsigned, so a shader workaround is needed. The affected
@@ -3798,19 +3967,35 @@ static void *si_create_vertex_elements(struct pipe_context *ctx,
 				   S_008F0C_NUM_FORMAT(num_format) |
 				   S_008F0C_DATA_FORMAT(data_format);
 	}
-	memcpy(v->elements, elements, sizeof(struct pipe_vertex_element) * count);
-
 	return v;
 }
 
 static void si_bind_vertex_elements(struct pipe_context *ctx, void *state)
 {
 	struct si_context *sctx = (struct si_context *)ctx;
-	struct si_vertex_element *v = (struct si_vertex_element*)state;
+	struct si_vertex_elements *old = sctx->vertex_elements;
+	struct si_vertex_elements *v = (struct si_vertex_elements*)state;
 
 	sctx->vertex_elements = v;
 	sctx->vertex_buffers_dirty = true;
-	sctx->do_update_shaders = true;
+
+	if (v &&
+	    (!old ||
+	     old->count != v->count ||
+	     old->uses_instance_divisors != v->uses_instance_divisors ||
+	     v->uses_instance_divisors || /* we don't check which divisors changed */
+	     memcmp(old->fix_fetch, v->fix_fetch, sizeof(v->fix_fetch[0]) * v->count)))
+		sctx->do_update_shaders = true;
+
+	if (v && v->instance_divisor_is_fetched) {
+		struct pipe_constant_buffer cb;
+
+		cb.buffer = NULL;
+		cb.user_buffer = v->instance_divisors;
+		cb.buffer_offset = 0;
+		cb.buffer_size = sizeof(uint32_t) * v->count;
+		si_set_rw_buffer(sctx, SI_VS_CONST_INSTANCE_DIVISORS, &cb);
+	}
 }
 
 static void si_delete_vertex_element(struct pipe_context *ctx, void *state)
@@ -3884,10 +4069,13 @@ static void si_texture_barrier(struct pipe_context *ctx, unsigned flags)
 {
 	struct si_context *sctx = (struct si_context *)ctx;
 
-	sctx->b.flags |= SI_CONTEXT_INV_VMEM_L1 |
-			 SI_CONTEXT_INV_GLOBAL_L2 |
-			 SI_CONTEXT_FLUSH_AND_INV_CB;
-	sctx->framebuffer.do_update_surf_dirtiness = true;
+	si_update_fb_dirtiness_after_rendering(sctx);
+
+	/* Multisample surfaces are flushed in si_decompress_textures. */
+	if (sctx->framebuffer.nr_samples <= 1 &&
+	    sctx->framebuffer.state.nr_cbufs)
+		si_make_CB_shader_coherent(sctx, sctx->framebuffer.nr_samples,
+					   sctx->framebuffer.CB_has_shader_readable_metadata);
 }
 
 /* This only ensures coherency for shader image/buffer stores. */
@@ -3924,12 +4112,21 @@ static void si_memory_barrier(struct pipe_context *ctx, unsigned flags)
 		sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
 	}
 
-	if (flags & PIPE_BARRIER_FRAMEBUFFER)
-		sctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_CB |
-				 SI_CONTEXT_FLUSH_AND_INV_DB;
+	/* MSAA color, any depth and any stencil are flushed in
+	 * si_decompress_textures when needed.
+	 */
+	if (flags & PIPE_BARRIER_FRAMEBUFFER &&
+	    sctx->framebuffer.nr_samples <= 1 &&
+	    sctx->framebuffer.state.nr_cbufs) {
+		sctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_CB;
 
-	if (flags & (PIPE_BARRIER_FRAMEBUFFER |
-		     PIPE_BARRIER_INDIRECT_BUFFER))
+		if (sctx->b.chip_class <= VI)
+			sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+	}
+
+	/* Indirect buffers use TC L2 on GFX9, but not older hw. */
+	if (sctx->screen->b.chip_class <= VI &&
+	    flags & PIPE_BARRIER_INDIRECT_BUFFER)
 		sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
 }
 
@@ -3962,6 +4159,7 @@ void si_init_state_functions(struct si_context *sctx)
 	si_init_atom(sctx, &sctx->framebuffer.atom, &sctx->atoms.s.framebuffer, si_emit_framebuffer_state);
 	si_init_atom(sctx, &sctx->msaa_sample_locs.atom, &sctx->atoms.s.msaa_sample_locs, si_emit_msaa_sample_locs);
 	si_init_atom(sctx, &sctx->db_render_state, &sctx->atoms.s.db_render_state, si_emit_db_render_state);
+	si_init_atom(sctx, &sctx->dpbb_state, &sctx->atoms.s.dpbb_state, si_emit_dpbb_state);
 	si_init_atom(sctx, &sctx->msaa_config, &sctx->atoms.s.msaa_config, si_emit_msaa_config);
 	si_init_atom(sctx, &sctx->sample_mask.atom, &sctx->atoms.s.sample_mask, si_emit_sample_mask);
 	si_init_atom(sctx, &sctx->cb_render_state, &sctx->atoms.s.cb_render_state, si_emit_cb_render_state);
@@ -4126,6 +4324,25 @@ void si_init_screen_state_functions(struct si_screen *sscreen)
 	sscreen->b.apply_opaque_metadata = si_apply_opaque_metadata;
 }
 
+static void si_set_grbm_gfx_index(struct si_context *sctx,
+				  struct si_pm4_state *pm4, unsigned value)
+{
+	unsigned reg = sctx->b.chip_class >= CIK ? R_030800_GRBM_GFX_INDEX :
+						   GRBM_GFX_INDEX;
+	si_pm4_set_reg(pm4, reg, value);
+}
+
+static void si_set_grbm_gfx_index_se(struct si_context *sctx,
+				     struct si_pm4_state *pm4, unsigned se)
+{
+	assert(se == ~0 || se < sctx->screen->b.info.max_se);
+	si_set_grbm_gfx_index(sctx, pm4,
+			      (se == ~0 ? S_030800_SE_BROADCAST_WRITES(1) :
+					  S_030800_SE_INDEX(se)) |
+			      S_030800_SH_BROADCAST_WRITES(1) |
+			      S_030800_INSTANCE_BROADCAST_WRITES(1));
+}
+
 static void
 si_write_harvested_raster_configs(struct si_context *sctx,
 				  struct si_pm4_state *pm4,
@@ -4228,28 +4445,12 @@ si_write_harvested_raster_configs(struct si_context *sctx,
 			}
 		}
 
-		/* GRBM_GFX_INDEX has a different offset on SI and CI+ */
-		if (sctx->b.chip_class < CIK)
-			si_pm4_set_reg(pm4, GRBM_GFX_INDEX,
-				       SE_INDEX(se) | SH_BROADCAST_WRITES |
-				       INSTANCE_BROADCAST_WRITES);
-		else
-			si_pm4_set_reg(pm4, R_030800_GRBM_GFX_INDEX,
-				       S_030800_SE_INDEX(se) | S_030800_SH_BROADCAST_WRITES(1) |
-				       S_030800_INSTANCE_BROADCAST_WRITES(1));
+		si_set_grbm_gfx_index_se(sctx, pm4, se);
 		si_pm4_set_reg(pm4, R_028350_PA_SC_RASTER_CONFIG, raster_config_se);
 	}
+	si_set_grbm_gfx_index(sctx, pm4, ~0);
 
-	/* GRBM_GFX_INDEX has a different offset on SI and CI+ */
-	if (sctx->b.chip_class < CIK)
-		si_pm4_set_reg(pm4, GRBM_GFX_INDEX,
-			       SE_BROADCAST_WRITES | SH_BROADCAST_WRITES |
-			       INSTANCE_BROADCAST_WRITES);
-	else {
-		si_pm4_set_reg(pm4, R_030800_GRBM_GFX_INDEX,
-			       S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
-			       S_030800_INSTANCE_BROADCAST_WRITES(1));
-
+	if (sctx->b.chip_class >= CIK) {
 		if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
 				     (!se_mask[2] && !se_mask[3]))) {
 			raster_config_1 &= C_028354_SE_PAIR_MAP;
@@ -4267,51 +4468,14 @@ si_write_harvested_raster_configs(struct si_context *sctx,
 	}
 }
 
-static void si_init_config(struct si_context *sctx)
+static void si_set_raster_config(struct si_context *sctx, struct si_pm4_state *pm4)
 {
 	struct si_screen *sscreen = sctx->screen;
 	unsigned num_rb = MIN2(sctx->screen->b.info.num_render_backends, 16);
 	unsigned rb_mask = sctx->screen->b.info.enabled_rb_mask;
 	unsigned raster_config, raster_config_1;
-	uint64_t border_color_va = sctx->border_color_buffer->gpu_address;
-	struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
-
-	if (!pm4)
-		return;
-
-	si_pm4_cmd_begin(pm4, PKT3_CONTEXT_CONTROL);
-	si_pm4_cmd_add(pm4, CONTEXT_CONTROL_LOAD_ENABLE(1));
-	si_pm4_cmd_add(pm4, CONTEXT_CONTROL_SHADOW_ENABLE(1));
-	si_pm4_cmd_end(pm4, false);
-
-	si_pm4_set_reg(pm4, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
-	si_pm4_set_reg(pm4, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));
-
-	/* FIXME calculate these values somehow ??? */
-	if (sctx->b.chip_class <= VI) {
-		si_pm4_set_reg(pm4, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES);
-		si_pm4_set_reg(pm4, R_028A58_VGT_ES_PER_GS, 0x40);
-	}
-	si_pm4_set_reg(pm4, R_028A5C_VGT_GS_PER_VS, 0x2);
-
-	si_pm4_set_reg(pm4, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
-	si_pm4_set_reg(pm4, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
-
-	si_pm4_set_reg(pm4, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
-	si_pm4_set_reg(pm4, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1);
-	if (sctx->b.chip_class >= GFX9)
-		si_pm4_set_reg(pm4, R_028AB4_VGT_REUSE_OFF, 0);
-	si_pm4_set_reg(pm4, R_028AB8_VGT_VTX_CNT_EN, 0x0);
-	if (sctx->b.chip_class < CIK)
-		si_pm4_set_reg(pm4, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
-			       S_008A14_CLIP_VTX_REORDER_ENA(1));
-
-	si_pm4_set_reg(pm4, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 0x76543210);
-	si_pm4_set_reg(pm4, R_028BD8_PA_SC_CENTROID_PRIORITY_1, 0xfedcba98);
-	si_pm4_set_reg(pm4, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);
-
-	switch (sctx->screen->b.family) {
+	switch (sctx->b.family) {
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 		raster_config = 0x2a00126a;
@@ -4383,61 +4547,125 @@ static void si_init_config(struct si_context *sctx)
 		raster_config_1 = 0x00000000;
 		break;
 	default:
-		if (sctx->b.chip_class <= VI) {
-			fprintf(stderr,
-				"radeonsi: Unknown GPU, using 0 for raster_config\n");
-			raster_config = 0x00000000;
-			raster_config_1 = 0x00000000;
-		}
-		break;
+		fprintf(stderr,
+			"radeonsi: Unknown GPU, using 0 for raster_config\n");
+		raster_config = 0x00000000;
+		raster_config_1 = 0x00000000;
+	}
+
+	if (!rb_mask || util_bitcount(rb_mask) >= num_rb) {
+		/* Always use the default config when all backends are enabled
+		 * (or when we failed to determine the enabled backends).
+		 */
+		si_pm4_set_reg(pm4, R_028350_PA_SC_RASTER_CONFIG,
+			       raster_config);
+		if (sctx->b.chip_class >= CIK)
+			si_pm4_set_reg(pm4, R_028354_PA_SC_RASTER_CONFIG_1,
+				       raster_config_1);
+	} else {
+		si_write_harvested_raster_configs(sctx, pm4, raster_config, raster_config_1);
+	}
+}
+
+static void si_init_config(struct si_context *sctx)
+{
+	struct si_screen *sscreen = sctx->screen;
+	uint64_t border_color_va = sctx->border_color_buffer->gpu_address;
+	bool has_clear_state = sscreen->has_clear_state;
+	struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
+
+	/* Only SI can disable CLEAR_STATE for now. */
+	assert(has_clear_state || sscreen->b.chip_class == SI);
+
+	if (!pm4)
+		return;
+
+	si_pm4_cmd_begin(pm4, PKT3_CONTEXT_CONTROL);
+	si_pm4_cmd_add(pm4, CONTEXT_CONTROL_LOAD_ENABLE(1));
+	si_pm4_cmd_add(pm4, CONTEXT_CONTROL_SHADOW_ENABLE(1));
+	si_pm4_cmd_end(pm4, false);
+
+	if (has_clear_state) {
+		si_pm4_cmd_begin(pm4, PKT3_CLEAR_STATE);
+		si_pm4_cmd_add(pm4, 0);
+		si_pm4_cmd_end(pm4, false);
 	}
 
+	if (sctx->b.chip_class <= VI)
+		si_set_raster_config(sctx, pm4);
+
+	si_pm4_set_reg(pm4, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
+	if (!has_clear_state)
+		si_pm4_set_reg(pm4, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));
+
+	/* FIXME calculate these values somehow ??? */
 	if (sctx->b.chip_class <= VI) {
-		if (!rb_mask || util_bitcount(rb_mask) >= num_rb) {
-			/* Always use the default config when all backends are enabled
-			 * (or when we failed to determine the enabled backends).
-			 */
-			si_pm4_set_reg(pm4, R_028350_PA_SC_RASTER_CONFIG,
-				       raster_config);
-			if (sctx->b.chip_class >= CIK)
-				si_pm4_set_reg(pm4, R_028354_PA_SC_RASTER_CONFIG_1,
-					       raster_config_1);
-		} else {
-			si_write_harvested_raster_configs(sctx, pm4, raster_config, raster_config_1);
-		}
+		si_pm4_set_reg(pm4, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES);
+		si_pm4_set_reg(pm4, R_028A58_VGT_ES_PER_GS, 0x40);
+	}
+
+	if (!has_clear_state) {
+		si_pm4_set_reg(pm4, R_028A5C_VGT_GS_PER_VS, 0x2);
+		si_pm4_set_reg(pm4, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
+		si_pm4_set_reg(pm4, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
 	}
 
-	si_pm4_set_reg(pm4, R_028204_PA_SC_WINDOW_SCISSOR_TL, S_028204_WINDOW_OFFSET_DISABLE(1));
-	si_pm4_set_reg(pm4, R_028240_PA_SC_GENERIC_SCISSOR_TL, S_028240_WINDOW_OFFSET_DISABLE(1));
-	si_pm4_set_reg(pm4, R_028244_PA_SC_GENERIC_SCISSOR_BR,
-		       S_028244_BR_X(16384) | S_028244_BR_Y(16384));
-	si_pm4_set_reg(pm4, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0);
-	si_pm4_set_reg(pm4, R_028034_PA_SC_SCREEN_SCISSOR_BR,
-		       S_028034_BR_X(16384) | S_028034_BR_Y(16384));
-
-	si_pm4_set_reg(pm4, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
-	si_pm4_set_reg(pm4, R_028230_PA_SC_EDGERULE,
-		       S_028230_ER_TRI(0xA) |
-		       S_028230_ER_POINT(0xA) |
-		       S_028230_ER_RECT(0xA) |
-		       /* Required by DX10_DIAMOND_TEST_ENA: */
-		       S_028230_ER_LINE_LR(0x1A) |
-		       S_028230_ER_LINE_RL(0x26) |
-		       S_028230_ER_LINE_TB(0xA) |
-		       S_028230_ER_LINE_BT(0xA));
-	/* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on SI */
-	si_pm4_set_reg(pm4, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
-	si_pm4_set_reg(pm4, R_028820_PA_CL_NANINF_CNTL, 0);
-	si_pm4_set_reg(pm4, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
-	si_pm4_set_reg(pm4, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
-	si_pm4_set_reg(pm4, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
-	si_pm4_set_reg(pm4, R_02800C_DB_RENDER_OVERRIDE, 0);
+	si_pm4_set_reg(pm4, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1);
+	if (!has_clear_state)
+		si_pm4_set_reg(pm4, R_028AB8_VGT_VTX_CNT_EN, 0x0);
+	if (sctx->b.chip_class < CIK)
+		si_pm4_set_reg(pm4, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
+			       S_008A14_CLIP_VTX_REORDER_ENA(1));
+
+	si_pm4_set_reg(pm4, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 0x76543210);
+	si_pm4_set_reg(pm4, R_028BD8_PA_SC_CENTROID_PRIORITY_1, 0xfedcba98);
+
+	if (!has_clear_state)
+		si_pm4_set_reg(pm4, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);
+
+	/* CLEAR_STATE doesn't clear these correctly on certain generations.
+	 * I don't know why. Deduced by trial and error.
+	 */
+	if (sctx->b.chip_class <= CIK) {
+		si_pm4_set_reg(pm4, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
+		si_pm4_set_reg(pm4, R_028204_PA_SC_WINDOW_SCISSOR_TL, S_028204_WINDOW_OFFSET_DISABLE(1));
+		si_pm4_set_reg(pm4, R_028240_PA_SC_GENERIC_SCISSOR_TL, S_028240_WINDOW_OFFSET_DISABLE(1));
+		si_pm4_set_reg(pm4, R_028244_PA_SC_GENERIC_SCISSOR_BR,
+			       S_028244_BR_X(16384) | S_028244_BR_Y(16384));
+		si_pm4_set_reg(pm4, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0);
+		si_pm4_set_reg(pm4, R_028034_PA_SC_SCREEN_SCISSOR_BR,
+			       S_028034_BR_X(16384) | S_028034_BR_Y(16384));
+	}
+
+	if (!has_clear_state) {
+		si_pm4_set_reg(pm4, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
+		si_pm4_set_reg(pm4, R_028230_PA_SC_EDGERULE,
+			       S_028230_ER_TRI(0xA) |
+			       S_028230_ER_POINT(0xA) |
+			       S_028230_ER_RECT(0xA) |
+			       /* Required by DX10_DIAMOND_TEST_ENA: */
+			       S_028230_ER_LINE_LR(0x1A) |
+			       S_028230_ER_LINE_RL(0x26) |
+			       S_028230_ER_LINE_TB(0xA) |
+			       S_028230_ER_LINE_BT(0xA));
+		/* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on SI */
+		si_pm4_set_reg(pm4, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
+		si_pm4_set_reg(pm4, R_028820_PA_CL_NANINF_CNTL, 0);
+		si_pm4_set_reg(pm4, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
+		si_pm4_set_reg(pm4, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
+		si_pm4_set_reg(pm4, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
+		si_pm4_set_reg(pm4, R_02800C_DB_RENDER_OVERRIDE, 0);
+	}
 
 	if (sctx->b.chip_class >= GFX9) {
 		si_pm4_set_reg(pm4, R_030920_VGT_MAX_VTX_INDX, ~0);
 		si_pm4_set_reg(pm4, R_030924_VGT_MIN_VTX_INDX, 0);
 		si_pm4_set_reg(pm4, R_030928_VGT_INDX_OFFSET, 0);
 	} else {
+		/* These registers, when written, also overwrite the CLEAR_STATE
+		 * context, so we can't rely on CLEAR_STATE setting them.
+		 * It would be an issue if there was another UMD changing them.
+		 */
 		si_pm4_set_reg(pm4, R_028400_VGT_MAX_VTX_INDX, ~0);
 		si_pm4_set_reg(pm4, R_028404_VGT_MIN_VTX_INDX, 0);
 		si_pm4_set_reg(pm4, R_028408_VGT_INDX_OFFSET, 0);
@@ -4461,26 +4689,39 @@ static void si_init_config(struct si_context *sctx)
 	}
 
 	si_pm4_set_reg(pm4, R_00B21C_SPI_SHADER_PGM_RSRC3_GS, S_00B21C_CU_EN(0xffff));
 
-	if (sscreen->b.info.num_good_compute_units /
-	    (sscreen->b.info.max_se * sscreen->b.info.max_sh_per_se) <= 4) {
+	/* Compute LATE_ALLOC_VS.LIMIT. */
+	unsigned num_cu_per_sh = sscreen->b.info.num_good_compute_units /
+				 (sscreen->b.info.max_se *
+				  sscreen->b.info.max_sh_per_se);
+	unsigned late_alloc_limit; /* The limit is per SH. */
+
+	if (sctx->b.family == CHIP_KABINI) {
+		late_alloc_limit = 0; /* Potential hang on Kabini. */
+	} else if (num_cu_per_sh <= 4) {
 		/* Too few available compute units per SH. Disallowing
-		 * VS to run on CU0 could hurt us more than late VS
+		 * VS to run on one CU could hurt us more than late VS
 		 * allocation would help.
 		 *
-		 * LATE_ALLOC_VS = 2 is the highest safe number.
+		 * 2 is the highest safe number that allows us to keep
+		 * all CUs enabled.
 		 */
-		si_pm4_set_reg(pm4, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xffff));
-		si_pm4_set_reg(pm4, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(2));
+		late_alloc_limit = 2;
 	} else {
-		/* Set LATE_ALLOC_VS == 31. It should be less than
-		 * the number of scratch waves. Limitations:
-		 *  - VS can't execute on CU0.
-		 *  - If HS writes outputs to LDS, LS can't execute on CU0.
+		/* This is a good initial value, allowing 1 late_alloc
+		 * wave per SIMD on num_cu - 2.
 		 */
-		si_pm4_set_reg(pm4, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xfffe));
-		si_pm4_set_reg(pm4, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(31));
+		late_alloc_limit = (num_cu_per_sh - 2) * 4;
+
+		/* The limit is 0-based, so 0 means 1. */
+		assert(late_alloc_limit > 0 && late_alloc_limit <= 64);
+		late_alloc_limit -= 1;
 	}
+
+	/* VS can't execute on one CU if the limit is > 2. */
+	si_pm4_set_reg(pm4, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
+		       S_00B118_CU_EN(late_alloc_limit > 2 ? 0xfffe : 0xffff));
+	si_pm4_set_reg(pm4, R_00B11C_SPI_SHADER_LATE_ALLOC_VS,
+		       S_00B11C_LIMIT(late_alloc_limit));
 	si_pm4_set_reg(pm4, R_00B01C_SPI_SHADER_PGM_RSRC3_PS, S_00B01C_CU_EN(0xffff));
 }
@@ -4490,9 +4731,6 @@ static void si_init_config(struct si_context *sctx)
 		si_pm4_set_reg(pm4, R_028424_CB_DCC_CONTROL,
 			       S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
 			       S_028424_OVERWRITE_COMBINER_WATERMARK(4));
-		if (sctx->b.family < CHIP_POLARIS10)
-			si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 30);
-		si_pm4_set_reg(pm4, R_028C5C_VGT_OUT_DEALLOC_CNTL, 32);
 
 		vgt_tess_distribution =
 			S_028B50_ACCUM_ISOLINE(32) |
@@ -4508,14 +4746,11 @@ static void si_init_config(struct si_context *sctx)
 			vgt_tess_distribution |= S_028B50_TRAP_SPLIT(3);
 
 		si_pm4_set_reg(pm4, R_028B50_VGT_TESS_DISTRIBUTION, vgt_tess_distribution);
-	} else {
+	} else if (!has_clear_state) {
 		si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
 		si_pm4_set_reg(pm4, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16);
 	}
 
-	if (sctx->screen->b.has_rbplus)
-		si_pm4_set_reg(pm4, R_028C40_PA_SC_SHADER_CONTROL, 0);
-
 	si_pm4_set_reg(pm4, R_028080_TA_BC_BASE_ADDR, border_color_va >> 8);
 	if (sctx->b.chip_class >= CIK)
 		si_pm4_set_reg(pm4, R_028084_TA_BC_BASE_ADDR_HI, border_color_va >> 40);
@@ -4537,16 +4772,6 @@ static void si_init_config(struct si_context *sctx)
 		assert(0);
 	}
 
-	si_pm4_set_reg(pm4, R_028060_DB_DFSM_CONTROL,
-		       S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF));
-	si_pm4_set_reg(pm4, R_028064_DB_RENDER_FILTER, 0);
-	/* TODO: We can use this to disable RBs for rendering to GART: */
-	si_pm4_set_reg(pm4, R_02835C_PA_SC_TILE_STEERING_OVERRIDE, 0);
-	si_pm4_set_reg(pm4, R_02883C_PA_SU_OVER_RASTERIZATION_CNTL, 0);
-	/* TODO: Enable the binner: */
-	si_pm4_set_reg(pm4, R_028C44_PA_SC_BINNER_CNTL_0,
-		       S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
-		       S_028C44_DISABLE_START_OF_PRIM(1));
 	si_pm4_set_reg(pm4, R_028C48_PA_SC_BINNER_CNTL_1,
 		       S_028C48_MAX_ALLOC_COUNT(MIN2(128, pc_lines / (4 * num_se))) |
 		       S_028C48_MAX_PRIM_PER_BATCH(1023));