#include "iris_pipe.h"
#include "iris_resource.h"
-#define __gen_address_type struct iris_address
-#define __gen_user_data struct iris_batch
-
-#define ARRAY_BYTES(x) (sizeof(uint32_t) * ARRAY_SIZE(x))
-
-static uint64_t
-__gen_combine_address(struct iris_batch *batch, void *location,
- struct iris_address addr, uint32_t delta)
-{
- uint64_t result = addr.offset + delta;
-
- if (addr.bo) {
- iris_use_pinned_bo(batch, addr.bo, addr.write);
- /* Assume this is a general address, not relative to a base. */
- result += addr.bo->gtt_offset;
- }
-
- return result;
-}
-
-#define __genxml_cmd_length(cmd) cmd ## _length
-#define __genxml_cmd_length_bias(cmd) cmd ## _length_bias
-#define __genxml_cmd_header(cmd) cmd ## _header
-#define __genxml_cmd_pack(cmd) cmd ## _pack
-
-#define _iris_pack_command(batch, cmd, dst, name) \
- for (struct cmd name = { __genxml_cmd_header(cmd) }, \
- *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
- ({ __genxml_cmd_pack(cmd)(batch, (void *)_dst, &name); \
- _dst = NULL; \
- }))
-
-#define iris_pack_command(cmd, dst, name) \
- _iris_pack_command(NULL, cmd, dst, name)
-
-#define iris_pack_state(cmd, dst, name) \
- for (struct cmd name = {}, \
- *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
- __genxml_cmd_pack(cmd)(NULL, (void *)_dst, &name), \
- _dst = NULL)
-
-#define iris_emit_cmd(batch, cmd, name) \
- _iris_pack_command(batch, cmd, iris_get_command_space(batch, 4 * __genxml_cmd_length(cmd)), name)
-
-#define iris_emit_merge(batch, dwords0, dwords1, num_dwords) \
- do { \
- uint32_t *dw = iris_get_command_space(batch, 4 * num_dwords); \
- for (uint32_t i = 0; i < num_dwords; i++) \
- dw[i] = (dwords0)[i] | (dwords1)[i]; \
- VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, num_dwords)); \
- } while (0)
-
-#include "genxml/genX_pack.h"
-#include "genxml/gen_macros.h"
-#include "genxml/genX_bits.h"
+#include "iris_genx_macros.h"
#include "intel/common/gen_guardband.h"
#if GEN_GEN == 8
return map[pipe_wrap];
}
-static struct iris_address
-ro_bo(struct iris_bo *bo, uint64_t offset)
-{
- /* CSOs must pass NULL for bo! Otherwise it will add the BO to the
- * validation list at CSO creation time, instead of draw time.
- */
- return (struct iris_address) { .bo = bo, .offset = offset };
-}
-
-static struct iris_address
-rw_bo(struct iris_bo *bo, uint64_t offset)
-{
- /* CSOs must pass NULL for bo! Otherwise it will add the BO to the
- * validation list at CSO creation time, instead of draw time.
- */
- return (struct iris_address) { .bo = bo, .offset = offset, .write = true };
-}
-
/**
* Allocate space for some indirect state.
*
}
#endif
+#if GEN_GEN == 11
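+/**
+ * On some Gen11 parts, the two pixel pipes are fused asymmetrically, so
+ * one pipe ends up with more subslices than the other.  Even slice
+ * hashing would overload the smaller pipe; upload a custom hash table
+ * that steers proportionally more work to the larger one.
+ */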
+static void
+iris_upload_slice_hashing_state(struct iris_batch *batch)
+{
+ const struct gen_device_info *devinfo = &batch->screen->devinfo;
+ int subslices_delta =
+ devinfo->ppipe_subslices[0] - devinfo->ppipe_subslices[1];
+ if (subslices_delta == 0)
+ return;
+
+ struct iris_context *ice = NULL;
+ ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]);
+ assert(&ice->batches[IRIS_BATCH_RENDER] == batch);
+
+ unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
+ uint32_t hash_address;
+ struct pipe_resource *tmp = NULL;
+ uint32_t *map =
+ stream_state(batch, ice->state.dynamic_uploader, &tmp,
+ size, 64, &hash_address);
+ pipe_resource_reference(&tmp, NULL);
+
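+   /* Each entry in the table picks the pixel pipe that rasterizes the
+    * corresponding screen-space block.  table0 routes roughly two thirds
+    * of the blocks to pipe 1 (selected below when pipe 0 has fewer
+    * subslices); table1 is the mirror image, favoring pipe 0.
+    */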
+ struct GENX(SLICE_HASH_TABLE) table0 = {
+ .Entry = {
+ { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+ { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+ { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+ { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+ { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+ { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+ { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+ { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+ { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+ { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+ { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+ { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+ { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
+ { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
+ { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
+ { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }
+ }
+ };
+
+ struct GENX(SLICE_HASH_TABLE) table1 = {
+ .Entry = {
+ { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+ { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+ { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+ { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+ { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+ { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+ { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+ { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+ { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+ { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+ { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+ { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+ { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
+ { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
+ { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
+ { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }
+ }
+ };
+
+ const struct GENX(SLICE_HASH_TABLE) *table =
+ subslices_delta < 0 ? &table0 : &table1;
+ GENX(SLICE_HASH_TABLE_pack)(NULL, map, table);
+
+ iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
+ ptr.SliceHashStatePointerValid = true;
+ ptr.SliceHashTableStatePointer = hash_address;
+ }
+
+ iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) {
+ mode.SliceHashingTableEnable = true;
+ }
+}
+#endif
+
/**
* Upload the initial GPU state for a render context.
*
iris_emit_lri(batch, CACHE_MODE_0, reg_val);
}
- // XXX: 3D_MODE?
+ iris_upload_slice_hashing_state(batch);
#endif
/* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
*/
struct iris_genx_state {
struct iris_vertex_buffer_state vertex_buffers[33];
+ uint32_t last_index_buffer[GENX(3DSTATE_INDEX_BUFFER_length)];
struct iris_depth_buffer_state depth_buffer;
iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
sf.StatisticsEnable = true;
- sf.ViewportTransformEnable = true;
sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
sf.LineEndCapAntialiasingRegionWidth =
state->line_smooth ? _10pixels : _05pixels;
{
struct iris_context *ice = (struct iris_context *) ctx;
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
+ struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
+ struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
memcpy(&ice->state.clip_planes, state, sizeof(*state));
- ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS;
+ ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS | IRIS_DIRTY_CONSTANTS_GS |
+ IRIS_DIRTY_CONSTANTS_TES;
shs->sysvals_need_upload = true;
+ gshs->sysvals_need_upload = true;
+ tshs->sysvals_need_upload = true;
}
/**
if (cso->samples != samples) {
ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
+
+ /* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */
+ if (GEN_GEN >= 9 && (cso->samples == 16 || samples == 16))
+ ice->state.dirty |= IRIS_DIRTY_FS;
}
if (cso->nr_cbufs != state->nr_cbufs) {
ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
}
+ if (cso->zsbuf || state->zsbuf) {
+ ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
+ }
+
util_copy_framebuffer_state(cso, state);
cso->samples = samples;
cso->layers = layers;
ice->state.null_fb.offset +=
iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));
- ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
-
/* Render target change */
ice->state.dirty |= IRIS_DIRTY_BINDINGS_FS;
static void
iris_populate_vs_key(const struct iris_context *ice,
const struct shader_info *info,
+ gl_shader_stage last_stage,
struct brw_vs_prog_key *key)
{
const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
if (info->clip_distance_array_size == 0 &&
- (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)))
+ (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
+ last_stage == MESA_SHADER_VERTEX)
key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
*/
static void
iris_populate_tes_key(const struct iris_context *ice,
+ const struct shader_info *info,
+ gl_shader_stage last_stage,
struct brw_tes_prog_key *key)
{
+ const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
+
+ if (info->clip_distance_array_size == 0 &&
+ (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
+ last_stage == MESA_SHADER_TESS_EVAL)
+ key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
*/
static void
iris_populate_gs_key(const struct iris_context *ice,
+ const struct shader_info *info,
+ gl_shader_stage last_stage,
struct brw_gs_prog_key *key)
{
+ const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
+
+ if (info->clip_distance_array_size == 0 &&
+ (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
+ last_stage == MESA_SHADER_GEOMETRY)
+ key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
*/
static void
iris_populate_fs_key(const struct iris_context *ice,
+ const struct shader_info *info,
struct brw_wm_prog_key *key)
{
struct iris_screen *screen = (void *) ice->ctx.screen;
key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha.enabled;
- /* XXX: only bother if COL0/1 are read */
- key->flat_shade = rast->flatshade;
+ key->flat_shade = rast->flatshade &&
+ (info->inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1));
key->persample_interp = rast->force_persample_interp;
key->multisample_fbo = rast->multisample && fb->samples > 1;
screen->driconf.dual_color_blend_by_location &&
(blend->blend_enables & 1) && blend->dual_color_blending;
- /* TODO: support key->force_dual_color_blend for Unigine */
/* TODO: Respect glHint for key->high_quality_derivatives */
}
*/
ps.PositionXYOffsetSelect =
wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;
- ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
- ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
- ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
-
- // XXX: Disable SIMD32 with 16x MSAA
-
- ps.DispatchGRFStartRegisterForConstantSetupData0 =
- brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
- ps.DispatchGRFStartRegisterForConstantSetupData1 =
- brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
- ps.DispatchGRFStartRegisterForConstantSetupData2 =
- brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
-
- ps.KernelStartPointer0 =
- KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
- ps.KernelStartPointer1 =
- KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
- ps.KernelStartPointer2 =
- KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
if (prog_data->total_scratch) {
struct iris_bo *bo =
#if GEN_GEN >= 9
psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
-#else
- psx.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
#endif
}
}
pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
}
- if (draw->index_size == 0 && ice->state.last_res.index_buffer) {
- /* This draw didn't emit a new index buffer, so we are inheriting the
- * older index buffer. This draw didn't need it, but future ones may.
- */
- struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
- iris_use_pinned_bo(batch, bo, false);
- }
+ iris_use_optional_res(batch, ice->state.last_res.index_buffer, false);
if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
uint64_t bound = ice->state.bound_vertex_buffers;
batch->last_surface_base_address = binder->bo->gtt_offset;
}
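+/**
+ * Compute the depth range for CC_VIEWPORT.  When the shader writes
+ * window-space positions, the viewport transform is bypassed, so we use
+ * the full [0, 1] range rather than deriving one from the viewport.
+ */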
+static inline void
+iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
+ bool window_space_position, float *zmin, float *zmax)
+{
+ if (window_space_position) {
+ *zmin = 0.f;
+ *zmax = 1.f;
+ return;
+ }
+ util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
+}
+
static void
iris_upload_dirty_render_state(struct iris_context *ice,
struct iris_batch *batch,
GENX(CC_VIEWPORT_length), 32, &cc_vp_address);
for (int i = 0; i < ice->state.num_viewports; i++) {
float zmin, zmax;
- util_viewport_zmin_zmax(&ice->state.viewports[i],
- cso_rast->clip_halfz, &zmin, &zmax);
+ iris_viewport_zmin_zmax(&ice->state.viewports[i], cso_rast->clip_halfz,
+ ice->state.window_space_position,
+ &zmin, &zmax);
if (cso_rast->depth_clip_near)
zmin = 0.0;
if (cso_rast->depth_clip_far)
iris_get_scratch_space(ice, prog_data->total_scratch, stage);
iris_use_pinned_bo(batch, bo, true);
}
-#if GEN_GEN >= 9
- if (stage == MESA_SHADER_FRAGMENT && wm_prog_data->uses_sample_mask) {
- uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
- uint32_t *shader_psx = ((uint32_t*)shader->derived_data) +
- GENX(3DSTATE_PS_length);
- struct iris_rasterizer_state *cso = ice->state.cso_rast;
- iris_pack_command(GENX(3DSTATE_PS_EXTRA), &psx_state, psx) {
+ if (stage == MESA_SHADER_FRAGMENT) {
+ UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast;
+ struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
+
+ uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0};
+ iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
+ ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
+ ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
+ ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
+
+ /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say:
+ *
+ * "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16,
+ * SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch
+ * mode."
+ *
+ * 16x MSAA only exists on Gen9+, so we can skip this on Gen8.
+ */
+ if (GEN_GEN >= 9 && cso_fb->samples == 16 &&
+ !wm_prog_data->persample_dispatch) {
+ assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
+ ps._32PixelDispatchEnable = false;
+ }
+
+ ps.DispatchGRFStartRegisterForConstantSetupData0 =
+ brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
+ ps.DispatchGRFStartRegisterForConstantSetupData1 =
+ brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
+ ps.DispatchGRFStartRegisterForConstantSetupData2 =
+ brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
+
+ ps.KernelStartPointer0 = KSP(shader) +
+ brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
+ ps.KernelStartPointer1 = KSP(shader) +
+ brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
+ ps.KernelStartPointer2 = KSP(shader) +
+ brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
+ }
+
+ uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
+ iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
+#if GEN_GEN >= 9
if (wm_prog_data->post_depth_coverage)
psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
- else if (wm_prog_data->inner_coverage && cso->conservative_rasterization)
+ else if (wm_prog_data->inner_coverage &&
+ cso->conservative_rasterization)
psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
else
psx.InputCoverageMaskState = ICMS_NORMAL;
+#else
+ psx.PixelShaderUsesInputCoverageMask =
+ wm_prog_data->uses_sample_mask;
+#endif
}
- iris_batch_emit(batch, shader->derived_data,
- sizeof(uint32_t) * GENX(3DSTATE_PS_length));
- iris_emit_merge(batch,
- shader_psx,
- psx_state,
+ uint32_t *shader_ps = (uint32_t *) shader->derived_data;
+ uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length);
+ iris_emit_merge(batch, shader_ps, ps_state,
+ GENX(3DSTATE_PS_length));
+ iris_emit_merge(batch, shader_psx, psx_state,
GENX(3DSTATE_PS_EXTRA_length));
- } else
-#endif
+ } else {
iris_batch_emit(batch, shader->derived_data,
iris_derived_program_state_size(stage));
+ }
} else {
if (stage == MESA_SHADER_TESS_EVAL) {
iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
cl.StatisticsEnable = ice->state.statistics_counters_enabled;
- cl.ClipMode = cso_rast->rasterizer_discard ? CLIPMODE_REJECT_ALL
- : CLIPMODE_NORMAL;
+ if (cso_rast->rasterizer_discard)
+ cl.ClipMode = CLIPMODE_REJECT_ALL;
+ else if (ice->state.window_space_position)
+ cl.ClipMode = CLIPMODE_ACCEPT_ALL;
+ else
+ cl.ClipMode = CLIPMODE_NORMAL;
+
+ cl.PerspectiveDivideDisable = ice->state.window_space_position;
cl.ViewportXYClipTestEnable = !points_or_lines;
if (wm_prog_data->barycentric_interp_modes &
if (dirty & IRIS_DIRTY_RASTER) {
struct iris_rasterizer_state *cso = ice->state.cso_rast;
iris_batch_emit(batch, cso->raster, sizeof(cso->raster));
- iris_batch_emit(batch, cso->sf, sizeof(cso->sf));
+ uint32_t dynamic_sf[GENX(3DSTATE_SF_length)];
+ iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) {
+ sf.ViewportTransformEnable = !ice->state.window_space_position;
+ }
+ iris_emit_merge(batch, cso->sf, dynamic_sf,
+ ARRAY_SIZE(dynamic_sf));
}
if (dirty & IRIS_DIRTY_WM) {
}
}
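+   /* Restore the default pixel hashing mode if a previous operation
+    * (such as a fast clear or resolve) switched to a finer one.
+    */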
+ if (ice->state.current_hash_scale != 1)
+ genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1);
+
/* TODO: Gen8 PMA fix */
}
offset = 0;
}
+ struct iris_genx_state *genx = ice->state.genx;
struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
- iris_emit_cmd(batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
+ uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)];
+ iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) {
ib.IndexFormat = draw->index_size >> 1;
ib.MOCS = mocs(bo);
ib.BufferSize = bo->size - offset;
- ib.BufferStartingAddress = ro_bo(bo, offset);
+ ib.BufferStartingAddress = ro_bo(NULL, bo->gtt_offset + offset);
+ }
+
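+   /* Only re-emit 3DSTATE_INDEX_BUFFER if the packet changed.  The cache
+    * is cleared by iris_lost_genx_state() when a new batch begins, so
+    * every batch still emits the packet (and pins the BO) at least once.
+    */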
+ if (memcmp(genx->last_index_buffer, ib_packet, sizeof(ib_packet)) != 0) {
+ memcpy(genx->last_index_buffer, ib_packet, sizeof(ib_packet));
+ iris_batch_emit(batch, ib_packet, sizeof(ib_packet));
+ iris_use_pinned_bo(batch, bo, false);
}
/* The VF cache key only uses 32-bits, see vertex buffer comment above */
PIPE_CONTROL_FLUSH_ENABLE);
if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
- static const uint32_t math[] = {
- MI_MATH | (9 - 2),
- /* Compute (draw index < draw count).
- * We do this by subtracting and storing the carry bit.
- */
- MI_ALU2(LOAD, SRCA, R0),
- MI_ALU2(LOAD, SRCB, R1),
- MI_ALU0(SUB),
- MI_ALU2(STORE, R3, CF),
- /* Compute (subtracting result & MI_PREDICATE). */
- MI_ALU2(LOAD, SRCA, R3),
- MI_ALU2(LOAD, SRCB, R2),
- MI_ALU0(AND),
- MI_ALU2(STORE, R3, ACCU),
- };
-
- /* Upload the current draw count from the draw parameters
- * buffer to GPR1.
- */
- ice->vtbl.load_register_mem32(batch, CS_GPR(1), draw_count_bo,
- draw_count_offset);
- /* Zero the top 32-bits of GPR1. */
- ice->vtbl.load_register_imm32(batch, CS_GPR(1) + 4, 0);
- /* Upload the id of the current primitive to GPR0. */
- ice->vtbl.load_register_imm64(batch, CS_GPR(0), draw->drawid);
-
- iris_batch_emit(batch, math, sizeof(math));
-
- /* Store result of MI_MATH computations to MI_PREDICATE_RESULT. */
- ice->vtbl.load_register_reg64(batch,
- MI_PREDICATE_RESULT, CS_GPR(3));
+ struct gen_mi_builder b;
+ gen_mi_builder_init(&b, batch);
+
+ /* comparison = draw id < draw count */
+ struct gen_mi_value comparison =
+ gen_mi_ult(&b, gen_mi_imm(draw->drawid),
+ gen_mi_mem32(ro_bo(draw_count_bo,
+ draw_count_offset)));
+
+ /* predicate = comparison & conditional rendering predicate */
+ gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_RESULT),
+ gen_mi_iand(&b, comparison,
+ gen_mi_reg32(CS_GPR(15))));
} else {
uint32_t mi_predicate;
"draw count from stream output stall",
PIPE_CONTROL_CS_STALL);
- iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
- lrm.RegisterAddress = CS_GPR(0);
- lrm.MemoryAddress =
- ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
- }
- if (so->base.buffer_offset)
- iris_math_add32_gpr0(ice, batch, -so->base.buffer_offset);
- iris_math_div32_gpr0(ice, batch, so->stride);
- _iris_emit_lrr(batch, _3DPRIM_VERTEX_COUNT, CS_GPR(0));
+ struct gen_mi_builder b;
+ gen_mi_builder_init(&b, batch);
+
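+      /* vertex_count = (SO write offset - buffer_offset) / stride */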
+ struct iris_address addr =
+ ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
+ struct gen_mi_value offset =
+ gen_mi_iadd_imm(&b, gen_mi_mem32(addr), -so->base.buffer_offset);
+
+ gen_mi_store(&b, gen_mi_reg32(_3DPRIM_VERTEX_COUNT),
+ gen_mi_udiv32_imm(&b, offset, so->stride));
_iris_emit_lri(batch, _3DPRIM_START_VERTEX, 0);
_iris_emit_lri(batch, _3DPRIM_BASE_VERTEX, 0);
}
}
- /* No need to handle these:
- * - PIPE_BIND_INDEX_BUFFER (emitted for every indexed draw)
+ /* We don't need to handle PIPE_BIND_INDEX_BUFFER here: we re-emit
+ * the 3DSTATE_INDEX_BUFFER packet whenever the address changes.
+ *
+ * There is also no need to handle these:
* - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw)
* - PIPE_BIND_QUERY_BUFFER (no persistent state references)
*/
}
#endif
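+/**
+ * When the batch's GPU state is lost, cached packets such as
+ * last_index_buffer no longer reflect the hardware; clear them so the
+ * next draw re-emits them.
+ */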
+static void
+iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch)
+{
+ struct iris_genx_state *genx = ice->state.genx;
+
+ memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer));
+}
+
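+/**
+ * Emit MI_REPORT_PERF_COUNT, which snapshots the OA performance counters
+ * into \p bo at \p offset_in_bytes, tagging the report with \p report_id.
+ */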
+static void
+iris_emit_mi_report_perf_count(struct iris_batch *batch,
+ struct iris_bo *bo,
+ uint32_t offset_in_bytes,
+ uint32_t report_id)
+{
+ iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
+ mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes);
+ mi_rpc.ReportID = report_id;
+ }
+}
+
+/**
+ * Update the pixel hashing modes that determine the balancing of PS threads
+ * across subslices and slices.
+ *
+ * \param width Width bound of the rendering area (already scaled down if \p
+ * scale is greater than 1).
+ * \param height Height bound of the rendering area (already scaled down if \p
+ * scale is greater than 1).
+ * \param scale The number of framebuffer samples that could potentially be
+ * affected by an individual channel of the PS thread. This is
+ * typically one for single-sampled rendering, but for operations
+ * like CCS resolves and fast clears a single PS invocation may
+ * update a huge number of pixels, in which case a finer
+ * balancing is desirable in order to maximally utilize the
+ * bandwidth available. UINT_MAX can be used as shorthand for
+ * "finest hashing mode available".
+ */
+void
+genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
+ unsigned width, unsigned height, unsigned scale)
+{
+#if GEN_GEN == 9
+ const struct gen_device_info *devinfo = &batch->screen->devinfo;
+ const unsigned slice_hashing[] = {
+ /* Because all Gen9 platforms with more than one slice require
+ * three-way subslice hashing, a single "normal" 16x16 slice hashing
+ * block is guaranteed to suffer from substantial imbalance, with one
+ * subslice receiving twice as much work as the other two in the
+ * slice.
+ *
+ * The performance impact of that would be particularly severe when
+ * three-way hashing is also in use for slice balancing (which is the
+ * case for all Gen9 GT4 platforms), because one of the slices
+ * receives one every three 16x16 blocks in either direction, which
+ * is roughly the periodicity of the underlying subslice imbalance
+ * pattern ("roughly" because in reality the hardware's
+ * implementation of three-way hashing doesn't do exact modulo 3
+ * arithmetic, which somewhat decreases the magnitude of this effect
+ * in practice). This leads to a systematic subslice imbalance
+ * within that slice regardless of the size of the primitive. The
+ * 32x32 hashing mode guarantees that the subslice imbalance within a
+ * single slice hashing block is minimal, largely eliminating this
+ * effect.
+ */
+ _32x32,
+ /* Finest slice hashing mode available. */
+ NORMAL
+ };
+ const unsigned subslice_hashing[] = {
+ /* 16x16 would provide a slight cache locality benefit especially
+ * visible in the sampler L1 cache efficiency of low-bandwidth
+ * non-LLC platforms, but it comes at the cost of greater subslice
+ * imbalance for primitives of dimensions approximately intermediate
+ * between 16x4 and 16x16.
+ */
+ _16x4,
+ /* Finest subslice hashing mode available. */
+ _8x4
+ };
+ /* Dimensions of the smallest hashing block of a given hashing mode. If
+ * the rendering area is smaller than this there can't possibly be any
+ * benefit from switching to this mode, so we optimize out the
+ * transition.
+ */
+ const unsigned min_size[][2] = {
+ { 16, 4 },
+ { 8, 4 }
+ };
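+   /* idx 0 selects the coarser modes for ordinary drawing; idx 1 selects
+    * the finest modes available, for operations like fast clears and
+    * resolves where a single PS invocation covers many samples.
+    */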
+ const unsigned idx = scale > 1;
+
+ if (width > min_size[idx][0] || height > min_size[idx][1]) {
+ uint32_t gt_mode;
+
+      iris_pack_state(GENX(GT_MODE), &gt_mode, reg) {
+ reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
+ reg.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0);
+ reg.SubsliceHashing = subslice_hashing[idx];
+ reg.SubsliceHashingMask = -1;
+ };
+
+ iris_emit_raw_pipe_control(batch,
+ "workaround: CS stall before GT_MODE LRI",
+ PIPE_CONTROL_STALL_AT_SCOREBOARD |
+ PIPE_CONTROL_CS_STALL,
+ NULL, 0, 0);
+
+ iris_emit_lri(batch, GT_MODE, gt_mode);
+
+ ice->state.current_hash_scale = scale;
+ }
+#endif
+}
+
void
genX(init_state)(struct iris_context *ice)
{
ice->vtbl.update_surface_base_address = iris_update_surface_base_address;
ice->vtbl.upload_compute_state = iris_upload_compute_state;
ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
+ ice->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
ice->vtbl.rebind_buffer = iris_rebind_buffer;
ice->vtbl.load_register_reg32 = iris_load_register_reg32;
ice->vtbl.load_register_reg64 = iris_load_register_reg64;
ice->vtbl.populate_fs_key = iris_populate_fs_key;
ice->vtbl.populate_cs_key = iris_populate_cs_key;
ice->vtbl.mocs = mocs;
+ ice->vtbl.lost_genx_state = iris_lost_genx_state;
ice->state.dirty = ~0ull;