}
#define iris_emit_lri(b, r, v) _iris_emit_lri(b, GENX(r##_num), v)
-/**
- * Upload the initial GPU state for a render context.
- *
- * This sets some invariant state that needs to be programmed a particular
- * way, but we never actually change.
- */
static void
-iris_init_render_context(struct iris_screen *screen,
- struct iris_batch *batch,
- struct iris_vtable *vtbl,
- struct pipe_debug_callback *dbg)
+emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
{
- uint32_t reg_val;
+#if GEN_GEN >= 8 && GEN_GEN < 10
+ /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
+ *
+ * Software must clear the COLOR_CALC_STATE Valid field in
+ * 3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
+ * with Pipeline Select set to GPGPU.
+ *
+ * The internal hardware docs recommend the same workaround for Gen9
+ * hardware too.
+ */
+ if (pipeline == GPGPU)
+ iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
+#endif
+
- iris_init_batch(batch, screen, vtbl, dbg, I915_EXEC_RENDER);
+ /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
+ * PIPELINE_SELECT [DevBWR+]":
+ *
+ * "Project: DEVSNB+
+ *
+ * Software must ensure all the write caches are flushed through a
+ * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
+ * command to invalidate read only caches prior to programming
+ * MI_PIPELINE_SELECT command to change the Pipeline Select Mode."
+ */
+ iris_emit_pipe_control_flush(batch,
+ PIPE_CONTROL_RENDER_TARGET_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DATA_CACHE_FLUSH |
+ PIPE_CONTROL_CS_STALL);
+
+ iris_emit_pipe_control_flush(batch,
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+ PIPE_CONTROL_CONST_CACHE_INVALIDATE |
+ PIPE_CONTROL_STATE_CACHE_INVALIDATE |
+ PIPE_CONTROL_INSTRUCTION_INVALIDATE);
+
+ iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
+#if GEN_GEN >= 9
+ sel.MaskBits = 3;
+#endif
+ sel.PipelineSelection = pipeline;
+ }
+}
+UNUSED static void
+init_glk_barrier_mode(struct iris_batch *batch, uint32_t value)
+{
+#if GEN_GEN == 9
+ /* Project: DevGLK
+ *
+ * "This chicken bit works around a hardware issue with barrier
+ * logic encountered when switching between GPGPU and 3D pipelines.
+ * To workaround the issue, this mode bit should be set after a
+ * pipeline is selected."
+ */
+ uint32_t reg_val;
+   iris_pack_state(GENX(SLICE_COMMON_ECO_CHICKEN1), &reg_val, reg) {
+ reg.GLKBarrierMode = value;
+ reg.GLKBarrierModeMask = 1;
+ }
+ iris_emit_lri(batch, SLICE_COMMON_ECO_CHICKEN1, reg_val);
+#endif
+}
+
+static void
+init_state_base_address(struct iris_batch *batch)
+{
flush_for_state_base_change(batch);
/* We program most base addresses once at context initialization time.
sba.InstructionBufferSize = 0xfffff;
sba.DynamicStateBufferSize = 0xfffff;
}
+}
+
+/**
+ * Upload the initial GPU state for a render context.
+ *
+ * This sets some invariant state that needs to be programmed a particular
+ * way, but we never actually change.
+ */
+static void
+iris_init_render_context(struct iris_screen *screen,
+ struct iris_batch *batch,
+ struct iris_vtable *vtbl,
+ struct pipe_debug_callback *dbg)
+{
+ UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
+ uint32_t reg_val;
+
+ emit_pipeline_select(batch, _3D);
+
+ init_state_base_address(batch);
// XXX: INSTPM on Gen8
   iris_pack_state(GENX(CS_DEBUG_MODE2), &reg_val, reg) {
reg.PartialResolveDisableInVCMask = true;
}
iris_emit_lri(batch, CACHE_MODE_1, reg_val);
+
+ if (devinfo->is_geminilake)
+ init_glk_barrier_mode(batch, GLK_BARRIER_MODE_3D_HULL);
#endif
#if GEN_GEN == 11
}
}
+static void
+iris_init_compute_context(struct iris_screen *screen,
+ struct iris_batch *batch,
+ struct iris_vtable *vtbl,
+ struct pipe_debug_callback *dbg)
+{
+ UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
+
+ emit_pipeline_select(batch, GPGPU);
+
+ const bool has_slm = true;
+ const bool wants_dc_cache = true;
+
+ const struct gen_l3_weights w =
+ gen_get_default_l3_weights(devinfo, wants_dc_cache, has_slm);
+ const struct gen_l3_config *cfg = gen_get_l3_config(devinfo, w);
+
+ uint32_t reg_val;
+   iris_pack_state(GENX(L3CNTLREG), &reg_val, reg) {
+ reg.SLMEnable = has_slm;
+#if GEN_GEN == 11
+ /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
+ * in L3CNTLREG register. The default setting of the bit is not the
+ * desirable behavior.
+ */
+ reg.ErrorDetectionBehaviorControl = true;
+#endif
+ reg.URBAllocation = cfg->n[GEN_L3P_URB];
+ reg.ROAllocation = cfg->n[GEN_L3P_RO];
+ reg.DCAllocation = cfg->n[GEN_L3P_DC];
+ reg.AllAllocation = cfg->n[GEN_L3P_ALL];
+ }
+ iris_emit_lri(batch, L3CNTLREG, reg_val);
+
+ init_state_base_address(batch);
+
+#if GEN_GEN == 9
+ if (devinfo->is_geminilake)
+ init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
+#endif
+}
+
struct iris_vertex_buffer_state {
/** The 3DSTATE_VERTEX_BUFFERS hardware packet. */
uint32_t vertex_buffers[1 + 33 * GENX(VERTEX_BUFFER_STATE_length)];
uint32_t streamout[4 * GENX(3DSTATE_STREAMOUT_length)];
};
-// XXX: move this to iris_draw.c
-static void
-iris_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *info)
-{
-}
-
/**
* The pipe->set_blend_color() driver hook.
*
const struct pipe_blend_state *state)
{
struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
- uint32_t *blend_state = cso->blend_state;
+ uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);
cso->alpha_to_coverage = state->alpha_to_coverage;
+ bool indep_alpha_blend = false;
+
+ for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
+ const struct pipe_rt_blend_state *rt =
+ &state->rt[state->independent_blend_enable ? i : 0];
+
+ if (rt->rgb_func != rt->alpha_func ||
+ rt->rgb_src_factor != rt->alpha_src_factor ||
+ rt->rgb_dst_factor != rt->alpha_dst_factor)
+ indep_alpha_blend = true;
+
+ iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
+ be.LogicOpEnable = state->logicop_enable;
+ be.LogicOpFunction = state->logicop_func;
+
+ be.PreBlendSourceOnlyClampEnable = false;
+ be.ColorClampRange = COLORCLAMP_RTFORMAT;
+ be.PreBlendColorClampEnable = true;
+ be.PostBlendColorClampEnable = true;
+
+ be.ColorBufferBlendEnable = rt->blend_enable;
+
+ be.ColorBlendFunction = rt->rgb_func;
+ be.AlphaBlendFunction = rt->alpha_func;
+ be.SourceBlendFactor = rt->rgb_src_factor;
+ be.SourceAlphaBlendFactor = rt->alpha_src_factor;
+ be.DestinationBlendFactor = rt->rgb_dst_factor;
+ be.DestinationAlphaBlendFactor = rt->alpha_dst_factor;
+
+ be.WriteDisableRed = !(rt->colormask & PIPE_MASK_R);
+ be.WriteDisableGreen = !(rt->colormask & PIPE_MASK_G);
+ be.WriteDisableBlue = !(rt->colormask & PIPE_MASK_B);
+ be.WriteDisableAlpha = !(rt->colormask & PIPE_MASK_A);
+ }
+ blend_entry += GENX(BLEND_STATE_ENTRY_length);
+ }
+
iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
/* pb.HasWriteableRT is filled in at draw time. */
/* pb.AlphaTestEnable is filled in at draw time. */
pb.AlphaToCoverageEnable = state->alpha_to_coverage;
- pb.IndependentAlphaBlendEnable = state->independent_blend_enable;
+ pb.IndependentAlphaBlendEnable = indep_alpha_blend;
pb.ColorBufferBlendEnable = state->rt[0].blend_enable;
pb.SourceBlendFactor = state->rt[0].rgb_src_factor;
- pb.SourceAlphaBlendFactor = state->rt[0].alpha_func;
+ pb.SourceAlphaBlendFactor = state->rt[0].alpha_src_factor;
pb.DestinationBlendFactor = state->rt[0].rgb_dst_factor;
pb.DestinationAlphaBlendFactor = state->rt[0].alpha_dst_factor;
}
- iris_pack_state(GENX(BLEND_STATE), blend_state, bs) {
+ iris_pack_state(GENX(BLEND_STATE), cso->blend_state, bs) {
bs.AlphaToCoverageEnable = state->alpha_to_coverage;
- bs.IndependentAlphaBlendEnable = state->independent_blend_enable;
+ bs.IndependentAlphaBlendEnable = indep_alpha_blend;
bs.AlphaToOneEnable = state->alpha_to_one;
bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
bs.ColorDitherEnable = state->dither;
/* bl.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
}
- blend_state += GENX(BLEND_STATE_length);
-
- for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
- iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_state, be) {
- be.LogicOpEnable = state->logicop_enable;
- be.LogicOpFunction = state->logicop_func;
-
- be.PreBlendSourceOnlyClampEnable = false;
- be.ColorClampRange = COLORCLAMP_RTFORMAT;
- be.PreBlendColorClampEnable = true;
- be.PostBlendColorClampEnable = true;
-
- be.ColorBufferBlendEnable = state->rt[i].blend_enable;
-
- be.ColorBlendFunction = state->rt[i].rgb_func;
- be.AlphaBlendFunction = state->rt[i].alpha_func;
- be.SourceBlendFactor = state->rt[i].rgb_src_factor;
- be.SourceAlphaBlendFactor = state->rt[i].alpha_func;
- be.DestinationBlendFactor = state->rt[i].rgb_dst_factor;
- be.DestinationAlphaBlendFactor = state->rt[i].alpha_dst_factor;
-
- be.WriteDisableRed = !(state->rt[i].colormask & PIPE_MASK_R);
- be.WriteDisableGreen = !(state->rt[i].colormask & PIPE_MASK_G);
- be.WriteDisableBlue = !(state->rt[i].colormask & PIPE_MASK_B);
- be.WriteDisableAlpha = !(state->rt[i].colormask & PIPE_MASK_A);
- }
- blend_state += GENX(BLEND_STATE_ENTRY_length);
- }
return cso;
}
uint32_t wm[GENX(3DSTATE_WM_length)];
uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];
+ uint8_t num_clip_plane_consts;
bool clip_halfz; /* for CC_VIEWPORT */
bool depth_clip_near; /* for CC_VIEWPORT */
bool depth_clip_far; /* for CC_VIEWPORT */
cso->line_stipple_enable = state->line_stipple_enable;
cso->poly_stipple_enable = state->poly_stipple_enable;
+ if (state->clip_plane_enable != 0)
+ cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1;
+ else
+ cso->num_clip_plane_consts = 0;
+
float line_width = get_line_width(state);
iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
state->line_smooth ? _10pixels : _05pixels;
sf.LastPixelEnable = state->line_last_pixel;
sf.LineWidth = line_width;
- sf.SmoothPointEnable = state->point_smooth;
+ sf.SmoothPointEnable = state->point_smooth || state->multisample;
sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
sf.PointWidth = state->point_size;
rr.GlobalDepthOffsetConstant = state->offset_units * 2;
rr.GlobalDepthOffsetScale = state->offset_scale;
rr.GlobalDepthOffsetClamp = state->offset_clamp;
- rr.SmoothPointEnable = state->point_smooth;
+ rr.SmoothPointEnable = state->point_smooth || state->multisample;
rr.AntialiasingEnable = state->line_smooth;
rr.ScissorRectangleEnable = state->scissor;
rr.ViewportZNearClipTestEnable = state->depth_clip_near;
wm.LineAntialiasingRegionWidth = _10pixels;
wm.LineEndCapAntialiasingRegionWidth = _05pixels;
wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
- wm.StatisticsEnable = true;
wm.LineStippleEnable = state->line_stipple_enable;
wm.PolygonStippleEnable = state->poly_stipple_enable;
}
}
}
+static void
+fill_buffer_surface_state(struct isl_device *isl_dev,
+ struct iris_bo *bo,
+ void *map,
+ enum isl_format format,
+ unsigned offset,
+ unsigned size)
+{
+ const struct isl_format_layout *fmtl = isl_format_get_layout(format);
+ const unsigned cpp = fmtl->bpb / 8;
+
+ /* The ARB_texture_buffer_specification says:
+ *
+ * "The number of texels in the buffer texture's texel array is given by
+ *
+ * floor(<buffer_size> / (<components> * sizeof(<base_type>)),
+ *
+ * where <buffer_size> is the size of the buffer object, in basic
+ * machine units and <components> and <base_type> are the element count
+ * and base data type for elements, as specified in Table X.1. The
+ * number of texels in the texel array is then clamped to the
+ * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
+ *
+ * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
+ * so that when ISL divides by stride to obtain the number of texels, that
+ * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
+ */
+ unsigned final_size =
+ MIN3(size, bo->size - offset, IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);
+
+ isl_buffer_fill_state(isl_dev, map,
+ .address = bo->gtt_offset + offset,
+ .size_B = final_size,
+ .format = format,
+ .stride_B = cpp,
+ .mocs = MOCS_WB);
+}
+
/**
* The pipe->create_sampler_view() driver hook.
*/
if (tmpl->target != PIPE_BUFFER) {
isv->view.base_level = tmpl->u.tex.first_level;
isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;
+ // XXX: do I need to port f9fd0cf4790cb2a530e75d1a2206dbb9d8af7cb2?
isv->view.base_array_layer = tmpl->u.tex.first_layer;
isv->view.array_len =
tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
// .aux_surf =
// .clear_color = clear_color,
} else {
- // XXX: what to do about isv->view? other drivers don't use it for bufs
- const struct isl_format_layout *fmtl =
- isl_format_get_layout(isv->view.format);
- const unsigned cpp = fmtl->bpb / 8;
-
- isl_buffer_fill_state(&screen->isl_dev, map,
- .address = isv->res->bo->gtt_offset +
- tmpl->u.buf.offset,
- // XXX: buffer_texture_range_size from i965?
- .size_B = tmpl->u.buf.size,
- .format = isv->view.format,
- .stride_B = cpp,
- .mocs = MOCS_WB);
+ fill_buffer_surface_state(&screen->isl_dev, isv->res->bo, map,
+ isv->view.format, tmpl->u.buf.offset,
+ tmpl->u.buf.size);
}
return &isv->base;
isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
enum isl_format isl_format =
iris_format_for_usage(devinfo, img->format, usage).fmt;
- isl_format = isl_lower_storage_image_format(devinfo, isl_format);
+
+ if (img->shader_access & PIPE_IMAGE_ACCESS_READ)
+ isl_format = isl_lower_storage_image_format(devinfo, isl_format);
+
+ shs->image[start_slot + i].access = img->shader_access;
if (res->base.target != PIPE_BUFFER) {
struct isl_view view = {
// .aux_surf =
// .clear_color = clear_color,
} else {
- // XXX: what to do about view? other drivers don't use it for bufs
- const struct isl_format_layout *fmtl =
- isl_format_get_layout(isl_format);
- const unsigned cpp = fmtl->bpb / 8;
-
- isl_buffer_fill_state(&screen->isl_dev, map,
- .address = res->bo->gtt_offset,
- // XXX: buffer_texture_range_size from i965?
- .size_B = res->base.width0,
- .format = isl_format,
- .stride_B = cpp,
- .mocs = MOCS_WB);
+ fill_buffer_surface_state(&screen->isl_dev, res->bo, map,
+ isl_format, img->u.buf.offset,
+ img->u.buf.size);
}
} else {
pipe_resource_reference(&shs->image[start_slot + i].res, NULL);
free(surf);
}
-// XXX: actually implement user clip planes
static void
iris_set_clip_state(struct pipe_context *ctx,
const struct pipe_clip_state *state)
{
+ struct iris_context *ice = (struct iris_context *) ctx;
+ struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
+
+ memcpy(&ice->state.clip_planes, state, sizeof(*state));
+
+ ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS;
+ shs->cbuf0_needs_upload = true;
}
/**
iris_set_scissor_states(struct pipe_context *ctx,
unsigned start_slot,
unsigned num_scissors,
- const struct pipe_scissor_state *states)
+ const struct pipe_scissor_state *rects)
{
struct iris_context *ice = (struct iris_context *) ctx;
for (unsigned i = 0; i < num_scissors; i++) {
- ice->state.scissors[start_slot + i] = states[i];
+ if (rects[i].minx == rects[i].maxx || rects[i].miny == rects[i].maxy) {
+ /* If the scissor was out of bounds and got clamped to 0 width/height
+ * at the bounds, the subtraction of 1 from maximums could produce a
+ * negative number and thus not clip anything. Instead, just provide
+ * a min > max scissor inside the bounds, which produces the expected
+ * no rendering.
+ */
+ ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
+ .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
+ };
+ } else {
+ ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
+ .minx = rects[i].minx, .miny = rects[i].miny,
+ .maxx = rects[i].maxx - 1, .maxy = rects[i].maxy - 1,
+ };
+ }
}
ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
{
struct iris_context *ice = (struct iris_context *) ctx;
struct iris_genx_state *genx = ice->state.genx;
- uint32_t *vp_map = &genx->sf_cl_vp[start_slot];
+ uint32_t *vp_map =
+ &genx->sf_cl_vp[start_slot * GENX(SF_CLIP_VIEWPORT_length)];
for (unsigned i = 0; i < count; i++) {
const struct pipe_viewport_state *state = &states[i];
void *null_surf_map =
upload_state(ice->state.surface_uploader, &ice->state.null_fb,
4 * GENX(RENDER_SURFACE_STATE_length), 64);
- isl_null_fill_state(&screen->isl_dev, null_surf_map, isl_extent3d(cso->width, cso->height, cso->layers ? cso->layers : 1));
+ isl_null_fill_state(&screen->isl_dev, null_surf_map,
+ isl_extent3d(MAX2(cso->width, 1),
+ MAX2(cso->height, 1),
+ cso->layers ? cso->layers : 1));
ice->state.null_fb.offset +=
iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));
#endif
}
+static void
+upload_ubo_surf_state(struct iris_context *ice,
+ struct iris_const_buffer *cbuf,
+ unsigned buffer_size)
+{
+ struct pipe_context *ctx = &ice->ctx;
+ struct iris_screen *screen = (struct iris_screen *) ctx->screen;
+
+ // XXX: these are not retained forever, use a separate uploader?
+ void *map =
+ upload_state(ice->state.surface_uploader, &cbuf->surface_state,
+ 4 * GENX(RENDER_SURFACE_STATE_length), 64);
+   if (unlikely(!map)) {
+ pipe_resource_reference(&cbuf->data.res, NULL);
+ return;
+ }
+
+ struct iris_resource *res = (void *) cbuf->data.res;
+ struct iris_bo *surf_bo = iris_resource_bo(cbuf->surface_state.res);
+ cbuf->surface_state.offset += iris_bo_offset_from_base_address(surf_bo);
+
+ isl_buffer_fill_state(&screen->isl_dev, map,
+ .address = res->bo->gtt_offset + cbuf->data.offset,
+ .size_B = MIN2(buffer_size,
+ res->bo->size - cbuf->data.offset),
+ .format = ISL_FORMAT_R32G32B32A32_FLOAT,
+ .stride_B = 1,
+                         .mocs = MOCS_WB);
+}
+
/**
* The pipe->set_constant_buffer() driver hook.
*
const struct pipe_constant_buffer *input)
{
struct iris_context *ice = (struct iris_context *) ctx;
- struct iris_screen *screen = (struct iris_screen *)ctx->screen;
gl_shader_stage stage = stage_from_pipe(p_stage);
struct iris_shader_state *shs = &ice->state.shaders[stage];
struct iris_const_buffer *cbuf = &shs->constbuf[index];
- if (input && (input->buffer || input->user_buffer)) {
- if (input->user_buffer) {
- u_upload_data(ctx->const_uploader, 0, input->buffer_size, 32,
- input->user_buffer, &cbuf->data.offset,
- &cbuf->data.res);
- } else {
- pipe_resource_reference(&cbuf->data.res, input->buffer);
- }
+ if (input && input->buffer) {
+ assert(index > 0);
- // XXX: these are not retained forever, use a separate uploader?
- void *map =
- upload_state(ice->state.surface_uploader, &cbuf->surface_state,
- 4 * GENX(RENDER_SURFACE_STATE_length), 64);
- if (!unlikely(map)) {
- pipe_resource_reference(&cbuf->data.res, NULL);
- return;
- }
+ pipe_resource_reference(&cbuf->data.res, input->buffer);
+ cbuf->data.offset = input->buffer_offset;
- struct iris_resource *res = (void *) cbuf->data.res;
- struct iris_bo *surf_bo = iris_resource_bo(cbuf->surface_state.res);
- cbuf->surface_state.offset += iris_bo_offset_from_base_address(surf_bo);
-
- isl_buffer_fill_state(&screen->isl_dev, map,
- .address = res->bo->gtt_offset + cbuf->data.offset,
- .size_B = input->buffer_size,
- .format = ISL_FORMAT_R32G32B32A32_FLOAT,
- .stride_B = 1,
-                            .mocs = MOCS_WB);
+ upload_ubo_surf_state(ice, cbuf, input->buffer_size);
} else {
pipe_resource_reference(&cbuf->data.res, NULL);
pipe_resource_reference(&cbuf->surface_state.res, NULL);
}
+ if (index == 0) {
+ if (input)
+ memcpy(&shs->cbuf0, input, sizeof(shs->cbuf0));
+ else
+ memset(&shs->cbuf0, 0, sizeof(shs->cbuf0));
+
+ shs->cbuf0_needs_upload = true;
+ }
+
ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
// XXX: maybe not necessary all the time...?
// XXX: we need 3DS_BTP to commit these changes, and if we fell back to
ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
}
+static void
+upload_uniforms(struct iris_context *ice,
+ gl_shader_stage stage)
+{
+ struct iris_shader_state *shs = &ice->state.shaders[stage];
+ struct iris_const_buffer *cbuf = &shs->constbuf[0];
+ struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+
+ unsigned upload_size = shader->num_system_values * sizeof(uint32_t) +
+ shs->cbuf0.buffer_size;
+
+ if (upload_size == 0)
+ return;
+
+ uint32_t *map =
+ upload_state(ice->ctx.const_uploader, &cbuf->data, upload_size, 64);
+
+ for (int i = 0; i < shader->num_system_values; i++) {
+ uint32_t sysval = shader->system_values[i];
+ uint32_t value = 0;
+
+ if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
+ int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval);
+ int comp = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval);
+ value = fui(ice->state.clip_planes.ucp[plane][comp]);
+ } else {
+ assert(!"unhandled system value");
+ }
+
+ *map++ = value;
+ }
+
+ if (shs->cbuf0.user_buffer) {
+ memcpy(map, shs->cbuf0.user_buffer, shs->cbuf0.buffer_size);
+ }
+
+ upload_ubo_surf_state(ice, cbuf, upload_size);
+}
+
/**
* The pipe->set_shader_buffers() driver hook.
*
isl_buffer_fill_state(&screen->isl_dev, map,
.address =
res->bo->gtt_offset + buffer->buffer_offset,
- .size_B = buffer->buffer_size,
+ .size_B =
+ MIN2(buffer->buffer_size,
+ res->bo->size - buffer->buffer_offset),
.format = ISL_FORMAT_RAW,
.stride_B = 1,
.mocs = MOCS_WB);
vb.MOCS = MOCS_WB;
vb.AddressModifyEnable = true;
vb.BufferPitch = buffers[i].stride;
- vb.BufferSize = res->bo->size;
- vb.BufferStartingAddress =
- ro_bo(NULL, res->bo->gtt_offset + buffers[i].buffer_offset);
+ if (res) {
+ vb.BufferSize = res->bo->size;
+ vb.BufferStartingAddress =
+ ro_bo(NULL, res->bo->gtt_offset + buffers[i].buffer_offset);
+ } else {
+ vb.NullVertexBuffer = true;
+ }
}
vb_pack_dest += GENX(VERTEX_BUFFER_STATE_length);
if (ice->state.streamout_active != active) {
ice->state.streamout_active = active;
ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
+
+ /* We only emit 3DSTATE_SO_DECL_LIST when streamout is active, because
+ * it's a non-pipelined command. If we're switching streamout on, we
+ * may have missed emitting it earlier, so do so now. (We're already
+ * taking a stall to update 3DSTATE_SO_BUFFERS anyway...)
+ */
+ if (active)
+ ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST;
}
for (int i = 0; i < 4; i++) {
*/
static void
iris_populate_vs_key(const struct iris_context *ice,
+ const struct shader_info *info,
struct brw_vs_prog_key *key)
{
+ const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
+
iris_populate_sampler_key(ice, &key->tex);
+
+ if (info->clip_distance_array_size == 0 &&
+ (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)))
+ key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
// XXX: respect hint for high_quality_derivatives:1;
}
+static void
+iris_populate_cs_key(const struct iris_context *ice,
+ struct brw_cs_prog_key *key)
+{
+ iris_populate_sampler_key(ice, &key->tex);
+}
+
#if 0
// XXX: these need to go in INIT_THREAD_DISPATCH_FIELDS
pkt.SamplerCount = \
DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4); \
- pkt.PerThreadScratchSpace = prog_data->total_scratch == 0 ? 0 : \
- ffs(stage_state->per_thread_scratch) - 11; \
#endif
// prefetching of binding tables in A0 and B0 steppings. XXX: Revisit
// this WA on C0 stepping.
-#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix) \
+#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage) \
pkt.KernelStartPointer = KSP(shader); \
pkt.BindingTableEntryCount = GEN_GEN == 11 ? 0 : \
prog_data->binding_table.size_bytes / 4; \
pkt.prefix##URBEntryReadOffset = 0; \
\
pkt.StatisticsEnable = true; \
- pkt.Enable = true;
+ pkt.Enable = true; \
+ \
+ if (prog_data->total_scratch) { \
+ uint32_t scratch_addr = \
+ iris_get_scratch_space(ice, prog_data->total_scratch, stage); \
+ pkt.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11; \
+ pkt.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr); \
+ }
/**
* Encode most of 3DSTATE_VS based on the compiled shader.
*/
static void
-iris_store_vs_state(const struct gen_device_info *devinfo,
+iris_store_vs_state(struct iris_context *ice,
+ const struct gen_device_info *devinfo,
struct iris_compiled_shader *shader)
{
struct brw_stage_prog_data *prog_data = shader->prog_data;
struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
- INIT_THREAD_DISPATCH_FIELDS(vs, Vertex);
+ INIT_THREAD_DISPATCH_FIELDS(vs, Vertex, MESA_SHADER_VERTEX);
vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
vs.SIMD8DispatchEnable = true;
vs.UserClipDistanceCullTestEnableBitmask =
* Encode most of 3DSTATE_HS based on the compiled shader.
*/
static void
-iris_store_tcs_state(const struct gen_device_info *devinfo,
+iris_store_tcs_state(struct iris_context *ice,
+ const struct gen_device_info *devinfo,
struct iris_compiled_shader *shader)
{
struct brw_stage_prog_data *prog_data = shader->prog_data;
struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;
iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
- INIT_THREAD_DISPATCH_FIELDS(hs, Vertex);
+ INIT_THREAD_DISPATCH_FIELDS(hs, Vertex, MESA_SHADER_TESS_CTRL);
hs.InstanceCount = tcs_prog_data->instances - 1;
hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
* Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
*/
static void
-iris_store_tes_state(const struct gen_device_info *devinfo,
+iris_store_tes_state(struct iris_context *ice,
+ const struct gen_device_info *devinfo,
struct iris_compiled_shader *shader)
{
struct brw_stage_prog_data *prog_data = shader->prog_data;
}
iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
- INIT_THREAD_DISPATCH_FIELDS(ds, Patch);
+ INIT_THREAD_DISPATCH_FIELDS(ds, Patch, MESA_SHADER_TESS_EVAL);
ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
* Encode most of 3DSTATE_GS based on the compiled shader.
*/
static void
-iris_store_gs_state(const struct gen_device_info *devinfo,
+iris_store_gs_state(struct iris_context *ice,
+ const struct gen_device_info *devinfo,
struct iris_compiled_shader *shader)
{
struct brw_stage_prog_data *prog_data = shader->prog_data;
struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;
iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
- INIT_THREAD_DISPATCH_FIELDS(gs, Vertex);
+ INIT_THREAD_DISPATCH_FIELDS(gs, Vertex, MESA_SHADER_GEOMETRY);
gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
gs.OutputTopology = gs_prog_data->output_topology;
* Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
*/
static void
-iris_store_fs_state(const struct gen_device_info *devinfo,
+iris_store_fs_state(struct iris_context *ice,
+ const struct gen_device_info *devinfo,
struct iris_compiled_shader *shader)
{
struct brw_stage_prog_data *prog_data = shader->prog_data;
ps.FloatingPointMode = prog_data->use_alt_mode;
ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);
- ps.PushConstantEnable = prog_data->nr_params > 0 ||
+ ps.PushConstantEnable = shader->num_system_values > 0 ||
prog_data->ubo_ranges[0].length > 0;
/* From the documentation for this packet:
KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
ps.KernelStartPointer2 =
KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
+
+ if (prog_data->total_scratch) {
+ uint32_t scratch_addr =
+ iris_get_scratch_space(ice, prog_data->total_scratch,
+ MESA_SHADER_FRAGMENT);
+ ps.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
+ ps.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr);
+ }
}
iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
*
* This must match the data written by the iris_store_xs_state() functions.
*/
+static void
+iris_store_cs_state(struct iris_context *ice,
+ const struct gen_device_info *devinfo,
+ struct iris_compiled_shader *shader)
+{
+ struct brw_stage_prog_data *prog_data = shader->prog_data;
+ struct brw_cs_prog_data *cs_prog_data = (void *) shader->prog_data;
+ void *map = shader->derived_data;
+
+ iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
+ desc.KernelStartPointer = KSP(shader);
+ desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
+ desc.NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads;
+ desc.SharedLocalMemorySize =
+ encode_slm_size(GEN_GEN, prog_data->total_shared);
+ desc.BarrierEnable = cs_prog_data->uses_barrier;
+ desc.CrossThreadConstantDataReadLength =
+ cs_prog_data->push.cross_thread.regs;
+ }
+}
+
static unsigned
iris_derived_program_state_size(enum iris_program_cache_id cache_id)
{
[IRIS_CACHE_GS] = GENX(3DSTATE_GS_length),
[IRIS_CACHE_FS] =
GENX(3DSTATE_PS_length) + GENX(3DSTATE_PS_EXTRA_length),
- [IRIS_CACHE_CS] = 0,
+ [IRIS_CACHE_CS] = GENX(INTERFACE_DESCRIPTOR_DATA_length),
[IRIS_CACHE_BLORP] = 0,
};
* get most of the state packet without having to reconstruct it.
*/
static void
-iris_store_derived_program_state(const struct gen_device_info *devinfo,
+iris_store_derived_program_state(struct iris_context *ice,
enum iris_program_cache_id cache_id,
struct iris_compiled_shader *shader)
{
+ struct iris_screen *screen = (void *) ice->ctx.screen;
+ const struct gen_device_info *devinfo = &screen->devinfo;
+
switch (cache_id) {
case IRIS_CACHE_VS:
- iris_store_vs_state(devinfo, shader);
+ iris_store_vs_state(ice, devinfo, shader);
break;
case IRIS_CACHE_TCS:
- iris_store_tcs_state(devinfo, shader);
+ iris_store_tcs_state(ice, devinfo, shader);
break;
case IRIS_CACHE_TES:
- iris_store_tes_state(devinfo, shader);
+ iris_store_tes_state(ice, devinfo, shader);
break;
case IRIS_CACHE_GS:
- iris_store_gs_state(devinfo, shader);
+ iris_store_gs_state(ice, devinfo, shader);
break;
case IRIS_CACHE_FS:
- iris_store_fs_state(devinfo, shader);
+ iris_store_fs_state(ice, devinfo, shader);
break;
case IRIS_CACHE_CS:
+ iris_store_cs_state(ice, devinfo, shader);
case IRIS_CACHE_BLORP:
break;
default:
[MESA_SHADER_COMPUTE] = 0,
};
+static uint32_t
+use_null_surface(struct iris_batch *batch, struct iris_context *ice)
+{
+ struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);
+
+ iris_use_pinned_bo(batch, state_bo, false);
+
+ return ice->state.unbound_tex.offset;
+}
+
+static uint32_t
+use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
+{
+ /* If set_framebuffer_state() was never called, fall back to 1x1x1 */
+ if (!ice->state.null_fb.res)
+ return use_null_surface(batch, ice);
+
+ struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);
+
+ iris_use_pinned_bo(batch, state_bo, false);
+
+ return ice->state.null_fb.offset;
+}
+
/**
* Add a surface to the validation list, as well as the buffer containing
* the corresponding SURFACE_STATE.
}
static uint32_t
-use_const_buffer(struct iris_batch *batch, struct iris_const_buffer *cbuf)
+use_const_buffer(struct iris_batch *batch,
+ struct iris_context *ice,
+ struct iris_const_buffer *cbuf)
{
+ if (!cbuf->surface_state.res)
+ return use_null_surface(batch, ice);
+
iris_use_pinned_bo(batch, iris_resource_bo(cbuf->data.res), false);
iris_use_pinned_bo(batch, iris_resource_bo(cbuf->surface_state.res), false);
return cbuf->surface_state.offset;
}
-static uint32_t
-use_null_surface(struct iris_batch *batch, struct iris_context *ice)
-{
- struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);
-
- iris_use_pinned_bo(batch, state_bo, false);
-
- return ice->state.unbound_tex.offset;
-}
-
-static uint32_t
-use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
-{
- struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);
-
- iris_use_pinned_bo(batch, state_bo, false);
-
- return ice->state.null_fb.offset;
-}
-
static uint32_t
use_ssbo(struct iris_batch *batch, struct iris_context *ice,
struct iris_shader_state *shs, int i)
return;
}
+ if (stage == MESA_SHADER_COMPUTE) {
+ /* surface for gl_NumWorkGroups */
+ struct iris_state_ref *grid_data = &ice->state.grid_size;
+ struct iris_state_ref *grid_state = &ice->state.grid_surf_state;
+ iris_use_pinned_bo(batch, iris_resource_bo(grid_data->res), false);
+ iris_use_pinned_bo(batch, iris_resource_bo(grid_state->res), false);
+ push_bt_entry(grid_state->offset);
+ }
+
if (stage == MESA_SHADER_FRAGMENT) {
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
/* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
push_bt_entry(addr);
}
- for (int i = 0; i < 1 + info->num_ubos; i++) {
- struct iris_const_buffer *cbuf = &shs->constbuf[i];
- if (!cbuf->surface_state.res)
- break;
+ for (int i = 0; i < info->num_images; i++) {
+ uint32_t addr = use_image(batch, ice, shs, i);
+ push_bt_entry(addr);
+ }
- uint32_t addr = use_const_buffer(batch, cbuf);
+ const int num_ubos = iris_get_shader_num_ubos(ice, stage);
+
+ for (int i = 0; i < num_ubos; i++) {
+ uint32_t addr = use_const_buffer(batch, ice, &shs->constbuf[i]);
push_bt_entry(addr);
}
}
}
- if (info->num_images > 0) {
- for (int i = 0; i < info->num_images; i++) {
- uint32_t addr = use_image(batch, ice, shs, i);
- push_bt_entry(addr);
- }
- }
-
#if 0
// XXX: not implemented yet
assert(prog_data->binding_table.plane_start[1] == 0xd0d0d0d0);
* refer to the old BOs.
*/
static void
-iris_restore_context_saved_bos(struct iris_context *ice,
- struct iris_batch *batch,
- const struct pipe_draw_info *draw)
+iris_restore_render_saved_bos(struct iris_context *ice,
+ struct iris_batch *batch,
+ const struct pipe_draw_info *draw)
{
// XXX: whack IRIS_SHADER_DIRTY_BINDING_TABLE on new batch
}
}
+static void
+iris_restore_compute_saved_bos(struct iris_context *ice,
+ struct iris_batch *batch,
+ const struct pipe_grid_info *grid)
+{
+ const uint64_t clean = ~ice->state.dirty;
+
+ const int stage = MESA_SHADER_COMPUTE;
+ struct iris_shader_state *shs = &ice->state.shaders[stage];
+
+ if (clean & IRIS_DIRTY_CONSTANTS_CS) {
+ struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+
+ if (shader) {
+ struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
+ const struct brw_ubo_range *range = &prog_data->ubo_ranges[0];
+
+ if (range->length > 0) {
+ struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
+ struct iris_resource *res = (void *) cbuf->data.res;
+
+ if (res)
+ iris_use_pinned_bo(batch, res->bo, false);
+ else
+ iris_use_pinned_bo(batch, batch->screen->workaround_bo, false);
+ }
+ }
+ }
+
+ if (clean & IRIS_DIRTY_BINDINGS_CS) {
+ /* Re-pin any buffers referred to by the binding table. */
+ iris_populate_binding_table(ice, batch, stage, true);
+ }
+
+ struct pipe_resource *sampler_res = shs->sampler_table.res;
+ if (sampler_res)
+ iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false);
+
+ if (clean & IRIS_DIRTY_CS) {
+ struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+ if (shader) {
+ struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
+ iris_use_pinned_bo(batch, bo, false);
+ }
+
+ // XXX: scratch buffer
+ }
+}
+
/**
* Possibly emit STATE_BASE_ADDRESS to update Surface State Base Address.
*/
{
const uint64_t dirty = ice->state.dirty;
- if (!dirty)
+ if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER))
return;
struct iris_genx_state *genx = ice->state.genx;
if (!shader)
continue;
+ if (shs->cbuf0_needs_upload)
+ upload_uniforms(ice, stage);
+
struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
if (range->length == 0)
continue;
- // XXX: is range->block a constbuf index? it would be nice
struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
struct iris_resource *res = (void *) cbuf->data.res;
uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];
iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
+ wm.StatisticsEnable = ice->state.statistics_counters_enabled;
+
wm.BarycentricInterpolationMode =
wm_prog_data->barycentric_interp_modes;
iris_batch_emit(batch, cso_z->packets, sizeof(cso_z->packets));
if (cso_fb->zsbuf) {
- struct iris_resource *zres = (void *) cso_fb->zsbuf->texture;
- // XXX: depth might not be writable...
- iris_use_pinned_bo(batch, zres->bo, true);
+ struct iris_resource *zres, *sres;
+ iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
+ &zres, &sres);
+ // XXX: might not be writable...
+ if (zres)
+ iris_use_pinned_bo(batch, zres->bo, true);
+ if (sres)
+ iris_use_pinned_bo(batch, sres->bo, true);
}
}
for (unsigned i = 0; i < cso->num_buffers; i++) {
struct iris_resource *res = (void *) cso->resources[i];
- iris_use_pinned_bo(batch, res->bo, false);
+ if (res)
+ iris_use_pinned_bo(batch, res->bo, false);
}
}
}
}
if (!batch->contains_draw) {
- iris_restore_context_saved_bos(ice, batch, draw);
+ iris_restore_render_saved_bos(ice, batch, draw);
+ batch->contains_draw = true;
+ }
+}
+
+static void
+iris_upload_compute_state(struct iris_context *ice,
+ struct iris_batch *batch,
+ const struct pipe_grid_info *grid)
+{
+ const uint64_t dirty = ice->state.dirty;
+ struct iris_screen *screen = batch->screen;
+ const struct gen_device_info *devinfo = &screen->devinfo;
+ struct iris_binder *binder = &ice->state.binder;
+ struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
+ struct iris_compiled_shader *shader =
+ ice->shaders.prog[MESA_SHADER_COMPUTE];
+ struct brw_stage_prog_data *prog_data = shader->prog_data;
+ struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
+
+ if ((dirty & IRIS_DIRTY_CONSTANTS_CS) && shs->cbuf0_needs_upload)
+ upload_uniforms(ice, MESA_SHADER_COMPUTE);
+
+ if (dirty & IRIS_DIRTY_BINDINGS_CS)
+ iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);
+
+ iris_use_optional_res(batch, shs->sampler_table.res, false);
+ iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false);
+
+ if (ice->state.need_border_colors)
+ iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false);
+
+ if (dirty & IRIS_DIRTY_CS) {
+ /* The MEDIA_VFE_STATE documentation for Gen8+ says:
+ *
+ * "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
+ * the only bits that are changed are scoreboard related: Scoreboard
+ * Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta. For
+ * these scoreboard related states, a MEDIA_STATE_FLUSH is
+ * sufficient."
+ */
+ iris_emit_pipe_control_flush(batch, PIPE_CONTROL_CS_STALL);
+
+ iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
+ if (prog_data->total_scratch) {
+ uint32_t scratch_addr =
+ iris_get_scratch_space(ice, prog_data->total_scratch,
+ MESA_SHADER_COMPUTE);
+ vfe.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
+ vfe.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr);
+ }
+
+ vfe.MaximumNumberofThreads =
+ devinfo->max_cs_threads * screen->subslice_total - 1;
+#if GEN_GEN < 11
+ vfe.ResetGatewayTimer =
+ Resettingrelativetimerandlatchingtheglobaltimestamp;
+#endif
+
+ vfe.NumberofURBEntries = 2;
+ vfe.URBEntryAllocationSize = 2;
+
+ // XXX: Use Indirect Payload Storage?
+ vfe.CURBEAllocationSize =
+ ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
+ cs_prog_data->push.cross_thread.regs, 2);
+ }
+ }
+
+ // XXX: hack iris_set_constant_buffers to upload these thread counts
+ // XXX: along with regular uniforms for compute shaders, somehow.
+
+ uint32_t curbe_data_offset = 0;
+ // TODO: Move subgroup-id into uniforms ubo so we can push uniforms
+ assert(cs_prog_data->push.cross_thread.dwords == 0 &&
+ cs_prog_data->push.per_thread.dwords == 1 &&
+ cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
+ struct pipe_resource *curbe_data_res = NULL;
+ uint32_t *curbe_data_map =
+ stream_state(batch, ice->state.dynamic_uploader, &curbe_data_res,
+ ALIGN(cs_prog_data->push.total.size, 64), 64,
+ &curbe_data_offset);
+ assert(curbe_data_map);
+ memset(curbe_data_map, 0x5a, ALIGN(cs_prog_data->push.total.size, 64));
+ iris_fill_cs_push_const_buffer(cs_prog_data, curbe_data_map);
+
+ if (dirty & IRIS_DIRTY_CONSTANTS_CS) {
+ iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
+ curbe.CURBETotalDataLength =
+ ALIGN(cs_prog_data->push.total.size, 64);
+ curbe.CURBEDataStartAddress = curbe_data_offset;
+ }
+ }
+
+ if (dirty & (IRIS_DIRTY_SAMPLER_STATES_CS |
+ IRIS_DIRTY_BINDINGS_CS |
+ IRIS_DIRTY_CONSTANTS_CS |
+ IRIS_DIRTY_CS)) {
+ struct pipe_resource *desc_res = NULL;
+ uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
+
+ iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
+ idd.SamplerStatePointer = shs->sampler_table.offset;
+ idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
+ }
+
+ for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
+ desc[i] |= ((uint32_t *) shader->derived_data)[i];
+
+ iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
+ load.InterfaceDescriptorTotalLength =
+ GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
+ load.InterfaceDescriptorDataStartAddress =
+ emit_state(batch, ice->state.dynamic_uploader,
+ &desc_res, desc, sizeof(desc), 32);
+ }
+
+ pipe_resource_reference(&desc_res, NULL);
+ }
+
+ uint32_t group_size = grid->block[0] * grid->block[1] * grid->block[2];
+ uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);
+ uint32_t right_mask;
+
+ if (remainder > 0)
+ right_mask = ~0u >> (32 - remainder);
+ else
+ right_mask = ~0u >> (32 - cs_prog_data->simd_size);
+
+#define GPGPU_DISPATCHDIMX 0x2500
+#define GPGPU_DISPATCHDIMY 0x2504
+#define GPGPU_DISPATCHDIMZ 0x2508
+
+ if (grid->indirect) {
+ struct iris_state_ref *grid_size = &ice->state.grid_size;
+ struct iris_bo *bo = iris_resource_bo(grid_size->res);
+ iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+ lrm.RegisterAddress = GPGPU_DISPATCHDIMX;
+ lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 0);
+ }
+ iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+ lrm.RegisterAddress = GPGPU_DISPATCHDIMY;
+ lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 4);
+ }
+ iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+ lrm.RegisterAddress = GPGPU_DISPATCHDIMZ;
+ lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 8);
+ }
+ }
+
+ iris_emit_cmd(batch, GENX(GPGPU_WALKER), ggw) {
+ ggw.IndirectParameterEnable = grid->indirect != NULL;
+ ggw.SIMDSize = cs_prog_data->simd_size / 16;
+ ggw.ThreadDepthCounterMaximum = 0;
+ ggw.ThreadHeightCounterMaximum = 0;
+ ggw.ThreadWidthCounterMaximum = cs_prog_data->threads - 1;
+ ggw.ThreadGroupIDXDimension = grid->grid[0];
+ ggw.ThreadGroupIDYDimension = grid->grid[1];
+ ggw.ThreadGroupIDZDimension = grid->grid[2];
+ ggw.RightExecutionMask = right_mask;
+ ggw.BottomExecutionMask = 0xffffffff;
+ }
+
+ iris_emit_cmd(batch, GENX(MEDIA_STATE_FLUSH), msf);
+
+ if (!batch->contains_draw) {
+ iris_restore_compute_saved_bos(ice, batch, grid);
batch->contains_draw = true;
}
}
* We do these now because they may add post-sync operations or CS stalls.
*/
- if (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) {
+ if (GEN_GEN < 11 && flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) {
/* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
*
* "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
ice->vtbl.destroy_state = iris_destroy_state;
ice->vtbl.init_render_context = iris_init_render_context;
+ ice->vtbl.init_compute_context = iris_init_compute_context;
ice->vtbl.upload_render_state = iris_upload_render_state;
ice->vtbl.update_surface_base_address = iris_update_surface_base_address;
+ ice->vtbl.upload_compute_state = iris_upload_compute_state;
ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
ice->vtbl.load_register_imm32 = iris_load_register_imm32;
ice->vtbl.load_register_imm64 = iris_load_register_imm64;
ice->vtbl.populate_tes_key = iris_populate_tes_key;
ice->vtbl.populate_gs_key = iris_populate_gs_key;
ice->vtbl.populate_fs_key = iris_populate_fs_key;
+ ice->vtbl.populate_cs_key = iris_populate_cs_key;
ice->state.dirty = ~0ull;
+ ice->state.statistics_counters_enabled = true;
+
ice->state.sample_mask = 0xffff;
ice->state.num_viewports = 1;
ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
isl_null_fill_state(&screen->isl_dev, null_surf_map, isl_extent3d(1, 1, 1));
ice->state.unbound_tex.offset +=
iris_bo_offset_from_base_address(iris_resource_bo(ice->state.unbound_tex.res));
+
+ /* Default all scissor rectangles to be empty regions. */
+ for (int i = 0; i < IRIS_MAX_VIEWPORTS; i++) {
+ ice->state.scissors[i] = (struct pipe_scissor_state) {
+ .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
+ };
+ }
}