*out_offset += iris_bo_offset_from_base_address(bo);
+ iris_record_state_size(batch->state_sizes, *out_offset, size);
+
return ptr;
}
* desirable behavior.
*/
reg.ErrorDetectionBehaviorControl = true;
+ reg.UseFullWays = true;
#endif
reg.URBAllocation = cfg->n[GEN_L3P_URB];
reg.ROAllocation = cfg->n[GEN_L3P_RO];
iris_emit_l3_config(batch, cfg, has_slm, wants_dc_cache);
}
+#if GEN_GEN == 9 || GEN_GEN == 10
+static void
+iris_enable_obj_preemption(struct iris_batch *batch, bool enable)
+{
+ uint32_t reg_val;
+
+   /* A fixed-function pipe flush is required before modifying this field. */
+ iris_emit_end_of_pipe_sync(batch, PIPE_CONTROL_RENDER_TARGET_FLUSH);
+
+ /* enable object level preemption */
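+   /* CS_CHICKEN1 is a masked register: the upper 16 bits are write-enable
+    * masks for the lower 16, so ReplayModeMask must be set for the
+    * ReplayMode bit to actually be written.
+    */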
+   iris_pack_state(GENX(CS_CHICKEN1), &reg_val, reg) {
+ reg.ReplayMode = enable;
+ reg.ReplayModeMask = true;
+ }
+ iris_emit_lri(batch, CS_CHICKEN1, reg_val);
+}
+#endif
+
/**
* Upload the initial GPU state for a render context.
*
alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
}
}
+
+#if GEN_GEN == 10
+   /* Gen11+ is enabled for us by the kernel.  Gen9 needs per-draw toggling
+    * for various workarounds (see gen9_toggle_preemption), so only Gen10
+    * turns it on statically here.
+    */
+ iris_enable_obj_preemption(batch, true);
+#endif
}
static void
struct iris_depth_buffer_state depth_buffer;
uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];
+
+#if GEN_GEN == 9
+ /* Is object level preemption enabled? */
+ bool object_preemption;
+#endif
+
+ struct {
+#if GEN_GEN == 8
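+      /* Gen8 lowers image load/store using brw_image_param metadata that
+       * gets pushed as constants; Gen9+ accesses images directly via
+       * SURFACE_STATE, so this is Gen8-only.
+       */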
+ struct brw_image_param image_param[PIPE_MAX_SHADER_IMAGES];
+#endif
+ } shaders[MESA_SHADER_STAGES];
};
/**
}
iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
- /* pb.HasWriteableRT is filled in at draw time. */
- /* pb.AlphaTestEnable is filled in at draw time. */
+ /* pb.HasWriteableRT is filled in at draw time.
+ * pb.AlphaTestEnable is filled in at draw time.
+ *
+ * pb.ColorBufferBlendEnable is filled in at draw time so we can avoid
+ * setting it when dual color blending without an appropriate shader.
+ */
+
pb.AlphaToCoverageEnable = state->alpha_to_coverage;
pb.IndependentAlphaBlendEnable = indep_alpha_blend;
- pb.ColorBufferBlendEnable = state->rt[0].blend_enable;
-
pb.SourceBlendFactor =
fix_blendfactor(state->rt[0].rgb_src_factor, state->alpha_to_one);
pb.SourceAlphaBlendFactor =
bool poly_stipple_enable;
bool multisample;
bool force_persample_interp;
+ bool conservative_rasterization;
+ bool fill_mode_point_or_line;
enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
uint16_t sprite_coord_enable;
};
cso->sprite_coord_enable = state->sprite_coord_enable;
cso->line_stipple_enable = state->line_stipple_enable;
cso->poly_stipple_enable = state->poly_stipple_enable;
+ cso->conservative_rasterization =
+ state->conservative_raster_mode == PIPE_CONSERVATIVE_RASTER_POST_SNAP;
+
+ cso->fill_mode_point_or_line =
+ state->fill_front == PIPE_POLYGON_MODE_LINE ||
+ state->fill_front == PIPE_POLYGON_MODE_POINT ||
+ state->fill_back == PIPE_POLYGON_MODE_LINE ||
+ state->fill_back == PIPE_POLYGON_MODE_POINT;
if (state->clip_plane_enable != 0)
cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1;
#if GEN_GEN >= 9
rr.ViewportZNearClipTestEnable = state->depth_clip_near;
rr.ViewportZFarClipTestEnable = state->depth_clip_far;
+ rr.ConservativeRasterizationEnable =
+ cso->conservative_rasterization;
#else
rr.ViewportZClipTestEnable = (state->depth_clip_near || state->depth_clip_far);
#endif
- /* TODO: ConservativeRasterizationEnable */
}
iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
cl.GuardbandClipTestEnable = true;
cl.ClipEnable = true;
- cl.ViewportXYClipTestEnable = state->point_tri_clip;
cl.MinimumPointWidth = 0.125;
cl.MaximumPointWidth = 255.875;
cso_changed(sprite_coord_mode) ||
cso_changed(light_twoside))
ice->state.dirty |= IRIS_DIRTY_SBE;
+
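+   /* The draw-time 3DSTATE_PS_EXTRA packing selects InputCoverageMaskState
+    * based on conservative rasterization, so the FS packets must be
+    * re-emitted when it changes.
+    */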
+ if (cso_changed(conservative_rasterization))
+ ice->state.dirty |= IRIS_DIRTY_FS;
}
ice->state.cso_rast = new_cso;
* in the dynamic state memory zone, so we can point to it via the
* 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
*/
+ unsigned size = count * 4 * GENX(SAMPLER_STATE_length);
uint32_t *map =
- upload_state(ice->state.dynamic_uploader, &shs->sampler_table,
- count * 4 * GENX(SAMPLER_STATE_length), 32);
+ upload_state(ice->state.dynamic_uploader, &shs->sampler_table, size, 32);
if (unlikely(!map))
return;
shs->sampler_table.offset +=
iris_bo_offset_from_base_address(iris_resource_bo(res));
+ iris_record_state_size(ice->state.sizes, shs->sampler_table.offset, size);
+
/* Make sure all land in the same BO */
iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);
static void
fill_buffer_surface_state(struct isl_device *isl_dev,
- struct iris_bo *bo,
+ struct iris_resource *res,
void *map,
enum isl_format format,
struct isl_swizzle swizzle,
* texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
*/
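+   /* Note that res->offset covers resources that sit at a nonzero offset
+    * within their BO (e.g. suballocated or imported buffers), so only the
+    * space past that offset is usable.
+    */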
unsigned final_size =
- MIN3(size, bo->size - offset, IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);
+ MIN3(size, res->bo->size - res->offset - offset,
+ IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);
isl_buffer_fill_state(isl_dev, map,
- .address = bo->gtt_offset + offset,
+ .address = res->bo->gtt_offset + res->offset + offset,
.size_B = final_size,
.format = format,
.swizzle = swizzle,
.stride_B = cpp,
- .mocs = mocs(bo));
+ .mocs = mocs(res->bo));
}
#define SURFACE_STATE_ALIGNMENT 64
.surf = &res->surf,
.view = view,
.mocs = mocs(res->bo),
- .address = res->bo->gtt_offset,
+ .address = res->bo->gtt_offset + res->offset,
};
if (aux_usage != ISL_AUX_USAGE_NONE) {
f.aux_surf = &res->aux.surf;
f.aux_usage = aux_usage;
f.aux_address = res->aux.bo->gtt_offset + res->aux.offset;
- f.clear_color = res->aux.clear_color;
+
+ struct iris_bo *clear_bo = NULL;
+ uint64_t clear_offset = 0;
+ f.clear_color =
+ iris_resource_get_clear_color(res, &clear_bo, &clear_offset);
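+      /* Gen10+ can read the fast clear color from memory (an indirect
+       * clear color); Gen9 still needs it packed into the surface state.
+       */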
+ if (clear_bo) {
+ f.clear_address = clear_bo->gtt_offset + clear_offset;
+ f.use_clear_address = isl_dev->info->gen > 9;
+ }
}
isl_surf_fill_state_s(isl_dev, map, &f);
map += SURFACE_STATE_ALIGNMENT;
}
} else {
- fill_buffer_surface_state(&screen->isl_dev, isv->res->bo, map,
+ fill_buffer_surface_state(&screen->isl_dev, isv->res, map,
isv->view.format, isv->view.swizzle,
tmpl->u.buf.offset, tmpl->u.buf.size);
}
return NULL;
}
- surf->view = (struct isl_view) {
+ struct isl_view *view = &surf->view;
+ *view = (struct isl_view) {
.format = fmt.fmt,
.base_level = tmpl->u.tex.level,
.levels = 1,
if (!unlikely(map))
return NULL;
-   unsigned aux_modes = res->aux.possible_usages;
-   while (aux_modes) {
-      enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
-      fill_surface_state(&screen->isl_dev, map, res, &surf->view, aux_usage);
-      map += SURFACE_STATE_ALIGNMENT;
+   if (!isl_format_is_compressed(res->surf.format)) {
+      /* This is a normal surface.  Fill out a SURFACE_STATE for each possible
+       * auxiliary surface mode and return the pipe_surface.
+       */
+      unsigned aux_modes = res->aux.possible_usages;
+      while (aux_modes) {
+         enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
+
+         fill_surface_state(&screen->isl_dev, map, res, view, aux_usage);
+         map += SURFACE_STATE_ALIGNMENT;
+      }
+
+      return psurf;
+   }
+
+ /* The resource has a compressed format, which is not renderable, but we
+ * have a renderable view format. We must be attempting to upload blocks
+ * of compressed data via an uncompressed view.
+ *
+ * In this case, we can assume there are no auxiliary buffers, a single
+    * miplevel, and that the resource is single-sampled.  Gallium may try
+    * to create an uncompressed view with multiple layers, however.
+ */
+ assert(!isl_format_is_compressed(fmt.fmt));
+ assert(res->aux.possible_usages == 1 << ISL_AUX_USAGE_NONE);
+ assert(res->surf.samples == 1);
+ assert(view->levels == 1);
+
+ struct isl_surf isl_surf;
+ uint32_t offset_B = 0, tile_x_sa = 0, tile_y_sa = 0;
+
+ if (view->base_level > 0) {
+ /* We can't rely on the hardware's miplevel selection with such
+ * a substantial lie about the format, so we select a single image
+ * using the Tile X/Y Offset fields. In this case, we can't handle
+ * multiple array slices.
+ *
+ * On Broadwell, HALIGN and VALIGN are specified in pixels and are
+ * hard-coded to align to exactly the block size of the compressed
+ * texture. This means that, when reinterpreted as a non-compressed
+ * texture, the tile offsets may be anything and we can't rely on
+ * X/Y Offset.
+ *
+ * Return NULL to force the state tracker to take fallback paths.
+ */
+ if (view->array_len > 1 || GEN_GEN == 8)
+ return NULL;
+
+ const bool is_3d = res->surf.dim == ISL_SURF_DIM_3D;
+ isl_surf_get_image_surf(&screen->isl_dev, &res->surf,
+ view->base_level,
+ is_3d ? 0 : view->base_array_layer,
+ is_3d ? view->base_array_layer : 0,
+ &isl_surf,
+ &offset_B, &tile_x_sa, &tile_y_sa);
+
+ /* We use address and tile offsets to access a single level/layer
+ * as a subimage, so reset level/layer so it doesn't offset again.
+ */
+ view->base_array_layer = 0;
+ view->base_level = 0;
+ } else {
+ /* Level 0 doesn't require tile offsets, and the hardware can find
+ * array slices using QPitch even with the format override, so we
+ * can allow layers in this case. Copy the original ISL surface.
+ */
+ memcpy(&isl_surf, &res->surf, sizeof(isl_surf));
}
+ /* Scale down the image dimensions by the block size. */
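+   /* For example, a 256x256 BC1 surface (4x4 blocks, 64 bits each) would
+    * be laid out as 64x64 blocks, i.e. a 64x64 view with a 64bpb format.
+    */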
+ const struct isl_format_layout *fmtl =
+ isl_format_get_layout(res->surf.format);
+ isl_surf.format = fmt.fmt;
+ isl_surf.logical_level0_px.width =
+ DIV_ROUND_UP(isl_surf.logical_level0_px.width, fmtl->bw);
+ isl_surf.logical_level0_px.height =
+ DIV_ROUND_UP(isl_surf.logical_level0_px.height, fmtl->bh);
+ isl_surf.phys_level0_sa.width /= fmtl->bw;
+ isl_surf.phys_level0_sa.height /= fmtl->bh;
+ tile_x_sa /= fmtl->bw;
+ tile_y_sa /= fmtl->bh;
+
+ psurf->width = isl_surf.logical_level0_px.width;
+ psurf->height = isl_surf.logical_level0_px.height;
+
+ struct isl_surf_fill_state_info f = {
+ .surf = &isl_surf,
+ .view = view,
+ .mocs = mocs(res->bo),
+ .address = res->bo->gtt_offset + offset_B,
+ .x_offset_sa = tile_x_sa,
+ .y_offset_sa = tile_y_sa,
+ };
+
+ isl_surf_fill_state_s(&screen->isl_dev, map, &f);
return psurf;
}
const struct gen_device_info *devinfo = &screen->devinfo;
gl_shader_stage stage = stage_from_pipe(p_stage);
struct iris_shader_state *shs = &ice->state.shaders[stage];
+#if GEN_GEN == 8
+ struct iris_genx_state *genx = ice->state.genx;
+ struct brw_image_param *image_params = genx->shaders[stage].image_param;
+#endif
shs->bound_image_views &= ~u_bit_consecutive(start_slot, count);
for (unsigned i = 0; i < count; i++) {
+ struct iris_image_view *iv = &shs->image[start_slot + i];
+
if (p_images && p_images[i].resource) {
const struct pipe_image_view *img = &p_images[i];
struct iris_resource *res = (void *) img->resource;
- pipe_resource_reference(&shs->image[start_slot + i].res, &res->base);
-
- shs->bound_image_views |= 1 << (start_slot + i);
-
- res->bind_history |= PIPE_BIND_SHADER_IMAGE;
// XXX: these are not retained forever, use a separate uploader?
void *map =
alloc_surface_states(ice->state.surface_uploader,
- &shs->image[start_slot + i].surface_state,
- 1 << ISL_AUX_USAGE_NONE);
- if (!unlikely(map)) {
- pipe_resource_reference(&shs->image[start_slot + i].res, NULL);
+ &iv->surface_state, 1 << ISL_AUX_USAGE_NONE);
+      if (unlikely(!map))
return;
- }
+
+ iv->base = *img;
+ iv->base.resource = NULL;
+ pipe_resource_reference(&iv->base.resource, &res->base);
+
+ shs->bound_image_views |= 1 << (start_slot + i);
+
+ res->bind_history |= PIPE_BIND_SHADER_IMAGE;
isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
enum isl_format isl_fmt =
isl_fmt = isl_lower_storage_image_format(devinfo, isl_fmt);
}
- shs->image[start_slot + i].access = img->shader_access;
-
if (res->base.target != PIPE_BUFFER) {
struct isl_view view = {
.format = isl_fmt,
};
if (untyped_fallback) {
- fill_buffer_surface_state(&screen->isl_dev, res->bo, map,
+ fill_buffer_surface_state(&screen->isl_dev, res, map,
isl_fmt, ISL_SWIZZLE_IDENTITY,
0, res->bo->size);
} else {
}
isl_surf_fill_image_param(&screen->isl_dev,
- &shs->image[start_slot + i].param,
+ &image_params[start_slot + i],
&res->surf, &view);
} else {
- fill_buffer_surface_state(&screen->isl_dev, res->bo, map,
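+            /* The image may be written, so mark its range as holding valid
+             * data; later buffer maps will then synchronize against it.
+             */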
+ util_range_add(&res->valid_buffer_range, img->u.buf.offset,
+ img->u.buf.offset + img->u.buf.size);
+
+ fill_buffer_surface_state(&screen->isl_dev, res, map,
isl_fmt, ISL_SWIZZLE_IDENTITY,
img->u.buf.offset, img->u.buf.size);
- fill_buffer_image_param(&shs->image[start_slot + i].param,
+ fill_buffer_image_param(&image_params[start_slot + i],
img->format, img->u.buf.size);
}
} else {
- pipe_resource_reference(&shs->image[start_slot + i].res, NULL);
- pipe_resource_reference(&shs->image[start_slot + i].surface_state.res,
- NULL);
- fill_default_image_param(&shs->image[start_slot + i].param);
+ pipe_resource_reference(&iv->base.resource, NULL);
+ pipe_resource_reference(&iv->surface_state.res, NULL);
+ fill_default_image_param(&image_params[start_slot + i]);
}
}
view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
info.depth_surf = &zres->surf;
- info.depth_address = zres->bo->gtt_offset;
+ info.depth_address = zres->bo->gtt_offset + zres->offset;
info.mocs = mocs(zres->bo);
view.format = zres->surf.format;
if (stencil_res) {
view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
info.stencil_surf = &stencil_res->surf;
- info.stencil_address = stencil_res->bo->gtt_offset;
+ info.stencil_address = stencil_res->bo->gtt_offset + stencil_res->offset;
if (!zres) {
view.format = stencil_res->surf.format;
info.mocs = mocs(stencil_res->bo);
#endif
}
-static void
-upload_ubo_surf_state(struct iris_context *ice,
- struct iris_const_buffer *cbuf,
- unsigned buffer_size)
-{
- struct pipe_context *ctx = &ice->ctx;
- struct iris_screen *screen = (struct iris_screen *) ctx->screen;
-
- // XXX: these are not retained forever, use a separate uploader?
- void *map =
- upload_state(ice->state.surface_uploader, &cbuf->surface_state,
- 4 * GENX(RENDER_SURFACE_STATE_length), 64);
- if (!unlikely(map)) {
- pipe_resource_reference(&cbuf->data.res, NULL);
- return;
- }
-
- struct iris_resource *res = (void *) cbuf->data.res;
- struct iris_bo *surf_bo = iris_resource_bo(cbuf->surface_state.res);
- cbuf->surface_state.offset += iris_bo_offset_from_base_address(surf_bo);
-
- isl_buffer_fill_state(&screen->isl_dev, map,
- .address = res->bo->gtt_offset + cbuf->data.offset,
- .size_B = MIN2(buffer_size,
- res->bo->size - cbuf->data.offset),
- .format = ISL_FORMAT_R32G32B32A32_FLOAT,
- .swizzle = ISL_SWIZZLE_IDENTITY,
- .stride_B = 1,
- .mocs = mocs(res->bo))
-}
-
/**
* The pipe->set_constant_buffer() driver hook.
*
struct iris_context *ice = (struct iris_context *) ctx;
gl_shader_stage stage = stage_from_pipe(p_stage);
struct iris_shader_state *shs = &ice->state.shaders[stage];
- struct iris_const_buffer *cbuf = &shs->constbuf[index];
+ struct pipe_shader_buffer *cbuf = &shs->constbuf[index];
if (input && input->buffer) {
shs->bound_cbufs |= 1u << index;
assert(index > 0);
- pipe_resource_reference(&cbuf->data.res, input->buffer);
- cbuf->data.offset = input->buffer_offset;
+ pipe_resource_reference(&cbuf->buffer, input->buffer);
+ cbuf->buffer_offset = input->buffer_offset;
+ cbuf->buffer_size =
+ MIN2(input->buffer_size,
+ iris_resource_bo(input->buffer)->size - cbuf->buffer_offset);
- struct iris_resource *res = (void *) cbuf->data.res;
+ struct iris_resource *res = (void *) cbuf->buffer;
res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
- upload_ubo_surf_state(ice, cbuf, input->buffer_size);
+ iris_upload_ubo_ssbo_surf_state(ice, cbuf,
+ &shs->constbuf_surf_state[index],
+ false);
} else {
shs->bound_cbufs &= ~(1u << index);
- pipe_resource_reference(&cbuf->data.res, NULL);
- pipe_resource_reference(&cbuf->surface_state.res, NULL);
+ pipe_resource_reference(&cbuf->buffer, NULL);
+ pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
}
if (index == 0) {
upload_uniforms(struct iris_context *ice,
gl_shader_stage stage)
{
+ UNUSED struct iris_genx_state *genx = ice->state.genx;
struct iris_shader_state *shs = &ice->state.shaders[stage];
- struct iris_const_buffer *cbuf = &shs->constbuf[0];
+ struct pipe_shader_buffer *cbuf = &shs->constbuf[0];
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
unsigned upload_size = shader->num_system_values * sizeof(uint32_t) +
if (upload_size == 0)
return;
- uint32_t *map =
- upload_state(ice->ctx.const_uploader, &cbuf->data, upload_size, 64);
+ uint32_t *map = NULL;
+ u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64,
+ &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
for (int i = 0; i < shader->num_system_values; i++) {
uint32_t sysval = shader->system_values[i];
uint32_t value = 0;
if (BRW_PARAM_DOMAIN(sysval) == BRW_PARAM_DOMAIN_IMAGE) {
+#if GEN_GEN == 8
unsigned img = BRW_PARAM_IMAGE_IDX(sysval);
unsigned offset = BRW_PARAM_IMAGE_OFFSET(sysval);
- struct brw_image_param *param = &shs->image[img].param;
+ struct brw_image_param *param =
+ &genx->shaders[stage].image_param[img];
assert(offset < sizeof(struct brw_image_param));
value = ((uint32_t *) param)[offset];
+#endif
} else if (sysval == BRW_PARAM_BUILTIN_ZERO) {
value = 0;
} else if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
memcpy(map, shs->cbuf0.user_buffer, shs->cbuf0.buffer_size);
}
- upload_ubo_surf_state(ice, cbuf, upload_size);
+ cbuf->buffer_size = upload_size;
+ iris_upload_ubo_ssbo_surf_state(ice, cbuf,
+ &shs->constbuf_surf_state[0], false);
}
/**
unsigned writable_bitmask)
{
struct iris_context *ice = (struct iris_context *) ctx;
- struct iris_screen *screen = (struct iris_screen *)ctx->screen;
gl_shader_stage stage = stage_from_pipe(p_stage);
struct iris_shader_state *shs = &ice->state.shaders[stage];
for (unsigned i = 0; i < count; i++) {
if (buffers && buffers[i].buffer) {
- const struct pipe_shader_buffer *buffer = &buffers[i];
- struct iris_resource *res = (void *) buffer->buffer;
- pipe_resource_reference(&shs->ssbo[start_slot + i], &res->base);
+ struct iris_resource *res = (void *) buffers[i].buffer;
+ struct pipe_shader_buffer *ssbo = &shs->ssbo[start_slot + i];
+ struct iris_state_ref *surf_state =
+ &shs->ssbo_surf_state[start_slot + i];
+ pipe_resource_reference(&ssbo->buffer, &res->base);
+ ssbo->buffer_offset = buffers[i].buffer_offset;
+ ssbo->buffer_size =
+ MIN2(buffers[i].buffer_size, res->bo->size - ssbo->buffer_offset);
shs->bound_ssbos |= 1 << (start_slot + i);
-         res->bind_history |= PIPE_BIND_SHADER_BUFFER;
-
-         // XXX: these are not retained forever, use a separate uploader?
-         void *map =
-            upload_state(ice->state.surface_uploader,
-                         &shs->ssbo_surface_state[start_slot + i],
-                         4 * GENX(RENDER_SURFACE_STATE_length), 64);
-         if (!unlikely(map)) {
-            pipe_resource_reference(&shs->ssbo[start_slot + i], NULL);
-            return;
-         }
-
-         struct iris_bo *surf_state_bo =
-            iris_resource_bo(shs->ssbo_surface_state[start_slot + i].res);
-         shs->ssbo_surface_state[start_slot + i].offset +=
-            iris_bo_offset_from_base_address(surf_state_bo);
-
-         isl_buffer_fill_state(&screen->isl_dev, map,
-                               .address =
-                                  res->bo->gtt_offset + buffer->buffer_offset,
-                               .size_B =
-                                  MIN2(buffer->buffer_size,
-                                       res->bo->size - buffer->buffer_offset),
-                               .format = ISL_FORMAT_RAW,
-                               .swizzle = ISL_SWIZZLE_IDENTITY,
-                               .stride_B = 1,
-                               .mocs = mocs(res->bo));
+         iris_upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, true);
+
+         res->bind_history |= PIPE_BIND_SHADER_BUFFER;
+
+         util_range_add(&res->valid_buffer_range, ssbo->buffer_offset,
+                        ssbo->buffer_offset + ssbo->buffer_size);
} else {
- pipe_resource_reference(&shs->ssbo[start_slot + i], NULL);
- pipe_resource_reference(&shs->ssbo_surface_state[start_slot + i].res,
+ pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL);
+ pipe_resource_reference(&shs->ssbo_surf_state[start_slot + i].res,
NULL);
}
}
cso->base.buffer_size = buffer_size;
cso->base.context = ctx;
+ util_range_add(&res->valid_buffer_range, buffer_offset,
+ buffer_offset + buffer_size);
+
upload_state(ctx->stream_uploader, &cso->offset, sizeof(uint32_t), 4);
return &cso->base;
* may have missed emitting it earlier, so do so now. (We're already
* taking a stall to update 3DSTATE_SO_BUFFERS anyway...)
*/
- if (active)
+ if (active) {
ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST;
+ } else {
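+      /* We're about to stop streaming output.  Flush any pending SO
+       * writes now, so that subsequent uses of these buffers (vertex
+       * fetch, texturing, and so on) see the streamed data.
+       */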
+ uint32_t flush = 0;
+ for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
+ struct iris_stream_output_target *tgt =
+ (void *) ice->state.so_target[i];
+ if (tgt) {
+ struct iris_resource *res = (void *) tgt->base.buffer;
+
+ flush |= iris_flush_bits_for_history(res);
+ iris_dirty_for_history(ice, res);
+ }
+ }
+ iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER], flush);
+ }
}
for (int i = 0; i < 4; i++) {
for (unsigned i = 0; i < 4; i++,
so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
- if (i >= num_targets || !targets[i]) {
+ struct iris_stream_output_target *tgt = (void *) ice->state.so_target[i];
+ unsigned offset = offsets[i];
+
+ if (!tgt) {
iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob)
sob.SOBufferIndex = i;
continue;
}
- struct iris_stream_output_target *tgt = (void *) targets[i];
struct iris_resource *res = (void *) tgt->base.buffer;
/* Note that offsets[i] will either be 0, causing us to zero
* the value in the buffer, or 0xFFFFFFFF, which happens to mean
* "continue appending at the existing offset."
*/
- assert(offsets[i] == 0 || offsets[i] == 0xFFFFFFFF);
+ assert(offset == 0 || offset == 0xFFFFFFFF);
+
+ /* We might be called by Begin (offset = 0), Pause, then Resume
+ * (offset = 0xFFFFFFFF) before ever drawing (where these commands
+ * will actually be sent to the GPU). In this case, we don't want
+ * to append - we still want to do our initial zeroing.
+ */
+ if (!tgt->zeroed)
+ offset = 0;
iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
sob.SurfaceBaseAddress =
sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
sob.SOBufferIndex = i;
- sob.StreamOffset = offsets[i];
+ sob.StreamOffset = offset;
sob.StreamOutputBufferOffsetAddress =
rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset +
tgt->offset.offset);
#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage) \
pkt.KernelStartPointer = KSP(shader); \
pkt.BindingTableEntryCount = GEN_GEN == 11 ? 0 : \
- prog_data->binding_table.size_bytes / 4; \
+ shader->bt.size_bytes / 4; \
pkt.FloatingPointMode = prog_data->use_alt_mode; \
\
pkt.DispatchGRFStartRegisterForURBData = \
hs.InstanceCount = tcs_prog_data->instances - 1;
hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
hs.IncludeVertexHandles = true;
+
+#if GEN_GEN >= 9
+ hs.DispatchMode = vue_prog_data->dispatch_mode;
+ hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
+#endif
}
}
ps.VectorMaskEnable = true;
// XXX: WABTPPrefetchDisable, see above, drop at C0
ps.BindingTableEntryCount = GEN_GEN == 11 ? 0 :
- prog_data->binding_table.size_bytes / 4;
+ shader->bt.size_bytes / 4;
ps.FloatingPointMode = prog_data->use_alt_mode;
ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);
psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
#if GEN_GEN >= 9
- if (wm_prog_data->uses_sample_mask) {
- /* TODO: conservative rasterization */
- if (wm_prog_data->post_depth_coverage)
- psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
- else
- psx.InputCoverageMaskState = ICMS_NORMAL;
- }
-
psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
#else
}
static uint32_t
-use_const_buffer(struct iris_batch *batch,
- struct iris_context *ice,
- struct iris_const_buffer *cbuf)
-{
- if (!cbuf->surface_state.res)
- return use_null_surface(batch, ice);
-
- iris_use_pinned_bo(batch, iris_resource_bo(cbuf->data.res), false);
- iris_use_pinned_bo(batch, iris_resource_bo(cbuf->surface_state.res), false);
-
- return cbuf->surface_state.offset;
-}
-
-static uint32_t
-use_ssbo(struct iris_batch *batch, struct iris_context *ice,
- struct iris_shader_state *shs, int i)
+use_ubo_ssbo(struct iris_batch *batch,
+ struct iris_context *ice,
+ struct pipe_shader_buffer *buf,
+ struct iris_state_ref *surf_state,
+ bool writable)
{
- if (!shs->ssbo[i])
+ if (!buf->buffer)
return use_null_surface(batch, ice);
- struct iris_state_ref *surf_state = &shs->ssbo_surface_state[i];
-
- iris_use_pinned_bo(batch, iris_resource_bo(shs->ssbo[i]),
- shs->writable_ssbos & (1 << i));
+ iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable);
iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false);
return surf_state->offset;
use_image(struct iris_batch *batch, struct iris_context *ice,
struct iris_shader_state *shs, int i)
{
- if (!shs->image[i].res)
+ struct iris_image_view *iv = &shs->image[i];
+ struct iris_resource *res = (void *) iv->base.resource;
+
+ if (!res)
return use_null_surface(batch, ice);
- struct iris_resource *res = (void *) shs->image[i].res;
- struct iris_state_ref *surf_state = &shs->image[i].surface_state;
- bool write = shs->image[i].access & PIPE_IMAGE_ACCESS_WRITE;
+ bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE;
iris_use_pinned_bo(batch, res->bo, write);
- iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false);
+ iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.res), false);
if (res->aux.bo)
iris_use_pinned_bo(batch, res->aux.bo, write);
- return surf_state->offset;
+ return iv->surface_state.offset;
}
#define push_bt_entry(addr) \
assert(addr >= binder_addr); \
- assert(s < prog_data->binding_table.size_bytes / sizeof(uint32_t)); \
+ assert(s < shader->bt.size_bytes / sizeof(uint32_t)); \
if (!pin_only) bt_map[s++] = (addr) - binder_addr;
-#define bt_assert(section, exists) \
- if (!pin_only) assert(prog_data->binding_table.section == \
- (exists) ? s : 0xd0d0d0d0)
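+/* With a compacted binding table, a group's offset is only meaningful if
+ * at least one surface in that group is actually used.
+ */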
+#define bt_assert(section) \
+ if (!pin_only && shader->bt.used_mask[section] != 0) \
+ assert(shader->bt.offsets[section] == s);
/**
* Populate the binding table for a given shader stage.
bool pin_only)
{
const struct iris_binder *binder = &ice->state.binder;
+ struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
if (!shader)
return;
+ struct iris_binding_table *bt = &shader->bt;
UNUSED struct brw_stage_prog_data *prog_data = shader->prog_data;
struct iris_shader_state *shs = &ice->state.shaders[stage];
uint32_t binder_addr = binder->bo->gtt_offset;
}
}
- unsigned num_textures = util_last_bit(info->textures_used);
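+/* Walk every slot of one binding table surface group, skipping entries
+ * that were compacted away (IRIS_SURFACE_NOT_USED).
+ */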
+#define foreach_surface_used(index, group) \
+ bt_assert(group); \
+ for (int index = 0; index < bt->sizes[group]; index++) \
+ if (iris_group_index_to_bti(bt, group, index) != \
+ IRIS_SURFACE_NOT_USED)
- bt_assert(texture_start, num_textures > 0);
-
- for (int i = 0; i < num_textures; i++) {
+ foreach_surface_used(i, IRIS_SURFACE_GROUP_TEXTURE) {
struct iris_sampler_view *view = shs->textures[i];
uint32_t addr = view ? use_sampler_view(ice, batch, view)
: use_null_surface(batch, ice);
push_bt_entry(addr);
}
- bt_assert(image_start, info->num_images > 0);
-
- for (int i = 0; i < info->num_images; i++) {
+ foreach_surface_used(i, IRIS_SURFACE_GROUP_IMAGE) {
uint32_t addr = use_image(batch, ice, shs, i);
push_bt_entry(addr);
}
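+   /* The last UBO slot may contain the compiler-gathered constant data
+    * (ish->const_data), which has its own upload and surface state rather
+    * than an entry in shs->constbuf.
+    */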
-   bt_assert(ubo_start, shader->num_cbufs > 0);
-
-   for (int i = 0; i < shader->num_cbufs; i++) {
-      uint32_t addr = use_const_buffer(batch, ice, &shs->constbuf[i]);
+   foreach_surface_used(i, IRIS_SURFACE_GROUP_UBO) {
+      uint32_t addr;
+
+      if ((i == bt->sizes[IRIS_SURFACE_GROUP_UBO] - 1) && ish->const_data) {
+         iris_use_pinned_bo(batch, iris_resource_bo(ish->const_data), false);
+         iris_use_pinned_bo(batch, iris_resource_bo(ish->const_data_state.res),
+                            false);
+         addr = ish->const_data_state.offset;
+      } else {
+         addr = use_ubo_ssbo(batch, ice, &shs->constbuf[i],
+                             &shs->constbuf_surf_state[i], false);
+      }
+
push_bt_entry(addr);
}
- bt_assert(ssbo_start, info->num_abos + info->num_ssbos > 0);
-
- /* XXX: st is wasting 16 binding table slots for ABOs. Should add a cap
- * for changing nir_lower_atomics_to_ssbos setting and buffer_base offset
- * in st_atom_storagebuf.c so it'll compact them into one range, with
- * SSBOs starting at info->num_abos. Ideally it'd reset num_abos to 0 too
- */
- if (info->num_abos + info->num_ssbos > 0) {
- for (int i = 0; i < IRIS_MAX_ABOS + info->num_ssbos; i++) {
- uint32_t addr = use_ssbo(batch, ice, shs, i);
- push_bt_entry(addr);
- }
+ foreach_surface_used(i, IRIS_SURFACE_GROUP_SSBO) {
+ uint32_t addr =
+ use_ubo_ssbo(batch, ice, &shs->ssbo[i], &shs->ssbo_surf_state[i],
+ shs->writable_ssbos & (1u << i));
+ push_bt_entry(addr);
}
#if 0
if (range->length == 0)
continue;
- struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
- struct iris_resource *res = (void *) cbuf->data.res;
+ /* Range block is a binding table index, map back to UBO index. */
+ unsigned block_index = iris_bti_to_group_index(
+ &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
+ assert(block_index != IRIS_SURFACE_NOT_USED);
+
+ struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
+ struct iris_resource *res = (void *) cbuf->buffer;
if (res)
iris_use_pinned_bo(batch, res->bo, false);
const struct brw_ubo_range *range = &prog_data->ubo_ranges[0];
if (range->length > 0) {
- struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
- struct iris_resource *res = (void *) cbuf->data.res;
+ /* Range block is a binding table index, map back to UBO index. */
+ unsigned block_index = iris_bti_to_group_index(
+ &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
+ assert(block_index != IRIS_SURFACE_NOT_USED);
+
+ struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
+ struct iris_resource *res = (void *) cbuf->buffer;
if (res)
iris_use_pinned_bo(batch, res->bo, false);
if (range->length == 0)
continue;
- struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
- struct iris_resource *res = (void *) cbuf->data.res;
+ /* Range block is a binding table index, map back to UBO index. */
+ unsigned block_index = iris_bti_to_group_index(
+ &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
+ assert(block_index != IRIS_SURFACE_NOT_USED);
+
+ struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
+ struct iris_resource *res = (void *) cbuf->buffer;
- assert(cbuf->data.offset % 32 == 0);
+ assert(cbuf->buffer_offset % 32 == 0);
pkt.ConstantBody.ReadLength[n] = range->length;
pkt.ConstantBody.Buffer[n] =
- res ? ro_bo(res->bo, range->start * 32 + cbuf->data.offset)
+ res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
: ro_bo(batch->screen->workaround_bo, 0);
n--;
}
iris_get_scratch_space(ice, prog_data->total_scratch, stage);
iris_use_pinned_bo(batch, bo, true);
}
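+      /* InputCoverageMaskState depends on the rasterizer state, which is
+       * only known at draw time, so patch 3DSTATE_PS_EXTRA by merging a
+       * dynamically packed copy over the shader's prepacked one.
+       */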
-      iris_batch_emit(batch, shader->derived_data,
-                      iris_derived_program_state_size(stage));
+#if GEN_GEN >= 9
+      if (stage == MESA_SHADER_FRAGMENT && wm_prog_data->uses_sample_mask) {
+         uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
+         uint32_t *shader_psx = ((uint32_t *) shader->derived_data) +
+                                GENX(3DSTATE_PS_length);
+         struct iris_rasterizer_state *cso = ice->state.cso_rast;
+
+         iris_pack_command(GENX(3DSTATE_PS_EXTRA), &psx_state, psx) {
+            if (wm_prog_data->post_depth_coverage)
+               psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
+            else if (wm_prog_data->inner_coverage &&
+                     cso->conservative_rasterization)
+               psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
+            else
+               psx.InputCoverageMaskState = ICMS_NORMAL;
+         }
+
+         iris_batch_emit(batch, shader->derived_data,
+                         sizeof(uint32_t) * GENX(3DSTATE_PS_length));
+         iris_emit_merge(batch, shader_psx, psx_state,
+                         GENX(3DSTATE_PS_EXTRA_length));
+      } else
+#endif
+         iris_batch_emit(batch, shader->derived_data,
+                         iris_derived_program_state_size(stage));
} else {
if (stage == MESA_SHADER_TESS_EVAL) {
iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
struct iris_stream_output_target *tgt =
(void *) ice->state.so_target[i];
if (tgt) {
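+            /* This batch will emit the 3DSTATE_SO_BUFFER that zeroes the
+             * stream offset, so later Resumes are free to append.
+             */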
+ tgt->zeroed = true;
iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
true);
iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
+ bool gs_or_tes = ice->shaders.prog[MESA_SHADER_GEOMETRY] ||
+ ice->shaders.prog[MESA_SHADER_TESS_EVAL];
+ bool points_or_lines = cso_rast->fill_mode_point_or_line ||
+ (gs_or_tes ? ice->shaders.output_topology_is_points_or_lines
+ : ice->state.prim_is_points_or_lines);
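+   /* Wide points and lines would vanish as soon as the XY clip test
+    * rejects their (center) vertex, so only enable
+    * ViewportXYClipTestEnable for solid topologies.
+    */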
+
uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
cl.StatisticsEnable = ice->state.statistics_counters_enabled;
cl.ClipMode = cso_rast->rasterizer_discard ? CLIPMODE_REJECT_ALL
: CLIPMODE_NORMAL;
+ cl.ViewportXYClipTestEnable = !points_or_lines;
+
if (wm_prog_data->barycentric_interp_modes &
BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
cl.NonPerspectiveBarycentricEnable = true;
iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
pb.HasWriteableRT = has_writeable_rt(cso_blend, fs_info);
pb.AlphaTestEnable = cso_zsa->alpha.enabled;
+
+ /* The dual source blending docs caution against using SRC1 factors
+ * when the shader doesn't use a dual source render target write.
+ * Empirically, this can lead to GPU hangs, and the results are
+ * undefined anyway, so simply disable blending to avoid the hang.
+ */
+ pb.ColorBufferBlendEnable = (cso_blend->blend_enables & 1) &&
+ (!cso_blend->dual_color_blending || wm_prog_data->dual_src_blend);
}
iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
PIPE_CONTROL_CS_STALL;
ice->state.last_vbo_high_bits[i] = high_bits;
}
-
- /* If the buffer was written to by streamout, we may need
- * to stall so those writes land and become visible to the
- * vertex fetcher.
- *
- * TODO: This may stall more than necessary.
- */
- if (res->bind_history & PIPE_BIND_STREAM_OUTPUT)
- flush_flags |= PIPE_CONTROL_CS_STALL;
}
}
struct iris_batch *batch,
const struct pipe_draw_info *draw)
{
+ bool use_predicate = ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT;
+
/* Always pin the binder. If we're emitting new binding table pointers,
* we need it. If not, we're probably inheriting old tables via the
* context, and need it anyway. Since true zero-bindings cases are
#define _3DPRIM_BASE_VERTEX 0x2440
if (draw->indirect) {
- /* We don't support this MultidrawIndirect. */
- assert(!draw->indirect->indirect_draw_count);
+ if (draw->indirect->indirect_draw_count) {
+ use_predicate = true;
+
+ struct iris_bo *draw_count_bo =
+ iris_resource_bo(draw->indirect->indirect_draw_count);
+ unsigned draw_count_offset =
+ draw->indirect->indirect_draw_count_offset;
+
+ iris_emit_pipe_control_flush(batch, PIPE_CONTROL_FLUSH_ENABLE);
+
+ if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
+ static const uint32_t math[] = {
+ MI_MATH | (9 - 2),
+ /* Compute (draw index < draw count).
+ * We do this by subtracting and storing the carry bit.
+ */
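+            /* When R0 < R1 the 64-bit subtraction borrows and the carry
+             * flag reads back as all ones (zero otherwise), which is what
+             * makes the AND with the predicate below work.
+             */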
+ MI_ALU2(LOAD, SRCA, R0),
+ MI_ALU2(LOAD, SRCB, R1),
+ MI_ALU0(SUB),
+ MI_ALU2(STORE, R3, CF),
+            /* AND that result with the MI_PREDICATE value in R2. */
+ MI_ALU2(LOAD, SRCA, R3),
+ MI_ALU2(LOAD, SRCB, R2),
+ MI_ALU0(AND),
+ MI_ALU2(STORE, R3, ACCU),
+ };
+ /* Upload the current draw count from the draw parameters
+ * buffer to GPR1.
+ */
+ ice->vtbl.load_register_mem32(batch, CS_GPR(1), draw_count_bo,
+ draw_count_offset);
+ /* Zero the top 32-bits of GPR1. */
+ ice->vtbl.load_register_imm32(batch, CS_GPR(1) + 4, 0);
+ /* Upload the id of the current primitive to GPR0. */
+ ice->vtbl.load_register_imm64(batch, CS_GPR(0), draw->drawid);
+
+ iris_batch_emit(batch, math, sizeof(math));
+
+ /* Store result of MI_MATH computations to MI_PREDICATE_RESULT. */
+ ice->vtbl.load_register_reg64(batch,
+ MI_PREDICATE_RESULT, CS_GPR(3));
+ } else {
+ uint32_t mi_predicate;
+
+ /* Upload the id of the current primitive to MI_PREDICATE_SRC1. */
+ ice->vtbl.load_register_imm64(batch, MI_PREDICATE_SRC1,
+ draw->drawid);
+ /* Upload the current draw count from the draw parameters buffer
+ * to MI_PREDICATE_SRC0.
+ */
+ ice->vtbl.load_register_mem32(batch, MI_PREDICATE_SRC0,
+ draw_count_bo, draw_count_offset);
+ /* Zero the top 32-bits of MI_PREDICATE_SRC0 */
+ ice->vtbl.load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0);
+
+ if (draw->drawid == 0) {
+ mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
+ MI_PREDICATE_COMBINEOP_SET |
+ MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
+ } else {
+ /* While draw_index < draw_count the predicate's result will be
+ * (draw_index == draw_count) ^ TRUE = TRUE
+ * When draw_index == draw_count the result is
+ * (TRUE) ^ TRUE = FALSE
+ * After this all results will be:
+ * (FALSE) ^ FALSE = FALSE
+ */
+ mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOAD |
+ MI_PREDICATE_COMBINEOP_XOR |
+ MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
+ }
+ iris_batch_emit(batch, &mi_predicate, sizeof(uint32_t));
+ }
+ }
struct iris_bo *bo = iris_resource_bo(draw->indirect->buffer);
assert(bo);
lrm.MemoryAddress =
ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
}
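+   /* The saved stream offset includes the buffer offset, which must not
+    * count toward the number of vertices written, so strip it off before
+    * dividing by the stride.
+    */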
+ if (so->base.buffer_offset)
+ iris_math_add32_gpr0(ice, batch, -so->base.buffer_offset);
iris_math_div32_gpr0(ice, batch, so->stride);
_iris_emit_lrr(batch, _3DPRIM_VERTEX_COUNT, CS_GPR(0));
iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;
- prim.PredicateEnable =
- ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT;
+ prim.PredicateEnable = use_predicate;
if (draw->indirect || draw->count_from_stream_output) {
prim.IndirectParameterEnable = true;
struct iris_shader_state *shs = &ice->state.shaders[stage];
pipe_resource_reference(&shs->sampler_table.res, NULL);
for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
- pipe_resource_reference(&shs->constbuf[i].data.res, NULL);
- pipe_resource_reference(&shs->constbuf[i].surface_state.res, NULL);
+ pipe_resource_reference(&shs->constbuf[i].buffer, NULL);
+ pipe_resource_reference(&shs->constbuf_surf_state[i].res, NULL);
}
for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
- pipe_resource_reference(&shs->image[i].res, NULL);
+ pipe_resource_reference(&shs->image[i].base.resource, NULL);
pipe_resource_reference(&shs->image[i].surface_state.res, NULL);
}
for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
- pipe_resource_reference(&shs->ssbo[i], NULL);
- pipe_resource_reference(&shs->ssbo_surface_state[i].res, NULL);
+ pipe_resource_reference(&shs->ssbo[i].buffer, NULL);
+ pipe_resource_reference(&shs->ssbo_surf_state[i].res, NULL);
}
for (int i = 0; i < IRIS_MAX_TEXTURE_SAMPLERS; i++) {
pipe_sampler_view_reference((struct pipe_sampler_view **)
/* ------------------------------------------------------------------- */
+static void
+iris_rebind_buffer(struct iris_context *ice,
+ struct iris_resource *res,
+ uint64_t old_address)
+{
+ struct pipe_context *ctx = &ice->ctx;
+ struct iris_screen *screen = (void *) ctx->screen;
+ struct iris_genx_state *genx = ice->state.genx;
+
+ assert(res->base.target == PIPE_BUFFER);
+
+ /* Buffers can't be framebuffer attachments, nor display related,
+ * and we don't have upstream Clover support.
+ */
+ assert(!(res->bind_history & (PIPE_BIND_DEPTH_STENCIL |
+ PIPE_BIND_RENDER_TARGET |
+ PIPE_BIND_BLENDABLE |
+ PIPE_BIND_DISPLAY_TARGET |
+ PIPE_BIND_CURSOR |
+ PIPE_BIND_COMPUTE_RESOURCE |
+ PIPE_BIND_GLOBAL)));
+
+ if (res->bind_history & PIPE_BIND_VERTEX_BUFFER) {
+ uint64_t bound_vbs = ice->state.bound_vertex_buffers;
+ while (bound_vbs) {
+ const int i = u_bit_scan64(&bound_vbs);
+ struct iris_vertex_buffer_state *state = &genx->vertex_buffers[i];
+
+ /* Update the CPU struct */
+ STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32);
+ STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64);
+ uint64_t *addr = (uint64_t *) &state->state[1];
+
+ if (*addr == old_address) {
+ *addr = res->bo->gtt_offset;
+ ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
+ }
+ }
+ }
+
+ /* No need to handle these:
+ * - PIPE_BIND_INDEX_BUFFER (emitted for every indexed draw)
+ * - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw)
+ * - PIPE_BIND_QUERY_BUFFER (no persistent state references)
+ */
+
+ if (res->bind_history & PIPE_BIND_STREAM_OUTPUT) {
+ /* XXX: be careful about resetting vs appending... */
+ assert(false);
+ }
+
+ for (int s = MESA_SHADER_VERTEX; s < MESA_SHADER_STAGES; s++) {
+ struct iris_shader_state *shs = &ice->state.shaders[s];
+ enum pipe_shader_type p_stage = stage_to_pipe(s);
+
+ if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
+ /* Skip constant buffer 0, it's for regular uniforms, not UBOs */
+ uint32_t bound_cbufs = shs->bound_cbufs & ~1u;
+ while (bound_cbufs) {
+ const int i = u_bit_scan(&bound_cbufs);
+ struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
+ struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
+
+ if (res->bo == iris_resource_bo(cbuf->buffer)) {
+ iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
+ ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << s;
+ }
+ }
+ }
+
+ if (res->bind_history & PIPE_BIND_SHADER_BUFFER) {
+ uint32_t bound_ssbos = shs->bound_ssbos;
+ while (bound_ssbos) {
+ const int i = u_bit_scan(&bound_ssbos);
+ struct pipe_shader_buffer *ssbo = &shs->ssbo[i];
+
+ if (res->bo == iris_resource_bo(ssbo->buffer)) {
+ struct pipe_shader_buffer buf = {
+ .buffer = &res->base,
+ .buffer_offset = ssbo->buffer_offset,
+ .buffer_size = ssbo->buffer_size,
+ };
+ iris_set_shader_buffers(ctx, p_stage, i, 1, &buf,
+ (shs->writable_ssbos >> i) & 1);
+ }
+ }
+ }
+
+ if (res->bind_history & PIPE_BIND_SAMPLER_VIEW) {
+ uint32_t bound_sampler_views = shs->bound_sampler_views;
+ while (bound_sampler_views) {
+ const int i = u_bit_scan(&bound_sampler_views);
+ struct iris_sampler_view *isv = shs->textures[i];
+
+ if (res->bo == iris_resource_bo(isv->base.texture)) {
+ void *map = alloc_surface_states(ice->state.surface_uploader,
+ &isv->surface_state,
+ isv->res->aux.sampler_usages);
+ assert(map);
+ fill_buffer_surface_state(&screen->isl_dev, isv->res, map,
+ isv->view.format, isv->view.swizzle,
+ isv->base.u.buf.offset,
+ isv->base.u.buf.size);
+ ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << s;
+ }
+ }
+ }
+
+ if (res->bind_history & PIPE_BIND_SHADER_IMAGE) {
+ uint32_t bound_image_views = shs->bound_image_views;
+ while (bound_image_views) {
+ const int i = u_bit_scan(&bound_image_views);
+ struct iris_image_view *iv = &shs->image[i];
+
+ if (res->bo == iris_resource_bo(iv->base.resource)) {
+ iris_set_shader_images(ctx, p_stage, i, 1, &iv->base);
+ }
+ }
+ }
+ }
+}
+
+/* ------------------------------------------------------------------- */
+
static void
iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
uint32_t src)
}
}
+#if GEN_GEN == 9
+/**
+ * Preemption on Gen9 has to be enabled or disabled in various cases.
+ *
+ * See these workarounds for preemption:
+ * - WaDisableMidObjectPreemptionForGSLineStripAdj
+ * - WaDisableMidObjectPreemptionForTrifanOrPolygon
+ * - WaDisableMidObjectPreemptionForLineLoop
+ * - WA#0798
+ *
+ * We don't put this in the vtable because it's only used on Gen9.
+ */
+void
+gen9_toggle_preemption(struct iris_context *ice,
+ struct iris_batch *batch,
+ const struct pipe_draw_info *draw)
+{
+ struct iris_genx_state *genx = ice->state.genx;
+ bool object_preemption = true;
+
+ /* WaDisableMidObjectPreemptionForGSLineStripAdj
+ *
+ * "WA: Disable mid-draw preemption when draw-call is a linestrip_adj
+ * and GS is enabled."
+ */
+ if (draw->mode == PIPE_PRIM_LINE_STRIP_ADJACENCY &&
+ ice->shaders.prog[MESA_SHADER_GEOMETRY])
+ object_preemption = false;
+
+ /* WaDisableMidObjectPreemptionForTrifanOrPolygon
+ *
+ * "TriFan miscompare in Execlist Preemption test. Cut index that is
+ *     on a previous context. End the previous, then resume another
+ *     context with a tri-fan or polygon, and the vertex count is
+ *     corrupted. If we preempt again we will cause corruption.
+ *
+ * WA: Disable mid-draw preemption when draw-call has a tri-fan."
+ */
+ if (draw->mode == PIPE_PRIM_TRIANGLE_FAN)
+ object_preemption = false;
+
+ /* WaDisableMidObjectPreemptionForLineLoop
+ *
+ * "VF Stats Counters Missing a vertex when preemption enabled.
+ *
+ * WA: Disable mid-draw preemption when the draw uses a lineloop
+ * topology."
+ */
+ if (draw->mode == PIPE_PRIM_LINE_LOOP)
+ object_preemption = false;
+
+ /* WA#0798
+ *
+ * "VF is corrupting GAFS data when preempted on an instance boundary
+ * and replayed with instancing enabled.
+ *
+ *     WA: Disable preemption when using instancing."
+ */
+ if (draw->instance_count > 1)
+ object_preemption = false;
+
+ if (genx->object_preemption != object_preemption) {
+ iris_enable_obj_preemption(batch, object_preemption);
+ genx->object_preemption = object_preemption;
+ }
+}
+#endif
+
void
genX(init_state)(struct iris_context *ice)
{
ice->vtbl.update_surface_base_address = iris_update_surface_base_address;
ice->vtbl.upload_compute_state = iris_upload_compute_state;
ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
+ ice->vtbl.rebind_buffer = iris_rebind_buffer;
ice->vtbl.load_register_reg32 = iris_load_register_reg32;
ice->vtbl.load_register_reg64 = iris_load_register_reg64;
ice->vtbl.load_register_imm32 = iris_load_register_imm32;