}
}
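+/* Program the L3 cache partitioning by writing L3CNTLREG with an
+ * MI_LOAD_REGISTER_IMM (via iris_emit_lri).
+ */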
+static void
+iris_emit_l3_config(struct iris_batch *batch, const struct gen_l3_config *cfg,
+ bool has_slm, bool wants_dc_cache)
+{
+ uint32_t reg_val;
+ iris_pack_state(GENX(L3CNTLREG), &reg_val, reg) {
+ reg.SLMEnable = has_slm;
+#if GEN_GEN == 11
+ /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
+ * in the L3CNTLREG register. The default setting of the bit is not
+ * the desired behavior.
+ */
+ reg.ErrorDetectionBehaviorControl = true;
+#endif
+ reg.URBAllocation = cfg->n[GEN_L3P_URB];
+ reg.ROAllocation = cfg->n[GEN_L3P_RO];
+ reg.DCAllocation = cfg->n[GEN_L3P_DC];
+ reg.AllAllocation = cfg->n[GEN_L3P_ALL];
+ }
+ iris_emit_lri(batch, L3CNTLREG, reg_val);
+}
+
+static void
+iris_emit_default_l3_config(struct iris_batch *batch,
+ const struct gen_device_info *devinfo,
+ bool compute)
+{
+ bool wants_dc_cache = true;
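+ /* SLM is carved out of the L3 cache, so only compute contexts ask
+ * for it (the GPGPU pipeline passes compute=true below). */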
+ bool has_slm = compute;
+ const struct gen_l3_weights w =
+ gen_get_default_l3_weights(devinfo, wants_dc_cache, has_slm);
+ const struct gen_l3_config *cfg = gen_get_l3_config(devinfo, w);
+ iris_emit_l3_config(batch, cfg, has_slm, wants_dc_cache);
+}
+
/**
* Upload the initial GPU state for a render context.
*
emit_pipeline_select(batch, _3D);
+ iris_emit_default_l3_config(batch, devinfo, false);
+
init_state_base_address(batch);
#if GEN_GEN >= 9
- // XXX: INSTPM on Gen8
 iris_pack_state(GENX(CS_DEBUG_MODE2), &reg_val, reg) {
reg.CONSTANT_BUFFERAddressOffsetDisable = true;
reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);
/* No polygon stippling offsets are necessary. */
- // XXX: may need to set an offset for origin-UL framebuffers
+ /* TODO: may need to set an offset for origin-UL framebuffers */
iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);
/* Set a static partitioning of the push constant area. */
- // XXX: this may be a bad idea...could starve the push ringbuffers...
+ /* TODO: this may be a bad idea...could starve the push ringbuffers... */
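+ /* 3DSTATE_PUSH_CONSTANT_ALLOC_{VS,HS,DS,GS,PS} use consecutive 3D
+ * command subopcodes (18..22), so the loop below patches
+ * _3DCommandSubOpcode to 18 + i for each stage.
+ */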
for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
alloc._3DCommandSubOpcode = 18 + i;
emit_pipeline_select(batch, GPGPU);
- const bool has_slm = true;
- const bool wants_dc_cache = true;
-
- const struct gen_l3_weights w =
- gen_get_default_l3_weights(devinfo, wants_dc_cache, has_slm);
- const struct gen_l3_config *cfg = gen_get_l3_config(devinfo, w);
-
- uint32_t reg_val;
- iris_pack_state(GENX(L3CNTLREG), &reg_val, reg) {
- reg.SLMEnable = has_slm;
-#if GEN_GEN == 11
- /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
- * in L3CNTLREG register. The default setting of the bit is not the
- * desirable behavior.
- */
- reg.ErrorDetectionBehaviorControl = true;
-#endif
- reg.URBAllocation = cfg->n[GEN_L3P_URB];
- reg.ROAllocation = cfg->n[GEN_L3P_RO];
- reg.DCAllocation = cfg->n[GEN_L3P_DC];
- reg.AllAllocation = cfg->n[GEN_L3P_ALL];
- }
- iris_emit_lri(batch, L3CNTLREG, reg_val);
+ iris_emit_default_l3_config(batch, devinfo, true);
init_state_base_address(batch);
/** Bitfield of whether blending is enabled for RT[i] - for aux resolves */
uint8_t blend_enables;
+
+ /** Bitfield of whether color writes are enabled for RT[i] */
+ uint8_t color_write_enables;
};
static enum pipe_blendfactor
uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);
cso->blend_enables = 0;
+ cso->color_write_enables = 0;
STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS <= 8);
cso->alpha_to_coverage = state->alpha_to_coverage;
if (rt->blend_enable)
cso->blend_enables |= 1u << i;
+ if (rt->colormask)
+ cso->color_write_enables |= 1u << i;
+
iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
be.LogicOpEnable = state->logicop_enable;
be.LogicOpFunction = state->logicop_func;
ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_BLEND];
}
+/**
+ * Return true if the FS writes to any color outputs which are not disabled
+ * via color masking.
+ */
+static bool
+has_writeable_rt(const struct iris_blend_state *cso_blend,
+ const struct shader_info *fs_info)
+{
+ if (!fs_info)
+ return false;
+
+ unsigned rt_outputs = fs_info->outputs_written >> FRAG_RESULT_DATA0;
+
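+ /* gl_FragColor (FRAG_RESULT_COLOR) is broadcast to every color
+ * buffer, so treat it as writing all render targets.
+ */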
+ if (fs_info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR))
+ rt_outputs = (1 << BRW_MAX_DRAW_BUFFERS) - 1;
+
+ return cso_blend->color_write_enables & rt_outputs;
+}
+
/**
* Gallium CSO for depth, stencil, and alpha testing state.
*/
struct iris_rasterizer_state *cso =
malloc(sizeof(struct iris_rasterizer_state));
-#if 0
- point_quad_rasterization -> SBE?
-
- not necessary?
- {
- poly_smooth
- bottom_edge_rule
-
- offset_units_unscaled - cap not exposed
- }
- #endif
-
- // XXX: it may make more sense just to store the pipe_rasterizer_state,
- // we're copying a lot of booleans here. But we don't need all of them...
-
cso->multisample = state->multisample;
cso->force_persample_interp = state->force_persample_interp;
cso->clip_halfz = state->clip_halfz;
state->line_smooth ? _10pixels : _05pixels;
sf.LastPixelEnable = state->line_last_pixel;
sf.LineWidth = line_width;
- sf.SmoothPointEnable = state->point_smooth || state->multisample;
+ sf.SmoothPointEnable = (state->point_smooth || state->multisample) &&
+ !state->point_quad_rasterization;
sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
sf.PointWidth = state->point_size;
rr.GlobalDepthOffsetConstant = state->offset_units * 2;
rr.GlobalDepthOffsetScale = state->offset_scale;
rr.GlobalDepthOffsetClamp = state->offset_clamp;
- rr.SmoothPointEnable = state->point_smooth || state->multisample;
+ rr.SmoothPointEnable = state->point_smooth;
rr.AntialiasingEnable = state->line_smooth;
rr.ScissorRectangleEnable = state->scissor;
#if GEN_GEN >= 9
#else
rr.ViewportZClipTestEnable = (state->depth_clip_near || state->depth_clip_far);
#endif
- //rr.ConservativeRasterizationEnable = not yet supported by Gallium...
+ /* TODO: ConservativeRasterizationEnable */
}
iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
.mocs = mocs(bo));
}
+#define SURFACE_STATE_ALIGNMENT 64
+
/**
- * Allocate a SURFACE_STATE structure.
+ * Allocate several contiguous SURFACE_STATE structures, one for each
+ * supported auxiliary surface mode.
*/
static void *
alloc_surface_states(struct u_upload_mgr *mgr,
- struct iris_state_ref *ref)
+ struct iris_state_ref *ref,
+ unsigned aux_usages)
{
const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
- void *map = upload_state(mgr, ref, surf_size, 64);
+ /* If this changes, update this to explicitly align pointers */
+ STATIC_ASSERT(4 * GENX(RENDER_SURFACE_STATE_length) == SURFACE_STATE_ALIGNMENT);
+
+ assert(aux_usages != 0);
+
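+ /* One SURFACE_STATE is allocated for each bit set in aux_usages;
+ * e.g. (1 << ISL_AUX_USAGE_NONE) | (1 << ISL_AUX_USAGE_CCS_E) yields
+ * two consecutive 64-byte states. surf_state_offset_for_aux()
+ * computes the offset of the state for a given mode.
+ */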
+ void *map =
+ upload_state(mgr, ref, util_bitcount(aux_usages) * surf_size,
+ SURFACE_STATE_ALIGNMENT);
ref->offset += iris_bo_offset_from_base_address(iris_resource_bo(ref->res));
fill_surface_state(struct isl_device *isl_dev,
void *map,
struct iris_resource *res,
- struct isl_view *view)
+ struct isl_view *view,
+ unsigned aux_usage)
{
struct isl_surf_fill_state_info f = {
.surf = &res->surf,
.address = res->bo->gtt_offset,
};
+ if (aux_usage != ISL_AUX_USAGE_NONE) {
+ f.aux_surf = &res->aux.surf;
+ f.aux_usage = aux_usage;
+ f.aux_address = res->aux.bo->gtt_offset + res->aux.offset;
+ /* XXX: clear color */
+ }
+
isl_surf_fill_state_s(isl_dev, map, &f);
}
pipe_reference_init(&isv->base.reference, 1);
pipe_resource_reference(&isv->base.texture, tex);
- void *map = alloc_surface_states(ice->state.surface_uploader,
- &isv->surface_state);
- if (!unlikely(map))
- return NULL;
-
if (util_format_is_depth_or_stencil(tmpl->format)) {
struct iris_resource *zres, *sres;
const struct util_format_description *desc =
isv->res = (struct iris_resource *) tex;
+ void *map = alloc_surface_states(ice->state.surface_uploader,
+ &isv->surface_state,
+ isv->res->aux.possible_usages);
+ if (!unlikely(map))
+ return NULL;
+
isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;
if (isv->base.target == PIPE_TEXTURE_CUBE ||
isv->view.array_len =
tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
- fill_surface_state(&screen->isl_dev, map, isv->res, &isv->view);
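+ /* Fill a SURFACE_STATE for each possible aux mode, walking the set
+ * bits from lowest to highest (u_bit_scan order) so each state lands
+ * at the offset surf_state_offset_for_aux() computes for it.
+ */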
+ unsigned aux_modes = isv->res->aux.possible_usages;
+ while (aux_modes) {
+ enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
+
+ fill_surface_state(&screen->isl_dev, map, isv->res, &isv->view,
+ aux_usage);
+
+ map += SURFACE_STATE_ALIGNMENT;
+ }
} else {
fill_buffer_surface_state(&screen->isl_dev, isv->res->bo, map,
isv->view.format, tmpl->u.buf.offset,
void *map = alloc_surface_states(ice->state.surface_uploader,
- &surf->surface_state);
+ &surf->surface_state,
+ res->aux.possible_usages);
if (!unlikely(map))
return NULL;
- fill_surface_state(&screen->isl_dev, map, res, &surf->view);
+ unsigned aux_modes = res->aux.possible_usages;
+ while (aux_modes) {
+ enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
+
+ fill_surface_state(&screen->isl_dev, map, res, &surf->view, aux_usage);
+
+ map += SURFACE_STATE_ALIGNMENT;
+ }
return psurf;
}
// XXX: these are not retained forever, use a separate uploader?
void *map =
alloc_surface_states(ice->state.surface_uploader,
- &shs->image[start_slot + i].surface_state);
+ &shs->image[start_slot + i].surface_state,
+ 1 << ISL_AUX_USAGE_NONE);
if (!unlikely(map)) {
pipe_resource_reference(&shs->image[start_slot + i].res, NULL);
return;
fill_buffer_surface_state(&screen->isl_dev, res->bo, map,
isl_fmt, 0, res->bo->size);
} else {
- fill_surface_state(&screen->isl_dev, map, res, &view);
+ /* Images don't support compression */
+ unsigned aux_modes = 1 << ISL_AUX_USAGE_NONE;
+ while (aux_modes) {
+ enum isl_aux_usage usage = u_bit_scan(&aux_modes);
+
+ fill_surface_state(&screen->isl_dev, map, res, &view, usage);
+
+ map += SURFACE_STATE_ALIGNMENT;
+ }
}
isl_surf_fill_image_param(&screen->isl_dev,
info.mocs = mocs(zres->bo);
view.format = zres->surf.format;
+
+ if (iris_resource_level_has_hiz(zres, view.base_level)) {
+ info.hiz_usage = ISL_AUX_USAGE_HIZ;
+ info.hiz_surf = &zres->aux.surf;
+ info.hiz_address = zres->aux.bo->gtt_offset;
+ }
}
if (stencil_res) {
assert(!buffer->is_user_buffer);
- ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
-
pipe_resource_reference(&state->resource, buffer->buffer.resource);
struct iris_resource *res = (void *) state->resource;
- if (res)
+ if (res) {
+ ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
+ }
iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
vb.VertexBufferIndex = start_slot + i;
key->coherent_fb_fetch = true;
- // XXX: key->force_dual_color_blend for unigine
- // XXX: respect hint for high_quality_derivatives:1;
+ /* TODO: support key->force_dual_color_blend for Unigine */
+ /* TODO: Respect glHint for key->high_quality_derivatives */
}
static void
{
}
-#if 0
- // XXX: these need to go in INIT_THREAD_DISPATCH_FIELDS
- pkt.SamplerCount = \
- DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4); \
-
-#endif
-
static uint64_t
KSP(const struct iris_compiled_shader *shader)
{
return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
}
-// Gen11 workaround table #2056 WABTPPrefetchDisable suggests to disable
-// prefetching of binding tables in A0 and B0 steppings. XXX: Revisit
-// this WA on C0 stepping.
+/* Gen11 workaround table #2056 (WABTPPrefetchDisable) suggests disabling
+ * prefetching of binding tables on A0 and B0 steppings. XXX: Revisit
+ * this WA on C0 stepping.
+ *
+ * TODO: Fill out SamplerCount for prefetching?
+ */
#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage) \
pkt.KernelStartPointer = KSP(shader); \
iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
ps.VectorMaskEnable = true;
- //ps.SamplerCount = ...
// XXX: WABTPPrefetchDisable, see above, drop at C0
ps.BindingTableEntryCount = GEN_GEN == 11 ? 0 :
prog_data->binding_table.size_bytes / 4;
iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
psx.PixelShaderValid = true;
psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
- // XXX: alpha test / alpha to coverage :/
- psx.PixelShaderKillsPixel = wm_prog_data->uses_kill ||
- wm_prog_data->uses_omask;
+ psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
return ice->state.null_fb.offset;
}
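+/* SURFACE_STATEs for a resource's aux modes are stored consecutively
+ * (see alloc_surface_states), so the offset for a given mode is the
+ * number of enabled modes below it, times the size of one state. For
+ * example, if possible_usages has bits 0 and 3 set and aux_usage is 3,
+ * the mask keeps only bit 0, util_bitcount() returns 1, and the offset
+ * is 64 bytes.
+ */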
+static uint32_t
+surf_state_offset_for_aux(struct iris_resource *res,
+ enum isl_aux_usage aux_usage)
+{
+ return SURFACE_STATE_ALIGNMENT *
+ util_bitcount(res->aux.possible_usages & ((1 << aux_usage) - 1));
+}
+
/**
* Add a surface to the validation list, as well as the buffer containing
* the corresponding SURFACE_STATE.
static uint32_t
use_surface(struct iris_batch *batch,
struct pipe_surface *p_surf,
- bool writeable)
+ bool writeable,
+ enum isl_aux_usage aux_usage)
{
struct iris_surface *surf = (void *) p_surf;
+ struct iris_resource *res = (void *) p_surf->texture;
iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture), writeable);
iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.res), false);
- return surf->surface_state.offset;
+ if (res->aux.bo)
+ iris_use_pinned_bo(batch, res->aux.bo, writeable);
+
+ return surf->surface_state.offset +
+ surf_state_offset_for_aux(res, aux_usage);
}
static uint32_t
-use_sampler_view(struct iris_batch *batch, struct iris_sampler_view *isv)
+use_sampler_view(struct iris_context *ice,
+ struct iris_batch *batch,
+ struct iris_sampler_view *isv)
{
+ /* XXX: ASTC hacks */
+ enum isl_aux_usage aux_usage =
+ iris_resource_texture_aux_usage(ice, isv->res, isv->view.format, 0);
+
iris_use_pinned_bo(batch, isv->res->bo, false);
iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.res), false);
- return isv->surface_state.offset;
+ if (isv->res->aux.bo)
+ iris_use_pinned_bo(batch, isv->res->aux.bo, false);
+
+ return isv->surface_state.offset +
+ surf_state_offset_for_aux(isv->res, aux_usage);
}
static uint32_t
if (!shs->image[i].res)
return use_null_surface(batch, ice);
+ struct iris_resource *res = (void *) shs->image[i].res;
struct iris_state_ref *surf_state = &shs->image[i].surface_state;
+ bool write = shs->image[i].access & PIPE_IMAGE_ACCESS_WRITE;
- iris_use_pinned_bo(batch, iris_resource_bo(shs->image[i].res),
- shs->image[i].access & PIPE_IMAGE_ACCESS_WRITE);
+ iris_use_pinned_bo(batch, res->bo, write);
iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false);
+ if (res->aux.bo)
+ iris_use_pinned_bo(batch, res->aux.bo, write);
+
return surf_state->offset;
}
/* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
if (cso_fb->nr_cbufs) {
for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
- uint32_t addr =
- cso_fb->cbufs[i] ? use_surface(batch, cso_fb->cbufs[i], true)
- : use_null_fb_surface(batch, ice);
+ uint32_t addr;
+ if (cso_fb->cbufs[i]) {
+ addr = use_surface(batch, cso_fb->cbufs[i], true,
+ ice->state.draw_aux_usage[i]);
+ } else {
+ addr = use_null_fb_surface(batch, ice);
+ }
push_bt_entry(addr);
}
} else {
}
}
- bt_assert(texture_start, info->num_textures > 0);
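+ /* util_last_bit() gives the highest used texture slot plus one, so
+ * any unbound slots below it still receive a null surface entry.
+ */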
+ unsigned num_textures = util_last_bit(info->textures_used);
- for (int i = 0; i < info->num_textures; i++) {
+ bt_assert(texture_start, num_textures > 0);
+
+ for (int i = 0; i < num_textures; i++) {
struct iris_sampler_view *view = shs->textures[i];
- uint32_t addr = view ? use_sampler_view(batch, view)
+ uint32_t addr = view ? use_sampler_view(ice, batch, view)
: use_null_surface(batch, ice);
push_bt_entry(addr);
}
push_bt_entry(addr);
}
- const int num_ubos = iris_get_shader_num_ubos(ice, stage);
-
- bt_assert(ubo_start, num_ubos > 0);
+ bt_assert(ubo_start, shader->num_cbufs > 0);
- for (int i = 0; i < num_ubos; i++) {
+ for (int i = 0; i < shader->num_cbufs; i++) {
uint32_t addr = use_const_buffer(batch, ice, &shs->constbuf[i]);
push_bt_entry(addr);
}
}
#if 0
- // XXX: not implemented yet
+ /* XXX: YUV surfaces not implemented yet */
bt_assert(plane_start[1], ...);
bt_assert(plane_start[2], ...);
#endif
iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
&zres, &sres);
if (zres) {
+ iris_cache_flush_for_depth(batch, zres->bo);
+
iris_use_pinned_bo(batch, zres->bo,
ice->state.depth_writes_enabled);
+ if (zres->aux.bo) {
+ iris_use_pinned_bo(batch, zres->aux.bo,
+ ice->state.depth_writes_enabled);
+ }
}
+
if (sres) {
+ iris_cache_flush_for_depth(batch, sres->bo);
+
iris_use_pinned_bo(batch, sres->bo,
ice->state.stencil_writes_enabled);
}
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
const int header_dwords = GENX(BLEND_STATE_length);
- const int rt_dwords = cso_fb->nr_cbufs * GENX(BLEND_STATE_ENTRY_length);
+
+ /* Always write at least one BLEND_STATE - the final RT message will
+ * reference BLEND_STATE[0] even if there aren't color writes. There
+ * may still be alpha testing, computed depth, and so on.
+ */
+ const int rt_dwords =
+ MAX2(cso_fb->nr_cbufs, 1) * GENX(BLEND_STATE_ENTRY_length);
+
uint32_t blend_offset;
uint32_t *blend_map =
stream_state(batch, ice->state.dynamic_uploader,
if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
- ms.SampleMask = MAX2(ice->state.sample_mask, 1);
+ ms.SampleMask = ice->state.sample_mask;
}
}
wm.EarlyDepthStencilControl = EDSC_PREPS;
else if (wm_prog_data->has_side_effects)
wm.EarlyDepthStencilControl = EDSC_PSEXEC;
+
+ /* Force dispatch so pixel shaders with discards or side effects still
+ * run even when no color buffer is written. (We could skip this bit
+ * if color writes are enabled.)
+ */
+ if (wm_prog_data->has_side_effects || wm_prog_data->uses_kill)
+ wm.ForceThreadDispatchEnable = ForceON;
}
iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
}
if (dirty & IRIS_DIRTY_PS_BLEND) {
struct iris_blend_state *cso_blend = ice->state.cso_blend;
struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
+ const struct shader_info *fs_info =
+ iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
+
uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
- pb.HasWriteableRT = true; // XXX: comes from somewhere :(
+ pb.HasWriteableRT = has_writeable_rt(cso_blend, fs_info);
pb.AlphaTestEnable = cso_zsa->alpha.enabled;
}
if (zres) {
iris_use_pinned_bo(batch, zres->bo,
ice->state.depth_writes_enabled);
+ if (zres->aux.bo) {
+ iris_use_pinned_bo(batch, zres->aux.bo,
+ ice->state.depth_writes_enabled);
+ }
}
if (sres) {
high_bits = res->bo->gtt_offset >> 32ull;
if (high_bits != ice->state.last_vbo_high_bits[i]) {
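+ /* The CS stall is presumably needed so the invalidate isn't
+ * reordered ahead of work still using the old addresses; the cache
+ * only keys on the low 32 bits (see the vertex buffer comment above).
+ */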
- flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE |
+ PIPE_CONTROL_CS_STALL;
ice->state.last_vbo_high_bits[i] = high_bits;
}
}
}
- // XXX: Gen8 - PMA fix
+ /* TODO: Gen8 PMA fix */
}
static void
/* The VF cache key only uses 32-bits, see vertex buffer comment above */
uint16_t high_bits = bo->gtt_offset >> 32ull;
if (high_bits != ice->state.last_index_bo_high_bits) {
- iris_emit_pipe_control_flush(batch, PIPE_CONTROL_VF_CACHE_INVALIDATE);
+ iris_emit_pipe_control_flush(batch, PIPE_CONTROL_VF_CACHE_INVALIDATE |
+ PIPE_CONTROL_CS_STALL);
ice->state.last_index_bo_high_bits = high_bits;
}
}
struct iris_stream_output_target *so =
(void *) draw->count_from_stream_output;
- // XXX: avoid if possible
+ /* XXX: Replace with actual cache tracking */
iris_emit_pipe_control_flush(batch, PIPE_CONTROL_CS_STALL);
iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
vfe.NumberofURBEntries = 2;
vfe.URBEntryAllocationSize = 2;
- // XXX: Use Indirect Payload Storage?
vfe.CURBEAllocationSize =
ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
cs_prog_data->push.cross_thread.regs, 2);
}
}
- // XXX: hack iris_set_constant_buffers to upload these thread counts
- // XXX: along with regular uniforms for compute shaders, somehow.
-
+ /* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */
uint32_t curbe_data_offset = 0;
- // TODO: Move subgroup-id into uniforms ubo so we can push uniforms
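+ /* For now the only push constant is the per-thread subgroup ID (one
+ * dword per thread), as the assert below verifies.
+ */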
assert(cs_prog_data->push.cross_thread.dwords == 0 &&
cs_prog_data->push.per_thread.dwords == 1 &&
cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
const int i = u_bit_scan64(&bound_vbs);
pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL);
}
+ free(ice->state.genx);
- // XXX: unreference resources/surfaces.
for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
}
for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
struct iris_shader_state *shs = &ice->state.shaders[stage];
pipe_resource_reference(&shs->sampler_table.res, NULL);
+ for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
+ pipe_resource_reference(&shs->constbuf[i].data.res, NULL);
+ pipe_resource_reference(&shs->constbuf[i].surface_state.res, NULL);
+ }
+ for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
+ pipe_resource_reference(&shs->image[i].res, NULL);
+ pipe_resource_reference(&shs->image[i].surface_state.res, NULL);
+ }
+ for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
+ pipe_resource_reference(&shs->ssbo[i], NULL);
+ pipe_resource_reference(&shs->ssbo_surface_state[i].res, NULL);
+ }
+ for (int i = 0; i < IRIS_MAX_TEXTURE_SAMPLERS; i++) {
+ pipe_sampler_view_reference((struct pipe_sampler_view **)
+ &shs->textures[i], NULL);
+ }
}
- free(ice->state.genx);
+ pipe_resource_reference(&ice->state.grid_size.res, NULL);
+ pipe_resource_reference(&ice->state.grid_surf_state.res, NULL);
+
+ pipe_resource_reference(&ice->state.null_fb.res, NULL);
pipe_resource_reference(&ice->state.unbound_tex.res, NULL);
pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);