struct pipe_shader_buffer constbuf[PIPE_MAX_CONSTANT_BUFFERS];
struct iris_state_ref constbuf_surf_state[PIPE_MAX_CONSTANT_BUFFERS];
- struct pipe_constant_buffer cbuf0;
- bool cbuf0_needs_upload;
+ bool sysvals_need_upload;
/** Shader Storage Buffers */
struct pipe_shader_buffer ssbo[PIPE_MAX_SHADER_BUFFERS];
* needed, the constant buffer 0 will be needed, so account for it.
*/
unsigned num_cbufs = ish->nir->info.num_ubos;
- if (num_cbufs || num_system_values || ish->nir->num_uniforms)
+
+ if (num_cbufs || ish->nir->num_uniforms)
+ num_cbufs++;
+
+ if (num_system_values)
num_cbufs++;
/* Upload our newly read shader to the in-memory program cache and
if (tcs_info &&
tcs_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TCS;
- ice->state.shaders[MESA_SHADER_TESS_CTRL].cbuf0_needs_upload = true;
+ ice->state.shaders[MESA_SHADER_TESS_CTRL].sysvals_need_upload = true;
}
}
nir_validate_shader(nir, "before remapping");
- /* Place the new params at the front of constant buffer 0. */
+ /* Uniforms are stored in constant buffer 0, and the
+ * user-facing UBOs are offset by one.  So if any constant buffer is
+ * needed, constant buffer 0 will be needed as well, so account for it.
+ */
+ unsigned num_cbufs = nir->info.num_ubos;
+ if (num_cbufs || nir->num_uniforms)
+ num_cbufs++;
+
+ /* Place the new params in a new cbuf. */
if (num_system_values > 0) {
- nir->num_uniforms += num_system_values * sizeof(uint32_t);
+ unsigned sysval_cbuf_index = num_cbufs;
+ num_cbufs++;
system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
num_system_values);
assert(load->src[0].is_ssa);
if (load->src[0].ssa == temp_ubo_name) {
+ nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
nir_instr_rewrite_src(instr, &load->src[0],
- nir_src_for_ssa(nir_imm_int(&b, 0)));
- } else if (nir_src_is_const(load->src[0]) &&
- nir_src_as_uint(load->src[0]) == 0) {
- nir_ssa_def *offset =
- nir_iadd(&b, load->src[1].ssa,
- nir_imm_int(&b, 4 * num_system_values));
- nir_instr_rewrite_src(instr, &load->src[1],
- nir_src_for_ssa(offset));
+ nir_src_for_ssa(imm));
}
}
}
system_values = NULL;
}
+ assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
nir_validate_shader(nir, "after remap");
/* We don't use params[], but fs_visitor::nir_setup_uniforms() asserts
prog_data->nr_params = nir->num_uniforms / 4;
prog_data->param = rzalloc_array(mem_ctx, uint32_t, prog_data->nr_params);
- /* System values and uniforms are stored in constant buffer 0, the
- * user-facing UBOs are indexed by one. So if any constant buffer is
- * needed, the constant buffer 0 will be needed, so account for it.
- */
- unsigned num_cbufs = nir->info.num_ubos;
- if (num_cbufs || num_system_values || nir->num_uniforms)
- num_cbufs++;
-
/* Constant loads (if any) need to go at the end of the constant buffers so
* we need to know num_cbufs before we can lower to them.
*/
IRIS_DIRTY_BINDINGS_VS |
IRIS_DIRTY_CONSTANTS_VS |
IRIS_DIRTY_VF_SGVS;
- shs->cbuf0_needs_upload = true;
+ shs->sysvals_need_upload = true;
const struct brw_vs_prog_data *vs_prog_data =
(void *) shader->prog_data;
nir = brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, key);
/* Reserve space for passing the default tess levels as constants. */
+ num_cbufs = 1;
num_system_values = 8;
system_values =
rzalloc_array(mem_ctx, enum brw_param_builtin, num_system_values);
ice->state.dirty |= IRIS_DIRTY_TCS |
IRIS_DIRTY_BINDINGS_TCS |
IRIS_DIRTY_CONSTANTS_TCS;
- shs->cbuf0_needs_upload = true;
-
- if (!tcs) {
- /* We're binding a passthrough TCS, which doesn't have uniforms.
- * Since there's no actual TCS, the state tracker doesn't bother
- * to call set_constant_buffers to clear stale constant buffers.
- *
- * We do upload TCS constants for the default tesslevel system
- * values, however. In this case, we would see stale constant
- * data and try and read a dangling cbuf0->user_buffer pointer.
- * Just zero out the stale constants to avoid the upload.
- */
- memset(&shs->cbuf0, 0, sizeof(shs->cbuf0));
- }
+ shs->sysvals_need_upload = true;
}
}
ice->state.dirty |= IRIS_DIRTY_TES |
IRIS_DIRTY_BINDINGS_TES |
IRIS_DIRTY_CONSTANTS_TES;
- shs->cbuf0_needs_upload = true;
+ shs->sysvals_need_upload = true;
}
/* TODO: Could compare and avoid flagging this. */
const struct shader_info *tes_info = &ish->nir->info;
if (tes_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TES;
- ice->state.shaders[MESA_SHADER_TESS_EVAL].cbuf0_needs_upload = true;
+ ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
}
}
ice->state.dirty |= IRIS_DIRTY_GS |
IRIS_DIRTY_BINDINGS_GS |
IRIS_DIRTY_CONSTANTS_GS;
- shs->cbuf0_needs_upload = true;
+ shs->sysvals_need_upload = true;
}
}
IRIS_DIRTY_WM |
IRIS_DIRTY_CLIP |
IRIS_DIRTY_SBE;
- shs->cbuf0_needs_upload = true;
+ shs->sysvals_need_upload = true;
}
}
ice->state.dirty |= IRIS_DIRTY_CS |
IRIS_DIRTY_BINDINGS_CS |
IRIS_DIRTY_CONSTANTS_CS;
- shs->cbuf0_needs_upload = true;
+ shs->sysvals_need_upload = true;
}
}
/* Broadwell also needs brw_image_params re-uploaded */
if (GEN_GEN < 9) {
ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
- shs->cbuf0_needs_upload = true;
+ shs->sysvals_need_upload = true;
}
}
memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float));
ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TCS;
- shs->cbuf0_needs_upload = true;
+ shs->sysvals_need_upload = true;
}
static void
memcpy(&ice->state.clip_planes, state, sizeof(*state));
ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS;
- shs->cbuf0_needs_upload = true;
+ shs->sysvals_need_upload = true;
}
/**
struct iris_shader_state *shs = &ice->state.shaders[stage];
struct pipe_shader_buffer *cbuf = &shs->constbuf[index];
- if (input && input->buffer) {
+ if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
shs->bound_cbufs |= 1u << index;
- assert(index > 0);
+ if (input->user_buffer) {
+ void *map = NULL;
+ pipe_resource_reference(&cbuf->buffer, NULL);
+ u_upload_alloc(ice->ctx.const_uploader, 0, input->buffer_size, 64,
+ &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
- pipe_resource_reference(&cbuf->buffer, input->buffer);
- cbuf->buffer_offset = input->buffer_offset;
- cbuf->buffer_size =
- MIN2(input->buffer_size,
- iris_resource_bo(input->buffer)->size - cbuf->buffer_offset);
+ if (!cbuf->buffer) {
+ /* Allocation was unsuccessful - just unbind */
+ iris_set_constant_buffer(ctx, p_stage, index, NULL);
+ return;
+ }
+
+ assert(map);
+ memcpy(map, input->user_buffer, input->buffer_size);
+ u_upload_unmap(ice->ctx.const_uploader);
+
+ } else if (input->buffer) {
+ pipe_resource_reference(&cbuf->buffer, input->buffer);
+
+ cbuf->buffer_offset = input->buffer_offset;
+ cbuf->buffer_size =
+ MIN2(input->buffer_size,
+ iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset);
+ }
struct iris_resource *res = (void *) cbuf->buffer;
res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
}
- if (index == 0) {
- if (input)
- memcpy(&shs->cbuf0, input, sizeof(shs->cbuf0));
- else
- memset(&shs->cbuf0, 0, sizeof(shs->cbuf0));
-
- shs->cbuf0_needs_upload = true;
- }
-
ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
// XXX: maybe not necessary all the time...?
// XXX: we need 3DS_BTP to commit these changes, and if we fell back to
}
static void
-upload_uniforms(struct iris_context *ice,
+upload_sysvals(struct iris_context *ice,
gl_shader_stage stage)
{
UNUSED struct iris_genx_state *genx = ice->state.genx;
struct iris_shader_state *shs = &ice->state.shaders[stage];
- struct pipe_shader_buffer *cbuf = &shs->constbuf[0];
- struct iris_compiled_shader *shader = ice->shaders.prog[stage];
-
- unsigned upload_size = shader->num_system_values * sizeof(uint32_t) +
- shs->cbuf0.buffer_size;
- if (upload_size == 0)
+ struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+ if (!shader || shader->num_system_values == 0)
return;
+ assert(shader->num_cbufs > 0);
+
+ unsigned sysval_cbuf_index = shader->num_cbufs - 1;
+ struct pipe_shader_buffer *cbuf = &shs->constbuf[sysval_cbuf_index];
+ unsigned upload_size = shader->num_system_values * sizeof(uint32_t);
uint32_t *map = NULL;
+
+ assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS);
u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64,
&cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
*map++ = value;
}
- if (shs->cbuf0.user_buffer) {
- memcpy(map, shs->cbuf0.user_buffer, shs->cbuf0.buffer_size);
- }
-
cbuf->buffer_size = upload_size;
iris_upload_ubo_ssbo_surf_state(ice, cbuf,
- &shs->constbuf_surf_state[0], false);
- shs->cbuf0_needs_upload = false;
+ &shs->constbuf_surf_state[sysval_cbuf_index], false);
+
+ shs->sysvals_need_upload = false;
}
/**
if (!shader)
continue;
- if (shs->cbuf0_needs_upload)
- upload_uniforms(ice, stage);
+ if (shs->sysvals_need_upload)
+ upload_sysvals(ice, stage);
struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
*/
iris_use_pinned_bo(batch, ice->state.binder.bo, false);
- if ((dirty & IRIS_DIRTY_CONSTANTS_CS) && shs->cbuf0_needs_upload)
- upload_uniforms(ice, MESA_SHADER_COMPUTE);
+ if ((dirty & IRIS_DIRTY_CONSTANTS_CS) && shs->sysvals_need_upload)
+ upload_sysvals(ice, MESA_SHADER_COMPUTE);
if (dirty & IRIS_DIRTY_BINDINGS_CS)
iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);