uint32_t size,
const void *pValues)
{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
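+ /* stash the data in the command buffer; it is uploaded to the constant file at draw time by tu6_emit_user_consts() */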
+ memcpy((void*) cmd_buffer->push_constants + offset, pValues, size);
}
static void
tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
struct tu_descriptor_state *descriptors_state,
- gl_shader_stage type)
+ gl_shader_stage type,
+ uint32_t *push_constants)
{
const struct tu_program_descriptor_linkage *link =
&pipeline->program.link[type];
for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
if (state->range[i].start < state->range[i].end) {
- assert(i && i - 1 < link->ubo_map.num);
uint32_t *ptr = map_get(descriptors_state, &link->ubo_map, i - 1);
uint32_t size = state->range[i].end - state->range[i].start;
debug_assert((size % 16) == 0);
debug_assert((offset % 16) == 0);
+ if (i == 0) {
+ /* push constants */
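+ /* ubo slot 0 is reserved for push constants; the data stashed by tu_CmdPushConstants is emitted inline here */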
+ tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (size / 4));
+ tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
+ CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
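+ /* SS6_DIRECT: no external source address, the payload follows inline */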
+ tu_cs_emit(cs, 0);
+ tu_cs_emit(cs, 0);
+ for (unsigned i = 0; i < size / 4; i++)
+ tu_cs_emit(cs, push_constants[i + offset / 4]);
+ continue;
+ }
+
tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
}
static struct tu_cs_entry
-tu6_emit_consts(struct tu_device *device, struct tu_cs *draw_state,
+tu6_emit_consts(struct tu_cmd_buffer *cmd,
const struct tu_pipeline *pipeline,
struct tu_descriptor_state *descriptors_state,
gl_shader_stage type)
{
struct tu_cs cs;
- tu_cs_begin_sub_stream(device, draw_state, 512, &cs); /* TODO: maximum size? */
+ tu_cs_begin_sub_stream(cmd->device, &cmd->draw_state, 512, &cs); /* TODO: maximum size? */
- tu6_emit_user_consts(&cs, pipeline, descriptors_state, type);
+ tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
tu6_emit_ubos(&cs, pipeline, descriptors_state, type);
- return tu_cs_end_sub_stream(draw_state, &cs);
+ return tu_cs_end_sub_stream(&cmd->draw_state, &cs);
}
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_VS_CONST,
.enable_mask = 0x7,
- .ib = tu6_emit_consts(cmd->device, &cmd->draw_state, pipeline,
- descriptors_state, MESA_SHADER_VERTEX)
+ .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_FS_CONST,
.enable_mask = 0x6,
- .ib = tu6_emit_consts(cmd->device, &cmd->draw_state, pipeline,
- descriptors_state, MESA_SHADER_FRAGMENT)
+ .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT)
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
struct tu_shader *shader)
{
+ if (instr->intrinsic == nir_intrinsic_load_push_constant) {
+ /* note: ir3 wants load_ubo, not load_uniform */
+ assert(nir_intrinsic_base(instr) == 0);
+
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
+ load->num_components = instr->num_components;
+ load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
+ load->src[1] = instr->src[0];
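+ /* load_ubo srcs: buffer index (0 = push constant block) and byte offset */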
+ nir_ssa_dest_init(&load->instr, &load->dest,
+ load->num_components, instr->dest.ssa.bit_size,
+ instr->dest.ssa.name);
+ nir_builder_instr_insert(b, &load->instr);
+ nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));
+
+ nir_instr_remove(&instr->instr);
+
+ return true;
+ }
+
if (instr->intrinsic != nir_intrinsic_vulkan_resource_index)
return false;
nir_foreach_block(block, impl) {
nir_foreach_instr_safe(instr, block) {
+ b.cursor = nir_before_instr(instr);
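+ /* instructions created by the lowering (e.g. nir_imm_int) are inserted right before the one being replaced */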
switch (instr->type) {
case nir_instr_type_tex:
progress |= lower_sampler(&b, nir_instr_as_tex(instr), shader);
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
+ /* num_uniforms only used by ir3 for size of ubo 0 (push constants), in vec4 units */
+ nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE / 16;
+
shader->ir3_shader.compiler = dev->compiler;
shader->ir3_shader.type = stage;
shader->ir3_shader.nir = nir;