return vtn_pointer_from_ssa(b, nir_load_param(&b->nb, param_idx), ptr_type);
}
+/* Return the number of NIR function parameters that a single SPIR-V
+ * function parameter of the given type expands to once composites are
+ * flattened: arrays/matrices contribute length * (params per element),
+ * structs contribute the sum over their members, a sampled image
+ * contributes two (image deref + sampler deref), and everything else
+ * takes exactly one slot.
+ */
+static unsigned
+vtn_type_count_function_params(struct vtn_type *type)
+{
+ switch (type->base_type) {
+ case vtn_base_type_array:
+ case vtn_base_type_matrix:
+ return type->length * vtn_type_count_function_params(type->array_element);
+
+ case vtn_base_type_struct: {
+ unsigned count = 0;
+ for (unsigned i = 0; i < type->length; i++)
+ count += vtn_type_count_function_params(type->members[i]);
+ return count;
+ }
+
+ case vtn_base_type_sampled_image:
+ /* Sampled images are passed as two pointer parameters. */
+ return 2;
+
+ default:
+ return 1;
+ }
+}
+
+/* Append nir_parameter entries to func->params for one SPIR-V function
+ * parameter, advancing *param_idx past every slot written.  The walk order
+ * and slot count must match vtn_type_count_function_params exactly, since
+ * that function sized the params array.
+ */
+static void
+vtn_type_add_to_function_params(struct vtn_type *type,
+ nir_function *func,
+ unsigned *param_idx)
+{
+ /* Parameter shape for anything passed as a deref: a single 32-bit
+ * scalar.
+ */
+ static const nir_parameter nir_deref_param = {
+ .num_components = 1,
+ .bit_size = 32,
+ };
+
+ switch (type->base_type) {
+ case vtn_base_type_array:
+ case vtn_base_type_matrix:
+ for (unsigned i = 0; i < type->length; i++)
+ vtn_type_add_to_function_params(type->array_element, func, param_idx);
+ break;
+
+ case vtn_base_type_struct:
+ for (unsigned i = 0; i < type->length; i++)
+ vtn_type_add_to_function_params(type->members[i], func, param_idx);
+ break;
+
+ case vtn_base_type_sampled_image:
+ /* Two deref parameters: one for the image, one for the sampler. */
+ func->params[(*param_idx)++] = nir_deref_param;
+ func->params[(*param_idx)++] = nir_deref_param;
+ break;
+
+ case vtn_base_type_image:
+ case vtn_base_type_sampler:
+ func->params[(*param_idx)++] = nir_deref_param;
+ break;
+
+ case vtn_base_type_pointer:
+ if (type->type) {
+ /* Pointers with an NIR-representable glsl_type get passed
+ * by value; their shape comes from that type.
+ */
+ func->params[(*param_idx)++] = (nir_parameter) {
+ .num_components = glsl_get_vector_elements(type->type),
+ .bit_size = glsl_get_bit_size(type->type),
+ };
+ } else {
+ /* Everything else is passed as a deref. */
+ func->params[(*param_idx)++] = nir_deref_param;
+ }
+ break;
+
+ default:
+ /* Scalars and vectors are passed by value. */
+ func->params[(*param_idx)++] = (nir_parameter) {
+ .num_components = glsl_get_vector_elements(type->type),
+ .bit_size = glsl_get_bit_size(type->type),
+ };
+ }
+}
+
+/* Fill in call->params for one SSA-valued argument, recursing through
+ * composite types leaf-by-leaf in the same order used by
+ * vtn_type_add_to_function_params, and advancing *param_idx once per leaf
+ * value added.
+ */
+static void
+vtn_ssa_value_add_to_call_params(struct vtn_builder *b,
+ struct vtn_ssa_value *value,
+ struct vtn_type *type,
+ nir_call_instr *call,
+ unsigned *param_idx)
+{
+ switch (type->base_type) {
+ case vtn_base_type_array:
+ case vtn_base_type_matrix:
+ for (unsigned i = 0; i < type->length; i++) {
+ vtn_ssa_value_add_to_call_params(b, value->elems[i],
+ type->array_element,
+ call, param_idx);
+ }
+ break;
+
+ case vtn_base_type_struct:
+ for (unsigned i = 0; i < type->length; i++) {
+ vtn_ssa_value_add_to_call_params(b, value->elems[i],
+ type->members[i],
+ call, param_idx);
+ }
+ break;
+
+ default:
+ /* Leaf value: pass the SSA def directly as the call source. */
+ call->params[(*param_idx)++] = nir_src_for_ssa(value->def);
+ break;
+ }
+}
+
+/* Reconstruct an SSA value inside the callee by loading each leaf from its
+ * flattened NIR function parameter, mirroring the decomposition done on the
+ * call side; *param_idx advances once per leaf loaded.
+ */
+static void
+vtn_ssa_value_load_function_param(struct vtn_builder *b,
+ struct vtn_ssa_value *value,
+ struct vtn_type *type,
+ unsigned *param_idx)
+{
+ switch (type->base_type) {
+ case vtn_base_type_array:
+ case vtn_base_type_matrix:
+ for (unsigned i = 0; i < type->length; i++) {
+ vtn_ssa_value_load_function_param(b, value->elems[i],
+ type->array_element, param_idx);
+ }
+ break;
+
+ case vtn_base_type_struct:
+ for (unsigned i = 0; i < type->length; i++) {
+ vtn_ssa_value_load_function_param(b, value->elems[i],
+ type->members[i], param_idx);
+ }
+ break;
+
+ default:
+ /* Leaf: load the parameter's SSA def into this slot. */
+ value->def = nir_load_param(&b->nb, (*param_idx)++);
+ break;
+ }
+}
+
void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
struct vtn_type *ret_type = vtn_callee->type->return_type;
if (ret_type->base_type != vtn_base_type_void) {
nir_variable *ret_tmp =
- nir_local_variable_create(b->nb.impl, ret_type->type, "return_tmp");
+ nir_local_variable_create(b->nb.impl,
+ glsl_get_bare_type(ret_type->type),
+ "return_tmp");
ret_deref = nir_build_deref_var(&b->nb, ret_tmp);
call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa);
}
call->params[param_idx++] =
nir_src_for_ssa(vtn_pointer_to_ssa(b, pointer));
} else {
- /* This is a regular SSA value and we need a temporary */
- nir_variable *tmp =
- nir_local_variable_create(b->nb.impl, arg_type->type, "arg_tmp");
- nir_deref_instr *tmp_deref = nir_build_deref_var(&b->nb, tmp);
- vtn_local_store(b, vtn_ssa_value(b, arg_id), tmp_deref);
- call->params[param_idx++] = nir_src_for_ssa(&tmp_deref->dest.ssa);
+ vtn_ssa_value_add_to_call_params(b, vtn_ssa_value(b, arg_id),
+ arg_type, call, &param_idx);
}
}
assert(param_idx == call->num_params);
nir_function *func =
nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));
- unsigned num_params = func_type->length;
- for (unsigned i = 0; i < func_type->length; i++) {
- /* Sampled images are actually two parameters */
- if (func_type->params[i]->base_type == vtn_base_type_sampled_image)
- num_params++;
- }
+ unsigned num_params = 0;
+ for (unsigned i = 0; i < func_type->length; i++)
+ num_params += vtn_type_count_function_params(func_type->params[i]);
/* Add one parameter for the function return value */
if (func_type->return_type->base_type != vtn_base_type_void)
};
}
- for (unsigned i = 0; i < func_type->length; i++) {
- if (func_type->params[i]->base_type == vtn_base_type_sampled_image) {
- /* Sampled images are two pointer parameters */
- func->params[idx++] = (nir_parameter) {
- .num_components = 1, .bit_size = 32,
- };
- func->params[idx++] = (nir_parameter) {
- .num_components = 1, .bit_size = 32,
- };
- } else if (func_type->params[i]->base_type == vtn_base_type_pointer &&
- func_type->params[i]->type != NULL) {
- /* Pointers with as storage class get passed by-value */
- assert(glsl_type_is_vector_or_scalar(func_type->params[i]->type));
- func->params[idx++] = (nir_parameter) {
- .num_components =
- glsl_get_vector_elements(func_type->params[i]->type),
- .bit_size = glsl_get_bit_size(func_type->params[i]->type),
- };
- } else {
- /* Everything else is a regular pointer */
- func->params[idx++] = (nir_parameter) {
- .num_components = 1, .bit_size = 32,
- };
- }
- }
+ for (unsigned i = 0; i < func_type->length; i++)
+ vtn_type_add_to_function_params(func_type->params[i], func, &idx);
assert(idx == num_params);
b->func->impl = nir_function_impl_create(func);
vtn_load_param_pointer(b, type, b->func_param_idx++);
} else {
/* We're a regular SSA value. */
- nir_ssa_def *param_val = nir_load_param(&b->nb, b->func_param_idx++);
- nir_deref_instr *deref =
- nir_build_deref_cast(&b->nb, param_val, nir_var_local, type->type);
- vtn_push_ssa(b, w[2], type, vtn_local_load(b, deref));
+ struct vtn_ssa_value *value = vtn_create_ssa_value(b, type->type);
+ vtn_ssa_value_load_function_param(b, value, type, &b->func_param_idx);
+ vtn_push_ssa(b, w[2], type, value);
}
break;
}
{
switch (branch_type) {
case vtn_branch_type_switch_break:
- nir_store_var(&b->nb, switch_fall_var, nir_imm_int(&b->nb, NIR_FALSE), 1);
+ nir_store_var(&b->nb, switch_fall_var, nir_imm_false(&b->nb), 1);
*has_switch_break = true;
break;
case vtn_branch_type_switch_fallthrough:
vtn_base_type_void,
"Return with a value from a function returning void");
struct vtn_ssa_value *src = vtn_ssa_value(b, block->branch[1]);
+ const struct glsl_type *ret_type =
+ glsl_get_bare_type(b->func->type->return_type->type);
nir_deref_instr *ret_deref =
nir_build_deref_cast(&b->nb, nir_load_param(&b->nb, 0),
- nir_var_local, src->type);
+ nir_var_local, ret_type, 0);
vtn_local_store(b, src, ret_deref);
}
nir_local_variable_create(b->nb.impl, glsl_bool_type(), "cont");
b->nb.cursor = nir_before_cf_node(&loop->cf_node);
- nir_store_var(&b->nb, do_cont, nir_imm_int(&b->nb, NIR_FALSE), 1);
+ nir_store_var(&b->nb, do_cont, nir_imm_false(&b->nb), 1);
b->nb.cursor = nir_before_cf_list(&loop->body);
nir_pop_if(&b->nb, cont_if);
- nir_store_var(&b->nb, do_cont, nir_imm_int(&b->nb, NIR_TRUE), 1);
+ nir_store_var(&b->nb, do_cont, nir_imm_true(&b->nb), 1);
b->has_loop_continue = true;
}
*/
nir_variable *fall_var =
nir_local_variable_create(b->nb.impl, glsl_bool_type(), "fall");
- nir_store_var(&b->nb, fall_var, nir_imm_int(&b->nb, NIR_FALSE), 1);
+ nir_store_var(&b->nb, fall_var, nir_imm_false(&b->nb), 1);
/* Next, we gather up all of the conditions. We have to do this
* up-front because we also need to build an "any" condition so
nir_if *case_if = nir_push_if(&b->nb, cond);
bool has_break = false;
- nir_store_var(&b->nb, fall_var, nir_imm_int(&b->nb, NIR_TRUE), 1);
+ nir_store_var(&b->nb, fall_var, nir_imm_true(&b->nb), 1);
vtn_emit_cf_list(b, &cse->body, fall_var, &has_break, handler);
(void)has_break; /* We don't care */
vtn_foreach_instruction(b, func->start_block->label, func->end,
vtn_handle_phi_second_pass);
+ nir_rematerialize_derefs_in_use_blocks_impl(func->impl);
+
/* Continue blocks for loops get inserted before the body of the loop
* but instructions in the continue may use SSA defs in the loop body.
* Therefore, we need to repair SSA to insert the needed phi nodes.