/* Returns the vtn_block for the SPIR-V value with the given id, asserting
 * (via vtn_value) that the value actually is a block.
 */
static struct vtn_block *
vtn_block(struct vtn_builder *b, uint32_t value_id)
{
   return vtn_value(b, value_id, vtn_value_type_block)->block;
}
+
/* Counts how many scalar/vector leaves \p type expands to when flattened
 * into individual NIR function parameters.  Vectors and scalars count as
 * one parameter each; arrays, matrices, and structs/interfaces recurse.
 */
static unsigned
glsl_type_count_function_params(const struct glsl_type *type)
{
   if (glsl_type_is_vector_or_scalar(type))
      return 1;

   if (glsl_type_is_array_or_matrix(type)) {
      /* Every array/matrix element has the same shape, so count one
       * element and multiply.
       */
      unsigned per_elem =
         glsl_type_count_function_params(glsl_get_array_element(type));
      return glsl_get_length(type) * per_elem;
   }

   assert(glsl_type_is_struct_or_ifc(type));
   unsigned total = 0;
   unsigned num_fields = glsl_get_length(type);
   for (unsigned f = 0; f < num_fields; f++)
      total += glsl_type_count_function_params(glsl_get_struct_field(type, f));
   return total;
}
+
+static void
+glsl_type_add_to_function_params(const struct glsl_type *type,
+ nir_function *func,
+ unsigned *param_idx)
+{
+ if (glsl_type_is_vector_or_scalar(type)) {
+ func->params[(*param_idx)++] = (nir_parameter) {
+ .num_components = glsl_get_vector_elements(type),
+ .bit_size = glsl_get_bit_size(type),
+ };
+ } else if (glsl_type_is_array_or_matrix(type)) {
+ unsigned elems = glsl_get_length(type);
+ const struct glsl_type *elem_type = glsl_get_array_element(type);
+ for (unsigned i = 0; i < elems; i++)
+ glsl_type_add_to_function_params(elem_type,func, param_idx);
+ } else {
+ assert(glsl_type_is_struct_or_ifc(type));
+ unsigned elems = glsl_get_length(type);
+ for (unsigned i = 0; i < elems; i++) {
+ const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
+ glsl_type_add_to_function_params(elem_type, func, param_idx);
+ }
+ }
+}
+
+static void
+vtn_ssa_value_add_to_call_params(struct vtn_builder *b,
+ struct vtn_ssa_value *value,
+ nir_call_instr *call,
+ unsigned *param_idx)
+{
+ if (glsl_type_is_vector_or_scalar(value->type)) {
+ call->params[(*param_idx)++] = nir_src_for_ssa(value->def);
+ } else {
+ unsigned elems = glsl_get_length(value->type);
+ for (unsigned i = 0; i < elems; i++) {
+ vtn_ssa_value_add_to_call_params(b, value->elems[i],
+ call, param_idx);
+ }
+ }
+}
+
+static void
+vtn_ssa_value_load_function_param(struct vtn_builder *b,
+ struct vtn_ssa_value *value,
+ unsigned *param_idx)
+{
+ if (glsl_type_is_vector_or_scalar(value->type)) {
+ value->def = nir_load_param(&b->nb, (*param_idx)++);
+ } else {
+ unsigned elems = glsl_get_length(value->type);
+ for (unsigned i = 0; i < elems; i++)
+ vtn_ssa_value_load_function_param(b, value->elems[i], param_idx);
+ }
+}
+
+void
+vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_function *vtn_callee =
+ vtn_value(b, w[3], vtn_value_type_function)->func;
+ struct nir_function *callee = vtn_callee->impl->function;
+
+ vtn_callee->referenced = true;
+
+ nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
+
+ unsigned param_idx = 0;
+
+ nir_deref_instr *ret_deref = NULL;
+ struct vtn_type *ret_type = vtn_callee->type->return_type;
+ if (ret_type->base_type != vtn_base_type_void) {
+ nir_variable *ret_tmp =
+ nir_local_variable_create(b->nb.impl,
+ glsl_get_bare_type(ret_type->type),
+ "return_tmp");
+ ret_deref = nir_build_deref_var(&b->nb, ret_tmp);
+ call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa);
+ }
+
+ for (unsigned i = 0; i < vtn_callee->type->length; i++) {
+ vtn_ssa_value_add_to_call_params(b, vtn_ssa_value(b, w[4 + i]),
+ call, ¶m_idx);
+ }
+ assert(param_idx == call->num_params);
+
+ nir_builder_instr_insert(&b->nb, &call->instr);
+
+ if (ret_type->base_type == vtn_base_type_void) {
+ vtn_push_value(b, w[2], vtn_value_type_undef);
+ } else {
+ vtn_push_ssa_value(b, w[2], vtn_local_load(b, ret_deref, 0));
+ }
+}
+