return nir_imov_alu(build, *src, num_components);
}
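+/* Kernels may use either 32- or 64-bit pointers depending on the
+ * addressing model; every other stage uses 32-bit derefs.
+ */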
+static inline unsigned
+nir_get_ptr_bitsize(nir_builder *build)
+{
+ if (build->shader->info.stage == MESA_SHADER_KERNEL)
+ return build->shader->info.cs.ptr_size;
+ return 32;
+}
+
static inline nir_deref_instr *
nir_build_deref_var(nir_builder *build, nir_variable *var)
{
deref->type = var->type;
deref->var = var;
- nir_ssa_dest_init(&deref->instr, &deref->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&deref->instr, &deref->dest, 1,
+ nir_get_ptr_bitsize(build), NULL);
nir_builder_instr_insert(build, &deref->instr);
# src[] = { address }.
# const_index[] = { access, align_mul, align_offset }
load("global", 1, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
+# src[] = { address }. const_index[] = { base, range, align_mul, align_offset }
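+# Kernel inputs are OpenCL kernel arguments; they never change within a
+# dispatch, hence CAN_ELIMINATE and CAN_REORDER.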
+load("kernel_input", 1, [BASE, RANGE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE, CAN_REORDER])
# Stores work the same way as loads, except now the first source is the value
# to store and the second (and possibly third) source specify where to store
assert(addr_format_is_global(addr_format));
op = nir_intrinsic_load_global;
break;
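+ /* OpenCL kernel arguments are exposed as shader_in variables and are
+  * read through the dedicated kernel_input intrinsic.
+  */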
+ case nir_var_shader_in:
+ assert(addr_format_is_global(addr_format));
+ op = nir_intrinsic_load_kernel_input;
+ break;
default:
unreachable("Unsupported explicit IO variable mode");
}
nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
if (addr_format_is_global(addr_format)) {
- assert(op == nir_intrinsic_load_global);
load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
} else {
load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
}
- if (mode != nir_var_mem_ubo)
+ if (mode != nir_var_mem_ubo && mode != nir_var_shader_in)
nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
/* TODO: We should try and provide a better alignment. For OpenCL, we need
b->cursor = nir_after_instr(&deref->instr);
- /* Var derefs must be lowered away by the driver */
- assert(deref->deref_type != nir_deref_type_var);
- assert(deref->parent.is_ssa);
- nir_ssa_def *parent_addr = deref->parent.ssa;
+ nir_ssa_def *parent_addr = NULL;
+ if (deref->deref_type != nir_deref_type_var) {
+    assert(deref->parent.is_ssa);
+    parent_addr = deref->parent.ssa;
+ }
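+ /* Var derefs have no parent; their address is built directly from the
+  * variable's driver_location in the switch below.
+  */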
nir_ssa_def *addr = NULL;
assert(deref->dest.is_ssa);
switch (deref->deref_type) {
case nir_deref_type_var:
- unreachable("Must be lowered by the driver");
+ assert(deref->mode == nir_var_shader_in);
+ addr = nir_imm_intN_t(b, deref->var->data.driver_location,
+ deref->dest.ssa.bit_size);
break;
case nir_deref_type_array: {
const struct glsl_type *phys_ssbo_ptr_type;
const struct glsl_type *push_const_ptr_type;
const struct glsl_type *shared_ptr_type;
+ const struct glsl_type *global_ptr_type;
+ const struct glsl_type *temp_ptr_type;
struct {
void (*func)(void *private_data,
#include "nir/nir_deref.h"
#include "spirv_info.h"
+#include "util/u_math.h"
+
#include <stdio.h>
void
val->type->base_type = vtn_base_type_array;
val->type->array_element = array_element;
- val->type->stride = 0;
+ if (b->shader->info.stage == MESA_SHADER_KERNEL)
+ val->type->stride = glsl_get_cl_size(array_element->type);
vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
val->type->type = glsl_array_type(array_element->type, val->type->length,
};
}
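+ /* For kernels, pack the struct up front using CL (C) size and alignment
+  * rules; explicit Offset decorations, if present, are applied afterwards
+  * and override these offsets.
+  */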
+ if (b->shader->info.stage == MESA_SHADER_KERNEL) {
+ unsigned offset = 0;
+ for (unsigned i = 0; i < num_fields; i++) {
+ offset = align(offset, glsl_get_cl_alignment(fields[i].type));
+ fields[i].offset = offset;
+ offset += glsl_get_cl_size(fields[i].type);
+ }
+ }
+
struct member_decoration_ctx ctx = {
.num_fields = num_fields,
.fields = fields,
* declaration.
*/
val = vtn_untyped_value(b, w[1]);
+ struct vtn_type *deref_type = vtn_untyped_value(b, w[3])->type;
SpvStorageClass storage_class = w[2];
break;
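+ /* For physical pointers, OpPtrAccessChain treats a pointer as an array
+  * of its pointee, so record the element stride: the pointee's CL size
+  * rounded up to its CL alignment.
+  */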
case SpvStorageClassWorkgroup:
val->type->type = b->options->shared_ptr_type;
+ if (b->physical_ptrs)
+ val->type->stride = align(glsl_get_cl_size(deref_type->type),
+                           glsl_get_cl_alignment(deref_type->type));
+ break;
+ case SpvStorageClassCrossWorkgroup:
+ val->type->type = b->options->global_ptr_type;
+ if (b->physical_ptrs)
+ val->type->stride = align(glsl_get_cl_size(deref_type->type),
+                           glsl_get_cl_alignment(deref_type->type));
+ break;
+ case SpvStorageClassFunction:
+ if (b->physical_ptrs) {
+ val->type->type = b->options->temp_ptr_type;
+ val->type->stride = align(glsl_get_cl_size(deref_type->type),
+                           glsl_get_cl_alignment(deref_type->type));
+ }
break;
default:
/* In this case, no variable pointers are allowed so all deref
"AddressingModelPhysical32 only supported for kernels");
b->shader->info.cs.ptr_size = 32;
b->physical_ptrs = true;
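+ /* Under Physical32, pointers in all of these storage classes are plain
+  * 32-bit integers in NIR.
+  */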
+ b->options->shared_ptr_type = glsl_uint_type();
+ b->options->global_ptr_type = glsl_uint_type();
+ b->options->temp_ptr_type = glsl_uint_type();
break;
case SpvAddressingModelPhysical64:
vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
"AddressingModelPhysical64 only supported for kernels");
b->shader->info.cs.ptr_size = 64;
b->physical_ptrs = true;
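+ /* Likewise, Physical64 pointers are plain 64-bit integers. */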
+ b->options->shared_ptr_type = glsl_uint64_t_type();
+ b->options->global_ptr_type = glsl_uint64_t_type();
+ b->options->temp_ptr_type = glsl_uint64_t_type();
break;
case SpvAddressingModelLogical:
vtn_fail_if(b->shader->info.stage >= MESA_SHADER_STAGES,
case SpvOpAccessChain:
case SpvOpPtrAccessChain:
case SpvOpInBoundsAccessChain:
+ case SpvOpInBoundsPtrAccessChain:
case SpvOpArrayLength:
case SpvOpConvertPtrToU:
case SpvOpConvertUToPtr:
{
/* Initialize the vtn_builder object */
struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
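+ /* The addressing-model handling fills the pointer types in, so give the
+  * builder a mutable copy of the options rather than pointing at the
+  * caller's const struct.
+  */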
+ struct spirv_to_nir_options *dup_options =
+ ralloc(b, struct spirv_to_nir_options);
+ *dup_options = *options;
+
b->spirv = words;
b->spirv_word_count = word_count;
b->file = NULL;
exec_list_make_empty(&b->functions);
b->entry_point_stage = stage;
b->entry_point_name = entry_point_name;
- b->options = options;
+ b->options = dup_options;
/*
* Handle the SPIR-V header (first 5 dwords).
size_t spirv_word_count;
nir_shader *shader;
- const struct spirv_to_nir_options *options;
+ struct spirv_to_nir_options *options;
struct vtn_block *block;
/* Current offset, file, line, and column. Useful for debugging. Set
} else {
const struct glsl_type *deref_type = ptr_type->deref->type;
if (!vtn_pointer_is_external_block(b, ptr)) {
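+ /* Physical pointers may be 64-bit scalars, and the cast must keep the
+  * explicitly laid out deref type so strides and offsets survive for
+  * later explicit-IO lowering.
+  */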
- assert(ssa->bit_size == 32 && ssa->num_components == 1);
ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
- glsl_get_bare_type(deref_type), 0);
+ deref_type, 0);
} else if (vtn_type_contains_block(b, ptr->type) &&
ptr->mode != vtn_variable_mode_phys_ssbo) {
/* This is a pointer to somewhere in an array of blocks, not a
case SpvOpAccessChain:
case SpvOpPtrAccessChain:
- case SpvOpInBoundsAccessChain: {
+ case SpvOpInBoundsAccessChain:
+ case SpvOpInBoundsPtrAccessChain: {
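+ /* The InBounds variants carry no extra information vtn can use; handle
+  * them exactly like their plain counterparts.
+  */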
struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
- chain->ptr_as_array = (opcode == SpvOpPtrAccessChain);
+ chain->ptr_as_array = (opcode == SpvOpPtrAccessChain ||
+                        opcode == SpvOpInBoundsPtrAccessChain);
unsigned idx = 0;
for (int i = 4; i < count; i++) {