#include "compiler/nir/nir_control_flow.h"
#include "compiler/nir/nir_builder.h"
#include "main/imports.h"
+#include "main/mtypes.h"
/*
 * pass to lower GLSL IR to NIR
 */
} /* end of anonymous namespace */
static void
-nir_remap_attributes(nir_shader *shader)
+nir_remap_attributes(nir_shader *shader,
+ const nir_shader_compiler_options *options)
{
- nir_foreach_variable(var, &shader->inputs) {
- var->data.location += _mesa_bitcount_64(shader->info.double_inputs_read &
- BITFIELD64_MASK(var->data.location));
+ if (options->vs_inputs_dual_locations) {
+ nir_foreach_variable(var, &shader->inputs) {
+ var->data.location +=
+ _mesa_bitcount_64(shader->info.vs.double_inputs &
+ BITFIELD64_MASK(var->data.location));
+ }
}
/* Once the remap is done, reset double_inputs, so that it can later
 * record which locations/slots actually hold doubles */
- shader->info.double_inputs_read = 0;
+ shader->info.vs.double_inputs = 0;
}
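+/* Worked example (editorial sketch, not part of the original patch):
+ * with vs.double_inputs = 0x3 (a dvec3 occupying slots 0-1) and a vec4
+ * at GLSL location 1, _mesa_bitcount_64(0x3 & BITFIELD64_MASK(1))
+ * counts the one dual-slot bit below location 1, so the vec4 is moved
+ * to slot 2.
+ */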
nir_shader *
* two locations. For instance, if we have in the IR code a dvec3 attr0 in
* location 0 and vec4 attr1 in location 1, in NIR attr0 will use
* locations/slots 0 and 1, and attr1 will use location/slot 2 */
- if (shader->stage == MESA_SHADER_VERTEX)
- nir_remap_attributes(shader);
+ if (shader->info.stage == MESA_SHADER_VERTEX)
+ nir_remap_attributes(shader, options);
shader->info.name = ralloc_asprintf(shader, "GLSL%d", shader_prog->Name);
if (shader_prog->Label)
if (ir == NULL)
return NULL;
- nir_constant *ret = ralloc(mem_ctx, nir_constant);
+ nir_constant *ret = rzalloc(mem_ctx, nir_constant);
const unsigned rows = ir->type->vector_elements;
const unsigned cols = ir->type->matrix_columns;
if (ir->data.mode == ir_var_shader_shared)
return;
- nir_variable *var = ralloc(shader, nir_variable);
+ nir_variable *var = rzalloc(shader, nir_variable);
var->type = ir->type;
var->name = ralloc_strdup(var, ir->name);
var->data.patch = ir->data.patch;
var->data.invariant = ir->data.invariant;
var->data.location = ir->data.location;
+ var->data.stream = ir->data.stream;
var->data.compact = false;
switch(ir->data.mode) {
break;
case ir_var_shader_in:
- if (shader->stage == MESA_SHADER_FRAGMENT &&
+ if (shader->info.stage == MESA_SHADER_FRAGMENT &&
ir->data.location == VARYING_SLOT_FACE) {
/* For whatever reason, GLSL IR makes gl_FrontFacing an input */
var->data.location = SYSTEM_VALUE_FRONT_FACE;
var->data.mode = nir_var_system_value;
- } else if (shader->stage == MESA_SHADER_GEOMETRY &&
+ } else if (shader->info.stage == MESA_SHADER_GEOMETRY &&
ir->data.location == VARYING_SLOT_PRIMITIVE_ID) {
/* For whatever reason, GLSL IR makes gl_PrimitiveIDIn an input */
var->data.location = SYSTEM_VALUE_PRIMITIVE_ID;
} else {
var->data.mode = nir_var_shader_in;
- if (shader->stage == MESA_SHADER_TESS_EVAL &&
+ if (shader->info.stage == MESA_SHADER_TESS_EVAL &&
(ir->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
ir->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)) {
var->data.compact = ir->type->without_array()->is_scalar();
}
/* Mark all the locations that require two slots */
- if (glsl_type_is_dual_slot(glsl_without_array(var->type))) {
- for (uint i = 0; i < glsl_count_attribute_slots(var->type, true); i++) {
+ if (shader->info.stage == MESA_SHADER_VERTEX &&
+ glsl_type_is_dual_slot(glsl_without_array(var->type))) {
+ for (unsigned i = 0; i < glsl_count_attribute_slots(var->type, true); i++) {
uint64_t bitfield = BITFIELD64_BIT(var->data.location + i);
- shader->info.double_inputs_read |= bitfield;
+ shader->info.vs.double_inputs |= bitfield;
}
}
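+ /* Editorial sketch: a dvec3 at location 0 yields
+  * glsl_count_attribute_slots(..., true) == 2, setting bits 0 and 1
+  * of vs.double_inputs for nir_remap_attributes() above to count. */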
break;
case ir_var_shader_out:
var->data.mode = nir_var_shader_out;
- if (shader->stage == MESA_SHADER_TESS_CTRL &&
+ if (shader->info.stage == MESA_SHADER_TESS_CTRL &&
(ir->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
ir->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)) {
var->data.compact = ir->type->without_array()->is_scalar();
var->data.pixel_center_integer = ir->data.pixel_center_integer;
var->data.location_frac = ir->data.location_frac;
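+ /* Editorial note: the qualifier is mirrored into shader_info here,
+  * presumably so consumers can check fs.pixel_center_integer without
+  * walking the shader's variables. */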
+ if (var->data.pixel_center_integer) {
+ assert(shader->info.stage == MESA_SHADER_FRAGMENT);
+ shader->info.fs.pixel_center_integer = true;
+ }
+
switch (ir->data.depth_layout) {
case ir_depth_layout_none:
var->data.depth_layout = nir_depth_layout_none;
var->data.index = ir->data.index;
var->data.descriptor_set = 0;
var->data.binding = ir->data.binding;
+ var->data.bindless = ir->data.bindless;
var->data.offset = ir->data.offset;
var->data.image.read_only = ir->data.memory_read_only;
var->data.image.write_only = ir->data.memory_write_only;
var->num_state_slots = ir->get_num_state_slots();
if (var->num_state_slots > 0) {
- var->state_slots = ralloc_array(var, nir_state_slot,
- var->num_state_slots);
+ var->state_slots = rzalloc_array(var, nir_state_slot,
+ var->num_state_slots);
ir_state_slot *state_slots = ir->get_state_slots();
for (unsigned i = 0; i < var->num_state_slots; i++) {
op = nir_intrinsic_atomic_counter_comp_swap_var;
break;
case ir_intrinsic_image_load:
- op = nir_intrinsic_image_load;
+ op = nir_intrinsic_image_var_load;
break;
case ir_intrinsic_image_store:
- op = nir_intrinsic_image_store;
+ op = nir_intrinsic_image_var_store;
break;
case ir_intrinsic_image_atomic_add:
- op = nir_intrinsic_image_atomic_add;
+ op = nir_intrinsic_image_var_atomic_add;
break;
case ir_intrinsic_image_atomic_min:
- op = nir_intrinsic_image_atomic_min;
+ op = nir_intrinsic_image_var_atomic_min;
break;
case ir_intrinsic_image_atomic_max:
- op = nir_intrinsic_image_atomic_max;
+ op = nir_intrinsic_image_var_atomic_max;
break;
case ir_intrinsic_image_atomic_and:
- op = nir_intrinsic_image_atomic_and;
+ op = nir_intrinsic_image_var_atomic_and;
break;
case ir_intrinsic_image_atomic_or:
- op = nir_intrinsic_image_atomic_or;
+ op = nir_intrinsic_image_var_atomic_or;
break;
case ir_intrinsic_image_atomic_xor:
- op = nir_intrinsic_image_atomic_xor;
+ op = nir_intrinsic_image_var_atomic_xor;
break;
case ir_intrinsic_image_atomic_exchange:
- op = nir_intrinsic_image_atomic_exchange;
+ op = nir_intrinsic_image_var_atomic_exchange;
break;
case ir_intrinsic_image_atomic_comp_swap:
- op = nir_intrinsic_image_atomic_comp_swap;
+ op = nir_intrinsic_image_var_atomic_comp_swap;
break;
case ir_intrinsic_memory_barrier:
op = nir_intrinsic_memory_barrier;
break;
case ir_intrinsic_image_size:
- op = nir_intrinsic_image_size;
+ op = nir_intrinsic_image_var_size;
break;
case ir_intrinsic_image_samples:
- op = nir_intrinsic_image_samples;
+ op = nir_intrinsic_image_var_samples;
break;
case ir_intrinsic_ssbo_store:
op = nir_intrinsic_store_ssbo;
op = nir_intrinsic_vote_all;
break;
case ir_intrinsic_vote_eq:
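+ /* Editorial note: NIR split vote_eq into integer and float variants;
+  * the GLSL built-in compares booleans, hence the integer form. */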
- op = nir_intrinsic_vote_eq;
+ op = nir_intrinsic_vote_ieq;
break;
case ir_intrinsic_ballot:
op = nir_intrinsic_ballot;
nir_builder_instr_insert(&b, &instr->instr);
break;
}
- case nir_intrinsic_image_load:
- case nir_intrinsic_image_store:
- case nir_intrinsic_image_atomic_add:
- case nir_intrinsic_image_atomic_min:
- case nir_intrinsic_image_atomic_max:
- case nir_intrinsic_image_atomic_and:
- case nir_intrinsic_image_atomic_or:
- case nir_intrinsic_image_atomic_xor:
- case nir_intrinsic_image_atomic_exchange:
- case nir_intrinsic_image_atomic_comp_swap:
- case nir_intrinsic_image_samples:
- case nir_intrinsic_image_size: {
+ case nir_intrinsic_image_var_load:
+ case nir_intrinsic_image_var_store:
+ case nir_intrinsic_image_var_atomic_add:
+ case nir_intrinsic_image_var_atomic_min:
+ case nir_intrinsic_image_var_atomic_max:
+ case nir_intrinsic_image_var_atomic_and:
+ case nir_intrinsic_image_var_atomic_or:
+ case nir_intrinsic_image_var_atomic_xor:
+ case nir_intrinsic_image_var_atomic_exchange:
+ case nir_intrinsic_image_var_atomic_comp_swap:
+ case nir_intrinsic_image_var_samples:
+ case nir_intrinsic_image_var_size: {
nir_ssa_undef_instr *instr_undef =
nir_ssa_undef_instr_create(shader, 1, 32);
nir_builder_instr_insert(&b, &instr_undef->instr);
/* Set the intrinsic destination. */
if (ir->return_deref) {
unsigned num_components = ir->return_deref->type->vector_elements;
- if (instr->intrinsic == nir_intrinsic_image_size)
+ if (instr->intrinsic == nir_intrinsic_image_var_size)
instr->num_components = num_components;
nir_ssa_dest_init(&instr->instr, &instr->dest,
num_components, 32, NULL);
}
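+ /* size/samples take no coordinate or sample operands, so the
+  * intrinsic is complete at this point and the source wiring below
+  * is skipped. */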
- if (op == nir_intrinsic_image_size ||
- op == nir_intrinsic_image_samples) {
+ if (op == nir_intrinsic_image_var_size ||
+ op == nir_intrinsic_image_var_samples) {
nir_builder_instr_insert(&b, &instr->instr);
break;
}
}
case nir_intrinsic_vote_any:
case nir_intrinsic_vote_all:
- case nir_intrinsic_vote_eq: {
+ case nir_intrinsic_vote_ieq: {
nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
+ instr->num_components = 1;
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));
case nir_intrinsic_ballot: {
nir_ssa_dest_init(&instr->instr, &instr->dest,
ir->return_deref->type->vector_elements, 64, NULL);
+ instr->num_components = ir->return_deref->type->vector_elements;
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));
static bool
type_is_float(glsl_base_type type)
{
- return type == GLSL_TYPE_FLOAT || type == GLSL_TYPE_DOUBLE;
+ return type == GLSL_TYPE_FLOAT || type == GLSL_TYPE_DOUBLE ||
+ type == GLSL_TYPE_FLOAT16;
}
static bool
type_is_signed(glsl_base_type type)
{
- return type == GLSL_TYPE_INT || type == GLSL_TYPE_INT64;
+ return type == GLSL_TYPE_INT || type == GLSL_TYPE_INT64 ||
+ type == GLSL_TYPE_INT16;
}
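+/* Editorial sketch (not part of the patch): how these helpers select a
+ * comparison opcode in the lowering below, e.g. for ir_binop_gequal:
+ *
+ *    if (type_is_float(types[0]))        result = nir_fge(&b, srcs[0], srcs[1]);
+ *    else if (type_is_signed(types[0]))  result = nir_ige(&b, srcs[0], srcs[1]);
+ *    else                                result = nir_uge(&b, srcs[0], srcs[1]);
+ *
+ * The 16-bit additions route fp16/int16 operands down these same paths. */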
void
case ir_unop_u642i64: {
nir_alu_type src_type = nir_get_nir_type_for_glsl_base_type(types[0]);
nir_alu_type dst_type = nir_get_nir_type_for_glsl_base_type(out_type);
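+ /* nir_type_conversion_op now takes a rounding mode;
+  * nir_rounding_mode_undef selects the default, non-rounding
+  * conversion, preserving the previous behaviour. */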
- result = nir_build_alu(&b, nir_type_conversion_op(src_type, dst_type),
+ result = nir_build_alu(&b, nir_type_conversion_op(src_type, dst_type,
+ nir_rounding_mode_undef),
srcs[0], NULL, NULL, NULL);
/* b2i and b2f don't have fixed bit-size versions so the builder will
 * just assume 32 and we have to fix it up here.
 */
case ir_unop_ceil: result = nir_fceil(&b, srcs[0]); break;
case ir_unop_floor: result = nir_ffloor(&b, srcs[0]); break;
case ir_unop_fract: result = nir_ffract(&b, srcs[0]); break;
+ case ir_unop_frexp_exp: result = nir_frexp_exp(&b, srcs[0]); break;
+ case ir_unop_frexp_sig: result = nir_frexp_sig(&b, srcs[0]); break;
case ir_unop_round_even: result = nir_fround_even(&b, srcs[0]); break;
case ir_unop_sin: result = nir_fsin(&b, srcs[0]); break;
case ir_unop_cos: result = nir_fcos(&b, srcs[0]); break;
case ir_unop_unpack_half_2x16:
result = nir_unpack_half_2x16(&b, srcs[0]);
break;
+ case ir_unop_pack_sampler_2x32:
+ case ir_unop_pack_image_2x32:
case ir_unop_pack_double_2x32:
case ir_unop_pack_int_2x32:
case ir_unop_pack_uint_2x32:
result = nir_pack_64_2x32(&b, srcs[0]);
break;
+ case ir_unop_unpack_sampler_2x32:
+ case ir_unop_unpack_image_2x32:
case ir_unop_unpack_double_2x32:
case ir_unop_unpack_int_2x32:
case ir_unop_unpack_uint_2x32:
result = nir_slt(&b, srcs[0], srcs[1]);
}
break;
- case ir_binop_greater:
- if (supports_ints) {
- if (type_is_float(types[0]))
- result = nir_flt(&b, srcs[1], srcs[0]);
- else if (type_is_signed(types[0]))
- result = nir_ilt(&b, srcs[1], srcs[0]);
- else
- result = nir_ult(&b, srcs[1], srcs[0]);
- } else {
- result = nir_slt(&b, srcs[1], srcs[0]);
- }
- break;
- case ir_binop_lequal:
- if (supports_ints) {
- if (type_is_float(types[0]))
- result = nir_fge(&b, srcs[1], srcs[0]);
- else if (type_is_signed(types[0]))
- result = nir_ige(&b, srcs[1], srcs[0]);
- else
- result = nir_uge(&b, srcs[1], srcs[0]);
- } else {
- result = nir_slt(&b, srcs[1], srcs[0]);
- }
- break;
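+ /* Editorial note: ir_binop_greater and ir_binop_lequal no longer
+  * reach this switch; the GLSL IR front-end now canonicalizes them
+  * into ir_binop_less and ir_binop_gequal with swapped operands. */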
case ir_binop_gequal:
if (supports_ints) {
if (type_is_float(types[0]))