case GLSL_TYPE_FLOAT:
case GLSL_TYPE_INT:
case GLSL_TYPE_UINT:
- load->value.u[i] = constant->value.u[matrix_offset + i];
+ load->value.u32[i] = constant->value.u[matrix_offset + i];
break;
case GLSL_TYPE_BOOL:
- load->value.u[i] = constant->value.b[matrix_offset + i] ?
+ load->value.u32[i] = constant->value.b[matrix_offset + i] ?
NIR_TRUE : NIR_FALSE;
break;
default:
typedef struct {
union {
- float f[4];
- double d[4];
- int32_t i[4];
- uint32_t u[4];
- int64_t l[4];
- uint64_t ul[4];
+ float f32[4];
+ double f64[4];
+ int32_t i32[4];
+ uint32_t u32[4];
+ int64_t i64[4];
+ uint64_t u64[4];
};
} nir_const_value;
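For context: after this rename every access site spells out the bit size. A minimal sketch, not part of the patch and with a hypothetical helper name, of how callers now pick the field for a given bit size:

    /* Hypothetical helper, illustrating only the renamed fields. */
    static inline void
    const_value_set_uint(nir_const_value *v, unsigned comp,
                         unsigned bit_size, uint64_t val)
    {
       if (bit_size == 64)
          v->u64[comp] = val;             /* formerly v->ul[comp] */
       else
          v->u32[comp] = (uint32_t)val;   /* formerly v->u[comp] */
    }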
nir_const_value v;
memset(&v, 0, sizeof(v));
- v.f[0] = x;
+ v.f32[0] = x;
return nir_build_imm(build, 1, v);
}
nir_const_value v;
memset(&v, 0, sizeof(v));
- v.f[0] = x;
- v.f[1] = y;
- v.f[2] = z;
- v.f[3] = w;
+ v.f32[0] = x;
+ v.f32[1] = y;
+ v.f32[2] = z;
+ v.f32[3] = w;
return nir_build_imm(build, 4, v);
}
nir_const_value v;
memset(&v, 0, sizeof(v));
- v.i[0] = x;
+ v.i32[0] = x;
return nir_build_imm(build, 1, v);
}
nir_const_value v;
memset(&v, 0, sizeof(v));
- v.i[0] = x;
- v.i[1] = y;
- v.i[2] = z;
- v.i[3] = w;
+ v.i32[0] = x;
+ v.i32[1] = y;
+ v.i32[2] = z;
+ v.i32[3] = w;
return nir_build_imm(build, 4, v);
}
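These four bodies look like nir_builder's immediate helpers (nir_imm_float, nir_imm_vec4, nir_imm_int and nir_imm_ivec4 from nir_builder.h; the signatures are not shown in the hunks and are unchanged by the rename). A short usage sketch, assuming a valid nir_builder *b:

    nir_ssa_def *half = nir_imm_float(b, 0.5f);                  /* scalar f32 */
    nir_ssa_def *red  = nir_imm_vec4(b, 1.0f, 0.0f, 0.0f, 1.0f); /* vec4 f32 */
    nir_ssa_def *zero = nir_imm_int(b, 0);                       /* scalar i32 */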
def get_const_field(type_):
if type_ == "int32":
- return "i"
+ return "i32"
if type_ == "uint32":
- return "u"
+ return "u32"
if type_ == "int64":
- return "l"
+ return "i64"
if type_ == "uint64":
- return "ul"
+ return "u64"
if type_ == "bool32":
- return "b"
+ return "u32"
if type_ == "float32":
- return "f"
+ return "f32"
if type_ == "float64":
- return "d"
+ return "f64"
raise Exception(str(type_))
struct ${input_types[j]}_vec src${j} = {
% for k in range(op.input_sizes[j]):
% if input_types[j] == "bool32":
- _src[${j}].u[${k}] != 0,
+ _src[${j}].u32[${k}] != 0,
% else:
_src[${j}].${get_const_field(input_types[j])}[${k}],
% endif
## Avoid unused variable warnings
<% continue %>
% elif input_types[j] == "bool32":
- bool src${j} = _src[${j}].u[_i] != 0;
+ bool src${j} = _src[${j}].u32[_i] != 0;
% else:
${input_types[j]}_t src${j} =
_src[${j}].${get_const_field(input_types[j])}[_i];
## value of dst.
% if output_type == "bool32":
## Sanitize the C value to a proper NIR bool
- _dst_val.u[_i] = dst ? NIR_TRUE : NIR_FALSE;
+ _dst_val.u32[_i] = dst ? NIR_TRUE : NIR_FALSE;
% else:
_dst_val.${get_const_field(output_type)}[_i] = dst;
% endif
% for k in range(op.output_size):
% if output_type == "bool32":
## Sanitize the C value to a proper NIR bool
- _dst_val.u[${k}] = dst.${"xyzw"[k]} ? NIR_TRUE : NIR_FALSE;
+ _dst_val.u32[${k}] = dst.${"xyzw"[k]} ? NIR_TRUE : NIR_FALSE;
% else:
_dst_val.${get_const_field(output_type)}[${k}] = dst.${"xyzw"[k]};
% endif
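For reference, a hand-expanded sketch (illustrative, not verbatim generator output) of the per-component C this Mako template emits for a scalar float32 unary op such as fneg, using the renamed fields:

    for (unsigned _i = 0; _i < num_components; _i++) {
       float src0 = _src[0].f32[_i];  /* a bool32 source would read u32[_i] != 0 */
       float dst = -src0;             /* the op's constant expression */
       _dst_val.f32[_i] = dst;        /* a bool32 result stores NIR_TRUE/NIR_FALSE in u32 */
    }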
return -1;
if (count == -1)
- count = val->i[0];
+ count = val->i32[0];
/* We've found contradictory set_vertex_count intrinsics.
* This can happen if there are early-returns in main() and
* different paths emit different numbers of vertices.
*/
- if (count != val->i[0])
+ if (count != val->i32[0])
return -1;
}
}
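A worked case for this loop:

    /* Path A reaches set_vertex_count(3): count was -1, becomes 3.
     * Path B reaches set_vertex_count(4): 3 != val->i32[0], contradictory,
     * so the pass gives up and returns -1. */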
{
hash = HASH(hash, instr->def.num_components);
- hash = _mesa_fnv32_1a_accumulate_block(hash, instr->value.f,
+ hash = _mesa_fnv32_1a_accumulate_block(hash, instr->value.f32,
instr->def.num_components
- * sizeof(instr->value.f[0]));
+ * sizeof(instr->value.f32[0]));
return hash;
}
if (load1->def.num_components != load2->def.num_components)
return false;
- return memcmp(load1->value.f, load2->value.f,
- load1->def.num_components * sizeof(*load2->value.f)) == 0;
+ return memcmp(load1->value.f32, load2->value.f32,
+ load1->def.num_components * sizeof(*load2->value.f32)) == 0;
}
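Note that hashing and comparing through f32 here is a raw bit-pattern comparison, not a float comparison. A sketch of the consequence:

    nir_const_value a = { .f32 = {  0.0f } };
    nir_const_value b = { .f32 = { -0.0f } };
    /* a.f32[0] == b.f32[0] as floats, but the sign bit differs, so
     * memcmp(&a, &b, sizeof(float)) != 0 and the two loads are not CSE'd. */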
case nir_instr_type_phi: {
nir_phi_instr *phi1 = nir_instr_as_phi(instr1);
state->shader_program->UniformStorage[uniform_loc].opaque[state->shader->stage].index);
nir_load_const_instr *offset_const = nir_load_const_instr_create(mem_ctx, 1);
- offset_const->value.u[0] = instr->variables[0]->var->data.offset;
+ offset_const->value.u32[0] = instr->variables[0]->var->data.offset;
nir_instr_insert_before(&instr->instr, &offset_const->instr);
unsigned child_array_elements = tail->child != NULL ?
glsl_get_aoa_size(tail->type) : 1;
- offset_const->value.u[0] += deref_array->base_offset *
+ offset_const->value.u32[0] += deref_array->base_offset *
child_array_elements * ATOMIC_COUNTER_SIZE;
if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
nir_load_const_instr *atomic_counter_size =
nir_load_const_instr_create(mem_ctx, 1);
- atomic_counter_size->value.u[0] = child_array_elements * ATOMIC_COUNTER_SIZE;
+ atomic_counter_size->value.u32[0] = child_array_elements * ATOMIC_COUNTER_SIZE;
nir_instr_insert_before(&instr->instr, &atomic_counter_size->instr);
nir_alu_instr *mul = nir_alu_instr_create(mem_ctx, nir_op_imul);
nir_ssa_def *loads[4];
for (unsigned i = 0; i < lower->def.num_components; i++) {
nir_load_const_instr *load_comp = nir_load_const_instr_create(b.shader, 1);
- load_comp->value.u[0] = lower->value.u[i];
+ load_comp->value.u32[0] = lower->value.u32[i];
nir_builder_instr_insert(&b, &load_comp->instr);
loads[i] = &load_comp->def;
}
if (src.reg.indirect) {
nir_load_const_instr *load_const =
nir_load_const_instr_create(state->shader, 1);
- load_const->value.u[0] = glsl_get_length(parent_type);
+ load_const->value.u32[0] = glsl_get_length(parent_type);
nir_instr_insert_before(instr, &load_const->instr);
nir_alu_instr *mul = nir_alu_instr_create(state->shader, nir_op_imul);
memset(&v, 0, sizeof(v));
if (swizzle_val == 4) {
- v.u[0] = v.u[1] = v.u[2] = v.u[3] = 0;
+ v.u32[0] = v.u32[1] = v.u32[2] = v.u32[3] = 0;
} else {
assert(swizzle_val == 5);
if (type == nir_type_float)
- v.f[0] = v.f[1] = v.f[2] = v.f[3] = 1.0;
+ v.f32[0] = v.f32[1] = v.f32[2] = v.f32[3] = 1.0;
else
- v.u[0] = v.u[1] = v.u[2] = v.u[3] = 1;
+ v.u32[0] = v.u32[1] = v.u32[2] = v.u32[3] = 1;
}
return nir_build_imm(b, 4, v);
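swizzle_val here follows the GL swizzle encoding (4 = SWIZZLE_ZERO, 5 = SWIZZLE_ONE), so the immediate built is:

    /* swizzle_val 4: v.u32 = { 0, 0, 0, 0 }   (all-zero bits serve both
     *                float and integer results)
     * swizzle_val 5: v.f32 = { 1.0f, 1.0f, 1.0f, 1.0f } for nir_type_float,
     *                v.u32 = { 1, 1, 1, 1 }   otherwise */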
for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
j++) {
if (load_const->def.bit_size == 64)
- src[i].ul[j] = load_const->value.ul[instr->src[i].swizzle[j]];
+ src[i].u64[j] = load_const->value.u64[instr->src[i].swizzle[j]];
else
- src[i].u[j] = load_const->value.u[instr->src[i].swizzle[j]];
+ src[i].u32[j] = load_const->value.u32[instr->src[i].swizzle[j]];
}
/* We shouldn't have any source modifiers in the optimization loop. */
nir_load_const_instr *indirect =
nir_instr_as_load_const(arr->indirect.ssa->parent_instr);
- arr->base_offset += indirect->value.u[0];
+ arr->base_offset += indirect->value.u32[0];
/* Clear out the source */
nir_instr_rewrite_src(instr, &arr->indirect, nir_src_for_ssa(NULL));
if (!const_value)
return false;
- opt_constant_if(following_if, const_value->u[0] != 0);
+ opt_constant_if(following_if, const_value->u32[0] != 0);
return true;
}
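A concrete case:

    /* e.g. the condition is a load_const holding NIR_TRUE (~0u):
     * u32[0] != 0 is true, so opt_constant_if() keeps the then-branch
     * and the if vanishes. */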
* and then print the float in a comment for readability.
*/
- fprintf(fp, "0x%08x /* %f */", instr->value.u[i], instr->value.f[i]);
+ fprintf(fp, "0x%08x /* %f */", instr->value.u32[i], instr->value.f32[i]);
}
fprintf(fp, ")");
switch (nir_op_infos[instr->op].input_types[src]) {
case nir_type_float:
for (unsigned i = 0; i < num_components; ++i) {
- if (load->value.f[new_swizzle[i]] != const_val->data.f)
+ if (load->value.f32[new_swizzle[i]] != const_val->data.f)
return false;
}
return true;
case nir_type_uint:
case nir_type_bool:
for (unsigned i = 0; i < num_components; ++i) {
- if (load->value.i[new_swizzle[i]] != const_val->data.i)
+ if (load->value.i32[new_swizzle[i]] != const_val->data.i)
return false;
}
return true;
switch (type) {
case nir_type_float:
load->def.name = ralloc_asprintf(mem_ctx, "%f", c->data.f);
- load->value.f[0] = c->data.f;
+ load->value.f32[0] = c->data.f;
break;
case nir_type_int:
load->def.name = ralloc_asprintf(mem_ctx, "%d", c->data.i);
- load->value.i[0] = c->data.i;
+ load->value.i32[0] = c->data.i;
break;
case nir_type_uint:
case nir_type_bool:
- load->value.u[0] = c->data.u;
+ load->value.u32[0] = c->data.u;
break;
default:
unreachable("Invalid alu source type");
c->next_imm++;
for (i = 0; i < 4; i++)
- load_const->value.u[i] = tgsi_imm->u[i].Uint;
+ load_const->value.u32[i] = tgsi_imm->u[i].Uint;
nir_builder_instr_insert(b, &load_const->instr);
}
const_offset = nir_src_as_const_value(intr->src[1]);
if (const_offset) {
- off += const_offset->u[0];
+ off += const_offset->u32[0];
} else {
/* For load_ubo_indirect, second src is indirect offset: */
src1 = get_src(ctx, &intr->src[1])[0];
idx = nir_intrinsic_base(intr);
const_offset = nir_src_as_const_value(intr->src[0]);
if (const_offset) {
- idx += const_offset->u[0];
+ idx += const_offset->u32[0];
for (int i = 0; i < intr->num_components; i++) {
unsigned n = idx * 4 + i;
dst[i] = create_uniform(ctx, n);
idx = nir_intrinsic_base(intr);
const_offset = nir_src_as_const_value(intr->src[0]);
if (const_offset) {
- idx += const_offset->u[0];
+ idx += const_offset->u32[0];
for (int i = 0; i < intr->num_components; i++) {
unsigned n = idx * 4 + i;
dst[i] = ctx->ir->inputs[n];
idx = nir_intrinsic_base(intr);
const_offset = nir_src_as_const_value(intr->src[1]);
compile_assert(ctx, const_offset != NULL);
- idx += const_offset->u[0];
+ idx += const_offset->u32[0];
src = get_src(ctx, &intr->src[0]);
for (int i = 0; i < intr->num_components; i++) {
struct ir3_instruction **dst = get_dst_ssa(ctx, &instr->def,
instr->def.num_components);
for (int i = 0; i < instr->def.num_components; i++)
- dst[i] = create_immed(ctx->block, instr->value.u[i]);
+ dst[i] = create_immed(ctx->block, instr->value.u32[i]);
}
static void
* with an offset value of 0.
*/
assert(nir_src_as_const_value(intr->src[0]) &&
- nir_src_as_const_value(intr->src[0])->u[0] == 0);
+ nir_src_as_const_value(intr->src[0])->u32[0] == 0);
/* Generate dword loads for the VPM values (Since these intrinsics may
* be reordered, the actual reads will be generated at the top of the
* with an offset value of 0.
*/
assert(nir_src_as_const_value(intr->src[0]) &&
- nir_src_as_const_value(intr->src[0])->u[0] == 0);
+ nir_src_as_const_value(intr->src[0])->u32[0] == 0);
/* Generate scalar loads equivalent to the original VEC4. */
nir_ssa_def *dests[4];
* with an offset value of 0.
*/
assert(nir_src_as_const_value(intr->src[1]) &&
- nir_src_as_const_value(intr->src[1])->u[0] == 0);
+ nir_src_as_const_value(intr->src[1])->u32[0] == 0);
b->cursor = nir_before_instr(&intr->instr);
{
struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
for (int i = 0; i < instr->def.num_components; i++)
- qregs[i] = qir_uniform_ui(c, instr->value.u[i]);
+ qregs[i] = qir_uniform_ui(c, instr->value.u32[i]);
_mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}
assert(instr->num_components == 1);
const_offset = nir_src_as_const_value(instr->src[0]);
if (const_offset) {
- offset = instr->const_index[0] + const_offset->u[0];
+ offset = instr->const_index[0] + const_offset->u32[0];
assert(offset % 4 == 0);
/* We need dwords */
offset = offset / 4;
const_offset = nir_src_as_const_value(instr->src[0]);
assert(const_offset && "vc4 doesn't support indirect inputs");
if (instr->const_index[0] >= VC4_NIR_TLB_COLOR_READ_INPUT) {
- assert(const_offset->u[0] == 0);
+ assert(const_offset->u32[0] == 0);
/* Reads of the per-sample color need to be done in
* order.
*/
}
*dest = c->color_reads[sample_index];
} else {
- offset = instr->const_index[0] + const_offset->u[0];
+ offset = instr->const_index[0] + const_offset->u32[0];
*dest = c->inputs[offset];
}
break;
case nir_intrinsic_store_output:
const_offset = nir_src_as_const_value(instr->src[1]);
assert(const_offset && "vc4 doesn't support indirect outputs");
- offset = instr->const_index[0] + const_offset->u[0];
+ offset = instr->const_index[0] + const_offset->u32[0];
/* MSAA color outputs are the only case where we have an
* output that's not lowered to being a store of a single 32
enum opcode extract_op;
if (src0->op == nir_op_extract_u16 || src0->op == nir_op_extract_i16) {
- assert(element->u[0] <= 1);
+ assert(element->u32[0] <= 1);
extract_op = SHADER_OPCODE_EXTRACT_WORD;
} else {
- assert(element->u[0] <= 3);
+ assert(element->u32[0] <= 3);
extract_op = SHADER_OPCODE_EXTRACT_BYTE;
}
op0 = offset(op0, bld, src0->src[0].swizzle[0]);
set_saturate(instr->dest.saturate,
- bld.emit(extract_op, result, op0, brw_imm_ud(element->u[0])));
+ bld.emit(extract_op, result, op0, brw_imm_ud(element->u32[0])));
return true;
}
return false;
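The asserted bounds just reflect the source width:

    /* A 32-bit source holds two 16-bit words (element 0-1) for
     * extract_{u,i}16 and four bytes (element 0-3) for extract_{u,i}8. */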
nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
- if (!value1 || fabsf(value1->f[0]) != 1.0f)
+ if (!value1 || fabsf(value1->f32[0]) != 1.0f)
return false;
nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
- if (!value2 || fabsf(value2->f[0]) != 1.0f)
+ if (!value2 || fabsf(value2->f32[0]) != 1.0f)
return false;
fs_reg tmp = vgrf(glsl_type::int_type);
* surely be TRIANGLES
*/
- if (value1->f[0] == -1.0f) {
+ if (value1->f32[0] == -1.0f) {
g0.negate = true;
}
* surely be TRIANGLES
*/
- if (value1->f[0] == -1.0f) {
+ if (value1->f32[0] == -1.0f) {
g1_6.negate = true;
}
case nir_op_extract_i8: {
nir_const_value *byte = nir_src_as_const_value(instr->src[1].src);
bld.emit(SHADER_OPCODE_EXTRACT_BYTE,
- result, op[0], brw_imm_ud(byte->u[0]));
+ result, op[0], brw_imm_ud(byte->u32[0]));
break;
}
case nir_op_extract_i16: {
nir_const_value *word = nir_src_as_const_value(instr->src[1].src);
bld.emit(SHADER_OPCODE_EXTRACT_WORD,
- result, op[0], brw_imm_ud(word->u[0]));
+ result, op[0], brw_imm_ud(word->u32[0]));
break;
}
fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_D, instr->def.num_components);
for (unsigned i = 0; i < instr->def.num_components; i++)
- bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value.i[i]));
+ bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value.i32[i]));
nir_ssa_values[instr->def.index] = reg;
}
const bool is_point_size = (base_offset == 0);
if (offset_const != NULL && vertex_const != NULL &&
- 4 * (base_offset + offset_const->u[0]) < push_reg_count) {
- int imm_offset = (base_offset + offset_const->u[0]) * 4 +
- vertex_const->u[0] * push_reg_count;
+ 4 * (base_offset + offset_const->u32[0]) < push_reg_count) {
+ int imm_offset = (base_offset + offset_const->u32[0]) * 4 +
+ vertex_const->u32[0] * push_reg_count;
/* This input was pushed into registers. */
if (is_point_size) {
/* gl_PointSize comes in .w */
if (vertex_const) {
/* The vertex index is constant; just select the proper URB handle. */
icp_handle =
- retype(brw_vec8_grf(first_icp_handle + vertex_const->i[0], 0),
+ retype(brw_vec8_grf(first_icp_handle + vertex_const->i32[0], 0),
BRW_REGISTER_TYPE_UD);
} else {
/* The vertex index is non-constant. We need to use indirect
if (offset_const) {
/* Constant indexing - use global offset. */
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
- inst->offset = base_offset + offset_const->u[0];
+ inst->offset = base_offset + offset_const->u32[0];
inst->base_mrf = -1;
inst->mlen = 1;
inst->regs_written = num_components;
* add_const_offset_to_base() will fold other constant offsets
* into instr->const_index[0].
*/
- assert(const_value->u[0] == 0);
+ assert(const_value->u32[0] == 0);
return fs_reg();
}
nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
if (const_sample) {
- unsigned msg_data = const_sample->i[0] << 4;
+ unsigned msg_data = const_sample->i32[0] << 4;
emit_pixel_interpolater_send(bld,
FS_OPCODE_INTERPOLATE_AT_SAMPLE,
nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
if (const_offset) {
- unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
- unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;
+ unsigned off_x = MIN2((int)(const_offset->f32[0] * 16), 7) & 0xf;
+ unsigned off_y = MIN2((int)(const_offset->f32[1] * 16), 7) & 0xf;
emit_pixel_interpolater_send(bld,
FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
if (const_offset) {
/* Offsets are in bytes but they should always be multiples of 4 */
- assert(const_offset->u[0] % 4 == 0);
- src.reg_offset = const_offset->u[0] / 4;
+ assert(const_offset->u32[0] % 4 == 0);
+ src.reg_offset = const_offset->u32[0] / 4;
} else {
src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
}
if (const_index) {
const unsigned index = stage_prog_data->binding_table.ubo_start +
- const_index->u[0];
+ const_index->u32[0];
surf_index = brw_imm_ud(index);
brw_mark_surface_used(prog_data, index);
} else {
fs_reg packed_consts = vgrf(glsl_type::float_type);
packed_consts.type = dest.type;
- struct brw_reg const_offset_reg = brw_imm_ud(const_offset->u[0] & ~15);
+ struct brw_reg const_offset_reg = brw_imm_ud(const_offset->u32[0] & ~15);
bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
surf_index, const_offset_reg);
for (unsigned i = 0; i < instr->num_components; i++) {
- packed_consts.set_smear(const_offset->u[0] % 16 / 4 + i);
+ packed_consts.set_smear(const_offset->u32[0] % 16 / 4 + i);
/* The std140 packing rules don't allow vectors to cross 16-byte
* boundaries, and a reg is 32 bytes.
fs_reg surf_index;
if (const_uniform_block) {
unsigned index = stage_prog_data->binding_table.ssbo_start +
- const_uniform_block->u[0];
+ const_uniform_block->u32[0];
surf_index = brw_imm_ud(index);
brw_mark_surface_used(prog_data, index);
} else {
fs_reg offset_reg;
nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
if (const_offset) {
- offset_reg = brw_imm_ud(const_offset->u[0]);
+ offset_reg = brw_imm_ud(const_offset->u32[0]);
} else {
offset_reg = get_nir_src(instr->src[1]);
}
fs_reg offset_reg;
nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
if (const_offset) {
- offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u[0]);
+ offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
} else {
offset_reg = vgrf(glsl_type::uint_type);
bld.ADD(offset_reg,
nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
if (const_offset) {
- offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u[0] +
+ offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0] +
4 * first_component);
} else {
offset_reg = vgrf(glsl_type::uint_type);
nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
assert(const_offset && "Indirect input loads not allowed");
- src = offset(src, bld, const_offset->u[0]);
+ src = offset(src, bld, const_offset->u32[0]);
for (unsigned j = 0; j < instr->num_components; j++) {
bld.MOV(offset(dest, bld, j), offset(src, bld, j));
nir_src_as_const_value(instr->src[1]);
if (const_uniform_block) {
unsigned index = stage_prog_data->binding_table.ssbo_start +
- const_uniform_block->u[0];
+ const_uniform_block->u32[0];
surf_index = brw_imm_ud(index);
brw_mark_surface_used(prog_data, index);
} else {
fs_reg offset_reg;
nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
if (const_offset) {
- offset_reg = brw_imm_ud(const_offset->u[0] + 4 * first_component);
+ offset_reg = brw_imm_ud(const_offset->u32[0] + 4 * first_component);
} else {
offset_reg = vgrf(glsl_type::uint_type);
bld.ADD(offset_reg,
nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
assert(const_offset && "Indirect output stores not allowed");
- new_dest = offset(new_dest, bld, const_offset->u[0]);
+ new_dest = offset(new_dest, bld, const_offset->u32[0]);
for (unsigned j = 0; j < instr->num_components; j++) {
bld.MOV(offset(new_dest, bld, j), offset(src, bld, j));
case nir_intrinsic_get_buffer_size: {
nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
- unsigned ssbo_index = const_uniform_block ? const_uniform_block->u[0] : 0;
+ unsigned ssbo_index = const_uniform_block ? const_uniform_block->u32[0] : 0;
int reg_width = dispatch_width / 8;
/* Set LOD = 0 */
nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
if (const_surface) {
unsigned surf_index = stage_prog_data->binding_table.ssbo_start +
- const_surface->u[0];
+ const_surface->u32[0];
surface = brw_imm_ud(surf_index);
brw_mark_surface_used(prog_data, surf_index);
} else {
nir_const_value *const_offset =
nir_src_as_const_value(instr->src[i].src);
if (const_offset) {
- tex_offset = brw_imm_ud(brw_texture_offset(const_offset->i, 3));
+ tex_offset = brw_imm_ud(brw_texture_offset(const_offset->i32, 3));
} else {
tex_offset = retype(src, BRW_REGISTER_TYPE_D);
}
nir_const_value *const_offset = nir_src_as_const_value(*offset);
if (const_offset) {
- intrin->const_index[0] += const_offset->u[0];
+ intrin->const_index[0] += const_offset->u32[0];
b->cursor = nir_before_instr(&intrin->instr);
nir_instr_rewrite_src(&intrin->instr, offset,
nir_src_for_ssa(nir_imm_int(b, 0)));
if (vertex) {
nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
if (const_vertex) {
- intrin->const_index[0] += const_vertex->u[0] *
+ intrin->const_index[0] += const_vertex->u32[0] *
state->vue_map->num_per_vertex_slots;
} else {
state->b.cursor = nir_before_instr(&intrin->instr);
* have to worry about resolving them.
*/
instr->pass_flags &= ~BRW_NIR_BOOLEAN_MASK;
- if (load->value.u[0] == NIR_TRUE || load->value.u[0] == NIR_FALSE) {
+ if (load->value.u32[0] == NIR_TRUE || load->value.u32[0] == NIR_FALSE) {
instr->pass_flags |= BRW_NIR_BOOLEAN_NO_RESOLVE;
} else {
instr->pass_flags |= BRW_NIR_NON_BOOLEAN;
/* Make up a type...we have no way of knowing... */
const glsl_type *const type = glsl_type::ivec(instr->num_components);
- src = src_reg(ATTR, BRW_VARYING_SLOT_COUNT * vertex->u[0] +
- instr->const_index[0] + offset->u[0],
+ src = src_reg(ATTR, BRW_VARYING_SLOT_COUNT * vertex->u32[0] +
+ instr->const_index[0] + offset->u32[0],
type);
/* gl_PointSize is passed in the .w component of the VUE header */
if (instr->const_index[0] == VARYING_SLOT_PSIZ)
* add_const_offset_to_base() will fold other constant offsets
* into instr->const_index[0].
*/
- assert(const_value->u[0] == 0);
+ assert(const_value->u32[0] == 0);
return src_reg();
}
continue;
for (unsigned j = i; j < instr->def.num_components; j++) {
- if (instr->value.u[i] == instr->value.u[j]) {
+ if (instr->value.u32[i] == instr->value.u32[j]) {
writemask |= 1 << j;
}
}
reg.writemask = writemask;
- emit(MOV(reg, brw_imm_d(instr->value.i[i])));
+ emit(MOV(reg, brw_imm_d(instr->value.i32[i])));
remaining &= ~writemask;
}
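The writemask trick above coalesces identical components into a single MOV each; a worked case:

    /* value.i32 = { 1, 2, 1, 2 }:
     *   i = 0: components 0 and 2 match -> MOV reg.xz, brw_imm_d(1)
     *   i = 1: components 1 and 3 match -> MOV reg.yw, brw_imm_d(2)
     * Two MOVs instead of four; 'remaining' drops each emitted mask. */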
/* We set EmitNoIndirectInput for VS */
assert(const_offset);
- src = src_reg(ATTR, instr->const_index[0] + const_offset->u[0],
+ src = src_reg(ATTR, instr->const_index[0] + const_offset->u32[0],
glsl_type::uvec4_type);
dest = get_nir_dest(instr->dest, src.type);
nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
assert(const_offset);
- int varying = instr->const_index[0] + const_offset->u[0];
+ int varying = instr->const_index[0] + const_offset->u32[0];
src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F,
instr->num_components);
case nir_intrinsic_get_buffer_size: {
nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
- unsigned ssbo_index = const_uniform_block ? const_uniform_block->u[0] : 0;
+ unsigned ssbo_index = const_uniform_block ? const_uniform_block->u32[0] : 0;
const unsigned index =
prog_data->base.binding_table.ssbo_start + ssbo_index;
nir_src_as_const_value(instr->src[1]);
if (const_uniform_block) {
unsigned index = prog_data->base.binding_table.ssbo_start +
- const_uniform_block->u[0];
+ const_uniform_block->u32[0];
surf_index = brw_imm_ud(index);
brw_mark_surface_used(&prog_data->base, index);
} else {
src_reg offset_reg;
nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
if (const_offset) {
- offset_reg = brw_imm_ud(const_offset->u[0]);
+ offset_reg = brw_imm_ud(const_offset->u32[0]);
} else {
offset_reg = get_nir_src(instr->src[2], 1);
}
src_reg surf_index;
if (const_uniform_block) {
unsigned index = prog_data->base.binding_table.ssbo_start +
- const_uniform_block->u[0];
+ const_uniform_block->u32[0];
surf_index = brw_imm_ud(index);
brw_mark_surface_used(&prog_data->base, index);
src_reg offset_reg;
nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
if (const_offset) {
- offset_reg = brw_imm_ud(const_offset->u[0]);
+ offset_reg = brw_imm_ud(const_offset->u32[0]);
} else {
offset_reg = get_nir_src(instr->src[1], 1);
}
nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
if (const_offset) {
/* Offsets are in bytes but they should always be multiples of 16 */
- assert(const_offset->u[0] % 16 == 0);
- src.reg_offset = const_offset->u[0] / 16;
+ assert(const_offset->u32[0] % 16 == 0);
+ src.reg_offset = const_offset->u32[0] / 16;
} else {
src_reg tmp = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_D, 1);
src.reladdr = new(mem_ctx) src_reg(tmp);
* as an immediate.
*/
const unsigned index = prog_data->base.binding_table.ubo_start +
- const_block_index->u[0];
+ const_block_index->u32[0];
surf_index = brw_imm_ud(index);
brw_mark_surface_used(&prog_data->base, index);
} else {
src_reg offset;
nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
if (const_offset) {
- offset = brw_imm_ud(const_offset->u[0] & ~15);
+ offset = brw_imm_ud(const_offset->u32[0] & ~15);
} else {
offset = get_nir_src(instr->src[1], nir_type_int, 1);
}
packed_consts.swizzle = brw_swizzle_for_size(instr->num_components);
if (const_offset) {
- packed_consts.swizzle += BRW_SWIZZLE4(const_offset->u[0] % 16 / 4,
- const_offset->u[0] % 16 / 4,
- const_offset->u[0] % 16 / 4,
- const_offset->u[0] % 16 / 4);
+ packed_consts.swizzle += BRW_SWIZZLE4(const_offset->u32[0] % 16 / 4,
+ const_offset->u32[0] % 16 / 4,
+ const_offset->u32[0] % 16 / 4,
+ const_offset->u32[0] % 16 / 4);
}
emit(MOV(dest, packed_consts));
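A worked case for the swizzle fix-up:

    /* e.g. const byte offset 8: the pull load fetched the 16-byte block at
     * 8 & ~15 == 0, and (8 % 16) / 4 == 2 shifts the swizzle to start at
     * component .z of the packed vec4 before the MOV into dest. */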
nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
if (const_surface) {
unsigned surf_index = prog_data->base.binding_table.ssbo_start +
- const_surface->u[0];
+ const_surface->u32[0];
surface = brw_imm_ud(surf_index);
brw_mark_surface_used(&prog_data->base, surf_index);
} else {
* operand. If we can determine that one of the args is in the low
* 16 bits, though, we can just emit a single MUL.
*/
- if (value0 && value0->u[0] < (1 << 16)) {
+ if (value0 && value0->u32[0] < (1 << 16)) {
if (devinfo->gen < 7)
emit(MUL(dst, op[0], op[1]));
else
emit(MUL(dst, op[1], op[0]));
- } else if (value1 && value1->u[0] < (1 << 16)) {
+ } else if (value1 && value1->u32[0] < (1 << 16)) {
if (devinfo->gen < 7)
emit(MUL(dst, op[1], op[0]));
else
nir_const_value *const_offset =
nir_src_as_const_value(instr->src[i].src);
if (const_offset) {
- constant_offset = brw_texture_offset(const_offset->i, 3);
+ constant_offset = brw_texture_offset(const_offset->i32, 3);
} else {
offset_value =
get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 2);
nir_const_value *vertex_const = nir_src_as_const_value(instr->src[0]);
src_reg vertex_index =
- vertex_const ? src_reg(brw_imm_ud(vertex_const->u[0]))
+ vertex_const ? src_reg(brw_imm_ud(vertex_const->u32[0]))
: get_nir_src(instr->src[0], BRW_REGISTER_TYPE_UD, 1);
dst_reg dst = get_nir_dest(instr->dest, BRW_REGISTER_TYPE_D);