src0->op != nir_op_extract_i8 && src0->op != nir_op_extract_i16)
return false;
- nir_const_value *element = nir_src_as_const_value(src0->src[1].src);
- assert(element != NULL);
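+ /* The element index must be a constant; nir_src_as_uint() asserts this. */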
+ unsigned element = nir_src_as_uint(src0->src[1].src);
/* Element type to extract. */
const brw_reg_type type = brw_int_type(
op0 = offset(op0, bld, src0->src[0].swizzle[0]);
set_saturate(instr->dest.saturate,
- bld.MOV(result, subscript(op0, type, element->u32[0])));
+ bld.MOV(result, subscript(op0, type, element)));
return true;
}
if (src0->intrinsic != nir_intrinsic_load_front_face)
return false;
- nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
- if (!value1 || fabsf(value1->f32[0]) != 1.0f)
+ if (!nir_src_is_const(instr->src[1].src) ||
+ !nir_src_is_const(instr->src[2].src))
return false;
- nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
- if (!value2 || fabsf(value2->f32[0]) != 1.0f)
+ const float value1 = nir_src_as_float(instr->src[1].src);
+ const float value2 = nir_src_as_float(instr->src[2].src);
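+ /* The sign-bit trick below only works for a select between +1.0 and
+ * -1.0, so both sources must be exactly one of those values.
+ */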
+ if (fabsf(value1) != 1.0f || fabsf(value2) != 1.0f)
return false;
fs_reg tmp = vgrf(glsl_type::int_type);
* surely be TRIANGLES
*/
- if (value1->f32[0] == -1.0f) {
+ if (value1 == -1.0f) {
g0.negate = true;
}
* surely be TRIANGLES
*/
- if (value1->f32[0] == -1.0f) {
+ if (value1 == -1.0f) {
g1_6.negate = true;
}
case nir_op_extract_u8:
case nir_op_extract_i8: {
- nir_const_value *byte = nir_src_as_const_value(instr->src[1].src);
- assert(byte != NULL);
+ unsigned byte = nir_src_as_uint(instr->src[1].src);
/* The PRMs say:
*
if (instr->op == nir_op_extract_i8) {
/* If we need to sign extend, extract to a word first */
fs_reg w_temp = bld.vgrf(BRW_REGISTER_TYPE_W);
- bld.MOV(w_temp, subscript(op[0], type, byte->u32[0]));
+ bld.MOV(w_temp, subscript(op[0], type, byte));
bld.MOV(result, w_temp);
} else {
/* Otherwise use an AND with 0xff and a word type */
- bld.AND(result, subscript(op[0], type, byte->u32[0] / 2), brw_imm_uw(0xff));
+ bld.AND(result, subscript(op[0], type, byte / 2), brw_imm_uw(0xff));
}
} else {
const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
- bld.MOV(result, subscript(op[0], type, byte->u32[0]));
+ bld.MOV(result, subscript(op[0], type, byte));
}
break;
}
case nir_op_extract_u16:
case nir_op_extract_i16: {
const brw_reg_type type = brw_int_type(2, instr->op == nir_op_extract_i16);
- nir_const_value *word = nir_src_as_const_value(instr->src[1].src);
- assert(word != NULL);
- bld.MOV(result, subscript(op[0], type, word->u32[0]));
+ unsigned word = nir_src_as_uint(instr->src[1].src);
+ bld.MOV(result, subscript(op[0], type, word));
break;
}
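+ /* Return the source as an immediate if it is a 32-bit constant,
+ * otherwise fall back to the VGRF from get_nir_src().
+ */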
fs_reg
fs_visitor::get_nir_src_imm(const nir_src &src)
{
- nir_const_value *val = nir_src_as_const_value(src);
assert(nir_src_bit_size(src) == 32);
- return val ? fs_reg(brw_imm_d(val->i32[0])) : get_nir_src(src);
+ return nir_src_is_const(src) ?
+ fs_reg(brw_imm_d(nir_src_as_int(src))) : get_nir_src(src);
}
fs_reg
unsigned first_component)
{
struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
-
- nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
- nir_const_value *offset_const = nir_src_as_const_value(offset_src);
const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
/* TODO: figure out push input layout for invocations == 1 */
/* TODO: make this work with 64-bit inputs */
if (gs_prog_data->invocations == 1 &&
type_sz(dst.type) <= 4 &&
- offset_const != NULL && vertex_const != NULL &&
- 4 * (base_offset + offset_const->u32[0]) < push_reg_count) {
- int imm_offset = (base_offset + offset_const->u32[0]) * 4 +
- vertex_const->u32[0] * push_reg_count;
+ nir_src_is_const(offset_src) && nir_src_is_const(vertex_src) &&
+ 4 * (base_offset + nir_src_as_uint(offset_src)) < push_reg_count) {
+ int imm_offset = (base_offset + nir_src_as_uint(offset_src)) * 4 +
+ nir_src_as_uint(vertex_src) * push_reg_count;
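+ /* The input was pushed into registers; read it from the ATTR file. */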
for (unsigned i = 0; i < num_components; i++) {
bld.MOV(offset(dst, bld, i),
fs_reg(ATTR, imm_offset + i + first_component, dst.type));
fs_reg icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
if (gs_prog_data->invocations == 1) {
- if (vertex_const) {
+ if (nir_src_is_const(vertex_src)) {
/* The vertex index is constant; just select the proper URB handle. */
icp_handle =
- retype(brw_vec8_grf(first_icp_handle + vertex_const->i32[0], 0),
+ retype(brw_vec8_grf(first_icp_handle + nir_src_as_uint(vertex_src), 0),
BRW_REGISTER_TYPE_UD);
} else {
/* The vertex index is non-constant. We need to use indirect
} else {
assert(gs_prog_data->invocations > 1);
- if (vertex_const) {
- assert(devinfo->gen >= 9 || vertex_const->i32[0] <= 5);
+ if (nir_src_is_const(vertex_src)) {
+ unsigned vertex = nir_src_as_uint(vertex_src);
+ assert(devinfo->gen >= 9 || vertex <= 5);
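+ /* ICP handles are one DWord each, packed eight per GRF. */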
bld.MOV(icp_handle,
- retype(brw_vec1_grf(first_icp_handle +
- vertex_const->i32[0] / 8,
- vertex_const->i32[0] % 8),
+ retype(brw_vec1_grf(first_icp_handle + vertex / 8, vertex % 8),
BRW_REGISTER_TYPE_UD));
} else {
/* The vertex index is non-constant. We need to use indirect
}
for (unsigned iter = 0; iter < num_iterations; iter++) {
- if (offset_const) {
+ if (nir_src_is_const(offset_src)) {
/* Constant indexing - use global offset. */
if (first_component != 0) {
unsigned read_components = num_components + first_component;
inst->size_written = num_components *
tmp_dst.component_size(inst->exec_size);
}
- inst->offset = base_offset + offset_const->u32[0];
+ inst->offset = base_offset + nir_src_as_uint(offset_src);
inst->mlen = 1;
} else {
/* Indirect indexing - use per-slot offsets as well. */
if (num_iterations > 1) {
num_components = orig_num_components - 2;
- if(offset_const) {
+ if (nir_src_is_const(offset_src)) {
base_offset++;
} else {
fs_reg new_indirect = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
fs_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
{
nir_src *offset_src = nir_get_io_offset_src(instr);
- nir_const_value *const_value = nir_src_as_const_value(*offset_src);
- if (const_value) {
+ if (nir_src_is_const(*offset_src)) {
/* The only constant offset we should find is 0. brw_nir.c's
* add_const_offset_to_base() will fold other constant offsets
* into instr->const_index[0].
*/
- assert(const_value->u32[0] == 0);
+ assert(nir_src_as_uint(*offset_src) == 0);
return fs_reg();
}
unsigned first_component = nir_intrinsic_component(instr);
unsigned num_components = instr->num_components;
- nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
- assert(const_offset && "Indirect input loads not allowed");
- src = offset(src, bld, const_offset->u32[0]);
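+ /* Indirect input loads are not allowed; nir_src_as_uint() asserts that
+ * the offset is constant.
+ */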
+ src = offset(src, bld, nir_src_as_uint(instr->src[0]));
if (type_sz(dest.type) == 8)
first_component /= 2;
unsigned imm_offset = instr->const_index[0];
const nir_src &vertex_src = instr->src[0];
- nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
fs_inst *inst;
fs_reg icp_handle;
- if (vertex_const) {
+ if (nir_src_is_const(vertex_src)) {
/* Emit a MOV to resolve <0,1,0> regioning. */
icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
+ unsigned vertex = nir_src_as_uint(vertex_src);
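+ /* ICP handles start in g1 and are packed eight per GRF. */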
bld.MOV(icp_handle,
- retype(brw_vec1_grf(1 + (vertex_const->i32[0] >> 3),
- vertex_const->i32[0] & 7),
+ retype(brw_vec1_grf(1 + (vertex >> 3), vertex & 7),
BRW_REGISTER_TYPE_UD));
} else if (tcs_prog_data->instances == 1 &&
vertex_src.is_ssa &&
case nir_intrinsic_store_output: {
const fs_reg src = get_nir_src(instr->src[0]);
- const nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
- assert(const_offset && "Indirect output stores not allowed");
+ const unsigned store_offset = nir_src_as_uint(instr->src[1]);
const unsigned location = nir_intrinsic_base(instr) +
- SET_FIELD(const_offset->u32[0], BRW_NIR_FRAG_OUTPUT_LOCATION);
+ SET_FIELD(store_offset, BRW_NIR_FRAG_OUTPUT_LOCATION);
const fs_reg new_dest = retype(alloc_frag_output(this, location),
src.type);
const unsigned l = GET_FIELD(nir_intrinsic_base(instr),
BRW_NIR_FRAG_OUTPUT_LOCATION);
assert(l >= FRAG_RESULT_DATA0);
- nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
- assert(const_offset && "Indirect output loads not allowed");
- const unsigned target = l - FRAG_RESULT_DATA0 + const_offset->u32[0];
+ const unsigned load_offset = nir_src_as_uint(instr->src[0]);
+ const unsigned target = l - FRAG_RESULT_DATA0 + load_offset;
const fs_reg tmp = bld.vgrf(dest.type, 4);
if (reinterpret_cast<const brw_wm_prog_key *>(key)->coherent_fb_fetch)
const glsl_interp_mode interpolation =
(enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
- nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
-
- if (const_sample) {
- unsigned msg_data = const_sample->i32[0] << 4;
+ if (nir_src_is_const(instr->src[0])) {
+ unsigned msg_data = nir_src_as_uint(instr->src[0]) << 4;
emit_pixel_interpolater_send(bld,
FS_OPCODE_INTERPOLATE_AT_SAMPLE,
nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
if (const_offset) {
+ assert(nir_src_bit_size(instr->src[0]) == 32);
unsigned off_x = MIN2((int)(const_offset->f32[0] * 16), 7) & 0xf;
unsigned off_y = MIN2((int)(const_offset->f32[1] * 16), 7) & 0xf;
static int
get_op_for_atomic_add(nir_intrinsic_instr *instr, unsigned src)
{
- const nir_const_value *const val = nir_src_as_const_value(instr->src[src]);
-
- if (val != NULL) {
- if (val->i32[0] == 1)
+ if (nir_src_is_const(instr->src[src])) {
+ int64_t add_val = nir_src_as_int(instr->src[src]);
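+ /* nir_src_as_int() returns the constant sign-extended to 64 bits, so
+ * the comparisons below work for any source bit size.
+ */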
+ if (add_val == 1)
return BRW_AOP_INC;
- else if (val->i32[0] == -1)
+ else if (add_val == -1)
return BRW_AOP_DEC;
}
/* Get the offset to read from */
fs_reg offset_reg;
- nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
- if (const_offset) {
- offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
+ if (nir_src_is_const(instr->src[0])) {
+ offset_reg = brw_imm_ud(instr->const_index[0] +
+ nir_src_as_uint(instr->src[0]));
} else {
offset_reg = vgrf(glsl_type::uint_type);
bld.ADD(offset_reg,
length = MIN2(2, length);
fs_reg offset_reg;
- nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
- if (const_offset) {
- offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0] +
+ if (nir_src_is_const(instr->src[1])) {
+ offset_reg = brw_imm_ud(instr->const_index[0] +
+ nir_src_as_uint(instr->src[1]) +
type_size * first_component);
} else {
offset_reg = vgrf(glsl_type::uint_type);
{
/* SSBO stores are weird in that their index is in src[1] */
const unsigned src = instr->intrinsic == nir_intrinsic_store_ssbo ? 1 : 0;
- nir_const_value *const_uniform_block =
- nir_src_as_const_value(instr->src[src]);
fs_reg surf_index;
- if (const_uniform_block) {
+ if (nir_src_is_const(instr->src[src])) {
unsigned index = stage_prog_data->binding_table.ssbo_start +
- const_uniform_block->u32[0];
+ nir_src_as_uint(instr->src[src]);
surf_index = brw_imm_ud(index);
brw_mark_surface_used(prog_data, index);
} else {
fs_reg src(UNIFORM, instr->const_index[0] / 4, dest.type);
- nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
- if (const_offset) {
- assert(const_offset->u32[0] % type_sz(dest.type) == 0);
+ if (nir_src_is_const(instr->src[0])) {
+ unsigned load_offset = nir_src_as_uint(instr->src[0]);
+ assert(load_offset % type_sz(dest.type) == 0);
/* For 16-bit types we add the modulo of the const_index[0]
* offset to access elements that are not 32-bit aligned.
*/
- src.offset = const_offset->u32[0] + instr->const_index[0] % 4;
+ src.offset = load_offset + instr->const_index[0] % 4;
for (unsigned j = 0; j < instr->num_components; j++) {
bld.MOV(offset(dest, bld, j), offset(src, bld, j));
}
case nir_intrinsic_load_ubo: {
- nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
fs_reg surf_index;
-
- if (const_index) {
+ if (nir_src_is_const(instr->src[0])) {
const unsigned index = stage_prog_data->binding_table.ubo_start +
- const_index->u32[0];
+ nir_src_as_uint(instr->src[0]);
surf_index = brw_imm_ud(index);
brw_mark_surface_used(prog_data, index);
} else {
nir->info.num_ubos - 1);
}
- nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
- if (const_offset == NULL) {
+ if (!nir_src_is_const(instr->src[1])) {
fs_reg base_offset = retype(get_nir_src(instr->src[1]),
BRW_REGISTER_TYPE_UD);
* and we have to split it if necessary.
*/
const unsigned type_size = type_sz(dest.type);
+ const unsigned load_offset = nir_src_as_uint(instr->src[1]);
/* See if we've selected this as a push constant candidate */
- if (const_index) {
- const unsigned ubo_block = const_index->u32[0];
- const unsigned offset_256b = const_offset->u32[0] / 32;
+ if (nir_src_is_const(instr->src[0])) {
+ const unsigned ubo_block = nir_src_as_uint(instr->src[0]);
+ const unsigned offset_256b = load_offset / 32;
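+ /* offset_256b is the load offset in 32-byte (256-bit) units, matching
+ * the units used for the push constant ranges checked below.
+ */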
fs_reg push_reg;
for (int i = 0; i < 4; i++) {
offset_256b < range->start + range->length) {
push_reg = fs_reg(UNIFORM, UBO_START + i, dest.type);
- push_reg.offset = const_offset->u32[0] - 32 * range->start;
+ push_reg.offset = load_offset - 32 * range->start;
break;
}
}
const fs_reg packed_consts = ubld.vgrf(BRW_REGISTER_TYPE_UD);
for (unsigned c = 0; c < instr->num_components;) {
- const unsigned base = const_offset->u32[0] + c * type_size;
+ const unsigned base = load_offset + c * type_size;
/* Number of usable components in the next block-aligned load. */
const unsigned count = MIN2(instr->num_components - c,
(block_sz - base % block_sz) / type_size);
unsigned num_components = ffs(~(writemask >> first_component)) - 1;
fs_reg write_src = offset(val_reg, bld, first_component);
- nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
-
if (type_size > 4) {
/* We can't write more than 2 64-bit components at once. Limit
* the num_components of the write to what we can do and let the next
* write message (the last component).
*/
- if ( !const_offset || ((const_offset->u32[0] +
- type_size * first_component) % 4)) {
+ if (!nir_src_is_const(instr->src[2]) ||
+ ((nir_src_as_uint(instr->src[2]) +
+ type_size * first_component) % 4)) {
/* If we use a .yz writemask we also need to emit 2
* byte-scattered write messages because the y-component is not
* 32-bit aligned.
fs_reg offset_reg;
- if (const_offset) {
- offset_reg = brw_imm_ud(const_offset->u32[0] +
+ if (nir_src_is_const(instr->src[2])) {
+ offset_reg = brw_imm_ud(nir_src_as_uint(instr->src[2]) +
type_size * first_component);
} else {
offset_reg = vgrf(glsl_type::uint_type);
case nir_intrinsic_store_output: {
fs_reg src = get_nir_src(instr->src[0]);
- nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
- assert(const_offset && "Indirect output stores not allowed");
-
+ unsigned store_offset = nir_src_as_uint(instr->src[1]);
unsigned num_components = instr->num_components;
unsigned first_component = nir_intrinsic_component(instr);
if (nir_src_bit_size(instr->src[0]) == 64) {
}
fs_reg new_dest = retype(offset(outputs[instr->const_index[0]], bld,
- 4 * const_offset->u32[0]), src.type);
+ 4 * store_offset), src.type);
for (unsigned j = 0; j < num_components; j++) {
bld.MOV(offset(new_dest, bld, j + first_component),
offset(src, bld, j));
break;
case nir_intrinsic_get_buffer_size: {
- nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
- unsigned ssbo_index = const_uniform_block ? const_uniform_block->u32[0] : 0;
+ unsigned ssbo_index = nir_src_is_const(instr->src[0]) ?
+ nir_src_as_uint(instr->src[0]) : 0;
/* A resinfo's sampler message is used to get the buffer size. The
* SIMD8's writeback message consists of four registers and SIMD16's
case nir_intrinsic_quad_broadcast: {
const fs_reg value = get_nir_src(instr->src[0]);
- nir_const_value *index = nir_src_as_const_value(instr->src[1]);
- assert(nir_src_bit_size(instr->src[1]) == 32);
+ const unsigned index = nir_src_as_uint(instr->src[1]);
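+ /* A quad broadcast is a cluster broadcast with a fixed cluster size of 4. */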
bld.emit(SHADER_OPCODE_CLUSTER_BROADCAST, retype(dest, value.type),
- value, brw_imm_ud(index->u32[0]), brw_imm_ud(4));
+ value, brw_imm_ud(index), brw_imm_ud(4));
break;
}
data2 = get_nir_src(instr->src[2]);
/* Get the offset */
- nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
- if (const_offset) {
- offset = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
+ if (nir_src_is_const(instr->src[0])) {
+ offset = brw_imm_ud(instr->const_index[0] +
+ nir_src_as_uint(instr->src[0]));
} else {
offset = vgrf(glsl_type::uint_type);
bld.ADD(offset,
data2 = get_nir_src(instr->src[2]);
/* Get the offset */
- nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
- if (const_offset) {
- offset = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
+ if (nir_src_is_const(instr->src[0])) {
+ offset = brw_imm_ud(instr->const_index[0] +
+ nir_src_as_uint(instr->src[0]));
} else {
offset = vgrf(glsl_type::uint_type);
bld.ADD(offset,
case nir_tex_src_offset: {
nir_const_value *const_offset =
nir_src_as_const_value(instr->src[i].src);
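+ /* brw_texture_offset() consumes the whole constant vector, so keep the
+ * raw nir_const_value here; the components are expected to be 32-bit.
+ */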
+ assert(nir_src_bit_size(instr->src[i].src) == 32);
unsigned offset_bits = 0;
if (const_offset &&
brw_texture_offset(const_offset->i32,
break;
case nir_tex_src_plane: {
- nir_const_value *const_plane =
- nir_src_as_const_value(instr->src[i].src);
- const uint32_t plane = const_plane->u32[0];
+ const uint32_t plane = nir_src_as_uint(instr->src[i].src);
const uint32_t texture_index =
instr->texture_index +
stage_prog_data->binding_table.plane_start[plane] -