unsigned msg_length)
{
fs_inst *inst = new (mem_ctx) fs_inst(op, 16, dst, brw_message_reg(base_mrf),
- fs_reg(0u));
+ brw_imm_ud(0u));
inst->base_mrf = base_mrf;
inst->mlen = msg_length;
* the redundant ones.
*/
fs_reg vec4_offset = vgrf(glsl_type::int_type);
- bld.ADD(vec4_offset, varying_offset, fs_reg(const_offset & ~3));
+ bld.ADD(vec4_offset, varying_offset, brw_imm_ud(const_offset & ~3));
int scale = 1;
if (devinfo->gen == 4 && bld.dispatch_width() == 8) {
this->file = BAD_FILE;
}
-/** Immediate value constructor. */
-fs_reg::fs_reg(float f)
-{
- init();
- this->file = IMM;
- this->type = BRW_REGISTER_TYPE_F;
- this->stride = 0;
- this->f = f;
-}
-
-/** Immediate value constructor. */
-fs_reg::fs_reg(int32_t i)
-{
- init();
- this->file = IMM;
- this->type = BRW_REGISTER_TYPE_D;
- this->stride = 0;
- this->d = i;
-}
-
-/** Immediate value constructor. */
-fs_reg::fs_reg(uint32_t u)
-{
- init();
- this->file = IMM;
- this->type = BRW_REGISTER_TYPE_UD;
- this->stride = 0;
- this->ud = u;
-}
-
-/** Vector float immediate value constructor. */
-fs_reg::fs_reg(uint8_t vf[4])
-{
- init();
- this->file = IMM;
- this->type = BRW_REGISTER_TYPE_VF;
- memcpy(&this->ud, vf, sizeof(unsigned));
-}
-
-/** Vector float immediate value constructor. */
-fs_reg::fs_reg(uint8_t vf0, uint8_t vf1, uint8_t vf2, uint8_t vf3)
-{
- init();
- this->file = IMM;
- this->type = BRW_REGISTER_TYPE_VF;
- this->ud = (vf0 << 0) | (vf1 << 8) | (vf2 << 16) | (vf3 << 24);
-}
-
fs_reg::fs_reg(struct brw_reg reg) :
backend_reg(reg)
{
fs_reg reset = shader_end_time;
reset.set_smear(2);
set_condmod(BRW_CONDITIONAL_Z,
- ibld.AND(ibld.null_reg_ud(), reset, fs_reg(1u)));
+ ibld.AND(ibld.null_reg_ud(), reset, brw_imm_ud(1u)));
ibld.IF(BRW_PREDICATE_NORMAL);
fs_reg start = shader_start_time;
* is 2 cycles. Remove that overhead, so I can forget about that when
* trying to determine the time taken for single instructions.
*/
- cbld.ADD(diff, diff, fs_reg(-2u));
+ cbld.ADD(diff, diff, brw_imm_ud(-2u));
SHADER_TIME_ADD(cbld, 0, diff);
- SHADER_TIME_ADD(cbld, 1, fs_reg(1u));
+ SHADER_TIME_ADD(cbld, 1, brw_imm_ud(1u));
ibld.emit(BRW_OPCODE_ELSE);
- SHADER_TIME_ADD(cbld, 2, fs_reg(1u));
+ SHADER_TIME_ADD(cbld, 2, brw_imm_ud(1u));
ibld.emit(BRW_OPCODE_ENDIF);
}
fs_reg value)
{
int index = shader_time_index * 3 + shader_time_subindex;
- fs_reg offset = fs_reg(index * SHADER_TIME_STRIDE);
+ struct brw_reg offset = brw_imm_d(index * SHADER_TIME_STRIDE);
fs_reg payload;
if (dispatch_width == 8)
if (pixel_center_integer) {
bld.MOV(wpos, this->pixel_x);
} else {
- bld.ADD(wpos, this->pixel_x, fs_reg(0.5f));
+ bld.ADD(wpos, this->pixel_x, brw_imm_f(0.5f));
}
wpos = offset(wpos, bld, 1);
offset += key->drawable_height - 1.0f;
}
- bld.ADD(wpos, pixel_y, fs_reg(offset));
+ bld.ADD(wpos, pixel_y, brw_imm_f(offset));
}
wpos = offset(wpos, bld, 1);
fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
g0.negate = true;
- bld.ASR(*reg, g0, fs_reg(15));
+ bld.ASR(*reg, g0, brw_imm_d(15));
} else {
/* Bit 31 of g1.6 is 0 if the polygon is front facing. We want to create
* a boolean result from this (1/true or 0/false).
fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
g1_6.negate = true;
- bld.ASR(*reg, g1_6, fs_reg(31));
+ bld.ASR(*reg, g1_6, brw_imm_d(31));
}
return reg;
/* Convert int_sample_pos to floating point */
bld.MOV(dst, int_sample_pos);
/* Scale to the range [0, 1] */
- bld.MUL(dst, dst, fs_reg(1 / 16.0f));
+ bld.MUL(dst, dst, brw_imm_f(1 / 16.0f));
}
else {
/* From ARB_sample_shading specification:
* rasterization is disabled, gl_SamplePosition will always be
* (0.5, 0.5).
*/
- bld.MOV(dst, fs_reg(0.5f));
+ bld.MOV(dst, brw_imm_f(0.5f));
}
}
abld.exec_all().group(1, 0)
.AND(t1, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
- fs_reg(sspi_mask));
- abld.exec_all().group(1, 0).SHR(t1, t1, fs_reg(5));
+ brw_imm_ud(sspi_mask));
+ abld.exec_all().group(1, 0).SHR(t1, t1, brw_imm_d(5));
/* This works for both SIMD8 and SIMD16 */
abld.exec_all().group(4, 0)
* "When rendering to a non-multisample buffer, or if multisample
* rasterization is disabled, gl_SampleID will always be zero."
*/
- abld.MOV(*reg, fs_reg(0));
+ abld.MOV(*reg, brw_imm_d(0));
}
return reg;
/* Generate a pull load into dst. */
if (inst->src[i].reladdr) {
VARYING_PULL_CONSTANT_LOAD(ibld, dst,
- fs_reg(index),
+ brw_imm_ud(index),
*inst->src[i].reladdr,
pull_index);
inst->src[i].reladdr = NULL;
inst->src[i].stride = 1;
} else {
const fs_builder ubld = ibld.exec_all().group(8, 0);
- fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15);
+ struct brw_reg offset = brw_imm_ud((unsigned)(pull_index * 4) & ~15);
ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
- dst, fs_reg(index), offset);
+ dst, brw_imm_ud(index), offset);
inst->src[i].set_smear(pull_index & 3);
}
brw_mark_surface_used(prog_data, index);
case SHADER_OPCODE_FIND_LIVE_CHANNEL:
if (depth == 0) {
inst->opcode = BRW_OPCODE_MOV;
- inst->src[0] = fs_reg(0u);
+ inst->src[0] = brw_imm_ud(0u);
inst->sources = 1;
inst->force_writemask_all = true;
progress = true;
(has_lod || shadow_c.file != BAD_FILE ||
(op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8))) {
for (unsigned i = coord_components; i < 3; i++)
- bld.MOV(offset(msg_end, bld, i), fs_reg(0.0f));
+ bld.MOV(offset(msg_end, bld, i), brw_imm_f(0.0f));
msg_end = offset(msg_end, bld, 3 - coord_components);
}
/* There's no plain shadow compare message, so we use shadow
* compare with a bias of 0.0.
*/
- bld.MOV(msg_end, fs_reg(0.0f));
+ bld.MOV(msg_end, brw_imm_f(0.0f));
msg_end = offset(msg_end, bld, 1);
}
case SHADER_OPCODE_TXF_CMS:
msg_lod = offset(msg_coords, bld, 3);
/* lod */
- bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), fs_reg(0u));
+ bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));
/* sample index */
bld.MOV(retype(offset(msg_lod, bld, 1), BRW_REGISTER_TYPE_UD), sample_index);
msg_end = offset(msg_lod, bld, 2);
if (bld.shader->stage != MESA_SHADER_FRAGMENT &&
op == SHADER_OPCODE_TEX) {
op = SHADER_OPCODE_TXL;
- lod = fs_reg(0.0f);
+ lod = brw_imm_f(0.0f);
}
/* Set up the LOD info */
{
fs_builder ubld = bld.exec_all().group(8, 0);
const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD);
- ubld.MOV(dst, fs_reg(0));
+ ubld.MOV(dst, brw_imm_d(0));
ubld.MOV(component(dst, 7), sample_mask);
return dst;
}
case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
lower_surface_logical_send(ibld, inst,
SHADER_OPCODE_TYPED_SURFACE_READ,
- fs_reg(0xffff));
+ brw_imm_d(0xffff));
break;
case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
*/
if (gs_compile->control_data_header_size_bits <= 32) {
const fs_builder abld = bld.annotate("initialize control data bits");
- abld.MOV(this->control_data_bits, fs_reg(0u));
+ abld.MOV(this->control_data_bits, brw_imm_ud(0u));
}
}
sample_mask_reg() const
{
if (shader->stage != MESA_SHADER_FRAGMENT) {
- return src_reg(0xffff);
+ return brw_imm_d(0xffff);
} else if (((brw_wm_prog_data *)shader->stage_prog_data)->uses_kill) {
return brw_flag_reg(0, 1);
} else {
const dst_reg x_times_one_minus_a = vgrf(dst.type);
MUL(y_times_a, y, a);
- ADD(one_minus_a, negate(a), src_reg(1.0f));
+ ADD(one_minus_a, negate(a), brw_imm_f(1.0f));
MUL(x_times_one_minus_a, x, src_reg(one_minus_a));
return ADD(dst, src_reg(x_times_one_minus_a), src_reg(y_times_a));
}
imm->block->last_non_control_flow_inst()->next);
const fs_builder ibld = bld.at(imm->block, n).exec_all().group(1, 0);
- ibld.MOV(reg, fs_reg(imm->val));
+ ibld.MOV(reg, brw_imm_f(imm->val));
imm->nr = reg.nr;
imm->subreg_offset = reg.subreg_offset;
const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL);
fs_reg g1(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
- abld.SHR(iid, g1, fs_reg(27u));
+ abld.SHR(iid, g1, brw_imm_ud(27u));
*reg = iid;
}
break;
tmp.subreg_offset = 2;
tmp.stride = 2;
- fs_inst *or_inst = bld.OR(tmp, g0, fs_reg(0x3f80));
+ fs_inst *or_inst = bld.OR(tmp, g0, brw_imm_d(0x3f80));
or_inst->src[1].type = BRW_REGISTER_TYPE_UW;
tmp.type = BRW_REGISTER_TYPE_D;
g1_6.negate = true;
}
- bld.OR(tmp, g1_6, fs_reg(0x3f800000));
+ bld.OR(tmp, g1_6, brw_imm_d(0x3f800000));
}
- bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000));
+ bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, brw_imm_d(0xbf800000));
return true;
}
* Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
* zero.
*/
- bld.CMP(bld.null_reg_f(), op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
+ bld.CMP(bld.null_reg_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ);
fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
op[0].type = BRW_REGISTER_TYPE_UD;
result.type = BRW_REGISTER_TYPE_UD;
-   bld.AND(result_int, op[0], fs_reg(0x80000000u));
-   inst = bld.OR(result_int, result_int, fs_reg(0x3f800000u));
+   bld.AND(result_int, op[0], brw_imm_ud(0x80000000u));
+   inst = bld.OR(result_int, result_int, brw_imm_ud(0x3f800000u));
inst->predicate = BRW_PREDICATE_NORMAL;
if (instr->dest.saturate) {
inst = bld.MOV(result, result);
* -> non-negative val generates 0x00000000.
* Predicated OR sets 1 if val is positive.
*/
- bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_G);
- bld.ASR(result, op[0], fs_reg(31));
- inst = bld.OR(result, result, fs_reg(1));
+ bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_G);
+ bld.ASR(result, op[0], brw_imm_d(31));
+ inst = bld.OR(result, result, brw_imm_d(1));
inst->predicate = BRW_PREDICATE_NORMAL;
break;
case nir_op_fddy:
if (fs_key->high_quality_derivatives) {
inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
- fs_reg(fs_key->render_to_fbo));
+ brw_imm_d(fs_key->render_to_fbo));
} else {
inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
- fs_reg(fs_key->render_to_fbo));
+ brw_imm_d(fs_key->render_to_fbo));
}
inst->saturate = instr->dest.saturate;
break;
case nir_op_fddy_fine:
inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
- fs_reg(fs_key->render_to_fbo));
+ brw_imm_d(fs_key->render_to_fbo));
inst->saturate = instr->dest.saturate;
break;
case nir_op_fddy_coarse:
inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
- fs_reg(fs_key->render_to_fbo));
+ brw_imm_d(fs_key->render_to_fbo));
inst->saturate = instr->dest.saturate;
break;
break;
case nir_op_f2b:
- bld.CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
+ bld.CMP(result, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ);
break;
case nir_op_i2b:
- bld.CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
+ bld.CMP(result, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
break;
case nir_op_ftrunc:
* from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
* subtract the result from 31 to convert the MSB count into an LSB count.
*/
-   bld.CMP(bld.null_reg_d(), result, fs_reg(-1), BRW_CONDITIONAL_NZ);
-   inst = bld.ADD(result, result, fs_reg(31));
+   bld.CMP(bld.null_reg_d(), result, brw_imm_d(-1), BRW_CONDITIONAL_NZ);
+   inst = bld.ADD(result, result, brw_imm_d(31));
inst->predicate = BRW_PREDICATE_NORMAL;
inst->src[0].negate = true;
break;
if (optimize_frontfacing_ternary(instr, result))
return;
- bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
+ bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
inst = bld.SEL(result, op[1], op[2]);
inst->predicate = BRW_PREDICATE_NORMAL;
break;
if (devinfo->gen <= 5 &&
(instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
fs_reg masked = vgrf(glsl_type::int_type);
- bld.AND(masked, result, fs_reg(1));
+ bld.AND(masked, result, brw_imm_d(1));
masked.negate = true;
bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
}
fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_D, instr->def.num_components);
for (unsigned i = 0; i < instr->def.num_components; i++)
- bld.MOV(offset(reg, bld, i), fs_reg(instr->value.i[i]));
+ bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value.i[i]));
nir_ssa_values[instr->def.index] = reg;
}
reg.reladdr = new(v->mem_ctx) fs_reg(v->vgrf(glsl_type::int_type));
v->bld.MUL(*reg.reladdr, v->get_nir_src(*indirect),
- fs_reg(multiplier));
+ brw_imm_d(multiplier));
}
return reg;
*/
bld.emit_minmax(tmp, retype(get_nir_src(deref_array->indirect),
BRW_REGISTER_TYPE_UD),
- fs_reg(size - base - 1), BRW_CONDITIONAL_L);
+ brw_imm_ud(size - base - 1), BRW_CONDITIONAL_L);
} else {
bld.MOV(tmp, get_nir_src(deref_array->indirect));
}
- bld.MUL(tmp, tmp, fs_reg(element_size));
+ bld.MUL(tmp, tmp, brw_imm_ud(element_size));
if (image.reladdr)
bld.ADD(*image.reladdr, *image.reladdr, tmp);
else
fs_reg result = bld.vgrf(x.type, 1);
fs_reg one = bld.vgrf(x.type, 1);
- bld.MOV(one, retype(fs_reg(1), one.type));
+ bld.MOV(one, retype(brw_imm_d(1), one.type));
bld.SHL(result, one, x);
return result;
}
/* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
- abld.ADD(prev_count, vertex_count, fs_reg(0xffffffffu));
+ abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
fs_reg mask = intexp2(abld, prev_count);
/* Note: we're relying on the fact that the GEN SHL instruction only pays
* attention to the lower 5 bits of its second source argument, so on this
if (opcode != SHADER_OPCODE_URB_WRITE_SIMD8) {
fs_reg dword_index = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
- abld.ADD(prev_count, vertex_count, fs_reg(0xffffffffu));
+ abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
unsigned log2_bits_per_vertex =
_mesa_fls(gs_compile->control_data_bits_per_vertex);
- abld.SHR(dword_index, prev_count, fs_reg(6u - log2_bits_per_vertex));
+ abld.SHR(dword_index, prev_count, brw_imm_ud(6u - log2_bits_per_vertex));
if (per_slot_offset.file != BAD_FILE) {
/* Set the per-slot offset to dword_index / 4, so that we'll write to
* the appropriate OWord within the control data header.
*/
- abld.SHR(per_slot_offset, dword_index, fs_reg(2u));
+ abld.SHR(per_slot_offset, dword_index, brw_imm_ud(2u));
}
/* Set the channel masks to 1 << (dword_index % 4), so that we'll
* write to the appropriate DWORD within the OWORD.
*/
fs_reg channel = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
- fwa_bld.AND(channel, dword_index, fs_reg(3u));
+ fwa_bld.AND(channel, dword_index, brw_imm_ud(3u));
channel_mask = intexp2(fwa_bld, channel);
/* Then the channel masks need to be in bits 23:16. */
- fwa_bld.SHL(channel_mask, channel_mask, fs_reg(16u));
+ fwa_bld.SHL(channel_mask, channel_mask, brw_imm_ud(16u));
}
/* Store the control data bits in the message payload and send it. */
/* reg::sid = stream_id */
fs_reg sid = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
- abld.MOV(sid, fs_reg(stream_id));
+ abld.MOV(sid, brw_imm_ud(stream_id));
/* reg:shift_count = 2 * (vertex_count - 1) */
fs_reg shift_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
- abld.SHL(shift_count, vertex_count, fs_reg(1u));
+ abld.SHL(shift_count, vertex_count, brw_imm_ud(1u));
/* Note: we're relying on the fact that the GEN SHL instruction only pays
* attention to the lower 5 bits of its second source argument, so on this
*/
fs_inst *inst =
abld.AND(bld.null_reg_d(), vertex_count,
- fs_reg(32u / gs_compile->control_data_bits_per_vertex - 1u));
+ brw_imm_ud(32u / gs_compile->control_data_bits_per_vertex - 1u));
inst->conditional_mod = BRW_CONDITIONAL_Z;
abld.IF(BRW_PREDICATE_NORMAL);
/* If vertex_count is 0, then no control data bits have been
* accumulated yet, so we can skip emitting them.
*/
- abld.CMP(bld.null_reg_d(), vertex_count, fs_reg(0u),
+ abld.CMP(bld.null_reg_d(), vertex_count, brw_imm_ud(0u),
BRW_CONDITIONAL_NEQ);
abld.IF(BRW_PREDICATE_NORMAL);
emit_gs_control_data_bits(vertex_count);
* effect of any call to EndPrimitive() that the shader may have
* made before outputting its first vertex.
*/
- inst = abld.MOV(this->control_data_bits, fs_reg(0u));
+ inst = abld.MOV(this->control_data_bits, brw_imm_ud(0u));
inst->force_writemask_all = true;
abld.emit(BRW_OPCODE_ENDIF);
}
/* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */
bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210)));
/* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */
- bld.SHL(channel_offsets, sequence, fs_reg(2u));
+ bld.SHL(channel_offsets, sequence, brw_imm_ud(2u));
/* Convert vertex_index to bytes (multiply by 32) */
bld.SHL(vertex_offset_bytes,
retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
fs_reg(brw_vec8_grf(first_icp_handle, 0)),
fs_reg(icp_offset_bytes),
- fs_reg(nir->info.gs.vertices_in * REG_SIZE));
+ brw_imm_ud(nir->info.gs.vertices_in * REG_SIZE));
}
fs_inst *inst;
fs_inst *cmp;
if (instr->intrinsic == nir_intrinsic_discard_if) {
cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
- fs_reg(0), BRW_CONDITIONAL_Z);
+ brw_imm_d(0), BRW_CONDITIONAL_Z);
} else {
fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
BRW_REGISTER_TYPE_UW));
FS_OPCODE_INTERPOLATE_AT_CENTROID,
dst_xy,
fs_reg(), /* src */
- fs_reg(0u),
+ brw_imm_ud(0u),
interpolation);
break;
FS_OPCODE_INTERPOLATE_AT_SAMPLE,
dst_xy,
fs_reg(), /* src */
- fs_reg(msg_data),
+ brw_imm_ud(msg_data),
interpolation);
} else {
const fs_reg sample_src = retype(get_nir_src(instr->src[0]),
if (nir_src_is_dynamically_uniform(instr->src[0])) {
const fs_reg sample_id = bld.emit_uniformize(sample_src);
const fs_reg msg_data = vgrf(glsl_type::uint_type);
- bld.exec_all().group(1, 0).SHL(msg_data, sample_id, fs_reg(4u));
+ bld.exec_all().group(1, 0)
+ .SHL(msg_data, sample_id, brw_imm_ud(4u));
emit_pixel_interpolater_send(bld,
FS_OPCODE_INTERPOLATE_AT_SAMPLE,
dst_xy,
sample_src, sample_id,
BRW_CONDITIONAL_EQ);
const fs_reg msg_data = vgrf(glsl_type::uint_type);
- bld.exec_all().group(1, 0).SHL(msg_data, sample_id, fs_reg(4u));
+ bld.exec_all().group(1, 0)
+ .SHL(msg_data, sample_id, brw_imm_ud(4u));
fs_inst *inst =
emit_pixel_interpolater_send(bld,
FS_OPCODE_INTERPOLATE_AT_SAMPLE,
FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
dst_xy,
fs_reg(), /* src */
- fs_reg(off_x | (off_y << 4)),
+ brw_imm_ud(off_x | (off_y << 4)),
interpolation);
} else {
fs_reg src = vgrf(glsl_type::ivec2_type);
BRW_REGISTER_TYPE_F);
for (int i = 0; i < 2; i++) {
fs_reg temp = vgrf(glsl_type::float_type);
- bld.MUL(temp, offset(offset_src, bld, i), fs_reg(16.0f));
+ bld.MUL(temp, offset(offset_src, bld, i), brw_imm_f(16.0f));
fs_reg itemp = vgrf(glsl_type::int_type);
bld.MOV(itemp, temp); /* float to int */
* FRAGMENT_INTERPOLATION_OFFSET_BITS"
*/
set_condmod(BRW_CONDITIONAL_L,
- bld.SEL(offset(src, bld, i), itemp, fs_reg(7)));
+ bld.SEL(offset(src, bld, i), itemp, brw_imm_d(7)));
}
const enum opcode opcode = FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET;
opcode,
dst_xy,
src,
- fs_reg(0u),
+ brw_imm_ud(0u),
interpolation);
}
break;
cs_prog_data->uses_num_work_groups = true;
- fs_reg surf_index = fs_reg(surface);
+ fs_reg surf_index = brw_imm_ud(surface);
brw_mark_surface_used(prog_data, surface);
/* Read the 3 GLuint components of gl_NumWorkGroups */
for (unsigned i = 0; i < 3; i++) {
fs_reg read_result =
emit_untyped_read(bld, surf_index,
- fs_reg(i << 2),
+ brw_imm_ud(i << 2),
1 /* dims */, 1 /* size */,
BRW_PREDICATE_NONE);
read_result.type = dest.type;
/* Emit a surface read or atomic op. */
switch (instr->intrinsic) {
case nir_intrinsic_atomic_counter_read:
- tmp = emit_untyped_read(bld, fs_reg(surface), offset, 1, 1);
+ tmp = emit_untyped_read(bld, brw_imm_ud(surface), offset, 1, 1);
break;
case nir_intrinsic_atomic_counter_inc:
- tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(),
+ tmp = emit_untyped_atomic(bld, brw_imm_ud(surface), offset, fs_reg(),
fs_reg(), 1, 1, BRW_AOP_INC);
break;
case nir_intrinsic_atomic_counter_dec:
- tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(),
+ tmp = emit_untyped_atomic(bld, brw_imm_ud(surface), offset, fs_reg(),
fs_reg(), 1, 1, BRW_AOP_PREDEC);
break;
for (unsigned c = 0; c < info->dest_components; ++c) {
if ((int)c >= type->coordinate_components()) {
bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
- fs_reg(1));
+ brw_imm_d(1));
} else if (c == 1 && is_1d_array_image) {
bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
offset(size, bld, 2));
} else if (c == 2 && is_cube_array_image) {
bld.emit(SHADER_OPCODE_INT_QUOTIENT,
offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
- offset(size, bld, c), fs_reg(6));
+ offset(size, bld, c), brw_imm_d(6));
} else {
bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
offset(size, bld, c));
case nir_intrinsic_image_samples:
/* The driver does not support multi-sampled images. */
- bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), fs_reg(1));
+ bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(1));
break;
case nir_intrinsic_load_uniform_indirect:
if (const_index) {
const unsigned index = stage_prog_data->binding_table.ubo_start +
const_index->u[0];
- surf_index = fs_reg(index);
+ surf_index = brw_imm_ud(index);
brw_mark_surface_used(prog_data, index);
} else {
/* The block index is not a constant. Evaluate the index expression
*/
surf_index = vgrf(glsl_type::uint_type);
bld.ADD(surf_index, get_nir_src(instr->src[0]),
- fs_reg(stage_prog_data->binding_table.ubo_start));
+ brw_imm_ud(stage_prog_data->binding_table.ubo_start));
surf_index = bld.emit_uniformize(surf_index);
/* Assume this may touch any UBO. It would be nice to provide
fs_reg base_offset = vgrf(glsl_type::int_type);
bld.SHR(base_offset, retype(get_nir_src(instr->src[1]),
BRW_REGISTER_TYPE_D),
- fs_reg(2));
+ brw_imm_d(2));
unsigned vec4_offset = instr->const_index[0] / 4;
for (int i = 0; i < instr->num_components; i++)
fs_reg packed_consts = vgrf(glsl_type::float_type);
packed_consts.type = dest.type;
- fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
+ struct brw_reg const_offset_reg = brw_imm_ud(instr->const_index[0] & ~15);
bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
surf_index, const_offset_reg);
if (const_uniform_block) {
unsigned index = stage_prog_data->binding_table.ssbo_start +
const_uniform_block->u[0];
- surf_index = fs_reg(index);
+ surf_index = brw_imm_ud(index);
brw_mark_surface_used(prog_data, index);
} else {
surf_index = vgrf(glsl_type::uint_type);
bld.ADD(surf_index, get_nir_src(instr->src[0]),
- fs_reg(stage_prog_data->binding_table.ssbo_start));
+ brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
/* Assume this may touch any UBO. It would be nice to provide
* a tighter bound, but the array information is already lowered away.
if (has_indirect) {
offset_reg = get_nir_src(instr->src[1]);
} else {
- offset_reg = fs_reg(instr->const_index[0]);
+ offset_reg = brw_imm_ud(instr->const_index[0]);
}
/* Read the vector */
if (const_uniform_block) {
unsigned index = stage_prog_data->binding_table.ssbo_start +
const_uniform_block->u[0];
- surf_index = fs_reg(index);
+ surf_index = brw_imm_ud(index);
brw_mark_surface_used(prog_data, index);
} else {
surf_index = vgrf(glsl_type::uint_type);
bld.ADD(surf_index, get_nir_src(instr->src[1]),
- fs_reg(stage_prog_data->binding_table.ssbo_start));
+ brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
brw_mark_surface_used(prog_data,
stage_prog_data->binding_table.ssbo_start +
fs_reg offset_reg;
if (!has_indirect) {
- offset_reg = fs_reg(instr->const_index[0] + 4 * first_component);
+ offset_reg = brw_imm_ud(instr->const_index[0] + 4 * first_component);
} else {
offset_reg = vgrf(glsl_type::uint_type);
bld.ADD(offset_reg,
retype(get_nir_src(instr->src[2]), BRW_REGISTER_TYPE_UD),
- fs_reg(4 * first_component));
+ brw_imm_ud(4 * first_component));
}
emit_untyped_write(bld, surf_index, offset_reg,
int reg_width = dispatch_width / 8;
/* Set LOD = 0 */
- fs_reg source = fs_reg(0);
+ fs_reg source = brw_imm_d(0);
int mlen = 1 * reg_width;
BRW_REGISTER_TYPE_UD);
const unsigned index = prog_data->binding_table.ssbo_start + ssbo_index;
fs_inst *inst = bld.emit(FS_OPCODE_GET_BUFFER_SIZE, buffer_size,
- src_payload, fs_reg(index));
+ src_payload, brw_imm_ud(index));
inst->header_size = 0;
inst->mlen = mlen;
inst->regs_written = regs_written;
if (const_surface) {
unsigned surf_index = stage_prog_data->binding_table.ssbo_start +
const_surface->u[0];
- surface = fs_reg(surf_index);
+ surface = brw_imm_ud(surf_index);
brw_mark_surface_used(prog_data, surf_index);
} else {
surface = vgrf(glsl_type::uint_type);
bld.ADD(surface, get_nir_src(instr->src[0]),
- fs_reg(stage_prog_data->binding_table.ssbo_start));
+ brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
/* Assume this may touch any SSBO. This is the same we do for other
* UBO/SSBO accesses with non-constant surface.
fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
{
unsigned sampler = instr->sampler_index;
- fs_reg sampler_reg(sampler);
+ fs_reg sampler_reg(brw_imm_ud(sampler));
int gather_component = instr->component;
/* Emit code to evaluate the actual indexing expression */
sampler_reg = vgrf(glsl_type::uint_type);
- bld.ADD(sampler_reg, src, fs_reg(sampler));
+ bld.ADD(sampler_reg, src, brw_imm_ud(sampler));
sampler_reg = bld.emit_uniformize(sampler_reg);
break;
}
key_tex->compressed_multisample_layout_mask & (1 << sampler)) {
mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg);
} else {
- mcs = fs_reg(0u);
+ mcs = brw_imm_ud(0u);
}
}
for (unsigned i = 0; i < 3; i++) {
if (instr->const_offset[i] != 0) {
assert(offset_components == 0);
- tex_offset = fs_reg(brw_texture_offset(instr->const_offset, 3));
+ tex_offset = brw_imm_ud(brw_texture_offset(instr->const_offset, 3));
break;
}
}
*/
const fs_reg usurface = bld.emit_uniformize(surface);
const fs_reg srcs[] = {
- addr, src, usurface, fs_reg(dims), fs_reg(arg)
+ addr, src, usurface, brw_imm_ud(dims), brw_imm_ud(arg)
};
const fs_reg dst = bld.vgrf(BRW_REGISTER_TYPE_UD, rsize);
fs_inst *inst = bld.emit(opcode, dst, srcs, ARRAY_SIZE(srcs));
* messages causes a hang on IVB and VLV.
*/
set_predicate(pred,
- bld.CMP(bld.null_reg_ud(), stride, fs_reg(4),
+ bld.CMP(bld.null_reg_ud(), stride, brw_imm_d(4),
BRW_CONDITIONAL_G));
return BRW_PREDICATE_NORMAL;
*/
bld.CMP(bld.null_reg_ud(),
retype(size, BRW_REGISTER_TYPE_UD),
- fs_reg(0), BRW_CONDITIONAL_NZ);
+ brw_imm_d(0), BRW_CONDITIONAL_NZ);
return BRW_PREDICATE_NORMAL;
} else {
* FINISHME: Factor out this frequently recurring pattern into a
* helper function.
*/
- const fs_reg srcs[] = { addr, fs_reg(0), offset(addr, bld, 1) };
+ const fs_reg srcs[] = { addr, brw_imm_d(0), offset(addr, bld, 1) };
const fs_reg dst = bld.vgrf(addr.type, dims);
bld.LOAD_PAYLOAD(dst, srcs, dims, 0);
return dst;
bld.ADD(offset(addr, bld, c), offset(off, bld, c),
(c < dims ?
offset(retype(coord, BRW_REGISTER_TYPE_UD), bld, c) :
- fs_reg(0)));
+ fs_reg(brw_imm_d(0))));
/* The layout of 3-D textures in memory is sort-of like a tiling
* format. At each miplevel, the slices are arranged in rows of
/* Decompose z into a major (tmp.y) and a minor (tmp.x)
* index.
*/
- bld.BFE(offset(tmp, bld, 0), offset(tile, bld, 2), fs_reg(0),
+ bld.BFE(offset(tmp, bld, 0), offset(tile, bld, 2), brw_imm_d(0),
offset(retype(coord, BRW_REGISTER_TYPE_UD), bld, 2));
bld.SHR(offset(tmp, bld, 1),
offset(retype(coord, BRW_REGISTER_TYPE_UD), bld, 2),
for (unsigned c = 0; c < 2; ++c) {
/* Calculate the minor x and y indices. */
bld.BFE(offset(minor, bld, c), offset(tile, bld, c),
- fs_reg(0), offset(addr, bld, c));
+ brw_imm_d(0), offset(addr, bld, c));
/* Calculate the major x and y indices. */
bld.SHR(offset(major, bld, c),
/* XOR tmp.x and tmp.y with bit 6 of the memory address. */
bld.XOR(tmp, tmp, offset(tmp, bld, 1));
- bld.AND(tmp, tmp, fs_reg(1 << 6));
+ bld.AND(tmp, tmp, brw_imm_d(1 << 6));
bld.XOR(dst, dst, tmp);
}
const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
/* Shift each component left to the correct bitfield position. */
- bld.SHL(tmp, offset(src, bld, c), fs_reg(shifts[c] % 32));
+ bld.SHL(tmp, offset(src, bld, c), brw_imm_ud(shifts[c] % 32));
/* Add everything up. */
if (seen[shifts[c] / 32]) {
/* Shift left to discard the most significant bits. */
bld.SHL(offset(dst, bld, c),
offset(src, bld, shifts[c] / 32),
- fs_reg(32 - shifts[c] % 32 - widths[c]));
+ brw_imm_ud(32 - shifts[c] % 32 - widths[c]));
/* Shift back to the least significant bits using an arithmetic
* shift to get sign extension on signed types.
*/
bld.ASR(offset(dst, bld, c),
- offset(dst, bld, c), fs_reg(32 - widths[c]));
+ offset(dst, bld, c), brw_imm_ud(32 - widths[c]));
}
}
if (widths[c]) {
/* Clamp to the maximum value. */
bld.emit_minmax(offset(dst, bld, c), offset(src, bld, c),
- fs_reg((int)scale(widths[c] - s)),
+ brw_imm_d((int)scale(widths[c] - s)),
BRW_CONDITIONAL_L);
/* Clamp to the minimum value. */
if (is_signed)
bld.emit_minmax(offset(dst, bld, c), offset(dst, bld, c),
- fs_reg(-(int)scale(widths[c] - s) - 1),
+ brw_imm_d(-(int)scale(widths[c] - s) - 1),
BRW_CONDITIONAL_GE);
}
}
/* Divide by the normalization constants. */
bld.MUL(offset(dst, bld, c), offset(dst, bld, c),
- fs_reg(1.0f / scale(widths[c] - s)));
+ brw_imm_f(1.0f / scale(widths[c] - s)));
/* Clamp to the minimum value. */
if (is_signed)
bld.emit_minmax(offset(dst, bld, c),
- offset(dst, bld, c), fs_reg(-1.0f),
+ offset(dst, bld, c), brw_imm_f(-1.0f),
BRW_CONDITIONAL_GE);
}
}
/* Clamp the normalized floating-point argument. */
if (is_signed) {
bld.emit_minmax(offset(fdst, bld, c), offset(src, bld, c),
- fs_reg(-1.0f), BRW_CONDITIONAL_GE);
+ brw_imm_f(-1.0f), BRW_CONDITIONAL_GE);
bld.emit_minmax(offset(fdst, bld, c), offset(fdst, bld, c),
- fs_reg(1.0f), BRW_CONDITIONAL_L);
+ brw_imm_f(1.0f), BRW_CONDITIONAL_L);
} else {
set_saturate(true, bld.MOV(offset(fdst, bld, c),
offset(src, bld, c)));
/* Multiply by the normalization constants. */
bld.MUL(offset(fdst, bld, c), offset(fdst, bld, c),
- fs_reg((float)scale(widths[c] - s)));
+ brw_imm_f((float)scale(widths[c] - s)));
/* Convert to integer. */
bld.RNDE(offset(fdst, bld, c), offset(fdst, bld, c));
*/
if (widths[c] < 16)
bld.SHL(offset(dst, bld, c),
- offset(dst, bld, c), fs_reg(15 - widths[c]));
+ offset(dst, bld, c), brw_imm_ud(15 - widths[c]));
/* Convert to 32-bit floating point. */
bld.F16TO32(offset(fdst, bld, c), offset(dst, bld, c));
/* Clamp to the minimum value. */
if (widths[c] < 16)
bld.emit_minmax(offset(fdst, bld, c), offset(fdst, bld, c),
- fs_reg(0.0f), BRW_CONDITIONAL_GE);
+ brw_imm_f(0.0f), BRW_CONDITIONAL_GE);
/* Convert to 16-bit floating-point. */
bld.F32TO16(offset(dst, bld, c), offset(fdst, bld, c));
*/
if (widths[c] < 16)
bld.SHR(offset(dst, bld, c), offset(dst, bld, c),
- fs_reg(15 - widths[c]));
+ brw_imm_ud(15 - widths[c]));
}
}
for (unsigned c = 0; c < 4; ++c)
bld.MOV(offset(dst, bld, c),
- widths[c] ? offset(src, bld, c) : fs_reg(pad[c]));
+ widths[c] ? offset(src, bld, c)
+ : fs_reg(brw_imm_ud(pad[c])));
return dst;
}
/* An out of bounds surface access should give zero as result. */
for (unsigned c = 0; c < size; ++c)
set_predicate(pred, bld.SEL(offset(tmp, bld, c),
- offset(tmp, bld, c), fs_reg(0)));
+ offset(tmp, bld, c), brw_imm_d(0)));
}
/* Set the register type to D instead of UD if the data type is
/* An unbound surface access should give zero as result. */
if (rsize)
- set_predicate(pred, bld.SEL(tmp, tmp, fs_reg(0)));
+ set_predicate(pred, bld.SEL(tmp, tmp, brw_imm_d(0)));
return tmp;
}
chan = offset(chan, bld, i);
set_condmod(BRW_CONDITIONAL_GE,
- bld.emit(BRW_OPCODE_SEL, chan, chan, fs_reg(0.0f)));
+ bld.emit(BRW_OPCODE_SEL, chan, chan, brw_imm_f(0.0f)));
/* Our parameter comes in as 1.0/width or 1.0/height,
* because that's what people normally want for doing
const fs_reg dest = vgrf(glsl_type::uvec4_type);
const fs_reg srcs[] = {
coordinate, fs_reg(), fs_reg(), fs_reg(), fs_reg(), fs_reg(),
- sampler, fs_reg(), fs_reg(components), fs_reg(0)
+ sampler, fs_reg(), brw_imm_ud(components), brw_imm_d(0)
};
fs_inst *inst = bld.emit(SHADER_OPCODE_TXF_MCS_LOGICAL, dest, srcs,
ARRAY_SIZE(srcs));
this->result = res;
for (int i=0; i<4; i++) {
- bld.MOV(res, fs_reg(swiz == SWIZZLE_ZERO ? 0.0f : 1.0f));
+ bld.MOV(res, brw_imm_f(swiz == SWIZZLE_ZERO ? 0.0f : 1.0f));
res = offset(res, bld, 1);
}
return;
* pass a valid LOD argument.
*/
assert(lod.file == BAD_FILE);
- lod = fs_reg(0u);
+ lod = brw_imm_ud(0u);
}
if (coordinate.file != BAD_FILE) {
const fs_reg srcs[] = {
coordinate, shadow_c, lod, lod2,
sample_index, mcs, sampler_reg, offset_value,
- fs_reg(coord_components), fs_reg(grad_components)
+ brw_imm_d(coord_components), brw_imm_d(grad_components)
};
enum opcode opcode;
if (op == ir_txs && is_cube_array) {
fs_reg depth = offset(dst, bld, 2);
fs_reg fixed_depth = vgrf(glsl_type::int_type);
- bld.emit(SHADER_OPCODE_INT_QUOTIENT, fixed_depth, depth, fs_reg(6));
+ bld.emit(SHADER_OPCODE_INT_QUOTIENT, fixed_depth, depth, brw_imm_d(6));
fs_reg *fixed_payload = ralloc_array(mem_ctx, fs_reg, inst->regs_written);
int components = inst->regs_written / (inst->exec_size / 8);
for (int i = 0; i < 4; i++) {
fs_reg dst_f = retype(dst, BRW_REGISTER_TYPE_F);
/* Convert from UNORM to UINT */
- bld.MUL(dst_f, dst_f, fs_reg((float)((1 << width) - 1)));
+ bld.MUL(dst_f, dst_f, brw_imm_f((1 << width) - 1));
bld.MOV(dst, dst_f);
if (wa & WA_SIGN) {
* shifting the sign bit into place, then shifting back
* preserving sign.
*/
- bld.SHL(dst, dst, fs_reg(32 - width));
- bld.ASR(dst, dst, fs_reg(32 - width));
+ bld.SHL(dst, dst, brw_imm_d(32 - width));
+ bld.ASR(dst, dst, brw_imm_d(32 - width));
}
dst = offset(dst, bld, 1);
l = offset(l, bld, i);
if (swiz == SWIZZLE_ZERO) {
- bld.MOV(l, fs_reg(0.0f));
+ bld.MOV(l, brw_imm_f(0.0f));
} else if (swiz == SWIZZLE_ONE) {
- bld.MOV(l, fs_reg(1.0f));
+ bld.MOV(l, brw_imm_f(1.0f));
} else {
bld.MOV(l, offset(orig_val, bld,
GET_SWZ(key_tex->swizzles[sampler], i)));
const float color[4] = { 1.0, 0.0, 1.0, 0.0 };
for (int i = 0; i < 4; i++) {
bld.MOV(fs_reg(MRF, 2 + i * reg_width, BRW_REGISTER_TYPE_F),
- fs_reg(color[i]));
+ brw_imm_f(color[i]));
}
fs_inst *write;
fs_reg color = offset(outputs[0], bld, 3);
/* f0.1 &= func(color, ref) */
- cmp = abld.CMP(bld.null_reg_f(), color, fs_reg(key->alpha_test_ref),
+ cmp = abld.CMP(bld.null_reg_f(), color, brw_imm_f(key->alpha_test_ref),
cond_for_alpha_func(key->alpha_test_func));
}
cmp->predicate = BRW_PREDICATE_NORMAL;
const fs_reg sources[] = {
color0, color1, src0_alpha, src_depth, dst_depth, src_stencil,
- sample_mask, fs_reg(components)
+ sample_mask, brw_imm_ud(components)
};
assert(ARRAY_SIZE(sources) - 1 == FB_WRITE_LOGICAL_SRC_COMPONENTS);
fs_inst *write = bld.emit(FS_OPCODE_FB_WRITE_LOGICAL, fs_reg(),
fs_reg offset;
if (gs_vertex_count.file == IMM) {
- per_slot_offsets = fs_reg(output_vertex_size_owords *
- gs_vertex_count.ud);
+ per_slot_offsets = brw_imm_ud(output_vertex_size_owords *
+ gs_vertex_count.ud);
} else {
per_slot_offsets = vgrf(glsl_type::int_type);
bld.MUL(per_slot_offsets, gs_vertex_count,
- fs_reg(output_vertex_size_owords));
+ brw_imm_ud(output_vertex_size_owords));
}
}
}
fs_reg zero(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
- bld.MOV(zero, fs_reg(0u));
+ bld.MOV(zero, brw_imm_ud(0u));
sources[length++] = zero;
if (vue_map->slots_valid & VARYING_BIT_LAYER)
for (unsigned i = 0; i < output_components[varying]; i++)
sources[length++] = offset(this->outputs[varying], bld, i);
for (unsigned i = output_components[varying]; i < 4; i++)
- sources[length++] = fs_reg(0);
+ sources[length++] = brw_imm_d(0);
}
break;
}
const fs_builder pbld = bld.exec_all().group(8, 0);
/* Clear the message payload */
- pbld.MOV(payload, fs_reg(0u));
+ pbld.MOV(payload, brw_imm_ud(0u));
/* Copy bits 27:24 of r0.2 (barrier id) to the message payload reg.2 */
fs_reg r0_2 = fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD));
- pbld.AND(component(payload, 2), r0_2, fs_reg(0x0f000000u));
+ pbld.AND(component(payload, 2), r0_2, brw_imm_ud(0x0f000000u));
/* Emit a gateway "barrier" message using the payload we set up, followed
* by a wait instruction.
void init();
fs_reg();
- explicit fs_reg(float f);
- explicit fs_reg(int32_t i);
- explicit fs_reg(uint32_t u);
- explicit fs_reg(uint8_t vf[4]);
- explicit fs_reg(uint8_t vf0, uint8_t vf1, uint8_t vf2, uint8_t vf3);
fs_reg(struct brw_reg reg);
fs_reg(enum brw_reg_file file, int nr);
fs_reg(enum brw_reg_file file, int nr, enum brw_reg_type type);
fs_reg dest = v->vgrf(glsl_type::float_type);
fs_reg src0 = v->vgrf(glsl_type::float_type);
fs_reg src1 = v->vgrf(glsl_type::float_type);
- fs_reg zero(0.0f);
+ fs_reg zero(brw_imm_f(0.0f));
bld.ADD(dest, src0, src1);
bld.CMP(bld.null_reg_f(), dest, zero, BRW_CONDITIONAL_GE);
fs_reg dest = v->vgrf(glsl_type::float_type);
fs_reg src0 = v->vgrf(glsl_type::float_type);
fs_reg src1 = v->vgrf(glsl_type::float_type);
- fs_reg nonzero(1.0f);
+ fs_reg nonzero(brw_imm_f(1.0f));
bld.ADD(dest, src0, src1);
bld.CMP(bld.null_reg_f(), dest, nonzero, BRW_CONDITIONAL_GE);
const fs_builder &bld = v->bld;
fs_reg dest = v->vgrf(glsl_type::uint_type);
fs_reg src0 = v->vgrf(glsl_type::uint_type);
- fs_reg zero(0u);
+ fs_reg zero(brw_imm_ud(0u));
bld.FBL(dest, src0);
bld.CMP(bld.null_reg_ud(), dest, zero, BRW_CONDITIONAL_GE);
fs_reg src0 = v->vgrf(glsl_type::float_type);
fs_reg src1 = v->vgrf(glsl_type::float_type);
fs_reg src2 = v->vgrf(glsl_type::float_type);
- fs_reg zero(0.0f);
+ fs_reg zero(brw_imm_f(0.0f));
bld.ADD(dest, src0, src1);
bld.CMP(bld.null_reg_f(), src2, zero, BRW_CONDITIONAL_GE);
bld.CMP(bld.null_reg_f(), dest, zero, BRW_CONDITIONAL_GE);
fs_reg src0 = v->vgrf(glsl_type::float_type);
fs_reg src1 = v->vgrf(glsl_type::float_type);
fs_reg src2 = v->vgrf(glsl_type::float_type);
- fs_reg zero(0.0f);
+ fs_reg zero(brw_imm_f(0.0f));
bld.ADD(dest0, src0, src1);
set_predicate(BRW_PREDICATE_NORMAL, bld.SEL(dest1, src2, zero));
bld.CMP(bld.null_reg_f(), dest0, zero, BRW_CONDITIONAL_GE);
fs_reg src0 = v->vgrf(glsl_type::float_type);
fs_reg src1 = v->vgrf(glsl_type::float_type);
fs_reg src2 = v->vgrf(glsl_type::vec2_type);
- fs_reg zero(0.0f);
+ fs_reg zero(brw_imm_f(0.0f));
bld.ADD(offset(dest, bld, 2), src0, src1);
bld.emit(SHADER_OPCODE_TEX, dest, src2)
->regs_written = 4;
fs_reg src0 = v->vgrf(glsl_type::float_type);
fs_reg src1 = v->vgrf(glsl_type::float_type);
fs_reg src2 = v->vgrf(glsl_type::float_type);
- fs_reg zero(0.0f);
+ fs_reg zero(brw_imm_f(0.0f));
set_condmod(BRW_CONDITIONAL_GE, bld.ADD(dest0, src0, src1));
set_predicate(BRW_PREDICATE_NORMAL, bld.SEL(dest1, src2, zero));
bld.CMP(bld.null_reg_f(), dest0, zero, BRW_CONDITIONAL_GE);
fs_reg dest = v->vgrf(glsl_type::float_type);
fs_reg src0 = v->vgrf(glsl_type::float_type);
fs_reg src1 = v->vgrf(glsl_type::float_type);
- fs_reg zero(0.0f);
+ fs_reg zero(brw_imm_f(0.0f));
bld.ADD(dest, src0, src1);
dest.negate = true;
bld.CMP(bld.null_reg_f(), dest, zero, BRW_CONDITIONAL_GE);
fs_reg dest = v->vgrf(glsl_type::int_type);
fs_reg src0 = v->vgrf(glsl_type::int_type);
fs_reg src1 = v->vgrf(glsl_type::int_type);
- fs_reg zero(0.0f);
+ fs_reg zero(brw_imm_f(0.0f));
bld.ADD(dest, src0, src1);
bld.CMP(bld.null_reg_f(), retype(dest, BRW_REGISTER_TYPE_F), zero,
BRW_CONDITIONAL_GE);
const fs_builder &bld = v->bld;
fs_reg dest = v->vgrf(glsl_type::int_type);
fs_reg src0 = v->vgrf(glsl_type::float_type);
- fs_reg zero(0.0f);
- fs_reg one(1);
+ fs_reg zero(brw_imm_f(0.0f));
+ fs_reg one(brw_imm_d(1));
bld.CMP(retype(dest, BRW_REGISTER_TYPE_F), src0, zero, BRW_CONDITIONAL_L);
set_condmod(BRW_CONDITIONAL_NZ,
const fs_builder &bld = v->bld;
fs_reg dest = v->vgrf(glsl_type::int_type);
fs_reg src0 = v->vgrf(glsl_type::float_type);
- fs_reg zero(0.0f);
- fs_reg nonone(38);
+ fs_reg zero(brw_imm_f(0.0f));
+ fs_reg nonone(brw_imm_d(38));
bld.CMP(retype(dest, BRW_REGISTER_TYPE_F), src0, zero, BRW_CONDITIONAL_L);
set_condmod(BRW_CONDITIONAL_NZ,
const fs_builder &bld = v->bld;
fs_reg dest = v->vgrf(glsl_type::int_type);
fs_reg src0 = v->vgrf(glsl_type::float_type);
- fs_reg zero(0.0f);
- fs_reg one(1);
+ fs_reg zero(brw_imm_f(0.0f));
+ fs_reg one(brw_imm_d(1));
bld.CMP(retype(dest, BRW_REGISTER_TYPE_F), src0, zero, BRW_CONDITIONAL_L);
set_condmod(BRW_CONDITIONAL_Z,