return src_reg(component(dst, 0));
}
+ /* Copy @num_components consecutive components of @src into a freshly
+  * allocated VGRF of the same type via a LOAD_PAYLOAD instruction, and
+  * return the new register as a src_reg.
+  *
+  * NOTE(review): each source component is taken at offset(src,
+  * dispatch_width(), i) — i.e. components are assumed to be laid out
+  * dispatch_width() channels apart; confirm against the builder's
+  * offset() helper.
+  */
+ src_reg
+ move_to_vgrf(const src_reg &src, unsigned num_components) const
+ {
+ /* Gather per-component sources for LOAD_PAYLOAD in a temporary array. */
+ src_reg *const src_comps = new src_reg[num_components];
+ for (unsigned i = 0; i < num_components; i++)
+ src_comps[i] = offset(src, dispatch_width(), i);
+
+ const dst_reg dst = vgrf(src.type, num_components);
+ /* Header size 0: all num_components entries are payload data. */
+ LOAD_PAYLOAD(dst, src_comps, num_components, 0);
+
+ delete[] src_comps;
+
+ return src_reg(dst);
+ }
+
void
emit_scan(enum opcode opcode, const dst_reg &tmp,
unsigned cluster_size, brw_conditional_mod mod) const
return inst;
}
+ /* Emit a SHADER_OPCODE_UNDEF marking the whole VGRF @dst as undefined
+  * (written without any defined value), and return the new instruction.
+  *
+  * The destination is retyped to UD; size_written is set to cover the
+  * entire allocation of dst.nr as recorded in the register allocator
+  * (alloc.sizes), not just one SIMD-width worth of data.
+  */
+ instruction *
+ UNDEF(const dst_reg &dst) const
+ {
+ assert(dst.file == VGRF);
+ instruction *inst = emit(SHADER_OPCODE_UNDEF,
+ retype(dst, BRW_REGISTER_TYPE_UD));
+ inst->size_written = shader->alloc.sizes[dst.nr] * REG_SIZE;
+
+ return inst;
+ }
+
backend_shader *shader;
private:
/* Legalize @src for use as a source of a three-source instruction.
 *
 * Sources whose register file can be consumed directly (ATTR, VGRF,
 * UNIFORM, IMM, and FIXED_GRF with the default <8;8,1> region) are
 * returned unchanged; anything else is copied into a temporary VGRF
 * with a MOV and the temporary is returned instead.
 */
src_reg
fix_3src_operand(const src_reg &src) const
{
- if (src.file == VGRF || src.file == UNIFORM || src.stride > 1) {
+ switch (src.file) {
+ case FIXED_GRF:
+ /* FINISHME: Could handle scalar region, other stride=1 regions */
+ if (src.vstride != BRW_VERTICAL_STRIDE_8 ||
+ src.width != BRW_WIDTH_8 ||
+ src.hstride != BRW_HORIZONTAL_STRIDE_1)
+ break;
+ /* fallthrough */
+ case ATTR:
+ case VGRF:
+ case UNIFORM:
+ case IMM:
return src;
- } else {
- dst_reg expanded = vgrf(src.type);
- MOV(expanded, src);
- return expanded;
+ default:
+ break;
}
+
+ /* Not directly usable: materialize the value in a VGRF. */
+ dst_reg expanded = vgrf(src.type);
+ MOV(expanded, src);
+ return expanded;
}
/**