cr_logical,mfcr,mfcrf,mtcr,
fpcompare,fp,fpsimple,dmul,qmul,sdiv,ddiv,ssqrt,dsqrt,
vecsimple,veccomplex,vecdiv,veccmp,veccmpsimple,vecperm,
- vecfloat,vecfdiv,vecdouble,mffgpr,mftgpr,crypto,
+ vecfloat,vecfdiv,vecdouble,mtvsr,mfvsr,crypto,
veclogical,veccmpfx,vecexts,vecmove,
htm,htmsimple,dfp,mma"
(const_string "integer"))
mtvsrwz %x0,%1
mfvsrwz %0,%x1
xxextractuw %x0,%x1,4"
- [(set_attr "type" "load,shift,fpload,fpload,mffgpr,mftgpr,vecexts")
+ [(set_attr "type" "load,shift,fpload,fpload,mtvsr,mfvsr,vecexts")
(set_attr "isa" "*,*,p7,p8v,p8v,p8v,p9v")])
(define_insn_and_split "*zero_extendsi<mode>2_dot"
vextsw2d %0,%1
#
#"
- [(set_attr "type" "load,exts,fpload,fpload,mffgpr,vecexts,vecperm,mftgpr")
+ [(set_attr "type" "load,exts,fpload,fpload,mtvsr,vecexts,vecperm,mfvsr")
(set_attr "sign_extend" "yes")
(set_attr "length" "*,*,*,*,*,*,8,8")
(set_attr "isa" "*,*,p6,p8v,p8v,p9v,p8v,p8v")])
{
operands[2] = gen_highpart (DImode, operands[1]);
}
- [(set_attr "type" "mftgpr,*")])
+ [(set_attr "type" "mfvsr,*")])
;; Optimize IEEE 128-bit signbit to avoid loading the value into a vector
;; register and then doing a direct move if the value comes from memory. On
lxsiwax %x0,%y1
mtvsrwa %x0,%1
vextsw2d %0,%1"
- [(set_attr "type" "fpload,fpload,mffgpr,vecexts")
+ [(set_attr "type" "fpload,fpload,mtvsr,vecexts")
(set_attr "isa" "*,p8v,p8v,p9v")])
; This split must be run before register allocation because it allocates the
lxsiwzx %x0,%y1
mtvsrwz %x0,%1
xxextractuw %x0,%x1,4"
- [(set_attr "type" "fpload,fpload,mffgpr,vecexts")
+ [(set_attr "type" "fpload,fpload,mtvsr,vecexts")
(set_attr "isa" "*,p8v,p8v,p9v")])
(define_insn_and_split "floatunssi<mode>2_lfiwzx"
*, *, *, *,
veclogical, vecsimple, vecsimple, vecsimple,
veclogical, veclogical, vecsimple,
- mffgpr, mftgpr,
+ mtvsr, mfvsr,
*, *, *")
(set_attr "length"
"*, *,
}
[(set_attr "type"
"*, load, fpload, fpload, store,
- fpstore, fpstore, fpstore, mftgpr, fp,
- mffgpr")
+ fpstore, fpstore, fpstore, mfvsr, fp,
+ mtvsr")
(set_attr "length"
"*, *, *, *, *,
*, *, *, 8, *,
}
[(set_attr "type"
"*, load, fpload, fpload, two,
- two, mffgpr")
+ two, mtvsr")
(set_attr "length"
"*, *, *, *, 8,
8, *")
nop"
[(set_attr "type"
"*, load, fpload, store, fpstore, *,
- vecsimple, vecperm, vecperm, vecperm, vecperm, mftgpr,
- mffgpr, mfjmpr, mtjmpr, *")
+ vecsimple, vecperm, vecperm, vecperm, vecperm, mfvsr,
+ mtvsr, mfjmpr, mtjmpr, *")
(set_attr "length"
"*, *, *, *, *, *,
*, *, *, *, 8, *,
mf%1 %0
nop"
[(set_attr "type"
- "load, fpload, store, fpstore, mffgpr, mftgpr,
+ "load, fpload, store, fpstore, mtvsr, mfvsr,
fpsimple, *, mtjmpr, mfjmpr, *")
(set_attr "isa"
"*, p7, *, *, p8v, p8v,
*, 12, *, *")
(set_attr "type"
"load, fpload, fpload, fpload, store, fpstore,
- fpstore, vecfloat, mftgpr, *")
+ fpstore, vecfloat, mfvsr, *")
(set_attr "isa"
"*, *, p9v, p8v, *, *,
p8v, p8v, p8v, *")])
"fpstore, fpload, fpsimple, fpload, fpstore,
fpload, fpstore, veclogical, veclogical, integer,
store, load, *, mtjmpr, mfjmpr,
- *, mftgpr, mffgpr")
+ *, mfvsr, mtvsr")
(set_attr "size" "64")
(set_attr "isa"
"*, *, *, p9v, p9v,
UNSPEC_P8V_MTVSRWZ))]
"!TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"mtvsrwz %x0,%1"
- [(set_attr "type" "mffgpr")])
+ [(set_attr "type" "mtvsr")])
(define_insn_and_split "reload_fpr_from_gpr<mode>"
[(set (match_operand:FMOVE64X 0 "register_operand" "=d")
UNSPEC_P8V_MTVSRD))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"mtvsrd %x0,%1"
- [(set_attr "type" "mftgpr")])
+ [(set_attr "type" "mtvsr")])
(define_insn "p8_xxpermdi_<mode>"
[(set (match_operand:FMOVE128_GPR 0 "register_operand" "=wa")
UNSPEC_P8V_MTVSRD))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"mtvsrd %x0,%1"
- [(set_attr "type" "mffgpr")])
+ [(set_attr "type" "mtvsr")])
(define_insn_and_split "reload_vsx_from_gprsf"
[(set (match_operand:SF 0 "register_operand" "=wa")
UNSPEC_P8V_RELOAD_FROM_VSX))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"mfvsrd %0,%x1"
- [(set_attr "type" "mftgpr")])
+ [(set_attr "type" "mfvsr")])
(define_insn_and_split "reload_gpr_from_vsx<mode>"
[(set (match_operand:FMOVE128_GPR 0 "register_operand" "=r")
vecsimple, vecsimple, vecsimple, veclogical, veclogical,
vecsimple, vecsimple,
mfjmpr, mtjmpr, *,
- mftgpr, mffgpr")
+ mfvsr, mtvsr")
(set_attr "size" "64")
(set_attr "length"
"*, *, *,
operands[3] = gen_rtx_REG (<FP128_64>mode, fp_regno);
}
- [(set_attr "type" "fp,fpstore,mffgpr,mftgpr,store")])
+ [(set_attr "type" "fp,fpstore,mtvsr,mfvsr,store")])
(define_insn_and_split "unpack<mode>_nodm"
[(set (match_operand:<FP128_64> 0 "nonimmediate_operand" "=d,m")
return rs6000_output_move_128bit (operands);
}
[(set_attr "type"
- "vecstore, vecload, vecsimple, mffgpr, mftgpr, load,
+ "vecstore, vecload, vecsimple, mtvsr, mfvsr, load,
store, load, store, *, vecsimple, vecsimple,
vecsimple, *, *, vecstore, vecload")
(set_attr "num_insns"
else
gcc_unreachable ();
}
- [(set_attr "type" "veclogical,mftgpr,mftgpr,vecperm")
+ [(set_attr "type" "veclogical,mfvsr,mfvsr,vecperm")
(set_attr "isa" "*,*,p8v,p9v")])
;; Optimize extracting a single scalar element from memory.
DONE;
}
- [(set_attr "type" "mftgpr,vecperm,fpstore")
+ [(set_attr "type" "mfvsr,vecperm,fpstore")
(set_attr "length" "8")
(set_attr "isa" "*,p8v,*")])
gen_rtx_REG (DImode, REGNO (vec_tmp)));
DONE;
}
- [(set_attr "type" "mftgpr")])
+ [(set_attr "type" "mfvsr")])
;; Optimize extracting a single scalar element from memory.
(define_insn_and_split "*vsx_extract_<mode>_load"