The mul64 attribute in types.md causes some confusion: it represents
aarch32 instructions that perform a widening multiplication, producing a
64-bit result from 32-bit operands.
These types are shared with aarch64, however, which also has native
64-bit multiplication instructions.
Those are currently not properly represented; I will fix that in follow-up patches.
For now, this patch renames the mul64 attribute to widen_mul64 to communicate its meaning more clearly.
It mechanically updates all users of the old name in config/arm/ (there are no users in config/aarch64).
There is thus no change in behaviour.
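
To illustrate the distinction the rename captures, here is a minimal C
sketch (illustration only, not part of the patch; the function names are
invented):

    /* Illustration only: the two kinds of 64-bit multiply the message
       distinguishes.  Function names are invented for this sketch.  */
    #include <stdint.h>

    /* aarch32 widening multiply: 32-bit operands, 64-bit result.
       This is what SMULL/UMULL do and what widen_mul64 now describes.  */
    int64_t
    widening_mul (int32_t a, int32_t b)
    {
      return (int64_t) a * (int64_t) b;
    }

    /* Native 64-bit multiply: 64-bit operands, 64-bit result.
       aarch64 MUL on X registers does this; it is not yet modelled
       separately and is left to the follow-up patches.  */
    int64_t
    native_mul64 (int64_t a, int64_t b)
    {
      return a * b;
    }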
* config/arm/types.md (mul64): Rename to...
(widen_mul64): ... This.
* config/arm/arm-generic.md: Rename mul64 to widen_mul64.
* config/arm/cortex-a15.md: Likewise.
* config/arm/cortex-a5.md: Likewise.
* config/arm/cortex-a53.md: Likewise.
* config/arm/cortex-a57.md: Likewise.
* config/arm/cortex-a7.md: Likewise.
* config/arm/cortex-m4.md: Likewise.
* config/arm/exynos-m1.md: Likewise.
* config/arm/marvell-pj4.md: Likewise.
* config/arm/xgene1.md: Likewise.
From-SVN: r266471
+2018-11-26 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * config/arm/types.md (mul64): Rename to...
+ (widen_mul64): ... This.
+ * config/arm/arm-generic.md: Rename mul64 to widen_mul64.
+ * config/arm/cortex-a15.md: Likewise.
+ * config/arm/cortex-a5.md: Likewise.
+ * config/arm/cortex-a53.md: Likewise.
+ * config/arm/cortex-a57.md: Likewise.
+ * config/arm/cortex-a7.md: Likewise.
+ * config/arm/cortex-m4.md: Likewise.
+ * config/arm/exynos-m1.md: Likewise.
+ * config/arm/marvell-pj4.md: Likewise.
+ * config/arm/xgene1.md: Likewise.
+
2018-11-26 Richard Biener <rguenther@suse.de>
PR tree-optimization/88182
(and (eq_attr "generic_sched" "yes")
(and (eq_attr "ldsched" "no")
(ior (eq_attr "mul32" "yes")
- (eq_attr "mul64" "yes"))))
+ (eq_attr "widen_mul64" "yes"))))
"core*16")
(define_insn_reservation "mult_ldsched_strongarm" 3
(and (eq_attr "ldsched" "yes")
(and (eq_attr "tune" "strongarm")
(ior (eq_attr "mul32" "yes")
- (eq_attr "mul64" "yes")))))
+ (eq_attr "widen_mul64" "yes")))))
"core*2")
(define_insn_reservation "mult_ldsched" 4
(and (eq_attr "ldsched" "yes")
(and (eq_attr "tune" "!strongarm")
(ior (eq_attr "mul32" "yes")
- (eq_attr "mul64" "yes")))))
+ (eq_attr "widen_mul64" "yes")))))
"core*4")
(define_insn_reservation "multi_cycle" 32
(and (eq_attr "type" "!load_byte,load_4,load_8,load_12,load_16,\
store_4,store_8,store_12,store_16")
(not (ior (eq_attr "mul32" "yes")
- (eq_attr "mul64" "yes"))))))
+ (eq_attr "widen_mul64" "yes"))))))
"core*32")
(define_insn_reservation "single_cycle" 1
;; 64-bit multiplies
(define_insn_reservation "cortex_a15_mult64" 4
(and (eq_attr "tune" "cortexa15")
- (eq_attr "mul64" "yes"))
+ (eq_attr "widen_mul64" "yes"))
"ca15_issue1,ca15_mx*2")
;; Integer divide
(define_insn_reservation "cortex_a5_mul" 2
(and (eq_attr "tune" "cortexa5")
(ior (eq_attr "mul32" "yes")
- (eq_attr "mul64" "yes")))
+ (eq_attr "widen_mul64" "yes")))
"cortex_a5_ex1")
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(define_insn_reservation "cortex_a53_mul" 4
(and (eq_attr "tune" "cortexa53")
(ior (eq_attr "mul32" "yes")
- (eq_attr "mul64" "yes")))
+ (eq_attr "widen_mul64" "yes")))
"cortex_a53_slot_any+cortex_a53_imul")
;; From the perspective of the GCC scheduling state machine, if we wish to
(define_insn_reservation "cortex_a57_mult32" 3
(and (eq_attr "tune" "cortexa57")
(ior (eq_attr "mul32" "yes")
- (eq_attr "mul64" "yes")))
+ (eq_attr "widen_mul64" "yes")))
"ca57_mx")
;; Integer divide
(define_insn_reservation "cortex_a7_mul" 2
(and (eq_attr "tune" "cortexa7")
(ior (eq_attr "mul32" "yes")
- (eq_attr "mul64" "yes")))
+ (eq_attr "widen_mul64" "yes")))
"cortex_a7_both")
;; Forward the result of a multiply operation to the accumulator
mvn_imm,mvn_reg,mvn_shift,mvn_shift_reg,\
mrs,multiple,no_insn")
(ior (eq_attr "mul32" "yes")
- (eq_attr "mul64" "yes"))))
+ (eq_attr "widen_mul64" "yes"))))
"cortex_m4_ex")
;; Byte, half-word and word load is two cycles.
(define_insn_reservation "exynos_m1_mlal" 4
(and (eq_attr "tune" "exynosm1")
- (eq_attr "mul64" "yes"))
+ (eq_attr "widen_mul64" "yes"))
"em1_alu, em1_c")
;; Integer divide
(define_insn_reservation "pj4_ir_mul" 3
(and (eq_attr "tune" "marvell_pj4")
(ior (eq_attr "mul32" "yes")
- (eq_attr "mul64" "yes")))
- "pj4_is,pj4_mul,nothing*2,pj4_cp")
+ (eq_attr "widen_mul64" "yes")))
+ "pj4_is,pj4_mul,nothing*2,pj4_cp")
(define_insn_reservation "pj4_ir_div" 20
(and (eq_attr "tune" "marvell_pj4")
(const_string "yes")
(const_string "no")))
-; Is this an (integer side) multiply with a 64-bit result?
-(define_attr "mul64" "no,yes"
+; Is this an (integer side) widening multiply with a 64-bit result?
+(define_attr "widen_mul64" "no,yes"
(if_then_else
(eq_attr "type"
"smlalxy,umull,umulls,umaal,umlal,umlals,smull,smulls,smlal,smlals")
(eq_attr "mul32" "yes"))
"xgene1_decode2op, xgene1_IXB + xgene1_multiply, xgene1_multiply, nothing, xgene1_IXB_compl")
-(define_insn_reservation "xgene1_mul64" 5
+(define_insn_reservation "xgene1_widen_mul64" 5
(and (eq_attr "tune" "xgene1")
- (eq_attr "mul64" "yes"))
+ (eq_attr "widen_mul64" "yes"))
"xgene1_decode2op, xgene1_IXB + xgene1_multiply, xgene1_multiply, nothing*2, xgene1_IXB_compl")
(define_insn_reservation "xgene1_div" 34