+2004-07-07 Alexandre Oliva <aoliva@redhat.com>
+
+ * builtins.c: Rename movstr*, except for movstrict*, to
+ movmem*, and clrstr* to clrmem*.
+ * expr.c: Likewise.
+ * expr.h: Likewise.
+ * genopinit.c: Likewise.
+ * integrate.c: Likewise.
+ * local-alloc.c: Likewise.
+ * optabs.c: Likewise.
+ * optabs.h: Likewise.
+ * config/alpha/alpha.h: Likewise.
+ * config/alpha/alpha.md: Likewise.
+ * config/arm/arm-protos.h: Likewise.
+ * config/arm/arm.c: Likewise.
+ * config/arm/arm.md: Likewise.
+ * config/avr/avr.md: Likewise.
+ * config/c4x/c4x.c: Likewise.
+ * config/c4x/c4x.md: Likewise.
+ * config/frv/frv.md: Likewise.
+ * config/i386/i386-protos.h: Likewise.
+ * config/i386/i386.c: Likewise.
+ * config/i386/i386.h: Likewise.
+ * config/i386/i386.md: Likewise.
+ * config/i860/i860.c: Likewise.
+ * config/i860/i860.md: Likewise.
+ * config/ip2k/ip2k.md: Likewise.
+ * config/ip2k/libgcc.S: Likewise.
+ * config/ip2k/t-ip2k: Likewise.
+ * config/m32r/m32r.c: Likewise.
+ * config/m32r/m32r.md: Likewise.
+ * config/mcore/mcore.md: Likewise.
+ * config/mips/mips.c: Likewise.
+ * config/mips/mips.md: Likewise.
+ * config/ns32k/ns32k.c: Likewise.
+ * config/ns32k/ns32k.h: Likewise.
+ * config/ns32k/ns32k.md: Likewise.
+ * config/pa/pa.c: Likewise.
+ * config/pa/pa.md: Likewise.
+ * config/pdp11/pdp11.h: Likewise.
+ * config/pdp11/pdp11.md: Likewise.
+ * config/rs6000/rs6000.c: Likewise.
+ * config/rs6000/rs6000.md: Likewise.
+ * config/s390/s390-protos.h: Likewise.
+ * config/s390/s390.c: Likewise.
+ * config/s390/s390.md: Likewise.
+ * config/sh/lib1funcs.asm: Likewise.
+ * config/sh/sh.c: Likewise.
+ * config/sh/sh.md: Likewise.
+ * config/sh/t-sh: Likewise.
+ * config/sparc/sparc.h: Likewise.
+ * config/vax/vax.md: Likewise.
+ * config/xtensa/xtensa.c: Likewise.
+ * config/xtensa/xtensa.md: Likewise.
+ * doc/invoke.texi: Likewise.
+ * doc/md.texi: Likewise.
+ * doc/rtl.texi: Likewise.
+
2004-07-07 Richard Sandiford <rsandifo@redhat.com>
PR target/16407
}
/* Get a MEM rtx for expression EXP which is the address of an operand
- to be used to be used in a string instruction (cmpstrsi, movstrsi, ..). */
+ to be used in a string instruction (cmpstrsi, movmemsi, ...). */
static rtx
get_memory_rtx (tree exp)
#define MOVE_MAX 8
/* If a memory-to-memory move would take MOVE_RATIO or more simple
- move-instruction pairs, we will do a movstr or libcall instead.
+ move-instruction pairs, we will do a movmem or libcall instead.
Without byte/word accesses, we want no more than four instructions;
with, several single byte accesses are better. */
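For context, MOVE_RATIO feeds the block-move dispatch in expr.c (also touched by this patch): open-coded moves first, then the target's movmem pattern, then a library call. A minimal C sketch of that chain, using the real expr.c routine names but a hypothetical wrapper, assuming the usual rtl.h/expr.h environment:

/* Illustrative only; mirrors the emit_block_move logic shown later
   in this patch, it is not itself GCC source.  */
static rtx
sketch_block_move (rtx x, rtx y, rtx size, unsigned int align)
{
  if (GET_CODE (size) == CONST_INT
      && MOVE_BY_PIECES_P (INTVAL (size), align))
    move_by_pieces (x, y, INTVAL (size), align, 0);  /* open-coded moves */
  else if (emit_block_move_via_movmem (x, y, size, align))
    ;                                     /* target's movmem pattern */
  else
    return emit_block_move_via_libcall (x, y, size); /* memcpy call */
  return NULL_RTX;
}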
;; Argument 2 is the length
;; Argument 3 is the alignment
-(define_expand "movstrqi"
+(define_expand "movmemqi"
[(parallel [(set (match_operand:BLK 0 "memory_operand" "")
(match_operand:BLK 1 "memory_operand" ""))
(use (match_operand:DI 2 "immediate_operand" ""))
FAIL;
})
-(define_expand "movstrdi"
+(define_expand "movmemdi"
[(parallel [(set (match_operand:BLK 0 "memory_operand" "")
(match_operand:BLK 1 "memory_operand" ""))
(use (match_operand:DI 2 "immediate_operand" ""))
alpha_need_linkage (XSTR (operands[4], 0), 0);
})
-(define_insn "*movstrdi_1"
+(define_insn "*movmemdi_1"
[(set (match_operand:BLK 0 "memory_operand" "=m,=m")
(match_operand:BLK 1 "memory_operand" "m,m"))
(use (match_operand:DI 2 "nonmemory_operand" "r,i"))
[(set_attr "type" "multi")
(set_attr "length" "28")])
-(define_expand "clrstrqi"
+(define_expand "clrmemqi"
[(parallel [(set (match_operand:BLK 0 "memory_operand" "")
(const_int 0))
(use (match_operand:DI 1 "immediate_operand" ""))
FAIL;
})
-(define_expand "clrstrdi"
+(define_expand "clrmemdi"
[(parallel [(set (match_operand:BLK 0 "memory_operand" "")
(const_int 0))
(use (match_operand:DI 1 "immediate_operand" ""))
alpha_need_linkage (XSTR (operands[3], 0), 0);
})
-(define_insn "*clrstrdi_1"
+(define_insn "*clrmemdi_1"
[(set (match_operand:BLK 0 "memory_operand" "=m,=m")
(const_int 0))
(use (match_operand:DI 1 "nonmemory_operand" "r,i"))
extern const char * emit_stm_seq (rtx *, int);
extern rtx arm_gen_load_multiple (int, int, rtx, int, int, int, int, int);
extern rtx arm_gen_store_multiple (int, int, rtx, int, int, int, int, int);
-extern int arm_gen_movstrqi (rtx *);
+extern int arm_gen_movmemqi (rtx *);
extern rtx arm_gen_rotated_half_load (rtx);
extern enum machine_mode arm_select_cc_mode (RTX_CODE, rtx, rtx);
extern enum machine_mode arm_select_dominance_cc_mode (rtx, rtx,
extern void thumb_final_prescan_insn (rtx);
extern const char *thumb_load_double_from_address (rtx *);
extern const char *thumb_output_move_mem_multiple (int, rtx *);
-extern void thumb_expand_movstrqi (rtx *);
+extern void thumb_expand_movmemqi (rtx *);
extern int thumb_cmp_operand (rtx, enum machine_mode);
extern int thumb_cbrch_target_operand (rtx, enum machine_mode);
extern rtx *thumb_legitimize_pic_address (rtx, enum machine_mode, rtx);
}
int
-arm_gen_movstrqi (rtx *operands)
+arm_gen_movmemqi (rtx *operands)
{
HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
int i;
/* Routines for generating rtl. */
void
-thumb_expand_movstrqi (rtx *operands)
+thumb_expand_movmemqi (rtx *operands)
{
rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
;; We could let this apply for blocks of less than this, but it clobbers so
;; many registers that there is then probably a better way.
-(define_expand "movstrqi"
+(define_expand "movmemqi"
[(match_operand:BLK 0 "general_operand" "")
(match_operand:BLK 1 "general_operand" "")
(match_operand:SI 2 "const_int_operand" "")
"
if (TARGET_ARM)
{
- if (arm_gen_movstrqi (operands))
+ if (arm_gen_movmemqi (operands))
DONE;
FAIL;
}
|| INTVAL (operands[2]) > 48)
FAIL;
- thumb_expand_movstrqi (operands);
+ thumb_expand_movmemqi (operands);
DONE;
}
"
;;=========================================================================
;; move string (like memcpy)
-(define_expand "movstrhi"
+(define_expand "movmemhi"
[(parallel [(set (match_operand:BLK 0 "memory_operand" "")
(match_operand:BLK 1 "memory_operand" ""))
(use (match_operand:HI 2 "const_int_operand" ""))
operands[1] = gen_rtx_MEM (BLKmode, addr1);
}")
-(define_insn "*movstrqi_insn"
+(define_insn "*movmemqi_insn"
[(set (mem:BLK (match_operand:HI 0 "register_operand" "e"))
(mem:BLK (match_operand:HI 1 "register_operand" "e")))
(use (match_operand:QI 2 "register_operand" "r"))
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
-(define_insn "*movstrhi"
+(define_insn "*movmemhi"
[(set (mem:BLK (match_operand:HI 0 "register_operand" "e,e"))
(mem:BLK (match_operand:HI 1 "register_operand" "e,e")))
(use (match_operand:HI 2 "register_operand" "!w,d"))
;; =0 =0 =0 =0 =0 =0 =0 =0 =0 =0 =0 =0 =0 =0 =0 =0 =0 =0 =0 =0 =0 =0 =0 =0
;; memset (%0, 0, %1)
-(define_expand "clrstrhi"
+(define_expand "clrmemhi"
[(parallel [(set (match_operand:BLK 0 "memory_operand" "")
(const_int 0))
(use (match_operand:HI 1 "const_int_operand" ""))
operands[0] = gen_rtx_MEM (BLKmode, addr0);
}")
-(define_insn "*clrstrqi"
+(define_insn "*clrmemqi"
[(set (mem:BLK (match_operand:HI 0 "register_operand" "e"))
(const_int 0))
(use (match_operand:QI 1 "register_operand" "r"))
[(set_attr "length" "3")
(set_attr "cc" "clobber")])
-(define_insn "*clrstrhi"
+(define_insn "*clrmemhi"
[(set (mem:BLK (match_operand:HI 0 "register_operand" "e,e"))
(const_int 0))
(use (match_operand:HI 1 "register_operand" "!w,d"))
rtx count_reg;
/* If the count register has not been allocated to RC, say if
- there is a movstr pattern in the loop, then do not insert a
+ there is a movmem pattern in the loop, then do not insert an
RPTB instruction. Instead we emit a decrement and branch
at the end of the loop. */
count_reg = XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn), 0, 0)), 0), 0);
"0"
"")
-(define_expand "movstrqi_small"
+(define_expand "movmemqi_small"
[(parallel [(set (mem:BLK (match_operand:BLK 0 "src_operand" ""))
(mem:BLK (match_operand:BLK 1 "src_operand" "")))
(use (match_operand:QI 2 "immediate_operand" ""))
; operand 3 is the shared alignment
; operand 4 is a scratch register
-(define_insn "movstrqi_large"
+(define_insn "movmemqi_large"
[(set (mem:BLK (match_operand:QI 0 "addr_reg_operand" "a"))
(mem:BLK (match_operand:QI 1 "addr_reg_operand" "a")))
(use (match_operand:QI 2 "immediate_operand" "i"))
[(set_attr "type" "multi")])
; Operand 2 is the count, operand 3 is the alignment.
-(define_expand "movstrqi"
+(define_expand "movmemqi"
[(parallel [(set (mem:BLK (match_operand:BLK 0 "src_operand" ""))
(mem:BLK (match_operand:BLK 1 "src_operand" "")))
(use (match_operand:QI 2 "immediate_operand" ""))
tmp = gen_reg_rtx (QImode);
/* Disabled because of reload problems. */
if (0 && INTVAL (operands[2]) < 8)
- emit_insn (gen_movstrqi_small (operands[0], operands[1], operands[2],
+ emit_insn (gen_movmemqi_small (operands[0], operands[1], operands[2],
operands[3], tmp));
else
{
- emit_insn (gen_movstrqi_large (operands[0], operands[1], operands[2],
+ emit_insn (gen_movmemqi_large (operands[0], operands[1], operands[2],
operands[3], tmp));
}
DONE;
;; Argument 2 is the length
;; Argument 3 is the alignment
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(parallel [(set (match_operand:BLK 0 "" "")
(match_operand:BLK 1 "" ""))
(use (match_operand:SI 2 "" ""))
;; Argument 1 is the length
;; Argument 2 is the alignment
-(define_expand "clrstrsi"
+(define_expand "clrmemsi"
[(parallel [(set (match_operand:BLK 0 "" "")
(const_int 0))
(use (match_operand:SI 1 "" ""))
extern int aligned_operand (rtx, enum machine_mode);
extern enum machine_mode ix86_cc_mode (enum rtx_code, rtx, rtx);
-extern int ix86_expand_movstr (rtx, rtx, rtx, rtx);
-extern int ix86_expand_clrstr (rtx, rtx, rtx);
+extern int ix86_expand_movmem (rtx, rtx, rtx, rtx);
+extern int ix86_expand_clrmem (rtx, rtx, rtx);
extern int ix86_expand_strlen (rtx, rtx, rtx, rtx);
extern bool legitimate_constant_p (rtx);
}
/* Expand string move (memcpy) operation. Use i386 string operations when
- profitable. expand_clrstr contains similar code. */
+ profitable. ix86_expand_clrmem contains similar code. */
int
-ix86_expand_movstr (rtx dst, rtx src, rtx count_exp, rtx align_exp)
+ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
{
rtx srcreg, destreg, countreg, srcexp, destexp;
enum machine_mode counter_mode;
}
/* Expand string clear operation (bzero). Use i386 string operations when
- profitable. expand_movstr contains similar code. */
+ profitable. ix86_expand_movmem contains similar code. */
int
-ix86_expand_clrstr (rtx dst, rtx count_exp, rtx align_exp)
+ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
{
rtx destreg, zeroreg, countreg, destexp;
enum machine_mode counter_mode;
#define MOVE_MAX_PIECES (TARGET_64BIT ? 8 : 4)
/* If a memory-to-memory move would take MOVE_RATIO or more simple
- move-instruction pairs, we will do a movstr or libcall instead.
+ move-instruction pairs, we will do a movmem or libcall instead.
Increasing the value will always make code faster, but eventually
incurs high cost in increased code size.
"cld"
[(set_attr "type" "cld")])
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(use (match_operand:BLK 0 "memory_operand" ""))
(use (match_operand:BLK 1 "memory_operand" ""))
(use (match_operand:SI 2 "nonmemory_operand" ""))
(use (match_operand:SI 3 "const_int_operand" ""))]
"! optimize_size"
{
- if (ix86_expand_movstr (operands[0], operands[1], operands[2], operands[3]))
+ if (ix86_expand_movmem (operands[0], operands[1], operands[2], operands[3]))
DONE;
else
FAIL;
})
-(define_expand "movstrdi"
+(define_expand "movmemdi"
[(use (match_operand:BLK 0 "memory_operand" ""))
(use (match_operand:BLK 1 "memory_operand" ""))
(use (match_operand:DI 2 "nonmemory_operand" ""))
(use (match_operand:DI 3 "const_int_operand" ""))]
"TARGET_64BIT"
{
- if (ix86_expand_movstr (operands[0], operands[1], operands[2], operands[3]))
+ if (ix86_expand_movmem (operands[0], operands[1], operands[2], operands[3]))
DONE;
else
FAIL;
(set_attr "memory" "both")
(set_attr "mode" "SI")])
-(define_expand "clrstrsi"
+(define_expand "clrmemsi"
[(use (match_operand:BLK 0 "memory_operand" ""))
(use (match_operand:SI 1 "nonmemory_operand" ""))
(use (match_operand 2 "const_int_operand" ""))]
""
{
- if (ix86_expand_clrstr (operands[0], operands[1], operands[2]))
+ if (ix86_expand_clrmem (operands[0], operands[1], operands[2]))
DONE;
else
FAIL;
})
-(define_expand "clrstrdi"
+(define_expand "clrmemdi"
[(use (match_operand:BLK 0 "memory_operand" ""))
(use (match_operand:DI 1 "nonmemory_operand" ""))
(use (match_operand 2 "const_int_operand" ""))]
"TARGET_64BIT"
{
- if (ix86_expand_clrstr (operands[0], operands[1], operands[2]))
+ if (ix86_expand_clrmem (operands[0], operands[1], operands[2]))
DONE;
else
FAIL;
#if 0
rtx zoperands[10];
#endif
- static int movstrsi_label = 0;
+ static int movmemsi_label = 0;
int i;
rtx temp1 = operands[4];
rtx alignrtx = operands[3];
/* Generate number for unique label. */
- xoperands[3] = GEN_INT (movstrsi_label++);
+ xoperands[3] = GEN_INT (movmemsi_label++);
/* Calculate the size of the chunks we will be trying to move first. */
;; but it should suffice
;; that anything generated as this insn will be recognized as one
;; and that it won't successfully combine with anything.
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(parallel [(set (match_operand:BLK 0 "general_operand" "")
(match_operand:BLK 1 "general_operand" ""))
(use (match_operand:SI 2 "nonmemory_operand" ""))
;; Copy a block of bytes (memcpy()). We expand the definition to convert
;; our memory operand into a register pointer operand instead.
;;
-(define_expand "movstrhi"
+(define_expand "movmemhi"
[(use (match_operand:BLK 0 "memory_operand" ""))
(use (match_operand:BLK 1 "memory_operand" ""))
(use (match_operand:HI 2 "general_operand" ""))
else
count = operands[2];
- emit_insn (gen_movstrhi_expanded (addr0, count, addr1));
+ emit_insn (gen_movmemhi_expanded (addr0, count, addr1));
DONE;
}")
;; the general case where we have either a variable block size or one that is
;; greater than 255 bytes.
;;
-(define_insn "movstrhi_expanded"
+(define_insn "movmemhi_expanded"
[(set
(mem:BLK
(match_operand:HI 0 "nonimmediate_operand" "rS,ro,rS, rS, ro, rS"))
(match_operand:HI 1 "general_operand" "P, P, P,rSi,rSi,roi"))]
""
"@
- push\\t%L1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>\;page\\t__movstrhi_countqi\;call\\t__movstrhi_countqi
- push\\t%L1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>\;page\\t__movstrhi_countqi\;call\\t__movstrhi_countqi
- push\\t%L1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>\;page\\t__movstrhi_countqi\;call\\t__movstrhi_countqi
- push\\t%L1%<\;push\\t%H1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>%>\;page\\t__movstrhi_counthi\;call\\t__movstrhi_counthi
- push\\t%L1%<\;push\\t%H1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>%>\;page\\t__movstrhi_counthi\;call\\t__movstrhi_counthi
- push\\t%L1%<\;push\\t%H1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>%>\;page\\t__movstrhi_counthi\;call\\t__movstrhi_counthi")
+ push\\t%L1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>\;page\\t__movmemhi_countqi\;call\\t__movmemhi_countqi
+ push\\t%L1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>\;page\\t__movmemhi_countqi\;call\\t__movmemhi_countqi
+ push\\t%L1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>\;page\\t__movmemhi_countqi\;call\\t__movmemhi_countqi
+ push\\t%L1%<\;push\\t%H1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>%>\;page\\t__movmemhi_counthi\;call\\t__movmemhi_counthi
+ push\\t%L1%<\;push\\t%H1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>%>\;page\\t__movmemhi_counthi\;call\\t__movmemhi_counthi
+ push\\t%L1%<\;push\\t%H1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>%>\;page\\t__movmemhi_counthi\;call\\t__movmemhi_counthi")
\f
;; Bit insert
.endfunc
#endif /* L_leaf_fp_pop_args_ret */
-#if defined(L_movstrhi_countqi)
+#if defined(L_movmemhi_countqi)
.sect .pram.libgcc,"ax"
- .global __movstrhi_countqi
- .func _movstrhi_countqi, __movstrhi_countqi
+ .global __movmemhi_countqi
+ .func _movmemhi_countqi, __movmemhi_countqi
-__movstrhi_countqi:
+__movmemhi_countqi:
push dph ; Save our pointer regs
push dpl
push iph
.endfunc
#endif
-#if defined(L_movstrhi_counthi)
+#if defined(L_movmemhi_counthi)
.sect .text.libgcc,"ax"
- .global __movstrhi_counthi
- .func _movstrhi_counthi, __movstrhi_counthi
+ .global __movmemhi_counthi
+ .func _movmemhi_counthi, __movmemhi_counthi
-__movstrhi_counthi:
+__movmemhi_counthi:
push dph ; Save our pointer regs
push dpl
push iph
_fp_pop_args_ret \
_pop2_args_ret \
_leaf_fp_pop_args_ret \
- _movstrhi_countqi \
- _movstrhi_counthi \
+ _movmemhi_countqi \
+ _movmemhi_counthi \
abort \
_exit
to the word after the end of the source block, and dst_reg to point
to the last word of the destination block, provided that the block
is MAX_MOVE_BYTES long. */
- emit_insn (gen_movstrsi_internal (dst_reg, src_reg, at_a_time,
+ emit_insn (gen_movmemsi_internal (dst_reg, src_reg, at_a_time,
new_dst_reg, new_src_reg));
emit_move_insn (dst_reg, new_dst_reg);
emit_move_insn (src_reg, new_src_reg);
}
if (leftover)
- emit_insn (gen_movstrsi_internal (dst_reg, src_reg, GEN_INT (leftover),
+ emit_insn (gen_movmemsi_internal (dst_reg, src_reg, GEN_INT (leftover),
gen_reg_rtx (SImode),
gen_reg_rtx (SImode)));
}
;; Argument 2 is the length
;; Argument 3 is the alignment
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(parallel [(set (match_operand:BLK 0 "general_operand" "")
(match_operand:BLK 1 "general_operand" ""))
(use (match_operand:SI 2 "immediate_operand" ""))
;; Insn generated by block moves
-(define_insn "movstrsi_internal"
+(define_insn "movmemsi_internal"
[(set (mem:BLK (match_operand:SI 0 "register_operand" "r")) ;; destination
(mem:BLK (match_operand:SI 1 "register_operand" "r"))) ;; source
(use (match_operand:SI 2 "m32r_block_immediate_operand" "J"));; # bytes to move
;; Block move - adapted from m88k.md
;; ------------------------------------------------------------------------
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(parallel [(set (mem:BLK (match_operand:BLK 0 "" ""))
(mem:BLK (match_operand:BLK 1 "" "")))
(use (match_operand:SI 2 "general_operand" ""))
mips_block_move_straight (dest, src, leftover);
}
\f
-/* Expand a movstrsi instruction. */
+/* Expand a movmemsi instruction. */
bool
mips_expand_block_move (rtx dest, rtx src, rtx length)
;; Argument 2 is the length
;; Argument 3 is the alignment
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(parallel [(set (match_operand:BLK 0 "general_operand")
(match_operand:BLK 1 "general_operand"))
(use (match_operand:SI 2 ""))
dest = copy_addr_to_reg (XEXP (operands[0], 0));
src = copy_addr_to_reg (XEXP (operands[1], 0));
- emit_insn (gen_movstrsi2(dest, src, GEN_INT (words)));
+ emit_insn (gen_movmemsi2(dest, src, GEN_INT (words)));
}
}
move_tail (operands, bytes & 3, bytes & ~3);
if (bytes >> 2)
{
emit_move_insn (count_reg, GEN_INT (bytes >> 2));
- emit_insn (gen_movstrsi1 (GEN_INT (4)));
+ emit_insn (gen_movmemsi1 (GEN_INT (4)));
}
/* insns to copy rest */
move_tail (operands, bytes & 3, 0);
{
/* insns to copy by words */
emit_insn (gen_lshrsi3 (count_reg, bytes_rtx, const2_rtx));
- emit_insn (gen_movstrsi1 (GEN_INT (4)));
+ emit_insn (gen_movmemsi1 (GEN_INT (4)));
if (constp)
{
move_tail (operands, bytes & 3, 0);
{
/* insns to copy rest */
emit_insn (gen_andsi3 (count_reg, bytes_rtx, GEN_INT (3)));
- emit_insn (gen_movstrsi1 (const1_rtx));
+ emit_insn (gen_movmemsi1 (const1_rtx));
}
}
else
emit_insn (gen_negsi2 (count_reg, src_reg));
emit_insn (gen_andsi3 (count_reg, count_reg, GEN_INT (3)));
emit_insn (gen_subsi3 (bytes_reg, bytes_reg, count_reg));
- emit_insn (gen_movstrsi1 (const1_rtx));
+ emit_insn (gen_movmemsi1 (const1_rtx));
if (!constp)
emit_label (aligned_label);
/* insns to copy by words */
emit_insn (gen_lshrsi3 (count_reg, bytes_reg, const2_rtx));
- emit_insn (gen_movstrsi1 (GEN_INT (4)));
+ emit_insn (gen_movmemsi1 (GEN_INT (4)));
/* insns to copy rest */
emit_insn (gen_andsi3 (count_reg, bytes_reg, GEN_INT (3)));
- emit_insn (gen_movstrsi1 (const1_rtx));
+ emit_insn (gen_movmemsi1 (const1_rtx));
}
}
\f
/* The number of scalar move insns which should be generated instead
of a string move insn or a library call.
- We have a smart movstrsi insn */
+ We have a smart movmemsi insn. */
#define MOVE_RATIO 0
#define STORE_RATIO (optimize_size ? 3 : 15)
;;
;; Strategy: Use define_expand to
;; either emit insns directly if it can be done simply or
-;; emit rtl to match movstrsi1 which has extra scratch registers
+;; emit rtl to match movmemsi1 which has extra scratch registers
;; which can be used to generate more complex code.
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(parallel [(set (match_operand:BLK 0 "memory_operand" "")
(match_operand:BLK 1 "memory_operand" ""))
(use (match_operand:SI 2 "general_operand" ""))
;; r3 match
-(define_insn "movstrsi1"
+(define_insn "movmemsi1"
[(set (mem:BLK (reg:SI 2))
(mem:BLK (reg:SI 1)))
(use (reg:SI 0))
return \"movsb\";
}")
-(define_insn "movstrsi2"
+(define_insn "movmemsi2"
[(set (mem:BLK (match_operand:SI 0 "address_operand" "p"))
(mem:BLK (match_operand:SI 1 "address_operand" "p")))
(use (match_operand 2 "immediate_operand" "i"))]
static int forward_branch_p (rtx);
static int shadd_constant_p (int);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
-static int compute_movstr_length (rtx);
-static int compute_clrstr_length (rtx);
+static int compute_movmem_length (rtx);
+static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
count insns rather than emit them. */
static int
-compute_movstr_length (rtx insn)
+compute_movmem_length (rtx insn)
{
rtx pat = PATTERN (insn);
unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
count insns rather than emit them. */
static int
-compute_clrstr_length (rtx insn)
+compute_clrmem_length (rtx insn)
{
rtx pat = PATTERN (insn);
unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
&& GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
&& GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
&& GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
- return compute_movstr_length (insn) - 4;
+ return compute_movmem_length (insn) - 4;
/* Block clear pattern. */
else if (GET_CODE (insn) == INSN
&& GET_CODE (pat) == PARALLEL
&& GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
&& XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
&& GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
- return compute_clrstr_length (insn) - 4;
+ return compute_clrmem_length (insn) - 4;
/* Conditional branch with an unfilled delay slot. */
else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
{
;; The definition of this insn does not really explain what it does,
;; but it should suffice that anything generated as this insn will be
-;; recognized as a movstrsi operation, and that it will not successfully
+;; recognized as a movmemsi operation, and that it will not successfully
;; combine with anything.
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(parallel [(set (match_operand:BLK 0 "" "")
(match_operand:BLK 1 "" ""))
(clobber (match_dup 4))
;; operands 0 and 1 are both equivalent to symbolic MEMs. Thus, we are
;; forced to internally copy operands 0 and 1 to operands 7 and 8,
;; respectively. We then split or peephole optimize after reload.
-(define_insn "movstrsi_prereload"
+(define_insn "movmemsi_prereload"
[(set (mem:BLK (match_operand:SI 0 "register_operand" "r,r"))
(mem:BLK (match_operand:SI 1 "register_operand" "r,r")))
(clobber (match_operand:SI 2 "register_operand" "=&r,&r")) ;loop cnt/tmp
}
}")
-(define_insn "movstrsi_postreload"
+(define_insn "movmemsi_postreload"
[(set (mem:BLK (match_operand:SI 0 "register_operand" "+r,r"))
(mem:BLK (match_operand:SI 1 "register_operand" "+r,r")))
(clobber (match_operand:SI 2 "register_operand" "=&r,&r")) ;loop cnt/tmp
"* return output_block_move (operands, !which_alternative);"
[(set_attr "type" "multi,multi")])
-(define_expand "movstrdi"
+(define_expand "movmemdi"
[(parallel [(set (match_operand:BLK 0 "" "")
(match_operand:BLK 1 "" ""))
(clobber (match_dup 4))
;; operands 0 and 1 are both equivalent to symbolic MEMs. Thus, we are
;; forced to internally copy operands 0 and 1 to operands 7 and 8,
;; respectively. We then split or peephole optimize after reload.
-(define_insn "movstrdi_prereload"
+(define_insn "movmemdi_prereload"
[(set (mem:BLK (match_operand:DI 0 "register_operand" "r,r"))
(mem:BLK (match_operand:DI 1 "register_operand" "r,r")))
(clobber (match_operand:DI 2 "register_operand" "=&r,&r")) ;loop cnt/tmp
}
}")
-(define_insn "movstrdi_postreload"
+(define_insn "movmemdi_postreload"
[(set (mem:BLK (match_operand:DI 0 "register_operand" "+r,r"))
(mem:BLK (match_operand:DI 1 "register_operand" "+r,r")))
(clobber (match_operand:DI 2 "register_operand" "=&r,&r")) ;loop cnt/tmp
"* return output_block_move (operands, !which_alternative);"
[(set_attr "type" "multi,multi")])
-(define_expand "clrstrsi"
+(define_expand "clrmemsi"
[(parallel [(set (match_operand:BLK 0 "" "")
(const_int 0))
(clobber (match_dup 3))
operands[4] = gen_reg_rtx (SImode);
}")
-(define_insn "clrstrsi_prereload"
+(define_insn "clrmemsi_prereload"
[(set (mem:BLK (match_operand:SI 0 "register_operand" "r,r"))
(const_int 0))
(clobber (match_operand:SI 1 "register_operand" "=&r,&r")) ;loop cnt/tmp
}
}")
-(define_insn "clrstrsi_postreload"
+(define_insn "clrmemsi_postreload"
[(set (mem:BLK (match_operand:SI 0 "register_operand" "+r,r"))
(const_int 0))
(clobber (match_operand:SI 1 "register_operand" "=&r,&r")) ;loop cnt/tmp
"* return output_block_clear (operands, !which_alternative);"
[(set_attr "type" "multi,multi")])
-(define_expand "clrstrdi"
+(define_expand "clrmemdi"
[(parallel [(set (match_operand:BLK 0 "" "")
(const_int 0))
(clobber (match_dup 3))
operands[4] = gen_reg_rtx (DImode);
}")
-(define_insn "clrstrdi_prereload"
+(define_insn "clrmemdi_prereload"
[(set (mem:BLK (match_operand:DI 0 "register_operand" "r,r"))
(const_int 0))
(clobber (match_operand:DI 1 "register_operand" "=&r,&r")) ;loop cnt/tmp
}
}")
-(define_insn "clrstrdi_postreload"
+(define_insn "clrmemdi_postreload"
[(set (mem:BLK (match_operand:DI 0 "register_operand" "+r,r"))
(const_int 0))
(clobber (match_operand:DI 1 "register_operand" "=&r,&r")) ;loop cnt/tmp
{ "no-45", -8, "" }, \
/* is 11/10 */ \
{ "10", -12, N_("Generate code for an 11/10") }, \
-/* use movstrhi for bcopy */ \
+/* use movmemhi for bcopy */ \
{ "bcopy", 16, NULL }, \
{ "bcopy-builtin", -16, NULL }, \
/* use 32 bit for int */ \
;; maybe fiddle a bit with move_ratio, then
;; let constraints only accept a register ...
-(define_expand "movstrhi"
+(define_expand "movmemhi"
[(parallel [(set (match_operand:BLK 0 "general_operand" "=g,g")
(match_operand:BLK 1 "general_operand" "g,g"))
(use (match_operand:HI 2 "arith_operand" "n,&mr"))
}")
-(define_insn "" ; "movstrhi"
+(define_insn "" ; "movmemhi"
[(set (mem:BLK (match_operand:HI 0 "general_operand" "=r,r"))
(mem:BLK (match_operand:HI 1 "general_operand" "r,r")))
(use (match_operand:HI 2 "arith_operand" "n,&r"))
for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
{
union {
- rtx (*movstrsi) (rtx, rtx, rtx, rtx);
+ rtx (*movmemsi) (rtx, rtx, rtx, rtx);
rtx (*mov) (rtx, rtx);
} gen_func;
enum machine_mode mode = BLKmode;
&& ! fixed_regs[12])
{
move_bytes = (bytes > 32) ? 32 : bytes;
- gen_func.movstrsi = gen_movstrsi_8reg;
+ gen_func.movmemsi = gen_movmemsi_8reg;
}
else if (TARGET_STRING
&& bytes > 16 /* move up to 24 bytes at a time */
&& ! fixed_regs[10])
{
move_bytes = (bytes > 24) ? 24 : bytes;
- gen_func.movstrsi = gen_movstrsi_6reg;
+ gen_func.movmemsi = gen_movmemsi_6reg;
}
else if (TARGET_STRING
&& bytes > 8 /* move up to 16 bytes at a time */
&& ! fixed_regs[8])
{
move_bytes = (bytes > 16) ? 16 : bytes;
- gen_func.movstrsi = gen_movstrsi_4reg;
+ gen_func.movmemsi = gen_movmemsi_4reg;
}
else if (bytes >= 8 && TARGET_POWERPC64
/* 64-bit loads and stores require word-aligned
else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
{ /* move up to 8 bytes at a time */
move_bytes = (bytes > 8) ? 8 : bytes;
- gen_func.movstrsi = gen_movstrsi_2reg;
+ gen_func.movmemsi = gen_movmemsi_2reg;
}
else if (bytes >= 4 && (align >= 4 || ! STRICT_ALIGNMENT))
{ /* move 4 bytes */
else if (TARGET_STRING && bytes > 1)
{ /* move up to 4 bytes at a time */
move_bytes = (bytes > 4) ? 4 : bytes;
- gen_func.movstrsi = gen_movstrsi_1reg;
+ gen_func.movmemsi = gen_movmemsi_1reg;
}
else /* move 1 byte at a time */
{
if (mode == BLKmode)
{
- /* Move the address into scratch registers. The movstrsi
+ /* Move the address into scratch registers. The movmemsi
patterns require zero offset. */
if (!REG_P (XEXP (src, 0)))
{
}
set_mem_size (dest, GEN_INT (move_bytes));
- emit_insn ((*gen_func.movstrsi) (dest, src,
+ emit_insn ((*gen_func.movmemsi) (dest, src,
GEN_INT (move_bytes & 31),
align_rtx));
}
;; Argument 2 is the length
;; Argument 3 is the alignment
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(parallel [(set (match_operand:BLK 0 "" "")
(match_operand:BLK 1 "" ""))
(use (match_operand:SI 2 "" ""))
;; Move up to 32 bytes at a time. The fixed registers are needed because the
;; register allocator doesn't have a clue about allocating 8 word registers.
;; rD/rS = r5 is preferred, efficient form.
-(define_expand "movstrsi_8reg"
+(define_expand "movmemsi_8reg"
[(parallel [(set (match_operand 0 "" "")
(match_operand 1 "" ""))
(use (match_operand 2 "" ""))
;; Move up to 24 bytes at a time. The fixed registers are needed because the
;; register allocator doesn't have a clue about allocating 6 word registers.
;; rD/rS = r5 is preferred, efficient form.
-(define_expand "movstrsi_6reg"
+(define_expand "movmemsi_6reg"
[(parallel [(set (match_operand 0 "" "")
(match_operand 1 "" ""))
(use (match_operand 2 "" ""))
;; Move up to 16 bytes at a time, using 4 fixed registers to avoid spill
;; problems with TImode.
;; rD/rS = r5 is preferred, efficient form.
-(define_expand "movstrsi_4reg"
+(define_expand "movmemsi_4reg"
[(parallel [(set (match_operand 0 "" "")
(match_operand 1 "" ""))
(use (match_operand 2 "" ""))
(set_attr "length" "8")])
;; Move up to 8 bytes at a time.
-(define_expand "movstrsi_2reg"
+(define_expand "movmemsi_2reg"
[(parallel [(set (match_operand 0 "" "")
(match_operand 1 "" ""))
(use (match_operand 2 "" ""))
(set_attr "length" "8")])
;; Move up to 4 bytes at a time.
-(define_expand "movstrsi_1reg"
+(define_expand "movmemsi_1reg"
[(parallel [(set (match_operand 0 "" "")
(match_operand 1 "" ""))
(use (match_operand 2 "" ""))
extern void s390_expand_plus_operand (rtx, rtx, rtx);
extern void emit_symbolic_move (rtx *);
extern void s390_load_address (rtx, rtx);
-extern void s390_expand_movstr (rtx, rtx, rtx);
-extern void s390_expand_clrstr (rtx, rtx);
+extern void s390_expand_movmem (rtx, rtx, rtx);
+extern void s390_expand_clrmem (rtx, rtx);
extern void s390_expand_cmpmem (rtx, rtx, rtx, rtx);
extern bool s390_expand_addcc (enum rtx_code, rtx, rtx, rtx, rtx, rtx);
extern rtx s390_return_addr_rtx (int, rtx);
/* Emit code to move LEN bytes from SRC to DST. */
void
-s390_expand_movstr (rtx dst, rtx src, rtx len)
+s390_expand_movmem (rtx dst, rtx src, rtx len)
{
if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
{
if (INTVAL (len) > 0)
- emit_insn (gen_movstr_short (dst, src, GEN_INT (INTVAL (len) - 1)));
+ emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
}
else if (TARGET_MVCLE)
{
- emit_insn (gen_movstr_long (dst, src, convert_to_mode (Pmode, len, 1)));
+ emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
}
else
emit_label (loop_start_label);
- emit_insn (gen_movstr_short (dst, src, GEN_INT (255)));
+ emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
s390_load_address (dst_addr,
gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
s390_load_address (src_addr,
emit_jump (loop_start_label);
emit_label (loop_end_label);
- emit_insn (gen_movstr_short (dst, src,
+ emit_insn (gen_movmem_short (dst, src,
convert_to_mode (Pmode, count, 1)));
emit_label (end_label);
}
/* Emit code to clear LEN bytes at DST. */
void
-s390_expand_clrstr (rtx dst, rtx len)
+s390_expand_clrmem (rtx dst, rtx len)
{
if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
{
if (INTVAL (len) > 0)
- emit_insn (gen_clrstr_short (dst, GEN_INT (INTVAL (len) - 1)));
+ emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
}
else if (TARGET_MVCLE)
{
- emit_insn (gen_clrstr_long (dst, convert_to_mode (Pmode, len, 1)));
+ emit_insn (gen_clrmem_long (dst, convert_to_mode (Pmode, len, 1)));
}
else
emit_label (loop_start_label);
- emit_insn (gen_clrstr_short (dst, GEN_INT (255)));
+ emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
s390_load_address (dst_addr,
gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
emit_jump (loop_start_label);
emit_label (loop_end_label);
- emit_insn (gen_clrstr_short (dst, convert_to_mode (Pmode, count, 1)));
+ emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
emit_label (end_label);
}
}
(set_attr "length" "8")])
;
-; movstrM instruction pattern(s).
+; movmemM instruction pattern(s).
;
-(define_expand "movstrdi"
+(define_expand "movmemdi"
[(set (match_operand:BLK 0 "memory_operand" "")
(match_operand:BLK 1 "memory_operand" ""))
(use (match_operand:DI 2 "general_operand" ""))
(match_operand 3 "" "")]
"TARGET_64BIT"
- "s390_expand_movstr (operands[0], operands[1], operands[2]); DONE;")
+ "s390_expand_movmem (operands[0], operands[1], operands[2]); DONE;")
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(set (match_operand:BLK 0 "memory_operand" "")
(match_operand:BLK 1 "memory_operand" ""))
(use (match_operand:SI 2 "general_operand" ""))
(match_operand 3 "" "")]
""
- "s390_expand_movstr (operands[0], operands[1], operands[2]); DONE;")
+ "s390_expand_movmem (operands[0], operands[1], operands[2]); DONE;")
; Move a block that is up to 256 bytes in length.
; The block length is taken as (operands[2] % 256) + 1.
-(define_expand "movstr_short"
+(define_expand "movmem_short"
[(parallel
[(set (match_operand:BLK 0 "memory_operand" "")
(match_operand:BLK 1 "memory_operand" ""))
""
"operands[3] = gen_rtx_SCRATCH (Pmode);")
-(define_insn "*movstr_short"
+(define_insn "*movmem_short"
[(set (match_operand:BLK 0 "memory_operand" "=Q,Q")
(match_operand:BLK 1 "memory_operand" "Q,Q"))
(use (match_operand 2 "nonmemory_operand" "n,a"))
; Move a block of arbitrary length.
-(define_expand "movstr_long"
+(define_expand "movmem_long"
[(parallel
[(clobber (match_dup 2))
(clobber (match_dup 3))
operands[3] = reg1;
})
-(define_insn "*movstr_long_64"
+(define_insn "*movmem_long_64"
[(clobber (match_operand:TI 0 "register_operand" "=d"))
(clobber (match_operand:TI 1 "register_operand" "=d"))
(set (mem:BLK (subreg:DI (match_operand:TI 2 "register_operand" "0") 0))
(set_attr "type" "vs")
(set_attr "length" "8")])
-(define_insn "*movstr_long_31"
+(define_insn "*movmem_long_31"
[(clobber (match_operand:DI 0 "register_operand" "=d"))
(clobber (match_operand:DI 1 "register_operand" "=d"))
(set (mem:BLK (subreg:SI (match_operand:DI 2 "register_operand" "0") 0))
(set_attr "length" "8")])
;
-; clrstrM instruction pattern(s).
+; clrmemM instruction pattern(s).
;
-(define_expand "clrstrdi"
+(define_expand "clrmemdi"
[(set (match_operand:BLK 0 "memory_operand" "")
(const_int 0))
(use (match_operand:DI 1 "general_operand" ""))
(match_operand 2 "" "")]
"TARGET_64BIT"
- "s390_expand_clrstr (operands[0], operands[1]); DONE;")
+ "s390_expand_clrmem (operands[0], operands[1]); DONE;")
-(define_expand "clrstrsi"
+(define_expand "clrmemsi"
[(set (match_operand:BLK 0 "memory_operand" "")
(const_int 0))
(use (match_operand:SI 1 "general_operand" ""))
(match_operand 2 "" "")]
""
- "s390_expand_clrstr (operands[0], operands[1]); DONE;")
+ "s390_expand_clrmem (operands[0], operands[1]); DONE;")
; Clear a block that is up to 256 bytes in length.
; The block length is taken as (operands[1] % 256) + 1.
-(define_expand "clrstr_short"
+(define_expand "clrmem_short"
[(parallel
[(set (match_operand:BLK 0 "memory_operand" "")
(const_int 0))
""
"operands[2] = gen_rtx_SCRATCH (Pmode);")
-(define_insn "*clrstr_short"
+(define_insn "*clrmem_short"
[(set (match_operand:BLK 0 "memory_operand" "=Q,Q")
(const_int 0))
(use (match_operand 1 "nonmemory_operand" "n,a"))
; Clear a block of arbitrary length.
-(define_expand "clrstr_long"
+(define_expand "clrmem_long"
[(parallel
[(clobber (match_dup 1))
(set (match_operand:BLK 0 "memory_operand" "")
operands[2] = reg1;
})
-(define_insn "*clrstr_long_64"
+(define_insn "*clrmem_long_64"
[(clobber (match_operand:TI 0 "register_operand" "=d"))
(set (mem:BLK (subreg:DI (match_operand:TI 2 "register_operand" "0") 0))
(const_int 0))
(set_attr "type" "vs")
(set_attr "length" "8")])
-(define_insn "*clrstr_long_31"
+(define_insn "*clrmem_long_31"
[(clobber (match_operand:DI 0 "register_operand" "=d"))
(set (mem:BLK (subreg:SI (match_operand:DI 2 "register_operand" "0") 0))
(const_int 0))
ENDFUNC(GLOBAL(lshrsi3))
#endif
-#ifdef L_movstr
+#ifdef L_movmem
.text
! done all the large groups, do the remainder
-! jump to movstr+
+! jump to movmem+
done:
add #64,r5
- mova GLOBAL(movstrSI0),r0
+ mova GLOBAL(movmemSI0),r0
shll2 r6
add r6,r0
jmp @r0
add #64,r4
.align 4
- .global GLOBAL(movstrSI64)
- FUNC(GLOBAL(movstrSI64))
-GLOBAL(movstrSI64):
+ .global GLOBAL(movmemSI64)
+ FUNC(GLOBAL(movmemSI64))
+GLOBAL(movmemSI64):
mov.l @(60,r5),r0
mov.l r0,@(60,r4)
- .global GLOBAL(movstrSI60)
- FUNC(GLOBAL(movstrSI60))
-GLOBAL(movstrSI60):
+ .global GLOBAL(movmemSI60)
+ FUNC(GLOBAL(movmemSI60))
+GLOBAL(movmemSI60):
mov.l @(56,r5),r0
mov.l r0,@(56,r4)
- .global GLOBAL(movstrSI56)
- FUNC(GLOBAL(movstrSI56))
-GLOBAL(movstrSI56):
+ .global GLOBAL(movmemSI56)
+ FUNC(GLOBAL(movmemSI56))
+GLOBAL(movmemSI56):
mov.l @(52,r5),r0
mov.l r0,@(52,r4)
- .global GLOBAL(movstrSI52)
- FUNC(GLOBAL(movstrSI52))
-GLOBAL(movstrSI52):
+ .global GLOBAL(movmemSI52)
+ FUNC(GLOBAL(movmemSI52))
+GLOBAL(movmemSI52):
mov.l @(48,r5),r0
mov.l r0,@(48,r4)
- .global GLOBAL(movstrSI48)
- FUNC(GLOBAL(movstrSI48))
-GLOBAL(movstrSI48):
+ .global GLOBAL(movmemSI48)
+ FUNC(GLOBAL(movmemSI48))
+GLOBAL(movmemSI48):
mov.l @(44,r5),r0
mov.l r0,@(44,r4)
- .global GLOBAL(movstrSI44)
- FUNC(GLOBAL(movstrSI44))
-GLOBAL(movstrSI44):
+ .global GLOBAL(movmemSI44)
+ FUNC(GLOBAL(movmemSI44))
+GLOBAL(movmemSI44):
mov.l @(40,r5),r0
mov.l r0,@(40,r4)
- .global GLOBAL(movstrSI40)
- FUNC(GLOBAL(movstrSI40))
-GLOBAL(movstrSI40):
+ .global GLOBAL(movmemSI40)
+ FUNC(GLOBAL(movmemSI40))
+GLOBAL(movmemSI40):
mov.l @(36,r5),r0
mov.l r0,@(36,r4)
- .global GLOBAL(movstrSI36)
- FUNC(GLOBAL(movstrSI36))
-GLOBAL(movstrSI36):
+ .global GLOBAL(movmemSI36)
+ FUNC(GLOBAL(movmemSI36))
+GLOBAL(movmemSI36):
mov.l @(32,r5),r0
mov.l r0,@(32,r4)
- .global GLOBAL(movstrSI32)
- FUNC(GLOBAL(movstrSI32))
-GLOBAL(movstrSI32):
+ .global GLOBAL(movmemSI32)
+ FUNC(GLOBAL(movmemSI32))
+GLOBAL(movmemSI32):
mov.l @(28,r5),r0
mov.l r0,@(28,r4)
- .global GLOBAL(movstrSI28)
- FUNC(GLOBAL(movstrSI28))
-GLOBAL(movstrSI28):
+ .global GLOBAL(movmemSI28)
+ FUNC(GLOBAL(movmemSI28))
+GLOBAL(movmemSI28):
mov.l @(24,r5),r0
mov.l r0,@(24,r4)
- .global GLOBAL(movstrSI24)
- FUNC(GLOBAL(movstrSI24))
-GLOBAL(movstrSI24):
+ .global GLOBAL(movmemSI24)
+ FUNC(GLOBAL(movmemSI24))
+GLOBAL(movmemSI24):
mov.l @(20,r5),r0
mov.l r0,@(20,r4)
- .global GLOBAL(movstrSI20)
- FUNC(GLOBAL(movstrSI20))
-GLOBAL(movstrSI20):
+ .global GLOBAL(movmemSI20)
+ FUNC(GLOBAL(movmemSI20))
+GLOBAL(movmemSI20):
mov.l @(16,r5),r0
mov.l r0,@(16,r4)
- .global GLOBAL(movstrSI16)
- FUNC(GLOBAL(movstrSI16))
-GLOBAL(movstrSI16):
+ .global GLOBAL(movmemSI16)
+ FUNC(GLOBAL(movmemSI16))
+GLOBAL(movmemSI16):
mov.l @(12,r5),r0
mov.l r0,@(12,r4)
- .global GLOBAL(movstrSI12)
- FUNC(GLOBAL(movstrSI12))
-GLOBAL(movstrSI12):
+ .global GLOBAL(movmemSI12)
+ FUNC(GLOBAL(movmemSI12))
+GLOBAL(movmemSI12):
mov.l @(8,r5),r0
mov.l r0,@(8,r4)
- .global GLOBAL(movstrSI8)
- FUNC(GLOBAL(movstrSI8))
-GLOBAL(movstrSI8):
+ .global GLOBAL(movmemSI8)
+ FUNC(GLOBAL(movmemSI8))
+GLOBAL(movmemSI8):
mov.l @(4,r5),r0
mov.l r0,@(4,r4)
- .global GLOBAL(movstrSI4)
- FUNC(GLOBAL(movstrSI4))
-GLOBAL(movstrSI4):
+ .global GLOBAL(movmemSI4)
+ FUNC(GLOBAL(movmemSI4))
+GLOBAL(movmemSI4):
mov.l @(0,r5),r0
mov.l r0,@(0,r4)
- .global GLOBAL(movstrSI0)
- FUNC(GLOBAL(movstrSI0))
-GLOBAL(movstrSI0):
+ .global GLOBAL(movmemSI0)
+ FUNC(GLOBAL(movmemSI0))
+GLOBAL(movmemSI0):
rts
nop
- ENDFUNC(GLOBAL(movstrSI64))
- ENDFUNC(GLOBAL(movstrSI60))
- ENDFUNC(GLOBAL(movstrSI56))
- ENDFUNC(GLOBAL(movstrSI52))
- ENDFUNC(GLOBAL(movstrSI48))
- ENDFUNC(GLOBAL(movstrSI44))
- ENDFUNC(GLOBAL(movstrSI40))
- ENDFUNC(GLOBAL(movstrSI36))
- ENDFUNC(GLOBAL(movstrSI32))
- ENDFUNC(GLOBAL(movstrSI28))
- ENDFUNC(GLOBAL(movstrSI24))
- ENDFUNC(GLOBAL(movstrSI20))
- ENDFUNC(GLOBAL(movstrSI16))
- ENDFUNC(GLOBAL(movstrSI12))
- ENDFUNC(GLOBAL(movstrSI8))
- ENDFUNC(GLOBAL(movstrSI4))
- ENDFUNC(GLOBAL(movstrSI0))
+ ENDFUNC(GLOBAL(movmemSI64))
+ ENDFUNC(GLOBAL(movmemSI60))
+ ENDFUNC(GLOBAL(movmemSI56))
+ ENDFUNC(GLOBAL(movmemSI52))
+ ENDFUNC(GLOBAL(movmemSI48))
+ ENDFUNC(GLOBAL(movmemSI44))
+ ENDFUNC(GLOBAL(movmemSI40))
+ ENDFUNC(GLOBAL(movmemSI36))
+ ENDFUNC(GLOBAL(movmemSI32))
+ ENDFUNC(GLOBAL(movmemSI28))
+ ENDFUNC(GLOBAL(movmemSI24))
+ ENDFUNC(GLOBAL(movmemSI20))
+ ENDFUNC(GLOBAL(movmemSI16))
+ ENDFUNC(GLOBAL(movmemSI12))
+ ENDFUNC(GLOBAL(movmemSI8))
+ ENDFUNC(GLOBAL(movmemSI4))
+ ENDFUNC(GLOBAL(movmemSI0))
.align 4
- .global GLOBAL(movstr)
- FUNC(GLOBAL(movstr))
-GLOBAL(movstr):
+ .global GLOBAL(movmem)
+ FUNC(GLOBAL(movmem))
+GLOBAL(movmem):
mov.l @(60,r5),r0
mov.l r0,@(60,r4)
bf done
add #64,r5
- bra GLOBAL(movstr)
+ bra GLOBAL(movmem)
add #64,r4
- FUNC(GLOBAL(movstr))
+ FUNC(GLOBAL(movmem))
#endif
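The movmemSI* entry points above form one fall-through ladder: the dispatch at the top of the function scales the remaining word count into an offset (shll2 r6; add r6,r0) and jumps into the middle of it. A scaled-down, purely illustrative C analogue of the same trick, assuming word-aligned operands:

/* Four-word version of the ladder: entering at `4 - words` copies
   exactly `words` words, as the `jmp @r0` dispatch does above.  */
static void
copy_words_ladder (long *dst, const long *src, int words)
{
  switch (4 - words)
    {
    case 0: dst[3] = src[3];  /* like entering at movmemSI16 */
    case 1: dst[2] = src[2];  /* movmemSI12 */
    case 2: dst[1] = src[1];  /* movmemSI8 */
    case 3: dst[0] = src[0];  /* movmemSI4 */
    case 4: ;                 /* movmemSI0: nothing left to do */
    }
}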
-#ifdef L_movstr_i4
+#ifdef L_movmem_i4
.text
- .global GLOBAL(movstr_i4_even)
- .global GLOBAL(movstr_i4_odd)
- .global GLOBAL(movstrSI12_i4)
+ .global GLOBAL(movmem_i4_even)
+ .global GLOBAL(movmem_i4_odd)
+ .global GLOBAL(movmemSI12_i4)
- FUNC(GLOBAL(movstr_i4_even))
- FUNC(GLOBAL(movstr_i4_odd))
- FUNC(GLOBAL(movstrSI12_i4))
+ FUNC(GLOBAL(movmem_i4_even))
+ FUNC(GLOBAL(movmem_i4_odd))
+ FUNC(GLOBAL(movmemSI12_i4))
.p2align 5
-L_movstr_2mod4_end:
+L_movmem_2mod4_end:
mov.l r0,@(16,r4)
rts
mov.l r1,@(20,r4)
.p2align 2
-GLOBAL(movstr_i4_even):
+GLOBAL(movmem_i4_even):
mov.l @r5+,r0
- bra L_movstr_start_even
+ bra L_movmem_start_even
mov.l @r5+,r1
-GLOBAL(movstr_i4_odd):
+GLOBAL(movmem_i4_odd):
mov.l @r5+,r1
add #-4,r4
mov.l @r5+,r2
mov.l r1,@(4,r4)
mov.l r2,@(8,r4)
-L_movstr_loop:
+L_movmem_loop:
mov.l r3,@(12,r4)
dt r6
mov.l @r5+,r0
- bt/s L_movstr_2mod4_end
+ bt/s L_movmem_2mod4_end
mov.l @r5+,r1
add #16,r4
-L_movstr_start_even:
+L_movmem_start_even:
mov.l @r5+,r2
mov.l @r5+,r3
mov.l r0,@r4
dt r6
mov.l r1,@(4,r4)
- bf/s L_movstr_loop
+ bf/s L_movmem_loop
mov.l r2,@(8,r4)
rts
mov.l r3,@(12,r4)
- ENDFUNC(GLOBAL(movstr_i4_even))
- ENDFUNC(GLOBAL(movstr_i4_odd))
+ ENDFUNC(GLOBAL(movmem_i4_even))
+ ENDFUNC(GLOBAL(movmem_i4_odd))
.p2align 4
-GLOBAL(movstrSI12_i4):
+GLOBAL(movmemSI12_i4):
mov.l @r5,r0
mov.l @(4,r5),r1
mov.l @(8,r5),r2
rts
mov.l r2,@(8,r4)
- ENDFUNC(GLOBAL(movstrSI12_i4))
+ ENDFUNC(GLOBAL(movmemSI12_i4))
#endif
#ifdef L_mulsi3
rtx r4 = gen_rtx_REG (SImode, 4);
rtx r5 = gen_rtx_REG (SImode, 5);
- entry_name = get_identifier ("__movstrSI12_i4");
+ entry_name = get_identifier ("__movmemSI12_i4");
sym = function_symbol (IDENTIFIER_POINTER (entry_name));
func_addr_rtx = copy_to_mode_reg (Pmode, sym);
rtx r6 = gen_rtx_REG (SImode, 6);
entry_name = get_identifier (bytes & 4
- ? "__movstr_i4_odd"
- : "__movstr_i4_even");
+ ? "__movmem_i4_odd"
+ : "__movmem_i4_even");
sym = function_symbol (IDENTIFIER_POINTER (entry_name));
func_addr_rtx = copy_to_mode_reg (Pmode, sym);
force_into (XEXP (operands[0], 0), r4);
rtx r4 = gen_rtx_REG (SImode, 4);
rtx r5 = gen_rtx_REG (SImode, 5);
- sprintf (entry, "__movstrSI%d", bytes);
+ sprintf (entry, "__movmemSI%d", bytes);
entry_name = get_identifier (entry);
sym = function_symbol (IDENTIFIER_POINTER (entry_name));
func_addr_rtx = copy_to_mode_reg (Pmode, sym);
rtx r5 = gen_rtx_REG (SImode, 5);
rtx r6 = gen_rtx_REG (SImode, 6);
- entry_name = get_identifier ("__movstr");
+ entry_name = get_identifier ("__movmem");
sym = function_symbol (IDENTIFIER_POINTER (entry_name));
func_addr_rtx = copy_to_mode_reg (Pmode, sym);
force_into (XEXP (operands[0], 0), r4);
;; String/block move insn.
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(parallel [(set (mem:BLK (match_operand:BLK 0 "" ""))
(mem:BLK (match_operand:BLK 1 "" "")))
(use (match_operand:SI 2 "nonmemory_operand" ""))
LIB1ASMSRC = sh/lib1funcs.asm
-LIB1ASMFUNCS = _ashiftrt _ashiftrt_n _ashiftlt _lshiftrt _movstr \
- _movstr_i4 _mulsi3 _sdivsi3 _sdivsi3_i4 _udivsi3 _udivsi3_i4 _set_fpscr \
+LIB1ASMFUNCS = _ashiftrt _ashiftrt_n _ashiftlt _lshiftrt _movmem \
+ _movmem_i4 _mulsi3 _sdivsi3 _sdivsi3_i4 _udivsi3 _udivsi3_i4 _set_fpscr \
$(LIB1ASMFUNCS_CACHE)
# We want fine grained libraries, so use the new code to build the
#define MOVE_MAX 8
/* If a memory-to-memory move would take MOVE_RATIO or more simple
- move-instruction pairs, we will do a movstr or libcall instead. */
+ move-instruction pairs, we will do a movmem or libcall instead. */
#define MOVE_RATIO (optimize_size ? 3 : 8)
}")
;; This is here to accept 4 arguments and pass the first 3 along
-;; to the movstrhi1 pattern that really does the work.
-(define_expand "movstrhi"
+;; to the movmemhi1 pattern that really does the work.
+(define_expand "movmemhi"
[(set (match_operand:BLK 0 "general_operand" "=g")
(match_operand:BLK 1 "general_operand" "g"))
(use (match_operand:HI 2 "general_operand" "g"))
(match_operand 3 "" "")]
""
"
- emit_insn (gen_movstrhi1 (operands[0], operands[1], operands[2]));
+ emit_insn (gen_movmemhi1 (operands[0], operands[1], operands[2]));
DONE;
")
;; but it should suffice
;; that anything generated as this insn will be recognized as one
;; and that it won't successfully combine with anything.
-(define_insn "movstrhi1"
+(define_insn "movmemhi1"
[(set (match_operand:BLK 0 "memory_operand" "=m")
(match_operand:BLK 1 "memory_operand" "m"))
(use (match_operand:HI 2 "general_operand" "g"))
operands[0] = validize_mem (dest);
operands[1] = validize_mem (src);
- emit_insn (gen_movstrsi_internal (operands[0], operands[1],
+ emit_insn (gen_movmemsi_internal (operands[0], operands[1],
operands[2], operands[3]));
return 1;
}
;; Block moves
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(parallel [(set (match_operand:BLK 0 "" "")
(match_operand:BLK 1 "" ""))
(use (match_operand:SI 2 "arith_operand" ""))
DONE;
})
-(define_insn "movstrsi_internal"
+(define_insn "movmemsi_internal"
[(set (match_operand:BLK 0 "memory_operand" "=U")
(match_operand:BLK 1 "memory_operand" "U"))
(use (match_operand:SI 2 "arith_operand" ""))
@item -mbcopy-builtin
@opindex bcopy-builtin
-Use inline @code{movstrhi} patterns for copying memory. This is the
+Use inline @code{movmemhi} patterns for copying memory. This is the
default.
@item -mbcopy
@opindex mbcopy
-Do not use inline @code{movstrhi} patterns for copying memory.
+Do not use inline @code{movmemhi} patterns for copying memory.
@item -mint16
@itemx -mno-int32
would no longer be clear which @code{set} operations were comparisons.
The @samp{cmp@var{m}} patterns should be used instead.
-@cindex @code{movstr@var{m}} instruction pattern
-@item @samp{movstr@var{m}}
+@cindex @code{movmem@var{m}} instruction pattern
+@item @samp{movmem@var{m}}
Block move instruction. The addresses of the destination and source
strings are the first two operands, and both are in mode @code{Pmode}.
compiler knows that both source and destination are word-aligned,
it may provide the value 4 for this operand.
-Descriptions of multiple @code{movstr@var{m}} patterns can only be
+Descriptions of multiple @code{movmem@var{m}} patterns can only be
beneficial if the patterns for smaller modes have fewer restrictions
on their first, second and fourth operands. Note that the mode @var{m}
-in @code{movstr@var{m}} does not impose any restriction on the mode of
+in @code{movmem@var{m}} does not impose any restriction on the mode of
individually moved data units in the block.
These patterns need not give special consideration to the possibility
that the source and destination strings might overlap.
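As a concrete illustration of this operand contract, here is a skeletal expander for a hypothetical target (not GCC source; only the operand layout and the DONE/FAIL protocol are real). Returning zero makes the named pattern FAIL, so the middle end falls back to move_by_pieces or a memcpy libcall, in the same style as the i386 expander elsewhere in this patch:

int
example_expand_movmem (rtx dst, rtx src, rtx count, rtx align)
{
  if (GET_CODE (count) != CONST_INT   /* handle constant lengths only */
      || INTVAL (count) > 64          /* and only short blocks */
      || INTVAL (align) < 4)          /* with word-aligned operands */
    return 0;                         /* let the caller FAIL */
  /* ... emit a word-copy sequence for INTVAL (count) bytes ...  */
  return 1;                           /* caller can DONE */
}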
-@cindex @code{clrstr@var{m}} instruction pattern
-@item @samp{clrstr@var{m}}
+@cindex @code{clrmem@var{m}} instruction pattern
+@item @samp{clrmem@var{m}}
Block clear instruction. The address of the destination string is the
first operand, in mode @code{Pmode}. The number of bytes to clear is
-the second operand, in mode @var{m}. See @samp{movstr@var{m}} for
+the second operand, in mode @var{m}. See @samp{movmem@var{m}} for
a discussion of the choice of mode.
The third operand is the known alignment of the destination, in the form
destination is word-aligned, it may provide the value 4 for this
operand.
-The use for multiple @code{clrstr@var{m}} is as for @code{movstr@var{m}}.
+The use for multiple @code{clrmem@var{m}} is as for @code{movmem@var{m}}.
@cindex @code{cmpstr@var{m}} instruction pattern
@item @samp{cmpstr@var{m}}
String compare instruction, with five operands. Operand 0 is the output;
it has mode @var{m}. The remaining four operands are like the operands
-of @samp{movstr@var{m}}. The two memory blocks specified are compared
+of @samp{movmem@var{m}}. The two memory blocks specified are compared
byte by byte in lexicographic order starting at the beginning of each
string. The instruction is not allowed to prefetch more than one byte
at a time since either string may end in the first byte and reading past
instead. The @code{use} RTX is most commonly useful to describe that
a fixed register is implicitly used in an insn. It is also safe to use
in patterns where the compiler knows for other reasons that the result
-of the whole pattern is variable, such as @samp{movstr@var{m}} or
+of the whole pattern is variable, such as @samp{movmem@var{m}} or
@samp{call} patterns.
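A short sketch of the idiom this paragraph describes, as it might appear in a define_expand body (operands[] in scope); gen_rtx_USE and emit_insn are the real interfaces, the register number is invented:

rtx len_reg = gen_rtx_REG (SImode, 5);        /* hypothetical fixed reg */
emit_move_insn (len_reg, operands[2]);        /* load the byte count */
emit_insn (gen_rtx_USE (VOIDmode, len_reg));  /* keep it live into the insn */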
During the reload phase, an insn that has a @code{use} as pattern
static void move_by_pieces_1 (rtx (*) (rtx, ...), enum machine_mode,
struct move_by_pieces *);
static bool block_move_libcall_safe_for_call_parm (void);
-static bool emit_block_move_via_movstr (rtx, rtx, rtx, unsigned);
+static bool emit_block_move_via_movmem (rtx, rtx, rtx, unsigned);
static rtx emit_block_move_via_libcall (rtx, rtx, rtx);
static tree emit_block_move_libcall_fn (int);
static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
static void store_by_pieces_1 (struct store_by_pieces *, unsigned int);
static void store_by_pieces_2 (rtx (*) (rtx, ...), enum machine_mode,
struct store_by_pieces *);
-static bool clear_storage_via_clrstr (rtx, rtx, unsigned);
+static bool clear_storage_via_clrmem (rtx, rtx, unsigned);
static rtx clear_storage_via_libcall (rtx, rtx);
static tree clear_storage_libcall_fn (int);
static rtx compress_float_constant (rtx, rtx);
#endif
/* This array records the insn_code of insns to perform block moves. */
-enum insn_code movstr_optab[NUM_MACHINE_MODES];
+enum insn_code movmem_optab[NUM_MACHINE_MODES];
/* This array records the insn_code of insns to perform block clears. */
-enum insn_code clrstr_optab[NUM_MACHINE_MODES];
+enum insn_code clrmem_optab[NUM_MACHINE_MODES];
/* These arrays record the insn_code of two different kinds of insns
to perform block compares. */
if (GET_CODE (size) == CONST_INT && MOVE_BY_PIECES_P (INTVAL (size), align))
move_by_pieces (x, y, INTVAL (size), align, 0);
- else if (emit_block_move_via_movstr (x, y, size, align))
+ else if (emit_block_move_via_movmem (x, y, size, align))
;
else if (may_use_call)
retval = emit_block_move_via_libcall (x, y, size);
return true;
}
-/* A subroutine of emit_block_move. Expand a movstr pattern;
+/* A subroutine of emit_block_move. Expand a movmem pattern;
return true if successful. */
static bool
-emit_block_move_via_movstr (rtx x, rtx y, rtx size, unsigned int align)
+emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align)
{
rtx opalign = GEN_INT (align / BITS_PER_UNIT);
int save_volatile_ok = volatile_ok;
for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
- enum insn_code code = movstr_optab[(int) mode];
+ enum insn_code code = movmem_optab[(int) mode];
insn_operand_predicate_fn pred;
if (code != CODE_FOR_nothing
else if (GET_CODE (size) == CONST_INT
&& CLEAR_BY_PIECES_P (INTVAL (size), align))
clear_by_pieces (object, INTVAL (size), align);
- else if (clear_storage_via_clrstr (object, size, align))
+ else if (clear_storage_via_clrmem (object, size, align))
;
else
retval = clear_storage_via_libcall (object, size);
return retval;
}
-/* A subroutine of clear_storage. Expand a clrstr pattern;
+/* A subroutine of clear_storage. Expand a clrmem pattern;
return true if successful. */
static bool
-clear_storage_via_clrstr (rtx object, rtx size, unsigned int align)
+clear_storage_via_clrmem (rtx object, rtx size, unsigned int align)
{
/* Try the most limited insn first, because there's no point
including more than one in the machine description unless
for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
- enum insn_code code = clrstr_optab[(int) mode];
+ enum insn_code code = clrmem_optab[(int) mode];
insn_operand_predicate_fn pred;
if (code != CODE_FOR_nothing
#define OK_DEFER_POP (inhibit_defer_pop -= 1)
\f
/* If a memory-to-memory move would take MOVE_RATIO or more simple
- move-instruction sequences, we will do a movstr or libcall instead. */
+ move-instruction sequences, we will do a movmem or libcall instead. */
#ifndef MOVE_RATIO
-#if defined (HAVE_movstrqi) || defined (HAVE_movstrhi) || defined (HAVE_movstrsi) || defined (HAVE_movstrdi) || defined (HAVE_movstrti)
+#if defined (HAVE_movmemqi) || defined (HAVE_movmemhi) || defined (HAVE_movmemsi) || defined (HAVE_movmemdi) || defined (HAVE_movmemti)
#define MOVE_RATIO 2
#else
/* If we are optimizing for space (-Os), cut down the default move ratio. */
#endif
/* If a clear memory operation would take CLEAR_RATIO or more simple
- move-instruction sequences, we will do a clrstr or libcall instead. */
+ move-instruction sequences, we will do a clrmem or libcall instead. */
#ifndef CLEAR_RATIO
-#if defined (HAVE_clrstrqi) || defined (HAVE_clrstrhi) || defined (HAVE_clrstrsi) || defined (HAVE_clrstrdi) || defined (HAVE_clrstrti)
+#if defined (HAVE_clrmemqi) || defined (HAVE_clrmemhi) || defined (HAVE_clrmemsi) || defined (HAVE_clrmemdi) || defined (HAVE_clrmemti)
#define CLEAR_RATIO 2
#else
/* If we are optimizing for space, cut down the default clear ratio. */
"push_optab->handlers[$A].insn_code = CODE_FOR_$(push$a1$)",
"reload_in_optab[$A] = CODE_FOR_$(reload_in$a$)",
"reload_out_optab[$A] = CODE_FOR_$(reload_out$a$)",
- "movstr_optab[$A] = CODE_FOR_$(movstr$a$)",
- "clrstr_optab[$A] = CODE_FOR_$(clrstr$a$)",
+ "movmem_optab[$A] = CODE_FOR_$(movmem$a$)",
+ "clrmem_optab[$A] = CODE_FOR_$(clrmem$a$)",
"cmpstr_optab[$A] = CODE_FOR_$(cmpstr$a$)",
"cmpmem_optab[$A] = CODE_FOR_$(cmpmem$a$)",
"vec_set_optab->handlers[$A].insn_code = CODE_FOR_$(vec_set$a$)",
case CLOBBER:
/* USE and CLOBBER are ordinary, but we convert (use (subreg foo))
to (use foo) if the original insn didn't have a subreg.
- Removing the subreg distorts the VAX movstrhi pattern
+ Removing the subreg distorts the VAX movmemhi pattern
by changing the mode of an operand. */
copy = copy_rtx_and_substitute (XEXP (orig, 0), map, code == CLOBBER);
if (GET_CODE (copy) == SUBREG && GET_CODE (XEXP (orig, 0)) != SUBREG)
We don't actually combine a hard reg with a pseudo; instead
we just record the hard reg as the suggestion for the pseudo's quantity.
If we really combined them, we could lose if the pseudo lives
- across an insn that clobbers the hard reg (eg, movstr).
+ across an insn that clobbers the hard reg (e.g., movmem).
ALREADY_DEAD is nonzero if USEDREG is known to be dead even though
there is no REG_DEAD note on INSN. This occurs during the processing
for (i = 0; i < NUM_MACHINE_MODES; i++)
{
- movstr_optab[i] = CODE_FOR_nothing;
- clrstr_optab[i] = CODE_FOR_nothing;
+ movmem_optab[i] = CODE_FOR_nothing;
+ clrmem_optab[i] = CODE_FOR_nothing;
cmpstr_optab[i] = CODE_FOR_nothing;
cmpmem_optab[i] = CODE_FOR_nothing;
#endif
/* This array records the insn_code of insns to perform block moves. */
-extern enum insn_code movstr_optab[NUM_MACHINE_MODES];
+extern enum insn_code movmem_optab[NUM_MACHINE_MODES];
/* This array records the insn_code of insns to perform block clears. */
-extern enum insn_code clrstr_optab[NUM_MACHINE_MODES];
+extern enum insn_code clrmem_optab[NUM_MACHINE_MODES];
/* These arrays record the insn_code of two different kinds of insns
to perform block compares. */
+2004-07-07 Alexandre Oliva <aoliva@redhat.com>
+
+ * gcc.c-torture/execute/builtins/mempcpy-2.c: Rename movstr*,
+ except for movstrict*, to movmem*, and clrstr* to clrmem*.
+
2004-07-07 Richard Sandiford <rsandifo@redhat.com>
* gcc.c-torture/execute/20040707-1.c: New test.
__builtin_memcpy (buf3, "aBcdEFghijklmnopq\0", 19);
- /* These should be handled either by movstrendM or mempcpy
+ /* These should be handled either by movmemendM or a mempcpy
call. */
if (mempcpy ((char *) buf3 + 4, buf5, n + 6) != (char *) buf1 + 10
|| memcmp (buf1, "aBcdRSTUVWklmnopq\0", 19))
__builtin_memcpy (buf4, "aBcdEFghijklmnopq\0", 19);
- /* These should be handled either by movstrendM or mempcpy
+ /* These should be handled either by movmemendM or a mempcpy
call. */
if (mempcpy (buf4 + 4, buf7, n + 6) != buf2 + 10
|| memcmp (buf2, "aBcdRSTUVWklmnopq\0", 19))