From f8f667be749428f92a33d6c4ff8b56538f958c10 Mon Sep 17 00:00:00 2001
From: Richard Sandiford
Date: Thu, 21 Dec 2017 07:01:30 +0000
Subject: [PATCH] poly_int: emit_group_load/store

This patch changes the sizes passed to emit_group_load and
emit_group_store from int to poly_int64.

2017-12-21  Richard Sandiford
	    Alan Hayward
	    David Sherwood

gcc/
	* expr.h (emit_group_load, emit_group_load_into_temps)
	(emit_group_store): Take the size as a poly_int64 rather than an int.
	* expr.c (emit_group_load_1, emit_group_load): Likewise.
	(emit_group_load_into_temps, emit_group_store): Likewise.

Co-Authored-By: Alan Hayward
Co-Authored-By: David Sherwood

From-SVN: r255925
---
 gcc/ChangeLog |  9 ++++++
 gcc/expr.c    | 90 ++++++++++++++++++++++++++++-----------------------
 gcc/expr.h    |  6 ++--
 3 files changed, 62 insertions(+), 43 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index bdfcb0cb660..9c985b24797 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,12 @@
+2017-12-21  Richard Sandiford
+	    Alan Hayward
+	    David Sherwood
+
+	* expr.h (emit_group_load, emit_group_load_into_temps)
+	(emit_group_store): Take the size as a poly_int64 rather than an int.
+	* expr.c (emit_group_load_1, emit_group_load): Likewise.
+	(emit_group_load_into_temps, emit_group_store): Likewise.
+
 2017-12-21  Richard Sandiford
 	    Alan Hayward
 	    David Sherwood
diff --git a/gcc/expr.c b/gcc/expr.c
index 06b6f7760a8..8a1227908fa 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -2095,7 +2095,8 @@ gen_group_rtx (rtx orig)
    into corresponding XEXP (XVECEXP (DST, 0, i), 0) element.  */
 
 static void
-emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
+emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type,
+		   poly_int64 ssize)
 {
   rtx src;
   int start, i;
@@ -2134,12 +2135,16 @@ emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
   for (i = start; i < XVECLEN (dst, 0); i++)
     {
       machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
-      HOST_WIDE_INT bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1));
-      unsigned int bytelen = GET_MODE_SIZE (mode);
-      int shift = 0;
+      poly_int64 bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1));
+      poly_int64 bytelen = GET_MODE_SIZE (mode);
+      poly_int64 shift = 0;
 
-      /* Handle trailing fragments that run over the size of the struct.  */
-      if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
+      /* Handle trailing fragments that run over the size of the struct.
+	 It's the target's responsibility to make sure that the fragment
+	 cannot be strictly smaller in some cases and strictly larger
+	 in others.  */
+      gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
+      if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
 	{
 	  /* Arrange to shift the fragment to where it belongs.
 	     extract_bit_field loads to the lsb of the reg.  */
@@ -2153,7 +2158,7 @@ emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
 	      )
 	    shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
 	  bytelen = ssize - bytepos;
-	  gcc_assert (bytelen > 0);
+	  gcc_assert (maybe_gt (bytelen, 0));
 	}
 
       /* If we won't be loading directly from memory, protect the real source
@@ -2177,33 +2182,34 @@ emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
       if (MEM_P (src)
	  && (! targetm.slow_unaligned_access (mode, MEM_ALIGN (src))
	      || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
-	  && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
-	  && bytelen == GET_MODE_SIZE (mode))
+	  && multiple_p (bytepos * BITS_PER_UNIT, GET_MODE_ALIGNMENT (mode))
+	  && known_eq (bytelen, GET_MODE_SIZE (mode)))
 	{
 	  tmps[i] = gen_reg_rtx (mode);
 	  emit_move_insn (tmps[i], adjust_address (src, mode, bytepos));
 	}
       else if (COMPLEX_MODE_P (mode)
 	       && GET_MODE (src) == mode
-	       && bytelen == GET_MODE_SIZE (mode))
+	       && known_eq (bytelen, GET_MODE_SIZE (mode)))
 	/* Let emit_move_complex do the bulk of the work.  */
 	tmps[i] = src;
       else if (GET_CODE (src) == CONCAT)
 	{
-	  unsigned int slen = GET_MODE_SIZE (GET_MODE (src));
-	  unsigned int slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
-	  unsigned int elt = bytepos / slen0;
-	  unsigned int subpos = bytepos % slen0;
+	  poly_int64 slen = GET_MODE_SIZE (GET_MODE (src));
+	  poly_int64 slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
+	  unsigned int elt;
+	  poly_int64 subpos;
 
-	  if (subpos + bytelen <= slen0)
+	  if (can_div_trunc_p (bytepos, slen0, &elt, &subpos)
+	      && known_le (subpos + bytelen, slen0))
 	    {
 	      /* The following assumes that the concatenated objects all
 		 have the same size.  In this case, a simple calculation
 		 can be used to determine the object and the bit field
 		 to be extracted.  */
 	      tmps[i] = XEXP (src, elt);
-	      if (subpos != 0
-		  || subpos + bytelen != slen0
+	      if (maybe_ne (subpos, 0)
+		  || maybe_ne (subpos + bytelen, slen0)
 		  || (!CONSTANT_P (tmps[i])
 		      && (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode)))
 		tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
@@ -2215,7 +2221,7 @@ emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
 	    {
 	      rtx mem;
 
-	      gcc_assert (!bytepos);
+	      gcc_assert (known_eq (bytepos, 0));
 	      mem = assign_stack_temp (GET_MODE (src), slen);
 	      emit_move_insn (mem, src);
 	      tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
@@ -2234,23 +2240,21 @@ emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
 
 	  mem = assign_stack_temp (GET_MODE (src), slen);
 	  emit_move_insn (mem, src);
-	  tmps[i] = adjust_address (mem, mode, (int) bytepos);
+	  tmps[i] = adjust_address (mem, mode, bytepos);
 	}
       else if (CONSTANT_P (src) && GET_MODE (dst) != BLKmode
 	       && XVECLEN (dst, 0) > 1)
 	tmps[i] = simplify_gen_subreg (mode, src, GET_MODE (dst), bytepos);
       else if (CONSTANT_P (src))
 	{
-	  HOST_WIDE_INT len = (HOST_WIDE_INT) bytelen;
-
-	  if (len == ssize)
+	  if (known_eq (bytelen, ssize))
 	    tmps[i] = src;
 	  else
 	    {
 	      rtx first, second;
 
 	      /* TODO: const_wide_int can have sizes other than this...  */
-	      gcc_assert (2 * len == ssize);
+	      gcc_assert (known_eq (2 * bytelen, ssize));
 	      split_double (src, &first, &second);
 	      if (i)
 		tmps[i] = second;
@@ -2265,7 +2269,7 @@ emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
 				     bytepos * BITS_PER_UNIT, 1, NULL_RTX,
 				     mode, mode, false, NULL);
 
-      if (shift)
+      if (maybe_ne (shift, 0))
 	tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
 				shift, tmps[i], 0);
     }
@@ -2277,7 +2281,7 @@ emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
    if not known.  */
 
 void
-emit_group_load (rtx dst, rtx src, tree type, int ssize)
+emit_group_load (rtx dst, rtx src, tree type, poly_int64 ssize)
 {
   rtx *tmps;
   int i;
@@ -2300,7 +2304,7 @@ emit_group_load (rtx dst, rtx src, tree type, int ssize)
    in the right place.  */
 
 rtx
-emit_group_load_into_temps (rtx parallel, rtx src, tree type, int ssize)
+emit_group_load_into_temps (rtx parallel, rtx src, tree type, poly_int64 ssize)
 {
   rtvec vec;
   int i;
@@ -2371,7 +2375,8 @@ emit_group_move_into_temps (rtx src)
    known.  */
 
 void
-emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, int ssize)
+emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED,
+		  poly_int64 ssize)
 {
   rtx *tmps, dst;
   int start, finish, i;
@@ -2502,24 +2507,28 @@ emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, int ssize)
   /* Process the pieces.  */
   for (i = start; i < finish; i++)
     {
-      HOST_WIDE_INT bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1));
+      poly_int64 bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1));
       machine_mode mode = GET_MODE (tmps[i]);
-      unsigned int bytelen = GET_MODE_SIZE (mode);
-      unsigned int adj_bytelen;
+      poly_int64 bytelen = GET_MODE_SIZE (mode);
+      poly_uint64 adj_bytelen;
       rtx dest = dst;
 
-      /* Handle trailing fragments that run over the size of the struct.  */
-      if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
+      /* Handle trailing fragments that run over the size of the struct.
+	 It's the target's responsibility to make sure that the fragment
+	 cannot be strictly smaller in some cases and strictly larger
+	 in others.  */
+      gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
+      if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
 	adj_bytelen = ssize - bytepos;
       else
 	adj_bytelen = bytelen;
 
       if (GET_CODE (dst) == CONCAT)
 	{
-	  if (bytepos + adj_bytelen
-	      <= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0))))
+	  if (known_le (bytepos + adj_bytelen,
+			GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
 	    dest = XEXP (dst, 0);
-	  else if (bytepos >= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0))))
+	  else if (known_ge (bytepos, GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
 	    {
 	      bytepos -= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)));
 	      dest = XEXP (dst, 1);
@@ -2529,7 +2538,7 @@ emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, int ssize)
 	      machine_mode dest_mode = GET_MODE (dest);
 	      machine_mode tmp_mode = GET_MODE (tmps[i]);
 
-	      gcc_assert (bytepos == 0 && XVECLEN (src, 0));
+	      gcc_assert (known_eq (bytepos, 0) && XVECLEN (src, 0));
 
 	      if (GET_MODE_ALIGNMENT (dest_mode)
 		  >= GET_MODE_ALIGNMENT (tmp_mode))
@@ -2554,7 +2563,7 @@ emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, int ssize)
 	}
 
       /* Handle trailing fragments that run over the size of the struct.  */
-      if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
+      if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
 	{
 	  /* store_bit_field always takes its value from the lsb.
 	     Move the fragment to the lsb if it's not already there.  */
@@ -2567,7 +2576,7 @@ emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, int ssize)
 #endif
 	      )
 	    {
-	      int shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
+	      poly_int64 shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
 	      tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
 				      shift, tmps[i], 0);
 	    }
@@ -2583,8 +2592,9 @@ emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, int ssize)
       else if (MEM_P (dest)
 	       && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (dest))
 		   || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
-	       && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
-	       && bytelen == GET_MODE_SIZE (mode))
+	       && multiple_p (bytepos * BITS_PER_UNIT,
+			      GET_MODE_ALIGNMENT (mode))
+	       && known_eq (bytelen, GET_MODE_SIZE (mode)))
 	emit_move_insn (adjust_address (dest, mode, bytepos),
 			tmps[i]);
       else
diff --git a/gcc/expr.h b/gcc/expr.h
index 92a6b096a5a..105c30e6551 100644
--- a/gcc/expr.h
+++ b/gcc/expr.h
@@ -128,10 +128,10 @@ extern rtx gen_group_rtx (rtx);
 
 /* Load a BLKmode value into non-consecutive registers represented
    by a PARALLEL.  */
-extern void emit_group_load (rtx, rtx, tree, int);
+extern void emit_group_load (rtx, rtx, tree, poly_int64);
 
 /* Similarly, but load into new temporaries.  */
-extern rtx emit_group_load_into_temps (rtx, rtx, tree, int);
+extern rtx emit_group_load_into_temps (rtx, rtx, tree, poly_int64);
 
 /* Move a non-consecutive group of registers represented by a PARALLEL
    into a non-consecutive group of registers represented by a PARALLEL.  */
@@ -142,7 +142,7 @@ extern rtx emit_group_move_into_temps (rtx);
 
 /* Store a BLKmode value from non-consecutive registers represented
    by a PARALLEL.  */
-extern void emit_group_store (rtx, rtx, tree, int);
+extern void emit_group_store (rtx, rtx, tree, poly_int64);
 
 extern rtx maybe_emit_group_store (rtx, tree);
 
-- 
2.30.2
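
For readers unfamiliar with the poly_int comparison predicates the patch switches to
(known_eq, maybe_gt, maybe_ne, known_le, known_ge, multiple_p, can_div_trunc_p),
the sketch below models their semantics.  It is not GCC's poly-int.h; poly_model,
c0 and c1 are illustrative names only, standing for a size of the form
c0 + c1 * N where N is an unknown nonnegative runtime value (for example the
scale of a scalable vector).  A plain "a > b" is not meaningful for such sizes,
so the trailing-fragment tests above ask "greater for at least one N" (maybe_gt)
and the equality tests ask "equal for every N" (known_eq).

/* Minimal, self-contained model of the comparison semantics; an
   illustration only, not GCC's poly-int.h.  A value is c0 + c1 * N
   for an unknown runtime value N >= 0.  */

#include <cassert>
#include <cstdint>

struct poly_model
{
  int64_t c0;  /* constant part */
  int64_t c1;  /* coefficient of the unknown runtime factor N */
};

/* True if A > B for at least one N >= 0.  */
static bool
maybe_gt (poly_model a, poly_model b)
{
  return a.c0 > b.c0 || a.c1 > b.c1;
}

/* True if A == B for every N >= 0.  */
static bool
known_eq (poly_model a, poly_model b)
{
  return a.c0 == b.c0 && a.c1 == b.c1;
}

int
main ()
{
  poly_model ssize = { 0, 16 };     /* a struct of 16 * N bytes */
  poly_model frag_end = { 16, 0 };  /* a fragment ending at byte 16 */

  /* The fragment may overrun the struct (N == 0) but need not (N >= 1),
     so the trailing-fragment path has to trigger on "maybe".  */
  assert (maybe_gt (frag_end, ssize));
  assert (!known_eq (frag_end, ssize));

  /* A fragment that ends exactly at 16 * N bytes can never overrun.  */
  poly_model whole = { 0, 16 };
  assert (!maybe_gt (whole, ssize));
  assert (known_eq (whole, ssize));
  return 0;
}

Note that frag_end and ssize in this model are also not ordered: the fragment end
is larger at N == 0 and smaller at N >= 2.  That is the situation the new
gcc_checking_assert (ordered_p (bytepos + bytelen, ssize)) rejects, which is why
the added comments say it is the target's responsibility to ensure a fragment
cannot be strictly smaller in some cases and strictly larger in others.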