From: Uros Bizjak
Date: Thu, 12 Nov 2015 08:11:11 +0000 (+0100)
Subject: i386.c (ix86_legitimate_combined_insn): Reject combined insn if the alignment of...
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=c471289ac8af8608dab424693d9411be6042ded6;p=gcc.git

i386.c (ix86_legitimate_combined_insn): Reject combined insn if the alignment of vector mode memory operand is less...

	* config/i386/i386.c (ix86_legitimate_combined_insn): Reject
	combined insn if the alignment of vector mode memory operand
	is less than ssememalign.

testsuite/ChangeLog:

	* gcc.target/i386/sse-1.c (swizzle): Assume that a is
	aligned to 64 bits.

From-SVN: r230215
---

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 9ab4c6ac458..815bb089575 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,12 +1,18 @@
+2015-11-12  Uros Bizjak
+
+	* config/i386/i386.c (ix86_legitimate_combined_insn): Reject
+	combined insn if the alignment of vector mode memory operand
+	is less than ssememalign.
+
 2015-11-12  Tom de Vries
 
-	* gen-pass-instances.awk (handle_line): Print parentheses and pass_name
-	explicitly.
+	* gen-pass-instances.awk (handle_line): Print parentheses and
+	pass_name explicitly.
 
 2015-11-12  Tom de Vries
 
-	* gen-pass-instances.awk (handle_line): Add pass_num, prefix and postfix
-	vars.
+	* gen-pass-instances.awk (handle_line): Add pass_num, prefix
+	and postfix vars.
 
 2015-11-12  Tom de Vries
 
@@ -45,7 +51,7 @@
 	Move Convert C1/(X*C2) into (C1/C2)/X to match.pd.
 	Move Optimize (X & (-A)) / A where A is a power of 2, to
 	X >> log2(A) to match.pd.
-	
+
 	* match.pd (rdiv (rdiv:s @0 @1) @2): New simplifier.
 	(rdiv @0 (rdiv:s @1 @2)): New simplifier.
 	(div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2):
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index baa0e031c83..d048b19f385 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -7236,11 +7236,12 @@ ix86_legitimate_combined_insn (rtx_insn *insn)
 	  /* For pre-AVX disallow unaligned loads/stores where the
	     instructions don't support it.  */
 	  if (!TARGET_AVX
-	      && VECTOR_MODE_P (GET_MODE (op))
-	      && misaligned_operand (op, GET_MODE (op)))
+	      && VECTOR_MODE_P (mode)
+	      && misaligned_operand (op, mode))
 	    {
-	      int min_align = get_attr_ssememalign (insn);
-	      if (min_align == 0)
+	      unsigned int min_align = get_attr_ssememalign (insn);
+	      if (min_align == 0
+		  || MEM_ALIGN (op) < min_align)
 		return false;
 	    }
 
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 57384e2fae6..a7326b56eac 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,8 @@
+2015-11-12  Uros Bizjak
+
+	* gcc.target/i386/sse-1.c (swizzle): Assume that a is
+	aligned to 64 bits.
+
 2015-11-11  David Edelsohn
 
 	* gcc.dg/pr65521.c: Fail on AIX.
diff --git a/gcc/testsuite/gcc.target/i386/sse-1.c b/gcc/testsuite/gcc.target/i386/sse-1.c
index afae22d3705..15d38f9d5dd 100644
--- a/gcc/testsuite/gcc.target/i386/sse-1.c
+++ b/gcc/testsuite/gcc.target/i386/sse-1.c
@@ -14,8 +14,10 @@ typedef union
 void
 swizzle (const void *a, vector4_t * b, vector4_t * c)
 {
-  b->v = _mm_loadl_pi (b->v, (__m64 *) a);
-  c->v = _mm_loadl_pi (c->v, ((__m64 *) a) + 1);
+  __m64 *t = __builtin_assume_aligned (a, 64);
+
+  b->v = _mm_loadl_pi (b->v, t);
+  c->v = _mm_loadl_pi (c->v, t + 1);
 }
 
 /* While one legal rendering of each statement would be movaps;movlps;movaps,