From 5d057bfeff70e5b8d00e521844c476f62d51e22c Mon Sep 17 00:00:00 2001
From: Jakub Jelinek <jakub@redhat.com>
Date: Wed, 13 Jan 2021 10:15:13 +0100
Subject: [PATCH] i386: Add define_insn_and_split patterns for btrl [PR96938]

In the following testcase we only optimize f2 and f7 to btrl, although we
should optimize that way all of the functions.  The problem is the type
demotion/narrowing (which is performed solely during the generic folding and
not later), without it we see the AND performed in SImode and match it as
btrl, but with it while the shifts are still performed in SImode, the AND is
already done in QImode or HImode low part of the shift.

2021-01-13  Jakub Jelinek  <jakub@redhat.com>

	PR target/96938
	* config/i386/i386.md (*btr<mode>_1, *btr<mode>_2): New
	define_insn_and_split patterns.
	(splitter after *btr<mode>_2): New splitter.

	* gcc.target/i386/pr96938.c: New test.
---
 gcc/config/i386/i386.md                 | 65 ++++++++++++++++++++++++
 gcc/testsuite/gcc.target/i386/pr96938.c | 66 +++++++++++++++++++++++++
 2 files changed, 131 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/i386/pr96938.c

diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index c1023123d5c..b60784a2908 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -12419,6 +12419,71 @@
 	    (match_dup 3)))
 	  (clobber (reg:CC FLAGS_REG))])])
 
+(define_insn_and_split "*btr<mode>_1"
+  [(set (match_operand:SWI12 0 "register_operand")
+	(and:SWI12
+	  (subreg:SWI12
+	    (rotate:SI (const_int -2)
+		       (match_operand:QI 2 "register_operand")) 0)
+	  (match_operand:SWI12 1 "nonimmediate_operand")))
+   (clobber (reg:CC FLAGS_REG))]
+  "TARGET_USE_BT && ix86_pre_reload_split ()"
+  "#"
+  "&& 1"
+  [(parallel
+     [(set (match_dup 0)
+	   (and:SI (rotate:SI (const_int -2) (match_dup 2))
+		   (match_dup 1)))
+      (clobber (reg:CC FLAGS_REG))])]
+{
+  operands[0] = lowpart_subreg (SImode, operands[0], <MODE>mode);
+  if (MEM_P (operands[1]))
+    operands[1] = force_reg (<MODE>mode, operands[1]);
+  operands[1] = lowpart_subreg (SImode, operands[1], <MODE>mode);
+})
+
+(define_insn_and_split "*btr<mode>_2"
+  [(set (zero_extract:HI
+	  (match_operand:SWI12 0 "nonimmediate_operand")
+	  (const_int 1)
+	  (zero_extend:SI (match_operand:QI 1 "register_operand")))
+	(const_int 0))
+   (clobber (reg:CC FLAGS_REG))]
+  "TARGET_USE_BT && ix86_pre_reload_split ()"
+  "#"
+  "&& MEM_P (operands[0])"
+  [(set (match_dup 2) (match_dup 0))
+   (parallel
+     [(set (match_dup 3)
+	   (and:SI (rotate:SI (const_int -2) (match_dup 1))
+		   (match_dup 4)))
+      (clobber (reg:CC FLAGS_REG))])
+   (set (match_dup 0) (match_dup 5))]
+{
+  operands[2] = gen_reg_rtx (<MODE>mode);
+  operands[5] = gen_reg_rtx (<MODE>mode);
+  operands[3] = lowpart_subreg (SImode, operands[5], <MODE>mode);
+  operands[4] = lowpart_subreg (SImode, operands[2], <MODE>mode);
+})
+
+(define_split
+  [(set (zero_extract:HI
+	  (match_operand:SWI12 0 "register_operand")
+	  (const_int 1)
+	  (zero_extend:SI (match_operand:QI 1 "register_operand")))
+	(const_int 0))
+   (clobber (reg:CC FLAGS_REG))]
+  "TARGET_USE_BT && ix86_pre_reload_split ()"
+  [(parallel
+     [(set (match_dup 0)
+	   (and:SI (rotate:SI (const_int -2) (match_dup 1))
+		   (match_dup 2)))
+      (clobber (reg:CC FLAGS_REG))])]
+{
+  operands[2] = lowpart_subreg (SImode, operands[0], <MODE>mode);
+  operands[0] = lowpart_subreg (SImode, operands[0], <MODE>mode);
+})
+
 ;; These instructions are never faster than the corresponding
 ;; and/ior/xor operations when using immediate operand, so with
 ;; 32-bit there's no point.
But in 64-bit, we can't hold the
diff --git a/gcc/testsuite/gcc.target/i386/pr96938.c b/gcc/testsuite/gcc.target/i386/pr96938.c
new file mode 100644
index 00000000000..832cdd4737a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr96938.c
@@ -0,0 +1,66 @@
+/* PR target/96938 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -masm=att" } */
+/* { dg-final { scan-assembler-times "\tbtrl\t" 10 } } */
+
+void
+f1 (unsigned char *f, int o, unsigned char v)
+{
+  *f = (*f & ~(1 << o)) | (v << o);
+}
+
+void
+f2 (unsigned char *f, int o, unsigned char v)
+{
+  int t = *f & ~(1 << o);
+  *f = t | (v << o);
+}
+
+void
+f3 (unsigned char *f, int o, unsigned char v)
+{
+  *f &= ~(1 << o);
+}
+
+void
+f4 (unsigned char *f, int o, unsigned char v)
+{
+  *f = (*f & ~(1 << (o & 31))) | v;
+}
+
+void
+f5 (unsigned char *f, int o, unsigned char v)
+{
+  *f = (*f & ~(1 << (o & 31))) | (v << (o & 31));
+}
+
+void
+f6 (unsigned short *f, int o, unsigned short v)
+{
+  *f = (*f & ~(1 << o)) | (v << o);
+}
+
+void
+f7 (unsigned short *f, int o, unsigned short v)
+{
+  int t = *f & ~(1 << o);
+  *f = t | (v << o);
+}
+
+void
+f8 (unsigned short *f, int o, unsigned short v)
+{
+  *f &= ~(1 << o);
+}
+
+void
+f9 (unsigned short *f, int o, unsigned short v)
+{
+  *f = (*f & ~(1 << (o & 31))) | v;
+}
+
+void
+f10 (unsigned short *f, int o, unsigned short v)
+{
+  *f = (*f & ~(1 << (o & 31))) | (v << (o & 31));
+}
-- 
2.30.2