* config/i386/constraints.md (Yv): New constraint.
* config/i386/i386.h (VALID_AVX512VL_128_REG_MODE): Allow
TFmode and V1TImode in xmm16+ registers for TARGET_AVX512VL.
* config/i386/i386.md (avx512fvecmode): New mode attr.
(*pushtf): Use v constraint instead of x.
(*movtf_internal): Likewise. For TARGET_AVX512VL and
xmm16+ registers, use vmovdqu64 or vmovdqa64 instructions.
(*absneg<mode>2): Use Yv constraint instead of x constraint.
(*absnegtf2_sse): Likewise.
(copysign<mode>3_const, copysign<mode>3_var): Likewise.
* config/i386/sse.md (*andnot<mode>3): Add avx512vl and
avx512f alternatives.
(*andnottf3, *<code><mode>3, *<code>tf3): Likewise.
* gcc.target/i386/avx512dq-abs-copysign-1.c: New test.
* gcc.target/i386/avx512vl-abs-copysign-1.c: New test.
* gcc.target/i386/avx512vl-abs-copysign-2.c: New test.
From-SVN: r236161
+2016-05-12 Jakub Jelinek <jakub@redhat.com>
+
+ * config/i386/constraints.md (Yv): New constraint.
+ * config/i386/i386.h (VALID_AVX512VL_128_REG_MODE): Allow
+ TFmode and V1TImode in xmm16+ registers for TARGET_AVX512VL.
+ * config/i386/i386.md (avx512fvecmode): New mode attr.
+ (*pushtf): Use v constraint instead of x.
+ (*movtf_internal): Likewise. For TARGET_AVX512VL and
+ xmm16+ registers, use vmovdqu64 or vmovdqa64 instructions.
+ (*absneg<mode>2): Use Yv constraint instead of x constraint.
+ (*absnegtf2_sse): Likewise.
+ (copysign<mode>3_const, copysign<mode>3_var): Likewise.
+ * config/i386/sse.md (*andnot<mode>3): Add avx512vl and
+ avx512f alternatives.
+ (*andnottf3, *<code><mode>3, *<code>tf3): Likewise.
+
2016-05-12 Richard Biener <rguenther@suse.de>
PR tree-optimization/71060
"TARGET_SSE ? (X86_TUNE_AVOID_4BYTE_PREFIXES ? NO_REX_SSE_REGS : ALL_SSE_REGS) : NO_REGS"
"@internal Lower SSE register when avoiding REX prefix and all SSE registers otherwise.")
+(define_register_constraint "Yv"
+ "TARGET_AVX512VL ? ALL_SSE_REGS : TARGET_SSE ? SSE_REGS : NO_REGS"
+ "@internal For AVX512VL, any EVEX encodable SSE register (@code{%xmm0-%xmm31}), otherwise any SSE register.")
+
;; We use the B prefix to denote any number of internal operands:
;; f FLAGS_REG
;; g GOT memory operand.
#define VALID_AVX512VL_128_REG_MODE(MODE) \
((MODE) == V2DImode || (MODE) == V2DFmode || (MODE) == V16QImode \
- || (MODE) == V4SImode || (MODE) == V4SFmode || (MODE) == V8HImode)
+ || (MODE) == V4SImode || (MODE) == V4SFmode || (MODE) == V8HImode \
+ || (MODE) == TFmode || (MODE) == V1TImode)
#define VALID_SSE2_REG_MODE(MODE) \
((MODE) == V16QImode || (MODE) == V8HImode || (MODE) == V2DFmode \
(define_mode_attr ssevecmodelower
[(QI "v16qi") (HI "v8hi") (SI "v4si") (DI "v2di") (SF "v4sf") (DF "v2df")])
+;; AVX512F vector mode corresponding to a scalar mode
+(define_mode_attr avx512fvecmode
+ [(QI "V64QI") (HI "V32HI") (SI "V16SI") (DI "V8DI") (SF "V16SF") (DF "V8DF")])
+
;; Instruction suffix for REX 64bit operators.
(define_mode_attr rex64suffix [(SI "") (DI "{q}")])
(define_insn "*pushtf"
[(set (match_operand:TF 0 "push_operand" "=<,<")
- (match_operand:TF 1 "general_no_elim_operand" "x,*roF"))]
+ (match_operand:TF 1 "general_no_elim_operand" "v,*roF"))]
"TARGET_64BIT || TARGET_SSE"
{
/* This insn should be already split before reg-stack. */
"ix86_expand_move (<MODE>mode, operands); DONE;")
(define_insn "*movtf_internal"
- [(set (match_operand:TF 0 "nonimmediate_operand" "=x,x ,m,?*r ,!o")
- (match_operand:TF 1 "general_operand" "C ,xm,x,*roF,*rC"))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=v,v ,m,?*r ,!o")
+ (match_operand:TF 1 "general_operand" "C ,vm,v,*roF,*rC"))]
"(TARGET_64BIT || TARGET_SSE)
&& !(MEM_P (operands[0]) && MEM_P (operands[1]))
&& (!can_create_pseudo_p ()
{
if (get_attr_mode (insn) == MODE_V4SF)
return "%vmovups\t{%1, %0|%0, %1}";
+ else if (TARGET_AVX512VL
+ && (EXT_REX_SSE_REG_P (operands[0])
+ || EXT_REX_SSE_REG_P (operands[1])))
+ return "vmovdqu64\t{%1, %0|%0, %1}";
else
return "%vmovdqu\t{%1, %0|%0, %1}";
}
{
if (get_attr_mode (insn) == MODE_V4SF)
return "%vmovaps\t{%1, %0|%0, %1}";
+ else if (TARGET_AVX512VL
+ && (EXT_REX_SSE_REG_P (operands[0])
+ || EXT_REX_SSE_REG_P (operands[1])))
+ return "vmovdqa64\t{%1, %0|%0, %1}";
else
return "%vmovdqa\t{%1, %0|%0, %1}";
}
"ix86_expand_fp_absneg_operator (<CODE>, <MODE>mode, operands); DONE;")
(define_insn "*absneg<mode>2"
- [(set (match_operand:MODEF 0 "register_operand" "=x,x,f,!r")
+ [(set (match_operand:MODEF 0 "register_operand" "=Yv,Yv,f,!r")
(match_operator:MODEF 3 "absneg_operator"
- [(match_operand:MODEF 1 "register_operand" "0,x,0,0")]))
- (use (match_operand:<ssevecmode> 2 "nonimmediate_operand" "xm,0,X,X"))
+ [(match_operand:MODEF 1 "register_operand" "0,Yv,0,0")]))
+ (use (match_operand:<ssevecmode> 2 "nonimmediate_operand" "Yvm,0,X,X"))
(clobber (reg:CC FLAGS_REG))]
"TARGET_80387 || (SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH)"
"#"
"ix86_expand_fp_absneg_operator (<CODE>, TFmode, operands); DONE;")
(define_insn "*absnegtf2_sse"
- [(set (match_operand:TF 0 "register_operand" "=x,x")
+ [(set (match_operand:TF 0 "register_operand" "=Yv,Yv")
(match_operator:TF 3 "absneg_operator"
- [(match_operand:TF 1 "register_operand" "0,x")]))
- (use (match_operand:TF 2 "nonimmediate_operand" "xm,0"))
+ [(match_operand:TF 1 "register_operand" "0,Yv")]))
+ (use (match_operand:TF 2 "nonimmediate_operand" "Yvm,0"))
(clobber (reg:CC FLAGS_REG))]
"TARGET_SSE"
"#")
"ix86_expand_copysign (operands); DONE;")
(define_insn_and_split "copysign<mode>3_const"
- [(set (match_operand:CSGNMODE 0 "register_operand" "=x")
+ [(set (match_operand:CSGNMODE 0 "register_operand" "=Yv")
(unspec:CSGNMODE
- [(match_operand:<CSGNVMODE> 1 "vector_move_operand" "xmC")
+ [(match_operand:<CSGNVMODE> 1 "vector_move_operand" "YvmC")
(match_operand:CSGNMODE 2 "register_operand" "0")
- (match_operand:<CSGNVMODE> 3 "nonimmediate_operand" "xm")]
+ (match_operand:<CSGNVMODE> 3 "nonimmediate_operand" "Yvm")]
UNSPEC_COPYSIGN))]
"(SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH)
|| (TARGET_SSE && (<MODE>mode == TFmode))"
"ix86_split_copysign_const (operands); DONE;")
(define_insn "copysign<mode>3_var"
- [(set (match_operand:CSGNMODE 0 "register_operand" "=x,x,x,x,x")
+ [(set (match_operand:CSGNMODE 0 "register_operand" "=Yv,Yv,Yv,Yv,Yv")
(unspec:CSGNMODE
- [(match_operand:CSGNMODE 2 "register_operand" "x,0,0,x,x")
- (match_operand:CSGNMODE 3 "register_operand" "1,1,x,1,x")
- (match_operand:<CSGNVMODE> 4 "nonimmediate_operand" "X,xm,xm,0,0")
- (match_operand:<CSGNVMODE> 5 "nonimmediate_operand" "0,xm,1,xm,1")]
+ [(match_operand:CSGNMODE 2 "register_operand" "Yv,0,0,Yv,Yv")
+ (match_operand:CSGNMODE 3 "register_operand" "1,1,Yv,1,Yv")
+ (match_operand:<CSGNVMODE> 4
+ "nonimmediate_operand" "X,Yvm,Yvm,0,0")
+ (match_operand:<CSGNVMODE> 5
+ "nonimmediate_operand" "0,Yvm,1,Yvm,1")]
UNSPEC_COPYSIGN))
- (clobber (match_scratch:<CSGNVMODE> 1 "=x,x,x,x,x"))]
+ (clobber (match_scratch:<CSGNVMODE> 1 "=Yv,Yv,Yv,Yv,Yv"))]
"(SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH)
|| (TARGET_SSE && (<MODE>mode == TFmode))"
"#")
;; because the native instructions read the full 128-bits.
(define_insn "*andnot<mode>3"
- [(set (match_operand:MODEF 0 "register_operand" "=x,x")
+ [(set (match_operand:MODEF 0 "register_operand" "=x,x,v,v")
(and:MODEF
(not:MODEF
- (match_operand:MODEF 1 "register_operand" "0,x"))
- (match_operand:MODEF 2 "register_operand" "x,x")))]
+ (match_operand:MODEF 1 "register_operand" "0,x,v,v"))
+ (match_operand:MODEF 2 "register_operand" "x,x,v,v")))]
"SSE_FLOAT_MODE_P (<MODE>mode)"
{
static char buf[32];
case 1:
ops = "vandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
break;
+ case 2:
+ if (TARGET_AVX512DQ)
+ ops = "vandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
+ else
+ {
+ suffix = <MODE>mode == DFmode ? "q" : "d";
+ ops = "vpandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
+ }
+ break;
+ case 3:
+ if (TARGET_AVX512DQ)
+ ops = "vandn%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}";
+ else
+ {
+ suffix = <MODE>mode == DFmode ? "q" : "d";
+ ops = "vpandn%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}";
+ }
+ break;
default:
gcc_unreachable ();
}
snprintf (buf, sizeof (buf), ops, suffix);
return buf;
}
- [(set_attr "isa" "noavx,avx")
+ [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
(set_attr "type" "sselog")
- (set_attr "prefix" "orig,vex")
+ (set_attr "prefix" "orig,vex,evex,evex")
(set (attr "mode")
- (cond [(and (match_test "<MODE_SIZE> == 16")
+ (cond [(eq_attr "alternative" "2")
+ (if_then_else (match_test "TARGET_AVX512DQ")
+ (const_string "<ssevecmode>")
+ (const_string "TI"))
+ (eq_attr "alternative" "3")
+ (if_then_else (match_test "TARGET_AVX512DQ")
+ (const_string "<avx512fvecmode>")
+ (const_string "XI"))
+ (and (match_test "<MODE_SIZE> == 16")
(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL"))
(const_string "V4SF")
(match_test "TARGET_AVX")
(const_string "<ssevecmode>")))])
(define_insn "*andnottf3"
- [(set (match_operand:TF 0 "register_operand" "=x,x")
+ [(set (match_operand:TF 0 "register_operand" "=x,x,v,v")
(and:TF
- (not:TF (match_operand:TF 1 "register_operand" "0,x"))
- (match_operand:TF 2 "vector_operand" "xBm,xm")))]
+ (not:TF (match_operand:TF 1 "register_operand" "0,x,v,v"))
+ (match_operand:TF 2 "vector_operand" "xBm,xm,vm,v")))]
"TARGET_SSE"
{
static char buf[32];
const char *ops;
const char *tmp
- = (get_attr_mode (insn) == MODE_V4SF) ? "andnps" : "pandn";
+ = (which_alternative >= 2 ? "pandnq"
+ : get_attr_mode (insn) == MODE_V4SF ? "andnps" : "pandn");
switch (which_alternative)
{
ops = "%s\t{%%2, %%0|%%0, %%2}";
break;
case 1:
+ case 2:
ops = "v%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
break;
+ case 3:
+ ops = "v%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}";
+ break;
default:
gcc_unreachable ();
}
snprintf (buf, sizeof (buf), ops, tmp);
return buf;
}
- [(set_attr "isa" "noavx,avx")
+ [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
(set_attr "type" "sselog")
(set (attr "prefix_data16")
(if_then_else
(eq_attr "mode" "TI"))
(const_string "1")
(const_string "*")))
- (set_attr "prefix" "orig,vex")
+ (set_attr "prefix" "orig,vex,evex,evex")
(set (attr "mode")
- (cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+ (cond [(eq_attr "alternative" "2")
+ (const_string "TI")
+ (eq_attr "alternative" "3")
+ (const_string "XI")
+ (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
(const_string "V4SF")
(match_test "TARGET_AVX")
(const_string "TI")
(const_string "TI")))])
(define_insn "*<code><mode>3"
- [(set (match_operand:MODEF 0 "register_operand" "=x,x")
+ [(set (match_operand:MODEF 0 "register_operand" "=x,x,v,v")
(any_logic:MODEF
- (match_operand:MODEF 1 "register_operand" "%0,x")
- (match_operand:MODEF 2 "register_operand" "x,x")))]
+ (match_operand:MODEF 1 "register_operand" "%0,x,v,v")
+ (match_operand:MODEF 2 "register_operand" "x,x,v,v")))]
"SSE_FLOAT_MODE_P (<MODE>mode)"
{
static char buf[32];
case 0:
ops = "<logic>%s\t{%%2, %%0|%%0, %%2}";
break;
+ case 2:
+ if (!TARGET_AVX512DQ)
+ {
+ suffix = <MODE>mode == DFmode ? "q" : "d";
+ ops = "vp<logic>%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
+ break;
+ }
+ /* FALLTHRU */
case 1:
ops = "v<logic>%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
break;
+ case 3:
+ if (TARGET_AVX512DQ)
+ ops = "v<logic>%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}";
+ else
+ {
+ suffix = <MODE>mode == DFmode ? "q" : "d";
+ ops = "vp<logic>%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}";
+ }
+ break;
default:
gcc_unreachable ();
}
snprintf (buf, sizeof (buf), ops, suffix);
return buf;
}
- [(set_attr "isa" "noavx,avx")
+ [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
(set_attr "type" "sselog")
- (set_attr "prefix" "orig,vex")
+ (set_attr "prefix" "orig,vex,evex,evex")
(set (attr "mode")
- (cond [(and (match_test "<MODE_SIZE> == 16")
+ (cond [(eq_attr "alternative" "2")
+ (if_then_else (match_test "TARGET_AVX512DQ")
+ (const_string "<ssevecmode>")
+ (const_string "TI"))
+ (eq_attr "alternative" "3")
+ (if_then_else (match_test "TARGET_AVX512DQ")
+ (const_string "<avx512fvecmode>")
+ (const_string "XI"))
+ (and (match_test "<MODE_SIZE> == 16")
(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL"))
(const_string "V4SF")
(match_test "TARGET_AVX")
"ix86_fixup_binary_operands_no_copy (<CODE>, TFmode, operands);")
(define_insn "*<code>tf3"
- [(set (match_operand:TF 0 "register_operand" "=x,x")
+ [(set (match_operand:TF 0 "register_operand" "=x,x,v,v")
(any_logic:TF
- (match_operand:TF 1 "vector_operand" "%0,x")
- (match_operand:TF 2 "vector_operand" "xBm,xm")))]
+ (match_operand:TF 1 "vector_operand" "%0,x,v,v")
+ (match_operand:TF 2 "vector_operand" "xBm,xm,vm,v")))]
"TARGET_SSE
&& ix86_binary_operator_ok (<CODE>, TFmode, operands)"
{
static char buf[32];
const char *ops;
const char *tmp
- = (get_attr_mode (insn) == MODE_V4SF) ? "<logic>ps" : "p<logic>";
+ = (which_alternative >= 2 ? "p<logic>q"
+ : get_attr_mode (insn) == MODE_V4SF ? "<logic>ps" : "p<logic>");
switch (which_alternative)
{
ops = "%s\t{%%2, %%0|%%0, %%2}";
break;
case 1:
+ case 2:
ops = "v%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
break;
+ case 3:
+ ops = "v%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}";
+ break;
default:
gcc_unreachable ();
}
snprintf (buf, sizeof (buf), ops, tmp);
return buf;
}
- [(set_attr "isa" "noavx,avx")
+ [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
(set_attr "type" "sselog")
(set (attr "prefix_data16")
(if_then_else
(eq_attr "mode" "TI"))
(const_string "1")
(const_string "*")))
- (set_attr "prefix" "orig,vex")
+ (set_attr "prefix" "orig,vex,evex,evex")
(set (attr "mode")
- (cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+ (cond [(eq_attr "alternative" "2")
+ (const_string "TI")
+ (eq_attr "alternative" "3")
+		 (const_string "XI")
+ (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
(const_string "V4SF")
(match_test "TARGET_AVX")
(const_string "TI")
+2016-05-12 Jakub Jelinek <jakub@redhat.com>
+
+ * gcc.target/i386/avx512dq-abs-copysign-1.c: New test.
+ * gcc.target/i386/avx512vl-abs-copysign-1.c: New test.
+ * gcc.target/i386/avx512vl-abs-copysign-2.c: New test.
+
2016-05-12 Richard Biener <rguenther@suse.de>
PR tree-optimization/70986
--- /dev/null
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-Ofast -mavx512vl -mavx512dq" } */
+
+void
+f1 (float x)
+{
+ register float a __asm ("xmm16");
+ a = x;
+ asm volatile ("" : "+v" (a));
+ a = __builtin_fabsf (a);
+ asm volatile ("" : "+v" (a));
+}
+
+void
+f2 (float x, float y)
+{
+ register float a __asm ("xmm16"), b __asm ("xmm17");
+ a = x;
+ b = y;
+ asm volatile ("" : "+v" (a), "+v" (b));
+ a = __builtin_copysignf (a, b);
+ asm volatile ("" : "+v" (a));
+}
+
+void
+f3 (float x)
+{
+ register float a __asm ("xmm16");
+ a = x;
+ asm volatile ("" : "+v" (a));
+ a = -a;
+ asm volatile ("" : "+v" (a));
+}
+
+void
+f4 (double x)
+{
+ register double a __asm ("xmm18");
+ a = x;
+ asm volatile ("" : "+v" (a));
+ a = __builtin_fabs (a);
+ asm volatile ("" : "+v" (a));
+}
+
+void
+f5 (double x, double y)
+{
+ register double a __asm ("xmm18"), b __asm ("xmm19");
+ a = x;
+ b = y;
+ asm volatile ("" : "+v" (a), "+v" (b));
+ a = __builtin_copysign (a, b);
+ asm volatile ("" : "+v" (a));
+}
+
+void
+f6 (double x)
+{
+ register double a __asm ("xmm18");
+ a = x;
+ asm volatile ("" : "+v" (a));
+ a = -a;
+ asm volatile ("" : "+v" (a));
+}
+
+/* { dg-final { scan-assembler "vandps\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vorps\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vxorps\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vandpd\[^\n\r\]*xmm18" } } */
+/* { dg-final { scan-assembler "vorpd\[^\n\r\]*xmm18" } } */
+/* { dg-final { scan-assembler "vxorpd\[^\n\r\]*xmm18" } } */
--- /dev/null
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-Ofast -mavx512vl -mno-avx512dq" } */
+
+void
+f1 (float x)
+{
+ register float a __asm ("xmm16");
+ a = x;
+ asm volatile ("" : "+v" (a));
+ a = __builtin_fabsf (a);
+ asm volatile ("" : "+v" (a));
+}
+
+void
+f2 (float x, float y)
+{
+ register float a __asm ("xmm16"), b __asm ("xmm17");
+ a = x;
+ b = y;
+ asm volatile ("" : "+v" (a), "+v" (b));
+ a = __builtin_copysignf (a, b);
+ asm volatile ("" : "+v" (a));
+}
+
+void
+f3 (float x)
+{
+ register float a __asm ("xmm16");
+ a = x;
+ asm volatile ("" : "+v" (a));
+ a = -a;
+ asm volatile ("" : "+v" (a));
+}
+
+void
+f4 (double x)
+{
+ register double a __asm ("xmm18");
+ a = x;
+ asm volatile ("" : "+v" (a));
+ a = __builtin_fabs (a);
+ asm volatile ("" : "+v" (a));
+}
+
+void
+f5 (double x, double y)
+{
+ register double a __asm ("xmm18"), b __asm ("xmm19");
+ a = x;
+ b = y;
+ asm volatile ("" : "+v" (a), "+v" (b));
+ a = __builtin_copysign (a, b);
+ asm volatile ("" : "+v" (a));
+}
+
+void
+f6 (double x)
+{
+ register double a __asm ("xmm18");
+ a = x;
+ asm volatile ("" : "+v" (a));
+ a = -a;
+ asm volatile ("" : "+v" (a));
+}
+
+/* { dg-final { scan-assembler "vpandd\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vpord\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vpxord\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vpandq\[^\n\r\]*xmm18" } } */
+/* { dg-final { scan-assembler "vporq\[^\n\r\]*xmm18" } } */
+/* { dg-final { scan-assembler "vpxorq\[^\n\r\]*xmm18" } } */
--- /dev/null
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-Ofast -mavx512vl" } */
+
+void
+f1 (__float128 x)
+{
+ register __float128 a __asm ("xmm16");
+ a = x;
+ asm volatile ("" : "+v" (a));
+ a = __builtin_fabsq (a);
+ asm volatile ("" : "+v" (a));
+}
+
+void
+f2 (__float128 x, __float128 y)
+{
+ register __float128 a __asm ("xmm16"), b __asm ("xmm17");
+ a = x;
+ b = y;
+ asm volatile ("" : "+v" (a), "+v" (b));
+ a = __builtin_copysignq (a, b);
+ asm volatile ("" : "+v" (a));
+}
+
+void
+f3 (__float128 x)
+{
+ register __float128 a __asm ("xmm16");
+ a = x;
+ asm volatile ("" : "+v" (a));
+ a = -a;
+ asm volatile ("" : "+v" (a));
+}
+
+__int128_t
+f4 (void)
+{
+ register __int128_t a __asm ("xmm16");
+ register __int128_t __attribute__((vector_size (16))) b __asm ("xmm17");
+ a = 1;
+ asm volatile ("" : "+v" (a));
+ b[0] = a;
+ asm volatile ("" : "+v" (b));
+ return b[0];
+}
+
+/* { dg-final { scan-assembler "vpandq\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vporq\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vpxorq\[^\n\r\]*xmm16" } } */