From 0c2a4754bc69f5642d1db07ecde1fa610404b745 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Sat, 10 Jul 2004 00:27:59 +0000
Subject: [PATCH] mm3dnow.h: New.

	* config/i386/mm3dnow.h: New.
	* config.gcc: Add mm3dnow.h to extra_headers for i?86 and x86-64.

From-SVN: r84428
---
 gcc/ChangeLog                        |   7 +-
 gcc/config.gcc                       |   4 +-
 gcc/config/i386/mm3dnow.h            | 220 +++++++++++++++++++++++++++
 gcc/testsuite/gcc.dg/i386-3dnow-1.c  |  12 ++
 gcc/testsuite/gcc.dg/i386-3dnow-2.c  |  12 ++
 gcc/testsuite/gcc.dg/i386-3dnowA-1.c |  12 ++
 gcc/testsuite/gcc.dg/i386-3dnowA-2.c |  12 ++
 7 files changed, 276 insertions(+), 3 deletions(-)
 create mode 100644 gcc/config/i386/mm3dnow.h
 create mode 100644 gcc/testsuite/gcc.dg/i386-3dnow-1.c
 create mode 100644 gcc/testsuite/gcc.dg/i386-3dnow-2.c
 create mode 100644 gcc/testsuite/gcc.dg/i386-3dnowA-1.c
 create mode 100644 gcc/testsuite/gcc.dg/i386-3dnowA-2.c

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 245bf02c1c4..9b2c4c9d1eb 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,9 +1,14 @@
+2004-07-09  Jan Beulich
+
+	* config/i386/mm3dnow.h: New.
+	* config.gcc: Add mm3dnow.h to extra_headers for i?86 and x86-64.
+
 2004-07-09  Richard Henderson
 
 	* simplify-rtx.c (simplify_const_relational_operation): Only
 	look at bounds of scalar integers.
 
-2004-07-09  Jan Beulich
+2004-07-09  Jan Beulich
 
 	* config/i386/i386.md (sse2_clflush): Use correct operand for
 	clflush.
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 9bbda81f8d4..bc8fa23166b 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -248,11 +248,11 @@ xscale-*-*)
 	;;
 i[34567]86-*-*)
 	cpu_type=i386
-	extra_headers="mmintrin.h xmmintrin.h emmintrin.h pmmintrin.h"
+	extra_headers="mmintrin.h mm3dnow.h xmmintrin.h emmintrin.h pmmintrin.h"
 	;;
 x86_64-*-*)
 	cpu_type=i386
-	extra_headers="mmintrin.h xmmintrin.h emmintrin.h pmmintrin.h"
+	extra_headers="mmintrin.h mm3dnow.h xmmintrin.h emmintrin.h pmmintrin.h"
 	need_64bit_hwint=yes
 	;;
 ia64-*-*)
diff --git a/gcc/config/i386/mm3dnow.h b/gcc/config/i386/mm3dnow.h
new file mode 100644
index 00000000000..7987c0a1419
--- /dev/null
+++ b/gcc/config/i386/mm3dnow.h
@@ -0,0 +1,220 @@
+/* Copyright (C) 2004 Free Software Foundation, Inc.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING.  If not, write to
+   the Free Software Foundation, 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+/* As a special exception, if you include this header file into source
+   files compiled by GCC, this header file does not by itself cause
+   the resulting executable to be covered by the GNU General Public
+   License.  This exception does not however invalidate any other
+   reasons why the executable file might be covered by the GNU General
+   Public License.  */
+
+/* Implemented from the mm3dnow.h (of supposedly AMD origin) included with
+   MSVC 7.1.  */
+
+#ifndef _MM3DNOW_H_INCLUDED
+#define _MM3DNOW_H_INCLUDED
+
+#ifdef __3dNOW__
+
+#include <mmintrin.h>
+
+/* Internal data types for implementing the intrinsics.  */
+typedef int __v2sf __attribute__ ((__mode__ (__SF__), __vector_size__ (8)));
+
+static __inline void
+_m_femms (void)
+{
+  __builtin_ia32_femms();
+}
+
+static __inline __m64
+_m_pavgusb (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pavgusb ((__v8qi)__A, (__v8qi)__B);
+}
+
+static __inline __m64
+_m_pf2id (__m64 __A)
+{
+  return (__m64)__builtin_ia32_pf2id ((__v2sf)__A);
+}
+
+static __inline __m64
+_m_pfacc (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfacc ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfadd (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfadd ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfcmpeq (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfcmpeq ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfcmpge (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfcmpge ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfcmpgt (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfcmpgt ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfmax (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfmax ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfmin (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfmin ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfmul (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfmul ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfrcp (__m64 __A)
+{
+  return (__m64)__builtin_ia32_pfrcp ((__v2sf)__A);
+}
+
+static __inline __m64
+_m_pfrcpit1 (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfrcpit1 ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfrcpit2 (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfrcpit2 ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfrsqrt (__m64 __A)
+{
+  return (__m64)__builtin_ia32_pfrsqrt ((__v2sf)__A);
+}
+
+static __inline __m64
+_m_pfrsqit1 (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfrsqit1 ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfsub (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfsub ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfsubr (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfsubr ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pi2fd (__m64 __A)
+{
+  return (__m64)__builtin_ia32_pi2fd ((__v2si)__A);
+}
+
+static __inline __m64
+_m_pmulhrw (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pmulhrw ((__v4hi)__A, (__v4hi)__B);
+}
+
+static __inline void
+_m_prefetch (void *__P)
+{
+  __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */);
+}
+
+static __inline void
+_m_prefetchw (void *__P)
+{
+  __builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */);
+}
+
+static __inline __m64
+_m_from_float (float __A)
+{
+  return (__m64)(__v2sf){ __A, 0 };
+}
+
+static __inline float
+_m_to_float (__m64 __A)
+{
+  union { __v2sf v; float a[2]; } __tmp = { (__v2sf)__A };
+  return __tmp.a[0];
+}
+
+#ifdef __3dNOW_A__
+
+static __inline __m64
+_m_pf2iw (__m64 __A)
+{
+  return (__m64)__builtin_ia32_pf2iw ((__v2sf)__A);
+}
+
+static __inline __m64
+_m_pfnacc (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfnacc ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfpnacc (__m64 __A, __m64 __B)
+{
+  return (__m64)__builtin_ia32_pfpnacc ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pi2fw (__m64 __A)
+{
+  return (__m64)__builtin_ia32_pi2fw ((__v2si)__A);
+}
+
+static __inline __m64
+_m_pswapd (__m64 __A)
+{
+  return (__m64)__builtin_ia32_pswapdsf ((__v2sf)__A);
+}
+
+#endif /* __3dNOW_A__ */
+#endif /* __3dNOW__ */
+
+#endif /* _MM3DNOW_H_INCLUDED */
diff --git a/gcc/testsuite/gcc.dg/i386-3dnow-1.c b/gcc/testsuite/gcc.dg/i386-3dnow-1.c
new file mode 100644
index 00000000000..a8dcb419c7e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/i386-3dnow-1.c
@@ -0,0 +1,12 @@
+/* { dg-do assemble { target i?86-*-* x86_64-*-* } } */
+/* { dg-options "-O2 -Werror-implicit-function-declaration -m3dnow" } */
+
+/* Test that the intrinsics compile with optimization.  All of them are
+   defined as inline functions in mm3dnow.h that reference the proper
+   builtin functions.  Defining away "static" and "__inline" results in
+   all of them being compiled as proper functions.  */
+
+#define static
+#define __inline
+
+#include <mm3dnow.h>
diff --git a/gcc/testsuite/gcc.dg/i386-3dnow-2.c b/gcc/testsuite/gcc.dg/i386-3dnow-2.c
new file mode 100644
index 00000000000..d9aa7a56420
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/i386-3dnow-2.c
@@ -0,0 +1,12 @@
+/* { dg-do assemble { target i?86-*-* x86_64-*-* } } */
+/* { dg-options "-O0 -Werror-implicit-function-declaration -m3dnow" } */
+
+/* Test that the intrinsics compile without optimization.  All of them are
+   defined as inline functions in mm3dnow.h that reference the proper
+   builtin functions.  Defining away "static" and "__inline" results in
+   all of them being compiled as proper functions.  */
+
+#define static
+#define __inline
+
+#include <mm3dnow.h>
diff --git a/gcc/testsuite/gcc.dg/i386-3dnowA-1.c b/gcc/testsuite/gcc.dg/i386-3dnowA-1.c
new file mode 100644
index 00000000000..2ae1a04d5cb
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/i386-3dnowA-1.c
@@ -0,0 +1,12 @@
+/* { dg-do assemble { target i?86-*-* } } */
+/* { dg-options "-O2 -Werror-implicit-function-declaration -m3dnow -march=athlon" } */
+
+/* Test that the intrinsics compile with optimization.  All of them are
+   defined as inline functions in mm3dnow.h that reference the proper
+   builtin functions.  Defining away "static" and "__inline" results in
+   all of them being compiled as proper functions.  */
+
+#define static
+#define __inline
+
+#include <mm3dnow.h>
diff --git a/gcc/testsuite/gcc.dg/i386-3dnowA-2.c b/gcc/testsuite/gcc.dg/i386-3dnowA-2.c
new file mode 100644
index 00000000000..d8ed6cb4c4d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/i386-3dnowA-2.c
@@ -0,0 +1,12 @@
+/* { dg-do assemble { target i?86-*-* } } */
+/* { dg-options "-O0 -Werror-implicit-function-declaration -m3dnow -march=athlon" } */
+
+/* Test that the intrinsics compile without optimization.  All of them are
+   defined as inline functions in mm3dnow.h that reference the proper
+   builtin functions.  Defining away "static" and "__inline" results in
+   all of them being compiled as proper functions.  */
+
+#define static
+#define __inline
+
+#include <mm3dnow.h>
-- 
2.30.2
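
For reference, a minimal usage sketch (not part of the patch): it exercises a few of
the new intrinsics the way the header is meant to be used, guarded by the same
__3dNOW__ macro the header itself tests and built with the -m3dnow option used by
the testcases above. The file name and the constants are illustrative only, and
running the program assumes a 3dNow!-capable CPU.

    /* usage.c -- build with: gcc -m3dnow usage.c */
    #include <stdio.h>
    #include <mm3dnow.h>

    int main (void)
    {
    #ifdef __3dNOW__
      __m64 a = _m_from_float (1.5f);   /* low element 1.5, high element 0 */
      __m64 b = _m_from_float (2.25f);  /* low element 2.25, high element 0 */

      __m64 sum  = _m_pfadd (a, b);     /* packed single-precision add */
      __m64 prod = _m_pfmul (a, b);     /* packed single-precision multiply */

      float s = _m_to_float (sum);      /* low element: 3.75 */
      float p = _m_to_float (prod);     /* low element: 3.375 */

      _m_femms ();                      /* clear MMX/3dNow! state before ordinary FP code */
      printf ("sum=%f prod=%f\n", s, p);
    #endif
      return 0;
    }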