[AArch64] Split built-in function codes into major and minor codes
author Richard Sandiford <richard.sandiford@arm.com>
Fri, 27 Sep 2019 08:47:21 +0000 (08:47 +0000)
committer Richard Sandiford <rsandifo@gcc.gnu.org>
Fri, 27 Sep 2019 08:47:21 +0000 (08:47 +0000)
It was easier to add the SVE ACLE support without enumerating every
function at build time.  This in turn meant that it was easier if the
SVE builtins occupied a distinct numberspace from the existing AArch64
ones, which *are* enumerated at build time.  This patch therefore
divides the built-in function codes into "major" and "minor" codes.
At present the major code is just "general", but the SVE patch will add
"SVE" as well.

Also, it was convenient to put the SVE ACLE support in its own file,
so the patch makes aarch64.c provide the frontline target hooks directly,
forwarding to the other files for the real work.

The reason for organising the files this way is that aarch64.c needs
to define the target hook macros whatever happens, and having aarch64.c
macros forward to aarch64-builtins.c functions and aarch64-builtins.c
functions forward to the SVE file seemed a bit indirect.  Doing things
the way the patch does them puts aarch64-builtins.c and the SVE code on
more of an equal footing.
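
Concretely, each frontline hook in aarch64.c decodes the class bits and
forwards the subcode.  A sketch of the pattern, matching the aarch64.c
hunk below (the commented-out SVE case is hypothetical, showing where a
follow-up class would slot in):

  /* Implement TARGET_EXPAND_BUILTIN by dispatching on the class bits.  */
  static rtx
  aarch64_expand_builtin (tree exp, rtx target, rtx, machine_mode, int)
  {
    tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
    unsigned int code = DECL_MD_FUNCTION_CODE (fndecl);
    unsigned int subcode = code >> AARCH64_BUILTIN_SHIFT;
    switch (code & AARCH64_BUILTIN_CLASS)
      {
      case AARCH64_BUILTIN_GENERAL:
        return aarch64_general_expand_builtin (subcode, exp, target);
      /* A later class would add its own case here, e.g.:
         case AARCH64_BUILTIN_SVE:
           return aarch64_sve_expand_builtin (subcode, exp, target);  */
      }
    gcc_unreachable ();
  }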

The aarch64_(general_)gimple_fold_builtin change is mostly just
reindentation.

2019-09-27  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
* config/aarch64/aarch64-protos.h (aarch64_builtin_class): New enum.
(AARCH64_BUILTIN_SHIFT, AARCH64_BUILTIN_CLASS): New constants.
(aarch64_gimple_fold_builtin, aarch64_mangle_builtin_type)
(aarch64_fold_builtin, aarch64_init_builtins, aarch64_expand_builtin)
(aarch64_builtin_decl, aarch64_builtin_rsqrt): Delete.
(aarch64_general_mangle_builtin_type, aarch64_general_init_builtins)
(aarch64_general_fold_builtin, aarch64_general_gimple_fold_builtin)
(aarch64_general_expand_builtin, aarch64_general_builtin_decl)
(aarch64_general_builtin_rsqrt): Declare.
* config/aarch64/aarch64-builtins.c (aarch64_general_add_builtin):
New function.
(aarch64_mangle_builtin_type): Rename to...
(aarch64_general_mangle_builtin_type): ...this.
(aarch64_init_fcmla_laneq_builtins, aarch64_init_simd_builtins)
(aarch64_init_crc32_builtins, aarch64_init_builtin_rsqrt)
(aarch64_init_pauth_hint_builtins, aarch64_init_tme_builtins): Use
aarch64_general_add_builtin instead of add_builtin_function.
(aarch64_init_builtins): Rename to...
(aarch64_general_init_builtins): ...this.  Use
aarch64_general_add_builtin instead of add_builtin_function.
(aarch64_builtin_decl): Rename to...
(aarch64_general_builtin_decl): ...this and remove the unused
arguments.
(aarch64_expand_builtin): Rename to...
(aarch64_general_expand_builtin): ...this and remove the unused
arguments.
(aarch64_builtin_rsqrt): Rename to...
(aarch64_general_builtin_rsqrt): ...this.
(aarch64_fold_builtin): Rename to...
(aarch64_general_fold_builtin): ...this.  Take the function subcode
and return type as arguments.  Remove the "ignored" argument.
(aarch64_gimple_fold_builtin): Rename to...
(aarch64_general_gimple_fold_builtin): ...this.  Take the function
subcode and gcall as arguments, and return the new function call.
* config/aarch64/aarch64.c (aarch64_init_builtins)
(aarch64_fold_builtin, aarch64_gimple_fold_builtin)
(aarch64_expand_builtin, aarch64_builtin_decl): New functions.
(aarch64_builtin_reciprocal): Call aarch64_general_builtin_rsqrt
instead of aarch64_builtin_rsqrt.
(aarch64_mangle_type): Call aarch64_general_mangle_builtin_type
instead of aarch64_mangle_builtin_type.

From-SVN: r276177

gcc/ChangeLog
gcc/config/aarch64/aarch64-builtins.c
gcc/config/aarch64/aarch64-protos.h
gcc/config/aarch64/aarch64.c

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index b6a19c4e2f7e07fc73858aeb1821eac7053cf9b0..8d142db7f1247e0dc899500146f0d09c4685d146 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,47 @@
+2019-09-27  Richard Sandiford  <richard.sandiford@arm.com>
+
+       * config/aarch64/aarch64-protos.h (aarch64_builtin_class): New enum.
+       (AARCH64_BUILTIN_SHIFT, AARCH64_BUILTIN_CLASS): New constants.
+       (aarch64_gimple_fold_builtin, aarch64_mangle_builtin_type)
+       (aarch64_fold_builtin, aarch64_init_builtins, aarch64_expand_builtin)
+       (aarch64_builtin_decl, aarch64_builtin_rsqrt): Delete.
+       (aarch64_general_mangle_builtin_type, aarch64_general_init_builtins)
+       (aarch64_general_fold_builtin, aarch64_general_gimple_fold_builtin)
+       (aarch64_general_expand_builtin, aarch64_general_builtin_decl)
+       (aarch64_general_builtin_rsqrt): Declare.
+       * config/aarch64/aarch64-builtins.c (aarch64_general_add_builtin):
+       New function.
+       (aarch64_mangle_builtin_type): Rename to...
+       (aarch64_general_mangle_builtin_type): ...this.
+       (aarch64_init_fcmla_laneq_builtins, aarch64_init_simd_builtins)
+       (aarch64_init_crc32_builtins, aarch64_init_builtin_rsqrt)
+       (aarch64_init_pauth_hint_builtins, aarch64_init_tme_builtins): Use
+       aarch64_general_add_builtin instead of add_builtin_function.
+       (aarch64_init_builtins): Rename to...
+       (aarch64_general_init_builtins): ...this.  Use
+       aarch64_general_add_builtin instead of add_builtin_function.
+       (aarch64_builtin_decl): Rename to...
+       (aarch64_general_builtin_decl): ...this and remove the unused
+       arguments.
+       (aarch64_expand_builtin): Rename to...
+       (aarch64_general_expand_builtin): ...this and remove the unused
+       arguments.
+       (aarch64_builtin_rsqrt): Rename to...
+       (aarch64_general_builtin_rsqrt): ...this.
+       (aarch64_fold_builtin): Rename to...
+       (aarch64_general_fold_builtin): ...this.  Take the function subcode
+       and return type as arguments.  Remove the "ignored" argument.
+       (aarch64_gimple_fold_builtin): Rename to...
+       (aarch64_general_gimple_fold_builtin): ...this.  Take the function
+       subcode and gcall as arguments, and return the new function call.
+       * config/aarch64/aarch64.c (aarch64_init_builtins)
+       (aarch64_fold_builtin, aarch64_gimple_fold_builtin)
+       (aarch64_expand_builtin, aarch64_builtin_decl): New functions.
+       (aarch64_builtin_reciprocal): Call aarch64_general_builtin_rsqrt
+       instead of aarch64_builtin_rsqrt.
+       (aarch64_mangle_type): Call aarch64_general_mangle_builtin_type
+       instead of aarch64_mangle_builtin_type.
+
 2019-09-27  Richard Sandiford  <richard.sandiford@arm.com>
 
        * target.def (check_builtin_call): New target hook.
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index 9f261043e8aca48a088198ea7e1b09f3f89d575c..e02ece8672a633833a63993e24a156dd5ff64e69 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -556,6 +556,17 @@ static tree aarch64_simd_intXI_type_node = NULL_TREE;
 tree aarch64_fp16_type_node = NULL_TREE;
 tree aarch64_fp16_ptr_type_node = NULL_TREE;
 
+/* Wrapper around add_builtin_function.  NAME is the name of the built-in
+   function, TYPE is the function type, and CODE is the function subcode
+   (relative to AARCH64_BUILTIN_GENERAL).  */
+static tree
+aarch64_general_add_builtin (const char *name, tree type, unsigned int code)
+{
+  code = (code << AARCH64_BUILTIN_SHIFT) | AARCH64_BUILTIN_GENERAL;
+  return add_builtin_function (name, type, code, BUILT_IN_MD,
+                              NULL, NULL_TREE);
+}
+
 static const char *
 aarch64_mangle_builtin_scalar_type (const_tree type)
 {
@@ -594,7 +605,7 @@ aarch64_mangle_builtin_vector_type (const_tree type)
 }
 
 const char *
-aarch64_mangle_builtin_type (const_tree type)
+aarch64_general_mangle_builtin_type (const_tree type)
 {
   const char *mangle;
   /* Walk through all the AArch64 builtins types tables to filter out the
@@ -825,8 +836,7 @@ aarch64_init_fcmla_laneq_builtins (void)
        = aarch64_simd_builtin_std_type (SImode, qualifier_lane_pair_index);
       tree ftype = build_function_type_list (argtype, argtype, argtype,
                                             quadtype, lanetype, NULL_TREE);
-      tree fndecl = add_builtin_function (d->name, ftype, d->fcode,
-                                         BUILT_IN_MD, NULL, NULL_TREE);
+      tree fndecl = aarch64_general_add_builtin (d->name, ftype, d->fcode);
 
       aarch64_builtin_decls[d->fcode] = fndecl;
     }
@@ -855,10 +865,10 @@ aarch64_init_simd_builtins (void)
                                                  size_type_node,
                                                  intSI_type_node,
                                                  NULL);
-  aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_LANE_CHECK] =
-      add_builtin_function ("__builtin_aarch64_im_lane_boundsi", lane_check_fpr,
-                           AARCH64_SIMD_BUILTIN_LANE_CHECK, BUILT_IN_MD,
-                           NULL, NULL_TREE);
+  aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_LANE_CHECK]
+    = aarch64_general_add_builtin ("__builtin_aarch64_im_lane_boundsi",
+                                  lane_check_fpr,
+                                  AARCH64_SIMD_BUILTIN_LANE_CHECK);
 
   for (i = 0; i < ARRAY_SIZE (aarch64_simd_builtin_data); i++, fcode++)
     {
@@ -956,8 +966,7 @@ aarch64_init_simd_builtins (void)
        snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s",
                  d->name);
 
-      fndecl = add_builtin_function (namebuf, ftype, fcode, BUILT_IN_MD,
-                                    NULL, NULL_TREE);
+      fndecl = aarch64_general_add_builtin (namebuf, ftype, fcode);
       aarch64_builtin_decls[fcode] = fndecl;
     }
 
@@ -977,8 +986,7 @@ aarch64_init_crc32_builtins ()
       tree argtype = aarch64_simd_builtin_std_type (d->mode,
                                                    qualifier_unsigned);
       tree ftype = build_function_type_list (usi_type, usi_type, argtype, NULL_TREE);
-      tree fndecl = add_builtin_function (d->name, ftype, d->fcode,
-                                          BUILT_IN_MD, NULL, NULL_TREE);
+      tree fndecl = aarch64_general_add_builtin (d->name, ftype, d->fcode);
 
       aarch64_builtin_decls[d->fcode] = fndecl;
     }
@@ -1018,8 +1026,8 @@ aarch64_init_builtin_rsqrt (void)
   for (; bdd < bdd_end; bdd++)
   {
     ftype = build_function_type_list (bdd->type_node, bdd->type_node, NULL_TREE);
-    fndecl = add_builtin_function (bdd->builtin_name,
-      ftype, bdd->function_code, BUILT_IN_MD, NULL, NULL_TREE);
+    fndecl = aarch64_general_add_builtin (bdd->builtin_name,
+                                         ftype, bdd->function_code);
     aarch64_builtin_decls[bdd->function_code] = fndecl;
   }
 }
@@ -1053,25 +1061,25 @@ aarch64_init_pauth_hint_builtins (void)
     = build_function_type_list (ptr_type_node, ptr_type_node, NULL_TREE);
 
   aarch64_builtin_decls[AARCH64_PAUTH_BUILTIN_AUTIA1716]
-    = add_builtin_function ("__builtin_aarch64_autia1716", ftype_pointer_auth,
-                           AARCH64_PAUTH_BUILTIN_AUTIA1716, BUILT_IN_MD, NULL,
-                           NULL_TREE);
+    = aarch64_general_add_builtin ("__builtin_aarch64_autia1716",
+                                  ftype_pointer_auth,
+                                  AARCH64_PAUTH_BUILTIN_AUTIA1716);
   aarch64_builtin_decls[AARCH64_PAUTH_BUILTIN_PACIA1716]
-    = add_builtin_function ("__builtin_aarch64_pacia1716", ftype_pointer_auth,
-                           AARCH64_PAUTH_BUILTIN_PACIA1716, BUILT_IN_MD, NULL,
-                           NULL_TREE);
+    = aarch64_general_add_builtin ("__builtin_aarch64_pacia1716",
+                                  ftype_pointer_auth,
+                                  AARCH64_PAUTH_BUILTIN_PACIA1716);
   aarch64_builtin_decls[AARCH64_PAUTH_BUILTIN_AUTIB1716]
-    = add_builtin_function ("__builtin_aarch64_autib1716", ftype_pointer_auth,
-                           AARCH64_PAUTH_BUILTIN_AUTIB1716, BUILT_IN_MD, NULL,
-                           NULL_TREE);
+    = aarch64_general_add_builtin ("__builtin_aarch64_autib1716",
+                                  ftype_pointer_auth,
+                                  AARCH64_PAUTH_BUILTIN_AUTIB1716);
   aarch64_builtin_decls[AARCH64_PAUTH_BUILTIN_PACIB1716]
-    = add_builtin_function ("__builtin_aarch64_pacib1716", ftype_pointer_auth,
-                           AARCH64_PAUTH_BUILTIN_PACIB1716, BUILT_IN_MD, NULL,
-                           NULL_TREE);
+    = aarch64_general_add_builtin ("__builtin_aarch64_pacib1716",
+                                  ftype_pointer_auth,
+                                  AARCH64_PAUTH_BUILTIN_PACIB1716);
   aarch64_builtin_decls[AARCH64_PAUTH_BUILTIN_XPACLRI]
-    = add_builtin_function ("__builtin_aarch64_xpaclri", ftype_pointer_strip,
-                           AARCH64_PAUTH_BUILTIN_XPACLRI, BUILT_IN_MD, NULL,
-                           NULL_TREE);
+    = aarch64_general_add_builtin ("__builtin_aarch64_xpaclri",
+                                  ftype_pointer_strip,
+                                  AARCH64_PAUTH_BUILTIN_XPACLRI);
 }
 
 /* Initialize the transactional memory extension (TME) builtins.  */
@@ -1086,25 +1094,26 @@ aarch64_init_tme_builtins (void)
     = build_function_type_list (void_type_node, uint64_type_node, NULL);
 
   aarch64_builtin_decls[AARCH64_TME_BUILTIN_TSTART]
-    = add_builtin_function ("__builtin_aarch64_tstart", ftype_uint64_void,
-                           AARCH64_TME_BUILTIN_TSTART, BUILT_IN_MD,
-                           NULL, NULL_TREE);
+    = aarch64_general_add_builtin ("__builtin_aarch64_tstart",
+                                  ftype_uint64_void,
+                                  AARCH64_TME_BUILTIN_TSTART);
   aarch64_builtin_decls[AARCH64_TME_BUILTIN_TTEST]
-    = add_builtin_function ("__builtin_aarch64_ttest", ftype_uint64_void,
-                           AARCH64_TME_BUILTIN_TTEST, BUILT_IN_MD,
-                           NULL, NULL_TREE);
+    = aarch64_general_add_builtin ("__builtin_aarch64_ttest",
+                                  ftype_uint64_void,
+                                  AARCH64_TME_BUILTIN_TTEST);
   aarch64_builtin_decls[AARCH64_TME_BUILTIN_TCOMMIT]
-    = add_builtin_function ("__builtin_aarch64_tcommit", ftype_void_void,
-                           AARCH64_TME_BUILTIN_TCOMMIT, BUILT_IN_MD,
-                           NULL, NULL_TREE);
+    = aarch64_general_add_builtin ("__builtin_aarch64_tcommit",
+                                  ftype_void_void,
+                                  AARCH64_TME_BUILTIN_TCOMMIT);
   aarch64_builtin_decls[AARCH64_TME_BUILTIN_TCANCEL]
-    = add_builtin_function ("__builtin_aarch64_tcancel", ftype_void_uint64,
-                           AARCH64_TME_BUILTIN_TCANCEL, BUILT_IN_MD,
-                           NULL, NULL_TREE);
+    = aarch64_general_add_builtin ("__builtin_aarch64_tcancel",
+                                  ftype_void_uint64,
+                                  AARCH64_TME_BUILTIN_TCANCEL);
 }
 
+/* Initialize all builtins in the AARCH64_BUILTIN_GENERAL group.  */
 void
-aarch64_init_builtins (void)
+aarch64_general_init_builtins (void)
 {
   tree ftype_set_fpr
     = build_function_type_list (void_type_node, unsigned_type_node, NULL);
@@ -1112,17 +1121,21 @@ aarch64_init_builtins (void)
     = build_function_type_list (unsigned_type_node, NULL);
 
   aarch64_builtin_decls[AARCH64_BUILTIN_GET_FPCR]
-    = add_builtin_function ("__builtin_aarch64_get_fpcr", ftype_get_fpr,
-                           AARCH64_BUILTIN_GET_FPCR, BUILT_IN_MD, NULL, NULL_TREE);
+    = aarch64_general_add_builtin ("__builtin_aarch64_get_fpcr",
+                                  ftype_get_fpr,
+                                  AARCH64_BUILTIN_GET_FPCR);
   aarch64_builtin_decls[AARCH64_BUILTIN_SET_FPCR]
-    = add_builtin_function ("__builtin_aarch64_set_fpcr", ftype_set_fpr,
-                           AARCH64_BUILTIN_SET_FPCR, BUILT_IN_MD, NULL, NULL_TREE);
+    = aarch64_general_add_builtin ("__builtin_aarch64_set_fpcr",
+                                  ftype_set_fpr,
+                                  AARCH64_BUILTIN_SET_FPCR);
   aarch64_builtin_decls[AARCH64_BUILTIN_GET_FPSR]
-    = add_builtin_function ("__builtin_aarch64_get_fpsr", ftype_get_fpr,
-                           AARCH64_BUILTIN_GET_FPSR, BUILT_IN_MD, NULL, NULL_TREE);
+    = aarch64_general_add_builtin ("__builtin_aarch64_get_fpsr",
+                                  ftype_get_fpr,
+                                  AARCH64_BUILTIN_GET_FPSR);
   aarch64_builtin_decls[AARCH64_BUILTIN_SET_FPSR]
-    = add_builtin_function ("__builtin_aarch64_set_fpsr", ftype_set_fpr,
-                           AARCH64_BUILTIN_SET_FPSR, BUILT_IN_MD, NULL, NULL_TREE);
+    = aarch64_general_add_builtin ("__builtin_aarch64_set_fpsr",
+                                  ftype_set_fpr,
+                                  AARCH64_BUILTIN_SET_FPSR);
 
   aarch64_init_fp16_types ();
 
@@ -1135,8 +1148,8 @@ aarch64_init_builtins (void)
   tree ftype_jcvt
     = build_function_type_list (intSI_type_node, double_type_node, NULL);
   aarch64_builtin_decls[AARCH64_JSCVT]
-    = add_builtin_function ("__builtin_aarch64_jcvtzs", ftype_jcvt,
-                           AARCH64_JSCVT, BUILT_IN_MD, NULL, NULL_TREE);
+    = aarch64_general_add_builtin ("__builtin_aarch64_jcvtzs", ftype_jcvt,
+                                  AARCH64_JSCVT);
 
   /* Initialize pointer authentication builtins which are backed by instructions
      in NOP encoding space.
@@ -1151,8 +1164,9 @@ aarch64_init_builtins (void)
     aarch64_init_tme_builtins ();
 }
 
+/* Implement TARGET_BUILTIN_DECL for the AARCH64_BUILTIN_GENERAL group.  */
 tree
-aarch64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
+aarch64_general_builtin_decl (unsigned code, bool)
 {
   if (code >= AARCH64_BUILTIN_MAX)
     return error_mark_node;
@@ -1593,17 +1607,11 @@ aarch64_expand_builtin_tme (int fcode, tree exp, rtx target)
     return target;
 }
 
-/* Expand an expression EXP that calls a built-in function,
+/* Expand an expression EXP that calls built-in function FCODE,
    with result going to TARGET if that's convenient.  */
 rtx
-aarch64_expand_builtin (tree exp,
-                    rtx target,
-                    rtx subtarget ATTRIBUTE_UNUSED,
-                    machine_mode mode ATTRIBUTE_UNUSED,
-                    int ignore ATTRIBUTE_UNUSED)
+aarch64_general_expand_builtin (unsigned int fcode, tree exp, rtx target)
 {
-  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
-  int fcode = DECL_MD_FUNCTION_CODE (fndecl);
   int icode;
   rtx pat, op0;
   tree arg0;
@@ -1880,7 +1888,7 @@ aarch64_builtin_vectorized_function (unsigned int fn, tree type_out,
 /* Return builtin for reciprocal square root.  */
 
 tree
-aarch64_builtin_rsqrt (unsigned int fn)
+aarch64_general_builtin_rsqrt (unsigned int fn)
 {
   if (fn == AARCH64_SIMD_BUILTIN_UNOP_sqrtv2df)
     return aarch64_builtin_decls[AARCH64_BUILTIN_RSQRT_V2DF];
@@ -1895,13 +1903,14 @@ aarch64_builtin_rsqrt (unsigned int fn)
 #define VAR1(T, N, MAP, A) \
   case AARCH64_SIMD_BUILTIN_##T##_##N##A:
 
+/* Try to fold a call to the built-in function with subcode FCODE.  The
+   function is passed the N_ARGS arguments in ARGS and it returns a value
+   of type TYPE.  Return the new expression on success and NULL_TREE on
+   failure.  */
 tree
-aarch64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args,
-                     bool ignore ATTRIBUTE_UNUSED)
+aarch64_general_fold_builtin (unsigned int fcode, tree type,
+                             unsigned int n_args ATTRIBUTE_UNUSED, tree *args)
 {
-  int fcode = DECL_MD_FUNCTION_CODE (fndecl);
-  tree type = TREE_TYPE (TREE_TYPE (fndecl));
-
   switch (fcode)
     {
       BUILTIN_VDQF (UNOP, abs, 2)
@@ -1917,109 +1926,90 @@ aarch64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args,
   return NULL_TREE;
 }
 
-bool
-aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
+/* Try to fold STMT, given that it's a call to the built-in function with
+   subcode FCODE.  Return the new statement on success and null on
+   failure.  */
+gimple *
+aarch64_general_gimple_fold_builtin (unsigned int fcode, gcall *stmt)
 {
-  bool changed = false;
-  gimple *stmt = gsi_stmt (*gsi);
-  tree call = gimple_call_fn (stmt);
-  tree fndecl;
   gimple *new_stmt = NULL;
-
-  if (call)
+  unsigned nargs = gimple_call_num_args (stmt);
+  tree *args = (nargs > 0
+               ? gimple_call_arg_ptr (stmt, 0)
+               : &error_mark_node);
+
+  /* We use gimple's IFN_REDUC_(PLUS|MIN|MAX)s for float, signed int
+     and unsigned int; it will distinguish according to the types of
+     the arguments to the __builtin.  */
+  switch (fcode)
     {
-      fndecl = gimple_call_fndecl (stmt);
-      if (fndecl)
+      BUILTIN_VALL (UNOP, reduc_plus_scal_, 10)
+       new_stmt = gimple_build_call_internal (IFN_REDUC_PLUS,
+                                              1, args[0]);
+       gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
+       break;
+      BUILTIN_VDQIF (UNOP, reduc_smax_scal_, 10)
+      BUILTIN_VDQ_BHSI (UNOPU, reduc_umax_scal_, 10)
+       new_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
+                                              1, args[0]);
+       gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
+       break;
+      BUILTIN_VDQIF (UNOP, reduc_smin_scal_, 10)
+      BUILTIN_VDQ_BHSI (UNOPU, reduc_umin_scal_, 10)
+       new_stmt = gimple_build_call_internal (IFN_REDUC_MIN,
+                                              1, args[0]);
+       gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
+       break;
+      BUILTIN_GPF (BINOP, fmulx, 0)
        {
-         int fcode = DECL_MD_FUNCTION_CODE (fndecl);
-         unsigned nargs = gimple_call_num_args (stmt);
-         tree *args = (nargs > 0
-                       ? gimple_call_arg_ptr (stmt, 0)
-                       : &error_mark_node);
-
-         /* We use gimple's IFN_REDUC_(PLUS|MIN|MAX)s for float, signed int
-            and unsigned int; it will distinguish according to the types of
-            the arguments to the __builtin.  */
-         switch (fcode)
+         gcc_assert (nargs == 2);
+         bool a0_cst_p = TREE_CODE (args[0]) == REAL_CST;
+         bool a1_cst_p = TREE_CODE (args[1]) == REAL_CST;
+         if (a0_cst_p || a1_cst_p)
            {
-             BUILTIN_VALL (UNOP, reduc_plus_scal_, 10)
-               new_stmt = gimple_build_call_internal (IFN_REDUC_PLUS,
-                                                      1, args[0]);
-               gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
-               break;
-             BUILTIN_VDQIF (UNOP, reduc_smax_scal_, 10)
-             BUILTIN_VDQ_BHSI (UNOPU, reduc_umax_scal_, 10)
-               new_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
-                                                      1, args[0]);
-               gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
-               break;
-             BUILTIN_VDQIF (UNOP, reduc_smin_scal_, 10)
-             BUILTIN_VDQ_BHSI (UNOPU, reduc_umin_scal_, 10)
-               new_stmt = gimple_build_call_internal (IFN_REDUC_MIN,
-                                                      1, args[0]);
-               gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
-               break;
-             BUILTIN_GPF (BINOP, fmulx, 0)
+             if (a0_cst_p && a1_cst_p)
                {
-                 gcc_assert (nargs == 2);
-                 bool a0_cst_p = TREE_CODE (args[0]) == REAL_CST;
-                 bool a1_cst_p = TREE_CODE (args[1]) == REAL_CST;
-                 if (a0_cst_p || a1_cst_p)
-                   {
-                     if (a0_cst_p && a1_cst_p)
-                       {
-                         tree t0 = TREE_TYPE (args[0]);
-                         real_value a0 = (TREE_REAL_CST (args[0]));
-                         real_value a1 = (TREE_REAL_CST (args[1]));
-                         if (real_equal (&a1, &dconst0))
-                           std::swap (a0, a1);
-                         /* According to real_equal (), +0 equals -0.  */
-                         if (real_equal (&a0, &dconst0) && real_isinf (&a1))
-                           {
-                             real_value res = dconst2;
-                             res.sign = a0.sign ^ a1.sign;
-                             new_stmt =
-                               gimple_build_assign (gimple_call_lhs (stmt),
-                                                    REAL_CST,
-                                                    build_real (t0, res));
-                           }
-                         else
-                           new_stmt =
-                             gimple_build_assign (gimple_call_lhs (stmt),
-                                                  MULT_EXPR,
-                                                  args[0], args[1]);
-                       }
-                     else /* a0_cst_p ^ a1_cst_p.  */
-                       {
-                         real_value const_part = a0_cst_p
-                           ? TREE_REAL_CST (args[0]) : TREE_REAL_CST (args[1]);
-                         if (!real_equal (&const_part, &dconst0)
-                             && !real_isinf (&const_part))
-                           new_stmt =
-                             gimple_build_assign (gimple_call_lhs (stmt),
-                                                  MULT_EXPR, args[0], args[1]);
-                       }
-                   }
-                 if (new_stmt)
+                 tree t0 = TREE_TYPE (args[0]);
+                 real_value a0 = (TREE_REAL_CST (args[0]));
+                 real_value a1 = (TREE_REAL_CST (args[1]));
+                 if (real_equal (&a1, &dconst0))
+                   std::swap (a0, a1);
+                 /* According to real_equal (), +0 equals -0.  */
+                 if (real_equal (&a0, &dconst0) && real_isinf (&a1))
                    {
-                     gimple_set_vuse (new_stmt, gimple_vuse (stmt));
-                     gimple_set_vdef (new_stmt, gimple_vdef (stmt));
+                     real_value res = dconst2;
+                     res.sign = a0.sign ^ a1.sign;
+                     new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
+                                                     REAL_CST,
+                                                     build_real (t0, res));
                    }
-                 break;
+                 else
+                   new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
+                                                   MULT_EXPR,
+                                                   args[0], args[1]);
                }
-           default:
-             break;
+             else /* a0_cst_p ^ a1_cst_p.  */
+               {
+                 real_value const_part = a0_cst_p
+                   ? TREE_REAL_CST (args[0]) : TREE_REAL_CST (args[1]);
+                 if (!real_equal (&const_part, &dconst0)
+                     && !real_isinf (&const_part))
+                   new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
+                                                   MULT_EXPR, args[0],
+                                                   args[1]);
+               }
+           }
+         if (new_stmt)
+           {
+             gimple_set_vuse (new_stmt, gimple_vuse (stmt));
+             gimple_set_vdef (new_stmt, gimple_vdef (stmt));
            }
+         break;
        }
+    default:
+      break;
     }
-
-  if (new_stmt)
-    {
-      gsi_replace (gsi, new_stmt, true);
-      changed = true;
-    }
-
-  return changed;
+  return new_stmt;
 }
 
 void
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 1c1aac7201abfe87443c6bd39545ec9867a5d996..a870eb7713c4959ebc012cbb0439b2f95ab2787f 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -433,6 +433,22 @@ enum aarch64_svpattern {
 };
 #undef AARCH64_SVENUM
 
+/* It's convenient to divide the built-in function codes into groups,
+   rather than having everything in a single enum.  This type enumerates
+   those groups.  */
+enum aarch64_builtin_class
+{
+  AARCH64_BUILTIN_GENERAL
+};
+
+/* Built-in function codes are structured so that the low
+   AARCH64_BUILTIN_SHIFT bits contain the aarch64_builtin_class
+   and the upper bits contain a group-specific subcode.  */
+const unsigned int AARCH64_BUILTIN_SHIFT = 1;
+
+/* Mask that selects the aarch64_builtin_class part of a function code.  */
+const unsigned int AARCH64_BUILTIN_CLASS = (1 << AARCH64_BUILTIN_SHIFT) - 1;
+
 void aarch64_post_cfi_startproc (void);
 poly_int64 aarch64_initial_elimination_offset (unsigned, unsigned);
 int aarch64_get_condition_code (rtx);
@@ -459,7 +475,6 @@ bool aarch64_float_const_rtx_p (rtx);
 bool aarch64_function_arg_regno_p (unsigned);
 bool aarch64_fusion_enabled_p (enum aarch64_fusion_pairs);
 bool aarch64_gen_cpymemqi (rtx *);
-bool aarch64_gimple_fold_builtin (gimple_stmt_iterator *);
 bool aarch64_is_extend_from_extract (scalar_int_mode, rtx, rtx);
 bool aarch64_is_long_call_p (rtx);
 bool aarch64_is_noplt_call_p (rtx);
@@ -517,7 +532,6 @@ bool aarch64_symbolic_address_p (rtx);
 bool aarch64_uimm12_shift (HOST_WIDE_INT);
 bool aarch64_use_return_insn_p (void);
 bool aarch64_use_simple_return_insn_p (void);
-const char *aarch64_mangle_builtin_type (const_tree);
 const char *aarch64_output_casesi (rtx *);
 
 enum aarch64_symbol_type aarch64_classify_symbol (rtx, HOST_WIDE_INT);
@@ -544,7 +558,6 @@ rtx aarch64_simd_vect_par_cnst_half (machine_mode, int, bool);
 rtx aarch64_gen_stepped_int_parallel (unsigned int, int, int);
 bool aarch64_stepped_int_parallel_p (rtx, int);
 rtx aarch64_tls_get_addr (void);
-tree aarch64_fold_builtin (tree, int, tree *, bool);
 unsigned aarch64_dbx_register_number (unsigned);
 unsigned aarch64_trampoline_size (void);
 void aarch64_asm_output_labelref (FILE *, const char *);
@@ -639,18 +652,16 @@ bool aarch64_prepare_sve_int_fma (rtx *, rtx_code);
 bool aarch64_prepare_sve_cond_int_fma (rtx *, rtx_code);
 #endif /* RTX_CODE */
 
-void aarch64_init_builtins (void);
-
 bool aarch64_process_target_attr (tree);
 void aarch64_override_options_internal (struct gcc_options *);
 
-rtx aarch64_expand_builtin (tree exp,
-                           rtx target,
-                           rtx subtarget ATTRIBUTE_UNUSED,
-                           machine_mode mode ATTRIBUTE_UNUSED,
-                           int ignore ATTRIBUTE_UNUSED);
-tree aarch64_builtin_decl (unsigned, bool ATTRIBUTE_UNUSED);
-tree aarch64_builtin_rsqrt (unsigned int);
+const char *aarch64_general_mangle_builtin_type (const_tree);
+void aarch64_general_init_builtins (void);
+tree aarch64_general_fold_builtin (unsigned int, tree, unsigned int, tree *);
+gimple *aarch64_general_gimple_fold_builtin (unsigned int, gcall *);
+rtx aarch64_general_expand_builtin (unsigned int, tree, rtx);
+tree aarch64_general_builtin_decl (unsigned, bool);
+tree aarch64_general_builtin_rsqrt (unsigned int);
 tree aarch64_builtin_vectorized_function (unsigned int, tree, tree);
 
 extern void aarch64_split_combinev16qi (rtx operands[3]);
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 71d44de1d0a9a7949ff8acd53bd5030649897873..81d0f2b3f1f51ba126e14f6031d419fcf59d0d42 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -11646,6 +11646,79 @@ aarch64_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
   return aarch64_tune_params.memmov_cost;
 }
 
+/* Implement TARGET_INIT_BUILTINS.  */
+static void
+aarch64_init_builtins ()
+{
+  aarch64_general_init_builtins ();
+}
+
+/* Implement TARGET_FOLD_BUILTIN.  */
+static tree
+aarch64_fold_builtin (tree fndecl, int nargs, tree *args, bool)
+{
+  unsigned int code = DECL_MD_FUNCTION_CODE (fndecl);
+  unsigned int subcode = code >> AARCH64_BUILTIN_SHIFT;
+  tree type = TREE_TYPE (TREE_TYPE (fndecl));
+  switch (code & AARCH64_BUILTIN_CLASS)
+    {
+    case AARCH64_BUILTIN_GENERAL:
+      return aarch64_general_fold_builtin (subcode, type, nargs, args);
+    }
+  gcc_unreachable ();
+}
+
+/* Implement TARGET_GIMPLE_FOLD_BUILTIN.  */
+static bool
+aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
+{
+  gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
+  tree fndecl = gimple_call_fndecl (stmt);
+  unsigned int code = DECL_MD_FUNCTION_CODE (fndecl);
+  unsigned int subcode = code >> AARCH64_BUILTIN_SHIFT;
+  gimple *new_stmt = NULL;
+  switch (code & AARCH64_BUILTIN_CLASS)
+    {
+    case AARCH64_BUILTIN_GENERAL:
+      new_stmt = aarch64_general_gimple_fold_builtin (subcode, stmt);
+      break;
+    }
+
+  if (!new_stmt)
+    return false;
+
+  gsi_replace (gsi, new_stmt, true);
+  return true;
+}
+
+/* Implement TARGET_EXPAND_BUILTIN.  */
+static rtx
+aarch64_expand_builtin (tree exp, rtx target, rtx, machine_mode, int)
+{
+  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+  unsigned int code = DECL_MD_FUNCTION_CODE (fndecl);
+  unsigned int subcode = code >> AARCH64_BUILTIN_SHIFT;
+  switch (code & AARCH64_BUILTIN_CLASS)
+    {
+    case AARCH64_BUILTIN_GENERAL:
+      return aarch64_general_expand_builtin (subcode, exp, target);
+    }
+  gcc_unreachable ();
+}
+
+/* Implement TARGET_BUILTIN_DECL.  */
+static tree
+aarch64_builtin_decl (unsigned int code, bool initialize_p)
+{
+  unsigned int subcode = code >> AARCH64_BUILTIN_SHIFT;
+  switch (code & AARCH64_BUILTIN_CLASS)
+    {
+    case AARCH64_BUILTIN_GENERAL:
+      return aarch64_general_builtin_decl (subcode, initialize_p);
+    }
+  gcc_unreachable ();
+}
+
 /* Return true if it is safe and beneficial to use the approximate rsqrt optabs
    to optimize 1.0/sqrt.  */
 
@@ -11669,7 +11742,14 @@ aarch64_builtin_reciprocal (tree fndecl)
 
   if (!use_rsqrt_p (mode))
     return NULL_TREE;
-  return aarch64_builtin_rsqrt (DECL_MD_FUNCTION_CODE (fndecl));
+  unsigned int code = DECL_MD_FUNCTION_CODE (fndecl);
+  unsigned int subcode = code >> AARCH64_BUILTIN_SHIFT;
+  switch (code & AARCH64_BUILTIN_CLASS)
+    {
+    case AARCH64_BUILTIN_GENERAL:
+      return aarch64_general_builtin_rsqrt (subcode);
+    }
+  gcc_unreachable ();
 }
 
 /* Emit instruction sequence to compute either the approximate square root
@@ -15081,7 +15161,7 @@ aarch64_mangle_type (const_tree type)
   /* Mangle AArch64-specific internal types.  TYPE_NAME is non-NULL_TREE for
      builtin types.  */
   if (TYPE_NAME (type) != NULL)
-    return aarch64_mangle_builtin_type (type);
+    return aarch64_general_mangle_builtin_type (type);
 
   /* Use the default mangling.  */
   return NULL;