Warn whenever a function's return type defaults to "int" (C), or about inconsistent return types (C++).
Wscalar-storage-order
-C ObjC C++ ObjC++ Init(1) Warning
+C ObjC C++ ObjC++ Var(warn_scalar_storage_order) Init(1) Warning
Warn on suspicious constructs involving reverse scalar storage order.
Wselector
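For reference, a minimal example (not part of the patch; type and function names are illustrative) of the kind of construct the new warning is meant to catch. On a little-endian target the big-endian struct below is the one with reverse scalar storage order, so converting a pointer to it into a plain void * draws the diagnostic:

struct __attribute__((scalar_storage_order("big-endian"))) be_data
{
  int i;
};

void store_pointer (struct be_data *p, void **slot)
{
  /* The pointed-to storage uses the reverse scalar storage order, so this
     conversion to a plain void * is flagged by -Wscalar-storage-order,
     which is enabled by default (Init(1) above).  */
  *slot = p;
}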
}
}
+      /* See if the pointers point to incompatible scalar storage orders.  */
+      if (warn_scalar_storage_order
+          && (AGGREGATE_TYPE_P (ttl) && TYPE_REVERSE_STORAGE_ORDER (ttl))
+             != (AGGREGATE_TYPE_P (ttr) && TYPE_REVERSE_STORAGE_ORDER (ttr)))
+        {
+          switch (errtype)
+            {
+            case ic_argpass:
+              /* Do not warn for built-in functions, for example memcpy, since we
+                 control how they behave and they can be useful in this area.  */
+              if (TREE_CODE (rname) != FUNCTION_DECL || !DECL_IS_BUILTIN (rname))
+                warning_at (location, OPT_Wscalar_storage_order,
+                            "passing argument %d of %qE from incompatible "
+                            "scalar storage order", parmnum, rname);
+              break;
+            case ic_assign:
+              warning_at (location, OPT_Wscalar_storage_order,
+                          "assignment to %qT from pointer type %qT with "
+                          "incompatible scalar storage order", type, rhstype);
+              break;
+            case ic_init:
+              warning_at (location, OPT_Wscalar_storage_order,
+                          "initialization of %qT from pointer type %qT with "
+                          "incompatible scalar storage order", type, rhstype);
+              break;
+            case ic_return:
+              warning_at (location, OPT_Wscalar_storage_order,
+                          "returning %qT from pointer type with incompatible "
+                          "scalar storage order %qT", rhstype, type);
+              break;
+            default:
+              gcc_unreachable ();
+            }
+        }
+
/* Any non-function converts to a [const][volatile] void *
and vice versa; otherwise, targets must be the same.
Meanwhile, the lhs target must have all the qualifiers of the rhs. */
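The switch above distinguishes argument passing, assignment, initialization and return, and deliberately exempts built-in functions such as memcpy. A hedged sketch (illustrative names, assuming a little-endian target) of how the two argument-passing paths differ:

struct __attribute__((scalar_storage_order("big-endian"))) be_data
{
  int i;
};

extern int consume (void *p);

int pass_and_return (struct be_data *p)
{
  char buf[sizeof (struct be_data)];

  /* Built-ins are exempted in the ic_argpass case above, so this raw byte
     copy does not warn even though p points to reverse-order storage.  */
  __builtin_memcpy (buf, p, sizeof buf);

  /* A user-declared function gets no such exemption: ic_argpass warns here
     about passing argument 1 of 'consume' from incompatible scalar storage
     order.  */
  return consume (p);
}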
}
  else
    {
-     tree srctype, desttype, destvar, srcvar, srcoff;
+     /* We cannot (easily) change the type of the copy if it is a storage
+        order barrier, i.e. is equivalent to a VIEW_CONVERT_EXPR that can
+        modify the storage order of objects (see storage_order_barrier_p).  */
+     tree srctype
+       = POINTER_TYPE_P (TREE_TYPE (src))
+         ? TREE_TYPE (TREE_TYPE (src)) : NULL_TREE;
+     tree desttype
+       = POINTER_TYPE_P (TREE_TYPE (dest))
+         ? TREE_TYPE (TREE_TYPE (dest)) : NULL_TREE;
+     tree destvar, srcvar, srcoff;
      unsigned int src_align, dest_align;
-     tree off0;
-     const char *tmp_str;
      unsigned HOST_WIDE_INT tmp_len;
+     const char *tmp_str;
      /* Build accesses at offset zero with a ref-all character type.  */
-     off0 = build_int_cst (build_pointer_type_for_mode (char_type_node,
-                                                        ptr_mode, true), 0);
+     tree off0
+       = build_int_cst (build_pointer_type_for_mode (char_type_node,
+                                                     ptr_mode, true), 0);
      /* If we can perform the copy efficiently with first doing all loads
         and then all stores inline it that way.  Currently efficiently
         means that we can load all the memory into a single integer
         register which is what MOVE_MAX gives us.  */
          && !c_strlen (src, 1)
          && !((tmp_str = c_getstr (src, &tmp_len)) != NULL
-              && memchr (tmp_str, 0, tmp_len) == NULL))
+              && memchr (tmp_str, 0, tmp_len) == NULL)
+         && !(srctype
+              && AGGREGATE_TYPE_P (srctype)
+              && TYPE_REVERSE_STORAGE_ORDER (srctype))
+         && !(desttype
+              && AGGREGATE_TYPE_P (desttype)
+              && TYPE_REVERSE_STORAGE_ORDER (desttype)))
        {
          unsigned ilen = tree_to_uhwi (len);
          if (pow2p_hwi (ilen))
      if (!tree_fits_shwi_p (len))
        return false;
-     if (!POINTER_TYPE_P (TREE_TYPE (src))
-         || !POINTER_TYPE_P (TREE_TYPE (dest)))
+     if (!srctype
+         || (AGGREGATE_TYPE_P (srctype)
+             && TYPE_REVERSE_STORAGE_ORDER (srctype)))
+       return false;
+     if (!desttype
+         || (AGGREGATE_TYPE_P (desttype)
+             && TYPE_REVERSE_STORAGE_ORDER (desttype)))
        return false;
      /* In the following try to find a type that is most natural to be
         used for the memcpy source and destination and that allows
         the copy to be done using that type.  In theory we could always
         use a char[len] type but that only gains us that the destination
         and source possibly no longer will have their address taken.  */
-     srctype = TREE_TYPE (TREE_TYPE (src));
      if (TREE_CODE (srctype) == ARRAY_TYPE
          && !tree_int_cst_equal (TYPE_SIZE_UNIT (srctype), len))
        srctype = TREE_TYPE (srctype);
-     desttype = TREE_TYPE (TREE_TYPE (dest));
      if (TREE_CODE (desttype) == ARRAY_TYPE
          && !tree_int_cst_equal (TYPE_SIZE_UNIT (desttype), len))
        desttype = TREE_TYPE (desttype);
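A small example (assuming a little-endian host; names are illustrative, not taken from the patch) of why the fold must be suppressed: turning the memcpy into a scalar assignment would change observable behaviour, since the assignment honours the reverse storage order while memcpy copies raw bytes.

typedef struct
{
  unsigned int val;
} __attribute__((scalar_storage_order("big-endian"))) be32;

int main (void)
{
  unsigned int u = 0x12345678;
  be32 copied, assigned;

  /* Raw byte copy: on a little-endian host the bytes 78 56 34 12 land in
     memory unchanged, so reading the big-endian field yields 0x78563412.
     This call must therefore stay a memcpy and not be folded.  */
  __builtin_memcpy (&copied, &u, sizeof u);

  /* Scalar store: the value is byte-swapped on the way into memory and
     reads back as 0x12345678.  */
  assigned.val = u;

  return copied.val == assigned.val;  /* 0: the two forms are not equivalent.  */
}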
--- /dev/null
+/* Test support of scalar_storage_order attribute */
+
+/* { dg-do compile } */
+
+struct __attribute__((scalar_storage_order("big-endian"))) S1
+{
+ int i;
+};
+
+struct __attribute__((scalar_storage_order("little-endian"))) S2
+{
+ int i;
+};
+
+extern int foo (void *);
+
+int incompatible_call (int which, struct S1 *s1, struct S2 *s2)
+{
+  if (which == 1) return foo (s1); else return foo (s2); /* { dg-warning "incompatible scalar storage order" } */
+}
+
+void incompatible_assign (struct S1 *s1, struct S2 *s2)
+{
+ void *p1, *p2;
+ p1 = s1, p2 = s2; /* { dg-warning "incompatible scalar storage order" } */
+}
+
+void incompatible_init (struct S1 *s1, struct S2 *s2)
+{
+ void *p1 = s1, *p2 = s2; /* { dg-warning "incompatible scalar storage order" } */
+}
+
+void *incompatible_return (int which, struct S1 *s1, struct S2 *s2)
+{
+ if (which == 1) return s1; else return s2; /* { dg-warning "incompatible scalar storage order" } */
+}
--- /dev/null
+/* { dg-do run } */
+
+typedef unsigned char uint8_t;
+typedef unsigned int uint32_t;
+
+#define __big_endian__ scalar_storage_order("big-endian")
+#define __little_endian__ scalar_storage_order("little-endian")
+
+typedef union
+{
+ uint32_t val;
+ uint8_t v[4];
+} __attribute__((__big_endian__)) upal_u32be_t;
+
+typedef union
+{
+ uint32_t val;
+ uint8_t v[4];
+} __attribute__((__little_endian__)) upal_u32le_t;
+
+static inline uint32_t native_to_big_endian(uint32_t t)
+{
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return t;
+#else
+ return __builtin_bswap32(t);
+#endif
+}
+static inline uint32_t native_to_little_endian(uint32_t t)
+{
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return __builtin_bswap32(t);
+#else
+ return t;
+#endif
+}
+#define test(p, p1, i) do { if (p[i] != p1[i]) __builtin_abort (); } while (0)
+
+#define tests(p, p1) do { test(p, p1, 0); test(p, p1, 1); \
+ test(p, p1, 2); test(p, p1, 3); } while (0)
+
+int main(void)
+{
+ const uint32_t u = 0x12345678;
+
+ upal_u32be_t tempb;
+ __builtin_memcpy (&tempb, &u, sizeof(uint32_t));
+ uint32_t bu = tempb.val;
+ uint32_t b1u = native_to_big_endian(u);
+ tests (((uint8_t*)&bu), ((uint8_t*)&b1u));
+
+ upal_u32le_t templ;
+ __builtin_memcpy (&templ, &u, sizeof(uint32_t));
+ uint32_t lu = templ.val;
+ uint32_t l1u = native_to_little_endian(u);
+ tests (((uint8_t*)&lu), ((uint8_t*)&l1u));
+
+ return 0;
+}
dg-init
set SSO_TORTURE_OPTIONS [list \
- { -O0 } \
- { -O1 -fno-inline } \
- { -O2 } \
- { -O3 -finline-functions } \
- { -Os } \
- { -Og -g } ]
+ { -Wno-scalar-storage-order -O0 } \
+ { -Wno-scalar-storage-order -O1 -fno-inline } \
+ { -Wno-scalar-storage-order -O2 } \
+ { -Wno-scalar-storage-order -O3 -finline-functions } \
+ { -Wno-scalar-storage-order -Os } \
+ { -Wno-scalar-storage-order -Og -g } ]
set-torture-options $SSO_TORTURE_OPTIONS
return NULL;
}
-  /* 6) For memcpy copies translate the reference through them if
-     the copy kills ref.  */
+  /* 6) For memcpy copies translate the reference through them if the copy
+     kills ref.  But we cannot (easily) do this translation if the memcpy is
+     a storage order barrier, i.e. is equivalent to a VIEW_CONVERT_EXPR that
+     can modify the storage order of objects (see storage_order_barrier_p).  */
  else if (data->vn_walk_kind == VN_WALKREWRITE
           && is_gimple_reg_type (vr->type)
           /* ??? Handle BCOPY as well.  */
}
      if (TREE_CODE (lhs) == ADDR_EXPR)
        {
+         if (AGGREGATE_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
+             && TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_TYPE (lhs))))
+           return (void *)-1;
          tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (lhs, 0),
                                                    &lhs_offset);
          if (!tem)
      rhs = vn_valueize (rhs);
      if (TREE_CODE (rhs) == ADDR_EXPR)
        {
+         if (AGGREGATE_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
+             && TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_TYPE (rhs))))
+           return (void *)-1;
          tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs, 0),
                                                    &rhs_offset);
          if (!tem)
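The same reasoning applies to this value-numbering change: without the early (void *)-1 returns above, FRE could translate a load through the memcpy back to the source object, which is only valid when both sides use the native storage order. A sketch (assuming a little-endian host; names are illustrative):

struct native { unsigned int x; };
struct __attribute__((scalar_storage_order("big-endian"))) reversed { unsigned int x; };

unsigned int through_copy (void)
{
  struct native s = { 0x11223344 };
  struct reversed d;

  /* The copy acts as a storage order barrier: the bytes of s.x are copied
     verbatim, so d.x reads back as 0x44332211 on a little-endian host.
     Rewriting the load of d.x into a load of s.x (0x11223344) would be
     wrong, hence the bail-out on reverse-order aggregates above.  */
  __builtin_memcpy (&d, &s, sizeof d);
  return d.x;
}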