+2016-11-16 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/77823
+ * ubsan.c (ubsan_build_overflow_builtin): Add DATAP argument, if
+ it points to non-NULL tree, use it instead of ubsan_create_data.
+ (instrument_si_overflow): Handle vector signed integer overflow
+ checking.
+ * ubsan.h (ubsan_build_overflow_builtin): Add DATAP argument.
+ * tree-vrp.c (simplify_internal_call_using_ranges): Punt for
+ vector IFN_UBSAN_CHECK_*.
+ * internal-fn.c (expand_addsub_overflow): Add DATAP argument,
+ pass it through to ubsan_build_overflow_builtin.
+ (expand_neg_overflow, expand_mul_overflow): Likewise.
+ (expand_vector_ubsan_overflow): New function.
+ (expand_UBSAN_CHECK_ADD, expand_UBSAN_CHECK_SUB,
+ expand_UBSAN_CHECK_MUL): Use it for vector arithmetics.
+ (expand_arith_overflow): Adjust expand_*_overflow callers.
+
2016-11-16 Matthias Klose <doko@ubuntu.com>
* doc/install.texi: Remove references to java/libjava.
#include "ubsan.h"
#include "recog.h"
#include "builtins.h"
+#include "optabs-tree.h"
/* The names of each internal function, indexed by function number. */
const char *const internal_fn_name_array[] = {
static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
tree arg0, tree arg1, bool unsr_p, bool uns0_p,
- bool uns1_p, bool is_ubsan)
+ bool uns1_p, bool is_ubsan, tree *datap)
{
rtx res, target = NULL_RTX;
tree fn;
/* Expand the ubsan builtin call. */
push_temp_slots ();
fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
- arg0, arg1);
+ arg0, arg1, datap);
expand_normal (fn);
pop_temp_slots ();
do_pending_stack_adjust ();
/* Add negate overflow checking to the statement STMT. */
static void
-expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
+expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan,
+ tree *datap)
{
rtx res, op1;
tree fn;
/* Expand the ubsan builtin call. */
push_temp_slots ();
fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
- arg1, NULL_TREE);
+ arg1, NULL_TREE, datap);
expand_normal (fn);
pop_temp_slots ();
do_pending_stack_adjust ();
static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
- bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
+ bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan,
+ tree *datap)
{
rtx res, op0, op1;
tree fn, type;
/* Expand the ubsan builtin call. */
push_temp_slots ();
fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
- arg0, arg1);
+ arg0, arg1, datap);
expand_normal (fn);
pop_temp_slots ();
do_pending_stack_adjust ();
}
}
+/* Expand UBSAN_CHECK_* internal function if it has vector operands. */
+
+static void
+expand_vector_ubsan_overflow (location_t loc, enum tree_code code, tree lhs,
+ tree arg0, tree arg1)
+{
+ int cnt = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
+ rtx_code_label *loop_lab = NULL;
+ rtx cntvar = NULL_RTX;
+ tree cntv = NULL_TREE;
+ tree eltype = TREE_TYPE (TREE_TYPE (arg0));
+ tree sz = TYPE_SIZE (eltype);
+ tree data = NULL_TREE;
+ tree resv = NULL_TREE;
+ rtx lhsr = NULL_RTX;
+ rtx resvr = NULL_RTX;
+
+ if (lhs)
+ {
+ optab op;
+ lhsr = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
+ if (GET_MODE (lhsr) == BLKmode
+ || (op = optab_for_tree_code (code, TREE_TYPE (arg0),
+ optab_default)) == unknown_optab
+ || (optab_handler (op, TYPE_MODE (TREE_TYPE (arg0)))
+ == CODE_FOR_nothing))
+ {
+ if (MEM_P (lhsr))
+ resv = make_tree (TREE_TYPE (lhs), lhsr);
+ else
+ {
+ resvr = assign_temp (TREE_TYPE (lhs), 1, 1);
+ resv = make_tree (TREE_TYPE (lhs), resvr);
+ }
+ }
+ }
+ if (cnt > 4)
+ {
+ do_pending_stack_adjust ();
+ loop_lab = gen_label_rtx ();
+ cntvar = gen_reg_rtx (TYPE_MODE (sizetype));
+ cntv = make_tree (sizetype, cntvar);
+ emit_move_insn (cntvar, const0_rtx);
+ emit_label (loop_lab);
+ }
+ if (TREE_CODE (arg0) != VECTOR_CST)
+ {
+ rtx arg0r = expand_normal (arg0);
+ arg0 = make_tree (TREE_TYPE (arg0), arg0r);
+ }
+ if (TREE_CODE (arg1) != VECTOR_CST)
+ {
+ rtx arg1r = expand_normal (arg1);
+ arg1 = make_tree (TREE_TYPE (arg1), arg1r);
+ }
+ for (int i = 0; i < (cnt > 4 ? 1 : cnt); i++)
+ {
+ tree op0, op1, res = NULL_TREE;
+ if (cnt > 4)
+ {
+ tree atype = build_array_type_nelts (eltype, cnt);
+ op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg0);
+ op0 = build4_loc (loc, ARRAY_REF, eltype, op0, cntv,
+ NULL_TREE, NULL_TREE);
+ op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg1);
+ op1 = build4_loc (loc, ARRAY_REF, eltype, op1, cntv,
+ NULL_TREE, NULL_TREE);
+ if (resv)
+ {
+ res = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, resv);
+ res = build4_loc (loc, ARRAY_REF, eltype, res, cntv,
+ NULL_TREE, NULL_TREE);
+ }
+ }
+ else
+ {
+ tree bitpos = bitsize_int (tree_to_uhwi (sz) * i);
+ op0 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg0, sz, bitpos);
+ op1 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg1, sz, bitpos);
+ if (resv)
+ res = fold_build3_loc (loc, BIT_FIELD_REF, eltype, resv, sz,
+ bitpos);
+ }
+ switch (code)
+ {
+ case PLUS_EXPR:
+ expand_addsub_overflow (loc, PLUS_EXPR, res, op0, op1,
+ false, false, false, true, &data);
+ break;
+ case MINUS_EXPR:
+ if (cnt > 4 ? integer_zerop (arg0) : integer_zerop (op0))
+ expand_neg_overflow (loc, res, op1, true, &data);
+ else
+ expand_addsub_overflow (loc, MINUS_EXPR, res, op0, op1,
+ false, false, false, true, &data);
+ break;
+ case MULT_EXPR:
+ expand_mul_overflow (loc, res, op0, op1, false, false, false,
+ true, &data);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ if (cnt > 4)
+ {
+ struct separate_ops ops;
+ ops.code = PLUS_EXPR;
+ ops.type = TREE_TYPE (cntv);
+ ops.op0 = cntv;
+ ops.op1 = build_int_cst (TREE_TYPE (cntv), 1);
+ ops.op2 = NULL_TREE;
+ ops.location = loc;
+ rtx ret = expand_expr_real_2 (&ops, cntvar, TYPE_MODE (sizetype),
+ EXPAND_NORMAL);
+ if (ret != cntvar)
+ emit_move_insn (cntvar, ret);
+ do_compare_rtx_and_jump (cntvar, GEN_INT (cnt), NE, false,
+ TYPE_MODE (sizetype), NULL_RTX, NULL, loop_lab,
+ PROB_VERY_LIKELY);
+ }
+ if (lhs && resv == NULL_TREE)
+ {
+ struct separate_ops ops;
+ ops.code = code;
+ ops.type = TREE_TYPE (arg0);
+ ops.op0 = arg0;
+ ops.op1 = arg1;
+ ops.op2 = NULL_TREE;
+ ops.location = loc;
+ rtx ret = expand_expr_real_2 (&ops, lhsr, TYPE_MODE (TREE_TYPE (arg0)),
+ EXPAND_NORMAL);
+ if (ret != lhsr)
+ emit_move_insn (lhsr, ret);
+ }
+ else if (resvr)
+ emit_move_insn (lhsr, resvr);
+}
+
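+/* A rough, illustrative example (the names below are hypothetical, not part
+   of this change): with -fsanitize=signed-integer-overflow, vector code like
+
+     typedef int v4si __attribute__ ((vector_size (4 * sizeof (int))));
+
+     v4si
+     add_v4si (v4si a, v4si b)
+     {
+       return a + b;
+     }
+
+   is turned by instrument_si_overflow into an IFN_UBSAN_CHECK_ADD call on
+   the whole vector, which the function above expands element by element
+   (unrolled for small vectors, through a runtime loop otherwise), reusing
+   the scalar expand_*_overflow helpers with one shared DATA record.  */
+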
/* Expand UBSAN_CHECK_ADD call STMT. */
static void
tree lhs = gimple_call_lhs (stmt);
tree arg0 = gimple_call_arg (stmt, 0);
tree arg1 = gimple_call_arg (stmt, 1);
- expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
- false, false, false, true);
+ if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
+ expand_vector_ubsan_overflow (loc, PLUS_EXPR, lhs, arg0, arg1);
+ else
+ expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
+ false, false, false, true, NULL);
}
/* Expand UBSAN_CHECK_SUB call STMT. */
tree lhs = gimple_call_lhs (stmt);
tree arg0 = gimple_call_arg (stmt, 0);
tree arg1 = gimple_call_arg (stmt, 1);
- if (integer_zerop (arg0))
- expand_neg_overflow (loc, lhs, arg1, true);
+ if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
+ expand_vector_ubsan_overflow (loc, MINUS_EXPR, lhs, arg0, arg1);
+ else if (integer_zerop (arg0))
+ expand_neg_overflow (loc, lhs, arg1, true, NULL);
else
expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
- false, false, false, true);
+ false, false, false, true, NULL);
}
/* Expand UBSAN_CHECK_MUL call STMT. */
tree lhs = gimple_call_lhs (stmt);
tree arg0 = gimple_call_arg (stmt, 0);
tree arg1 = gimple_call_arg (stmt, 1);
- expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
+ if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
+ expand_vector_ubsan_overflow (loc, MULT_EXPR, lhs, arg0, arg1);
+ else
+ expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true,
+ NULL);
}
/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion. */
case MINUS_EXPR:
if (integer_zerop (arg0) && !unsr_p)
{
- expand_neg_overflow (loc, lhs, arg1, false);
+ expand_neg_overflow (loc, lhs, arg1, false, NULL);
return;
}
/* FALLTHRU */
case PLUS_EXPR:
- expand_addsub_overflow (loc, code, lhs, arg0, arg1,
- unsr_p, unsr_p, unsr_p, false);
+ expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
+ unsr_p, unsr_p, false, NULL);
return;
case MULT_EXPR:
- expand_mul_overflow (loc, lhs, arg0, arg1,
- unsr_p, unsr_p, unsr_p, false);
+ expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
+ unsr_p, unsr_p, false, NULL);
return;
default:
gcc_unreachable ();
arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
if (code != MULT_EXPR)
expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
- uns0_p, uns1_p, false);
+ uns0_p, uns1_p, false, NULL);
else
expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
- uns0_p, uns1_p, false);
+ uns0_p, uns1_p, false, NULL);
return;
}
+2016-11-16 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/77823
+ * c-c++-common/ubsan/overflow-vec-1.c: New test.
+ * c-c++-common/ubsan/overflow-vec-2.c: New test.
+
2016-11-15 Marek Polacek <polacek@redhat.com>
* g++.dg/cpp1z/init-statement6.C: Rename a function.
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-Wno-psabi -fsanitize=signed-integer-overflow -Wno-unused-variable -fno-sanitize-recover=signed-integer-overflow" } */
+
+#define SCHAR_MAX __SCHAR_MAX__
+#define SCHAR_MIN (-__SCHAR_MAX__ - 1)
+#define SHRT_MAX __SHRT_MAX__
+#define SHRT_MIN (-__SHRT_MAX__ - 1)
+#define INT_MAX __INT_MAX__
+#define INT_MIN (-__INT_MAX__ - 1)
+
+typedef signed char VC __attribute__((vector_size (16)));
+typedef short VS __attribute__((vector_size (8 * sizeof (short))));
+typedef int VI __attribute__((vector_size (4 * sizeof (int))));
+typedef int VI2 __attribute__((vector_size (16 * sizeof (int))));
+
+void __attribute__((noinline,noclone))
+checkvc (VC i, VC j)
+{
+ if (__builtin_memcmp (&i, &j, sizeof (VC)))
+ __builtin_abort ();
+}
+
+void __attribute__((noinline,noclone))
+checkvs (VS i, VS j)
+{
+ if (__builtin_memcmp (&i, &j, sizeof (VS)))
+ __builtin_abort ();
+}
+
+void __attribute__((noinline,noclone))
+checkvi (VI i, VI j)
+{
+ if (__builtin_memcmp (&i, &j, sizeof (VI)))
+ __builtin_abort ();
+}
+
+void __attribute__((noinline,noclone))
+checkvi2 (VI2 i, VI2 j)
+{
+ if (__builtin_memcmp (&i, &j, sizeof (VI2)))
+ __builtin_abort ();
+}
+
+VI __attribute__((noinline,noclone))
+foo (VI i)
+{
+ return -i;
+}
+
+VS __attribute__((noinline,noclone))
+bar (VS i, VS j)
+{
+ return i + j;
+}
+
+int
+main (void)
+{
+ /* Every operation below is chosen so that no element overflows; with
+ -fno-sanitize-recover=signed-integer-overflow any report would abort the test. */
+ volatile VC a = (VC) { 0, SCHAR_MAX - 2, SCHAR_MAX - 2, 3, 2, 3, 4, 5, 0, 7, 1, 2, 3, 4, SCHAR_MAX - 13, SCHAR_MAX };
+ volatile VC b = (VC) { 5, 2, 1, 5, 0, 1, 2, 7, 8, 9, 10, 11, 6, -2, 13, 0 };
+ volatile VC k = b + a;
+ checkvc (k, (VC) { 5, SCHAR_MAX, SCHAR_MAX - 1, 8, 2, 4, 6, 12, 8, 16, 11, 13, 9, 2, SCHAR_MAX, SCHAR_MAX });
+ k = a + b;
+ checkvc (k, (VC) { 5, SCHAR_MAX, SCHAR_MAX - 1, 8, 2, 4, 6, 12, 8, 16, 11, 13, 9, 2, SCHAR_MAX, SCHAR_MAX });
+
+ volatile VS c = (VS) { 0, SHRT_MAX - 2, SHRT_MAX - 2, 3, 3, 4, SHRT_MAX - 13, SHRT_MAX };
+ volatile VS d = (VS) { 5, 2, -3, 5, 6, -2, 13, -1 };
+ volatile VS l = d + c;
+ checkvs (l, (VS) { 5, SHRT_MAX, SHRT_MAX - 5, 8, 9, 2, SHRT_MAX, SHRT_MAX - 1 });
+ l = bar (c, d);
+ checkvs (l, (VS) { 5, SHRT_MAX, SHRT_MAX - 5, 8, 9, 2, SHRT_MAX, SHRT_MAX - 1 });
+
+ volatile VI e = (VI) { INT_MAX - 4, INT_MAX - 5, INT_MAX - 13, INT_MAX };
+ volatile VI f = (VI) { 4, -6, 13, 0 };
+ volatile VI m = f + e;
+ checkvi (m, (VI) { INT_MAX, INT_MAX - 11,INT_MAX, INT_MAX });
+ m = e + f;
+ checkvi (m, (VI) { INT_MAX, INT_MAX - 11,INT_MAX, INT_MAX });
+
+ volatile VI2 g = (VI2) { 0, INT_MAX - 2, INT_MAX - 2, 3, 3, 4, INT_MAX - 13, INT_MAX };
+ volatile VI2 h = (VI2) { 5, 2, -5, 5, 6, -2, 13, -1 };
+ volatile VI2 n = h + g;
+ checkvi2 (n, (VI2) { 5, INT_MAX, INT_MAX - 7, 8, 9, 2, INT_MAX, INT_MAX - 1 });
+ n = g + h;
+ checkvi2 (n, (VI2) { 5, INT_MAX, INT_MAX - 7, 8, 9, 2, INT_MAX, INT_MAX - 1 });
+
+ volatile VC a2 = k - b;
+ checkvc (a2, a);
+ volatile VC b2 = k - a;
+ checkvc (b2, b);
+
+ volatile VS c2 = l - d;
+ checkvs (c2, c);
+ volatile VS d2 = l - c;
+ checkvs (d2, d);
+
+ volatile VI e2 = m - f;
+ checkvi (e2, e);
+ volatile VI f2 = m - e;
+ checkvi (f2, f);
+
+ volatile VI2 g2 = n - h;
+ checkvi2 (g2, g);
+ volatile VI2 h2 = n - g;
+ checkvi2 (h2, h);
+
+ a = (VC) { 0, SCHAR_MAX / 4, SCHAR_MAX / 4, 3, 2, 3, 4, 5, 0, 7, 1, 2, 3, 4, SCHAR_MAX - 13, SCHAR_MAX };
+ b = (VC) { SCHAR_MAX, 4, 3, 2, 3, 4, 5, 2, 9, 2, 9, 1, 0, 8, 1, 1 };
+ k = a * b;
+ checkvc (k, (VC) { 0, 124, 93, 6, 6,12,20,10, 0,14, 9, 2, 0,32, SCHAR_MAX - 13, SCHAR_MAX });
+
+ c = (VS) { 0, SHRT_MAX / 8, SHRT_MAX / 7, 5, 8, 9, SHRT_MAX - 10, SHRT_MAX };
+ d = (VS) { SHRT_MAX, 8, 6, 2, 3, 4, 1, 1 };
+ l = c * d;
+ checkvs (l, (VS) { 0, 32760, 28086, 10,24,36, SHRT_MAX - 10, SHRT_MAX });
+
+ e = (VI) { INT_MAX, INT_MAX / 5, INT_MAX / 6, INT_MAX };
+ f = (VI) { 0, 5, 5, 1 };
+ m = e * f;
+ checkvi (m, (VI) { 0, 2147483645, 1789569705, INT_MAX });
+
+ g = (VI2) { INT_MAX, INT_MAX / 9, INT_MAX / 8, 5, 6, 7, 8, INT_MAX };
+ h = (VI2) { 0, 8, 8, 2, 3, 4, 5, 1 };
+ n = g * h;
+ checkvi2 (n,(VI2) { 0, 1908874352, 2147483640, 10,18,28,40, INT_MAX });
+
+ a = (VC) { 5, 7, 8, 9, SCHAR_MAX, SCHAR_MIN + 1, 24, 32, 0, 1, 2, 3, 4, 5, SCHAR_MAX, SCHAR_MIN + 2 };
+ k = -a;
+ checkvc (k, (VC) {-5,-7,-8,-9,-SCHAR_MAX, SCHAR_MAX, -24,-32, 0,-1,-2,-3,-4,-5,-SCHAR_MAX, SCHAR_MAX - 1 });
+
+ c = (VS) { 0, 7, 23, SHRT_MIN + 1, SHRT_MIN + 2, SHRT_MAX, 2, 5 };
+ l = -c;
+ checkvs (l, (VS) { 0,-7,-23, SHRT_MAX, SHRT_MAX - 1,-SHRT_MAX,-2,-5 });
+
+ e = (VI) { 5, INT_MAX, INT_MIN + 1, INT_MIN + 2 };
+ m = foo (e);
+ checkvi (m, (VI) {-5,-INT_MAX, INT_MAX, INT_MAX - 1 });
+
+ g = (VI2) { 10, 11, 0, INT_MAX - 2, 1, INT_MIN + 1, 5, INT_MIN / 2 };
+ n = -g;
+ checkvi2 (n, (VI2) {-10,-11, 0,-INT_MAX + 2,-1, INT_MAX, -5, INT_MAX / 2 + 1 });
+ return 0;
+}
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-Wno-psabi -fsanitize=signed-integer-overflow -Wno-unused-variable -fsanitize-recover=signed-integer-overflow" } */
+
+#define SCHAR_MAX __SCHAR_MAX__
+#define SCHAR_MIN (-__SCHAR_MAX__ - 1)
+#define SHRT_MAX __SHRT_MAX__
+#define SHRT_MIN (-__SHRT_MAX__ - 1)
+#define INT_MAX __INT_MAX__
+#define INT_MIN (-__INT_MAX__ - 1)
+
+typedef signed char VC __attribute__((vector_size (16)));
+typedef short VS __attribute__((vector_size (8 * sizeof (short))));
+typedef int VI __attribute__((vector_size (4 * sizeof (int))));
+typedef int VI2 __attribute__((vector_size (16 * sizeof (int))));
+
+void __attribute__((noinline,noclone))
+checkvc (VC i, VC j)
+{
+ if (__builtin_memcmp (&i, &j, sizeof (VC)))
+ __builtin_abort ();
+}
+
+void __attribute__((noinline,noclone))
+checkvs (VS i, VS j)
+{
+ if (__builtin_memcmp (&i, &j, sizeof (VS)))
+ __builtin_abort ();
+}
+
+void __attribute__((noinline,noclone))
+checkvi (VI i, VI j)
+{
+ if (__builtin_memcmp (&i, &j, sizeof (VI)))
+ __builtin_abort ();
+}
+
+void __attribute__((noinline,noclone))
+checkvi2 (VI2 i, VI2 j)
+{
+ if (__builtin_memcmp (&i, &j, sizeof (VI2)))
+ __builtin_abort ();
+}
+
+VI __attribute__((noinline,noclone))
+foo (VI i)
+{
+ return -i;
+}
+
+VS __attribute__((noinline,noclone))
+bar (VS i, VS j)
+{
+ return i + j;
+}
+
+int
+main (void)
+{
+ /* Check that for a vector operation, only the first element with UB is reported. */
+ volatile VC a = (VC) { 0, SCHAR_MAX - 2, SCHAR_MAX - 2, 3, 2, 3, 4, 5, 0, 7, 1, 2, 3, 4, SCHAR_MAX - 13, SCHAR_MAX };
+ volatile VC b = (VC) { 5, 2, 3, 5, 0, 1, 2, 7, 8, 9, 10, 11, 6, -2, 13, 1 };
+ volatile VC k = b + a;
+ checkvc (k, (VC) { 5, SCHAR_MAX, SCHAR_MIN, 8, 2, 4, 6, 12, 8, 16, 11, 13, 9, 2, SCHAR_MAX, SCHAR_MIN });
+ k = a + b;
+ checkvc (k, (VC) { 5, SCHAR_MAX, SCHAR_MIN, 8, 2, 4, 6, 12, 8, 16, 11, 13, 9, 2, SCHAR_MAX, SCHAR_MIN });
+
+ volatile VS c = (VS) { 0, SHRT_MAX - 2, SHRT_MAX - 2, 3, 3, 4, SHRT_MAX - 13, SHRT_MAX };
+ volatile VS d = (VS) { 5, 2, 3, 5, 6, -2, 13, 1 };
+ volatile VS l = d + c;
+ checkvs (l, (VS) { 5, SHRT_MAX, SHRT_MIN, 8, 9, 2, SHRT_MAX, SHRT_MIN });
+ l = bar (c, d);
+ checkvs (l, (VS) { 5, SHRT_MAX, SHRT_MIN, 8, 9, 2, SHRT_MAX, SHRT_MIN });
+
+ volatile VI e = (VI) { INT_MAX - 4, INT_MAX - 5, INT_MAX - 13, INT_MAX };
+ volatile VI f = (VI) { 4, 6, 13, 1 };
+ volatile VI m = f + e;
+ checkvi (m, (VI) { INT_MAX, INT_MIN, INT_MAX, INT_MIN });
+ m = e + f;
+ checkvi (m, (VI) { INT_MAX, INT_MIN, INT_MAX, INT_MIN });
+
+ volatile VI2 g = (VI2) { 0, INT_MAX - 2, INT_MAX - 2, 3, 3, 4, INT_MAX - 13, INT_MAX };
+ volatile VI2 h = (VI2) { 5, 2, 3, 5, 6, -2, 13, 1 };
+ volatile VI2 n = h + g;
+ checkvi2 (n, (VI2) { 5, INT_MAX, INT_MIN, 8, 9, 2, INT_MAX, INT_MIN });
+ n = g + h;
+ checkvi2 (n, (VI2) { 5, INT_MAX, INT_MIN, 8, 9, 2, INT_MAX, INT_MIN });
+
+ volatile VC a2 = k - b;
+ checkvc (a2, a);
+ volatile VC b2 = k - a;
+ checkvc (b2, b);
+
+ volatile VS c2 = l - d;
+ checkvs (c2, c);
+ volatile VS d2 = l - c;
+ checkvs (d2, d);
+
+ volatile VI e2 = m - f;
+ checkvi (e2, e);
+ volatile VI f2 = m - e;
+ checkvi (f2, f);
+
+ volatile VI2 g2 = n - h;
+ checkvi2 (g2, g);
+ volatile VI2 h2 = n - g;
+ checkvi2 (h2, h);
+
+ a = (VC) { 0, SCHAR_MAX / 4, SCHAR_MAX / 4, 3, 2, 3, 4, 5, 0, 7, 1, 2, 3, 4, SCHAR_MAX - 13, SCHAR_MAX };
+ b = (VC) { SCHAR_MAX, 4, 5, 2, 3, 4, 5, 2, 9, 2, 9, 1, 0, 8, 1, 2 };
+ k = a * b;
+ checkvc (k, (VC) { 0, 124, -101, 6, 6,12,20,10, 0,14, 9, 2, 0,32, SCHAR_MAX - 13, -2 });
+
+ c = (VS) { 0, SHRT_MAX / 8, SHRT_MAX / 7, 5, 8, 9, SHRT_MAX - 10, SHRT_MAX };
+ d = (VS) { SHRT_MAX, 8, 17, 2, 3, 4, 1, 3 };
+ l = c * d;
+ checkvs (l, (VS) { 0, 32760, 14041, 10,24,36, SHRT_MAX - 10, 32765 });
+
+ e = (VI) { INT_MAX, INT_MAX / 5, INT_MAX / 6, INT_MAX };
+ f = (VI) { 0, 5, 7, 2 };
+ m = e * f;
+ checkvi (m, (VI) { 0, 2147483645, -1789569709, -2 });
+
+ g = (VI2) { INT_MAX, INT_MAX / 9, INT_MAX / 8, 5, 6, 7, 8, INT_MAX };
+ h = (VI2) { 0, 10, 8, 2, 3, 4, 5, 1 };
+ n = g * h;
+ checkvi2 (n,(VI2) { 0, -1908874356, 2147483640, 10,18,28,40, INT_MAX });
+
+ a = (VC) { 5, 7, 8, 9, SCHAR_MAX, SCHAR_MIN, 24, 32, 0, 1, 2, 3, 4, 5, SCHAR_MAX, SCHAR_MIN };
+ k = -a;
+ checkvc (k, (VC) {-5,-7,-8,-9,-SCHAR_MAX, SCHAR_MIN,-24,-32, 0,-1,-2,-3,-4,-5,-SCHAR_MAX, SCHAR_MIN });
+
+ c = (VS) { 0, 7, 23, SHRT_MIN, SHRT_MIN, SHRT_MAX, 2, 5 };
+ l = -c;
+ checkvs (l, (VS) { 0,-7,-23, SHRT_MIN, SHRT_MIN,-SHRT_MAX,-2,-5 });
+
+ e = (VI) { 5, INT_MAX, INT_MIN, INT_MIN };
+ m = foo (e);
+ checkvi (m, (VI) {-5,-INT_MAX, INT_MIN, INT_MIN });
+
+ g = (VI2) { 10, 11, 0, INT_MAX - 2, 1, INT_MIN + 1, 5, INT_MIN };
+ n = -g;
+ checkvi2 (n, (VI2) {-10,-11, 0,-INT_MAX + 2,-1, INT_MAX, -5, INT_MIN });
+ return 0;
+}
+
+/* { dg-output "signed integer overflow: 3 \\+ 125 cannot be represented in type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 125 \\+ 3 cannot be represented in type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 3 \\+ 32765 cannot be represented in type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 32765 \\+ 3 cannot be represented in type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 6 \\+ 2147483642 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 2147483642 \\+ 6 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 3 \\+ 2147483645 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 2147483645 \\+ 3 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -128 - 3 cannot be represented in type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -128 - 125 cannot be represented in type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -32768 - 3 cannot be represented in type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -32768 - 32765 cannot be represented in type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147483648 - 6 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147483648 - 2147483642 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147483648 - 3 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147483648 - 2147483645 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 31 \\* 5 cannot be represented in type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 4681 \\* 17 cannot be represented in type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 357913941 \\* 7 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 238609294 \\* 10 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -128 cannot be represented in type 'signed char'; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -32768 cannot be represented in type 'short int'; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -2147483648 cannot be represented in type 'int'; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -2147483648 cannot be represented in type 'int'; cast to an unsigned type to negate this value to itself" } */
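+
+/* Note: each vector statement above yields at most one report even when
+   several elements overflow, because expand_vector_ubsan_overflow passes one
+   shared __ubsan_overflow_data record (the new DATAP argument) to every
+   per-element check and the runtime is expected to report a given data
+   record only once.  */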
tree op1 = gimple_call_arg (stmt, 1);
tree type;
if (is_ubsan)
- type = TREE_TYPE (op0);
+ {
+ type = TREE_TYPE (op0);
+ if (VECTOR_TYPE_P (type))
+ return false;
+ }
else if (gimple_call_lhs (stmt) == NULL_TREE)
return false;
else
tree
ubsan_build_overflow_builtin (tree_code code, location_t loc, tree lhstype,
- tree op0, tree op1)
+ tree op0, tree op1, tree *datap)
{
if (flag_sanitize_undefined_trap_on_error)
return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
- tree data = ubsan_create_data ("__ubsan_overflow_data", 1, &loc,
- ubsan_type_descriptor (lhstype), NULL_TREE,
- NULL_TREE);
+ tree data;
+ if (datap && *datap)
+ data = *datap;
+ else
+ data = ubsan_create_data ("__ubsan_overflow_data", 1, &loc,
+ ubsan_type_descriptor (lhstype), NULL_TREE,
+ NULL_TREE);
+ if (datap)
+ *datap = data;
enum built_in_function fn_code;
switch (code)
tree_code code = gimple_assign_rhs_code (stmt);
tree lhs = gimple_assign_lhs (stmt);
tree lhstype = TREE_TYPE (lhs);
+ tree lhsinner = VECTOR_TYPE_P (lhstype) ? TREE_TYPE (lhstype) : lhstype;
tree a, b;
gimple *g;
/* If this is not a signed operation, don't instrument anything here.
Also punt on bit-fields. */
- if (!INTEGRAL_TYPE_P (lhstype)
- || TYPE_OVERFLOW_WRAPS (lhstype)
- || GET_MODE_BITSIZE (TYPE_MODE (lhstype)) != TYPE_PRECISION (lhstype))
+ if (!INTEGRAL_TYPE_P (lhsinner)
+ || TYPE_OVERFLOW_WRAPS (lhsinner)
+ || GET_MODE_BITSIZE (TYPE_MODE (lhsinner)) != TYPE_PRECISION (lhsinner))
return;
switch (code)
/* Represent i = -u;
as
i = UBSAN_CHECK_SUB (0, u); */
- a = build_int_cst (lhstype, 0);
+ a = build_zero_cst (lhstype);
b = gimple_assign_rhs1 (stmt);
g = gimple_build_call_internal (IFN_UBSAN_CHECK_SUB, 2, a, b);
gimple_call_set_lhs (g, lhs);
into
_N = UBSAN_CHECK_SUB (0, u);
i = ABS_EXPR<_N>; */
- a = build_int_cst (lhstype, 0);
+ a = build_zero_cst (lhstype);
b = gimple_assign_rhs1 (stmt);
g = gimple_build_call_internal (IFN_UBSAN_CHECK_SUB, 2, a, b);
a = make_ssa_name (lhstype);
extern tree ubsan_type_descriptor (tree, enum ubsan_print_style = UBSAN_PRINT_NORMAL);
extern tree ubsan_encode_value (tree, bool = false);
extern bool is_ubsan_builtin_p (tree);
-extern tree ubsan_build_overflow_builtin (tree_code, location_t, tree, tree, tree);
+extern tree ubsan_build_overflow_builtin (tree_code, location_t, tree, tree,
+ tree, tree *);
extern tree ubsan_instrument_float_cast (location_t, tree, tree);
extern tree ubsan_get_source_location_type (void);