+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * wide-int.h (wide_int_ref_storage): Make host_dependent_precision
+ a template parameter.
+ (WIDE_INT_REF_FOR): Update accordingly.
+ * tree.h (wi::int_traits <const_tree>): Delete.
+ (wi::tree_to_widest_ref, wi::tree_to_offset_ref): New typedefs.
+ (wi::to_widest, wi::to_offset): Use them. Expand commentary.
+ (wi::tree_to_wide_ref): New typedef.
+ (wi::to_wide): New function.
+ * calls.c (get_size_range): Use wi::to_wide when operating on
+ trees as wide_ints.
+ * cgraph.c (cgraph_node::create_thunk): Likewise.
+ * config/i386/i386.c (ix86_data_alignment): Likewise.
+ (ix86_local_alignment): Likewise.
+ * dbxout.c (stabstr_O): Likewise.
+ * dwarf2out.c (add_scalar_info, gen_enumeration_type_die): Likewise.
+ * expr.c (const_vector_from_tree): Likewise.
+ * fold-const-call.c (host_size_t_cst_p, fold_const_call_1): Likewise.
+ * fold-const.c (may_negate_without_overflow_p, negate_expr_p)
+ (fold_negate_expr_1, int_const_binop_1, const_binop)
+ (fold_convert_const_int_from_real, optimize_bit_field_compare)
+ (all_ones_mask_p, sign_bit_p, unextend, extract_muldiv_1)
+ (fold_div_compare, fold_single_bit_test, fold_plusminus_mult_expr)
+ (pointer_may_wrap_p, expr_not_equal_to, fold_binary_loc)
+ (fold_ternary_loc, multiple_of_p, fold_negate_const, fold_abs_const)
+ (fold_not_const, round_up_loc): Likewise.
+ * gimple-fold.c (gimple_fold_indirect_ref): Likewise.
+ * gimple-ssa-warn-alloca.c (alloca_call_type_by_arg): Likewise.
+ (alloca_call_type): Likewise.
+ * gimple.c (preprocess_case_label_vec_for_gimple): Likewise.
+ * godump.c (go_output_typedef): Likewise.
+ * graphite-sese-to-poly.c (tree_int_to_gmp): Likewise.
+ * internal-fn.c (get_min_precision): Likewise.
+ * ipa-cp.c (ipcp_store_vr_results): Likewise.
+ * ipa-polymorphic-call.c
+ (ipa_polymorphic_call_context::ipa_polymorphic_call_context): Likewise.
+ * ipa-prop.c (ipa_print_node_jump_functions_for_edge): Likewise.
+ (ipa_modify_call_arguments): Likewise.
+ * match.pd: Likewise.
+ * omp-low.c (scan_omp_1_op, lower_omp_ordered_clauses): Likewise.
+ * print-tree.c (print_node_brief, print_node): Likewise.
+ * stmt.c (expand_case): Likewise.
+ * stor-layout.c (layout_type): Likewise.
+ * tree-affine.c (tree_to_aff_combination): Likewise.
+ * tree-cfg.c (group_case_labels_stmt): Likewise.
+ * tree-data-ref.c (dr_analyze_indices): Likewise.
+ (prune_runtime_alias_test_list): Likewise.
+ * tree-dump.c (dequeue_and_dump): Likewise.
+ * tree-inline.c (remap_gimple_op_r, copy_tree_body_r): Likewise.
+ * tree-predcom.c (is_inv_store_elimination_chain): Likewise.
+ * tree-pretty-print.c (dump_generic_node): Likewise.
+ * tree-scalar-evolution.c (iv_can_overflow_p): Likewise.
+ (simple_iv_with_niters): Likewise.
+ * tree-ssa-address.c (addr_for_mem_ref): Likewise.
+ * tree-ssa-ccp.c (ccp_finalize, evaluate_stmt): Likewise.
+ * tree-ssa-loop-ivopts.c (constant_multiple_of): Likewise.
+ * tree-ssa-loop-niter.c (split_to_var_and_offset)
+ (refine_value_range_using_guard, number_of_iterations_ne_max)
+ (number_of_iterations_lt_to_ne, number_of_iterations_lt)
+ (get_cst_init_from_scev, record_nonwrapping_iv)
+ (scev_var_range_cant_overflow): Likewise.
+ * tree-ssa-phiopt.c (minmax_replacement): Likewise.
+ * tree-ssa-pre.c (compute_avail): Likewise.
+ * tree-ssa-sccvn.c (vn_reference_fold_indirect): Likewise.
+ (vn_reference_maybe_forwprop_address, valueized_wider_op): Likewise.
+ * tree-ssa-structalias.c (get_constraint_for_ptr_offset): Likewise.
+ * tree-ssa-uninit.c (is_pred_expr_subset_of): Likewise.
+ * tree-ssanames.c (set_nonzero_bits, get_nonzero_bits): Likewise.
+ * tree-switch-conversion.c (collect_switch_conv_info, array_value_type)
+ (dump_case_nodes, try_switch_expansion): Likewise.
+ * tree-vect-loop-manip.c (vect_gen_vector_loop_niters): Likewise.
+ (vect_do_peeling): Likewise.
+ * tree-vect-patterns.c (vect_recog_bool_pattern): Likewise.
+ * tree-vect-stmts.c (vectorizable_load): Likewise.
+ * tree-vrp.c (compare_values_warnv, vrp_int_const_binop): Likewise.
+ (zero_nonzero_bits_from_vr, ranges_from_anti_range): Likewise.
+ (extract_range_from_binary_expr_1, adjust_range_with_scev): Likewise.
+ (overflow_comparison_p_1, register_edge_assert_for_2): Likewise.
+ (is_masked_range_test, find_switch_asserts, maybe_set_nonzero_bits)
+ (vrp_evaluate_conditional_warnv_with_ops, intersect_ranges): Likewise.
+ (range_fits_type_p, two_valued_val_range_p, vrp_finalize): Likewise.
+ (evrp_dom_walker::before_dom_children): Likewise.
+ * tree.c (cache_integer_cst, real_value_from_int_cst, integer_zerop)
+ (integer_all_onesp, integer_pow2p, integer_nonzerop, tree_log2)
+ (tree_floor_log2, tree_ctz, mem_ref_offset, tree_int_cst_sign_bit)
+ (tree_int_cst_sgn, get_unwidened, int_fits_type_p): Likewise.
+ (get_type_static_bounds, num_ending_zeros, drop_tree_overflow)
+ (get_range_pos_neg): Likewise.
+ * ubsan.c (ubsan_expand_ptr_ifn): Likewise.
+ * config/darwin.c (darwin_mergeable_constant_section): Likewise.
+ * config/aarch64/aarch64.c (aapcs_vfp_sub_candidate): Likewise.
+ * config/arm/arm.c (aapcs_vfp_sub_candidate): Likewise.
+ * config/avr/avr.c (avr_fold_builtin): Likewise.
+ * config/bfin/bfin.c (bfin_local_alignment): Likewise.
+ * config/msp430/msp430.c (msp430_attr): Likewise.
+ * config/nds32/nds32.c (nds32_insert_attributes): Likewise.
+ * config/powerpcspe/powerpcspe-c.c
+ (altivec_resolve_overloaded_builtin): Likewise.
+ * config/powerpcspe/powerpcspe.c (rs6000_aggregate_candidate)
+ (rs6000_expand_ternop_builtin): Likewise.
+ * config/rs6000/rs6000-c.c
+ (altivec_resolve_overloaded_builtin): Likewise.
+ * config/rs6000/rs6000.c (rs6000_aggregate_candidate): Likewise.
+ (rs6000_expand_ternop_builtin): Likewise.
+ * config/s390/s390.c (s390_handle_hotpatch_attribute): Likewise.
+
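A rough sketch of the idiom applied throughout (illustrative only; assume "t"
is an INTEGER_CST tree):

	/* Before: an INTEGER_CST converted to wide_int implicitly:
	     wide_int val = t;
	   After: the conversion is explicit.  wi::to_wide yields a
	   wi::tree_to_wide_ref referring to T's value at the precision
	   of its type.  */
	wide_int val = wi::to_wide (t);

	/* wi::to_widest and wi::to_offset continue to give the value as a
	   widest_int or offset_int, now through the tree_to_widest_ref and
	   tree_to_offset_ref typedefs.  */
	widest_int w = wi::to_widest (t);
	offset_int o = wi::to_offset (t);
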
2017-10-10 Bin Cheng <bin.cheng@arm.com>
* tree-vect-loop-manip.c (rename_variables_in_bb): Rename PHI nodes
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * gcc-interface/decl.c (annotate_value): Use wi::to_wide when
+ operating on trees as wide_ints.
+
2017-10-09 Hristian Kirtchev <kirtchev@adacore.com>
* sem_unit.adb (Find_Enclosing_Scope): Do not treat a block statement
can appear for discriminants in expressions for variants. */
if (tree_int_cst_sgn (gnu_size) < 0)
{
- tree t = wide_int_to_tree (sizetype, wi::neg (gnu_size));
+ tree t = wide_int_to_tree (sizetype, -wi::to_wide (gnu_size));
tcode = Negate_Expr;
ops[0] = UI_From_gnu (t);
}
if (TREE_CODE (TREE_OPERAND (gnu_size, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (gnu_size, 1);
- wide_int signed_op1 = wi::sext (op1, TYPE_PRECISION (sizetype));
+ wide_int signed_op1 = wi::sext (wi::to_wide (op1),
+ TYPE_PRECISION (sizetype));
if (wi::neg_p (signed_op1))
{
op1 = wide_int_to_tree (sizetype, wi::neg (signed_op1));
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * c-ada-spec.c (dump_generic_ada_node): Use wi::to_wide when
+ operating on trees as wide_ints.
+ * c-common.c (pointer_int_sum): Likewise.
+ * c-pretty-print.c (pp_c_integer_constant): Likewise.
+ * c-warn.c (match_case_to_enum_1): Likewise.
+ (c_do_switch_warnings): Likewise.
+ (maybe_warn_shift_overflow): Likewise.
+
2017-10-10 Jakub Jelinek <jakub@redhat.com>
PR c/82437
pp_unsigned_wide_integer (buffer, tree_to_uhwi (node));
else
{
- wide_int val = node;
+ wide_int val = wi::to_wide (node);
int i;
if (wi::neg_p (val))
{
convert (TREE_TYPE (intop), size_exp));
intop = convert (sizetype, t);
if (TREE_OVERFLOW_P (intop) && !TREE_OVERFLOW (t))
- intop = wide_int_to_tree (TREE_TYPE (intop), intop);
+ intop = wide_int_to_tree (TREE_TYPE (intop), wi::to_wide (intop));
}
/* Create the sum or difference. */
pp_unsigned_wide_integer (pp, tree_to_uhwi (i));
else
{
- wide_int wi = i;
+ wide_int wi = wi::to_wide (i);
- if (wi::lt_p (i, 0, TYPE_SIGN (TREE_TYPE (i))))
+ if (wi::lt_p (wi::to_wide (i), 0, TYPE_SIGN (TREE_TYPE (i))))
{
pp_minus (pp);
wi = -wi;
char buf[WIDE_INT_PRINT_BUFFER_SIZE];
if (tree_fits_uhwi_p (key))
- print_dec (key, buf, UNSIGNED);
+ print_dec (wi::to_wide (key), buf, UNSIGNED);
else if (tree_fits_shwi_p (key))
- print_dec (key, buf, SIGNED);
+ print_dec (wi::to_wide (key), buf, SIGNED);
else
- print_hex (key, buf);
+ print_hex (wi::to_wide (key), buf);
if (TYPE_NAME (type) == NULL_TREE)
warning_at (DECL_SOURCE_LOCATION (CASE_LABEL (label)),
/* If there's a case value > 1 or < 0, that is outside bool
range, warn. */
if (outside_range_p
- || (max && wi::gts_p (max, 1))
- || (min && wi::lts_p (min, 0))
+ || (max && wi::gts_p (wi::to_wide (max), 1))
+ || (min && wi::lts_p (wi::to_wide (min), 0))
/* And handle the
switch (boolean)
{
}
case, where we want to warn. */
|| (default_node
- && max && wi::eq_p (max, 1)
- && min && wi::eq_p (min, 0)))
+ && max && wi::to_wide (max) == 1
+ && min && wi::to_wide (min) == 0))
warning_at (switch_location, OPT_Wswitch_bool,
"switch condition has boolean value");
}
if (TYPE_UNSIGNED (type0))
return false;
- unsigned int min_prec = (wi::min_precision (op0, SIGNED)
+ unsigned int min_prec = (wi::min_precision (wi::to_wide (op0), SIGNED)
+ TREE_INT_CST_LOW (op1));
/* Handle the case of left-shifting 1 into the sign bit.
* However, shifting 1 _out_ of the sign bit, as in
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * c-parser.c (c_parser_cilk_clause_vectorlength): Use wi::to_wide when
+ operating on trees as wide_ints.
+ * c-typeck.c (build_c_cast, c_finish_omp_clauses): Likewise.
+ (c_tree_equal): Likewise.
+
2017-10-04 David Malcolm <dmalcolm@redhat.com>
* c-decl.c (push_parm_decl): Store c_parm's location into the
|| !INTEGRAL_TYPE_P (TREE_TYPE (expr)))
error_at (loc, "vectorlength must be an integer constant");
- else if (wi::exact_log2 (expr) == -1)
+ else if (wi::exact_log2 (wi::to_wide (expr)) == -1)
error_at (loc, "vectorlength must be a power of 2");
else
{
}
else if (TREE_OVERFLOW (value))
/* Reset VALUE's overflow flags, ensuring constant sharing. */
- value = wide_int_to_tree (TREE_TYPE (value), value);
+ value = wide_int_to_tree (TREE_TYPE (value), wi::to_wide (value));
}
}
if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
{
tree offset = TREE_PURPOSE (t);
- bool neg = wi::neg_p ((wide_int) offset);
+ bool neg = wi::neg_p (wi::to_wide (offset));
offset = fold_unary (ABS_EXPR, TREE_TYPE (offset), offset);
tree t2 = pointer_int_sum (OMP_CLAUSE_LOCATION (c),
neg ? MINUS_EXPR : PLUS_EXPR,
switch (code1)
{
case INTEGER_CST:
- return wi::eq_p (t1, t2);
+ return wi::to_wide (t1) == wi::to_wide (t2);
case REAL_CST:
return real_equal (&TREE_REAL_CST (t1), &TREE_REAL_CST (t2));
tree exptype = TREE_TYPE (exp);
unsigned expprec = TYPE_PRECISION (exptype);
- wide_int wzero = wi::zero (expprec);
- wide_int wmaxval = wide_int (TYPE_MAX_VALUE (exptype));
bool signed_p = !TYPE_UNSIGNED (exptype);
{
if (signed_p)
{
- if (wi::les_p (max, wzero))
+ if (wi::les_p (max, 0))
{
/* EXP is not in a strictly negative range. That means
it must be in some (not necessarily strictly) positive
conversions negative values end up converted to large
positive values, and otherwise they are not valid sizes,
the resulting range is in both cases [0, TYPE_MAX]. */
- min = wzero;
- max = wmaxval;
+ min = wi::zero (expprec);
+ max = wi::to_wide (TYPE_MAX_VALUE (exptype));
}
- else if (wi::les_p (min - 1, wzero))
+ else if (wi::les_p (min - 1, 0))
{
/* EXP is not in a negative-positive range. That means EXP
is either negative, or greater than max. Since negative
sizes are invalid make the range [MAX + 1, TYPE_MAX]. */
min = max + 1;
- max = wmaxval;
+ max = wi::to_wide (TYPE_MAX_VALUE (exptype));
}
else
{
max = min - 1;
- min = wzero;
+ min = wi::zero (expprec);
}
}
- else if (wi::eq_p (wzero, min - 1))
+ else if (wi::eq_p (0, min - 1))
{
/* EXP is unsigned and not in the range [1, MAX]. That means
it's either zero or greater than MAX. Even though 0 would
[MAX, TYPE_MAX] so that when MAX is greater than the limit
the whole range is diagnosed. */
min = max + 1;
- max = wmaxval;
+ max = wi::to_wide (TYPE_MAX_VALUE (exptype));
}
else
{
max = min - 1;
- min = wzero;
+ min = wi::zero (expprec);
}
}
/* Make sure that if VIRTUAL_OFFSET is in sync with VIRTUAL_VALUE. */
gcc_checking_assert (virtual_offset
- ? wi::eq_p (virtual_offset, virtual_value)
+ ? virtual_value == wi::to_wide (virtual_offset)
: virtual_value == 0);
node->thunk.fixed_offset = fixed_offset;
- tree_to_uhwi (TYPE_MIN_VALUE (index)));
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
- tree_to_uhwi (TYPE_MIN_VALUE (index)));
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
break;
}
- tmap = wide_int_to_tree (map_type, arg[0]);
+ tmap = wide_int_to_tree (map_type, wi::to_wide (arg[0]));
map = TREE_INT_CST_LOW (tmap);
if (TREE_CODE (tval) != INTEGER_CST
memcpy can use 32 bit loads/stores. */
if (TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && wi::gtu_p (TYPE_SIZE (type), 8)
+ && wi::gtu_p (wi::to_wide (TYPE_SIZE (type)), 8)
&& align < 32)
return 32;
return align;
if (TREE_CODE (size) == INTEGER_CST)
{
- if (wi::eq_p (size, 4))
+ if (wi::to_wide (size) == 4)
return darwin_sections[literal4_section];
- else if (wi::eq_p (size, 8))
+ else if (wi::to_wide (size) == 8)
return darwin_sections[literal8_section];
else if (HAVE_GAS_LITERAL16
&& TARGET_64BIT
- && wi::eq_p (size, 16))
+ && wi::to_wide (size) == 16)
return darwin_sections[literal16_section];
}
}
&& TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
{
- if (wi::geu_p (TYPE_SIZE (type), max_align_compat)
+ if (wi::geu_p (wi::to_wide (TYPE_SIZE (type)), max_align_compat)
&& align < max_align_compat)
align = max_align_compat;
- if (wi::geu_p (TYPE_SIZE (type), max_align)
- && align < max_align)
- align = max_align;
+ if (wi::geu_p (wi::to_wide (TYPE_SIZE (type)), max_align)
+ && align < max_align)
+ align = max_align;
}
/* x86-64 ABI requires arrays greater than 16 bytes to be aligned
if ((opt ? AGGREGATE_TYPE_P (type) : TREE_CODE (type) == ARRAY_TYPE)
&& TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && wi::geu_p (TYPE_SIZE (type), 128)
+ && wi::geu_p (wi::to_wide (TYPE_SIZE (type)), 128)
&& align < 128)
return 128;
}
!= TYPE_MAIN_VARIANT (va_list_type_node)))
&& TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && wi::geu_p (TYPE_SIZE (type), 128)
+ && wi::geu_p (wi::to_wide (TYPE_SIZE (type)), 128)
&& align < 128)
return 128;
}
break;
case INTEGER_CST:
- if (wi::gtu_p (value, 63))
+ if (wi::gtu_p (wi::to_wide (value), 63))
/* Allow the attribute to be added - the linker script
being used may still recognise this value. */
warning (OPT_Wattributes,
id = TREE_VALUE (id_list);
/* Issue error if it is not a valid integer value. */
if (TREE_CODE (id) != INTEGER_CST
- || wi::ltu_p (id, lower_bound)
- || wi::gtu_p (id, upper_bound))
+ || wi::ltu_p (wi::to_wide (id), lower_bound)
+ || wi::gtu_p (wi::to_wide (id), upper_bound))
error ("invalid id value for interrupt/exception attribute");
/* Advance to next id. */
/* 3. Check valid integer value for reset. */
if (TREE_CODE (id) != INTEGER_CST
- || wi::ltu_p (id, lower_bound)
- || wi::gtu_p (id, upper_bound))
+ || wi::ltu_p (wi::to_wide (id), lower_bound)
+ || wi::gtu_p (wi::to_wide (id), upper_bound))
error ("invalid id value for reset attribute");
/* 4. Check valid function for nmi/warm. */
/* If the second argument is an integer constant, if the value is in
the expected range, generate the built-in code if we can. We need
64-bit and direct move to extract the small integer vectors. */
- if (TREE_CODE (arg2) == INTEGER_CST && wi::ltu_p (arg2, nunits))
+ if (TREE_CODE (arg2) == INTEGER_CST
+ && wi::ltu_p (wi::to_wide (arg2), nunits))
{
switch (mode)
{
mode = TYPE_MODE (arg1_type);
if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode)
&& TREE_CODE (arg2) == INTEGER_CST
- && wi::ltu_p (arg2, 2))
+ && wi::ltu_p (wi::to_wide (arg2), 2))
{
tree call = NULL_TREE;
}
else if (mode == V1TImode && VECTOR_UNIT_VSX_P (mode)
&& TREE_CODE (arg2) == INTEGER_CST
- && wi::eq_p (arg2, 0))
+ && wi::eq_p (wi::to_wide (arg2), 0))
{
tree call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V1TI];
- tree_to_uhwi (TYPE_MIN_VALUE (index)));
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
/* Check whether the 2nd and 3rd arguments are integer constants and in
range and prepare arguments. */
STRIP_NOPS (arg1);
- if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
+ if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
{
error ("argument 2 must be 0 or 1");
return CONST0_RTX (tmode);
}
STRIP_NOPS (arg2);
- if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
+ if (TREE_CODE (arg2) != INTEGER_CST
+ || wi::geu_p (wi::to_wide (arg2), 16))
{
error ("argument 3 must be in the range 0..15");
return CONST0_RTX (tmode);
/* If the second argument is an integer constant, if the value is in
the expected range, generate the built-in code if we can. We need
64-bit and direct move to extract the small integer vectors. */
- if (TREE_CODE (arg2) == INTEGER_CST && wi::ltu_p (arg2, nunits))
+ if (TREE_CODE (arg2) == INTEGER_CST
+ && wi::ltu_p (wi::to_wide (arg2), nunits))
{
switch (mode)
{
mode = TYPE_MODE (arg1_type);
if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode)
&& TREE_CODE (arg2) == INTEGER_CST
- && wi::ltu_p (arg2, 2))
+ && wi::ltu_p (wi::to_wide (arg2), 2))
{
tree call = NULL_TREE;
}
else if (mode == V1TImode && VECTOR_UNIT_VSX_P (mode)
&& TREE_CODE (arg2) == INTEGER_CST
- && wi::eq_p (arg2, 0))
+ && wi::eq_p (wi::to_wide (arg2), 0))
{
tree call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V1TI];
- tree_to_uhwi (TYPE_MIN_VALUE (index)));
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
}
/* There must be no padding. */
- if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
+ if (wi::to_wide (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep))
return -1;
return count;
/* Check whether the 2nd and 3rd arguments are integer constants and in
range and prepare arguments. */
STRIP_NOPS (arg1);
- if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
+ if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
{
error ("argument 2 must be 0 or 1");
return CONST0_RTX (tmode);
}
STRIP_NOPS (arg2);
- if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
+ if (TREE_CODE (arg2) != INTEGER_CST
+ || wi::geu_p (wi::to_wide (arg2), 16))
{
error ("argument 3 must be in the range 0..15");
return CONST0_RTX (tmode);
err = 1;
else if (TREE_CODE (expr) != INTEGER_CST
|| !INTEGRAL_TYPE_P (TREE_TYPE (expr))
- || wi::gtu_p (expr, s390_hotpatch_hw_max))
+ || wi::gtu_p (wi::to_wide (expr), s390_hotpatch_hw_max))
err = 1;
else if (TREE_CODE (expr2) != INTEGER_CST
|| !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
- || wi::gtu_p (expr2, s390_hotpatch_hw_max))
+ || wi::gtu_p (wi::to_wide (expr2), s390_hotpatch_hw_max))
err = 1;
else
err = 0;
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * cvt.c (ignore_overflows): Use wi::to_wide when
+ operating on trees as wide_ints.
+ * decl.c (check_array_designated_initializer): Likewise.
+ * mangle.c (write_integer_cst): Likewise.
+ * semantics.c (cp_finish_omp_clause_depend_sink): Likewise.
+
2017-10-10 Nathan Sidwell <nathan@acm.org>
* name-lookup.c (set_global_binding): Don't deal with STAT_HACK.
{
gcc_assert (!TREE_OVERFLOW (orig));
/* Ensure constant sharing. */
- expr = wide_int_to_tree (TREE_TYPE (expr), expr);
+ expr = wide_int_to_tree (TREE_TYPE (expr), wi::to_wide (expr));
}
return expr;
}
== INTEGER_CST))
{
/* A C99 designator is OK if it matches the current index. */
- if (wi::eq_p (ce_index, index))
+ if (wi::to_wide (ce_index) == index)
return true;
else
sorry ("non-trivial designated initializers not supported");
type = c_common_signed_or_unsigned_type (1, TREE_TYPE (cst));
base = build_int_cstu (type, chunk);
- n = wide_int_to_tree (type, cst);
+ n = wide_int_to_tree (type, wi::to_wide (cst));
if (sign < 0)
{
if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
{
tree offset = TREE_PURPOSE (t);
- bool neg = wi::neg_p ((wide_int) offset);
+ bool neg = wi::neg_p (wi::to_wide (offset));
offset = fold_unary (ABS_EXPR, TREE_TYPE (offset), offset);
decl = mark_rvalue_use (decl);
decl = convert_from_reference (decl);
/* If the value is zero, the base indicator will serve as the value
all by itself. */
- if (wi::eq_p (cst, 0))
+ if (wi::to_wide (cst) == 0)
return;
/* GDB wants constants with no extra leading "1" bits, so
present. */
if (res_pres == 1)
{
- digit = wi::extract_uhwi (cst, prec - 1, 1);
+ digit = wi::extract_uhwi (wi::to_wide (cst), prec - 1, 1);
stabstr_C ('0' + digit);
}
else if (res_pres == 2)
{
- digit = wi::extract_uhwi (cst, prec - 2, 2);
+ digit = wi::extract_uhwi (wi::to_wide (cst), prec - 2, 2);
stabstr_C ('0' + digit);
}
prec -= res_pres;
for (i = prec - 3; i >= 0; i = i - 3)
{
- digit = wi::extract_uhwi (cst, i, 3);
+ digit = wi::extract_uhwi (wi::to_wide (cst), i, 3);
stabstr_C ('0' + digit);
}
}
the precision of its type. The precision and signedness
of the type will be necessary to re-interpret it
unambiguously. */
- add_AT_wide (die, attr, value);
+ add_AT_wide (die, attr, wi::to_wide (value));
return;
}
/* Enumeration constants may be wider than HOST_WIDE_INT. Handle
that here. TODO: This should be re-worked to use correct
signed/unsigned double tags for all cases. */
- add_AT_wide (enum_die, DW_AT_const_value, value);
+ add_AT_wide (enum_die, DW_AT_const_value, wi::to_wide (value));
}
add_gnat_descriptive_type_attribute (type_die, type, context_die);
RTVEC_ELT (v, i) = CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
inner);
else
- RTVEC_ELT (v, i) = immed_wide_int_const (elt, inner);
+ RTVEC_ELT (v, i) = immed_wide_int_const (wi::to_wide (elt), inner);
}
return gen_rtx_CONST_VECTOR (mode, v);
{
if (types_compatible_p (size_type_node, TREE_TYPE (t))
&& integer_cst_p (t)
- && wi::min_precision (t, UNSIGNED) <= sizeof (size_t) * CHAR_BIT)
+ && (wi::min_precision (wi::to_wide (t), UNSIGNED)
+ <= sizeof (size_t) * CHAR_BIT))
{
*size_out = tree_to_uhwi (t);
return true;
if (SCALAR_INT_MODE_P (mode))
{
wide_int result;
- if (fold_const_call_ss (&result, fn, arg, TYPE_PRECISION (type),
- TREE_TYPE (arg)))
+ if (fold_const_call_ss (&result, fn, wi::to_wide (arg),
+ TYPE_PRECISION (type), TREE_TYPE (arg)))
return wide_int_to_tree (type, result);
}
return NULL_TREE;
/* real, int -> real. */
REAL_VALUE_TYPE result;
if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
- arg1, REAL_MODE_FORMAT (mode)))
+ wi::to_wide (arg1),
+ REAL_MODE_FORMAT (mode)))
return build_real (type, result);
}
return NULL_TREE;
{
/* int, real -> real. */
REAL_VALUE_TYPE result;
- if (fold_const_call_sss (&result, fn, arg0,
+ if (fold_const_call_sss (&result, fn, wi::to_wide (arg0),
TREE_REAL_CST_PTR (arg1),
REAL_MODE_FORMAT (mode)))
return build_real (type, result);
if (TYPE_UNSIGNED (type))
return false;
- return !wi::only_sign_bit_p (t);
+ return !wi::only_sign_bit_p (wi::to_wide (t));
}
/* Determine whether an expression T can be cheaply negated using
if (INTEGRAL_TYPE_P (TREE_TYPE (t))
&& ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (t))
&& ! ((TREE_CODE (TREE_OPERAND (t, 0)) == INTEGER_CST
- && wi::popcount (wi::abs (TREE_OPERAND (t, 0))) != 1)
+ && (wi::popcount
+ (wi::abs (wi::to_wide (TREE_OPERAND (t, 0))))) != 1)
|| (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST
- && wi::popcount (wi::abs (TREE_OPERAND (t, 1))) != 1)))
+ && (wi::popcount
+ (wi::abs (wi::to_wide (TREE_OPERAND (t, 1))))) != 1)))
break;
/* Fall through. */
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
- if (wi::eq_p (op1, TYPE_PRECISION (type) - 1))
+ if (wi::to_wide (op1) == TYPE_PRECISION (type) - 1)
return true;
}
break;
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
- if (wi::eq_p (op1, TYPE_PRECISION (type) - 1))
+ if (wi::to_wide (op1) == TYPE_PRECISION (type) - 1)
{
tree ntype = TYPE_UNSIGNED (type)
? signed_type_for (type)
}
-/* Combine two integer constants ARG1 and ARG2 under operation CODE
+/* Combine two integer constants PARG1 and PARG2 under operation CODE
to produce a new constant. Return NULL_TREE if we don't know how
to evaluate CODE at compile-time. */
static tree
-int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
+int_const_binop_1 (enum tree_code code, const_tree parg1, const_tree parg2,
int overflowable)
{
wide_int res;
tree t;
- tree type = TREE_TYPE (arg1);
+ tree type = TREE_TYPE (parg1);
signop sign = TYPE_SIGN (type);
bool overflow = false;
+ wi::tree_to_wide_ref arg1 = wi::to_wide (parg1);
wide_int arg2 = wi::to_wide (parg2, TYPE_PRECISION (type));
switch (code)
t = force_fit_type (type, res, overflowable,
(((sign == SIGNED || overflowable == -1)
&& overflow)
- | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (parg2)));
+ | TREE_OVERFLOW (parg1) | TREE_OVERFLOW (parg2)));
return t;
}
{
if (TREE_CODE (arg2) != INTEGER_CST)
return NULL_TREE;
- wide_int w2 = arg2;
+ wi::tree_to_wide_ref w2 = wi::to_wide (arg2);
f2.data.high = w2.elt (1);
f2.data.low = w2.ulow ();
f2.mode = SImode;
if (real_less (&r, &l))
{
overflow = true;
- val = lt;
+ val = wi::to_wide (lt);
}
}
if (real_less (&u, &r))
{
overflow = true;
- val = ut;
+ val = wi::to_wide (ut);
}
}
}
if (lunsignedp)
{
- if (wi::lrshift (rhs, lbitsize) != 0)
+ if (wi::lrshift (wi::to_wide (rhs), lbitsize) != 0)
{
warning (0, "comparison is always %d due to width of bit-field",
code == NE_EXPR);
}
else
{
- wide_int tem = wi::arshift (rhs, lbitsize - 1);
+ wide_int tem = wi::arshift (wi::to_wide (rhs), lbitsize - 1);
if (tem != 0 && tem != -1)
{
warning (0, "comparison is always %d due to width of bit-field",
if (size > precision || TYPE_SIGN (type) == UNSIGNED)
return false;
- return wi::mask (size, false, precision) == mask;
+ return wi::mask (size, false, precision) == wi::to_wide (mask);
}
/* Subroutine for fold: determine if VAL is the INTEGER_CONST that
return NULL_TREE;
width = TYPE_PRECISION (t);
- if (wi::only_sign_bit_p (val, width))
+ if (wi::only_sign_bit_p (wi::to_wide (val), width))
return exp;
/* Handle extension from a narrower type. */
/* We work by getting just the sign bit into the low-order bit, then
into the high-order bit, then sign-extend. We then XOR that value
with C. */
- temp = build_int_cst (TREE_TYPE (c), wi::extract_uhwi (c, p - 1, 1));
+ temp = build_int_cst (TREE_TYPE (c),
+ wi::extract_uhwi (wi::to_wide (c), p - 1, 1));
/* We must use a signed type in order to get an arithmetic right shift.
However, we must also avoid introducing accidental overflows, so that
/* For a constant, we can always simplify if we are a multiply
or (for divide and modulus) if it is a multiple of our constant. */
if (code == MULT_EXPR
- || wi::multiple_of_p (t, c, TYPE_SIGN (type)))
+ || wi::multiple_of_p (wi::to_wide (t), wi::to_wide (c),
+ TYPE_SIGN (type)))
{
tree tem = const_binop (code, fold_convert (ctype, t),
fold_convert (ctype, c));
&& (tcode == RSHIFT_EXPR || TYPE_UNSIGNED (TREE_TYPE (op0)))
/* const_binop may not detect overflow correctly,
so check for it explicitly here. */
- && wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1)
+ && wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)),
+ wi::to_wide (op1))
&& 0 != (t1 = fold_convert (ctype,
const_binop (LSHIFT_EXPR,
size_one_node,
/* If it's a multiply or a division/modulus operation of a multiple
of our constant, do the operation and verify it doesn't overflow. */
if (code == MULT_EXPR
- || wi::multiple_of_p (op1, c, TYPE_SIGN (type)))
+ || wi::multiple_of_p (wi::to_wide (op1), wi::to_wide (c),
+ TYPE_SIGN (type)))
{
op1 = const_binop (code, fold_convert (ctype, op1),
fold_convert (ctype, c));
/* If the multiplication can overflow we cannot optimize this. */
&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t))
&& TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST
- && wi::multiple_of_p (op1, c, TYPE_SIGN (type)))
+ && wi::multiple_of_p (wi::to_wide (op1), wi::to_wide (c),
+ TYPE_SIGN (type)))
{
*strict_overflow_p = true;
return omit_one_operand (type, integer_zero_node, op0);
&& code != FLOOR_MOD_EXPR && code != ROUND_MOD_EXPR
&& code != MULT_EXPR)))
{
- if (wi::multiple_of_p (op1, c, TYPE_SIGN (type)))
+ if (wi::multiple_of_p (wi::to_wide (op1), wi::to_wide (c),
+ TYPE_SIGN (type)))
{
if (TYPE_OVERFLOW_UNDEFINED (ctype))
*strict_overflow_p = true;
const_binop (TRUNC_DIV_EXPR,
op1, c)));
}
- else if (wi::multiple_of_p (c, op1, TYPE_SIGN (type)))
+ else if (wi::multiple_of_p (wi::to_wide (c), wi::to_wide (op1),
+ TYPE_SIGN (type)))
{
if (TYPE_OVERFLOW_UNDEFINED (ctype))
*strict_overflow_p = true;
/* We have to do this the hard way to detect unsigned overflow.
prod = int_const_binop (MULT_EXPR, c1, c2); */
- wide_int val = wi::mul (c1, c2, sign, &overflow);
+ wide_int val = wi::mul (wi::to_wide (c1), wi::to_wide (c2), sign, &overflow);
prod = force_fit_type (type, val, -1, overflow);
*neg_overflow = false;
*lo = prod;
/* Likewise *hi = int_const_binop (PLUS_EXPR, prod, tmp). */
- val = wi::add (prod, tmp, sign, &overflow);
+ val = wi::add (wi::to_wide (prod), wi::to_wide (tmp), sign, &overflow);
*hi = force_fit_type (type, val, -1, overflow | TREE_OVERFLOW (prod));
}
else if (tree_int_cst_sgn (c1) >= 0)
if (TREE_CODE (inner) == RSHIFT_EXPR
&& TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST
&& bitnum < TYPE_PRECISION (type)
- && wi::ltu_p (TREE_OPERAND (inner, 1),
+ && wi::ltu_p (wi::to_wide (TREE_OPERAND (inner, 1)),
TYPE_PRECISION (type) - bitnum))
{
bitnum += tree_to_uhwi (TREE_OPERAND (inner, 1));
arg10 = build_one_cst (type);
/* As we canonicalize A - 2 to A + -2 get rid of that sign for
the purpose of this canonicalization. */
- if (wi::neg_p (arg1, TYPE_SIGN (TREE_TYPE (arg1)))
+ if (wi::neg_p (wi::to_wide (arg1), TYPE_SIGN (TREE_TYPE (arg1)))
&& negate_expr_p (arg1)
&& code == PLUS_EXPR)
{
/* If the sum evaluated to a constant that is not -INF the multiplication
cannot overflow. */
if (TREE_CODE (tem) == INTEGER_CST
- && ! wi::eq_p (tem, wi::min_value (TYPE_PRECISION (utype), SIGNED)))
+ && (wi::to_wide (tem)
+ != wi::min_value (TYPE_PRECISION (utype), SIGNED)))
return fold_build2_loc (loc, MULT_EXPR, type,
fold_convert (type, tem), same);
else if (TREE_CODE (offset) != INTEGER_CST || TREE_OVERFLOW (offset))
return true;
else
- wi_offset = offset;
+ wi_offset = wi::to_wide (offset);
bool overflow;
wide_int units = wi::shwi (bitpos / BITS_PER_UNIT, precision);
switch (TREE_CODE (t))
{
case INTEGER_CST:
- return wi::ne_p (t, w);
+ return wi::to_wide (t) != w;
case SSA_NAME:
if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
int width = TYPE_PRECISION (type), w;
- wide_int c1 = TREE_OPERAND (arg0, 1);
- wide_int c2 = arg1;
+ wide_int c1 = wi::to_wide (TREE_OPERAND (arg0, 1));
+ wide_int c2 = wi::to_wide (arg1);
/* If (C1&C2) == C1, then (X&C1)|C2 becomes (X,C2). */
if ((c1 & c2) == c1)
multiple of 1 << CST. */
if (TREE_CODE (arg1) == INTEGER_CST)
{
- wide_int cst1 = arg1;
+ wi::tree_to_wide_ref cst1 = wi::to_wide (arg1);
wide_int ncst1 = -cst1;
if ((cst1 & ncst1) == ncst1
&& multiple_of_p (type, arg0,
&& TREE_CODE (arg0) == MULT_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
- wide_int warg1 = arg1;
- wide_int masked = mask_with_tz (type, warg1, TREE_OPERAND (arg0, 1));
+ wi::tree_to_wide_ref warg1 = wi::to_wide (arg1);
+ wide_int masked
+ = mask_with_tz (type, warg1, wi::to_wide (TREE_OPERAND (arg0, 1)));
if (masked == 0)
return omit_two_operands_loc (loc, type, build_zero_cst (type),
If B is constant and (B & M) == 0, fold into A & M. */
if (TREE_CODE (arg1) == INTEGER_CST)
{
- wide_int cst1 = arg1;
+ wi::tree_to_wide_ref cst1 = wi::to_wide (arg1);
if ((~cst1 != 0) && (cst1 & (cst1 + 1)) == 0
&& INTEGRAL_TYPE_P (TREE_TYPE (arg0))
&& (TREE_CODE (arg0) == PLUS_EXPR
if (TREE_CODE (TREE_OPERAND (pmop[which], 1))
!= INTEGER_CST)
break;
- cst0 = TREE_OPERAND (pmop[which], 1);
- cst0 &= cst1;
+ cst0 = wi::to_wide (TREE_OPERAND (pmop[which], 1)) & cst1;
if (TREE_CODE (pmop[which]) == BIT_AND_EXPR)
{
if (cst0 != cst1)
omitted (assumed 0). */
if ((TREE_CODE (arg0) == PLUS_EXPR
|| (TREE_CODE (arg0) == MINUS_EXPR && which == 0))
- && (cst1 & pmop[which]) == 0)
+ && (cst1 & wi::to_wide (pmop[which])) == 0)
pmop[which] = NULL;
break;
default:
{
prec = element_precision (TREE_TYPE (TREE_OPERAND (arg0, 0)));
- wide_int mask = wide_int::from (arg1, prec, UNSIGNED);
+ wide_int mask = wide_int::from (wi::to_wide (arg1), prec, UNSIGNED);
if (mask == -1)
return
fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
{
tree sh_cnt = TREE_OPERAND (arg1, 1);
tree pow2 = build_int_cst (TREE_TYPE (sh_cnt),
- wi::exact_log2 (sval));
+ wi::exact_log2 (wi::to_wide (sval)));
if (strict_overflow_p)
fold_overflow_warning (("assuming signed overflow does not "
if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (arg0) == RROTATE_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
- && wi::umod_trunc (wi::add (arg1, TREE_OPERAND (arg0, 1)),
+ && wi::umod_trunc (wi::to_wide (arg1)
+ + wi::to_wide (TREE_OPERAND (arg0, 1)),
prec) == 0)
return fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
prec = TYPE_PRECISION (itype);
/* Check for a valid shift count. */
- if (wi::ltu_p (arg001, prec))
+ if (wi::ltu_p (wi::to_wide (arg001), prec))
{
tree arg01 = TREE_OPERAND (arg0, 1);
tree arg000 = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0);
tree arg00 = TREE_OPERAND (arg0, 0);
tree arg01 = TREE_OPERAND (arg0, 1);
tree itype = TREE_TYPE (arg00);
- if (wi::eq_p (arg01, element_precision (itype) - 1))
+ if (wi::to_wide (arg01) == element_precision (itype) - 1)
{
if (TYPE_UNSIGNED (itype))
{
(inner_width, outer_width - inner_width, false,
TYPE_PRECISION (TREE_TYPE (arg1)));
- wide_int common = mask & arg1;
+ wide_int common = mask & wi::to_wide (arg1);
if (common == mask)
{
tem_type = signed_type_for (TREE_TYPE (tem));
/* Make sure that the perm value is in an acceptable
range. */
- wide_int t = val;
+ wi::tree_to_wide_ref t = wi::to_wide (val);
need_mask_canon |= wi::gtu_p (t, mask);
need_mask_canon2 |= wi::gtu_p (t, mask2);
unsigned int elt = t.to_uhwi () & mask;
{
unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (op2);
unsigned bitsize = TYPE_PRECISION (TREE_TYPE (arg1));
- wide_int tem = wi::bit_and (arg0,
- wi::shifted_mask (bitpos, bitsize, true,
- TYPE_PRECISION (type)));
+ wide_int tem = (wi::to_wide (arg0)
+ & wi::shifted_mask (bitpos, bitsize, true,
+ TYPE_PRECISION (type)));
wide_int tem2
= wi::lshift (wi::zext (wi::to_wide (arg1, TYPE_PRECISION (type)),
bitsize), bitpos);
op1 = TREE_OPERAND (top, 1);
/* const_binop may not detect overflow correctly,
so check for it explicitly here. */
- if (wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1)
+ if (wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)),
+ wi::to_wide (op1))
&& 0 != (t1 = fold_convert (type,
const_binop (LSHIFT_EXPR,
size_one_node,
case INTEGER_CST:
{
bool overflow;
- wide_int val = wi::neg (arg0, &overflow);
+ wide_int val = wi::neg (wi::to_wide (arg0), &overflow);
t = force_fit_type (type, val, 1,
(overflow && ! TYPE_UNSIGNED (type))
|| TREE_OVERFLOW (arg0));
{
/* If the value is unsigned or non-negative, then the absolute value
is the same as the ordinary value. */
- if (!wi::neg_p (arg0, TYPE_SIGN (type)))
+ if (!wi::neg_p (wi::to_wide (arg0), TYPE_SIGN (type)))
t = arg0;
/* If the value is negative, then the absolute value is
else
{
bool overflow;
- wide_int val = wi::neg (arg0, &overflow);
+ wide_int val = wi::neg (wi::to_wide (arg0), &overflow);
t = force_fit_type (type, val, -1,
overflow | TREE_OVERFLOW (arg0));
}
{
gcc_assert (TREE_CODE (arg0) == INTEGER_CST);
- return force_fit_type (type, wi::bit_not (arg0), 0, TREE_OVERFLOW (arg0));
+ return force_fit_type (type, ~wi::to_wide (arg0), 0, TREE_OVERFLOW (arg0));
}
/* Given CODE, a relational operator, the target type, TYPE and two
{
if (TREE_CODE (value) == INTEGER_CST)
{
- wide_int val = value;
+ wide_int val = wi::to_wide (value);
bool overflow_p;
if ((val & (divisor - 1)) == 0)
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * target-memory.c (gfc_interpret_logical): Use wi::to_wide when
+ operating on trees as wide_ints.
+ * trans-const.c (gfc_conv_tree_to_mpz): Likewise.
+ * trans-expr.c (gfc_conv_cst_int_power): Likewise.
+ * trans-intrinsic.c (trans_this_image): Likewise.
+ (gfc_conv_intrinsic_bound): Likewise.
+ (conv_intrinsic_cobound): Likewise.
+
2017-10-08 Steven G. Kargl <kargl@gcc.gnu.org>
* check.c (gfc_check_x): Remove function.
{
tree t = native_interpret_expr (gfc_get_logical_type (kind), buffer,
buffer_size);
- *logical = wi::eq_p (t, 0) ? 0 : 1;
+ *logical = wi::to_wide (t) == 0 ? 0 : 1;
return size_logical (kind);
}
void
gfc_conv_tree_to_mpz (mpz_t i, tree source)
{
- wi::to_mpz (source, i, TYPE_SIGN (TREE_TYPE (source)));
+ wi::to_mpz (wi::to_wide (source), i, TYPE_SIGN (TREE_TYPE (source)));
}
/* Converts a real constant into backend form. */
HOST_WIDE_INT m;
unsigned HOST_WIDE_INT n;
int sgn;
- wide_int wrhs = rhs;
+ wi::tree_to_wide_ref wrhs = wi::to_wide (rhs);
/* If exponent is too large, we won't expand it anyway, so don't bother
with large integer values. */
if (INTEGER_CST_P (dim_arg))
{
- if (wi::ltu_p (dim_arg, 1)
- || wi::gtu_p (dim_arg, GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
+ if (wi::ltu_p (wi::to_wide (dim_arg), 1)
+ || wi::gtu_p (wi::to_wide (dim_arg),
+ GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
gfc_error ("%<dim%> argument of %s intrinsic at %L is not a valid "
"dimension index", expr->value.function.isym->name,
&expr->where);
if (INTEGER_CST_P (bound))
{
if (((!as || as->type != AS_ASSUMED_RANK)
- && wi::geu_p (bound, GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))))
- || wi::gtu_p (bound, GFC_MAX_DIMENSIONS))
+ && wi::geu_p (wi::to_wide (bound),
+ GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))))
+ || wi::gtu_p (wi::to_wide (bound), GFC_MAX_DIMENSIONS))
gfc_error ("%<dim%> argument of %s intrinsic at %L is not a valid "
"dimension index", upper ? "UBOUND" : "LBOUND",
&expr->where);
if (INTEGER_CST_P (bound))
{
- if (wi::ltu_p (bound, 1)
- || wi::gtu_p (bound, GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
+ if (wi::ltu_p (wi::to_wide (bound), 1)
+ || wi::gtu_p (wi::to_wide (bound),
+ GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
gfc_error ("%<dim%> argument of %s intrinsic at %L is not a valid "
"dimension index", expr->value.function.isym->name,
&expr->where);
|| DECL_P (TREE_OPERAND (addr, 0)))
return fold_build2 (MEM_REF, type,
addr,
- wide_int_to_tree (ptype, off));
+ wide_int_to_tree (ptype, wi::to_wide (off)));
}
/* *(foo *)fooarrptr => (*fooarrptr)[0] */
// degrade into "if (N > Y) alloca(N)".
if (cond_code == GT_EXPR || cond_code == GE_EXPR)
rhs = integer_zero_node;
- return alloca_type_and_limit (ALLOCA_BOUND_MAYBE_LARGE, rhs);
+ return alloca_type_and_limit (ALLOCA_BOUND_MAYBE_LARGE,
+ wi::to_wide (rhs));
}
}
else
if (TREE_CODE (len) == INTEGER_CST)
{
if (tree_to_uhwi (len) > max_size)
- return alloca_type_and_limit (ALLOCA_BOUND_DEFINITELY_LARGE, len);
+ return alloca_type_and_limit (ALLOCA_BOUND_DEFINITELY_LARGE,
+ wi::to_wide (len));
if (integer_zerop (len))
return alloca_type_and_limit (ALLOCA_ARG_IS_ZERO);
ret = alloca_type_and_limit (ALLOCA_OK);
if (CASE_HIGH (labels[i]) != NULL_TREE
&& (CASE_HIGH (widest_label) == NULL_TREE
- || wi::gtu_p (wi::sub (CASE_HIGH (labels[i]),
- CASE_LOW (labels[i])),
- wi::sub (CASE_HIGH (widest_label),
- CASE_LOW (widest_label)))))
+ || (wi::gtu_p
+ (wi::to_wide (CASE_HIGH (labels[i]))
+ - wi::to_wide (CASE_LOW (labels[i])),
+ wi::to_wide (CASE_HIGH (widest_label))
+ - wi::to_wide (CASE_LOW (widest_label))))))
widest_label = labels[i];
- if (wi::add (low, 1) != high)
+ if (wi::to_wide (low) + 1 != wi::to_wide (high))
break;
}
if (i == len)
snprintf (buf, sizeof buf, HOST_WIDE_INT_PRINT_UNSIGNED,
tree_to_uhwi (TREE_VALUE (element)));
else
- print_hex (element, buf);
+ print_hex (wi::to_wide (element), buf);
mhval->value = xstrdup (buf);
*slot = mhval;
static inline void
tree_int_to_gmp (tree t, mpz_t res)
{
- wi::to_mpz (t, res, TYPE_SIGN (TREE_TYPE (t)));
+ wi::to_mpz (wi::to_wide (t), res, TYPE_SIGN (TREE_TYPE (t)));
}
/* Return an isl identifier for the polyhedral basic block PBB. */
p = wi::min_precision (w, sign);
}
else
- p = wi::min_precision (arg, sign);
+ p = wi::min_precision (wi::to_wide (arg), sign);
return MIN (p, prec);
}
while (CONVERT_EXPR_P (arg)
{
vr.known = true;
vr.type = plats->m_value_range.m_vr.type;
- vr.min = plats->m_value_range.m_vr.min;
- vr.max = plats->m_value_range.m_vr.max;
+ vr.min = wi::to_wide (plats->m_value_range.m_vr.min);
+ vr.max = wi::to_wide (plats->m_value_range.m_vr.max);
}
else
{
else if (TREE_CODE (base_pointer) == POINTER_PLUS_EXPR
&& TREE_CODE (TREE_OPERAND (base_pointer, 1)) == INTEGER_CST)
{
- offset_int o = offset_int::from (TREE_OPERAND (base_pointer, 1),
- SIGNED);
+ offset_int o
+ = offset_int::from (wi::to_wide (TREE_OPERAND (base_pointer, 1)),
+ SIGNED);
o *= BITS_PER_UNIT;
o += offset;
if (!wi::fits_shwi_p (o))
fprintf (f, " VR ");
fprintf (f, "%s[",
(jump_func->m_vr->type == VR_ANTI_RANGE) ? "~" : "");
- print_decs (jump_func->m_vr->min, f);
+ print_decs (wi::to_wide (jump_func->m_vr->min), f);
fprintf (f, ", ");
- print_decs (jump_func->m_vr->max, f);
+ print_decs (wi::to_wide (jump_func->m_vr->max), f);
fprintf (f, "]\n");
}
else
if (TYPE_ALIGN (type) > align)
align = TYPE_ALIGN (type);
}
- misalign += (offset_int::from (off, SIGNED).to_short_addr ()
+ misalign += (offset_int::from (wi::to_wide (off),
+ SIGNED).to_short_addr ()
* BITS_PER_UNIT);
misalign = misalign & (align - 1);
if (misalign != 0)
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * lto.c (compare_tree_sccs_1): Use wi::to_wide when
+ operating on trees as wide_ints.
+
2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
if (CODE_CONTAINS_STRUCT (code, TS_INT_CST))
{
- if (!wi::eq_p (t1, t2))
+ if (wi::to_wide (t1) != wi::to_wide (t2))
return false;
}
(div (div @0 INTEGER_CST@1) INTEGER_CST@2)
(with {
bool overflow_p;
- wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
+ wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
+ TYPE_SIGN (type), &overflow_p);
}
(if (!overflow_p)
(div @0 { wide_int_to_tree (type, mul); })
(mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
(with {
bool overflow_p;
- wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
+ wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
+ TYPE_SIGN (type), &overflow_p);
}
/* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
otherwise undefined overflow implies that @0 must be zero. */
(if (integer_pow2p (@2)
&& tree_int_cst_sgn (@2) > 0
&& tree_nop_conversion_p (type, TREE_TYPE (@0))
- && wi::add (@2, @1) == 0)
- (rshift (convert @0) { build_int_cst (integer_type_node,
- wi::exact_log2 (@2)); }))))
+ && wi::to_wide (@2) + wi::to_wide (@1) == 0)
+ (rshift (convert @0)
+ { build_int_cst (integer_type_node,
+ wi::exact_log2 (wi::to_wide (@2))); }))))
/* If ARG1 is a constant, we can convert this to a multiply by the
reciprocal. This does not have the same rounding properties,
(mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
(if (ANY_INTEGRAL_TYPE_P (type)
&& TYPE_OVERFLOW_UNDEFINED (type)
- && wi::multiple_of_p (@1, @2, TYPE_SIGN (type)))
+ && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
+ TYPE_SIGN (type)))
{ build_zero_cst (type); })))
/* X % -C is the same as X % C. */
(trunc_mod @0 INTEGER_CST@1)
(if (TYPE_SIGN (type) == SIGNED
&& !TREE_OVERFLOW (@1)
- && wi::neg_p (@1)
+ && wi::neg_p (wi::to_wide (@1))
&& !TYPE_OVERFLOW_TRAPS (type)
/* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
&& !sign_bit_p (@1, @1))
/* Avoid this transformation if X might be INT_MIN or
Y might be -1, because we would then change valid
INT_MIN % -(-1) into invalid INT_MIN % -1. */
- && (expr_not_equal_to (@0, TYPE_MIN_VALUE (type))
+ && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
|| expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
(TREE_TYPE (@1))))))
(trunc_mod @0 (convert @1))))
(trunc_div (mult @0 integer_pow2p@1) @1)
(if (TYPE_UNSIGNED (TREE_TYPE (@0)))
(bit_and @0 { wide_int_to_tree
- (type, wi::mask (TYPE_PRECISION (type) - wi::exact_log2 (@1),
+ (type, wi::mask (TYPE_PRECISION (type)
+ - wi::exact_log2 (wi::to_wide (@1)),
false, TYPE_PRECISION (type))); })))
/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
(for pows (POWI)
(simplify
(pows (op @0) INTEGER_CST@1)
- (if (wi::bit_and (@1, 1) == 0)
+ (if ((wi::to_wide (@1) & 1) == 0)
(pows @0 @1))))
/* Strip negate and abs from both operands of hypot. */
(for hypots (HYPOT)
copysigns (COPYSIGN)
(simplify
(pows (copysigns @0 @2) INTEGER_CST@1)
- (if (wi::bit_and (@1, 1) == 0)
+ (if ((wi::to_wide (@1) & 1) == 0)
(pows @0 @1))))
(for hypots (HYPOT)
(minus (bit_xor @0 @1) @1))
(simplify
(minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
- (if (wi::bit_not (@2) == @1)
+ (if (~wi::to_wide (@2) == wi::to_wide (@1))
(minus (bit_xor @0 @1) @1)))
/* Fold (A & B) - (A & ~B) into B - (A ^ B). */
(bit_xor @0 @1))
(simplify
(op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
- (if (wi::bit_not (@2) == @1)
+ (if (~wi::to_wide (@2) == wi::to_wide (@1))
(bit_xor @0 @1))))
/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
(simplify
(bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
- && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
+ && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
(bit_xor @0 @1)))
#endif
(simplify
(bit_and SSA_NAME@0 INTEGER_CST@1)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
- && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
+ && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
@0))
#endif
(convert2? (bit_and@5 @2 INTEGER_CST@3)))
(if (tree_nop_conversion_p (type, TREE_TYPE (@0))
&& tree_nop_conversion_p (type, TREE_TYPE (@2))
- && wi::bit_and (@1, @3) == 0)
+ && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
(bit_ior (convert @4) (convert @5)))))
/* (X | Y) ^ X -> Y & ~ X*/
(if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
(cmp @0 @2)
(if (TREE_CODE (@1) == INTEGER_CST
- && wi::neg_p (@1, TYPE_SIGN (TREE_TYPE (@1))))
+ && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
(cmp @2 @0))))))
/* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& TYPE_UNSIGNED (TREE_TYPE (@0))
&& TYPE_PRECISION (TREE_TYPE (@0)) > 1
- && wi::eq_p (@2, wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)),
- SIGNED) - 1))
+ && (wi::to_wide (@2)
+ == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
(with { tree stype = signed_type_for (TREE_TYPE (@0)); }
(icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
(for cmp (simple_comparison)
(simplify
(cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2))
- (if (wi::gt_p(@2, 0, TYPE_SIGN (TREE_TYPE (@2))))
+ (if (wi::gt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
(cmp @0 @1))))
/* X / C1 op C2 into a simple range test. */
(for cmp (eq ne)
(simplify
(cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
- (if ((~get_nonzero_bits (@0) & @1) != 0)
+ (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
{ constant_boolean_node (cmp == NE_EXPR, type); })))
/* ((X inner_op C0) outer_op C1)
if (inner_op == BIT_XOR_EXPR)
{
- C0 = wi::bit_and_not (@0, @1);
- cst_emit = wi::bit_or (C0, @1);
+ C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
+ cst_emit = C0 | wi::to_wide (@1);
}
else
{
- C0 = @0;
- cst_emit = wi::bit_xor (@0, @1);
+ C0 = wi::to_wide (@0);
+ cst_emit = C0 ^ wi::to_wide (@1);
}
}
- (if (!fail && wi::bit_and (C0, zero_mask_not) == 0)
+ (if (!fail && (C0 & zero_mask_not) == 0)
(outer_op @2 { wide_int_to_tree (type, cst_emit); })
- (if (!fail && wi::bit_and (@1, zero_mask_not) == 0)
+ (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
(inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
... = ptr & ~algn; */
(simplify
(pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
- (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), wi::bit_not (@1)); }
+ (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
(bit_and @0 { algn; })))
/* Try folding difference of addresses. */
unsigned HOST_WIDE_INT bitpos;
get_pointer_alignment_1 (@0, &align, &bitpos);
}
- (if (wi::ltu_p (@1, align / BITS_PER_UNIT))
- { wide_int_to_tree (type, wi::bit_and (@1, bitpos / BITS_PER_UNIT)); }))))
+ (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
+ { wide_int_to_tree (type, (wi::to_wide (@1)
+ & (bitpos / BITS_PER_UNIT))); }))))
/* We can't reassociate at all for saturating types. */
(inner_op @0 { cst; } )
/* X+INT_MAX+1 is X-INT_MIN. */
(if (INTEGRAL_TYPE_P (type) && cst
- && wi::eq_p (cst, wi::min_value (type)))
- (neg_inner_op @0 { wide_int_to_tree (type, cst); })
+ && wi::to_wide (cst) == wi::min_value (type))
+ (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
/* Last resort, use some unsigned type. */
(with { tree utype = unsigned_type_for (type); }
(view_convert (inner_op
(for cmp (eq ne)
(simplify
(cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
- (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
+ (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
+ TYPE_SIGN (TREE_TYPE (@0))))
{ constant_boolean_node (cmp == NE_EXPR, type); }
- (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
+ (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
+ TYPE_SIGN (TREE_TYPE (@0))))
(cmp @0 @2)))))
(for cmp (eq ne)
(simplify
(cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
- (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
+ (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
+ TYPE_SIGN (TREE_TYPE (@0))))
{ constant_boolean_node (cmp == NE_EXPR, type); }
- (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
+ (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
+ TYPE_SIGN (TREE_TYPE (@0))))
(cmp @0 @2)))))
/* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
(for minmax (min min max max min min max max )
/* Optimize (x >> c) << c into x & (-1<<c). */
(simplify
(lshift (rshift @0 INTEGER_CST@1) @1)
- (if (wi::ltu_p (@1, element_precision (type)))
+ (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
(bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
(simplify
(rshift (lshift @0 INTEGER_CST@1) @1)
(if (TYPE_UNSIGNED (type)
- && (wi::ltu_p (@1, element_precision (type))))
+ && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
(bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
(for shiftrotate (lrotate rrotate lshift rshift)
(simplify
(op (op @0 INTEGER_CST@1) INTEGER_CST@2)
(with { unsigned int prec = element_precision (type); }
- (if (wi::ge_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
- && wi::lt_p (@1, prec, TYPE_SIGN (TREE_TYPE (@1)))
- && wi::ge_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
- && wi::lt_p (@2, prec, TYPE_SIGN (TREE_TYPE (@2))))
+ (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
+ && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
+ && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
+ && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
(with { unsigned int low = (tree_to_uhwi (@1)
+ tree_to_uhwi (@2)); }
/* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
(for cmp (ne eq)
(simplify
(cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
- (with { int cand = wi::ctz (@2) - wi::ctz (@0); }
+ (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
(if (cand < 0
|| (!integer_zerop (@2)
- && wi::ne_p (wi::lshift (@0, cand), @2)))
+ && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
{ constant_boolean_node (cmp == NE_EXPR, type); }
(if (!integer_zerop (@2)
- && wi::eq_p (wi::lshift (@0, cand), @2))
+ && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
(cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
{
bool overflow = false;
enum tree_code code, cmp_code = cmp;
- wide_int real_c1, c1 = @1, c2 = @2, c3 = @3;
+ wide_int real_c1;
+ wide_int c1 = wi::to_wide (@1);
+ wide_int c2 = wi::to_wide (@2);
+ wide_int c3 = wi::to_wide (@3);
signop sgn = TYPE_SIGN (from_type);
/* Handle special case A), given x of unsigned type:
(simplify
(cmp @0 INTEGER_CST@1)
(if (tree_int_cst_sgn (@1) == -1)
- (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
+ (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
(for cmp (ge lt)
acmp (gt le)
(simplify
(cmp @0 INTEGER_CST@1)
(if (tree_int_cst_sgn (@1) == 1)
- (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
+ (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
/* We can simplify a logical negation of a comparison to the
(simplify
(cmp (exact_div @0 @1) INTEGER_CST@2)
(if (!integer_zerop (@1))
- (if (wi::eq_p (@2, 0))
+ (if (wi::to_wide (@2) == 0)
(cmp @0 @2)
(if (TREE_CODE (@1) == INTEGER_CST)
(with
{
bool ovf;
- wide_int prod = wi::mul (@2, @1, TYPE_SIGN (TREE_TYPE (@1)), &ovf);
+ wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
+ TYPE_SIGN (TREE_TYPE (@1)), &ovf);
}
(if (ovf)
{ constant_boolean_node (cmp == NE_EXPR, type); }
(for cmp (lt le gt ge)
(simplify
(cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
- (if (wi::gt_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1))))
+ (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
(with
{
bool ovf;
- wide_int prod = wi::mul (@2, @1, TYPE_SIGN (TREE_TYPE (@1)), &ovf);
+ wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
+ TYPE_SIGN (TREE_TYPE (@1)), &ovf);
}
(if (ovf)
- { constant_boolean_node (wi::lt_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
+ { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
+ TYPE_SIGN (TREE_TYPE (@2)))
!= (cmp == LT_EXPR || cmp == LE_EXPR), type); }
(cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
(simplify
(cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
(if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
- && wi::bit_and_not (@1, @2) != 0)
+ && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
{ constant_boolean_node (cmp == NE_EXPR, type); }))
/* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
(ne (bit_and @0 integer_pow2p@1) integer_zerop)
integer_pow2p@2 integer_zerop)
(with {
- int shift = wi::exact_log2 (@2) - wi::exact_log2 (@1);
+ int shift = (wi::exact_log2 (wi::to_wide (@2))
+ - wi::exact_log2 (wi::to_wide (@1)));
}
(if (shift > 0)
(bit_and
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& type_has_mode_precision_p (TREE_TYPE (@0))
&& element_precision (@2) >= element_precision (@0)
- && wi::only_sign_bit_p (@1, element_precision (@0)))
+ && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
(with { tree stype = signed_type_for (TREE_TYPE (@0)); }
(ncmp (convert:stype @0) { build_zero_cst (stype); })))))
integer_pow2p@1 integer_zerop)
(if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
(with {
- int shift = element_precision (@0) - wi::exact_log2 (@1) - 1;
+ int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
}
(if (shift >= 0)
(bit_and
wide_int min = wi::min_value (arg1_type);
}
(switch
- (if (wi::eq_p (@1, max))
+ (if (wi::to_wide (@1) == max)
(switch
(if (cmp == GT_EXPR)
{ constant_boolean_node (false, type); })
{ constant_boolean_node (true, type); })
(if (cmp == LT_EXPR)
(ne @2 @1))))
- (if (wi::eq_p (@1, min))
+ (if (wi::to_wide (@1) == min)
(switch
(if (cmp == LT_EXPR)
{ constant_boolean_node (false, type); })
{ constant_boolean_node (true, type); })
(if (cmp == GT_EXPR)
(ne @2 @1))))
- (if (wi::eq_p (@1, max - 1))
+ (if (wi::to_wide (@1) == max - 1)
(switch
(if (cmp == GT_EXPR)
- (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))
+ (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))
(if (cmp == LE_EXPR)
- (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
- (if (wi::eq_p (@1, min + 1))
+ (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
+ (if (wi::to_wide (@1) == min + 1)
(switch
(if (cmp == GE_EXPR)
- (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))
+ (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))
(if (cmp == LT_EXPR)
- (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
- (if (wi::eq_p (@1, signed_max)
+ (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
+ (if (wi::to_wide (@1) == signed_max
&& TYPE_UNSIGNED (arg1_type)
/* We will flip the signedness of the comparison operator
associated with the mode of @1, so the sign bit is
(cmp:c (plus@2 @0 INTEGER_CST@1) @0)
(if (TYPE_UNSIGNED (TREE_TYPE (@0))
&& TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
- && wi::ne_p (@1, 0)
+ && wi::to_wide (@1) != 0
&& single_use (@2))
- (out @0 { wide_int_to_tree (TREE_TYPE (@0), wi::max_value
- (TYPE_PRECISION (TREE_TYPE (@0)), UNSIGNED) - @1); }))))
+ (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
+ (out @0 { wide_int_to_tree (TREE_TYPE (@0),
+ wi::max_value (prec, UNSIGNED)
+ - wi::to_wide (@1)); })))))
/* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
(POWI @0 INTEGER_CST@1)
(switch
/* powi(x,0) -> 1. */
- (if (wi::eq_p (@1, 0))
+ (if (wi::to_wide (@1) == 0)
{ build_real (type, dconst1); })
/* powi(x,1) -> x. */
- (if (wi::eq_p (@1, 1))
+ (if (wi::to_wide (@1) == 1)
@0)
/* powi(x,-1) -> 1/x. */
- (if (wi::eq_p (@1, -1))
+ (if (wi::to_wide (@1) == -1)
(rdiv { build_real (type, dconst1); } @0))))
/* Narrowing of arithmetic and logical operations.
&& types_match (@0, @1)
&& (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
<= TYPE_PRECISION (TREE_TYPE (@0)))
- && (wi::bit_and (@4, wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
- true, TYPE_PRECISION (type))) == 0))
+ && (wi::to_wide (@4)
+ & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
+ true, TYPE_PRECISION (type))) == 0)
(if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
(with { tree ntype = TREE_TYPE (@0); }
(convert (bit_and (op @0 @1) (convert:ntype @4))))
WARN_STRICT_OVERFLOW_CONDITIONAL);
bool less = cmp == LE_EXPR || cmp == LT_EXPR;
/* wi::ges_p (@2, 0) should be sufficient for a signed type. */
- bool ovf_high = wi::lt_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
+ bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
+ TYPE_SIGN (TREE_TYPE (@1)))
!= (op == MINUS_EXPR);
constant_boolean_node (less == ovf_high, type);
}
isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
}
(switch
- (if (wi::leu_p (@ipos, @rpos)
- && wi::leu_p (wi::add (@rpos, @rsize), wi::add (@ipos, isize)))
+ (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
+ && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
+ wi::to_wide (@ipos) + isize))
(BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
- wi::sub (@rpos, @ipos)); }))
- (if (wi::geu_p (@ipos, wi::add (@rpos, @rsize))
- || wi::geu_p (@rpos, wi::add (@ipos, isize)))
+ wi::to_wide (@rpos)
+ - wi::to_wide (@ipos)); }))
+ (if (wi::geu_p (wi::to_wide (@ipos),
+ wi::to_wide (@rpos) + wi::to_wide (@rsize))
+ || wi::geu_p (wi::to_wide (@rpos),
+ wi::to_wide (@ipos) + isize))
(BIT_FIELD_REF @0 @rsize @rpos)))))
+2017-10-10 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * objc-act.c (objc_decl_method_attributes): Use wi::to_wide when
+ operating on trees as wide_ints.
+
2017-09-29 Jakub Jelinek <jakub@redhat.com>
* objc-act.c (check_ivars, gen_declaration): For OBJCPLUS look at
number = TREE_VALUE (second_argument);
if (number
&& TREE_CODE (number) == INTEGER_CST
- && !wi::eq_p (number, 0))
+ && wi::to_wide (number) != 0)
TREE_VALUE (second_argument)
= wide_int_to_tree (TREE_TYPE (number),
- wi::add (number, 2));
+ wi::to_wide (number) + 2);
/* This is the third argument, the "first-to-check",
which specifies the index of the first argument to
number = TREE_VALUE (third_argument);
if (number
&& TREE_CODE (number) == INTEGER_CST
- && !wi::eq_p (number, 0))
+ && wi::to_wide (number) != 0)
TREE_VALUE (third_argument)
= wide_int_to_tree (TREE_TYPE (number),
- wi::add (number, 2));
+ wi::to_wide (number) + 2);
}
filtered_attributes = chainon (filtered_attributes,
new_attribute);
/* Get the value of the argument and add 2. */
tree number = TREE_VALUE (argument);
if (number && TREE_CODE (number) == INTEGER_CST
- && !wi::eq_p (number, 0))
+ && wi::to_wide (number) != 0)
TREE_VALUE (argument)
= wide_int_to_tree (TREE_TYPE (number),
- wi::add (number, 2));
+ wi::to_wide (number) + 2);
argument = TREE_CHAIN (argument);
}
if (tem != TREE_TYPE (t))
{
if (TREE_CODE (t) == INTEGER_CST)
- *tp = wide_int_to_tree (tem, t);
+ *tp = wide_int_to_tree (tem, wi::to_wide (t));
else
TREE_TYPE (t) = tem;
}
tree itype = TREE_TYPE (TREE_VALUE (vec));
if (POINTER_TYPE_P (itype))
itype = sizetype;
- wide_int offset = wide_int::from (TREE_PURPOSE (vec),
+ wide_int offset = wide_int::from (wi::to_wide (TREE_PURPOSE (vec)),
TYPE_PRECISION (itype),
TYPE_SIGN (itype));
/* Ignore invalid offsets that are not multiples of the step. */
- if (!wi::multiple_of_p
- (wi::abs (offset), wi::abs ((wide_int) fd.loops[i].step),
- UNSIGNED))
+ if (!wi::multiple_of_p (wi::abs (offset),
+ wi::abs (wi::to_wide (fd.loops[i].step)),
+ UNSIGNED))
{
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"ignoring sink clause with offset that is not "
fprintf (file, " overflow");
fprintf (file, " ");
- print_dec (node, file, TYPE_SIGN (TREE_TYPE (node)));
+ print_dec (wi::to_wide (node), file, TYPE_SIGN (TREE_TYPE (node)));
}
if (TREE_CODE (node) == REAL_CST)
{
fprintf (file, " overflow");
fprintf (file, " ");
- print_dec (node, file, TYPE_SIGN (TREE_TYPE (node)));
+ print_dec (wi::to_wide (node), file, TYPE_SIGN (TREE_TYPE (node)));
break;
case REAL_CST:
original type. Make sure to drop overflow flags. */
low = fold_convert (index_type, low);
if (TREE_OVERFLOW (low))
- low = wide_int_to_tree (index_type, low);
+ low = wide_int_to_tree (index_type, wi::to_wide (low));
/* The canonical form of a case label in GIMPLE is that a simple case
has an empty CASE_HIGH. For the casesi and tablejump expanders,
high = low;
high = fold_convert (index_type, high);
if (TREE_OVERFLOW (high))
- high = wide_int_to_tree (index_type, high);
+ high = wide_int_to_tree (index_type, wi::to_wide (high));
case_list.safe_push (simple_case_node (low, high, lab));
}
&& tree_int_cst_lt (ub, lb))
{
lb = wide_int_to_tree (ssizetype,
- offset_int::from (lb, SIGNED));
+ offset_int::from (wi::to_wide (lb),
+ SIGNED));
ub = wide_int_to_tree (ssizetype,
- offset_int::from (ub, SIGNED));
+ offset_int::from (wi::to_wide (ub),
+ SIGNED));
}
length
= fold_convert (sizetype,
&& get_range_info (op0, &minv, &maxv) == VR_RANGE)
{
if (icode == PLUS_EXPR)
- op1 = wide_int_to_tree (itype, wi::neg (op1));
- if (wi::geu_p (minv, op1))
+ op1 = wide_int_to_tree (itype, -wi::to_wide (op1));
+ if (wi::geu_p (minv, wi::to_wide (op1)))
{
op0 = fold_convert (otype, op0);
op1 = fold_convert (otype, op1);
{
tree merge_case = gimple_switch_label (stmt, next_index);
basic_block merge_bb = label_to_block (CASE_LABEL (merge_case));
- wide_int bhp1 = wi::add (base_high, 1);
+ wide_int bhp1 = wi::to_wide (base_high) + 1;
/* Merge the cases if they jump to the same place,
and their ranges are consecutive. */
if (merge_bb == base_bb
- && wi::eq_p (CASE_LOW (merge_case), bhp1))
+ && wi::to_wide (CASE_LOW (merge_case)) == bhp1)
{
base_high = CASE_HIGH (merge_case) ?
CASE_HIGH (merge_case) : CASE_LOW (merge_case);
if (TYPE_SIZE_UNIT (TREE_TYPE (ref))
&& TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (ref))) == INTEGER_CST
&& !integer_zerop (TYPE_SIZE_UNIT (TREE_TYPE (ref))))
- rem = wi::mod_trunc (off, TYPE_SIZE_UNIT (TREE_TYPE (ref)), SIGNED);
+ rem = wi::mod_trunc
+ (wi::to_wide (off),
+ wi::to_wide (TYPE_SIZE_UNIT (TREE_TYPE (ref))),
+ SIGNED);
else
/* If we can't compute the remainder simply force the initial
condition to zero. */
- rem = off;
- off = wide_int_to_tree (ssizetype, wi::sub (off, rem));
+ rem = wi::to_wide (off);
+ off = wide_int_to_tree (ssizetype, wi::to_wide (off) - rem);
memoff = wide_int_to_tree (TREE_TYPE (memoff), rem);
/* And finally replace the initial condition. */
access_fn = chrec_replace_initial_condition
std::swap (*dr_a1, *dr_a2);
bool do_remove = false;
- wide_int diff = wi::sub (DR_INIT (dr_a2->dr), DR_INIT (dr_a1->dr));
+ wide_int diff = (wi::to_wide (DR_INIT (dr_a2->dr))
+ - wi::to_wide (DR_INIT (dr_a1->dr)));
wide_int min_seg_len_b;
tree new_seg_len;
if (TREE_CODE (dr_b1->seg_len) == INTEGER_CST)
- min_seg_len_b = wi::abs (dr_b1->seg_len);
+ min_seg_len_b = wi::abs (wi::to_wide (dr_b1->seg_len));
else
- min_seg_len_b = wi::mul (factor, wi::abs (DR_STEP (dr_b1->dr)));
+ min_seg_len_b
+ = factor * wi::abs (wi::to_wide (DR_STEP (dr_b1->dr)));
/* Now we try to merge alias check dr_a1 & dr_b and dr_a2 & dr_b.
/* Adjust diff according to access size of both references. */
tree size_a1 = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_a1->dr)));
tree size_a2 = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_a2->dr)));
- diff = wi::add (diff, wi::sub (size_a2, size_a1));
+ diff += wi::to_wide (size_a2) - wi::to_wide (size_a1);
/* Case A.1. */
if (wi::leu_p (diff, min_seg_len_b)
/* Case A.2 and B combined. */
{
if (tree_fits_uhwi_p (dr_a1->seg_len)
&& tree_fits_uhwi_p (dr_a2->seg_len))
- new_seg_len
- = wide_int_to_tree (sizetype,
- wi::umin (wi::sub (dr_a1->seg_len,
- diff),
- dr_a2->seg_len));
+ {
+ wide_int min_len
+ = wi::umin (wi::to_wide (dr_a1->seg_len) - diff,
+ wi::to_wide (dr_a2->seg_len));
+ new_seg_len = wide_int_to_tree (sizetype, min_len);
+ }
else
new_seg_len
= size_binop (MINUS_EXPR, dr_a2->seg_len,
{
if (tree_fits_uhwi_p (dr_a1->seg_len)
&& tree_fits_uhwi_p (dr_a2->seg_len))
- new_seg_len
- = wide_int_to_tree (sizetype,
- wi::umax (wi::add (dr_a2->seg_len,
- diff),
- dr_a1->seg_len));
+ {
+ wide_int max_len
+ = wi::umax (wi::to_wide (dr_a2->seg_len) + diff,
+ wi::to_wide (dr_a1->seg_len));
+ new_seg_len = wide_int_to_tree (sizetype, max_len);
+ }
else
new_seg_len
= size_binop (PLUS_EXPR, dr_a2->seg_len,
case INTEGER_CST:
fprintf (di->stream, "int: ");
- print_decs (t, di->stream);
+ print_decs (wi::to_wide (t), di->stream);
break;
case STRING_CST:
*walk_subtrees = 0;
else if (TREE_CODE (*tp) == INTEGER_CST)
- *tp = wide_int_to_tree (new_type, *tp);
+ *tp = wide_int_to_tree (new_type, wi::to_wide (*tp));
else
{
*tp = copy_node (*tp);
*walk_subtrees = 0;
else if (TREE_CODE (*tp) == INTEGER_CST)
- *tp = wide_int_to_tree (new_type, *tp);
+ *tp = wide_int_to_tree (new_type, wi::to_wide (*tp));
else
{
*tp = copy_node (*tp);
/* If loop iterates for unknown times or fewer times than chain->length,
we still need to setup root variable and propagate it with PHI node. */
tree niters = number_of_latch_executions (loop);
- if (TREE_CODE (niters) != INTEGER_CST || wi::leu_p (niters, chain->length))
+ if (TREE_CODE (niters) != INTEGER_CST
+ || wi::leu_p (wi::to_wide (niters), chain->length))
return false;
/* Check stores in chain for elimination if they only store loop invariant
pp_unsigned_wide_integer (pp, tree_to_uhwi (node));
else
{
- wide_int val = node;
+ wide_int val = wi::to_wide (node);
if (wi::neg_p (val, TYPE_SIGN (TREE_TYPE (node))))
{
return false;
if (TREE_CODE (base) == INTEGER_CST)
- base_min = base_max = base;
+ base_min = base_max = wi::to_wide (base);
else if (TREE_CODE (base) == SSA_NAME
&& INTEGRAL_TYPE_P (TREE_TYPE (base))
&& get_range_info (base, &base_min, &base_max) == VR_RANGE)
return true;
if (TREE_CODE (step) == INTEGER_CST)
- step_min = step_max = step;
+ step_min = step_max = wi::to_wide (step);
else if (TREE_CODE (step) == SSA_NAME
&& INTEGRAL_TYPE_P (TREE_TYPE (step))
&& get_range_info (step, &step_min, &step_max) == VR_RANGE)
extreme = wi::max_value (type);
}
overflow = false;
- extreme = wi::sub (extreme, iv->step, TYPE_SIGN (type), &overflow);
+ extreme = wi::sub (extreme, wi::to_wide (iv->step),
+ TYPE_SIGN (type), &overflow);
if (overflow)
return true;
e = fold_build2 (code, boolean_type_node, base,
struct mem_addr_template *templ;
if (addr->step && !integer_onep (addr->step))
- st = immed_wide_int_const (addr->step, pointer_mode);
+ st = immed_wide_int_const (wi::to_wide (addr->step), pointer_mode);
else
st = NULL_RTX;
if (addr->offset && !integer_zerop (addr->offset))
{
- offset_int dc = offset_int::from (addr->offset, SIGNED);
+ offset_int dc = offset_int::from (wi::to_wide (addr->offset), SIGNED);
off = immed_wide_int_const (dc, pointer_mode);
}
else
else
{
unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
- wide_int nonzero_bits = wide_int::from (val->mask, precision,
- UNSIGNED) | val->value;
+ wide_int nonzero_bits
+ = (wide_int::from (val->mask, precision, UNSIGNED)
+ | wi::to_wide (val->value));
nonzero_bits &= get_nonzero_bits (name);
set_nonzero_bits (name, nonzero_bits);
}
}
else
{
- if (wi::bit_and_not (val.value, nonzero_bits) != 0)
+ if (wi::bit_and_not (wi::to_wide (val.value), nonzero_bits) != 0)
val.value = wide_int_to_tree (TREE_TYPE (lhs),
- nonzero_bits & val.value);
+ nonzero_bits
+ & wi::to_wide (val.value));
if (nonzero_bits == 0)
val.mask = 0;
else
if (TREE_CODE (bot) != INTEGER_CST)
return false;
- p0 = widest_int::from (top, SIGNED);
- p1 = widest_int::from (bot, SIGNED);
+ p0 = widest_int::from (wi::to_wide (top), SIGNED);
+ p1 = widest_int::from (wi::to_wide (bot), SIGNED);
if (p1 == 0)
return false;
*mul = wi::sext (wi::divmod_trunc (p0, p1, SIGNED, &res), precision);
*var = op0;
/* Always sign extend the offset. */
- wi::to_mpz (op1, offset, SIGNED);
+ wi::to_mpz (wi::to_wide (op1), offset, SIGNED);
if (negate)
mpz_neg (offset, offset);
break;
case INTEGER_CST:
*var = build_int_cst_type (type, 0);
- wi::to_mpz (expr, offset, TYPE_SIGN (type));
+ wi::to_mpz (wi::to_wide (expr), offset, TYPE_SIGN (type));
break;
default:
/* Case of comparing VAR with its below/up bounds. */
mpz_init (valc1);
- wi::to_mpz (c1, valc1, TYPE_SIGN (type));
+ wi::to_mpz (wi::to_wide (c1), valc1, TYPE_SIGN (type));
if (mpz_cmp (valc1, below) == 0)
cmp = GT_EXPR;
if (mpz_cmp (valc1, up) == 0)
wide_int min = wi::min_value (type);
wide_int max = wi::max_value (type);
- if (wi::eq_p (c1, min))
+ if (wi::to_wide (c1) == min)
cmp = GT_EXPR;
- if (wi::eq_p (c1, max))
+ if (wi::to_wide (c1) == max)
cmp = LT_EXPR;
}
/* Setup range information for varc1. */
if (integer_zerop (varc1))
{
- wi::to_mpz (integer_zero_node, minc1, TYPE_SIGN (type));
- wi::to_mpz (integer_zero_node, maxc1, TYPE_SIGN (type));
+ wi::to_mpz (0, minc1, TYPE_SIGN (type));
+ wi::to_mpz (0, maxc1, TYPE_SIGN (type));
}
else if (TREE_CODE (varc1) == SSA_NAME
&& INTEGRAL_TYPE_P (type)
if (integer_onep (s)
|| (TREE_CODE (c) == INTEGER_CST
&& TREE_CODE (s) == INTEGER_CST
- && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
+ && wi::mod_trunc (wi::to_wide (c), wi::to_wide (s),
+ TYPE_SIGN (type)) == 0)
|| (TYPE_OVERFLOW_UNDEFINED (type)
&& multiple_of_p (type, c, s)))
{
the whole # of iterations analysis will fail). */
if (!no_overflow)
{
- max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
+ max = wi::mask <widest_int> (TYPE_PRECISION (type)
+ - wi::ctz (wi::to_wide (s)), false);
wi::to_mpz (max, bnd, UNSIGNED);
return;
}
/* ... then we can strengthen this to C / S, and possibly we can use
the upper bound on C given by BNDS. */
if (TREE_CODE (c) == INTEGER_CST)
- wi::to_mpz (c, bnd, UNSIGNED);
+ wi::to_mpz (wi::to_wide (c), bnd, UNSIGNED);
else if (bnds_u_valid)
mpz_set (bnd, bnds->up);
}
mpz_init (d);
- wi::to_mpz (s, d, UNSIGNED);
+ wi::to_mpz (wi::to_wide (s), d, UNSIGNED);
mpz_fdiv_q (bnd, bnd, d);
mpz_clear (d);
}
tmod = fold_convert (type1, mod);
mpz_init (mmod);
- wi::to_mpz (mod, mmod, UNSIGNED);
+ wi::to_mpz (wi::to_wide (mod), mmod, UNSIGNED);
mpz_neg (mmod, mmod);
/* If the induction variable does not overflow and the exit is taken,
mpz_init (mstep);
mpz_init (tmp);
- wi::to_mpz (step, mstep, UNSIGNED);
+ wi::to_mpz (wi::to_wide (step), mstep, UNSIGNED);
mpz_add (tmp, bnds->up, mstep);
mpz_sub_ui (tmp, tmp, 1);
mpz_fdiv_q (tmp, tmp, mstep);
if (is_min == tree_int_cst_sign_bit (iv.step))
return false;
- *init = iv.base;
+ *init = wi::to_wide (iv.base);
return true;
}
&& INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
&& (get_range_info (orig_base, &min, &max) == VR_RANGE
|| get_cst_init_from_scev (orig_base, &max, false))
- && wi::gts_p (high, max))
+ && wi::gts_p (wi::to_wide (high), max))
base = wide_int_to_tree (unsigned_type, max);
else if (TREE_CODE (base) != INTEGER_CST
&& dominated_by_p (CDI_DOMINATORS,
&& INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
&& (get_range_info (orig_base, &min, &max) == VR_RANGE
|| get_cst_init_from_scev (orig_base, &min, true))
- && wi::gts_p (min, low))
+ && wi::gts_p (min, wi::to_wide (low)))
base = wide_int_to_tree (unsigned_type, min);
else if (TREE_CODE (base) != INTEGER_CST
&& dominated_by_p (CDI_DOMINATORS,
MIN - type_MIN >= |step| ; if step < 0.
Or VAR must take value outside of value range, which is not true. */
- step_wi = step;
+ step_wi = wi::to_wide (step);
type = TREE_TYPE (var);
if (tree_int_cst_sign_bit (step))
{
- diff = lower_bound_in_type (type, type);
- diff = minv - diff;
+ diff = minv - wi::to_wide (lower_bound_in_type (type, type));
step_wi = - step_wi;
}
else
- {
- diff = upper_bound_in_type (type, type);
- diff = diff - maxv;
- }
+ diff = wi::to_wide (upper_bound_in_type (type, type)) - maxv;
return (wi::geu_p (diff, step_wi));
}
if (cmp == LT_EXPR)
{
bool overflow;
- wide_int alt = wi::sub (larger, 1, TYPE_SIGN (TREE_TYPE (larger)),
+ wide_int alt = wi::sub (wi::to_wide (larger), 1,
+ TYPE_SIGN (TREE_TYPE (larger)),
&overflow);
if (! overflow)
alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
else
{
bool overflow;
- wide_int alt = wi::add (larger, 1, TYPE_SIGN (TREE_TYPE (larger)),
+ wide_int alt = wi::add (wi::to_wide (larger), 1,
+ TYPE_SIGN (TREE_TYPE (larger)),
&overflow);
if (! overflow)
alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
if (cmp == GT_EXPR)
{
bool overflow;
- wide_int alt = wi::add (smaller, 1, TYPE_SIGN (TREE_TYPE (smaller)),
+ wide_int alt = wi::add (wi::to_wide (smaller), 1,
+ TYPE_SIGN (TREE_TYPE (smaller)),
&overflow);
if (! overflow)
alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
else
{
bool overflow;
- wide_int alt = wi::sub (smaller, 1, TYPE_SIGN (TREE_TYPE (smaller)),
+ wide_int alt = wi::sub (wi::to_wide (smaller), 1,
+ TYPE_SIGN (TREE_TYPE (smaller)),
&overflow);
if (! overflow)
alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
{
ref->set = set;
if (ref1->opcode == MEM_REF)
- ref1->op0 = wide_int_to_tree (TREE_TYPE (ref2->op0),
- ref1->op0);
+ ref1->op0
+ = wide_int_to_tree (TREE_TYPE (ref2->op0),
+ wi::to_wide (ref1->op0));
else
- ref1->op2 = wide_int_to_tree (TREE_TYPE (ref2->op2),
- ref1->op2);
+ ref1->op2
+ = wide_int_to_tree (TREE_TYPE (ref2->op2),
+ wi::to_wide (ref1->op2));
}
else
{
ref->set = 0;
if (ref1->opcode == MEM_REF)
- ref1->op0 = wide_int_to_tree (ptr_type_node,
- ref1->op0);
+ ref1->op0
+ = wide_int_to_tree (ptr_type_node,
+ wi::to_wide (ref1->op0));
else
- ref1->op2 = wide_int_to_tree (ptr_type_node,
- ref1->op2);
+ ref1->op2
+ = wide_int_to_tree (ptr_type_node,
+ wi::to_wide (ref1->op2));
}
operands.release ();
gcc_checking_assert (addr_base && TREE_CODE (addr_base) != MEM_REF);
if (addr_base != TREE_OPERAND (op->op0, 0))
{
- offset_int off = offset_int::from (mem_op->op0, SIGNED);
+ offset_int off = offset_int::from (wi::to_wide (mem_op->op0), SIGNED);
off += addr_offset;
mem_op->op0 = wide_int_to_tree (TREE_TYPE (mem_op->op0), off);
op->op0 = build_fold_addr_expr (addr_base);
&& code != POINTER_PLUS_EXPR)
return false;
- off = offset_int::from (mem_op->op0, SIGNED);
+ off = offset_int::from (wi::to_wide (mem_op->op0), SIGNED);
/* The only thing we have to do is from &OBJ.foo.bar add the offset
from .foo.bar to the preceding MEM_REF offset and replace the
&& tem[tem.length () - 2].opcode == MEM_REF)
{
vn_reference_op_t new_mem_op = &tem[tem.length () - 2];
- new_mem_op->op0 = wide_int_to_tree (TREE_TYPE (mem_op->op0),
- new_mem_op->op0);
+ new_mem_op->op0
+ = wide_int_to_tree (TREE_TYPE (mem_op->op0),
+ wi::to_wide (new_mem_op->op0));
}
else
gcc_assert (tem.last ().opcode == STRING_CST);
/* For constants simply extend it. */
if (TREE_CODE (op) == INTEGER_CST)
- return wide_int_to_tree (wide_type, op);
+ return wide_int_to_tree (wide_type, wi::to_wide (op));
return NULL_TREE;
}
else
{
/* Sign-extend the offset. */
- offset_int soffset = offset_int::from (offset, SIGNED);
+ offset_int soffset = offset_int::from (wi::to_wide (offset), SIGNED);
if (!wi::fits_shwi_p (soffset))
rhsoffset = UNKNOWN_OFFSET;
else
code2 = invert_tree_comparison (code2, false);
if ((code1 == EQ_EXPR || code1 == BIT_AND_EXPR) && code2 == BIT_AND_EXPR)
- return wi::eq_p (expr1.pred_rhs,
- wi::bit_and (expr1.pred_rhs, expr2.pred_rhs));
+ return (wi::to_wide (expr1.pred_rhs)
+ == (wi::to_wide (expr1.pred_rhs) & wi::to_wide (expr2.pred_rhs)));
if (code1 != code2 && code2 != NE_EXPR)
return false;
if (mask == -1)
return;
set_range_info_raw (name, VR_RANGE,
- TYPE_MIN_VALUE (TREE_TYPE (name)),
- TYPE_MAX_VALUE (TREE_TYPE (name)));
+ wi::to_wide (TYPE_MIN_VALUE (TREE_TYPE (name))),
+ wi::to_wide (TYPE_MAX_VALUE (TREE_TYPE (name))));
}
range_info_def *ri = SSA_NAME_RANGE_INFO (name);
ri->set_nonzero_bits (mask);
get_nonzero_bits (const_tree name)
{
if (TREE_CODE (name) == INTEGER_CST)
- return name;
+ return wi::to_wide (name);
/* Use element_precision instead of TYPE_PRECISION so complex and
vector types get a non-zero precision. */
for (i = 2; i < branch_num; i++)
{
tree elt = gimple_switch_label (swtch, i);
- wide_int w = last;
- if (w + 1 != CASE_LOW (elt))
+ if (wi::to_wide (last) + 1 != wi::to_wide (CASE_LOW (elt)))
{
info->contiguous_range = false;
break;
if (TREE_CODE (elt->value) != INTEGER_CST)
return type;
- cst = elt->value;
+ cst = wi::to_wide (elt->value);
while (1)
{
unsigned int prec = GET_MODE_BITSIZE (mode);
fputs (";; ", f);
fprintf (f, "%*s", indent_step * indent_level, "");
- print_dec (root->low, f, TYPE_SIGN (TREE_TYPE (root->low)));
+ print_dec (wi::to_wide (root->low), f, TYPE_SIGN (TREE_TYPE (root->low)));
if (!tree_int_cst_equal (root->low, root->high))
{
fprintf (f, " ... ");
- print_dec (root->high, f, TYPE_SIGN (TREE_TYPE (root->high)));
+ print_dec (wi::to_wide (root->high), f,
+ TYPE_SIGN (TREE_TYPE (root->high)));
}
fputs ("\n", f);
original type. Make sure to drop overflow flags. */
low = fold_convert (index_type, low);
if (TREE_OVERFLOW (low))
- low = wide_int_to_tree (index_type, low);
+ low = wide_int_to_tree (index_type, wi::to_wide (low));
/* The canonical form of a case label in GIMPLE is that a simple case
has an empty CASE_HIGH. For the casesi and tablejump expanders,
high = low;
high = fold_convert (index_type, high);
if (TREE_OVERFLOW (high))
- high = wide_int_to_tree (index_type, high);
+ high = wide_int_to_tree (index_type, wi::to_wide (high));
basic_block case_bb = label_to_block_fn (cfun, lab);
edge case_edge = find_edge (bb, case_bb);
/* Peeling algorithm guarantees that vector loop bound is at least ONE,
we set range information to make niters analyzer's life easier. */
if (stmts != NULL)
- set_range_info (niters_vector, VR_RANGE, build_int_cst (type, 1),
- fold_build2 (RSHIFT_EXPR, type,
- TYPE_MAX_VALUE (type), log_vf));
+ set_range_info (niters_vector, VR_RANGE,
+ wi::to_wide (build_int_cst (type, 1)),
+ wi::to_wide (fold_build2 (RSHIFT_EXPR, type,
+ TYPE_MAX_VALUE (type),
+ log_vf)));
}
*niters_vector_ptr = niters_vector;
least VF, so set range information for newly generated var. */
if (new_var_p)
set_range_info (niters, VR_RANGE,
- build_int_cst (type, vf), TYPE_MAX_VALUE (type));
+ wi::to_wide (build_int_cst (type, vf)),
+ wi::to_wide (TYPE_MAX_VALUE (type)));
/* Prolog iterates at most bound_prolog times, latch iterates at
most bound_prolog - 1 times. */
vectorized matches the vector type of the result in
size and number of elements. */
unsigned prec
- = wi::udiv_trunc (TYPE_SIZE (vectype),
+ = wi::udiv_trunc (wi::to_wide (TYPE_SIZE (vectype)),
TYPE_VECTOR_SUBPARTS (vectype)).to_uhwi ();
tree type
= build_nonstandard_integer_type (prec,
if (group_gap_adj != 0 && ! slp_perm
&& group_elt == group_size - group_gap_adj)
{
- bool ovf;
- tree bump
- = wide_int_to_tree (sizetype,
- wi::smul (TYPE_SIZE_UNIT (elem_type),
- group_gap_adj, &ovf));
+ wide_int bump_val = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
+ * group_gap_adj);
+ tree bump = wide_int_to_tree (sizetype, bump_val);
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
stmt, bump);
group_elt = 0;
elements loaded for a permuted SLP load. */
if (group_gap_adj != 0 && slp_perm)
{
- bool ovf;
- tree bump
- = wide_int_to_tree (sizetype,
- wi::smul (TYPE_SIZE_UNIT (elem_type),
- group_gap_adj, &ovf));
+ wide_int bump_val = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
+ * group_gap_adj);
+ tree bump = wide_int_to_tree (sizetype, bump_val);
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
stmt, bump);
}
if (!inv2)
inv2 = build_int_cst (TREE_TYPE (val2), 0);
- return wi::cmp (inv1, inv2, TYPE_SIGN (TREE_TYPE (val1)));
+ return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
+ TYPE_SIGN (TREE_TYPE (val1)));
}
const bool cst1 = is_gimple_min_invariant (val1);
/* Compute the difference between the constants. If it overflows or
underflows, this means that we can trivially compare the NAME with
it and, consequently, the two values with each other. */
- wide_int diff = wi::sub (cst, inv);
- if (wi::cmp (0, inv, sgn) != wi::cmp (diff, cst, sgn))
+ wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
+ if (wi::cmp (0, wi::to_wide (inv), sgn)
+ != wi::cmp (diff, wi::to_wide (cst), sgn))
{
- const int res = wi::cmp (cst, inv, sgn);
+ const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
return cst1 ? res : -res;
}
/* It's unclear from the C standard whether shifts can overflow.
The following code ignores overflow; perhaps a C standard
interpretation ruling is needed. */
- res = wi::rshift (val1, wval2, sign);
+ res = wi::rshift (wi::to_wide (val1), wval2, sign);
else
- res = wi::lshift (val1, wval2);
+ res = wi::lshift (wi::to_wide (val1), wval2);
break;
}
case MULT_EXPR:
- res = wi::mul (val1, val2, sign, &overflow);
+ res = wi::mul (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
break;
case TRUNC_DIV_EXPR:
return res;
}
else
- res = wi::div_trunc (val1, val2, sign, &overflow);
+ res = wi::div_trunc (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
break;
case FLOOR_DIV_EXPR:
*overflow_p = true;
return res;
}
- res = wi::div_floor (val1, val2, sign, &overflow);
+ res = wi::div_floor (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
break;
case CEIL_DIV_EXPR:
*overflow_p = true;
return res;
}
- res = wi::div_ceil (val1, val2, sign, &overflow);
+ res = wi::div_ceil (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
break;
case ROUND_DIV_EXPR:
*overflow_p = 0;
return res;
}
- res = wi::div_round (val1, val2, sign, &overflow);
+ res = wi::div_round (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
break;
default:
if (range_int_cst_singleton_p (vr))
{
- *may_be_nonzero = vr->min;
+ *may_be_nonzero = wi::to_wide (vr->min);
*must_be_nonzero = *may_be_nonzero;
}
else if (tree_int_cst_sgn (vr->min) >= 0
|| tree_int_cst_sgn (vr->max) < 0)
{
- wide_int xor_mask = wi::bit_xor (vr->min, vr->max);
- *may_be_nonzero = wi::bit_or (vr->min, vr->max);
- *must_be_nonzero = wi::bit_and (vr->min, vr->max);
+ wide_int xor_mask = wi::to_wide (vr->min) ^ wi::to_wide (vr->max);
+ *may_be_nonzero = wi::to_wide (vr->min) | wi::to_wide (vr->max);
+ *must_be_nonzero = wi::to_wide (vr->min) & wi::to_wide (vr->max);
if (xor_mask != 0)
{
wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
{
vr0->type = VR_RANGE;
vr0->min = vrp_val_min (type);
- vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1));
+ vr0->max = wide_int_to_tree (type, wi::to_wide (ar->min) - 1);
}
if (!vrp_val_is_max (ar->max))
{
vr1->type = VR_RANGE;
- vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1));
+ vr1->min = wide_int_to_tree (type, wi::to_wide (ar->max) + 1);
vr1->max = vrp_val_max (type);
}
if (vr0->type == VR_UNDEFINED)
}
else
{
- type_min = vrp_val_min (expr_type);
- type_max = vrp_val_max (expr_type);
+ type_min = wi::to_wide (vrp_val_min (expr_type));
+ type_max = wi::to_wide (vrp_val_max (expr_type));
}
/* Combine the lower bounds, if any. */
{
if (minus_p)
{
- wmin = wi::sub (min_op0, min_op1);
+ wmin = wi::to_wide (min_op0) - wi::to_wide (min_op1);
/* Check for overflow. */
- if (wi::cmp (0, min_op1, sgn)
- != wi::cmp (wmin, min_op0, sgn))
- min_ovf = wi::cmp (min_op0, min_op1, sgn);
+ if (wi::cmp (0, wi::to_wide (min_op1), sgn)
+ != wi::cmp (wmin, wi::to_wide (min_op0), sgn))
+ min_ovf = wi::cmp (wi::to_wide (min_op0),
+ wi::to_wide (min_op1), sgn);
}
else
{
- wmin = wi::add (min_op0, min_op1);
+ wmin = wi::to_wide (min_op0) + wi::to_wide (min_op1);
/* Check for overflow. */
- if (wi::cmp (min_op1, 0, sgn)
- != wi::cmp (wmin, min_op0, sgn))
- min_ovf = wi::cmp (min_op0, wmin, sgn);
+ if (wi::cmp (wi::to_wide (min_op1), 0, sgn)
+ != wi::cmp (wmin, wi::to_wide (min_op0), sgn))
+ min_ovf = wi::cmp (wi::to_wide (min_op0), wmin, sgn);
}
}
else if (min_op0)
- wmin = min_op0;
+ wmin = wi::to_wide (min_op0);
else if (min_op1)
{
if (minus_p)
{
- wmin = wi::neg (min_op1);
+ wmin = -wi::to_wide (min_op1);
/* Check for overflow. */
- if (sgn == SIGNED && wi::neg_p (min_op1) && wi::neg_p (wmin))
+ if (sgn == SIGNED
+ && wi::neg_p (wi::to_wide (min_op1))
+ && wi::neg_p (wmin))
min_ovf = 1;
- else if (sgn == UNSIGNED && wi::ne_p (min_op1, 0))
+ else if (sgn == UNSIGNED && wi::to_wide (min_op1) != 0)
min_ovf = -1;
}
else
- wmin = min_op1;
+ wmin = wi::to_wide (min_op1);
}
else
wmin = wi::shwi (0, prec);
{
if (minus_p)
{
- wmax = wi::sub (max_op0, max_op1);
+ wmax = wi::to_wide (max_op0) - wi::to_wide (max_op1);
/* Check for overflow. */
- if (wi::cmp (0, max_op1, sgn)
- != wi::cmp (wmax, max_op0, sgn))
- max_ovf = wi::cmp (max_op0, max_op1, sgn);
+ if (wi::cmp (0, wi::to_wide (max_op1), sgn)
+ != wi::cmp (wmax, wi::to_wide (max_op0), sgn))
+ max_ovf = wi::cmp (wi::to_wide (max_op0),
+ wi::to_wide (max_op1), sgn);
}
else
{
- wmax = wi::add (max_op0, max_op1);
+ wmax = wi::to_wide (max_op0) + wi::to_wide (max_op1);
- if (wi::cmp (max_op1, 0, sgn)
- != wi::cmp (wmax, max_op0, sgn))
- max_ovf = wi::cmp (max_op0, wmax, sgn);
+ if (wi::cmp (wi::to_wide (max_op1), 0, sgn)
+ != wi::cmp (wmax, wi::to_wide (max_op0), sgn))
+ max_ovf = wi::cmp (wi::to_wide (max_op0), wmax, sgn);
}
}
else if (max_op0)
- wmax = max_op0;
+ wmax = wi::to_wide (max_op0);
else if (max_op1)
{
if (minus_p)
{
- wmax = wi::neg (max_op1);
+ wmax = -wi::to_wide (max_op1);
/* Check for overflow. */
- if (sgn == SIGNED && wi::neg_p (max_op1) && wi::neg_p (wmax))
+ if (sgn == SIGNED
+ && wi::neg_p (wi::to_wide (max_op1))
+ && wi::neg_p (wmax))
max_ovf = 1;
- else if (sgn == UNSIGNED && wi::ne_p (max_op1, 0))
+ else if (sgn == UNSIGNED && wi::to_wide (max_op1) != 0)
max_ovf = -1;
}
else
- wmax = max_op1;
+ wmax = wi::to_wide (max_op1);
}
else
wmax = wi::shwi (0, prec);
{
low_bound = bound;
high_bound = complement;
- if (wi::ltu_p (vr0.max, low_bound))
+ if (wi::ltu_p (wi::to_wide (vr0.max), low_bound))
{
/* [5, 6] << [1, 2] == [10, 24]. */
/* We're shifting out only zeroes, the value increases
monotonically. */
in_bounds = true;
}
- else if (wi::ltu_p (high_bound, vr0.min))
+ else if (wi::ltu_p (high_bound, wi::to_wide (vr0.min)))
{
/* [0xffffff00, 0xffffffff] << [1, 2]
== [0xfffffc00, 0xfffffffe]. */
/* [-1, 1] << [1, 2] == [-4, 4]. */
low_bound = complement;
high_bound = bound;
- if (wi::lts_p (vr0.max, high_bound)
- && wi::lts_p (low_bound, vr0.min))
+ if (wi::lts_p (wi::to_wide (vr0.max), high_bound)
+ && wi::lts_p (low_bound, wi::to_wide (vr0.min)))
{
/* For non-negative numbers, we're shifting out only
zeroes, the value increases monotonically.
signop sgn = TYPE_SIGN (expr_type);
unsigned int prec = TYPE_PRECISION (expr_type);
wide_int wmin, wmax, tmp;
- wide_int zero = wi::zero (prec);
- wide_int one = wi::one (prec);
if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1))
{
- wmax = wi::sub (vr1.max, one);
+ wmax = wi::to_wide (vr1.max) - 1;
if (sgn == SIGNED)
{
- tmp = wi::sub (wi::minus_one (prec), vr1.min);
+ tmp = -1 - wi::to_wide (vr1.min);
wmax = wi::smax (wmax, tmp);
}
}
wmax = wi::max_value (prec, sgn);
/* X % INT_MIN may be INT_MAX. */
if (sgn == UNSIGNED)
- wmax = wmax - one;
+ wmax = wmax - 1;
}
if (sgn == UNSIGNED)
- wmin = zero;
+ wmin = wi::zero (prec);
else
{
wmin = -wmax;
if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST)
{
- tmp = vr0.min;
- if (wi::gts_p (tmp, zero))
- tmp = zero;
+ tmp = wi::to_wide (vr0.min);
+ if (wi::gts_p (tmp, 0))
+ tmp = wi::zero (prec);
wmin = wi::smax (wmin, tmp);
}
}
if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST)
{
- tmp = vr0.max;
+ tmp = wi::to_wide (vr0.max);
if (sgn == SIGNED && wi::neg_p (tmp))
- tmp = zero;
+ tmp = wi::zero (prec);
wmax = wi::min (wmax, tmp, sgn);
}
range. */
if (vr0p && range_int_cst_p (vr0p))
{
- wide_int w = vr1p->min;
+ wide_int w = wi::to_wide (vr1p->min);
int m = 0, n = 0;
if (code == BIT_IOR_EXPR)
w = ~w;
m = wi::ctz (w) - n;
}
wide_int mask = wi::mask (m + n, true, w.get_precision ());
- if (wi::eq_p (mask & vr0p->min, mask & vr0p->max))
+ if ((mask & wi::to_wide (vr0p->min))
+ == (mask & wi::to_wide (vr0p->max)))
{
min = int_const_binop (code, vr0p->min, vr1p->min);
max = int_const_binop (code, vr0p->max, vr1p->min);
&& tree_int_cst_sgn (vr0.max) < 0
&& tree_int_cst_sgn (vr1.max) < 0)
{
- wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
- wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
+ wmax = wi::min (wmax, wi::to_wide (vr0.max),
+ TYPE_SIGN (expr_type));
+ wmax = wi::min (wmax, wi::to_wide (vr1.max),
+ TYPE_SIGN (expr_type));
}
/* If either input range contains only non-negative values
we can truncate the result range maximum to the respective
maximum of the input range. */
if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
- wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
+ wmax = wi::min (wmax, wi::to_wide (vr0.max),
+ TYPE_SIGN (expr_type));
if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
- wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
+ wmax = wi::min (wmax, wi::to_wide (vr1.max),
+ TYPE_SIGN (expr_type));
max = wide_int_to_tree (expr_type, wmax);
cmp = compare_values (min, max);
/* PR68217: In case of signed & sign-bit-CST should
if (!TYPE_UNSIGNED (expr_type)
&& ((int_cst_range0
&& value_range_constant_singleton (&vr0)
- && !wi::cmps (vr0.min, sign_bit))
+ && !wi::cmps (wi::to_wide (vr0.min), sign_bit))
|| (int_cst_range1
&& value_range_constant_singleton (&vr1)
- && !wi::cmps (vr1.min, sign_bit))))
+ && !wi::cmps (wi::to_wide (vr1.min), sign_bit))))
{
min = TYPE_MIN_VALUE (expr_type);
max = build_int_cst (expr_type, 0);
&& tree_int_cst_sgn (vr0.min) >= 0
&& tree_int_cst_sgn (vr1.min) >= 0)
{
- wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
- wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
+ wmin = wi::max (wmin, wi::to_wide (vr0.min),
+ TYPE_SIGN (expr_type));
+ wmin = wi::max (wmin, wi::to_wide (vr1.min),
+ TYPE_SIGN (expr_type));
}
/* If either input range contains only negative values
we can truncate the minimum of the result range to the
respective minimum range. */
if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
- wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
+ wmin = wi::max (wmin, wi::to_wide (vr0.min),
+ TYPE_SIGN (expr_type));
if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
- wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
+ wmin = wi::max (wmin, wi::to_wide (vr1.min),
+ TYPE_SIGN (expr_type));
min = wide_int_to_tree (expr_type, wmin);
}
else if (code == BIT_XOR_EXPR)
if (!overflow
&& wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
&& (sgn == UNSIGNED
- || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
+ || wi::gts_p (wtmp, 0) == wi::gts_p (wi::to_wide (step), 0)))
{
tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
tree inc = gimple_assign_rhs2 (op1_def);
if (reversed)
- *new_cst = wide_int_to_tree (type, max + inc);
+ *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
else
- *new_cst = wide_int_to_tree (type, max - inc);
+ *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
return true;
}
}
wide_int minval
= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
new_val = val2;
- if (minval == new_val)
+ if (minval == wi::to_wide (new_val))
new_val = NULL_TREE;
}
else
{
wide_int maxval
= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
- mask |= val2;
- if (mask == maxval)
+ mask |= wi::to_wide (val2);
+ if (wi::eq_p (mask, maxval))
new_val = NULL_TREE;
else
new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
bool valid_p = false, valn, cst2n;
enum tree_code ccode = comp_code;
- valv = wide_int::from (val, nprec, UNSIGNED);
- cst2v = wide_int::from (cst2, nprec, UNSIGNED);
+ valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
+ cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
/* If CST2 doesn't have most significant bit set,
if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
return false;
- wide_int mask = maskt;
+ wi::tree_to_wide_ref mask = wi::to_wide (maskt);
wide_int inv_mask = ~mask;
- wide_int val = valt; // Assume VALT is INTEGER_CST
+ /* Assume VALT is INTEGER_CST. */
+ wi::tree_to_wide_ref val = wi::to_wide (valt);
if ((inv_mask & (inv_mask + 1)) != 0
|| (val & mask) != val)
next_min = CASE_LOW (next_cl);
next_max = CASE_HIGH (next_cl);
- wide_int difference = wi::sub (next_min, max ? max : min);
+ wide_int difference = (wi::to_wide (next_min)
+ - wi::to_wide (max ? max : min));
if (wi::eq_p (difference, 1))
max = next_max ? next_max : next_min;
else
return;
}
cst = gimple_assign_rhs2 (stmt);
- set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst));
+ set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
+ wi::to_wide (cst)));
}
/* Convert range assertion expressions into the implied copies and
B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
B = A - 1; if (B < A) -> B = A - 1; if (A != 0) */
- else if (wi::eq_p (x, max - 1))
+ else if (wi::to_wide (x) == max - 1)
{
op0 = op1;
op1 = wide_int_to_tree (TREE_TYPE (op0), 0);
== TYPE_PRECISION (ptr_type_node))
&& TREE_CODE (vr1max) == INTEGER_CST
&& TREE_CODE (vr1min) == INTEGER_CST
- && (wi::clz (wi::sub (vr1max, vr1min))
+ && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
< TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
;
/* Else choose the range. */
a signed wide_int, while a negative value cannot be represented
by an unsigned wide_int. */
if (src_sgn != dest_sgn
- && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
+ && (wi::lts_p (wi::to_wide (vr->min), 0)
+ || wi::lts_p (wi::to_wide (vr->max), 0)))
return false;
/* Then we can perform the conversion on both ends and compare
return false;
if (vr->type == VR_RANGE
- && wi::sub (vr->max, vr->min) == 1)
+ && wi::to_wide (vr->max) - wi::to_wide (vr->min) == 1)
{
*a = vr->min;
*b = vr->max;
/* ~[TYPE_MIN + 1, TYPE_MAX - 1] */
if (vr->type == VR_ANTI_RANGE
- && wi::sub (vr->min, vrp_val_min (TREE_TYPE (var))) == 1
- && wi::sub (vrp_val_max (TREE_TYPE (var)), vr->max) == 1)
+ && (wi::to_wide (vr->min)
+ - wi::to_wide (vrp_val_min (TREE_TYPE (var)))) == 1
+ && (wi::to_wide (vrp_val_max (TREE_TYPE (var)))
+ - wi::to_wide (vr->max)) == 1)
{
*a = vrp_val_min (TREE_TYPE (var));
*b = vrp_val_max (TREE_TYPE (var));
vr_value[i]->max) == 1)))
set_ptr_nonnull (name);
else if (!POINTER_TYPE_P (TREE_TYPE (name)))
- set_range_info (name, vr_value[i]->type, vr_value[i]->min,
- vr_value[i]->max);
+ set_range_info (name, vr_value[i]->type,
+ wi::to_wide (vr_value[i]->min),
+ wi::to_wide (vr_value[i]->max));
}
substitute_and_fold (op_with_constant_singleton_value_range, vrp_fold_stmt);
|| vr_result.type == VR_ANTI_RANGE)
&& (TREE_CODE (vr_result.min) == INTEGER_CST)
&& (TREE_CODE (vr_result.max) == INTEGER_CST))
- set_range_info (lhs,
- vr_result.type, vr_result.min, vr_result.max);
+ set_range_info (lhs, vr_result.type,
+ wi::to_wide (vr_result.min),
+ wi::to_wide (vr_result.max));
}
else if (POINTER_TYPE_P (TREE_TYPE (lhs))
&& ((vr_result.type == VR_RANGE
|| vr.type == VR_ANTI_RANGE)
&& (TREE_CODE (vr.min) == INTEGER_CST)
&& (TREE_CODE (vr.max) == INTEGER_CST))
- set_range_info (output, vr.type, vr.min, vr.max);
+ set_range_info (output, vr.type,
+ wi::to_wide (vr.min),
+ wi::to_wide (vr.max));
}
else if (POINTER_TYPE_P (TREE_TYPE (output))
&& ((vr.type == VR_RANGE
case BOOLEAN_TYPE:
/* Cache false or true. */
limit = 2;
- if (wi::ltu_p (t, 2))
+ if (wi::ltu_p (wi::to_wide (t), 2))
ix = TREE_INT_CST_ELT (t, 0);
break;
if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT)
ix = tree_to_uhwi (t);
}
- else if (wi::ltu_p (t, INTEGER_SHARE_LIMIT))
+ else if (wi::ltu_p (wi::to_wide (t), INTEGER_SHARE_LIMIT))
ix = tree_to_uhwi (t);
}
else
if (integer_minus_onep (t))
ix = 0;
- else if (!wi::neg_p (t))
+ else if (!wi::neg_p (wi::to_wide (t)))
{
if (prec < HOST_BITS_PER_WIDE_INT)
{
if (tree_to_shwi (t) < INTEGER_SHARE_LIMIT)
ix = tree_to_shwi (t) + 1;
}
- else if (wi::ltu_p (t, INTEGER_SHARE_LIMIT))
+ else if (wi::ltu_p (wi::to_wide (t), INTEGER_SHARE_LIMIT))
ix = tree_to_shwi (t) + 1;
}
}
/* If there is already an entry for the number verify it's the
same. */
if (*slot)
- gcc_assert (wi::eq_p (tree (*slot), t));
+ gcc_assert (wi::to_wide (tree (*slot)) == wi::to_wide (t));
else
/* Otherwise insert this one into the hash table. */
*slot = t;
bitwise comparisons to see if two values are the same. */
memset (&d, 0, sizeof d);
- real_from_integer (&d, type ? TYPE_MODE (type) : VOIDmode, i,
+ real_from_integer (&d, type ? TYPE_MODE (type) : VOIDmode, wi::to_wide (i),
TYPE_SIGN (TREE_TYPE (i)));
return d;
}
switch (TREE_CODE (expr))
{
case INTEGER_CST:
- return wi::eq_p (expr, 0);
+ return wi::to_wide (expr) == 0;
case COMPLEX_CST:
return (integer_zerop (TREE_REALPART (expr))
&& integer_zerop (TREE_IMAGPART (expr)));
else if (TREE_CODE (expr) != INTEGER_CST)
return 0;
- return wi::max_value (TYPE_PRECISION (TREE_TYPE (expr)), UNSIGNED) == expr;
+ return (wi::max_value (TYPE_PRECISION (TREE_TYPE (expr)), UNSIGNED)
+ == wi::to_wide (expr));
}
/* Return 1 if EXPR is the integer constant minus one. */
if (TREE_CODE (expr) != INTEGER_CST)
return 0;
- return wi::popcount (expr) == 1;
+ return wi::popcount (wi::to_wide (expr)) == 1;
}
/* Return 1 if EXPR is an integer constant other than zero or a
integer_nonzerop (const_tree expr)
{
return ((TREE_CODE (expr) == INTEGER_CST
- && !wi::eq_p (expr, 0))
+ && wi::to_wide (expr) != 0)
|| (TREE_CODE (expr) == COMPLEX_CST
&& (integer_nonzerop (TREE_REALPART (expr))
|| integer_nonzerop (TREE_IMAGPART (expr)))));
if (TREE_CODE (expr) == COMPLEX_CST)
return tree_log2 (TREE_REALPART (expr));
- return wi::exact_log2 (expr);
+ return wi::exact_log2 (wi::to_wide (expr));
}
/* Similar, but return the largest integer Y such that 2 ** Y is less
if (TREE_CODE (expr) == COMPLEX_CST)
return tree_log2 (TREE_REALPART (expr));
- return wi::floor_log2 (expr);
+ return wi::floor_log2 (wi::to_wide (expr));
}
/* Return number of known trailing zero bits in EXPR, or, if the value of
switch (TREE_CODE (expr))
{
case INTEGER_CST:
- ret1 = wi::ctz (expr);
+ ret1 = wi::ctz (wi::to_wide (expr));
return MIN (ret1, prec);
case SSA_NAME:
ret1 = wi::ctz (get_nonzero_bits (expr));
offset_int
mem_ref_offset (const_tree t)
{
- return offset_int::from (TREE_OPERAND (t, 1), SIGNED);
+ return offset_int::from (wi::to_wide (TREE_OPERAND (t, 1)), SIGNED);
}
/* Return an invariant ADDR_EXPR of type TYPE taking the address of BASE
{
unsigned bitno = TYPE_PRECISION (TREE_TYPE (t)) - 1;
- return wi::extract_uhwi (t, bitno, 1);
+ return wi::extract_uhwi (wi::to_wide (t), bitno, 1);
}
/* Return an indication of the sign of the integer constant T.
int
tree_int_cst_sgn (const_tree t)
{
- if (wi::eq_p (t, 0))
+ if (wi::to_wide (t) == 0)
return 0;
else if (TYPE_UNSIGNED (TREE_TYPE (t)))
return 1;
- else if (wi::neg_p (t))
+ else if (wi::neg_p (wi::to_wide (t)))
return -1;
else
return 1;
if (TREE_CODE (win) == INTEGER_CST)
{
tree wtype = TREE_TYPE (win);
- unsigned prec = wi::min_precision (win, TYPE_SIGN (wtype));
+ unsigned prec = wi::min_precision (wi::to_wide (win), TYPE_SIGN (wtype));
if (for_type)
prec = MAX (prec, final_prec);
if (prec < TYPE_PRECISION (wtype))
/* Non-standard boolean types can have arbitrary precision but various
transformations assume that they can only take values 0 and +/-1. */
if (TREE_CODE (type) == BOOLEAN_TYPE)
- return wi::fits_to_boolean_p (c, type);
+ return wi::fits_to_boolean_p (wi::to_wide (c), type);
retry:
type_low_bound = TYPE_MIN_VALUE (type);
/* Perform some generic filtering which may allow making a decision
even if the bounds are not constant. First, negative integers
never fit in unsigned types, */
- if (TYPE_UNSIGNED (type) && sgn_c == SIGNED && wi::neg_p (c))
+ if (TYPE_UNSIGNED (type) && sgn_c == SIGNED && wi::neg_p (wi::to_wide (c)))
return false;
/* Second, narrower types always fit in wider ones. */
possible that the value will not fit. The test below
fails if any bit is set between the sign bit of the
underlying mode and the top bit of the type. */
- if (wi::ne_p (wi::zext (c, prec - 1), c))
+ if (wi::zext (wi::to_wide (c), prec - 1) != wi::to_wide (c))
return false;
}
- else if (wi::neg_p (c))
+ else if (wi::neg_p (wi::to_wide (c)))
return false;
}
}
/* Or to fits_to_tree_p, if nothing else. */
- return wi::fits_to_tree_p (c, type);
+ return wi::fits_to_tree_p (wi::to_wide (c), type);
}
/* Stores bounds of an integer TYPE in MIN and MAX. If TYPE has non-constant
{
if (!POINTER_TYPE_P (type) && TYPE_MIN_VALUE (type)
&& TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST)
- wi::to_mpz (TYPE_MIN_VALUE (type), min, TYPE_SIGN (type));
+ wi::to_mpz (wi::to_wide (TYPE_MIN_VALUE (type)), min, TYPE_SIGN (type));
else
{
if (TYPE_UNSIGNED (type))
if (!POINTER_TYPE_P (type) && TYPE_MAX_VALUE (type)
&& TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST)
- wi::to_mpz (TYPE_MAX_VALUE (type), max, TYPE_SIGN (type));
+ wi::to_mpz (wi::to_wide (TYPE_MAX_VALUE (type)), max, TYPE_SIGN (type));
else
{
wide_int mn = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
tree
num_ending_zeros (const_tree x)
{
- return build_int_cst (TREE_TYPE (x), wi::ctz (x));
+ return build_int_cst (TREE_TYPE (x), wi::ctz (wi::to_wide (x)));
}
/* For tree codes with a sharing machinery re-build the result. */
if (TREE_CODE (t) == INTEGER_CST)
- return wide_int_to_tree (TREE_TYPE (t), t);
+ return wide_int_to_tree (TREE_TYPE (t), wi::to_wide (t));
/* Otherwise, as all tcc_constants are possibly shared, copy the node
and drop the flag. */
int cnt = 0;
if (TREE_CODE (arg) == INTEGER_CST)
{
- wide_int w = wi::sext (arg, prec);
+ wide_int w = wi::sext (wi::to_wide (arg), prec);
if (wi::neg_p (w))
return 2;
else
/* The tree and const_tree overload templates. */
namespace wi
{
- template <>
- struct int_traits <const_tree>
- {
- static const enum precision_type precision_type = VAR_PRECISION;
- static const bool host_dependent_precision = false;
- static const bool is_sign_extended = false;
- static unsigned int get_precision (const_tree);
- static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
- const_tree);
- };
-
- template <>
- struct int_traits <tree> : public int_traits <const_tree> {};
-
template <int N>
class extended_tree
{
static const unsigned int precision = N;
};
- generic_wide_int <extended_tree <WIDE_INT_MAX_PRECISION> >
- to_widest (const_tree);
-
- generic_wide_int <extended_tree <ADDR_MAX_PRECISION> > to_offset (const_tree);
+ typedef const generic_wide_int <extended_tree <WIDE_INT_MAX_PRECISION> >
+ tree_to_widest_ref;
+ typedef const generic_wide_int <extended_tree <ADDR_MAX_PRECISION> >
+ tree_to_offset_ref;
+ typedef const generic_wide_int<wide_int_ref_storage<false, false> >
+ tree_to_wide_ref;
+ tree_to_widest_ref to_widest (const_tree);
+ tree_to_offset_ref to_offset (const_tree);
+ tree_to_wide_ref to_wide (const_tree);
wide_int to_wide (const_tree, unsigned int);
}
-inline unsigned int
-wi::int_traits <const_tree>::get_precision (const_tree tcst)
-{
- return TYPE_PRECISION (TREE_TYPE (tcst));
-}
+/* Refer to INTEGER_CST T as though it were a widest_int.
-/* Convert the tree_cst X into a wide_int of PRECISION. */
-inline wi::storage_ref
-wi::int_traits <const_tree>::decompose (HOST_WIDE_INT *,
- unsigned int precision, const_tree x)
-{
- gcc_checking_assert (precision == TYPE_PRECISION (TREE_TYPE (x)));
- return wi::storage_ref (&TREE_INT_CST_ELT (x, 0), TREE_INT_CST_NUNITS (x),
- precision);
-}
+ This function gives T's actual numerical value, influenced by the
+ signedness of its type. For example, a signed byte with just the
+ top bit set would be -128 while an unsigned byte with the same
+ bit pattern would be 128.
+
+ This is the right choice when operating on groups of INTEGER_CSTs
+ that might have different signedness or precision. It is also the
+ right choice in code that specifically needs an approximation of
+ infinite-precision arithmetic instead of normal modulo arithmetic.
+
+ The approximation of infinite precision is good enough for realistic
+ numbers of additions and subtractions of INTEGER_CSTs (where
+ "realistic" includes any number less than 1 << 31) but it cannot
+ represent the result of multiplying the two largest supported
+ INTEGER_CSTs. The overflow-checking form of wi::mul provides a way
+ of multiplying two arbitrary INTEGER_CSTs and checking that the
+ result is representable as a widest_int.
+
+ Note that any overflow checking done on these values is relative to
+ the range of widest_int rather than the range of a TREE_TYPE.
+
+ Calling this function should have no overhead in release builds,
+ so it is OK to call it several times for the same tree. If it is
+ useful for readability reasons to reduce the number of calls,
+ it is more efficient to use:
+
+ wi::tree_to_widest_ref wt = wi::to_widest (t);
+
+ instead of:
-inline generic_wide_int <wi::extended_tree <WIDE_INT_MAX_PRECISION> >
+ widest_int wt = wi::to_widest (t). */
+
+inline wi::tree_to_widest_ref
wi::to_widest (const_tree t)
{
return t;
}
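
As a concrete illustration (a sketch written for this description, not part of the patch; it only relies on standard tree globals such as signed_char_type_node and on the overflow-checking wi::mul overload mentioned above), the same 8-bit pattern yields different widest_int values depending on the signedness of the tree's type:

  /* Sketch only: wi::to_widest reads the value with its type's sign.  */
  tree sc = build_int_cst (signed_char_type_node, -128);
  tree uc = build_int_cst (unsigned_char_type_node, 128);
  gcc_assert (wi::to_widest (sc) == -128);
  gcc_assert (wi::to_widest (uc) == 128);

  /* Overflow-checked multiplication of two INTEGER_CSTs as widest_ints.  */
  bool overflow;
  widest_int prod = wi::mul (wi::to_widest (sc), wi::to_widest (uc),
                             SIGNED, &overflow);
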
-inline generic_wide_int <wi::extended_tree <ADDR_MAX_PRECISION> >
+/* Refer to INTEGER_CST T as though it were an offset_int.
+
+ This function is an optimisation of wi::to_widest for cases
+ in which T is known to be a bit or byte count in the range
+ (-(2 ^ (N + BITS_PER_UNIT)), 2 ^ (N + BITS_PER_UNIT)), where N is
+ the target's address size in bits.
+
+ This is the right choice when operating on bit or byte counts as
+ untyped numbers rather than M-bit values. The wi::to_widest comments
+ about addition, subtraction and multiplication apply here: sequences
+ of 1 << 31 additions and subtractions do not induce overflow, but
+ multiplying the largest sizes might. Again,
+
+ wi::tree_to_offset_ref wt = wi::to_offset (t);
+
+ is more efficient than:
+
+ offset_int wt = wi::to_offset (t). */
+
+inline wi::tree_to_offset_ref
wi::to_offset (const_tree t)
{
return t;
}
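
A hedged sketch of the intended use (the helper name is invented for this description, and it assumes the field's offset and element size are INTEGER_CSTs):

  /* Sketch only: byte counts such as DECL_FIELD_OFFSET and TYPE_SIZE_UNIT
     are natural wi::to_offset operands and can be mixed freely, whatever
     their exact tree types.  */
  static offset_int
  offset_plus_size_sketch (tree field)
  {
    return (wi::to_offset (DECL_FIELD_OFFSET (field))
            + wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (field))));
  }
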
+/* Refer to INTEGER_CST T as though it were a wide_int.
+
+ In contrast to the approximation of infinite-precision numbers given
+ by wi::to_widest and wi::to_offset, this function treats T as a
+ signless collection of N bits, where N is the precision of T's type.
+ As with machine registers, signedness is determined by the operation
+ rather than the operands; for example, there is a distinction between
+ signed and unsigned division.
+
+ This is the right choice when operating on values with the same type
+ using normal modulo arithmetic. The overflow-checking forms of things
+ like wi::add check whether the result can be represented in T's type.
+
+ Calling this function should have no overhead in release builds,
+ so it is OK to call it several times for the same tree. If it is
+ useful for readability reasons to reduce the number of calls,
+ it is more efficient to use:
+
+ wi::tree_to_wide_ref wt = wi::to_wide (t);
+
+ instead of:
+
+ wide_int wt = wi::to_wide (t). */
+
+inline wi::tree_to_wide_ref
+wi::to_wide (const_tree t)
+{
+ return wi::storage_ref (&TREE_INT_CST_ELT (t, 0), TREE_INT_CST_NUNITS (t),
+ TYPE_PRECISION (TREE_TYPE (t)));
+}
+
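
For example (a minimal sketch for this description, not part of the patch), wi::to_wide behaves like a machine register of the type's width:

  /* Sketch only: arithmetic on wi::to_wide values is modulo the precision
     of T's type, so an 8-bit unsigned value wraps from 255 to 0.  */
  tree t = build_int_cst (unsigned_char_type_node, 255);
  wide_int w = wi::to_wide (t) + 1;
  gcc_assert (w == 0);
  tree wrapped = wide_int_to_tree (TREE_TYPE (t), w);
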
/* Convert INTEGER_CST T to a wide_int of precision PREC, extending or
truncating as necessary. When extending, use sign extension if T's
type is signed and zero extension if T's type is unsigned. */
inline wide_int
wi::to_wide (const_tree t, unsigned int prec)
{
- return wide_int::from (t, prec, TYPE_SIGN (TREE_TYPE (t)));
+ return wide_int::from (wi::to_wide (t), prec, TYPE_SIGN (TREE_TYPE (t)));
}
template <int N>
unlink_stmt_vdef (stmt);
if (TREE_CODE (off) == INTEGER_CST)
- g = gimple_build_cond (wi::neg_p (off) ? LT_EXPR : GE_EXPR, ptri,
- fold_build1 (NEGATE_EXPR, sizetype, off),
+ g = gimple_build_cond (wi::neg_p (wi::to_wide (off)) ? LT_EXPR : GE_EXPR,
+ ptri, fold_build1 (NEGATE_EXPR, sizetype, off),
NULL_TREE, NULL_TREE);
else if (pos_neg != 3)
g = gimple_build_cond (pos_neg == 1 ? LT_EXPR : GT_EXPR,
and in wider precisions.
There are constructors to create the various forms of wide_int from
- trees, rtl and constants. For trees you can simply say:
+ trees, rtl and constants. For trees the options are:
tree t = ...;
- wide_int x = t;
+ wi::to_wide (t) // Treat T as a wide_int
+ wi::to_offset (t) // Treat T as an offset_int
+ wi::to_widest (t) // Treat T as a widest_int
- However, a little more syntax is required for rtl constants since
- they do not have an explicit precision. To make an rtl into a
- wide_int, you have to pair it with a mode. The canonical way to do
- this is with rtx_mode_t as in:
+ All three are light-weight accessors that should have no overhead
+ in release builds. If it is useful for readability reasons to
+ store the result in a temporary variable, the preferred method is:
+
+ wi::tree_to_wide_ref twide = wi::to_wide (t);
+ wi::tree_to_offset_ref toffset = wi::to_offset (t);
+ wi::tree_to_widest_ref twidest = wi::to_widest (t);
+
+ To make an rtx into a wide_int, you have to pair it with a mode.
+ The canonical way to do this is with rtx_mode_t as in:
rtx r = ...
wide_int x = rtx_mode_t (r, mode);
offset_int x = (int) c; // sign-extend C
widest_int x = (unsigned int) c; // zero-extend C
- It is also possible to do arithmetic directly on trees, rtxes and
+ It is also possible to do arithmetic directly on rtx_mode_ts and
constants. For example:
- wi::add (t1, t2); // add equal-sized INTEGER_CSTs t1 and t2
- wi::add (t1, 1); // add 1 to INTEGER_CST t1
- wi::add (r1, r2); // add equal-sized rtx constants r1 and r2
+ wi::add (r1, r2); // add equal-sized rtx_mode_ts r1 and r2
+ wi::add (r1, 1); // add 1 to rtx_mode_t r1
wi::lshift (1, 100); // 1 << 100 as a widest_int
Many binary operations place restrictions on the combinations of inputs,
using the following rules:
- - {tree, rtx, wide_int} op {tree, rtx, wide_int} -> wide_int
+ - {rtx, wide_int} op {rtx, wide_int} -> wide_int
The inputs must be the same precision. The result is a wide_int
of the same precision
- - {tree, rtx, wide_int} op (un)signed HOST_WIDE_INT -> wide_int
- (un)signed HOST_WIDE_INT op {tree, rtx, wide_int} -> wide_int
+ - {rtx, wide_int} op (un)signed HOST_WIDE_INT -> wide_int
+ (un)signed HOST_WIDE_INT op {rtx, wide_int} -> wide_int
The HOST_WIDE_INT is extended or truncated to the precision of
the other input. The result is a wide_int of the same precision
as that input.
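
A short sketch of how these rules interact with the new tree accessors (t1 and t2 are hypothetical INTEGER_CSTs, not from the patch):

  /* Sketch only: operands of a wide_int binary operation must share a
     precision, so a tree of a different type is converted explicitly
     using the two-argument wi::to_wide before being combined.  */
  wide_int a = wi::to_wide (t1);
  wide_int b = wi::to_wide (t2, a.get_precision ());
  wide_int sum = a + b;
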
typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) offset_int;
typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION) widest_int;
-template <bool SE>
+/* wi::storage_ref can be a reference to a primitive type,
+ so this is the conservatively-correct setting. */
+template <bool SE, bool HDP = true>
struct wide_int_ref_storage;
typedef generic_wide_int <wide_int_ref_storage <false> > wide_int_ref;
to use those. */
#define WIDE_INT_REF_FOR(T) \
generic_wide_int \
- <wide_int_ref_storage <wi::int_traits <T>::is_sign_extended> >
+ <wide_int_ref_storage <wi::int_traits <T>::is_sign_extended, \
+ wi::int_traits <T>::host_dependent_precision> >
namespace wi
{
/* Provide the storage for a wide_int_ref. This acts like a read-only
wide_int, with the optimization that VAL is normally a pointer to
another integer's storage, so that no array copy is needed. */
-template <bool SE>
+template <bool SE, bool HDP>
struct wide_int_ref_storage : public wi::storage_ref
{
private:
};
/* Create a reference from an existing reference. */
-template <bool SE>
-inline wide_int_ref_storage <SE>::
+template <bool SE, bool HDP>
+inline wide_int_ref_storage <SE, HDP>::
wide_int_ref_storage (const wi::storage_ref &x)
: storage_ref (x)
{}
/* Create a reference to integer X in its natural precision. Note
that the natural precision is host-dependent for primitive
types. */
-template <bool SE>
+template <bool SE, bool HDP>
template <typename T>
-inline wide_int_ref_storage <SE>::wide_int_ref_storage (const T &x)
+inline wide_int_ref_storage <SE, HDP>::wide_int_ref_storage (const T &x)
: storage_ref (wi::int_traits <T>::decompose (scratch,
wi::get_precision (x), x))
{
}
/* Create a reference to integer X in precision PRECISION. */
-template <bool SE>
+template <bool SE, bool HDP>
template <typename T>
-inline wide_int_ref_storage <SE>::wide_int_ref_storage (const T &x,
- unsigned int precision)
+inline wide_int_ref_storage <SE, HDP>::
+wide_int_ref_storage (const T &x, unsigned int precision)
: storage_ref (wi::int_traits <T>::decompose (scratch, precision, x))
{
}
namespace wi
{
- template <bool SE>
- struct int_traits <wide_int_ref_storage <SE> >
+ template <bool SE, bool HDP>
+ struct int_traits <wide_int_ref_storage <SE, HDP> >
{
static const enum precision_type precision_type = VAR_PRECISION;
- /* wi::storage_ref can be a reference to a primitive type,
- so this is the conservatively-correct setting. */
- static const bool host_dependent_precision = true;
+ static const bool host_dependent_precision = HDP;
static const bool is_sign_extended = SE;
};
}