+ {
+ /* Maximum available size in bytes. Half of the address space
+ minus the cookie size. */
+ double_int max_size
+ = double_int_lshift (double_int_one, TYPE_PRECISION (sizetype) - 1,
+ HOST_BITS_PER_DOUBLE_INT, false);
+ /* Size of the inner array elements. */
+ double_int inner_size;
+ /* Maximum number of outer elements which can be allocated. */
+ double_int max_outer_nelts;
+ tree max_outer_nelts_tree;
+
+ gcc_assert (TREE_CODE (size) == INTEGER_CST);
+ cookie_size = targetm.cxx.get_cookie_size (elt_type);
+ gcc_assert (TREE_CODE (cookie_size) == INTEGER_CST);
+ /* The cookie must be strictly smaller than MAX_SIZE, otherwise the
+ unsigned subtraction below would wrap around. */
+ gcc_checking_assert (double_int_ucmp
+ (TREE_INT_CST (cookie_size), max_size) < 0);
+ /* Unconditionally subtract the cookie size. This decreases the
+ maximum object size and is safe even if we choose not to use
+ a cookie after all. */
+ max_size = double_int_sub (max_size, TREE_INT_CST (cookie_size));
+ /* A nonzero return from mul_double means the double_int product
+ itself overflowed; either way the requested inner byte size does
+ not fit in the remaining address space, so reject it. */
+ if (mul_double (TREE_INT_CST_LOW (size), TREE_INT_CST_HIGH (size),
+ inner_nelts_count.low, inner_nelts_count.high,
+ &inner_size.low, &inner_size.high)
+ || double_int_ucmp (inner_size, max_size) > 0)
+ {
+ if (complain & tf_error)
+ error ("size of array is too large");
+ return error_mark_node;
+ }
+ max_outer_nelts = double_int_udiv (max_size, inner_size, TRUNC_DIV_EXPR);
+ /* Only keep the top-most seven bits, to simplify encoding the
+ constant in the instruction stream. */
+ {
+ /* Rounding DOWN via a right-then-left shift pair keeps the bound
+ conservative (never larger than the exact quotient). */
+ unsigned shift = HOST_BITS_PER_DOUBLE_INT - 7
+ - (max_outer_nelts.high ? clz_hwi (max_outer_nelts.high)
+ : (HOST_BITS_PER_WIDE_INT + clz_hwi (max_outer_nelts.low)));
+ max_outer_nelts
+ = double_int_lshift (double_int_rshift
+ (max_outer_nelts, shift,
+ HOST_BITS_PER_DOUBLE_INT, false),
+ shift, HOST_BITS_PER_DOUBLE_INT, false);
+ }
+ max_outer_nelts_tree = double_int_to_tree (sizetype, max_outer_nelts);
+
+ /* Total byte size of all elements (inner size times outer count). */
+ size = size_binop (MULT_EXPR, size, convert (sizetype, nelts));
+ /* Build the check OUTER_NELTS <= MAX_OUTER_NELTS; presumably folded
+ to a constant when NELTS is constant, otherwise evaluated at run
+ time by the caller -- TODO confirm against the enclosing function. */
+ outer_nelts_check = fold_build2 (LE_EXPR, boolean_type_node,
+ outer_nelts,
+ max_outer_nelts_tree);
+ }