From: François Dumont
Date: Tue, 18 Sep 2018 20:36:16 +0000 (+0000)
Subject: re PR libstdc++/87135 ([C++17] unordered containers violate iterator validity require...
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=a521e62615e439aea7502a52fd0f8a21eaa6304f;p=gcc.git

re PR libstdc++/87135 ([C++17] unordered containers violate iterator validity requirements)

2018-09-18  François Dumont

	PR libstdc++/87135
	* src/c++11/hashtable_c++0x.cc:
	(_Prime_rehash_policy::_M_next_bkt): Return a prime no smaller than
	the requested size, but not necessarily greater.
	(_Prime_rehash_policy::_M_need_rehash): Rehash only if the target size
	is strictly greater than the next resize threshold.
	* testsuite/23_containers/unordered_map/modifiers/reserve.cc: Adapt test
	to validate that there is no rehash as long as the number of insertions
	is less than or equal to the reserved number of elements.

From-SVN: r264413
---

diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 0ed4bafbb77..433bcdd4444 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,15 @@
+2018-09-18  François Dumont
+
+	PR libstdc++/87135
+	* src/c++11/hashtable_c++0x.cc:
+	(_Prime_rehash_policy::_M_next_bkt): Return a prime no smaller than
+	the requested size, but not necessarily greater.
+	(_Prime_rehash_policy::_M_need_rehash): Rehash only if the target size
+	is strictly greater than the next resize threshold.
+	* testsuite/23_containers/unordered_map/modifiers/reserve.cc: Adapt test
+	to validate that there is no rehash as long as the number of insertions
+	is less than or equal to the reserved number of elements.
+
 2018-09-18  Jonathan Wakely
 
 	* include/bits/unique_ptr.h (__uniq_ptr_impl): Remove static assertion

diff --git a/libstdc++-v3/src/c++11/hashtable_c++0x.cc b/libstdc++-v3/src/c++11/hashtable_c++0x.cc
index a776a8506fe..462767612f0 100644
--- a/libstdc++-v3/src/c++11/hashtable_c++0x.cc
+++ b/libstdc++-v3/src/c++11/hashtable_c++0x.cc
@@ -46,10 +46,10 @@ namespace __detail
   {
     // Optimize lookups involving the first elements of __prime_list.
     // (useful to speed-up, eg, constructors)
-    static const unsigned char __fast_bkt[13]
-      = { 2, 2, 3, 5, 5, 7, 7, 11, 11, 11, 11, 13, 13 };
+    static const unsigned char __fast_bkt[]
+      = { 2, 2, 2, 3, 5, 5, 7, 7, 11, 11, 11, 11, 13, 13 };
 
-    if (__n <= 12)
+    if (__n < sizeof(__fast_bkt))
       {
 	_M_next_resize =
 	  __builtin_ceil(__fast_bkt[__n] * (long double)_M_max_load_factor);
@@ -65,9 +65,8 @@ namespace __detail
     // iterator that can be dereferenced to get the last prime.
     constexpr auto __last_prime = __prime_list + __n_primes - 1;
 
-    // Look for 'n + 1' to make sure returned value will be greater than n.
     const unsigned long* __next_bkt =
-      std::lower_bound(__prime_list + 6, __last_prime, __n + 1);
+      std::lower_bound(__prime_list + 6, __last_prime, __n);
 
     if (__next_bkt == __last_prime)
       // Set next resize to the max value so that we never try to rehash again
@@ -95,7 +94,7 @@ namespace __detail
   _M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
 		 std::size_t __n_ins) const
   {
-    if (__n_elt + __n_ins >= _M_next_resize)
+    if (__n_elt + __n_ins > _M_next_resize)
       {
 	long double __min_bkts = (__n_elt + __n_ins)
 				 / (long double)_M_max_load_factor;

diff --git a/libstdc++-v3/testsuite/23_containers/unordered_map/modifiers/reserve.cc b/libstdc++-v3/testsuite/23_containers/unordered_map/modifiers/reserve.cc
index e9cf7fd6f67..7f34325df87 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_map/modifiers/reserve.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_map/modifiers/reserve.cc
@@ -18,23 +18,46 @@
 // <http://www.gnu.org/licenses/>.
 #include <unordered_map>
+
 #include <testsuite_hooks.h>
 
 void test01()
 {
-  const int N = 1000;
-
   typedef std::unordered_map<int, int> Map;
   Map m;
-  m.reserve(N);
 
-  std::size_t bkts = m.bucket_count();
-  for (int i = 0; i != N; ++i)
+  // Make sure the max load factor is 1 so that the reserved number of
+  // elements is exactly the bucket count.
+  m.max_load_factor(1);
+
+  int i = -1;
+  for (;;)
     {
-      m.insert(std::make_pair(i, i));
-      // As long as we insert less than the reserved number of elements we
-      // shouldn't experiment any rehash.
+      m.reserve(m.bucket_count());
+
+      std::size_t bkts = m.bucket_count();
+
+      m.reserve(bkts);
       VERIFY( m.bucket_count() == bkts );
+
+      for (++i; i < bkts; ++i)
+	{
+	  m.insert(std::make_pair(i, i));
+
+	  // As long as we insert no more than the reserved number of elements
+	  // we shouldn't experience any rehash.
+	  VERIFY( m.bucket_count() == bkts );
+
+	  VERIFY( m.load_factor() <= m.max_load_factor() );
+	}
+
+      // One more insertion should trigger a rehash.
+      m.insert(std::make_pair(i, i));
+      VERIFY( m.bucket_count() != bkts );
+      VERIFY( m.load_factor() <= m.max_load_factor() );
+
+      if (i > 1024)
+	break;
+    }
 }
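
For reference only (not part of the patch above), here is a minimal standalone sketch of the guarantee the updated test exercises: with max_load_factor() == 1, reserve(n) followed by at most n insertions should not rehash, so the bucket count stays stable and iterators remain valid. The map name m and the element count n are illustrative values chosen for this sketch, not taken from the commit.

#include <cassert>
#include <cstddef>
#include <unordered_map>

int main()
{
  std::unordered_map<int, int> m;
  m.max_load_factor(1);

  const std::size_t n = 1000;
  m.reserve(n);                          // with max load factor 1, guarantees >= n buckets

  const std::size_t bkts = m.bucket_count();
  for (std::size_t i = 0; i != n; ++i)
    {
      m.insert({static_cast<int>(i), 0});
      // No rehash may occur while size() <= the reserved element count,
      // so the bucket count must not change and iterators stay valid.
      assert(m.bucket_count() == bkts);
      assert(m.load_factor() <= m.max_load_factor());
    }
  return 0;
}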