+2018-09-18 François Dumont <fdumont@gcc.gnu.org>
+
+ PR libstdc++/87135
+ * src/c++11/hashtable_c++0x.cc:
+ (_Prime_rehash_policy::_M_next_bkt): Return a prime no smaller than the
+ requested size, but not necessarily greater.
+ (_Prime_rehash_policy::_M_need_rehash): Rehash only if target size is
+ strictly greater than next resize threshold.
+ * testsuite/23_containers/unordered_map/modifiers/reserve.cc: Adapt test
+ to validate that there is no rehash as long as the number of insertions
+ is lower than or equal to the reserved number of elements.
+
2018-09-18 Jonathan Wakely <jwakely@redhat.com>
* include/bits/unique_ptr.h (__uniq_ptr_impl): Remove static assertion
{
// Optimize lookups involving the first elements of __prime_list.
// (useful to speed-up, eg, constructors)
- static const unsigned char __fast_bkt[13]
- = { 2, 2, 3, 5, 5, 7, 7, 11, 11, 11, 11, 13, 13 };
+ static const unsigned char __fast_bkt[]
+ = { 2, 2, 2, 3, 5, 5, 7, 7, 11, 11, 11, 11, 13, 13 };
- if (__n <= 12)
+ if (__n < sizeof(__fast_bkt))
{
_M_next_resize =
__builtin_ceil(__fast_bkt[__n] * (long double)_M_max_load_factor);
// iterator that can be dereferenced to get the last prime.
constexpr auto __last_prime = __prime_list + __n_primes - 1;
- // Look for 'n + 1' to make sure returned value will be greater than n.
const unsigned long* __next_bkt =
- std::lower_bound(__prime_list + 6, __last_prime, __n + 1);
+ std::lower_bound(__prime_list + 6, __last_prime, __n);
if (__next_bkt == __last_prime)
// Set next resize to the max value so that we never try to rehash again
_M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
std::size_t __n_ins) const
{
- if (__n_elt + __n_ins >= _M_next_resize)
+ if (__n_elt + __n_ins > _M_next_resize)
{
long double __min_bkts = (__n_elt + __n_ins)
/ (long double)_M_max_load_factor;
// <http://www.gnu.org/licenses/>.
#include <unordered_map>
+
#include <testsuite_hooks.h>
void test01()
{
- const int N = 1000;
-
typedef std::unordered_map<int, int> Map;
Map m;
- m.reserve(N);
- std::size_t bkts = m.bucket_count();
- for (int i = 0; i != N; ++i)
+ // Make sure the max load factor is 1 so that the reserved number of
+ // elements is exactly the bucket count.
+ m.max_load_factor(1);
+
+ int i = -1;
+ for (;;)
{
- m.insert(std::make_pair(i, i));
- // As long as we insert less than the reserved number of elements we
- // shouldn't experiment any rehash.
+ m.reserve(m.bucket_count());
+
+ std::size_t bkts = m.bucket_count();
+
+ m.reserve(bkts);
VERIFY( m.bucket_count() == bkts );
+
+ for (++i; i < bkts; ++i)
+ {
+ m.insert(std::make_pair(i, i));
+
+ // As long as we do not insert more than the reserved number of
+ // elements we shouldn't experience any rehash.
+ VERIFY( m.bucket_count() == bkts );
+
+ VERIFY( m.load_factor() <= m.max_load_factor() );
+ }
+
+ // One more element should rehash.
+ m.insert(std::make_pair(i, i));
+ VERIFY( m.bucket_count() != bkts );
+ VERIFY( m.load_factor() <= m.max_load_factor() );
+
+ if (i > 1024)
+ break;
}
}