1 // Copyright 2016 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
20 opLoad = mapOp("Load")
21 opStore = mapOp("Store")
22 opLoadOrStore = mapOp("LoadOrStore")
23 opLoadAndDelete = mapOp("LoadAndDelete")
24 opDelete = mapOp("Delete")
27 var mapOps = [...]mapOp{opLoad, opStore, opLoadOrStore, opLoadAndDelete, opDelete}
29 // mapCall is a quick.Generator for calls on mapInterface.
35 func (c mapCall) apply(m mapInterface) (interface{}, bool) {
43 return m.LoadOrStore(c.k, c.v)
45 return m.LoadAndDelete(c.k)
50 panic("invalid mapOp")
54 type mapResult struct {
59 func randValue(r *rand.Rand) interface{} {
60 b := make([]byte, r.Intn(4))
62 b[i] = 'a' + byte(rand.Intn(26))
67 func (mapCall) Generate(r *rand.Rand, size int) reflect.Value {
68 c := mapCall{op: mapOps[rand.Intn(len(mapOps))], k: randValue(r)}
70 case opStore, opLoadOrStore:
73 return reflect.ValueOf(c)
76 func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[interface{}]interface{}) {
77 for _, c := range calls {
79 results = append(results, mapResult{v, ok})
82 final = make(map[interface{}]interface{})
83 m.Range(func(k, v interface{}) bool {
91 func applyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
92 return applyCalls(new(sync.Map), calls)
95 func applyRWMutexMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
96 return applyCalls(new(RWMutexMap), calls)
99 func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
100 return applyCalls(new(DeepCopyMap), calls)
103 func TestMapMatchesRWMutex(t *testing.T) {
104 if err := quick.CheckEqual(applyMap, applyRWMutexMap, nil); err != nil {
109 func TestMapMatchesDeepCopy(t *testing.T) {
110 if err := quick.CheckEqual(applyMap, applyDeepCopyMap, nil); err != nil {
// TestConcurrentRange checks Range's snapshot guarantees while other
// goroutines concurrently mutate the map: every Range pass must see each
// of the mapSize keys exactly once, and every observed value must be a
// multiple of its key (writers only ever store k*i*g for key k).
func TestConcurrentRange(t *testing.T) {
	const mapSize = 1 << 10

	m := new(sync.Map)
	for n := int64(1); n <= mapSize; n++ {
		m.Store(n, int64(n))
	}

	done := make(chan struct{})
	var wg sync.WaitGroup
	defer func() {
		// Stop the mutator goroutines and wait for them before returning.
		close(done)
		wg.Wait()
	}()
	for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- {
		r := rand.New(rand.NewSource(g))
		wg.Add(1)
		go func(g int64) {
			defer wg.Done()
			for i := int64(0); ; i++ {
				select {
				case <-done:
					return
				default:
				}
				for n := int64(1); n < mapSize; n++ {
					if r.Int63n(mapSize) == 0 {
						// Occasionally overwrite with another multiple of n.
						m.Store(n, n*i*g)
					} else {
						m.Load(n)
					}
				}
			}
		}(g)
	}

	iters := 1 << 10
	if testing.Short() {
		iters = 16
	}
	for n := iters; n > 0; n-- {
		seen := make(map[int64]bool, mapSize)

		m.Range(func(ki, vi interface{}) bool {
			k, v := ki.(int64), vi.(int64)
			if v%k != 0 {
				t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v)
			}
			if seen[k] {
				t.Fatalf("Range visited key %v twice", k)
			}
			seen[k] = true
			return true
		})

		if len(seen) != mapSize {
			t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize)
		}
	}
}
// TestIssue40999 is a regression test for golang.org/issue/40999:
// repeatedly storing and deleting keys must not leak them. It loops
// until at least one key's finalizer runs, proving deleted keys become
// collectable.
func TestIssue40999(t *testing.T) {
	var m sync.Map

	// Since the miss-counting in missLocked (via Delete)
	// compares the miss count with len(m.dirty),
	// add an initial entry to bias len(m.dirty) above the miss count.
	m.Store(nil, struct{}{})

	var finalized uint32

	// Set finalizers that count for collected keys. A non-zero count
	// indicates that keys have not been leaked.
	for atomic.LoadUint32(&finalized) == 0 {
		p := new(int)
		runtime.SetFinalizer(p, func(*int) {
			atomic.AddUint32(&finalized, 1)
		})
		m.Store(p, struct{}{})
		m.Delete(p)
		runtime.GC()
	}
}