Daily bump.
[gcc.git] / libgo / go / sync / map_test.go
1 // Copyright 2016 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 package sync_test
6
7 import (
8 "math/rand"
9 "reflect"
10 "runtime"
11 "sync"
12 "sync/atomic"
13 "testing"
14 "testing/quick"
15 )
16
// mapOp names one of the sync.Map operations exercised by the
// randomized quick-check tests below.
type mapOp string

const (
	opLoad          = mapOp("Load")
	opStore         = mapOp("Store")
	opLoadOrStore   = mapOp("LoadOrStore")
	opLoadAndDelete = mapOp("LoadAndDelete")
	opDelete        = mapOp("Delete")
)

// mapOps lists every operation so a generator can pick one uniformly at random.
var mapOps = [...]mapOp{opLoad, opStore, opLoadOrStore, opLoadAndDelete, opDelete}
28
// mapCall is a quick.Generator for calls on mapInterface.
type mapCall struct {
	op   mapOp       // which map operation to invoke
	k, v interface{} // key operand; v is set only for Store/LoadOrStore (see Generate)
}
34
35 func (c mapCall) apply(m mapInterface) (interface{}, bool) {
36 switch c.op {
37 case opLoad:
38 return m.Load(c.k)
39 case opStore:
40 m.Store(c.k, c.v)
41 return nil, false
42 case opLoadOrStore:
43 return m.LoadOrStore(c.k, c.v)
44 case opLoadAndDelete:
45 return m.LoadAndDelete(c.k)
46 case opDelete:
47 m.Delete(c.k)
48 return nil, false
49 default:
50 panic("invalid mapOp")
51 }
52 }
53
// mapResult records the (value, ok) pair produced by a single mapCall,
// so whole call sequences can be compared across map implementations.
type mapResult struct {
	value interface{}
	ok    bool
}
58
59 func randValue(r *rand.Rand) interface{} {
60 b := make([]byte, r.Intn(4))
61 for i := range b {
62 b[i] = 'a' + byte(rand.Intn(26))
63 }
64 return string(b)
65 }
66
67 func (mapCall) Generate(r *rand.Rand, size int) reflect.Value {
68 c := mapCall{op: mapOps[rand.Intn(len(mapOps))], k: randValue(r)}
69 switch c.op {
70 case opStore, opLoadOrStore:
71 c.v = randValue(r)
72 }
73 return reflect.ValueOf(c)
74 }
75
76 func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[interface{}]interface{}) {
77 for _, c := range calls {
78 v, ok := c.apply(m)
79 results = append(results, mapResult{v, ok})
80 }
81
82 final = make(map[interface{}]interface{})
83 m.Range(func(k, v interface{}) bool {
84 final[k] = v
85 return true
86 })
87
88 return results, final
89 }
90
91 func applyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
92 return applyCalls(new(sync.Map), calls)
93 }
94
95 func applyRWMutexMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
96 return applyCalls(new(RWMutexMap), calls)
97 }
98
99 func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
100 return applyCalls(new(DeepCopyMap), calls)
101 }
102
103 func TestMapMatchesRWMutex(t *testing.T) {
104 if err := quick.CheckEqual(applyMap, applyRWMutexMap, nil); err != nil {
105 t.Error(err)
106 }
107 }
108
109 func TestMapMatchesDeepCopy(t *testing.T) {
110 if err := quick.CheckEqual(applyMap, applyDeepCopyMap, nil); err != nil {
111 t.Error(err)
112 }
113 }
114
// TestConcurrentRange checks Range under concurrent mutation: while
// writer goroutines overwrite values, every Range pass must visit each
// key exactly once, and every value seen must be a multiple of its key
// (writers only ever store multiples of n at key n).
func TestConcurrentRange(t *testing.T) {
	const mapSize = 1 << 10

	// Seed the map with key n -> value n for n in [1, mapSize].
	m := new(sync.Map)
	for n := int64(1); n <= mapSize; n++ {
		m.Store(n, int64(n))
	}

	done := make(chan struct{})
	var wg sync.WaitGroup
	// Registered before the writers start so that, even on t.Fatalf
	// (which runs deferred calls), the writers are stopped and drained
	// before the test function returns.
	defer func() {
		close(done)
		wg.Wait()
	}()
	// One writer goroutine per P, each with its own deterministic source.
	for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- {
		r := rand.New(rand.NewSource(g))
		wg.Add(1)
		go func(g int64) {
			defer wg.Done()
			for i := int64(0); ; i++ {
				select {
				case <-done:
					return
				default:
				}
				for n := int64(1); n < mapSize; n++ {
					if r.Int63n(mapSize) == 0 {
						// n*i*g is always a multiple of n, preserving
						// the invariant the Range below checks.
						m.Store(n, n*i*g)
					} else {
						m.Load(n)
					}
				}
			}
		}(g)
	}

	iters := 1 << 10
	if testing.Short() {
		iters = 16
	}
	for n := iters; n > 0; n-- {
		seen := make(map[int64]bool, mapSize)

		m.Range(func(ki, vi interface{}) bool {
			k, v := ki.(int64), vi.(int64)
			if v%k != 0 {
				t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v)
			}
			if seen[k] {
				t.Fatalf("Range visited key %v twice", k)
			}
			seen[k] = true
			return true
		})

		// No key may be skipped either: Range must cover the whole map.
		if len(seen) != mapSize {
			t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize)
		}
	}
}
175
// TestIssue40999 is a regression test verifying that Delete does not
// retain references to deleted keys: a stored-then-deleted *int key
// must become collectible, observed via its finalizer running.
func TestIssue40999(t *testing.T) {
	var m sync.Map

	// Since the miss-counting in missLocked (via Delete)
	// compares the miss count with len(m.dirty),
	// add an initial entry to bias len(m.dirty) above the miss count.
	m.Store(nil, struct{}{})

	var finalized uint32

	// Set finalizers that count for collected keys. A non-zero count
	// indicates that keys have not been leaked.
	for atomic.LoadUint32(&finalized) == 0 {
		p := new(int)
		runtime.SetFinalizer(p, func(*int) {
			atomic.AddUint32(&finalized, 1)
		})
		m.Store(p, struct{}{})
		m.Delete(p)
		// Force collection; if the map leaked p, no finalizer ever
		// runs and this loop would spin forever (test timeout).
		runtime.GC()
	}
}