libgo/runtime/mcentral.c
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.h for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).
//
// TODO(rsc): tcmalloc uses a "transfer cache" to split the list
// into sections of class_to_transfercount[sizeclass] objects
// so that it is faster to move those lists between MCaches and MCentrals.

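// A rough sketch of the MCentral layout as used in this file. The
// authoritative definition lives in malloc.h; the field names below are
// inferred from the accesses made here, not copied from that header.
//
//	struct MCentral
//	{
//		Lock;			// guards the span lists and nfree
//		int32 sizeclass;	// size class served by this central list
//		MSpan nonempty;		// spans with at least one free object
//		MSpan empty;		// spans whose objects are all allocated
//		int32 nfree;		// free objects available in nonempty spans
//	};
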
#include "runtime.h"
#include "arch.h"
#include "malloc.h"

static bool MCentral_Grow(MCentral *c);
static void* MCentral_Alloc(MCentral *c);
static void MCentral_Free(MCentral *c, void *v);

// Initialize a single central free list.
void
runtime_MCentral_Init(MCentral *c, int32 sizeclass)
{
	runtime_initlock(c);
	c->sizeclass = sizeclass;
	runtime_MSpanList_Init(&c->nonempty);
	runtime_MSpanList_Init(&c->empty);
}

// Allocate up to n objects from the central free list.
// Return the number of objects allocated.
// The objects are linked together by their first words.
// On return, *pfirst points at the first object in the list (nil if none).
int32
runtime_MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst)
{
	MLink *first, *last, *v;
	int32 i;

	runtime_lock(c);
	// Replenish central list if empty.
	if(runtime_MSpanList_IsEmpty(&c->nonempty)) {
		if(!MCentral_Grow(c)) {
			runtime_unlock(c);
			*pfirst = nil;
			return 0;
		}
	}

	// Copy from list, up to n.
	// First one is guaranteed to work, because we just grew the list.
	first = MCentral_Alloc(c);
	last = first;
	for(i=1; i<n && (v = MCentral_Alloc(c)) != nil; i++) {
		last->next = v;
		last = v;
	}
	last->next = nil;
	c->nfree -= i;

	runtime_unlock(c);
	*pfirst = first;
	return i;
}

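// Usage sketch (illustration only, not part of this file): a per-thread
// cache refilling itself for this size class might do roughly:
//
//	MLink *first;
//	int32 n;
//
//	n = runtime_MCentral_AllocList(c, want, &first);	// want: desired count
//	// On success, first heads a nil-terminated list of n objects;
//	// if the heap is exhausted, n is 0 and first is nil.
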
// Helper: allocate one object from the central free list.
static void*
MCentral_Alloc(MCentral *c)
{
	MSpan *s;
	MLink *v;

	if(runtime_MSpanList_IsEmpty(&c->nonempty))
		return nil;
	s = c->nonempty.next;
	s->ref++;
	v = s->freelist;
	s->freelist = v->next;
	if(s->freelist == nil) {
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->empty, s);
	}
	return v;
}

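// Note (observation about the code above, not in the original source):
// a span stays on c->nonempty only while s->freelist != nil, so the pop
// above never sees an empty freelist on a span taken from that list.
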
// Free n objects back into the central free list.
// The objects are linked together by their first words;
// start points at the first object and the list is terminated by nil.
void
runtime_MCentral_FreeList(MCentral *c, int32 n, MLink *start)
{
	MLink *v, *next;

	// Assume next == nil marks end of list.
	// n (and an explicit end pointer) would be useful if we implemented
	// the transfer cache optimization in the TODO above.
	USED(n);

	runtime_lock(c);
	for(v=start; v; v=next) {
		next = v->next;
		MCentral_Free(c, v);
	}
	runtime_unlock(c);
}

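// Usage sketch (illustration only, not part of this file): the cache side
// hands back a batch of objects it no longer needs roughly like this:
//
//	// first heads a nil-terminated list of n objects of c's size class.
//	runtime_MCentral_FreeList(c, n, first);
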
// Helper: free one object back into the central free list.
static void
MCentral_Free(MCentral *c, void *v)
{
	MSpan *s;
	MLink *p;
	int32 size;

	// Find span for v.
	s = runtime_MHeap_Lookup(&runtime_mheap, v);
	if(s == nil || s->ref == 0)
		runtime_throw("invalid free");

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}

	// Add v back to s's free list.
	p = v;
	p->next = s->freelist;
	s->freelist = p;
	c->nfree++;

	// If s is completely freed, return it to the heap.
	if(--s->ref == 0) {
		size = runtime_class_to_size[c->sizeclass];
		runtime_MSpanList_Remove(s);
		runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
		*(uintptr*)(s->start<<PageShift) = 1;	// needs zeroing
		s->freelist = nil;
		c->nfree -= (s->npages << PageShift) / size;
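		// Drop the central list lock before calling into the heap
		// (runtime_MHeap_Free takes the heap lock), presumably so the
		// two locks are never held at once; c is relocked below.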
		runtime_unlock(c);
		runtime_MHeap_Free(&runtime_mheap, s, 0);
		runtime_lock(c);
	}
}

void
runtime_MGetSizeClassInfo(int32 sizeclass, uintptr *sizep, int32 *npagesp, int32 *nobj)
{
	int32 size;
	int32 npages;

	npages = runtime_class_to_allocnpages[sizeclass];
	size = runtime_class_to_size[sizeclass];
	*npagesp = npages;
	*sizep = size;
	*nobj = (npages << PageShift) / size;
}

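// Worked example (assumed numbers, illustration only): for a size class
// of 32-byte objects carved from a single page, with PageShift == 12
// (4KB pages), *nobj = (1<<12)/32 = 128 objects per span.
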
// Fetch a new span from the heap and
// carve into objects for the free list.
static bool
MCentral_Grow(MCentral *c)
{
	int32 i, n, npages;
	uintptr size;
	MLink **tailp, *v;
	byte *p;
	MSpan *s;

	runtime_unlock(c);
	runtime_MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
	s = runtime_MHeap_Alloc(&runtime_mheap, npages, c->sizeclass, 0);
	if(s == nil) {
		// TODO(rsc): Log out of memory
		runtime_lock(c);
		return false;
	}

	// Carve span into sequence of blocks.
	tailp = &s->freelist;
	p = (byte*)(s->start << PageShift);
	s->limit = p + size*n;
	for(i=0; i<n; i++) {
		v = (MLink*)p;
		*tailp = v;
		tailp = &v->next;
		p += size;
	}
	*tailp = nil;
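	// The span's free list now links all n blocks in increasing address
	// order, ending in nil.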
	runtime_markspan((byte*)(s->start<<PageShift), size, n, size*n < (s->npages<<PageShift));

	runtime_lock(c);
	c->nfree += n;
	runtime_MSpanList_Insert(&c->nonempty, s);
	return true;
}