libgomp/work.c
/* Copyright (C) 2005-2014 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains routines to manage the work-share queue for a team
   of threads.  */

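/* A typical caller in loop.c or sections.c drives these entry points roughly
   as follows (a sketch, not the exact libgomp code; gomp_loop_init and its
   arguments stand in for whatever per-construct initialization is needed):

     if (gomp_work_share_start (false))
       {
         // First thread to arrive: set up the schedule, then publish the
         // new work share to the rest of the team.
         gomp_loop_init (thr->ts.work_share, start, end, incr, sched, chunk);
         gomp_work_share_init_done ();
       }
     ... grab and execute chunks of iterations ...
     gomp_work_share_end_nowait ();  // or gomp_work_share_end () for a barrier
*/
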
#include "libgomp.h"
#include <stddef.h>
#include <stdlib.h>
#include <string.h>


/* Allocate a new work share structure, preferably from current team's
   free gomp_work_share cache.  */

static struct gomp_work_share *
alloc_work_share (struct gomp_team *team)
{
  struct gomp_work_share *ws;
  unsigned int i;

  /* This is called in a critical section.  */
  if (team->work_share_list_alloc != NULL)
    {
      ws = team->work_share_list_alloc;
      team->work_share_list_alloc = ws->next_free;
      return ws;
    }

#ifdef HAVE_SYNC_BUILTINS
  ws = team->work_share_list_free;
  /* We need atomic read from work_share_list_free,
     as free_work_share can be called concurrently.  */
  __asm ("" : "+r" (ws));

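  /* The empty asm above acts as a compiler barrier: it forces WS to be read
     from work_share_list_free exactly once.  Note that the head of the free
     list is never handed out here; free_work_share may be pushing new entries
     onto it concurrently with a compare-and-swap, so only the elements after
     the head are detached, and the second element is the one returned.  */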
  if (ws && ws->next_free)
    {
      struct gomp_work_share *next = ws->next_free;
      ws->next_free = NULL;
      team->work_share_list_alloc = next->next_free;
      return next;
    }
#else
  gomp_mutex_lock (&team->work_share_list_free_lock);
  ws = team->work_share_list_free;
  if (ws)
    {
      team->work_share_list_alloc = ws->next_free;
      team->work_share_list_free = NULL;
      gomp_mutex_unlock (&team->work_share_list_free_lock);
      return ws;
    }
  gomp_mutex_unlock (&team->work_share_list_free_lock);
#endif

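  /* Both free lists are empty: allocate a fresh chunk, twice the size of the
     previous one.  ws[0] is returned to the caller, ws[1] and beyond seed
     work_share_list_alloc, and the chunk is chained through next_alloc off
     team->work_shares[0] so it can be released when the team is destroyed.  */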
  team->work_share_chunk *= 2;
  ws = gomp_malloc (team->work_share_chunk * sizeof (struct gomp_work_share));
  ws->next_alloc = team->work_shares[0].next_alloc;
  team->work_shares[0].next_alloc = ws;
  team->work_share_list_alloc = &ws[1];
  for (i = 1; i < team->work_share_chunk - 1; i++)
    ws[i].next_free = &ws[i + 1];
  ws[i].next_free = NULL;
  return ws;
}

/* Initialize an already allocated struct gomp_work_share.
   This shouldn't touch the next_alloc field.  */

void
gomp_init_work_share (struct gomp_work_share *ws, bool ordered,
                      unsigned nthreads)
{
  gomp_mutex_init (&ws->lock);
  if (__builtin_expect (ordered, 0))
    {
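/* INLINE_ORDERED_TEAM_IDS_CNT is the number of team ids that fit in the
   inline buffer at the tail of struct gomp_work_share; teams no larger than
   that avoid a separate heap allocation for ordered_team_ids.  */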
#define INLINE_ORDERED_TEAM_IDS_CNT \
  ((sizeof (struct gomp_work_share) \
    - offsetof (struct gomp_work_share, inline_ordered_team_ids)) \
   / sizeof (((struct gomp_work_share *) 0)->inline_ordered_team_ids[0]))

      if (nthreads > INLINE_ORDERED_TEAM_IDS_CNT)
        ws->ordered_team_ids
          = gomp_malloc (nthreads * sizeof (*ws->ordered_team_ids));
      else
        ws->ordered_team_ids = ws->inline_ordered_team_ids;
      memset (ws->ordered_team_ids, '\0',
              nthreads * sizeof (*ws->ordered_team_ids));
      ws->ordered_num_used = 0;
      ws->ordered_owner = -1;
      ws->ordered_cur = 0;
    }
  else
    ws->ordered_team_ids = NULL;
  gomp_ptrlock_init (&ws->next_ws, NULL);
  ws->threads_completed = 0;
}

/* Do any needed destruction of gomp_work_share fields before it
   is put back into free gomp_work_share cache or freed.  */

void
gomp_fini_work_share (struct gomp_work_share *ws)
{
  gomp_mutex_destroy (&ws->lock);
  if (ws->ordered_team_ids != ws->inline_ordered_team_ids)
    free (ws->ordered_team_ids);
  gomp_ptrlock_destroy (&ws->next_ws);
}

/* Free a work share struct; if not orphaned, put it into the current
   team's free gomp_work_share cache.  */

static inline void
free_work_share (struct gomp_team *team, struct gomp_work_share *ws)
{
  gomp_fini_work_share (ws);
  if (__builtin_expect (team == NULL, 0))
    free (ws);
  else
    {
      struct gomp_work_share *next_ws;
#ifdef HAVE_SYNC_BUILTINS
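      /* Lock-free push of WS onto the head of the team's free list.  This can
         race with alloc_work_share, which for that reason never pops the head
         element itself.  */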
      do
        {
          next_ws = team->work_share_list_free;
          ws->next_free = next_ws;
        }
      while (!__sync_bool_compare_and_swap (&team->work_share_list_free,
                                            next_ws, ws));
#else
      gomp_mutex_lock (&team->work_share_list_free_lock);
      next_ws = team->work_share_list_free;
      ws->next_free = next_ws;
      team->work_share_list_free = ws;
      gomp_mutex_unlock (&team->work_share_list_free_lock);
#endif
    }
}

/* The current thread is ready to begin the next work sharing construct.
   In all cases, thr->ts.work_share is updated to point to the new
   structure.  In all cases the work_share lock is locked.  Return true
   if this was the first thread to reach this point.  */

bool
gomp_work_share_start (bool ordered)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      ws = gomp_malloc (sizeof (*ws));
      gomp_init_work_share (ws, ordered, 1);
      thr->ts.work_share = ws;
      return true;
    }

  ws = thr->ts.work_share;
  thr->ts.last_work_share = ws;
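  /* gomp_ptrlock_get returns NULL to exactly one thread, the first one to ask
     for the successor of the previous work share; that thread allocates and
     initializes the new work share below.  Every other thread blocks in the
     ptrlock until the new work share has been published and then picks it up
     here.  */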
  ws = gomp_ptrlock_get (&ws->next_ws);
  if (ws == NULL)
    {
      /* This thread encountered a new ws first.  */
      struct gomp_work_share *ws = alloc_work_share (team);
      gomp_init_work_share (ws, ordered, team->nthreads);
      thr->ts.work_share = ws;
      return true;
    }
  else
    {
      thr->ts.work_share = ws;
      return false;
    }
}

/* The current thread is done with its current work sharing construct.
   This version does imply a barrier at the end of the work-share.  */

void
gomp_work_share_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  gomp_barrier_state_t bstate;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (NULL, thr->ts.work_share);
      thr->ts.work_share = NULL;
      return;
    }

  bstate = gomp_barrier_wait_start (&team->barrier);

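  /* Only the last thread to arrive at the barrier does the cleanup: by then
     every thread has chained past the previous work share, so last_work_share
     can go back on the free list.  The current work share must stay live
     (its next_ws link seeds the next construct), so it is merely recorded in
     work_shares_to_free to be released later, e.g. when the team is torn
     down.  */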
  if (gomp_barrier_last_thread (bstate))
    {
      if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
        {
          team->work_shares_to_free = thr->ts.work_share;
          free_work_share (team, thr->ts.last_work_share);
        }
    }

  gomp_team_barrier_wait_end (&team->barrier, bstate);
  thr->ts.last_work_share = NULL;
}

/* The current thread is done with its current work sharing construct.
   This version implies a cancellable barrier at the end of the work-share.  */

bool
gomp_work_share_end_cancel (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  gomp_barrier_state_t bstate;

  /* Cancellable work sharing constructs cannot be orphaned.  */
  bstate = gomp_barrier_wait_cancel_start (&team->barrier);

  if (gomp_barrier_last_thread (bstate))
    {
      if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
        {
          team->work_shares_to_free = thr->ts.work_share;
          free_work_share (team, thr->ts.last_work_share);
        }
    }
  thr->ts.last_work_share = NULL;

  return gomp_team_barrier_wait_cancel_end (&team->barrier, bstate);
}

/* The current thread is done with its current work sharing construct.
   This version does NOT imply a barrier at the end of the work-share.  */

void
gomp_work_share_end_nowait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned completed;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (NULL, ws);
      thr->ts.work_share = NULL;
      return;
    }

  if (__builtin_expect (thr->ts.last_work_share == NULL, 0))
    return;

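  /* There is no barrier here, so the threads count themselves out instead:
     whichever thread brings threads_completed up to nthreads knows that every
     thread has moved past the previous work share, and frees it.  */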
#ifdef HAVE_SYNC_BUILTINS
  completed = __sync_add_and_fetch (&ws->threads_completed, 1);
#else
  gomp_mutex_lock (&ws->lock);
  completed = ++ws->threads_completed;
  gomp_mutex_unlock (&ws->lock);
#endif

  if (completed == team->nthreads)
    {
      team->work_shares_to_free = thr->ts.work_share;
      free_work_share (team, thr->ts.last_work_share);
    }
  thr->ts.last_work_share = NULL;
}