libgo/runtime/thread.c
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include <errno.h>
#include <signal.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "runtime.h"
#include "go-assert.h"

/* For targets which don't have the required sync support.  Really
   these should be provided by gcc itself.  FIXME.  */

#if !defined (HAVE_SYNC_BOOL_COMPARE_AND_SWAP_4) || !defined (HAVE_SYNC_BOOL_COMPARE_AND_SWAP_8) || !defined (HAVE_SYNC_FETCH_AND_ADD_4) || !defined (HAVE_SYNC_ADD_AND_FETCH_8)

static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;

#endif
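
/* For illustration: on a target without a native atomic instruction,
   gcc expands the generic __sync_* builtins into out-of-line calls
   named by operand size, so a source-level

       __sync_bool_compare_and_swap (&x, old, new)

   on a 4-byte x becomes a call to __sync_bool_compare_and_swap_4,
   which the fallbacks below satisfy by serializing on sync_lock.  */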

#ifndef HAVE_SYNC_BOOL_COMPARE_AND_SWAP_4

_Bool
__sync_bool_compare_and_swap_4 (uint32*, uint32, uint32)
  __attribute__ ((visibility ("hidden")));

_Bool
__sync_bool_compare_and_swap_4 (uint32* ptr, uint32 old, uint32 new)
{
  int i;
  _Bool ret;

  i = pthread_mutex_lock (&sync_lock);
  __go_assert (i == 0);

  if (*ptr != old)
    ret = 0;
  else
    {
      *ptr = new;
      ret = 1;
    }

  i = pthread_mutex_unlock (&sync_lock);
  __go_assert (i == 0);

  return ret;
}

#endif
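
/* For illustration: a boolean compare-and-swap such as the fallback
   above is normally driven from a retry loop.  This hypothetical
   helper (not part of libgo) builds an atomic increment on top of it
   and returns the new value.  */

static uint32 example_incr_4 (uint32 *) __attribute__ ((unused));

static uint32
example_incr_4 (uint32 *ptr)
{
  uint32 old;

  /* Reload and retry until no other thread changes the value between
     the read and the swap.  */
  do
    old = *ptr;
  while (!__sync_bool_compare_and_swap_4 (ptr, old, old + 1));
  return old + 1;
}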

#ifndef HAVE_SYNC_BOOL_COMPARE_AND_SWAP_8

_Bool
__sync_bool_compare_and_swap_8 (uint64*, uint64, uint64)
  __attribute__ ((visibility ("hidden")));

_Bool
__sync_bool_compare_and_swap_8 (uint64* ptr, uint64 old, uint64 new)
{
  int i;
  _Bool ret;

  i = pthread_mutex_lock (&sync_lock);
  __go_assert (i == 0);

  if (*ptr != old)
    ret = 0;
  else
    {
      *ptr = new;
      ret = 1;
    }

  i = pthread_mutex_unlock (&sync_lock);
  __go_assert (i == 0);

  return ret;
}

#endif
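
/* For illustration: an unconditional 64-bit atomic store can also be
   built from the boolean compare-and-swap above by retrying until the
   swap succeeds.  Hypothetical helper, not part of libgo.  */

static void example_store_8 (uint64 *, uint64) __attribute__ ((unused));

static void
example_store_8 (uint64 *ptr, uint64 val)
{
  uint64 old;

  do
    old = *ptr;
  while (!__sync_bool_compare_and_swap_8 (ptr, old, val));
}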

#ifndef HAVE_SYNC_FETCH_AND_ADD_4

uint32
__sync_fetch_and_add_4 (uint32*, uint32)
  __attribute__ ((visibility ("hidden")));

uint32
__sync_fetch_and_add_4 (uint32* ptr, uint32 add)
{
  int i;
  uint32 ret;

  i = pthread_mutex_lock (&sync_lock);
  __go_assert (i == 0);

  ret = *ptr;
  *ptr += add;

  i = pthread_mutex_unlock (&sync_lock);
  __go_assert (i == 0);

  return ret;
}

#endif
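
/* For illustration: __sync_fetch_and_add_4 returns the value *before*
   the addition, which is the right shape for handing out unique slots
   or ticket numbers.  Hypothetical helper, not part of libgo.  */

static uint32 example_next_ticket (uint32 *) __attribute__ ((unused));

static uint32
example_next_ticket (uint32 *counter)
{
  /* Two racing callers always receive distinct tickets.  */
  return __sync_fetch_and_add_4 (counter, 1);
}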

#ifndef HAVE_SYNC_ADD_AND_FETCH_8

uint64
__sync_add_and_fetch_8 (uint64*, uint64)
  __attribute__ ((visibility ("hidden")));

uint64
__sync_add_and_fetch_8 (uint64* ptr, uint64 add)
{
  int i;
  uint64 ret;

  i = pthread_mutex_lock (&sync_lock);
  __go_assert (i == 0);

  *ptr += add;
  ret = *ptr;

  i = pthread_mutex_unlock (&sync_lock);
  __go_assert (i == 0);

  return ret;
}

#endif
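
/* For illustration: unlike the 4-byte fallback above, which returns
   the old value, __sync_add_and_fetch_8 returns the value *after* the
   addition, so the caller can act on the updated count directly.
   Hypothetical helper, not part of libgo.  */

static _Bool example_release_ref (uint64 *) __attribute__ ((unused));

static _Bool
example_release_ref (uint64 *refcount)
{
  /* Adding (uint64)-1 subtracts one modulo 2^64; the caller that sees
     zero dropped the last reference.  */
  return __sync_add_and_fetch_8 (refcount, (uint64)-1) == 0;
}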

uintptr
runtime_memlimit(void)
{
	struct rlimit rl;
	uintptr used;

	if(getrlimit(RLIMIT_AS, &rl) != 0)
		return 0;
	if(rl.rlim_cur >= 0x7fffffff)
		return 0;

	// Estimate our VM footprint excluding the heap.
	// Not an exact science: use size of binary plus
	// some room for thread stacks.
	used = (64<<20);
	if(used >= rl.rlim_cur)
		return 0;

	// If there's not at least 16 MB left, we're probably
	// not going to be able to do much.  Treat as no limit.
	rl.rlim_cur -= used;
	if(rl.rlim_cur < (16<<20))
		return 0;

	return rl.rlim_cur;
}
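
/* Worked example (illustrative numbers): with an RLIMIT_AS soft limit
   of 512<<20 bytes, used = 64<<20 leaves rl.rlim_cur = 448<<20, which
   clears the 16<<20 floor, so runtime_memlimit returns 448<<20.  A
   limit of 0x7fffffff or above, or one leaving less than 16 MB after
   the 64 MB estimate, yields 0, meaning "no limit".  */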