egl: Update headers from Khronos
[mesa.git] / src / compiler / glsl / blob.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <string.h>
25
26 #include "main/macros.h"
27 #include "blob.h"
28
29 #ifdef HAVE_VALGRIND
30 #include <valgrind.h>
31 #include <memcheck.h>
32 #define VG(x) x
33 #else
34 #define VG(x)
35 #endif
36
37 #define BLOB_INITIAL_SIZE 4096
38
39 /* Ensure that \blob will be able to fit an additional object of size
40 * \additional. The growing (if any) will occur by doubling the existing
41 * allocation.
42 */
43 static bool
44 grow_to_fit(struct blob *blob, size_t additional)
45 {
46 size_t to_allocate;
47 uint8_t *new_data;
48
49 if (blob->size + additional <= blob->allocated)
50 return true;
51
52 if (blob->allocated == 0)
53 to_allocate = BLOB_INITIAL_SIZE;
54 else
55 to_allocate = blob->allocated * 2;
56
57 to_allocate = MAX2(to_allocate, blob->allocated + additional);
58
59 new_data = realloc(blob->data, to_allocate);
60 if (new_data == NULL)
61 return false;
62
63 blob->data = new_data;
64 blob->allocated = to_allocate;
65
66 return true;
67 }
68
69 /* Align the blob->size so that reading or writing a value at (blob->data +
70 * blob->size) will result in an access aligned to a granularity of \alignment
71 * bytes.
72 *
73 * \return True unless allocation fails
74 */
75 static bool
76 align_blob(struct blob *blob, size_t alignment)
77 {
78 const size_t new_size = ALIGN(blob->size, alignment);
79
80 if (blob->size < new_size) {
81 if (!grow_to_fit(blob, new_size - blob->size))
82 return false;
83
84 memset(blob->data + blob->size, 0, new_size - blob->size);
85 blob->size = new_size;
86 }
87
88 return true;
89 }
90
91 static void
92 align_blob_reader(struct blob_reader *blob, size_t alignment)
93 {
94 blob->current = blob->data + ALIGN(blob->current - blob->data, alignment);
95 }
96
97 struct blob *
98 blob_create()
99 {
100 struct blob *blob = (struct blob *) malloc(sizeof(struct blob));
101 if (blob == NULL)
102 return NULL;
103
104 blob->data = NULL;
105 blob->allocated = 0;
106 blob->size = 0;
107
108 return blob;
109 }
110
111 bool
112 blob_overwrite_bytes(struct blob *blob,
113 size_t offset,
114 const void *bytes,
115 size_t to_write)
116 {
117 /* Detect an attempt to overwrite data out of bounds. */
118 if (blob->size < offset + to_write)
119 return false;
120
121 VG(VALGRIND_CHECK_MEM_IS_DEFINED(bytes, to_write));
122
123 memcpy(blob->data + offset, bytes, to_write);
124
125 return true;
126 }
127
128 bool
129 blob_write_bytes(struct blob *blob, const void *bytes, size_t to_write)
130 {
131 if (! grow_to_fit(blob, to_write))
132 return false;
133
134 VG(VALGRIND_CHECK_MEM_IS_DEFINED(bytes, to_write));
135
136 memcpy(blob->data + blob->size, bytes, to_write);
137 blob->size += to_write;
138
139 return true;
140 }
141
142 uint8_t *
143 blob_reserve_bytes(struct blob *blob, size_t to_write)
144 {
145 uint8_t *ret;
146
147 if (! grow_to_fit (blob, to_write))
148 return NULL;
149
150 ret = blob->data + blob->size;
151 blob->size += to_write;
152
153 return ret;
154 }
155
/* Append a uint32_t, first padding the blob so the value lands at a
 * 4-byte-aligned offset.
 *
 * \return True unless allocation fails
 */
bool
blob_write_uint32(struct blob *blob, uint32_t value)
{
   /* Propagate alignment failure: the original discarded align_blob()'s
    * result, so an allocation failure there would silently write the value
    * at an unaligned offset. */
   if (!align_blob(blob, sizeof(value)))
      return false;

   return blob_write_bytes(blob, &value, sizeof(value));
}
163
/* Overwrite a previously-written uint32_t at \offset (typically an offset
 * remembered from blob_reserve_bytes or a prior write).
 *
 * \return False if the span is out of bounds, true otherwise.
 */
bool
blob_overwrite_uint32(struct blob *blob,
                      size_t offset,
                      uint32_t value)
{
   const size_t len = sizeof(value);
   return blob_overwrite_bytes(blob, offset, &value, len);
}
171
/* Append a uint64_t, first padding the blob so the value lands at an
 * 8-byte-aligned offset.
 *
 * \return True unless allocation fails
 */
bool
blob_write_uint64(struct blob *blob, uint64_t value)
{
   /* Propagate alignment failure instead of discarding it (see
    * blob_write_uint32). */
   if (!align_blob(blob, sizeof(value)))
      return false;

   return blob_write_bytes(blob, &value, sizeof(value));
}
179
/* Append an intptr_t, first padding the blob so the value lands at a
 * pointer-size-aligned offset.
 *
 * \return True unless allocation fails
 */
bool
blob_write_intptr(struct blob *blob, intptr_t value)
{
   /* Propagate alignment failure instead of discarding it (see
    * blob_write_uint32). */
   if (!align_blob(blob, sizeof(value)))
      return false;

   return blob_write_bytes(blob, &value, sizeof(value));
}
187
/* Append the NUL-terminated string \str, including its terminator, so the
 * reader side can recover it with blob_read_string. \str must be non-NULL.
 *
 * \return True unless allocation fails
 */
bool
blob_write_string(struct blob *blob, const char *str)
{
   const size_t len_with_nul = strlen(str) + 1;
   return blob_write_bytes(blob, str, len_with_nul);
}
193
194 void
195 blob_reader_init(struct blob_reader *blob, uint8_t *data, size_t size)
196 {
197 blob->data = data;
198 blob->end = data + size;
199 blob->current = data;
200 blob->overrun = false;
201 }
202
203 /* Check that an object of size \size can be read from this blob.
204 *
205 * If not, set blob->overrun to indicate that we attempted to read too far.
206 */
207 static bool
208 ensure_can_read(struct blob_reader *blob, size_t size)
209 {
210 if (blob->current < blob->end && blob->end - blob->current >= size)
211 return true;
212
213 blob->overrun = true;
214
215 return false;
216 }
217
218 void *
219 blob_read_bytes(struct blob_reader *blob, size_t size)
220 {
221 void *ret;
222
223 if (! ensure_can_read (blob, size))
224 return NULL;
225
226 ret = blob->current;
227
228 blob->current += size;
229
230 return ret;
231 }
232
/* Copy the next \size bytes of the reader's data into \dest and advance the
 * cursor. On overrun, \dest is left untouched and blob->overrun is set. */
void
blob_copy_bytes(struct blob_reader *blob, uint8_t *dest, size_t size)
{
   const uint8_t *src = blob_read_bytes(blob, size);

   if (src != NULL)
      memcpy(dest, src, size);
}
244
245 /* These next three read functions have identical form. If we add any beyond
246 * these first three we should probably switch to generating these with a
247 * preprocessor macro.
248 */
249 uint32_t
250 blob_read_uint32(struct blob_reader *blob)
251 {
252 uint32_t ret;
253 int size = sizeof(ret);
254
255 align_blob_reader(blob, size);
256
257 if (! ensure_can_read(blob, size))
258 return 0;
259
260 ret = *((uint32_t*) blob->current);
261
262 blob->current += size;
263
264 return ret;
265 }
266
267 uint64_t
268 blob_read_uint64(struct blob_reader *blob)
269 {
270 uint64_t ret;
271 int size = sizeof(ret);
272
273 align_blob_reader(blob, size);
274
275 if (! ensure_can_read(blob, size))
276 return 0;
277
278 ret = *((uint64_t*) blob->current);
279
280 blob->current += size;
281
282 return ret;
283 }
284
285 intptr_t
286 blob_read_intptr(struct blob_reader *blob)
287 {
288 intptr_t ret;
289 int size = sizeof(ret);
290
291 align_blob_reader(blob, size);
292
293 if (! ensure_can_read(blob, size))
294 return 0;
295
296 ret = *((intptr_t *) blob->current);
297
298 blob->current += size;
299
300 return ret;
301 }
302
303 char *
304 blob_read_string(struct blob_reader *blob)
305 {
306 int size;
307 char *ret;
308 uint8_t *nul;
309
310 /* If we're already at the end, then this is an overrun. */
311 if (blob->current >= blob->end) {
312 blob->overrun = true;
313 return NULL;
314 }
315
316 /* Similarly, if there is no zero byte in the data remaining in this blob,
317 * we also consider that an overrun.
318 */
319 nul = memchr(blob->current, 0, blob->end - blob->current);
320
321 if (nul == NULL) {
322 blob->overrun = true;
323 return NULL;
324 }
325
326 size = nul - blob->current + 1;
327
328 assert(ensure_can_read(blob, size));
329
330 ret = (char *) blob->current;
331
332 blob->current += size;
333
334 return ret;
335 }