/* Vector API for GNU compiler.
   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2012
   Free Software Foundation, Inc.
   Contributed by Nathan Sidwell <nathan@codesourcery.com>
   Re-implemented in C++ by Diego Novillo <dnovillo@google.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file is compiled twice: once for the generator programs and
   once for the compiler.  */
#ifdef GENERATOR_FILE
#include "bconfig.h"
#else
#include "config.h"
#endif

#include "system.h"
#include "coretypes.h"
#include "ggc.h"
#include "vec.h"
#include "diagnostic-core.h"
#include "hashtab.h"

/* Store information about each particular vector.  */
struct vec_descriptor
{
  /* Call site that created the vector(s): function, file and line.  */
  const char *function;
  const char *file;
  int line;
  /* Bytes currently allocated from this site, number of allocations
     made, and the high-water mark of ALLOCATED.  */
  size_t allocated;
  size_t times;
  size_t peak;
};


/* Hashtable mapping vec addresses to descriptors.  */
static htab_t vec_desc_hash;

/* Hashtable helpers.  */
static hashval_t
hash_descriptor (const void *p)
{
  const struct vec_descriptor *const d =
    (const struct vec_descriptor *) p;
  return htab_hash_pointer (d->file) + d->line;
}

static int
eq_descriptor (const void *p1, const void *p2)
{
  const struct vec_descriptor *const d = (const struct vec_descriptor *) p1;
  const struct vec_descriptor *const l = (const struct vec_descriptor *) p2;
  return (d->file == l->file
	  && d->function == l->function
	  && d->line == l->line);
}


/* Hashtable mapping the address of an allocated vector to the entry
   recording its size and loc descriptor.  */
static htab_t ptr_hash;
struct ptr_hash_entry
{
  void *ptr;
  struct vec_descriptor *loc;
  size_t allocated;
};

/* Hash table helper functions.  */
static hashval_t
hash_ptr (const void *p)
{
  const struct ptr_hash_entry *const d = (const struct ptr_hash_entry *) p;

  return htab_hash_pointer (d->ptr);
}

static int
eq_ptr (const void *p1, const void *p2)
{
  const struct ptr_hash_entry *const p = (const struct ptr_hash_entry *) p1;

  return (p->ptr == p2);
}

/* Return the descriptor for the given call site, creating a new one if
   needed.  */
static struct vec_descriptor *
vec_descriptor (const char *name, int line, const char *function)
{
  struct vec_descriptor loc;
  struct vec_descriptor **slot;

  loc.file = name;
  loc.line = line;
  loc.function = function;
  if (!vec_desc_hash)
    vec_desc_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL);

  slot = (struct vec_descriptor **) htab_find_slot (vec_desc_hash, &loc,
                                                    INSERT);
  if (*slot)
    return *slot;
  *slot = XCNEW (struct vec_descriptor);
  (*slot)->file = name;
  (*slot)->line = line;
  (*slot)->function = function;
  (*slot)->allocated = 0;
  (*slot)->peak = 0;
  return *slot;
}
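
/* Usage sketch (illustrative only): descriptors are keyed purely by
   call site, so repeated allocations from one file/line/function share
   a single entry.  A hypothetical call such as

     struct vec_descriptor *d = vec_descriptor ("tree.c", 1234, "foo");

   returns the same descriptor each time it is made with the same
   arguments; register_overhead below then accumulates ALLOCATED, TIMES
   and PEAK on it.  Note that eq_descriptor compares the file and
   function strings by pointer rather than by content, so
   identical-but-distinct strings would produce separate entries.  */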

/* Account for the overhead.  */
static void
register_overhead (struct vec_prefix *ptr, size_t size,
                   const char *name, int line, const char *function)
{
  struct vec_descriptor *loc = vec_descriptor (name, line, function);
  struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
  PTR *slot;

  p->ptr = ptr;
  p->loc = loc;
  p->allocated = size;
  if (!ptr_hash)
    ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL);
  slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr),
                                   INSERT);
  gcc_assert (!*slot);
  *slot = p;

  loc->allocated += size;
  if (loc->peak < loc->allocated)
    loc->peak = loc->allocated;
  loc->times++;
}
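
/* Worked example (illustrative): after two allocations of 64 and 128
   bytes from the same call site followed by a free of the first one,
   that site's descriptor holds allocated = 128, peak = 192 and
   times = 2.  PEAK records the high-water mark, while ALLOCATED tracks
   what is still live and is reported as "Leak" by
   dump_vec_loc_statistics below.  */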

/* Notice that the pointer has been freed.  */
static void
free_overhead (struct vec_prefix *ptr)
{
  PTR *slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr),
                                        NO_INSERT);
  struct ptr_hash_entry *p = (struct ptr_hash_entry *) *slot;
  p->loc->allocated -= p->allocated;
  htab_clear_slot (ptr_hash, slot);
  free (p);
}

/* Free the heap space used by vector PTR, unregistering it from the
   overhead statistics when they are being gathered.  */
void
vec_heap_free (void *ptr)
{
  if (GATHER_STATISTICS)
    free_overhead ((struct vec_prefix *) ptr);
  free (ptr);
}

/* Calculate the new ALLOC value, making sure that RESERVE slots are
   free.  If EXACT, grow exactly; otherwise grow exponentially.  */

static inline unsigned
calculate_allocation (const struct vec_prefix *pfx, int reserve, bool exact)
{
  unsigned alloc = 0;
  unsigned num = 0;

  gcc_assert (reserve >= 0);

  if (pfx)
    {
      alloc = pfx->alloc_;
      num = pfx->num_;
    }
  else if (!reserve)
    /* If there's no prefix, and we've not requested anything, then we
       will create a NULL vector.  */
    return 0;

  /* We must have run out of room.  */
  gcc_assert (alloc - num < (unsigned) reserve);

  if (exact)
    /* Exact size.  */
    alloc = num + reserve;
  else
    {
      /* Exponential growth.  */
      if (!alloc)
        alloc = 4;
      else if (alloc < 16)
        /* Double when small.  */
        alloc = alloc * 2;
      else
        /* Grow slower when large.  */
        alloc = (alloc * 3 / 2);

      /* If this is still too small, set it to the right size.  */
      if (alloc < num + reserve)
        alloc = num + reserve;
    }
  return alloc;
}
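
/* Growth sketch (illustrative): with EXACT false and a request for one
   more slot each time, the allocation progresses

     0 -> 4 -> 8 -> 16 -> 24 -> 36 -> 54 -> 81 -> ...

   i.e. it doubles below 16 slots and grows by 3/2 thereafter, whereas
   an EXACT reservation never over-allocates and uses num + reserve
   as-is.  The cross-over keeps small vectors from wasting space and
   large vectors from reallocating too often.  */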

/* Ensure there are at least RESERVE free slots in VEC.  If EXACT grow
   exactly, else grow exponentially.  As a special case, if VEC is
   NULL and RESERVE is 0, no vector will be created.  The vector's
   trailing array is at VEC_OFFSET offset and consists of ELT_SIZE
   sized elements.  */

void *
vec_gc_o_reserve_1 (void *vec, int reserve, size_t vec_offset, size_t elt_size,
                    bool exact MEM_STAT_DECL)
{
  struct vec_prefix *pfx = (struct vec_prefix *) vec;
  unsigned alloc = calculate_allocation (pfx, reserve, exact);
  size_t size;

  if (!alloc)
    {
      if (pfx)
        ggc_free (pfx);
      return NULL;
    }

  /* Calculate the amount of space we want.  */
  size = vec_offset + alloc * elt_size;
  /* Ask the allocator how much space it will really give us.  */
  size = ggc_round_alloc_size (size);
  /* Adjust the number of slots accordingly.  */
  alloc = (size - vec_offset) / elt_size;
  /* And finally, recalculate the amount of space we ask for.  */
  size = vec_offset + alloc * elt_size;

  vec = ggc_realloc_stat (vec, size PASS_MEM_STAT);

  ((struct vec_prefix *) vec)->alloc_ = alloc;
  if (!pfx)
    ((struct vec_prefix *) vec)->num_ = 0;

  return vec;
}
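
/* Rounding sketch (illustrative; the exact figures depend on the GC
   allocator's bucket sizes): suppose VEC_OFFSET is 8, ELT_SIZE is 4 and
   calculate_allocation asked for 24 slots, i.e. 104 bytes.  If
   ggc_round_alloc_size rounds that up to, say, 128 bytes, the code
   above recomputes alloc = (128 - 8) / 4 = 30, so the extra room the
   allocator hands back anyway becomes usable slots, and then requests
   8 + 30 * 4 = 128 bytes from ggc_realloc_stat.  */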


/* As for vec_gc_o_reserve_1, but for heap allocated vectors.  */

void *
vec_heap_o_reserve_1 (void *vec, int reserve, size_t vec_offset,
                      size_t elt_size, bool exact MEM_STAT_DECL)
{
  struct vec_prefix *pfx = (struct vec_prefix *) vec;
  unsigned alloc = calculate_allocation (pfx, reserve, exact);

  if (!alloc)
    {
      if (pfx)
        vec_heap_free (pfx);
      return NULL;
    }

  if (GATHER_STATISTICS && vec)
    free_overhead (pfx);

  vec = xrealloc (vec, vec_offset + alloc * elt_size);
  ((struct vec_prefix *) vec)->alloc_ = alloc;
  if (!pfx)
    ((struct vec_prefix *) vec)->num_ = 0;
  if (GATHER_STATISTICS && vec)
    register_overhead ((struct vec_prefix *) vec,
                       vec_offset + alloc * elt_size FINAL_PASS_MEM_STAT);

  return vec;
}


/* Stack vectors are a little different.  VEC_alloc turns into a call
   to vec_stack_p_reserve_exact_1 and passes in space allocated via a
   call to alloca.  We record that pointer so that we know that we
   shouldn't free it.  If the vector is resized, we resize it on the
   heap.  We record the pointers in a vector and search it in LIFO
   order--i.e., we look for the newest stack vectors first.  We don't
   expect too many stack vectors at any one level, and searching from
   the end should normally be efficient even if they are used in a
   recursive function.  */

typedef void *void_p;
DEF_VEC_P(void_p);
DEF_VEC_ALLOC_P(void_p,heap);

static VEC(void_p,heap) *stack_vecs;
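
/* Life-cycle sketch (illustrative): a caller obtains SPACE with alloca,
   sized for the vec_prefix plus ALLOC elements, and hands it to
   vec_stack_p_reserve_exact_1 below, which remembers the pointer in
   stack_vecs.  Later, vec_stack_o_reserve_1 searches stack_vecs to
   decide whether the vector still lives in the caller's frame (and so
   must be copied to the heap rather than xrealloc'd), and
   vec_stack_free searches it to decide whether there is anything to
   free at all.  */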

/* Allocate a vector which uses alloca for the initial allocation.
   SPACE is space allocated using alloca, ALLOC is the number of
   entries allocated.  */

void *
vec_stack_p_reserve_exact_1 (int alloc, void *space)
{
  struct vec_prefix *pfx = (struct vec_prefix *) space;

  VEC_safe_push (void_p, heap, stack_vecs, space);

  pfx->num_ = 0;
  pfx->alloc_ = alloc;

  return space;
}

/* Grow a vector allocated using alloca.  When this happens, we switch
   back to heap allocation.  We remove the vector from stack_vecs, if
   it is there, since we no longer need to avoid freeing it.  */

static void *
vec_stack_o_reserve_1 (void *vec, int reserve, size_t vec_offset,
                       size_t elt_size, bool exact MEM_STAT_DECL)
{
  bool found;
  unsigned int ix;
  void *newvec;

  found = false;
  for (ix = VEC_length (void_p, stack_vecs); ix > 0; --ix)
    {
      if (VEC_index (void_p, stack_vecs, ix - 1) == vec)
        {
          VEC_unordered_remove (void_p, stack_vecs, ix - 1);
          found = true;
          break;
        }
    }

  if (!found)
    {
      /* VEC is already on the heap.  */
      return vec_heap_o_reserve_1 (vec, reserve, vec_offset, elt_size,
                                   exact PASS_MEM_STAT);
    }

  /* Move VEC to the heap.  */
  reserve += ((struct vec_prefix *) vec)->num_;
  newvec = vec_heap_o_reserve_1 (NULL, reserve, vec_offset, elt_size,
                                 exact PASS_MEM_STAT);
  if (newvec && vec)
    {
      ((struct vec_prefix *) newvec)->num_ = ((struct vec_prefix *) vec)->num_;
      memcpy (((struct vec_prefix *) newvec) + 1,
              ((struct vec_prefix *) vec) + 1,
              ((struct vec_prefix *) vec)->num_ * elt_size);
    }
  return newvec;
}
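
/* Migration sketch (illustrative): a stack vector holding 3 elements
   that is asked to reserve 2 more gets a fresh heap vector with room
   for at least 3 + 2 = 5 elements (the RESERVE += num_ adjustment
   above, since the heap copy starts out empty), the 3 live elements
   are memcpy'd in just past the new prefix, and the alloca'd block is
   simply abandoned for the caller's frame to reclaim.  */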

/* Grow a vector allocated on the stack.  */

void *
vec_stack_o_reserve (void *vec, int reserve, size_t vec_offset,
                     size_t elt_size MEM_STAT_DECL)
{
  return vec_stack_o_reserve_1 (vec, reserve, vec_offset, elt_size, false
                                PASS_MEM_STAT);
}

/* Exact version of vec_stack_o_reserve.  */

void *
vec_stack_o_reserve_exact (void *vec, int reserve, size_t vec_offset,
                           size_t elt_size MEM_STAT_DECL)
{
  return vec_stack_o_reserve_1 (vec, reserve, vec_offset, elt_size, true
                                PASS_MEM_STAT);
}

/* Free a vector allocated on the stack.  Don't actually free it if we
   find it in the list of stack vectors; the caller's frame will
   reclaim that space.  */

void
vec_stack_free (void *vec)
{
  unsigned int ix;

  for (ix = VEC_length (void_p, stack_vecs); ix > 0; --ix)
    {
      if (VEC_index (void_p, stack_vecs, ix - 1) == vec)
        {
          VEC_unordered_remove (void_p, stack_vecs, ix - 1);
          return;
        }
    }

  /* VEC was not on the list of vecs allocated on the stack, so it
     must be allocated on the heap.  */
  vec_heap_free (vec);
}

#if ENABLE_CHECKING
/* Issue a vector domain error, and then fall over.  */

void
vec_assert_fail (const char *op, const char *struct_name,
                 const char *file, unsigned int line, const char *function)
{
  internal_error ("vector %s %s domain error, in %s at %s:%u",
                  struct_name, op, function, trim_filename (file), line);
}
#endif

/* Helper for qsort; sort descriptors by amount of memory consumed.  */
static int
cmp_statistic (const void *loc1, const void *loc2)
{
  const struct vec_descriptor *const l1 =
    *(const struct vec_descriptor *const *) loc1;
  const struct vec_descriptor *const l2 =
    *(const struct vec_descriptor *const *) loc2;
  long diff;
  diff = l1->allocated - l2->allocated;
  if (!diff)
    diff = l1->peak - l2->peak;
  if (!diff)
    diff = l1->times - l2->times;
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}

/* Collect array of the descriptors from hashtable.  */
static struct vec_descriptor **loc_array;
static int
add_statistics (void **slot, void *b)
{
  int *n = (int *) b;
  loc_array[*n] = (struct vec_descriptor *) *slot;
  (*n)++;
  return 1;
}

/* Dump per-site memory statistics.  */

void
dump_vec_loc_statistics (void)
{
  int nentries = 0;
  char s[4096];
  size_t allocated = 0;
  size_t times = 0;
  int i;

  if (! GATHER_STATISTICS)
    return;

  loc_array = XCNEWVEC (struct vec_descriptor *, vec_desc_hash->n_elements);
  fprintf (stderr, "Heap vectors:\n");
  fprintf (stderr, "\n%-48s %10s %10s %10s\n",
           "source location", "Leak", "Peak", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  htab_traverse (vec_desc_hash, add_statistics, &nentries);
  qsort (loc_array, nentries, sizeof (*loc_array), cmp_statistic);
  for (i = 0; i < nentries; i++)
    {
      struct vec_descriptor *d = loc_array[i];
      allocated += d->allocated;
      times += d->times;
    }
  for (i = 0; i < nentries; i++)
    {
      struct vec_descriptor *d = loc_array[i];
      const char *s1 = d->file;
      const char *s2;
      while ((s2 = strstr (s1, "gcc/")))
        s1 = s2 + 4;
      sprintf (s, "%s:%i (%s)", s1, d->line, d->function);
      s[48] = 0;
      fprintf (stderr, "%-48s %10li:%4.1f%% %10li %10li:%4.1f%% \n", s,
               (long) d->allocated,
               (d->allocated) * 100.0 / allocated,
               (long) d->peak,
               (long) d->times,
               (d->times) * 100.0 / times);
    }
  fprintf (stderr, "%-48s %10ld %10ld\n",
           "Total", (long) allocated, (long) times);
  fprintf (stderr, "\n%-48s %10s %10s %10s\n",
           "source location", "Leak", "Peak", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
}