regen config
[binutils-gdb.git] / gprofng / libcollector / iolib.c
1 /* Copyright (C) 2021-2023 Free Software Foundation, Inc.
2 Contributed by Oracle.
3
4 This file is part of GNU Binutils.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "config.h"
22 #include <dlfcn.h>
23 #include <pthread.h>
24 #include <errno.h>
25 #include <fcntl.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <unistd.h>
30 #include <sys/mman.h>
31 #include <sys/param.h>
32 #include <sys/stat.h>
33
34 #include "gp-defs.h"
35 #include "collector.h"
36 #include "gp-experiment.h"
37 #include "memmgr.h"
38
39 /* TprintfT(<level>,...) definitions. Adjust per module as needed */
40 #define DBG_LT0 0 // for high-level configuration, unexpected errors/warnings
41 #define DBG_LT1 1 // for configuration details, warnings
42 #define DBG_LT2 2
43 #define DBG_LT3 3
44
45 /* ------------- Data and prototypes for block management --------- */
46 #define IO_BLK 0 /* Concurrent requests */
47 #define IO_SEQ 1 /* All requests are sequential, f.e. JAVA_CLASSES */
48 #define IO_TXT 2 /* Sequential requests. Text strings. */
49 #define ST_INIT 0 /* Initial state. Not allocated */
50 #define ST_FREE 1 /* Available */
51 #define ST_BUSY 2 /* Not available */
52
53 /* IO_BLK, IO_SEQ */
54 #define NCHUNKS 64
55
56 /* IO_TXT */
57 #define NBUFS 64 /* Number of text buffers */
58 #define CUR_BUSY(x) ((uint32_t) ((x)>>63)) /* bit 63 */
59 #define CUR_INDX(x) ((uint32_t) (((x)>>57) & 0x3fULL)) /* bits 62:57 */
60 #define CUR_FOFF(x) ((x) & 0x01ffffffffffffffULL) /* bits 56: 0 */
61 #define CUR_MAKE(busy, indx, foff) ((((uint64_t)(busy))<<63) | (((uint64_t)(indx))<<57) | ((uint64_t)(foff)) )
62
typedef struct Buffer
{
  uint8_t *vaddr;	/* virtual address of this buffer's blksz-byte window */
  uint32_t left;	/* bytes left */ /* decremented atomically; 0 => buffer may be freed */
  uint32_t state;	/* ST_FREE or ST_BUSY */
} Buffer;
69
typedef struct DataHandle
{
  Pckt_type kind;		/* obsolete (to be removed) */
  int iotype;			/* IO_BLK, IO_SEQ, IO_TXT */
  int active;			/* nonzero while the handle accepts writes */
  char fname[MAXPATHLEN];	/* data file name */

  /* IO_BLK, IO_SEQ */
  uint32_t nflow;		/* number of data flows */
  uint32_t *blkstate;		/* block states, nflow*NCHUNKS array */
  uint32_t *blkoff;		/* block offset, nflow*NCHUNKS array */
  uint32_t nchnk;		/* number of active chunks, probably small for IO_BLK */
  uint8_t *chunks[NCHUNKS];	/* chunks (nflow contiguous blocks in virtual memory) */
  uint32_t chblk[NCHUNKS];	/* number of active blocks in a chunk */
  uint32_t nblk;		/* number of blocks in data file */
  int exempt;			/* if exempt from experiment size limit */

  /* IO_TXT */
  Buffer *buffers;		/* array of text buffers */
  uint64_t curpos;		/* current buffer and file offset, packed via CUR_MAKE */
} DataHandle;
91
92 #define PROFILE_DATAHNDL_MAX 16
93 static DataHandle data_hndls[PROFILE_DATAHNDL_MAX];
94 static int initialized = 0;
95 static long blksz; /* Block size. Multiple of page size. Power of two to make (x%blksz)==(x&(blksz-1)) fast. */
96 static long log2blksz; /* log2(blksz) to make (x/blksz)==(x>>log2blksz) fast. */
97 static uint32_t size_limit; /* Experiment size limit */
98 static uint32_t cur_size; /* Current experiment size */
99 static void init ();
100 static void deleteHandle (DataHandle *hndl);
101 static int exp_size_ck (int nblocks, char *fname);
102
103 /* IO_BLK, IO_SEQ */
104 static int allocateChunk (DataHandle *hndl, unsigned ichunk);
105 static uint8_t *getBlock (DataHandle *hndl, unsigned iflow, unsigned ichunk);
106 static int remapBlock (DataHandle *hndl, unsigned iflow, unsigned ichunk);
107 static int newBlock (DataHandle *hndl, unsigned iflow, unsigned ichunk);
108 static void deleteBlock (DataHandle *hndl, unsigned iflow, unsigned ichunk);
109
110 /* IO_TXT */
111 static int is_not_the_log_file (char *fname);
112 static int mapBuffer (char *fname, Buffer *buf, off64_t foff);
113 static int newBuffer (DataHandle *hndl, uint64_t pos);
114 static void writeBuffer (Buffer *buf, int blk_off, char *src, int len);
115 static void deleteBuffer (Buffer *buf);
116
117 /*
118 * Common buffer management routines
119 */
120 static void
121 init ()
122 {
123 /* set the block size */
124 long pgsz = CALL_UTIL (sysconf)(_SC_PAGESIZE);
125 blksz = pgsz;
126 log2blksz = 16; /* ensure a minimum size */
127 while ((1 << log2blksz) < blksz)
128 log2blksz += 1;
129 blksz = 1L << log2blksz; /* ensure that blksz is a power of two */
130 TprintfT (DBG_LT1, "iolib init: page size=%ld (0x%lx) blksz=%ld (0x%lx) log2blksz=%ld\n",
131 pgsz, pgsz, (long) blksz, (long) blksz, (long) log2blksz);
132 size_limit = 0;
133 cur_size = 0;
134 initialized = 1;
135 }
136
/*
 * Create a DataHandle for the data file named by DESCP in the current
 * experiment directory.  A leading '*' in DESCP marks the file as exempt
 * from the experiment size limit.  The packet kind and I/O type
 * (IO_BLK / IO_SEQ / IO_TXT) are derived from the well-known file name;
 * the file is created exclusively on disk, and the per-type bookkeeping
 * (text buffers, or block-state/offset arrays) is allocated.
 * Returns NULL on any failure.  NOTE: slot allocation below is not mt-safe.
 */
DataHandle *
__collector_create_handle (char *descp)
{
  int exempt = 0;
  char *desc = descp;
  /* a leading '*' requests exemption from the experiment size limit */
  if (desc[0] == '*')
    {
      desc++;
      exempt = 1;
    }
  if (!initialized)
    init ();

  /* set up header for file, file name, etc. */
  if (*__collector_exp_dir_name == 0)
    {
      __collector_log_write ("<event kind=\"%s\" id=\"%d\">__collector_exp_dir_name==NULL</event>\n",
			     SP_JCMD_CERROR, COL_ERROR_EXPOPEN);
      return NULL;
    }
  char fname[MAXPATHLEN];
  CALL_UTIL (strlcpy)(fname, __collector_exp_dir_name, sizeof (fname));
  CALL_UTIL (strlcat)(fname, "/", sizeof (fname));
  /* map the well-known file name to its packet kind and I/O type */
  Pckt_type kind = 0;
  int iotype = IO_BLK;
  if (__collector_strcmp (desc, SP_HEAPTRACE_FILE) == 0)
    kind = HEAP_PCKT;
  else if (__collector_strcmp (desc, SP_SYNCTRACE_FILE) == 0)
    kind = SYNC_PCKT;
  else if (__collector_strcmp (desc, SP_IOTRACE_FILE) == 0)
    kind = IOTRACE_PCKT;
  else if (__collector_strcmp (desc, SP_RACETRACE_FILE) == 0)
    kind = RACE_PCKT;
  else if (__collector_strcmp (desc, SP_PROFILE_FILE) == 0)
    kind = PROF_PCKT;
  else if (__collector_strcmp (desc, SP_OMPTRACE_FILE) == 0)
    kind = OMP_PCKT;
  else if (__collector_strcmp (desc, SP_HWCNTR_FILE) == 0)
    kind = HW_PCKT;
  else if (__collector_strcmp (desc, SP_DEADLOCK_FILE) == 0)
    kind = DEADLOCK_PCKT;
  else if (__collector_strcmp (desc, SP_FRINFO_FILE) == 0)
    CALL_UTIL (strlcat)(fname, "data.", sizeof (fname));
  else if (__collector_strcmp (desc, SP_LOG_FILE) == 0)
    iotype = IO_TXT;
  else if (__collector_strcmp (desc, SP_MAP_FILE) == 0)
    iotype = IO_TXT;
  else if (__collector_strcmp (desc, SP_JCLASSES_FILE) == 0)
    iotype = IO_SEQ;
  else
    {
      __collector_log_write ("<event kind=\"%s\" id=\"%d\">iolib unknown file desc %s</event>\n",
			     SP_JCMD_CERROR, COL_ERROR_EXPOPEN, desc);
      return NULL;
    }

  CALL_UTIL (strlcat)(fname, desc, sizeof (fname));
  TprintfT (DBG_LT1, "createHandle calling open on fname = `%s', desc = `%s' %s\n",
	    fname, desc, (exempt == 0 ? "non-exempt" : "exempt"));

  /* allocate a handle -- not mt-safe */
  DataHandle *hndl = NULL;
  for (int i = 0; i < PROFILE_DATAHNDL_MAX; ++i)
    if (data_hndls[i].active == 0)
      {
	hndl = &data_hndls[i];
	break;
      }

  /* out of handles? */
  if (hndl == NULL)
    {
      __collector_log_write ("<event kind=\"%s\" id=\"%d\">%s</event>\n",
			     SP_JCMD_CERROR, COL_ERROR_NOHNDL, fname);
      return NULL;
    }

  hndl->kind = kind;
  hndl->nblk = 0;
  hndl->exempt = exempt;
  CALL_UTIL (strlcpy)(hndl->fname, fname, sizeof (hndl->fname));
  /* O_EXCL: the data file must not already exist */
  int fd = CALL_UTIL (open)(hndl->fname,
			    O_RDWR | O_CREAT | O_TRUNC | O_EXCL,
			    S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
  if (fd < 0)
    {
      TprintfT (0, "createHandle open failed -- hndl->fname = `%s', SP_LOG_FILE = `%s': %s\n",
		hndl->fname, SP_LOG_FILE, CALL_UTIL (strerror)(errno));
      if (is_not_the_log_file (hndl->fname) == 0)
	{
	  char errbuf[4096];
	  /* If we are trying to create the handle for the log file, write to stderr, not the experiment */
	  CALL_UTIL (snprintf)(errbuf, sizeof (errbuf),
			       "create_handle: COL_ERROR_LOG_OPEN %s: %s\n", hndl->fname, CALL_UTIL (strerror)(errno));
	  CALL_UTIL (write)(2, errbuf, CALL_UTIL (strlen)(errbuf));

	}
      else
	__collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s: create_handle</event>\n",
			       SP_JCMD_CERROR, COL_ERROR_FILEOPN, errno, hndl->fname);
      return NULL;
    }
  /* only needed the file to exist on disk; blocks are mapped on demand */
  CALL_UTIL (close)(fd);

  hndl->iotype = iotype;
  if (hndl->iotype == IO_TXT)
    {
      /* allocate our buffers in virtual memory */
      /* later, we will remap buffers individually to the file */
      uint8_t *memory = (uint8_t*) CALL_UTIL (mmap64_) (0,
		      (size_t) (NBUFS * blksz), PROT_READ | PROT_WRITE,
#if ARCH(SPARC)
		      MAP_SHARED | MAP_ANON,
#else
		      MAP_PRIVATE | MAP_ANON,
#endif
		      -1, (off64_t) 0);
      if (memory == MAP_FAILED)
	{
	  TprintfT (0, "create_handle: can't mmap MAP_ANON (for %s): %s\n", hndl->fname, CALL_UTIL (strerror)(errno));
	  /* see if this is the log file */
	  if (is_not_the_log_file (hndl->fname) == 0)
	    {
	      /* If we are trying to map the log file, write to stderr, not to the experiment */
	      char errbuf[4096];
	      CALL_UTIL (snprintf)(errbuf, sizeof (errbuf),
				   "create_handle: can't mmap MAP_ANON (for %s): %s\n", hndl->fname, CALL_UTIL (strerror)(errno));
	      CALL_UTIL (write)(2, errbuf, CALL_UTIL (strlen)(errbuf));
	    }
	  else /* write the error message into the experiment */
	    __collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">MAP_ANON (for %s); create_handle</event>\n",
				   SP_JCMD_CERROR, COL_ERROR_FILEMAP, errno, hndl->fname);
	  return NULL;
	}
      TprintfT (DBG_LT2, " create_handle IO_TXT data buffer length=%ld (0x%lx) file='%s' memory=%p -- %p\n",
		(long) (NBUFS * blksz), (long) (NBUFS * blksz), hndl->fname,
		memory, memory + (NBUFS * blksz) - 1);

      /* set up an array of buffers, pointing them to the virtual addresses */
      TprintfT (DBG_LT2, "create_handle IO_TXT Buffer structures fname = `%s', NBUFS= %d, size = %ld (0x%lx)\n", fname,
		NBUFS, (long) NBUFS * sizeof (Buffer), (long) NBUFS * sizeof (Buffer));
      hndl->buffers = (Buffer*) __collector_allocCSize (__collector_heap, NBUFS * sizeof (Buffer), 1);
      if (hndl->buffers == NULL)
	{
	  TprintfT (0, "create_handle allocCSize for hndl->buffers failed\n");
	  CALL_UTIL (munmap)(memory, NBUFS * blksz);
	  return NULL;
	}
      for (int i = 0; i < NBUFS; i++)
	{
	  Buffer *buf = &hndl->buffers[i];
	  buf->vaddr = memory + i * blksz;
	  buf->state = ST_FREE;
	}
      /* set the file pointer to the beginning of the file */
      hndl->curpos = CUR_MAKE (0, 0, 0);
    }
  else
    {
      if (hndl->iotype == IO_BLK)
	{
	  /* one data flow per CPU, with a floor of 16 */
	  long nflow = CALL_UTIL (sysconf)(_SC_NPROCESSORS_ONLN);
	  if (nflow < 16)
	    nflow = 16;
	  hndl->nflow = (uint32_t) nflow;
	}
      else if (hndl->iotype == IO_SEQ)
	hndl->nflow = 1;
      TprintfT (DBG_LT2, "create_handle calling allocCSize blkstate fname=`%s' nflow=%d NCHUNKS=%d size=%ld (0x%lx)\n",
		fname, hndl->nflow, NCHUNKS,
		(long) (hndl->nflow * NCHUNKS * sizeof (uint32_t)),
		(long) (hndl->nflow * NCHUNKS * sizeof (uint32_t)));
      uint32_t *blkstate = (uint32_t*) __collector_allocCSize (__collector_heap, hndl->nflow * NCHUNKS * sizeof (uint32_t), 1);
      if (blkstate == NULL)
	return NULL;
      for (int j = 0; j < hndl->nflow * NCHUNKS; ++j)
	blkstate[j] = ST_INIT;
      hndl->blkstate = blkstate;
      TprintfT (DBG_LT2, "create_handle calling allocCSize blkoff fname=`%s' nflow=%d NCHUNKS=%d size=%ld (0x%lx)\n",
		fname, hndl->nflow, NCHUNKS,
		(long) (hndl->nflow * NCHUNKS * sizeof (uint32_t)),
		(long) (hndl->nflow * NCHUNKS * sizeof (uint32_t)));
      hndl->blkoff = (uint32_t*) __collector_allocCSize (__collector_heap, hndl->nflow * NCHUNKS * sizeof (uint32_t), 1);
      if (hndl->blkoff == NULL)
	return NULL;
      hndl->nchnk = 0;
      for (int j = 0; j < NCHUNKS; ++j)
	{
	  hndl->chunks[j] = NULL;
	  hndl->chblk[j] = 0;
	}
    }
  hndl->active = 1;
  return hndl;
}
332
/*
 * Deactivate HNDL and release its resources.  For IO_BLK/IO_SEQ, every
 * ST_FREE block is claimed via CAS and unmapped.  For IO_TXT, the
 * curpos word is locked (busy bit set) to stop new writes, the current
 * file offset is rounded up to a block boundary so the last partial
 * buffer retires, and then all buffers are waited on and unmapped.
 * Timeouts (10 s) prevent hanging on misbehaving writers.
 */
static void
deleteHandle (DataHandle *hndl)
{
  if (hndl->active == 0)
    return;
  hndl->active = 0;

  if (hndl->iotype == IO_BLK || hndl->iotype == IO_SEQ)
    {
      /* Delete all blocks. */
      /* Since access to hndl->active is not synchronized it's still
       * possible that we leave some blocks undeleted.
       */
      for (int j = 0; j < hndl->nflow * NCHUNKS; ++j)
	{
	  uint32_t oldstate = hndl->blkstate[j];
	  if (oldstate != ST_FREE)
	    continue;
	  /* Mark as busy */
	  uint32_t state = __collector_cas_32 (hndl->blkstate + j, oldstate, ST_BUSY);
	  if (state != oldstate)
	    continue;
	  deleteBlock (hndl, j / NCHUNKS, j % NCHUNKS);
	}
    }
  else if (hndl->iotype == IO_TXT)
    {
      /*
       * First, make sure that buffers are in some "coherent" state:
       *
       * At this point, the handle is no longer active. But some threads
       * might already have passed the active-handle check and are now
       * trying to schedule writes. So, set the handle pointer to "busy".
       * This will prevent new writes from being scheduled. Threads that
       * polling will time out.
       */
      hrtime_t timeout = __collector_gethrtime () + 10 * ((hrtime_t) 1000000000);
      volatile uint32_t busy = 0;
      while (1)
	{
	  uint32_t indx;
	  uint64_t opos, npos, foff;
	  int blk_off;
	  /* read the current pointer */
	  opos = hndl->curpos;
	  busy = CUR_BUSY (opos);
	  indx = CUR_INDX (opos);
	  foff = CUR_FOFF (opos);
	  if (busy == 1)
	    {
	      /* another thread holds the curpos lock; spin until timeout */
	      if (__collector_gethrtime () > timeout)
		{
		  TprintfT (0, "deleteHandle ERROR: timeout cleaning up handle for %s\n", hndl->fname);
		  return;
		}
	      continue;
	    }
	  /* round foff up to the next block boundary */
	  blk_off = foff & (blksz - 1);
	  if (blk_off > 0)
	    foff += blksz - blk_off;
	  npos = CUR_MAKE (1, indx, foff);

	  /* try to update the handle position atomically */
	  if (__collector_cas_64p (&hndl->curpos, &opos, &npos) != opos)
	    continue;

	  /*
	   * If the last buffer won't be filled, account for
	   * the white space at the end so that the buffer will
	   * be deleted properly.
	   */
	  if (blk_off > 0)
	    {
	      Buffer *buf = &hndl->buffers[indx];
	      if (__collector_subget_32 (&buf->left, blksz - blk_off) == 0)
		deleteBuffer (buf);
	    }
	  break;
	}
      /* wait for buffers to be deleted */
      timeout = __collector_gethrtime () + 10 * ((hrtime_t) 1000000000);
      for (int i = 0; i < NBUFS; i++)
	{
	  Buffer *buf = &hndl->buffers[i];
	  /* claim each buffer back (ST_FREE -> ST_INIT), spinning until
	     in-flight writers release it or the timeout expires */
	  while (__collector_cas_32 (&buf->state, ST_FREE, ST_INIT) != ST_FREE)
	    {
	      if (__collector_gethrtime () > timeout)
		{
		  TprintfT (0, "deleteHandle ERROR: timeout waiting for buffer %d for %s\n", i, hndl->fname);
		  return;
		}
	    }
	  CALL_UTIL (munmap)(buf->vaddr, blksz);
	}

      /* free buffer array */
      __collector_freeCSize (__collector_heap, hndl->buffers, NBUFS * sizeof (Buffer));
    }
}
432
433 void
434 __collector_delete_handle (DataHandle *hndl)
435 {
436 if (hndl == NULL)
437 return;
438 deleteHandle (hndl);
439 }
440
/*
 * Charge NBLOCKS newly-allocated blocks against the experiment size
 * limit.  Returns 0 normally; when the accumulated size first crosses
 * size_limit, logs a warning, pauses collection, terminates the
 * experiment, and returns -1.  FNAME is used only for messages.
 */
static int
exp_size_ck (int nblocks, char *fname)
{
  if (size_limit == 0)
    return 0;	/* no limit configured */
  /* do an atomic add to the cur_size */
  uint32_t old_size = cur_size;
  uint32_t new_size;
  for (;;)
    {
      new_size = __collector_cas_32 (&cur_size, old_size, old_size + nblocks);
      if (new_size == old_size)
	{
	  /* CAS succeeded; new_size becomes the post-add total */
	  new_size = old_size + nblocks;
	  break;
	}
      old_size = new_size;	/* lost the race; retry with the fresh value */
    }
  TprintfT (DBG_LT2, "exp_size_ck() adding %d block(s); new_size = %d, limit = %d blocks; fname = %s\n",
	    nblocks, new_size, size_limit, fname);

  /* pause the entire collector if we have exceeded the limit */
  /* the old_size/new_size straddle test ensures only one caller fires this */
  if (old_size < size_limit && new_size >= size_limit)
    {
      TprintfT (0, "exp_size_ck() experiment size limit exceeded; new_size = %ld, limit = %ld blocks; fname = %s\n",
		(long) new_size, (long) size_limit, fname);
      (void) __collector_log_write ("<event kind=\"%s\" id=\"%d\">%ld blocks (each %ld bytes)</event>\n",
				    SP_JCMD_CWARN, COL_ERROR_SIZELIM, (long) size_limit, (long) blksz);
      __collector_pause_m ("size-limit");
      __collector_terminate_expt ();
      return -1;
    }
  return 0;
}
475
476 int
477 __collector_set_size_limit (char *par)
478 {
479 if (!initialized)
480 init ();
481
482 int lim = CALL_UTIL (strtol)(par, &par, 0);
483 size_limit = (uint32_t) ((uint64_t) lim * 1024 * 1024 / blksz);
484 TprintfT (DBG_LT0, "collector_size_limit set to %d MB. = %d blocks\n",
485 lim, size_limit);
486 (void) __collector_log_write ("<setting limit=\"%d\"/>\n", lim);
487 return COL_ERROR_NONE;
488 }
489
490 /*
491 * IO_BLK and IO_SEQ files
492 */
493
/*
 * Allocate a chunk (nflow blocks) contiguously in virtual memory.
 * Its blocks will be mmapped to the file individually.
 *
 * Thread-safe: exactly one caller wins the NULL -> CHUNK_BUSY CAS and
 * performs the mmap; the others spin until the chunk appears or a 10 s
 * timeout expires.  Returns 0 on success, 1 on failure.
 */
static int
allocateChunk (DataHandle *hndl, unsigned ichunk)
{
  /*
   * hndl->chunks[ichunk] is one of:
   *   - NULL (initial value)
   *   - CHUNK_BUSY (transition state when allocating the chunk)
   *   - some address (the allocated chunk)
   */
  uint8_t *CHUNK_BUSY = (uint8_t *) 1;
  hrtime_t timeout = 0;
  while (1)
    {
      if (hndl->chunks[ichunk] > CHUNK_BUSY)
	return 0;	/* the chunk has already been allocated */
      /* try to allocate the chunk (change: NULL => CHUNK_BUSY) */
      if (__collector_cas_ptr (&hndl->chunks[ichunk], NULL, CHUNK_BUSY) == NULL)
	{
	  /* allocate virtual memory */
	  uint8_t *newchunk = (uint8_t*) CALL_UTIL (mmap64_) (0,
			  (size_t) (blksz * hndl->nflow), PROT_READ | PROT_WRITE,
#if ARCH(SPARC)
			  MAP_SHARED | MAP_ANON,
#else
			  MAP_PRIVATE | MAP_ANON,
#endif
			  -1, (off64_t) 0);
	  if (newchunk == MAP_FAILED)
	    {
	      deleteHandle (hndl);
	      TprintfT (DBG_LT1, " allocateChunk mmap: start=0x%x length=%ld (0x%lx), offset=%d ret=%p\n",
			0, (long) (blksz * hndl->nflow),
			(long) (blksz * hndl->nflow), 0, newchunk);
	      TprintfT (0, "allocateChunk: can't mmap MAP_ANON (for %s): %s\n", hndl->fname, CALL_UTIL (strerror) (errno));
	      __collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">MAP_ANON (for %s)</event>\n",
				     SP_JCMD_CERROR, COL_ERROR_FILEMAP, errno, hndl->fname);
	      return 1;
	    }

	  /* assign allocated address to our chunk */
	  if (__collector_cas_ptr (&hndl->chunks[ichunk], CHUNK_BUSY, newchunk) != CHUNK_BUSY)
	    {
	      /* should be impossible: we hold the CHUNK_BUSY lock */
	      TprintfT (0, "allocateChunk: can't release chunk CAS lock for %s\n", hndl->fname);
	      __collector_log_write ("<event kind=\"%s\" id=\"%d\">couldn't release chunk CAS lock (%s)</event>\n",
				     SP_JCMD_CERROR, COL_ERROR_GENERAL, hndl->fname);
	    }
	  __collector_inc_32 (&hndl->nchnk);
	  return 0;
	}

      /* check for time out */
      /* timeout clock starts on the first failed CAS, not at entry */
      if (timeout == 0)
	timeout = __collector_gethrtime () + 10 * ((hrtime_t) 1000000000);
      if (__collector_gethrtime () > timeout)
	{
	  TprintfT (0, "allocateChunk: timeout for %s\n", hndl->fname);
	  __collector_log_write ("<event kind=\"%s\" id=\"%d\">timeout allocating chunk for %s</event>\n",
				 SP_JCMD_CERROR, COL_ERROR_GENERAL, hndl->fname);
	  return 1;
	}
    }
}
560
561 /*
562 * Get the address for block (iflow,ichunk).
563 */
564 static uint8_t *
565 getBlock (DataHandle *hndl, unsigned iflow, unsigned ichunk)
566 {
567 return hndl->chunks[ichunk] + iflow * blksz;
568 }
569
570 /*
571 * Map block (iflow,ichunk) to the next part of the file.
572 */
573 static int
574 remapBlock (DataHandle *hndl, unsigned iflow, unsigned ichunk)
575 {
576 int rc = 0;
577 int fd;
578 /* Get the old file nblk and increment it atomically. */
579 uint32_t oldblk = hndl->nblk;
580 for (;;)
581 {
582 uint32_t newblk = __collector_cas_32 (&hndl->nblk, oldblk, oldblk + 1);
583 if (newblk == oldblk)
584 break;
585 oldblk = newblk;
586 }
587 off64_t offset = (off64_t) oldblk * blksz;
588
589 /* 6618470: disable thread cancellation */
590 int old_cstate;
591 pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &old_cstate);
592
593 /* Open the file. */
594 int iter = 0;
595 hrtime_t tso = __collector_gethrtime ();
596 for (;;)
597 {
598 fd = CALL_UTIL (open)(hndl->fname, O_RDWR, 0);
599 if (fd < 0)
600 {
601 if (errno == EMFILE)
602 {
603 /* too many open files */
604 iter++;
605 if (iter > 1000)
606 {
607 /* we've tried 1000 times; kick error back to caller */
608 char errmsg[MAXPATHLEN + 50];
609 hrtime_t teo = __collector_gethrtime ();
610 double deltato = (double) (teo - tso) / 1000000.;
611 (void) CALL_UTIL (snprintf) (errmsg, sizeof (errmsg),
612 " t=%lu, %s: open-retries-failed=%d, %3.6f ms.; remap\n",
613 (unsigned long) __collector_thr_self (), hndl->fname,
614 iter, deltato);
615 __collector_log_write ("<event kind=\"%s\" id=\"%d\">%s</event>\n",
616 SP_JCMD_COMMENT, COL_COMMENT_NONE, errmsg);
617 rc = 1;
618 goto exit;
619 }
620 /* keep trying */
621 continue;
622 }
623 deleteHandle (hndl);
624 TprintfT (0, "remapBlock: can't open file: %s: %s\n", hndl->fname, STR (CALL_UTIL (strerror)(errno)));
625 __collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">t=%lu, %s: remap </event>\n",
626 SP_JCMD_CERROR, COL_ERROR_FILEOPN, errno,
627 (unsigned long) __collector_thr_self (),
628 hndl->fname);
629 rc = 1;
630 goto exit;
631 }
632 else
633 break;
634 }
635
636 /* report number of retries of the open due to too many open fd's */
637 if (iter > 0)
638 {
639 char errmsg[MAXPATHLEN + 50];
640 hrtime_t teo = __collector_gethrtime ();
641 double deltato = (double) (teo - tso) / 1000000.;
642 (void) CALL_UTIL (snprintf) (errmsg, sizeof (errmsg),
643 " t=%d, %s: open-retries=%lu, %3.6f ms.; remap\n",
644 (unsigned long) __collector_thr_self (), hndl->fname,
645 iter, deltato);
646 __collector_log_write ("<event kind=\"%s\" id=\"%d\">%s</event>\n",
647 SP_JCMD_COMMENT, COL_COMMENT_NONE, errmsg);
648 }
649
650 /* Ensure disk space is allocated and the block offset is 0 */
651 uint32_t zero = 0;
652 int n = CALL_UTIL (pwrite64_) (fd, &zero, sizeof (zero),
653 (off64_t) (offset + blksz - sizeof (zero)));
654 if (n <= 0)
655 {
656 deleteHandle (hndl);
657 TprintfT (0, "remapBlock: can't pwrite file: %s : errno=%d\n", hndl->fname, errno);
658 __collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s: remap</event>\n",
659 SP_JCMD_CERROR, COL_ERROR_NOSPACE, errno, hndl->fname);
660 CALL_UTIL (close)(fd);
661 rc = 1;
662 goto exit;
663 }
664 hndl->blkoff[iflow * NCHUNKS + ichunk] = 0;
665
666 /* Map block to file */
667 uint8_t *bptr = getBlock (hndl, iflow, ichunk);
668 uint8_t *vaddr = (uint8_t *) CALL_UTIL (mmap64_) ((void*) bptr,
669 (size_t) blksz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
670 fd, offset);
671
672 if (vaddr != bptr)
673 {
674 deleteHandle (hndl);
675 TprintfT (DBG_LT1, " remapBlock mmap: start=%p length=%ld (0x%lx) offset=0x%llx ret=%p\n",
676 bptr, (long) blksz, (long) blksz, (long long) offset, vaddr);
677 TprintfT (0, "remapBlock: can't mmap file: %s : errno=%d\n", hndl->fname, errno);
678 (void) __collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s: remap</event>\n",
679 SP_JCMD_CERROR, COL_ERROR_FILEMAP, errno, hndl->fname);
680 CALL_UTIL (close)(fd);
681 rc = 1;
682 goto exit;
683 }
684 CALL_UTIL (close)(fd);
685
686 if (hndl->exempt == 0)
687 exp_size_ck (1, hndl->fname);
688 else
689 Tprintf (DBG_LT1, "exp_size_ck() bypassed for %d block(s); exempt fname = %s\n",
690 1, hndl->fname);
691 exit:
692 /* Restore the previous cancellation state */
693 pthread_setcancelstate (old_cstate, NULL);
694
695 return rc;
696 }
697
698 static int
699 newBlock (DataHandle *hndl, unsigned iflow, unsigned ichunk)
700 {
701 if (allocateChunk (hndl, ichunk) != 0)
702 return 1;
703 if (remapBlock (hndl, iflow, ichunk) != 0)
704 return 1;
705
706 /* Update the number of active blocks */
707 __collector_inc_32 (hndl->chblk + ichunk);
708 return 0;
709 }
710
711 static void
712 deleteBlock (DataHandle *hndl, unsigned iflow, unsigned ichunk)
713 {
714 uint8_t *bptr = getBlock (hndl, iflow, ichunk);
715 CALL_UTIL (munmap)((void*) bptr, blksz);
716 hndl->blkstate[iflow * NCHUNKS + ichunk] = ST_INIT;
717
718 /* Update the number of active blocks */
719 __collector_dec_32 (hndl->chblk + ichunk);
720 }
721
/*
 * Write a Common_packet record to HNDL, filling in any zero-valued
 * header fields (type, timestamp, lwp/thread/cpu ids, size) with
 * current defaults first.  Returns 0 on success, 1 on failure.
 * Delegates the actual write to __collector_write_packet.
 */
int
__collector_write_record (DataHandle *hndl, Common_packet *pckt)
{
  if (hndl == NULL || !hndl->active)
    return 1;
  /* fill in the fields of the common packet structure */
  /* zero means "not set by the caller"; supply the default */
  if (pckt->type == 0)
    pckt->type = hndl->kind;
  if (pckt->tstamp == 0)
    pckt->tstamp = __collector_gethrtime ();
  if (pckt->lwp_id == 0)
    pckt->lwp_id = __collector_lwp_self ();
  if (pckt->thr_id == 0)
    pckt->thr_id = __collector_thr_self ();
  if (pckt->cpu_id == 0)
    pckt->cpu_id = CALL_UTIL (getcpuid)();
  if (pckt->tsize == 0)
    pckt->tsize = sizeof (Common_packet);
  TprintfT (DBG_LT3, "collector_write_record to %s, type:%d tsize:%d\n",
	    hndl->fname, pckt->type, pckt->tsize);
  return __collector_write_packet (hndl, (CM_Packet*) pckt);
}
744
/*
 * Write PCKT to an IO_BLK/IO_SEQ handle.  The calling thread hashes to
 * a data flow (by thread id), claims one of that flow's NCHUNKS blocks
 * with CAS (mapping a new block if the slot was ST_INIT), copies the
 * packet at the block's current offset, and releases the block.
 * Returns 0 on success, 1 on failure.
 */
int
__collector_write_packet (DataHandle *hndl, CM_Packet *pckt)
{
  if (hndl == NULL || !hndl->active)
    return 1;

  /* if the experiment is not open, there should be no writes */
  if (__collector_expstate != EXP_OPEN)
    {
#ifdef DEBUG
      char *xstate;
      switch (__collector_expstate)
	{
	case EXP_INIT:
	  xstate = "EXP_INIT";
	  break;
	case EXP_OPEN:
	  xstate = "EXP_OPEN";
	  break;
	case EXP_PAUSED:
	  xstate = "EXP_PAUSED";
	  break;
	case EXP_CLOSED:
	  xstate = "EXP_CLOSED";
	  break;
	default:
	  xstate = "Unknown";
	  break;
	}
      TprintfT (0, "collector_write_packet: write to %s while experiment state is %s\n",
		hndl->fname, xstate);
#endif
      return 1;
    }
  int recsz = pckt->tsize;
  if (recsz > blksz)
    {
      /* a packet can never span blocks */
      TprintfT (0, "collector_write_packet: packet too long: %d (max %ld)\n", recsz, blksz);
      return 1;
    }
  /* hash the caller's id to a data flow to reduce contention */
  collector_thread_t tid = __collector_no_threads ? __collector_lwp_self ()
	  : __collector_thr_self ();
  unsigned iflow = (unsigned) (((unsigned long) tid) % hndl->nflow);

  /* Acquire block */
  uint32_t *sptr = &hndl->blkstate[iflow * NCHUNKS];
  uint32_t state = ST_BUSY;
  unsigned ichunk;
  for (ichunk = 0; ichunk < NCHUNKS; ++ichunk)
    {
      uint32_t oldstate = sptr[ichunk];
      if (oldstate == ST_BUSY)
	continue;
      /* Mark as busy */
      state = __collector_cas_32 (sptr + ichunk, oldstate, ST_BUSY);
      if (state == oldstate)
	break;		/* acquired; state tells us what the block was */
      if (state == ST_BUSY)
	continue;
      /* It's possible the state changed from ST_INIT to ST_FREE */
      /* retry the CAS once against the value we just observed */
      oldstate = state;
      state = __collector_cas_32 (sptr + ichunk, oldstate, ST_BUSY);
      if (state == oldstate)
	break;
    }

  if (state == ST_BUSY || ichunk == NCHUNKS)
    {
      /* We are out of blocks for this data flow.
       * We might switch to another flow but for now report and return.
       */
      TprintfT (0, "collector_write_packet: all %d blocks on flow %d for %s are busy\n",
		NCHUNKS, iflow, hndl->fname);
      return 1;
    }

  /* an ST_INIT block has never been mapped; do that now */
  if (state == ST_INIT && newBlock (hndl, iflow, ichunk) != 0)
    return 1;
  uint8_t *bptr = getBlock (hndl, iflow, ichunk);
  uint32_t blkoff = hndl->blkoff[iflow * NCHUNKS + ichunk];
  if (blkoff + recsz > blksz)
    {
      /* The record doesn't fit. Close the block */
      if (blkoff < blksz)
	{
	  /* mark the tail of the old block as a closed (padding) packet */
	  Common_packet *closed = (Common_packet *) (bptr + blkoff);
	  closed->type = CLOSED_PCKT;
	  closed->tsize = blksz - blkoff;	/* redundant */
	}
      if (remapBlock (hndl, iflow, ichunk) != 0)
	return 1;
      blkoff = hndl->blkoff[iflow * NCHUNKS + ichunk];
    }
  if (blkoff + recsz < blksz)
    {
      /* Set the empty padding */
      /* so readers always find a valid packet header after this record */
      Common_packet *empty = (Common_packet *) (bptr + blkoff + recsz);
      empty->type = EMPTY_PCKT;
      empty->tsize = blksz - blkoff - recsz;
    }
  __collector_memcpy (bptr + blkoff, pckt, recsz);

  /* Release block */
  if (hndl->active == 0)
    {
      /* handle was deactivated while we held the block; clean up */
      deleteBlock (hndl, iflow, ichunk);
      return 0;
    }
  hndl->blkoff[iflow * NCHUNKS + ichunk] += recsz;
  sptr[ichunk] = ST_FREE;
  return 0;
}
857
858 /*
859 * IO_TXT files
860 *
861 * IO_TXT covers the case where many threads are trying to write text messages
862 * sequentially (atomically) to a file. Examples include SP_LOG_FILE and SP_MAP_FILE.
863 *
864 * The file is not written directly, but by writing to mmapped virtual memory.
865 * The granularity of the mapping is a "Buffer". There may be as many as
866 * NBUFS buffers at any one time.
867 *
868 * The current position of the file is handled via hndl->curpos.
869 *
870 * * It is accessed atomically with 64-bit CAS instructions.
871 *
872 * * This 64-bit word encapsulates:
873 * - busy: a bit to lock access to hndl->curpos
874 * - indx: an index indicating which Buffer to use for the current position
875 * - foff: the file offset
876 *
877 * * The contents are accessed with:
878 * - unpack macros: CUR_BUSY CUR_INDX CUR_FOFF
879 * - pack macro : CUR_MAKE
880 *
881 * Conceptually, what happens when a thread wants to write a message is:
882 * - acquire the hndl->curpos "busy" lock
883 * . acquire and map new Buffers if needed to complete the message
884 * . update the file offset
885 * . release the lock
886 * - write to the corresponding buffers
887 *
888 * Each Buffer has a buf->left field that tracks how many more bytes
889 * need to be written to the Buffer. After a thread writes to a Buffer,
890 * it decrements buf->left atomically. When buf->left reaches 0, the
891 * Buffer (mapping) is deleted, freeing the Buffer for a new mapping.
892 *
893 * The actual implementation has some twists:
894 *
895 * * If the entire text message fits into the current Buffer -- that is,
896 * no new Buffers are needed -- the thread does not acquire the lock.
897 * It simply updates hndl->curpos atomically to the new file offset.
898 *
899 * * There are various timeouts to prevent hangs in case of abnormalities.
900 */
901 static int
902 is_not_the_log_file (char *fname)
903 {
904 if (CALL_UTIL (strstr)(fname, SP_LOG_FILE) == NULL)
905 return 1;
906 return 0;
907 }
908
/*
 * Map BUF's virtual address onto FNAME at file offset FOFF.  The file
 * is first extended (by writing its last byte) so disk space exists,
 * then mmapped with MAP_FIXED over buf->vaddr.  On success buf->left is
 * reset to a full block.  Returns 0 on success, 1 on failure.  Errors
 * are logged to the experiment unless FNAME is the log file itself.
 */
static int
mapBuffer (char *fname, Buffer *buf, off64_t foff)
{
  int rc = 0;
  /* open fname */
  int fd = CALL_UTIL (open)(fname, O_RDWR, 0);
  if (fd < 0)
    {
      TprintfT (0, "mapBuffer ERROR: can't open file: %s\n", fname);
      if (is_not_the_log_file (fname))
	__collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s: mapBuffer</event>\n",
			       SP_JCMD_CERROR, COL_ERROR_FILEOPN, errno, fname);
      return 1;
    }
  TprintfT (DBG_LT2, "mapBuffer pwrite file %s at 0x%llx\n", fname, (long long) foff);

  /* ensure disk space is allocated */
  /* writing the block's final byte extends the file to cover it */
  char nl = '\n';
  int n = CALL_UTIL (pwrite64_) (fd, &nl, sizeof (nl),
				 (off64_t) (foff + blksz - sizeof (nl)));
  if (n <= 0)
    {
      TprintfT (0, "mapBuffer ERROR: can't pwrite file %s at 0x%llx\n", fname,
		(long long) (foff + blksz - sizeof (nl)));
      if (is_not_the_log_file (fname))
	__collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s: mapBuffer</event>\n",
			       SP_JCMD_CERROR, COL_ERROR_FILETRNC, errno, fname);
      rc = 1;
      goto exit;
    }
  /* mmap buf->vaddr to fname at foff */
  /* MAP_FIXED: the buffer's address was reserved at handle creation */
  uint8_t *vaddr = CALL_UTIL (mmap64_) (buf->vaddr, (size_t) blksz,
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, foff);
  if (vaddr != buf->vaddr)
    {
      TprintfT (DBG_LT1, " mapBuffer mmap: start=%p length=%ld (0x%lx) offset=0x%llx ret=%p\n",
		buf->vaddr, blksz, blksz, (long long) foff, vaddr);
      TprintfT (0, "mapBuffer ERROR: can't mmap %s: vaddr=%p size=%ld (0x%lx) ret=%p off=0x%llx errno=%d\n",
		fname, buf->vaddr, blksz, blksz, vaddr, (long long) foff, errno);
      if (is_not_the_log_file (fname))
	__collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s: mapBuffer</event>\n",
			       SP_JCMD_CERROR, COL_ERROR_FILEMAP, errno, fname);
      rc = 1;
    }
  else
    buf->left = blksz;	/* a full block remains to be written */
exit:
  CALL_UTIL (close)(fd);

  /* Should we check buffer size? Let's not since:
   * - IO_TXT is typically not going to be that big
   * - we want log.xml to be treated specially
   */
  /* exp_size_ck( 1, fname ); */
  return rc;
}
965
966 static int
967 newBuffer (DataHandle *hndl, uint64_t foff)
968 {
969 /* find a ST_FREE buffer and mark it ST_BUSY */
970 int ibuf;
971 for (ibuf = 0; ibuf < NBUFS; ibuf++)
972 if (__collector_cas_32 (&hndl->buffers[ibuf].state, ST_FREE, ST_BUSY) == ST_FREE)
973 break;
974 if (ibuf >= NBUFS)
975 {
976 TprintfT (0, "newBuffer ERROR: all buffers busy for %s\n", hndl->fname);
977 return -1;
978 }
979 Buffer *nbuf = hndl->buffers + ibuf;
980
981 /* map buffer */
982 if (mapBuffer (hndl->fname, nbuf, foff) != 0)
983 {
984 nbuf->state = ST_FREE;
985 ibuf = -1;
986 goto exit;
987 }
988 exit:
989 return ibuf;
990 }
991
992 static void
993 writeBuffer (Buffer *buf, int blk_off, char *src, int len)
994 {
995 __collector_memcpy (buf->vaddr + blk_off, src, len);
996 if (__collector_subget_32 (&buf->left, len) == 0)
997 deleteBuffer (buf);
998 }
999
/* Return BUF to the free pool.  A plain store suffices here; slots are
   claimed in newBuffer() via an atomic CAS on state, so only one
   claimant can win the slot after this release.  */
static void
deleteBuffer (Buffer *buf)
{
  buf->state = ST_FREE;
}
1005
1006 int
1007 __collector_write_string (DataHandle *hndl, char *src, int len)
1008 {
1009 if (hndl == NULL || !hndl->active)
1010 return 1;
1011 if (len <= 0)
1012 return 0;
1013
1014 hrtime_t timeout = __collector_gethrtime () + 20 * ((hrtime_t) 1000000000);
1015 volatile uint32_t busy = 0;
1016 while (1)
1017 {
1018 uint32_t indx;
1019 uint64_t opos, foff, base;
1020 int blk_off, buf_indices[NBUFS], ibuf, nbufs;
1021
1022 /* read and decode the current pointer */
1023 opos = hndl->curpos;
1024 busy = CUR_BUSY (opos);
1025 indx = CUR_INDX (opos);
1026 foff = CUR_FOFF (opos);
1027 if (busy == 1)
1028 {
1029 if (__collector_gethrtime () > timeout)
1030 {
1031 /*
1032 * E.g., if another thread deleted the handle
1033 * after we checked hndl->active.
1034 */
1035 TprintfT (0, "__collector_write_string ERROR: timeout writing length=%d to text file: %s\n", len, hndl->fname);
1036 return 1;
1037 }
1038 continue;
1039 }
1040
1041 /* initial block offset */
1042 blk_off = foff & (blksz - 1);
1043
1044 /* number of new buffers to map */
1045 int lastbuf = ((foff + len - 1) >> log2blksz); /* last block file index we will write */
1046 int firstbuf = ((foff - 1) >> log2blksz); /* last block file index we have written */
1047 nbufs = lastbuf - firstbuf;
1048 TprintfT (DBG_LT2, "__collector_write_string firstbuf = %d, lastbuf = %d, nbufs = %d, log2blksz = %ld\n",
1049 firstbuf, lastbuf, nbufs, log2blksz);
1050 if (nbufs >= NBUFS)
1051 {
1052 Tprintf (0, "__collector_write_string ERROR: string of length %d too long to be written to text file: %s\n", len, hndl->fname);
1053 return 1;
1054 }
1055
1056 /* things are simple if we don't need new buffers */
1057 if (nbufs == 0)
1058 {
1059 /* try to update the handle position atomically */
1060 uint64_t npos = CUR_MAKE (0, indx, foff + len);
1061 if (__collector_cas_64p (&hndl->curpos, &opos, &npos) != opos)
1062 continue;
1063
1064 /* success! copy our string and we're done */
1065 TprintfT (DBG_LT2, "__collector_write_string writeBuffer[%d]: vaddr = %p, len = %d, foff = %lld, '%s'\n",
1066 indx, hndl->buffers[indx].vaddr, len, (long long) foff, src);
1067 writeBuffer (&hndl->buffers[indx], foff & (blksz - 1), src, len);
1068 break;
1069 }
1070
1071 /* initialize the new signal mask */
1072 sigset_t new_mask;
1073 sigset_t old_mask;
1074 CALL_UTIL (sigfillset)(&new_mask);
1075
1076 /* 6618470: disable thread cancellation */
1077 int old_cstate;
1078 pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &old_cstate);
1079 /* block all signals */
1080 CALL_UTIL (sigprocmask)(SIG_SETMASK, &new_mask, &old_mask);
1081
1082 /* but if we need new buffers, "lock" the handle pointer */
1083 uint64_t lpos = CUR_MAKE (1, indx, foff);
1084 if (__collector_cas_64p (&hndl->curpos, &opos, &lpos) != opos)
1085 {
1086 /* restore signal mask */
1087 CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
1088 /* Restore the previous cancellation state */
1089 pthread_setcancelstate (old_cstate, NULL);
1090 continue;
1091 }
1092
1093 /* map new buffers */
1094 base = ((foff - 1) & ~(blksz - 1)); /* last buffer to have been mapped */
1095 for (ibuf = 0; ibuf < nbufs; ibuf++)
1096 {
1097 base += blksz;
1098 buf_indices[ibuf] = newBuffer (hndl, base);
1099 if (buf_indices[ibuf] < 0)
1100 break;
1101 }
1102
1103 /* "unlock" the handle pointer */
1104 uint64_t npos = CUR_MAKE (0, indx, foff);
1105 if (ibuf == nbufs)
1106 npos = CUR_MAKE (0, buf_indices[nbufs - 1], foff + len);
1107 if (__collector_cas_64p (&hndl->curpos, &lpos, &npos) != lpos)
1108 {
1109 TprintfT (0, "__collector_write_string ERROR: file handle corrupted: %s\n", hndl->fname);
1110 /*
1111 * At this point, the handle is apparently corrupted and
1112 * presumably locked. No telling what's going on. Still
1113 * let's proceed and write our data and let a later thread
1114 * raise an error if it encounters one.
1115 */
1116 }
1117
1118 /* restore signal mask */
1119 CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
1120 /* Restore the previous cancellation state */
1121 pthread_setcancelstate (old_cstate, NULL);
1122
1123 /* if we couldn't map all the buffers we needed, don't write any part of the string */
1124 if (ibuf < nbufs)
1125 {
1126 TprintfT (0, "__collector_write_string ERROR: can't map new buffer: %s\n", hndl->fname);
1127 return 1;
1128 }
1129
1130 /* write any data to the old block */
1131 if (blk_off > 0)
1132 {
1133 TprintfT (DBG_LT2, "__collector_write_string partial writeBuffer[%d]: len=%ld, foff = %d '%s'\n",
1134 indx, blksz - blk_off, blk_off, src);
1135 writeBuffer (&hndl->buffers[indx], blk_off, src, blksz - blk_off);
1136 src += blksz - blk_off;
1137 len -= blksz - blk_off;
1138 }
1139
1140 /* write data to the new blocks */
1141 for (ibuf = 0; ibuf < nbufs; ibuf++)
1142 {
1143 int clen = blksz;
1144 if (clen > len)
1145 clen = len;
1146 TprintfT (DBG_LT2, "__collector_write_string continue writeBuffer[%d]: len= %d, %s",
1147 ibuf, clen, src);
1148 writeBuffer (&hndl->buffers[buf_indices[ibuf]], 0, src, clen);
1149 src += clen;
1150 len -= clen;
1151 }
1152 break;
1153 }
1154 return 0;
1155 }
1156