2007-03-27 Jon Ringle <jon@ringle.org>
[binutils-gdb.git] / gdb / gdbserver / linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street, Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 #include "server.h"
23 #include "linux-low.h"
24
25 #include <sys/wait.h>
26 #include <stdio.h>
27 #include <sys/param.h>
28 #include <sys/dir.h>
29 #include <sys/ptrace.h>
30 #include <sys/user.h>
31 #include <signal.h>
32 #include <sys/ioctl.h>
33 #include <fcntl.h>
34 #include <string.h>
35 #include <stdlib.h>
36 #include <unistd.h>
37 #include <errno.h>
38 #include <sys/syscall.h>
39
40 #ifndef PTRACE_GETSIGINFO
41 # define PTRACE_GETSIGINFO 0x4202
42 # define PTRACE_SETSIGINFO 0x4203
43 #endif
44
45 #ifdef __UCLIBC__
46 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
47 #define HAS_NOMMU
48 #endif
49 #endif
50
51 /* ``all_threads'' is keyed by the LWP ID, but it should really be keyed by the
52 thread ID; fixing that requires changing the ID in place as soon as we go
53 from !using_threads to using_threads.
54
55 ``all_processes'' is keyed by the process ID - which on Linux is (presently)
56 the same as the LWP ID. */
57
58 struct inferior_list all_processes;
59
60 /* FIXME this is a bit of a hack, and could be removed. */
61 int stopping_threads;
62
63 /* FIXME make into a target method? */
64 int using_threads;
65
66 static void linux_resume_one_process (struct inferior_list_entry *entry,
67 int step, int signal, siginfo_t *info);
68 static void linux_resume (struct thread_resume *resume_info);
69 static void stop_all_processes (void);
70 static int linux_wait_for_event (struct thread_info *child);
71
72 struct pending_signals
73 {
74 int signal;
75 siginfo_t info;
76 struct pending_signals *prev;
77 };
78
79 #define PTRACE_ARG3_TYPE long
80 #define PTRACE_XFER_TYPE long
81
82 #ifdef HAVE_LINUX_REGSETS
83 static int use_regsets_p = 1;
84 #endif
85
86 #define pid_of(proc) ((proc)->head.id)
87
88 /* FIXME: Delete eventually. */
89 #define inferior_pid (pid_of (get_thread_process (current_inferior)))
90
91 /* This function should only be called if the process got a SIGTRAP.
92 The SIGTRAP could mean several things.
93
94 On i386, where decr_pc_after_break is non-zero:
95 If we were single-stepping this process using PTRACE_SINGLESTEP,
96 we will get only the one SIGTRAP (even if the instruction we
97 stepped over was a breakpoint). The value of $eip will be the
98 next instruction.
99 If we continue the process using PTRACE_CONT, we will get a
100 SIGTRAP when we hit a breakpoint. The value of $eip will be
101 the instruction after the breakpoint (i.e. needs to be
102 decremented). If we report the SIGTRAP to GDB, we must also
103 report the undecremented PC. If we cancel the SIGTRAP, we
104 must resume at the decremented PC.
105
106 (Presumably, not yet tested) On a non-decr_pc_after_break machine
107 with hardware or kernel single-step:
108 If we single-step over a breakpoint instruction, our PC will
109 point at the following instruction. If we continue and hit a
110 breakpoint instruction, our PC will point at the breakpoint
111 instruction. */
112
113 static CORE_ADDR
114 get_stop_pc (void)
115 {
116 CORE_ADDR stop_pc = (*the_low_target.get_pc) ();
117
118 if (get_thread_process (current_inferior)->stepping)
119 return stop_pc;
120 else
121 return stop_pc - the_low_target.decr_pc_after_break;
122 }
123
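/* Allocate and initialize a new process_info for PID, defaulting both the
thread ID and the LWP ID to PID, and add it to the all_processes list. */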
124 static void *
125 add_process (unsigned long pid)
126 {
127 struct process_info *process;
128
129 process = (struct process_info *) malloc (sizeof (*process));
130 memset (process, 0, sizeof (*process));
131
132 process->head.id = pid;
133
134 /* Default to tid == lwpid == pid. */
135 process->tid = pid;
136 process->lwpid = pid;
137
138 add_inferior_to_list (&all_processes, &process->head);
139
140 return process;
141 }
142
143 /* Start an inferior process and return its pid.
144 ALLARGS is a vector of program-name and args. */
145
146 static int
147 linux_create_inferior (char *program, char **allargs)
148 {
149 void *new_process;
150 int pid;
151
152 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
153 pid = vfork ();
154 #else
155 pid = fork ();
156 #endif
157 if (pid < 0)
158 perror_with_name ("fork");
159
160 if (pid == 0)
161 {
162 ptrace (PTRACE_TRACEME, 0, 0, 0);
163
164 signal (__SIGRTMIN + 1, SIG_DFL);
165
166 setpgid (0, 0);
167
168 execv (program, allargs);
169
170 fprintf (stderr, "Cannot exec %s: %s.\n", program,
171 strerror (errno));
172 fflush (stderr);
173 _exit (0177);
174 }
175
176 new_process = add_process (pid);
177 add_thread (pid, new_process, pid);
178
179 return pid;
180 }
181
182 /* Attach to an inferior process. */
183
184 void
185 linux_attach_lwp (unsigned long pid, unsigned long tid)
186 {
187 struct process_info *new_process;
188
189 if (ptrace (PTRACE_ATTACH, pid, 0, 0) != 0)
190 {
191 fprintf (stderr, "Cannot attach to process %ld: %s (%d)\n", pid,
192 strerror (errno), errno);
193 fflush (stderr);
194
195 /* If we fail to attach to an LWP, just return. */
196 if (!using_threads)
197 _exit (0177);
198 return;
199 }
200
201 new_process = (struct process_info *) add_process (pid);
202 add_thread (tid, new_process, pid);
203
204 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
205 brings it to a halt. We should ignore that SIGSTOP and resume the process
206 (unless this is the first process, in which case the flag will be cleared
207 in linux_attach).
208
209 On the other hand, if we are currently trying to stop all threads, we
210 should treat the new thread as if we had sent it a SIGSTOP. This works
211 because we are guaranteed that add_process added us to the end of the
212 list, and so the new thread has not yet reached wait_for_sigstop (but
213 will). */
214 if (! stopping_threads)
215 new_process->stop_expected = 1;
216 }
217
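/* Attach to the process with ID PID. Unlike additional LWPs attached later,
the initial SIGSTOP caused by this attach is reported to GDB rather than
ignored. */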
218 int
219 linux_attach (unsigned long pid)
220 {
221 struct process_info *process;
222
223 linux_attach_lwp (pid, pid);
224
225 /* Don't ignore the initial SIGSTOP if we just attached to this process. */
226 process = (struct process_info *) find_inferior_id (&all_processes, pid);
227 process->stop_expected = 0;
228
229 return 0;
230 }
231
232 /* Kill the inferior process. Make us have no inferior. */
233
234 static void
235 linux_kill_one_process (struct inferior_list_entry *entry)
236 {
237 struct thread_info *thread = (struct thread_info *) entry;
238 struct process_info *process = get_thread_process (thread);
239 int wstat;
240
241 /* We avoid killing the first thread here, because of a Linux kernel (at
242 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
243 the children get a chance to be reaped, it will remain a zombie
244 forever. */
245 if (entry == all_threads.head)
246 return;
247
248 do
249 {
250 ptrace (PTRACE_KILL, pid_of (process), 0, 0);
251
252 /* Make sure it died. The loop is most likely unnecessary. */
253 wstat = linux_wait_for_event (thread);
254 } while (WIFSTOPPED (wstat));
255 }
256
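/* Kill every thread of the inferior. The first thread in the list is killed
last, to avoid the kernel zombie problem described above. */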
257 static void
258 linux_kill (void)
259 {
260 struct thread_info *thread = (struct thread_info *) all_threads.head;
261 struct process_info *process;
262 int wstat;
263
264 if (thread == NULL)
265 return;
266
267 for_each_inferior (&all_threads, linux_kill_one_process);
268
269 /* See the comment in linux_kill_one_process. We did not kill the first
270 thread in the list, so do so now. */
271 process = get_thread_process (thread);
272 do
273 {
274 ptrace (PTRACE_KILL, pid_of (process), 0, 0);
275
276 /* Make sure it died. The loop is most likely unnecessary. */
277 wstat = linux_wait_for_event (thread);
278 } while (WIFSTOPPED (wstat));
279 }
280
281 static void
282 linux_detach_one_process (struct inferior_list_entry *entry)
283 {
284 struct thread_info *thread = (struct thread_info *) entry;
285 struct process_info *process = get_thread_process (thread);
286
287 ptrace (PTRACE_DETACH, pid_of (process), 0, 0);
288 }
289
290 static void
291 linux_detach (void)
292 {
293 for_each_inferior (&all_threads, linux_detach_one_process);
294 }
295
296 /* Return nonzero if the given thread is still alive. */
297 static int
298 linux_thread_alive (unsigned long tid)
299 {
300 if (find_inferior_id (&all_threads, tid) != NULL)
301 return 1;
302 else
303 return 0;
304 }
305
306 /* Return nonzero if this process stopped at a breakpoint which
307 no longer appears to be inserted. Also adjust the PC
308 appropriately to resume where the breakpoint used to be. */
309 static int
310 check_removed_breakpoint (struct process_info *event_child)
311 {
312 CORE_ADDR stop_pc;
313 struct thread_info *saved_inferior;
314
315 if (event_child->pending_is_breakpoint == 0)
316 return 0;
317
318 if (debug_threads)
319 fprintf (stderr, "Checking for breakpoint.\n");
320
321 saved_inferior = current_inferior;
322 current_inferior = get_process_thread (event_child);
323
324 stop_pc = get_stop_pc ();
325
326 /* If the PC has changed since we stopped, then we shouldn't do
327 anything. This happens if, for instance, GDB handled the
328 decr_pc_after_break subtraction itself. */
329 if (stop_pc != event_child->pending_stop_pc)
330 {
331 if (debug_threads)
332 fprintf (stderr, "Ignoring, PC was changed.\n");
333
334 event_child->pending_is_breakpoint = 0;
335 current_inferior = saved_inferior;
336 return 0;
337 }
338
339 /* If the breakpoint is still there, we will report hitting it. */
340 if ((*the_low_target.breakpoint_at) (stop_pc))
341 {
342 if (debug_threads)
343 fprintf (stderr, "Ignoring, breakpoint is still present.\n");
344 current_inferior = saved_inferior;
345 return 0;
346 }
347
348 if (debug_threads)
349 fprintf (stderr, "Removed breakpoint.\n");
350
351 /* For decr_pc_after_break targets, here is where we perform the
352 decrement. We go immediately from this function to resuming,
353 and can not safely call get_stop_pc () again. */
354 if (the_low_target.set_pc != NULL)
355 (*the_low_target.set_pc) (stop_pc);
356
357 /* We consumed the pending SIGTRAP. */
358 event_child->pending_is_breakpoint = 0;
359 event_child->status_pending_p = 0;
360 event_child->status_pending = 0;
361
362 current_inferior = saved_inferior;
363 return 1;
364 }
365
366 /* Return 1 if this process has an interesting status pending. This function
367 may silently resume an inferior process. */
368 static int
369 status_pending_p (struct inferior_list_entry *entry, void *dummy)
370 {
371 struct process_info *process = (struct process_info *) entry;
372
373 if (process->status_pending_p)
374 if (check_removed_breakpoint (process))
375 {
376 /* This thread was stopped at a breakpoint, and the breakpoint
377 is now gone. We were told to continue (or step...) all threads,
378 so GDB isn't trying to single-step past this breakpoint.
379 So instead of reporting the old SIGTRAP, pretend we got to
380 the breakpoint just after it was removed instead of just
381 before; resume the process. */
382 linux_resume_one_process (&process->head, 0, 0, NULL);
383 return 0;
384 }
385
386 return process->status_pending_p;
387 }
388
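/* Wait for a status change in the process pointed to by *CHILDP, or in any
child if *CHILDP is NULL. Store the wait status in *WSTATP, point *CHILDP
at the process that reported it, and mark that process as stopped. Both
normal and __WCLONE children are polled. */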
389 static void
390 linux_wait_for_process (struct process_info **childp, int *wstatp)
391 {
392 int ret;
393 int to_wait_for = -1;
394
395 if (*childp != NULL)
396 to_wait_for = (*childp)->lwpid;
397
398 while (1)
399 {
400 ret = waitpid (to_wait_for, wstatp, WNOHANG);
401
402 if (ret == -1)
403 {
404 if (errno != ECHILD)
405 perror_with_name ("waitpid");
406 }
407 else if (ret > 0)
408 break;
409
410 ret = waitpid (to_wait_for, wstatp, WNOHANG | __WCLONE);
411
412 if (ret == -1)
413 {
414 if (errno != ECHILD)
415 perror_with_name ("waitpid (WCLONE)");
416 }
417 else if (ret > 0)
418 break;
419
420 usleep (1000);
421 }
422
423 if (debug_threads
424 && (!WIFSTOPPED (*wstatp)
425 || (WSTOPSIG (*wstatp) != 32
426 && WSTOPSIG (*wstatp) != 33)))
427 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
428
429 if (to_wait_for == -1)
430 *childp = (struct process_info *) find_inferior_id (&all_processes, ret);
431
432 (*childp)->stopped = 1;
433 (*childp)->pending_is_breakpoint = 0;
434
435 (*childp)->last_status = *wstatp;
436
437 if (debug_threads
438 && WIFSTOPPED (*wstatp))
439 {
440 current_inferior = (struct thread_info *)
441 find_inferior_id (&all_threads, (*childp)->tid);
442 /* For testing only; i386_stop_pc prints out a diagnostic. */
443 if (the_low_target.get_pc != NULL)
444 get_stop_pc ();
445 }
446 }
447
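/* Wait for an event from CHILD, or from any thread if CHILD is NULL. Events
that do not need to be reported to GDB (expected SIGSTOPs, pass-through
signals, and gdbserver's own breakpoints) are handled internally and the
thread resumed; the wait status of the first reportable event is returned. */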
448 static int
449 linux_wait_for_event (struct thread_info *child)
450 {
451 CORE_ADDR stop_pc;
452 struct process_info *event_child;
453 int wstat;
454
455 /* Check for a process with a pending status. */
456 /* It is possible that the user changed the pending task's registers since
457 it stopped. We correctly handle the change of PC if we hit a breakpoint
458 (in check_removed_breakpoint); signals should be reported anyway. */
459 if (child == NULL)
460 {
461 event_child = (struct process_info *)
462 find_inferior (&all_processes, status_pending_p, NULL);
463 if (debug_threads && event_child)
464 fprintf (stderr, "Got a pending child %ld\n", event_child->lwpid);
465 }
466 else
467 {
468 event_child = get_thread_process (child);
469 if (event_child->status_pending_p
470 && check_removed_breakpoint (event_child))
471 event_child = NULL;
472 }
473
474 if (event_child != NULL)
475 {
476 if (event_child->status_pending_p)
477 {
478 if (debug_threads)
479 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
480 event_child->lwpid, event_child->status_pending);
481 wstat = event_child->status_pending;
482 event_child->status_pending_p = 0;
483 event_child->status_pending = 0;
484 current_inferior = get_process_thread (event_child);
485 return wstat;
486 }
487 }
488
489 /* We only enter this loop if no process has a pending wait status. Thus
490 any action taken in response to a wait status inside this loop is
491 responding as soon as we detect the status, not after any pending
492 events. */
493 while (1)
494 {
495 if (child == NULL)
496 event_child = NULL;
497 else
498 event_child = get_thread_process (child);
499
500 linux_wait_for_process (&event_child, &wstat);
501
502 if (event_child == NULL)
503 error ("event from unknown child");
504
505 current_inferior = (struct thread_info *)
506 find_inferior_id (&all_threads, event_child->tid);
507
508 /* Check for thread exit. */
509 if (using_threads && ! WIFSTOPPED (wstat))
510 {
511 if (debug_threads)
512 fprintf (stderr, "Thread %ld (LWP %ld) exiting\n",
513 event_child->tid, event_child->head.id);
514
515 /* If the last thread is exiting, just return. */
516 if (all_threads.head == all_threads.tail)
517 return wstat;
518
519 dead_thread_notify (event_child->tid);
520
521 remove_inferior (&all_processes, &event_child->head);
522 free (event_child);
523 remove_thread (current_inferior);
524 current_inferior = (struct thread_info *) all_threads.head;
525
526 /* If we were waiting for this particular child to do something...
527 well, it did something. */
528 if (child != NULL)
529 return wstat;
530
531 /* Wait for a more interesting event. */
532 continue;
533 }
534
535 if (using_threads
536 && WIFSTOPPED (wstat)
537 && WSTOPSIG (wstat) == SIGSTOP
538 && event_child->stop_expected)
539 {
540 if (debug_threads)
541 fprintf (stderr, "Expected stop.\n");
542 event_child->stop_expected = 0;
543 linux_resume_one_process (&event_child->head,
544 event_child->stepping, 0, NULL);
545 continue;
546 }
547
548 /* If GDB is not interested in this signal, don't stop other
549 threads, and don't report it to GDB. Just resume the
550 inferior right away. We do this for threading-related
551 signals as well as any that GDB specifically requested
552 we ignore. But never ignore SIGSTOP if we sent it
553 ourselves. */
554 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
555 thread library? */
556 if (WIFSTOPPED (wstat)
557 && ((using_threads && (WSTOPSIG (wstat) == __SIGRTMIN
558 || WSTOPSIG (wstat) == __SIGRTMIN + 1))
559 || (pass_signals[target_signal_from_host (WSTOPSIG (wstat))]
560 && (WSTOPSIG (wstat) != SIGSTOP
561 || !event_child->sigstop_sent))))
562 {
563 siginfo_t info, *info_p;
564
565 if (debug_threads)
566 fprintf (stderr, "Ignored signal %d for %ld (LWP %ld).\n",
567 WSTOPSIG (wstat), event_child->tid,
568 event_child->head.id);
569
570 if (ptrace (PTRACE_GETSIGINFO, event_child->lwpid, 0, &info) == 0)
571 info_p = &info;
572 else
573 info_p = NULL;
574 linux_resume_one_process (&event_child->head,
575 event_child->stepping,
576 WSTOPSIG (wstat), info_p);
577 continue;
578 }
579
580 /* If this event was not handled above, and is not a SIGTRAP, report
581 it. */
582 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGTRAP)
583 return wstat;
584
585 /* If this target does not support breakpoints, we simply report the
586 SIGTRAP; it's of no concern to us. */
587 if (the_low_target.get_pc == NULL)
588 return wstat;
589
590 stop_pc = get_stop_pc ();
591
592 /* bp_reinsert will only be set if we were single-stepping.
593 Notice that we will resume the process after hitting
594 a gdbserver breakpoint; single-stepping to/over one
595 is not supported (yet). */
596 if (event_child->bp_reinsert != 0)
597 {
598 if (debug_threads)
599 fprintf (stderr, "Reinserted breakpoint.\n");
600 reinsert_breakpoint (event_child->bp_reinsert);
601 event_child->bp_reinsert = 0;
602
603 /* Clear the single-stepping flag and SIGTRAP as we resume. */
604 linux_resume_one_process (&event_child->head, 0, 0, NULL);
605 continue;
606 }
607
608 if (debug_threads)
609 fprintf (stderr, "Hit a (non-reinsert) breakpoint.\n");
610
611 if (check_breakpoints (stop_pc) != 0)
612 {
613 /* We hit one of our own breakpoints. We mark it as a pending
614 breakpoint, so that check_removed_breakpoint () will do the PC
615 adjustment for us at the appropriate time. */
616 event_child->pending_is_breakpoint = 1;
617 event_child->pending_stop_pc = stop_pc;
618
619 /* Now we need to put the breakpoint back. We continue in the event
620 loop instead of simply replacing the breakpoint right away,
621 in order to not lose signals sent to the thread that hit the
622 breakpoint. Unfortunately this increases the window where another
623 thread could sneak past the removed breakpoint. For the current
624 use of server-side breakpoints (thread creation) this is
625 acceptable; but it needs to be considered before this breakpoint
626 mechanism can be used in more general ways. For some breakpoints
627 it may be necessary to stop all other threads, but that should
628 be avoided where possible.
629
630 If breakpoint_reinsert_addr is NULL, that means that we can
631 use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint,
632 mark it for reinsertion, and single-step.
633
634 Otherwise, call the target function to figure out where we need
635 our temporary breakpoint, create it, and continue executing this
636 process. */
637 if (the_low_target.breakpoint_reinsert_addr == NULL)
638 {
639 event_child->bp_reinsert = stop_pc;
640 uninsert_breakpoint (stop_pc);
641 linux_resume_one_process (&event_child->head, 1, 0, NULL);
642 }
643 else
644 {
645 reinsert_breakpoint_by_bp
646 (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
647 linux_resume_one_process (&event_child->head, 0, 0, NULL);
648 }
649
650 continue;
651 }
652
653 /* If we were single-stepping, we definitely want to report the
654 SIGTRAP. The single-step operation has completed, so also
655 clear the stepping flag; in general this does not matter,
656 because the SIGTRAP will be reported to the client, which
657 will give us a new action for this thread, but clear it for
658 consistency anyway. It's safe to clear the stepping flag
659 because the only consumer of get_stop_pc () after this point
660 is check_removed_breakpoint, and pending_is_breakpoint is not
661 set. It might be wiser to use a step_completed flag instead. */
662 if (event_child->stepping)
663 {
664 event_child->stepping = 0;
665 return wstat;
666 }
667
668 /* A SIGTRAP that we can't explain. It may have been a breakpoint.
669 Check if it is a breakpoint, and if so mark the process information
670 accordingly. This will handle both the necessary fiddling with the
671 PC on decr_pc_after_break targets and suppressing extra threads
672 hitting a breakpoint if two hit it at once and then GDB removes it
673 after the first is reported. Arguably it would be better to report
674 multiple threads hitting breakpoints simultaneously, but the current
675 remote protocol does not allow this. */
676 if ((*the_low_target.breakpoint_at) (stop_pc))
677 {
678 event_child->pending_is_breakpoint = 1;
679 event_child->pending_stop_pc = stop_pc;
680 }
681
682 return wstat;
683 }
684
685 /* NOTREACHED */
686 return 0;
687 }
688
689 /* Wait for the inferior to stop or exit; set *STATUS to 'T', 'W', or 'X' and return the corresponding stop signal, exit code, or termination signal. */
690
691 static unsigned char
692 linux_wait (char *status)
693 {
694 int w;
695 struct thread_info *child = NULL;
696
697 retry:
698 /* If we were only supposed to resume one thread, only wait for
699 that thread - if it's still alive. If it died, however - which
700 can happen if we're coming from the thread death case below -
701 then we need to make sure we restart the other threads. We could
702 pick a thread at random or restart all; restarting all is less
703 arbitrary. */
704 if (cont_thread != 0 && cont_thread != -1)
705 {
706 child = (struct thread_info *) find_inferior_id (&all_threads,
707 cont_thread);
708
709 /* No stepping, no signal - unless one is pending already, of course. */
710 if (child == NULL)
711 {
712 struct thread_resume resume_info;
713 resume_info.thread = -1;
714 resume_info.step = resume_info.sig = resume_info.leave_stopped = 0;
715 linux_resume (&resume_info);
716 }
717 }
718
719 enable_async_io ();
720 unblock_async_io ();
721 w = linux_wait_for_event (child);
722 stop_all_processes ();
723 disable_async_io ();
724
725 /* If we are waiting for a particular child, and it exited,
726 linux_wait_for_event will return its exit status. Similarly if
727 the last child exited. If this is not the last child, however,
728 do not report it as exited until there is a 'thread exited' response
729 available in the remote protocol. Instead, just wait for another event.
730 This should be safe, because if the thread crashed we will already
731 have reported the termination signal to GDB; that should stop any
732 in-progress stepping operations, etc.
733
734 Report the exit status of the last thread to exit. This matches
735 LinuxThreads' behavior. */
736
737 if (all_threads.head == all_threads.tail)
738 {
739 if (WIFEXITED (w))
740 {
741 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
742 *status = 'W';
743 clear_inferiors ();
744 free (all_processes.head);
745 all_processes.head = all_processes.tail = NULL;
746 return WEXITSTATUS (w);
747 }
748 else if (!WIFSTOPPED (w))
749 {
750 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
751 *status = 'X';
752 clear_inferiors ();
753 free (all_processes.head);
754 all_processes.head = all_processes.tail = NULL;
755 return target_signal_from_host (WTERMSIG (w));
756 }
757 }
758 else
759 {
760 if (!WIFSTOPPED (w))
761 goto retry;
762 }
763
764 *status = 'T';
765 return target_signal_from_host (WSTOPSIG (w));
766 }
767
768 /* Send a signal to an LWP. For LinuxThreads, kill is enough; however, if
769 thread groups are in use, we need to use tkill. */
770
771 static int
772 kill_lwp (unsigned long lwpid, int signo)
773 {
774 static int tkill_failed;
775
776 errno = 0;
777
778 #ifdef SYS_tkill
779 if (!tkill_failed)
780 {
781 int ret = syscall (SYS_tkill, lwpid, signo);
782 if (errno != ENOSYS)
783 return ret;
784 errno = 0;
785 tkill_failed = 1;
786 }
787 #endif
788
789 return kill (lwpid, signo);
790 }
791
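/* Send a SIGSTOP to the process described by ENTRY, unless it is already
stopped or a stop for it is already expected. */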
792 static void
793 send_sigstop (struct inferior_list_entry *entry)
794 {
795 struct process_info *process = (struct process_info *) entry;
796
797 if (process->stopped)
798 return;
799
800 /* If we already have a pending stop signal for this process, don't
801 send another. */
802 if (process->stop_expected)
803 {
804 process->stop_expected = 0;
805 return;
806 }
807
808 if (debug_threads)
809 fprintf (stderr, "Sending sigstop to process %ld\n", process->head.id);
810
811 kill_lwp (process->head.id, SIGSTOP);
812 process->sigstop_sent = 1;
813 }
814
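/* Wait for the process described by ENTRY to stop. If it stops with a
signal other than SIGSTOP, record that status as pending and remember that
a SIGSTOP is still expected. */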
815 static void
816 wait_for_sigstop (struct inferior_list_entry *entry)
817 {
818 struct process_info *process = (struct process_info *) entry;
819 struct thread_info *saved_inferior, *thread;
820 int wstat;
821 unsigned long saved_tid;
822
823 if (process->stopped)
824 return;
825
826 saved_inferior = current_inferior;
827 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
828 thread = (struct thread_info *) find_inferior_id (&all_threads,
829 process->tid);
830 wstat = linux_wait_for_event (thread);
831
832 /* If we stopped with a non-SIGSTOP signal, save it for later
833 and record the pending SIGSTOP. If the process exited, just
834 return. */
835 if (WIFSTOPPED (wstat)
836 && WSTOPSIG (wstat) != SIGSTOP)
837 {
838 if (debug_threads)
839 fprintf (stderr, "Stopped with non-sigstop signal\n");
840 process->status_pending_p = 1;
841 process->status_pending = wstat;
842 process->stop_expected = 1;
843 }
844
845 if (linux_thread_alive (saved_tid))
846 current_inferior = saved_inferior;
847 else
848 {
849 if (debug_threads)
850 fprintf (stderr, "Previously current thread died.\n");
851
852 /* Set a valid thread as current. */
853 set_desired_inferior (0);
854 }
855 }
856
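/* Stop every inferior process by sending each one a SIGSTOP and then
waiting until all of them have stopped. */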
857 static void
858 stop_all_processes (void)
859 {
860 stopping_threads = 1;
861 for_each_inferior (&all_processes, send_sigstop);
862 for_each_inferior (&all_processes, wait_for_sigstop);
863 stopping_threads = 0;
864 }
865
866 /* Resume execution of the inferior process.
867 If STEP is nonzero, single-step it.
868 If SIGNAL is nonzero, give it that signal. */
869
870 static void
871 linux_resume_one_process (struct inferior_list_entry *entry,
872 int step, int signal, siginfo_t *info)
873 {
874 struct process_info *process = (struct process_info *) entry;
875 struct thread_info *saved_inferior;
876
877 if (process->stopped == 0)
878 return;
879
880 /* If we have pending signals or status, and a new signal, enqueue the
881 signal. Also enqueue the signal if we are waiting to reinsert a
882 breakpoint; it will be picked up again below. */
883 if (signal != 0
884 && (process->status_pending_p || process->pending_signals != NULL
885 || process->bp_reinsert != 0))
886 {
887 struct pending_signals *p_sig;
888 p_sig = malloc (sizeof (*p_sig));
889 p_sig->prev = process->pending_signals;
890 p_sig->signal = signal;
891 if (info == NULL)
892 memset (&p_sig->info, 0, sizeof (siginfo_t));
893 else
894 memcpy (&p_sig->info, info, sizeof (siginfo_t));
895 process->pending_signals = p_sig;
896 }
897
898 if (process->status_pending_p && !check_removed_breakpoint (process))
899 return;
900
901 saved_inferior = current_inferior;
902 current_inferior = get_process_thread (process);
903
904 if (debug_threads)
905 fprintf (stderr, "Resuming process %ld (%s, signal %d, stop %s)\n", inferior_pid,
906 step ? "step" : "continue", signal,
907 process->stop_expected ? "expected" : "not expected");
908
909 /* This bit needs some thinking about. If we get a signal that
910 we must report while a single-step reinsert is still pending,
911 we often end up resuming the thread. It might be better to
912 (ew) allow a stack of pending events; then we could be sure that
913 the reinsert happened right away and not lose any signals.
914
915 Making this stack would also shrink the window in which breakpoints are
916 uninserted (see comment in linux_wait_for_process) but not enough for
917 complete correctness, so it won't solve that problem. It may be
918 worthwhile just to solve this one, however. */
919 if (process->bp_reinsert != 0)
920 {
921 if (debug_threads)
922 fprintf (stderr, " pending reinsert at %08lx", (long)process->bp_reinsert);
923 if (step == 0)
924 fprintf (stderr, "BAD - reinserting but not stepping.\n");
925 step = 1;
926
927 /* Postpone any pending signal. It was enqueued above. */
928 signal = 0;
929 }
930
931 check_removed_breakpoint (process);
932
933 if (debug_threads && the_low_target.get_pc != NULL)
934 {
935 fprintf (stderr, " ");
936 (*the_low_target.get_pc) ();
937 }
938
939 /* If we have pending signals, consume one unless we are trying to reinsert
940 a breakpoint. */
941 if (process->pending_signals != NULL && process->bp_reinsert == 0)
942 {
943 struct pending_signals **p_sig;
944
945 p_sig = &process->pending_signals;
946 while ((*p_sig)->prev != NULL)
947 p_sig = &(*p_sig)->prev;
948
949 signal = (*p_sig)->signal;
950 if ((*p_sig)->info.si_signo != 0)
951 ptrace (PTRACE_SETSIGINFO, process->lwpid, 0, &(*p_sig)->info);
952
953 free (*p_sig);
954 *p_sig = NULL;
955 }
956
957 regcache_invalidate_one ((struct inferior_list_entry *)
958 get_process_thread (process));
959 errno = 0;
960 process->stopped = 0;
961 process->stepping = step;
962 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, process->lwpid, 0, signal);
963
964 current_inferior = saved_inferior;
965 if (errno)
966 perror_with_name ("ptrace");
967 }
968
969 static struct thread_resume *resume_ptr;
970
971 /* This function is called once per thread. We look up the thread
972 in RESUME_PTR, and mark the thread with a pointer to the appropriate
973 resume request.
974
975 This algorithm is O(threads * resume elements), but resume elements
976 is small (and will remain small at least until GDB supports thread
977 suspension). */
978 static void
979 linux_set_resume_request (struct inferior_list_entry *entry)
980 {
981 struct process_info *process;
982 struct thread_info *thread;
983 int ndx;
984
985 thread = (struct thread_info *) entry;
986 process = get_thread_process (thread);
987
988 ndx = 0;
989 while (resume_ptr[ndx].thread != -1 && resume_ptr[ndx].thread != entry->id)
990 ndx++;
991
992 process->resume = &resume_ptr[ndx];
993 }
994
995 /* This function is called once per thread. We check the thread's resume
996 request, which will tell us whether to resume, step, or leave the thread
997 stopped; and what signal, if any, it should be sent. For threads which
998 we aren't explicitly told otherwise, we preserve the stepping flag; this
999 is used for stepping over gdbserver-placed breakpoints. */
1000
1001 static void
1002 linux_continue_one_thread (struct inferior_list_entry *entry)
1003 {
1004 struct process_info *process;
1005 struct thread_info *thread;
1006 int step;
1007
1008 thread = (struct thread_info *) entry;
1009 process = get_thread_process (thread);
1010
1011 if (process->resume->leave_stopped)
1012 return;
1013
1014 if (process->resume->thread == -1)
1015 step = process->stepping || process->resume->step;
1016 else
1017 step = process->resume->step;
1018
1019 linux_resume_one_process (&process->head, step, process->resume->sig, NULL);
1020
1021 process->resume = NULL;
1022 }
1023
1024 /* This function is called once per thread. We check the thread's resume
1025 request, which will tell us whether to resume, step, or leave the thread
1026 stopped; and what signal, if any, it should be sent. We queue any needed
1027 signals, since we won't actually resume. We already have a pending event
1028 to report, so we don't need to preserve any step requests; they should
1029 be re-issued if necessary. */
1030
1031 static void
1032 linux_queue_one_thread (struct inferior_list_entry *entry)
1033 {
1034 struct process_info *process;
1035 struct thread_info *thread;
1036
1037 thread = (struct thread_info *) entry;
1038 process = get_thread_process (thread);
1039
1040 if (process->resume->leave_stopped)
1041 return;
1042
1043 /* If we have a new signal, enqueue the signal. */
1044 if (process->resume->sig != 0)
1045 {
1046 struct pending_signals *p_sig;
1047 p_sig = malloc (sizeof (*p_sig));
1048 p_sig->prev = process->pending_signals;
1049 p_sig->signal = process->resume->sig;
1050 memset (&p_sig->info, 0, sizeof (siginfo_t));
1051
1052 /* If this is the same signal we were previously stopped by,
1053 make sure to queue its siginfo. We can ignore the return
1054 value of ptrace; if it fails, we'll skip
1055 PTRACE_SETSIGINFO. */
1056 if (WIFSTOPPED (process->last_status)
1057 && WSTOPSIG (process->last_status) == process->resume->sig)
1058 ptrace (PTRACE_GETSIGINFO, process->lwpid, 0, &p_sig->info);
1059
1060 process->pending_signals = p_sig;
1061 }
1062
1063 process->resume = NULL;
1064 }
1065
1066 /* Set *FLAG_P if this process has an interesting status pending. */
1067 static int
1068 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1069 {
1070 struct process_info *process = (struct process_info *) entry;
1071
1072 /* Processes which will not be resumed are not interesting, because
1073 we might not wait for them next time through linux_wait. */
1074 if (process->resume->leave_stopped)
1075 return 0;
1076
1077 /* If this thread has a removed breakpoint, we won't have any
1078 events to report later, so check now. check_removed_breakpoint
1079 may clear status_pending_p. We avoid calling check_removed_breakpoint
1080 for any thread that we are not otherwise going to resume - this
1081 lets us preserve stopped status when two threads hit a breakpoint.
1082 GDB removes the breakpoint to single-step a particular thread
1083 past it, then re-inserts it and resumes all threads. We want
1084 to report the second thread without resuming it in the interim. */
1085 if (process->status_pending_p)
1086 check_removed_breakpoint (process);
1087
1088 if (process->status_pending_p)
1089 * (int *) flag_p = 1;
1090
1091 return 0;
1092 }
1093
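/* Resume the inferior according to RESUME_INFO. If any thread that would be
resumed already has an interesting status pending, no thread is actually
resumed; instead any requested signals are queued for later delivery. */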
1094 static void
1095 linux_resume (struct thread_resume *resume_info)
1096 {
1097 int pending_flag;
1098
1099 /* Yes, the use of a global here is rather ugly. */
1100 resume_ptr = resume_info;
1101
1102 for_each_inferior (&all_threads, linux_set_resume_request);
1103
1104 /* If there is a thread which would otherwise be resumed, which
1105 has a pending status, then don't resume any threads - we can just
1106 report the pending status. Make sure to queue any signals
1107 that would otherwise be sent. */
1108 pending_flag = 0;
1109 find_inferior (&all_processes, resume_status_pending_p, &pending_flag);
1110
1111 if (debug_threads)
1112 {
1113 if (pending_flag)
1114 fprintf (stderr, "Not resuming, pending status\n");
1115 else
1116 fprintf (stderr, "Resuming, no pending status\n");
1117 }
1118
1119 if (pending_flag)
1120 for_each_inferior (&all_threads, linux_queue_one_thread);
1121 else
1122 {
1123 block_async_io ();
1124 enable_async_io ();
1125 for_each_inferior (&all_threads, linux_continue_one_thread);
1126 }
1127 }
1128
1129 #ifdef HAVE_LINUX_USRREGS
1130
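/* Return the offset of register REGNUM within the inferior's USER area, as
described by the target's regmap. */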
1131 int
1132 register_addr (int regnum)
1133 {
1134 int addr;
1135
1136 if (regnum < 0 || regnum >= the_low_target.num_regs)
1137 error ("Invalid register number %d.", regnum);
1138
1139 addr = the_low_target.regmap[regnum];
1140
1141 return addr;
1142 }
1143
1144 /* Fetch one register. */
1145 static void
1146 fetch_register (int regno)
1147 {
1148 CORE_ADDR regaddr;
1149 int i, size;
1150 char *buf;
1151
1152 if (regno >= the_low_target.num_regs)
1153 return;
1154 if ((*the_low_target.cannot_fetch_register) (regno))
1155 return;
1156
1157 regaddr = register_addr (regno);
1158 if (regaddr == -1)
1159 return;
1160 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1161 & - sizeof (PTRACE_XFER_TYPE);
1162 buf = alloca (size);
1163 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1164 {
1165 errno = 0;
1166 *(PTRACE_XFER_TYPE *) (buf + i) =
1167 ptrace (PTRACE_PEEKUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr, 0);
1168 regaddr += sizeof (PTRACE_XFER_TYPE);
1169 if (errno != 0)
1170 {
1171 /* Warning, not error, in case we are attached; sometimes the
1172 kernel doesn't let us at the registers. */
1173 char *err = strerror (errno);
1174 char *msg = alloca (strlen (err) + 128);
1175 sprintf (msg, "reading register %d: %s", regno, err);
1176 error (msg);
1177 goto error_exit;
1178 }
1179 }
1180 if (the_low_target.left_pad_xfer
1181 && register_size (regno) < sizeof (PTRACE_XFER_TYPE))
1182 supply_register (regno, (buf + sizeof (PTRACE_XFER_TYPE)
1183 - register_size (regno)));
1184 else
1185 supply_register (regno, buf);
1186
1187 error_exit:;
1188 }
1189
1190 /* Fetch all registers, or just one, from the child process. */
1191 static void
1192 usr_fetch_inferior_registers (int regno)
1193 {
1194 if (regno == -1 || regno == 0)
1195 for (regno = 0; regno < the_low_target.num_regs; regno++)
1196 fetch_register (regno);
1197 else
1198 fetch_register (regno);
1199 }
1200
1201 /* Store our register values back into the inferior.
1202 If REGNO is -1, do this for all registers.
1203 Otherwise, REGNO specifies which register (so we can save time). */
1204 static void
1205 usr_store_inferior_registers (int regno)
1206 {
1207 CORE_ADDR regaddr;
1208 int i, size;
1209 char *buf;
1210
1211 if (regno >= 0)
1212 {
1213 if (regno >= the_low_target.num_regs)
1214 return;
1215
1216 if ((*the_low_target.cannot_store_register) (regno) == 1)
1217 return;
1218
1219 regaddr = register_addr (regno);
1220 if (regaddr == -1)
1221 return;
1222 errno = 0;
1223 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1224 & - sizeof (PTRACE_XFER_TYPE);
1225 buf = alloca (size);
1226 memset (buf, 0, size);
1227 if (the_low_target.left_pad_xfer
1228 && register_size (regno) < sizeof (PTRACE_XFER_TYPE))
1229 collect_register (regno, (buf + sizeof (PTRACE_XFER_TYPE)
1230 - register_size (regno)));
1231 else
1232 collect_register (regno, buf);
1233 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1234 {
1235 errno = 0;
1236 ptrace (PTRACE_POKEUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr,
1237 *(PTRACE_XFER_TYPE *) (buf + i));
1238 if (errno != 0)
1239 {
1240 if ((*the_low_target.cannot_store_register) (regno) == 0)
1241 {
1242 char *err = strerror (errno);
1243 char *msg = alloca (strlen (err) + 128);
1244 sprintf (msg, "writing register %d: %s",
1245 regno, err);
1246 error (msg);
1247 return;
1248 }
1249 }
1250 regaddr += sizeof (PTRACE_XFER_TYPE);
1251 }
1252 }
1253 else
1254 for (regno = 0; regno < the_low_target.num_regs; regno++)
1255 usr_store_inferior_registers (regno);
1256 }
1257 #endif /* HAVE_LINUX_USRREGS */
1258
1259
1260
1261 #ifdef HAVE_LINUX_REGSETS
1262
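/* Fetch the inferior's registers using the kernel regset requests. Return 0
if the general registers were read; any other value means the caller should
fall back to the USER-area method (-1 additionally disables regsets for
future calls). */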
1263 static int
1264 regsets_fetch_inferior_registers ()
1265 {
1266 struct regset_info *regset;
1267 int saw_general_regs = 0;
1268
1269 regset = target_regsets;
1270
1271 while (regset->size >= 0)
1272 {
1273 void *buf;
1274 int res;
1275
1276 if (regset->size == 0)
1277 {
1278 regset ++;
1279 continue;
1280 }
1281
1282 buf = malloc (regset->size);
1283 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1284 if (res < 0)
1285 {
1286 if (errno == EIO)
1287 {
1288 /* If we get EIO on the first regset, do not try regsets again.
1289 If we get EIO on a later regset, disable that regset. */
1290 if (regset == target_regsets)
1291 {
1292 use_regsets_p = 0;
1293 return -1;
1294 }
1295 else
1296 {
1297 regset->size = 0;
1298 continue;
1299 }
1300 }
1301 else
1302 {
1303 char s[256];
1304 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%ld",
1305 inferior_pid);
1306 perror (s);
1307 }
1308 }
1309 else if (regset->type == GENERAL_REGS)
1310 saw_general_regs = 1;
1311 regset->store_function (buf);
1312 regset ++;
free (buf);
1313 }
1314 if (saw_general_regs)
1315 return 0;
1316 else
1317 return 1;
1318 }
1319
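/* Store the inferior's registers using the kernel regset requests. The
return value convention is the same as for
regsets_fetch_inferior_registers. */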
1320 static int
1321 regsets_store_inferior_registers ()
1322 {
1323 struct regset_info *regset;
1324 int saw_general_regs = 0;
1325
1326 regset = target_regsets;
1327
1328 while (regset->size >= 0)
1329 {
1330 void *buf;
1331 int res;
1332
1333 if (regset->size == 0)
1334 {
1335 regset ++;
1336 continue;
1337 }
1338
1339 buf = malloc (regset->size);
1340
1341 /* First fill the buffer with the current register set contents,
1342 in case there are any items in the kernel's regset that are
1343 not in gdbserver's regcache. */
1344 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1345
1346 if (res == 0)
1347 {
1348 /* Then overlay our cached registers on that. */
1349 regset->fill_function (buf);
1350
1351 /* Only now do we write the register set. */
1352 res = ptrace (regset->set_request, inferior_pid, 0, buf);
1353 }
1354
1355 if (res < 0)
1356 {
1357 if (errno == EIO)
1358 {
1359 /* If we get EIO on the first regset, do not try regsets again.
1360 If we get EIO on a later regset, disable that regset. */
1361 if (regset == target_regsets)
1362 {
1363 use_regsets_p = 0;
1364 return -1;
1365 }
1366 else
1367 {
1368 regset->size = 0;
1369 continue;
1370 }
1371 }
1372 else
1373 {
1374 perror ("Warning: ptrace(regsets_store_inferior_registers)");
1375 }
1376 }
1377 else if (regset->type == GENERAL_REGS)
1378 saw_general_regs = 1;
1379 regset ++;
1380 free (buf);
1381 }
1382 if (saw_general_regs)
1383 return 0;
1384 else
1385 return 1;
1387 }
1388
1389 #endif /* HAVE_LINUX_REGSETS */
1390
1391
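/* Fetch register REGNO (or all registers) from the inferior, preferring
regsets when available and falling back to the USER area otherwise. */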
1392 void
1393 linux_fetch_registers (int regno)
1394 {
1395 #ifdef HAVE_LINUX_REGSETS
1396 if (use_regsets_p)
1397 {
1398 if (regsets_fetch_inferior_registers () == 0)
1399 return;
1400 }
1401 #endif
1402 #ifdef HAVE_LINUX_USRREGS
1403 usr_fetch_inferior_registers (regno);
1404 #endif
1405 }
1406
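/* Store register REGNO (or all registers) back into the inferior, preferring
regsets when available and falling back to the USER area otherwise. */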
1407 void
1408 linux_store_registers (int regno)
1409 {
1410 #ifdef HAVE_LINUX_REGSETS
1411 if (use_regsets_p)
1412 {
1413 if (regsets_store_inferior_registers () == 0)
1414 return;
1415 }
1416 #endif
1417 #ifdef HAVE_LINUX_USRREGS
1418 usr_store_inferior_registers (regno);
1419 #endif
1420 }
1421
1422
1423 /* Copy LEN bytes from inferior's memory starting at MEMADDR
1424 to debugger memory starting at MYADDR. */
1425
1426 static int
1427 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
1428 {
1429 register int i;
1430 /* Round starting address down to longword boundary. */
1431 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
1432 /* Round ending address up; get number of longwords that makes. */
1433 register int count
1434 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
1435 / sizeof (PTRACE_XFER_TYPE);
1436 /* Allocate buffer of that many longwords. */
1437 register PTRACE_XFER_TYPE *buffer
1438 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
1439
1440 /* Read all the longwords */
1441 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
1442 {
1443 errno = 0;
1444 buffer[i] = ptrace (PTRACE_PEEKTEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr, 0);
1445 if (errno)
1446 return errno;
1447 }
1448
1449 /* Copy appropriate bytes out of the buffer. */
1450 memcpy (myaddr, (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), len);
1451
1452 return 0;
1453 }
1454
1455 /* Copy LEN bytes of data from debugger memory at MYADDR
1456 to inferior's memory at MEMADDR.
1457 On failure (cannot write the inferior)
1458 returns the value of errno. */
1459
1460 static int
1461 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
1462 {
1463 register int i;
1464 /* Round starting address down to longword boundary. */
1465 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
1466 /* Round ending address up; get number of longwords that makes. */
1467 register int count
1468 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
1469 /* Allocate buffer of that many longwords. */
1470 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
1472
1473 if (debug_threads)
1474 {
1475 fprintf (stderr, "Writing %02x to %08lx\n", (unsigned)myaddr[0], (long)memaddr);
1476 }
1477
1478 /* Fill start and end extra bytes of buffer with existing memory data. */
1479
1480 buffer[0] = ptrace (PTRACE_PEEKTEXT, inferior_pid,
1481 (PTRACE_ARG3_TYPE) addr, 0);
1482
1483 if (count > 1)
1484 {
1485 buffer[count - 1]
1486 = ptrace (PTRACE_PEEKTEXT, inferior_pid,
1487 (PTRACE_ARG3_TYPE) (addr + (count - 1)
1488 * sizeof (PTRACE_XFER_TYPE)),
1489 0);
1490 }
1491
1492 /* Copy data to be written over corresponding part of buffer */
1493
1494 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
1495
1496 /* Write the entire buffer. */
1497
1498 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
1499 {
1500 errno = 0;
1501 ptrace (PTRACE_POKETEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
1502 if (errno)
1503 return errno;
1504 }
1505
1506 return 0;
1507 }
1508
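/* Called when new symbols are available; use the opportunity to initialize
the thread_db layer and enable thread support. */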
1509 static void
1510 linux_look_up_symbols (void)
1511 {
1512 #ifdef USE_THREAD_DB
1513 if (using_threads)
1514 return;
1515
1516 using_threads = thread_db_init ();
1517 #endif
1518 }
1519
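/* Interrupt the inferior by sending it a SIGINT, directed at the current
continue thread if one has been selected, otherwise at the main process. */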
1520 static void
1521 linux_request_interrupt (void)
1522 {
1523 extern unsigned long signal_pid;
1524
1525 if (cont_thread != 0 && cont_thread != -1)
1526 {
1527 struct process_info *process;
1528
1529 process = get_thread_process (current_inferior);
1530 kill_lwp (process->lwpid, SIGINT);
1531 }
1532 else
1533 kill_lwp (signal_pid, SIGINT);
1534 }
1535
1536 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
1537 to debugger memory starting at MYADDR. */
1538
1539 static int
1540 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
1541 {
1542 char filename[PATH_MAX];
1543 int fd, n;
1544
1545 snprintf (filename, sizeof filename, "/proc/%ld/auxv", inferior_pid);
1546
1547 fd = open (filename, O_RDONLY);
1548 if (fd < 0)
1549 return -1;
1550
1551 if (offset != (CORE_ADDR) 0
1552 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
1553 n = -1;
1554 else
1555 n = read (fd, myaddr, len);
1556
1557 close (fd);
1558
1559 return n;
1560 }
1561
1562 /* These watchpoint related wrapper functions simply pass on the function call
1563 if the target has registered a corresponding function. */
1564
1565 static int
1566 linux_insert_watchpoint (char type, CORE_ADDR addr, int len)
1567 {
1568 if (the_low_target.insert_watchpoint != NULL)
1569 return the_low_target.insert_watchpoint (type, addr, len);
1570 else
1571 /* Unsupported (see target.h). */
1572 return 1;
1573 }
1574
1575 static int
1576 linux_remove_watchpoint (char type, CORE_ADDR addr, int len)
1577 {
1578 if (the_low_target.remove_watchpoint != NULL)
1579 return the_low_target.remove_watchpoint (type, addr, len);
1580 else
1581 /* Unsupported (see target.h). */
1582 return 1;
1583 }
1584
1585 static int
1586 linux_stopped_by_watchpoint (void)
1587 {
1588 if (the_low_target.stopped_by_watchpoint != NULL)
1589 return the_low_target.stopped_by_watchpoint ();
1590 else
1591 return 0;
1592 }
1593
1594 static CORE_ADDR
1595 linux_stopped_data_address (void)
1596 {
1597 if (the_low_target.stopped_data_address != NULL)
1598 return the_low_target.stopped_data_address ();
1599 else
1600 return 0;
1601 }
1602
1603 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
1604 #if defined(__mcoldfire__)
1605 /* These should really be defined in the kernel's ptrace.h header. */
1606 #define PT_TEXT_ADDR 49*4
1607 #define PT_DATA_ADDR 50*4
1608 #define PT_TEXT_END_ADDR 51*4
1609 #endif
1610
1611 /* Under uClinux, programs are loaded at non-zero offsets, which we need
1612 to tell gdb about. */
1613
1614 static int
1615 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
1616 {
1617 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
1618 unsigned long text, text_end, data;
1619 int pid = get_thread_process (current_inferior)->head.id;
1620
1621 errno = 0;
1622
1623 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
1624 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
1625 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
1626
1627 if (errno == 0)
1628 {
1629 /* Both text and data offsets produced at compile-time (and so
1630 used by gdb) are relative to the beginning of the program,
1631 with the data segment immediately following the text segment.
1632 However, the actual runtime layout in memory may put the data
1633 somewhere else, so when we send gdb a data base-address, we
1634 use the real data base address and subtract the compile-time
1635 data base-address from it (which is just the length of the
1636 text segment). BSS immediately follows data in both
1637 cases. */
1638 *text_p = text;
1639 *data_p = data - (text_end - text);
1640
1641 return 1;
1642 }
1643 #endif
1644 return 0;
1645 }
1646 #endif
1647
1648 static const char *
1649 linux_arch_string (void)
1650 {
1651 return the_low_target.arch_string;
1652 }
1653
1654 static struct target_ops linux_target_ops = {
1655 linux_create_inferior,
1656 linux_attach,
1657 linux_kill,
1658 linux_detach,
1659 linux_thread_alive,
1660 linux_resume,
1661 linux_wait,
1662 linux_fetch_registers,
1663 linux_store_registers,
1664 linux_read_memory,
1665 linux_write_memory,
1666 linux_look_up_symbols,
1667 linux_request_interrupt,
1668 linux_read_auxv,
1669 linux_insert_watchpoint,
1670 linux_remove_watchpoint,
1671 linux_stopped_by_watchpoint,
1672 linux_stopped_data_address,
1673 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
1674 linux_read_offsets,
1675 #else
1676 NULL,
1677 #endif
1678 #ifdef USE_THREAD_DB
1679 thread_db_get_tls_address,
1680 #else
1681 NULL,
1682 #endif
1683 linux_arch_string,
1684 };
1685
1686 static void
1687 linux_init_signals ()
1688 {
1689 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
1690 to find what the cancel signal actually is. */
1691 signal (__SIGRTMIN+1, SIG_IGN);
1692 }
1693
1694 void
1695 initialize_low (void)
1696 {
1697 using_threads = 0;
1698 set_target_ops (&linux_target_ops);
1699 set_breakpoint_data (the_low_target.breakpoint,
1700 the_low_target.breakpoint_len);
1701 init_registers ();
1702 linux_init_signals ();
1703 }