gdb/aarch64-linux-nat.c (binutils-gdb.git, commit d48624fc723b0c383da2e34e28c1862e4100ec92)
1 /* Native-dependent code for GNU/Linux AArch64.
2
3 Copyright (C) 2011-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "inferior.h"
24 #include "gdbcore.h"
25 #include "regcache.h"
26 #include "linux-nat.h"
27 #include "target-descriptions.h"
28 #include "auxv.h"
29 #include "gdbcmd.h"
30 #include "aarch64-tdep.h"
31 #include "aarch64-linux-tdep.h"
32 #include "aarch32-linux-nat.h"
33
34 #include "elf/external.h"
35 #include "elf/common.h"
36
37 #include <sys/ptrace.h>
38 #include <sys/utsname.h>
39 #include <asm/ptrace.h>
40
41 #include "gregset.h"
42
43 /* Defines ps_err_e, struct ps_prochandle. */
44 #include "gdb_proc_service.h"
45
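/* TRAP_HWBKPT is the si_code value the kernel reports for traps raised
   by the hardware breakpoint/watchpoint unit; the definition below is a
   fallback for toolchains whose signal headers lack it.  */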
46 #ifndef TRAP_HWBKPT
47 #define TRAP_HWBKPT 0x0004
48 #endif
49
50 /* On GNU/Linux, threads are implemented as pseudo-processes, in which
51 case we may be tracing more than one process at a time. In that
52 case, inferior_ptid will contain the main process ID and the
53 individual thread (process) ID. get_thread_id () is used to get
54 the thread id if it's available, and the process id otherwise. */
55
56 static int
57 get_thread_id (ptid_t ptid)
58 {
59 int tid = ptid_get_lwp (ptid);
60
61 if (0 == tid)
62 tid = ptid_get_pid (ptid);
63 return tid;
64 }
65
66 /* Macro definitions, data structures, and code for the hardware
67 breakpoint and hardware watchpoint support follow. We use the
68 following abbreviations throughout the code:
69
70 hw - hardware
71 bp - breakpoint
72 wp - watchpoint */
73
74 /* Maximum number of hardware breakpoint and watchpoint registers.
75 Neither of these values may exceed the width of dr_changed_t
76 measured in bits. */
77
78 #define AARCH64_HBP_MAX_NUM 16
79 #define AARCH64_HWP_MAX_NUM 16
80
81 /* Alignment requirement in bytes for addresses written to
82 hardware breakpoint and watchpoint value registers.
83
84 A ptrace call attempting to set an address that does not meet the
85 alignment criteria will fail. Limited support has been provided in
86 this port for unaligned watchpoints, such that from a GDB user
87 perspective, an unaligned watchpoint may be requested.
88
89 This is achieved by minimally enlarging the watched area to meet the
90 alignment requirement, and if necessary, splitting the watchpoint
91 over several hardware watchpoint registers. */
92
93 #define AARCH64_HBP_ALIGNMENT 4
94 #define AARCH64_HWP_ALIGNMENT 8
95
96 /* The maximum length of a memory region that can be watched by one
97 hardware watchpoint register. */
98
99 #define AARCH64_HWP_MAX_LEN_PER_REG 8
100
101 /* ptrace hardware breakpoint resource info is formatted as follows:
102
103    31             24             16               8              0
104    +---------------+--------------+---------------+---------------+
105    |   RESERVED    |   RESERVED   |   DEBUG_ARCH  |   NUM_SLOTS   |
106    +---------------+--------------+---------------+---------------+  */
107
108
109 /* Macros to extract fields from the hardware debug information word. */
110 #define AARCH64_DEBUG_NUM_SLOTS(x) ((x) & 0xff)
111 #define AARCH64_DEBUG_ARCH(x) (((x) >> 8) & 0xff)
112
113 /* Macro for the expected version of the ARMv8-A debug architecture. */
114 #define AARCH64_DEBUG_ARCH_V8 0x6
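/* For example (illustrative value only), a dbg_info word of 0x0610
   reported by the kernel decodes as debug architecture 0x6 (ARMv8)
   with 16 breakpoint or watchpoint slots.  */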
115
116 /* Number of hardware breakpoints/watchpoints the target supports.
117 They are initialized with values obtained via the ptrace calls
118 with NT_ARM_HW_BREAK and NT_ARM_HW_WATCH respectively. */
119
120 static int aarch64_num_bp_regs;
121 static int aarch64_num_wp_regs;

/* Whether to print the values of the mirrored debug registers as they
   are updated; controlled by the "maintenance set show-debug-regs"
   command registered below.  */
static int show_debug_regs;
122
123 /* Each bit of a variable of this type is used to indicate whether a
124 hardware breakpoint or watchpoint setting has been changed since
125 the last update.
126
127 Bit N corresponds to the Nth hardware breakpoint or watchpoint
128 setting which is managed in aarch64_debug_reg_state, where N is
129 valid between 0 and the total number of the hardware breakpoint or
130 watchpoint debug registers minus 1.
131
132 When bit N is 1, the corresponding breakpoint or watchpoint setting
133 has changed, and therefore the corresponding hardware debug
134 register needs to be updated via the ptrace interface.
135
136 In the per-thread arch-specific data area, we define two such
137 variables for per-thread hardware breakpoint and watchpoint
138 settings respectively.
139
140 This type is part of the mechanism which helps reduce the number of
141 ptrace calls to the kernel, i.e. avoid asking the kernel to write
142 to the debug registers with unchanged values. */
143
144 typedef ULONGEST dr_changed_t;
145
146 /* Set each of the lower M bits of X to 1; assert X is wide enough. */
147
148 #define DR_MARK_ALL_CHANGED(x, m) \
149 do \
150 { \
151 gdb_assert (sizeof ((x)) * 8 >= (m)); \
152 (x) = (((dr_changed_t)1 << (m)) - 1); \
153 } while (0)
154
155 #define DR_MARK_N_CHANGED(x, n) \
156 do \
157 { \
158 (x) |= ((dr_changed_t)1 << (n)); \
159 } while (0)
160
161 #define DR_CLEAR_CHANGED(x) \
162 do \
163 { \
164 (x) = 0; \
165 } while (0)
166
167 #define DR_HAS_CHANGED(x) ((x) != 0)
168 #define DR_N_HAS_CHANGED(x, n) ((x) & ((dr_changed_t)1 << (n)))
169
170 /* Structure for managing the hardware breakpoint/watchpoint resources.
171 DR_ADDR_* stores the address, DR_CTRL_* stores the control register
172 content, and DR_REF_COUNT_* counts the number of references to the
173 corresponding bp/wp, so that the limited hardware resources are not
174 wasted on duplicate bp/wp settings (though so far GDB has done a
175 good job of not sending duplicate bp/wp requests). */
176
177 struct aarch64_debug_reg_state
178 {
179 /* hardware breakpoint */
180 CORE_ADDR dr_addr_bp[AARCH64_HBP_MAX_NUM];
181 unsigned int dr_ctrl_bp[AARCH64_HBP_MAX_NUM];
182 unsigned int dr_ref_count_bp[AARCH64_HBP_MAX_NUM];
183
184 /* hardware watchpoint */
185 CORE_ADDR dr_addr_wp[AARCH64_HWP_MAX_NUM];
186 unsigned int dr_ctrl_wp[AARCH64_HWP_MAX_NUM];
187 unsigned int dr_ref_count_wp[AARCH64_HWP_MAX_NUM];
188 };
189
190 /* Per-process data. We don't bind this to a per-inferior registry
191 because of targets like x86 GNU/Linux that need to keep track of
192 processes that aren't bound to any inferior (e.g., fork children,
193 checkpoints). */
194
195 struct aarch64_process_info
196 {
197 /* Linked list. */
198 struct aarch64_process_info *next;
199
200 /* The process identifier. */
201 pid_t pid;
202
203 /* Copy of aarch64 hardware debug registers. */
204 struct aarch64_debug_reg_state state;
205 };
206
207 static struct aarch64_process_info *aarch64_process_list = NULL;
208
209 /* Find process data for process PID. */
210
211 static struct aarch64_process_info *
212 aarch64_find_process_pid (pid_t pid)
213 {
214 struct aarch64_process_info *proc;
215
216 for (proc = aarch64_process_list; proc; proc = proc->next)
217 if (proc->pid == pid)
218 return proc;
219
220 return NULL;
221 }
222
223 /* Add process data for process PID. Returns newly allocated info
224 object. */
225
226 static struct aarch64_process_info *
227 aarch64_add_process (pid_t pid)
228 {
229 struct aarch64_process_info *proc;
230
231 proc = xcalloc (1, sizeof (*proc));
232 proc->pid = pid;
233
234 proc->next = aarch64_process_list;
235 aarch64_process_list = proc;
236
237 return proc;
238 }
239
240 /* Get info specific to process PID, creating it if necessary.
241 Never returns NULL. */
242
243 static struct aarch64_process_info *
244 aarch64_process_info_get (pid_t pid)
245 {
246 struct aarch64_process_info *proc;
247
248 proc = aarch64_find_process_pid (pid);
249 if (proc == NULL)
250 proc = aarch64_add_process (pid);
251
252 return proc;
253 }
254
255 /* Called whenever GDB is no longer debugging process PID. It deletes
256 data structures that keep track of debug register state. */
257
258 static void
259 aarch64_forget_process (pid_t pid)
260 {
261 struct aarch64_process_info *proc, **proc_link;
262
263 proc = aarch64_process_list;
264 proc_link = &aarch64_process_list;
265
266 while (proc != NULL)
267 {
268 if (proc->pid == pid)
269 {
270 *proc_link = proc->next;
271
272 xfree (proc);
273 return;
274 }
275
276 proc_link = &proc->next;
277 proc = *proc_link;
278 }
279 }
280
281 /* Get debug registers state for process PID. */
282
283 static struct aarch64_debug_reg_state *
284 aarch64_get_debug_reg_state (pid_t pid)
285 {
286 return &aarch64_process_info_get (pid)->state;
287 }
288
289 /* Per-thread arch-specific data we want to keep. */
290
291 struct arch_lwp_info
292 {
293 /* When bit N is 1, it indicates the Nth hardware breakpoint or
294 watchpoint register pair needs to be updated when the thread is
295 resumed; see aarch64_linux_prepare_to_resume. */
296 dr_changed_t dr_changed_bp;
297 dr_changed_t dr_changed_wp;
298 };
299
300 /* Call ptrace to set the thread TID's hardware breakpoint/watchpoint
301 registers with data from *STATE. */
302
303 static void
304 aarch64_linux_set_debug_regs (const struct aarch64_debug_reg_state *state,
305 int tid, int watchpoint)
306 {
307 int i, count;
308 struct iovec iov;
309 struct user_hwdebug_state regs;
310 const CORE_ADDR *addr;
311 const unsigned int *ctrl;
312
313 memset (&regs, 0, sizeof (regs));
314 iov.iov_base = &regs;
315 count = watchpoint ? aarch64_num_wp_regs : aarch64_num_bp_regs;
316 addr = watchpoint ? state->dr_addr_wp : state->dr_addr_bp;
317 ctrl = watchpoint ? state->dr_ctrl_wp : state->dr_ctrl_bp;
318 if (count == 0)
319 return;
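  /* Send only the regset header (dbg_info) plus the first COUNT register
     slots; IOV_LEN is therefore the offset of the last used slot plus
     the size of one slot.  */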
320 iov.iov_len = (offsetof (struct user_hwdebug_state, dbg_regs[count - 1])
321 + sizeof (regs.dbg_regs [count - 1]));
322
323 for (i = 0; i < count; i++)
324 {
325 regs.dbg_regs[i].addr = addr[i];
326 regs.dbg_regs[i].ctrl = ctrl[i];
327 }
328
329 if (ptrace (PTRACE_SETREGSET, tid,
330 watchpoint ? NT_ARM_HW_WATCH : NT_ARM_HW_BREAK,
331 (void *) &iov))
332 error (_("Unexpected error setting hardware debug registers"));
333 }
334
335 struct aarch64_dr_update_callback_param
336 {
337 int is_watchpoint;
338 unsigned int idx;
339 };
340
341 /* Callback for iterate_over_lwps. Records the
342 information about the change of one hardware breakpoint/watchpoint
343 setting for the thread LWP.
344 The information is passed in via PTR.
345 N.B. The actual updating of hardware debug registers is not
346 carried out until the moment the thread is resumed. */
347
348 static int
349 debug_reg_change_callback (struct lwp_info *lwp, void *ptr)
350 {
351 struct aarch64_dr_update_callback_param *param_p
352 = (struct aarch64_dr_update_callback_param *) ptr;
353 int pid = get_thread_id (lwp->ptid);
354 int idx = param_p->idx;
355 int is_watchpoint = param_p->is_watchpoint;
356 struct arch_lwp_info *info = lwp->arch_private;
357 dr_changed_t *dr_changed_ptr;
358 dr_changed_t dr_changed;
359
360 if (info == NULL)
361 info = lwp->arch_private = XCNEW (struct arch_lwp_info);
362
363 if (show_debug_regs)
364 {
365 fprintf_unfiltered (gdb_stdlog,
366 "debug_reg_change_callback: \n\tOn entry:\n");
367 fprintf_unfiltered (gdb_stdlog,
368 "\tpid%d, dr_changed_bp=0x%s, "
369 "dr_changed_wp=0x%s\n",
370 pid, phex (info->dr_changed_bp, 8),
371 phex (info->dr_changed_wp, 8));
372 }
373
374 dr_changed_ptr = is_watchpoint ? &info->dr_changed_wp
375 : &info->dr_changed_bp;
376 dr_changed = *dr_changed_ptr;
377
378 gdb_assert (idx >= 0
379 && (idx < (is_watchpoint ? aarch64_num_wp_regs
380 : aarch64_num_bp_regs)));
381
382 /* The actual update is done later just before resuming the lwp,
383 we just mark that one register pair needs updating. */
384 DR_MARK_N_CHANGED (dr_changed, idx);
385 *dr_changed_ptr = dr_changed;
386
387 /* If the lwp isn't stopped, force it to momentarily pause, so
388 we can update its debug registers. */
389 if (!lwp->stopped)
390 linux_stop_lwp (lwp);
391
392 if (show_debug_regs)
393 {
394 fprintf_unfiltered (gdb_stdlog,
395 "\tOn exit:\n\tpid%d, dr_changed_bp=0x%s, "
396 "dr_changed_wp=0x%s\n",
397 pid, phex (info->dr_changed_bp, 8),
398 phex (info->dr_changed_wp, 8));
399 }
400
401 /* Continue the iteration. */
402 return 0;
403 }
404
405 /* Notify each thread that its IDXth breakpoint/watchpoint register
406 pair needs to be updated. The change is recorded in each
407 thread's arch-specific data area; the actual updating is done
408 when the thread is resumed. */
409
410 static void
411 aarch64_notify_debug_reg_change (const struct aarch64_debug_reg_state *state,
412 int is_watchpoint, unsigned int idx)
413 {
414 struct aarch64_dr_update_callback_param param;
415 ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
416
417 param.is_watchpoint = is_watchpoint;
418 param.idx = idx;
419
420 iterate_over_lwps (pid_ptid, debug_reg_change_callback, (void *) &param);
421 }
422
423 /* Print the values of the cached breakpoint/watchpoint registers. */
424
425 static void
426 aarch64_show_debug_reg_state (struct aarch64_debug_reg_state *state,
427 const char *func, CORE_ADDR addr,
428 int len, int type)
429 {
430 int i;
431
432 fprintf_unfiltered (gdb_stdlog, "%s", func);
433 if (addr || len)
434 fprintf_unfiltered (gdb_stdlog, " (addr=0x%08lx, len=%d, type=%s)",
435 (unsigned long) addr, len,
436 type == hw_write ? "hw-write-watchpoint"
437 : (type == hw_read ? "hw-read-watchpoint"
438 : (type == hw_access ? "hw-access-watchpoint"
439 : (type == hw_execute ? "hw-breakpoint"
440 : "??unknown??"))));
441 fprintf_unfiltered (gdb_stdlog, ":\n");
442
443 fprintf_unfiltered (gdb_stdlog, "\tBREAKPOINTs:\n");
444 for (i = 0; i < aarch64_num_bp_regs; i++)
445 fprintf_unfiltered (gdb_stdlog,
446 "\tBP%d: addr=0x%08lx, ctrl=0x%08x, ref.count=%d\n",
447 i, state->dr_addr_bp[i],
448 state->dr_ctrl_bp[i], state->dr_ref_count_bp[i]);
449
450 fprintf_unfiltered (gdb_stdlog, "\tWATCHPOINTs:\n");
451 for (i = 0; i < aarch64_num_wp_regs; i++)
452 fprintf_unfiltered (gdb_stdlog,
453 "\tWP%d: addr=0x%08lx, ctrl=0x%08x, ref.count=%d\n",
454 i, state->dr_addr_wp[i],
455 state->dr_ctrl_wp[i], state->dr_ref_count_wp[i]);
456 }
457
458 /* Fill GDB's register array with the general-purpose register values
459 from the current thread. */
460
461 static void
462 fetch_gregs_from_thread (struct regcache *regcache)
463 {
464 int ret, tid;
465 struct gdbarch *gdbarch = get_regcache_arch (regcache);
466 elf_gregset_t regs;
467 struct iovec iovec;
468
469 /* Make sure REGS can hold all registers contents on both aarch64
470 and arm. */
471 gdb_static_assert (sizeof (regs) >= 18 * 4);
472
473 tid = get_thread_id (inferior_ptid);
474
475 iovec.iov_base = &regs;
476 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 32)
477 iovec.iov_len = 18 * 4;
478 else
479 iovec.iov_len = sizeof (regs);
480
481 ret = ptrace (PTRACE_GETREGSET, tid, NT_PRSTATUS, &iovec);
482 if (ret < 0)
483 perror_with_name (_("Unable to fetch general registers."));
484
485 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 32)
486 aarch32_gp_regcache_supply (regcache, (uint32_t *) regs, 1);
487 else
488 {
489 int regno;
490
491 for (regno = AARCH64_X0_REGNUM; regno <= AARCH64_CPSR_REGNUM; regno++)
492 regcache_raw_supply (regcache, regno, &regs[regno - AARCH64_X0_REGNUM]);
493 }
494 }
495
496 /* Store to the current thread the valid general-purpose register
497 values in the GDB's register array. */
498
499 static void
500 store_gregs_to_thread (const struct regcache *regcache)
501 {
502 int ret, tid;
503 elf_gregset_t regs;
504 struct iovec iovec;
505 struct gdbarch *gdbarch = get_regcache_arch (regcache);
506
507 /* Make sure REGS can hold all registers contents on both aarch64
508 and arm. */
509 gdb_static_assert (sizeof (regs) >= 18 * 4);
510 tid = get_thread_id (inferior_ptid);
511
512 iovec.iov_base = &regs;
513 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 32)
514 iovec.iov_len = 18 * 4;
515 else
516 iovec.iov_len = sizeof (regs);
517
518 ret = ptrace (PTRACE_GETREGSET, tid, NT_PRSTATUS, &iovec);
519 if (ret < 0)
520 perror_with_name (_("Unable to fetch general registers."));
521
522 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 32)
523 aarch32_gp_regcache_collect (regcache, (uint32_t *) regs, 1);
524 else
525 {
526 int regno;
527
528 for (regno = AARCH64_X0_REGNUM; regno <= AARCH64_CPSR_REGNUM; regno++)
529 if (REG_VALID == regcache_register_status (regcache, regno))
530 regcache_raw_collect (regcache, regno,
531 &regs[regno - AARCH64_X0_REGNUM]);
532 }
533
534 ret = ptrace (PTRACE_SETREGSET, tid, NT_PRSTATUS, &iovec);
535 if (ret < 0)
536 perror_with_name (_("Unable to store general registers."));
537 }
538
539 /* Fill GDB's register array with the fp/simd register values
540 from the current thread. */
541
542 static void
543 fetch_fpregs_from_thread (struct regcache *regcache)
544 {
545 int ret, tid;
546 elf_fpregset_t regs;
547 struct iovec iovec;
548 struct gdbarch *gdbarch = get_regcache_arch (regcache);
549
550 /* Make sure REGS can hold all VFP registers contents on both aarch64
551 and arm. */
552 gdb_static_assert (sizeof regs >= VFP_REGS_SIZE);
553
554 tid = get_thread_id (inferior_ptid);
555
556 iovec.iov_base = &regs;
557
558 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 32)
559 {
560 iovec.iov_len = VFP_REGS_SIZE;
561
562 ret = ptrace (PTRACE_GETREGSET, tid, NT_ARM_VFP, &iovec);
563 if (ret < 0)
564 perror_with_name (_("Unable to fetch VFP registers."));
565
566 aarch32_vfp_regcache_supply (regcache, (gdb_byte *) &regs, 32);
567 }
568 else
569 {
570 int regno;
571
572 iovec.iov_len = sizeof (regs);
573
574 ret = ptrace (PTRACE_GETREGSET, tid, NT_FPREGSET, &iovec);
575 if (ret < 0)
576 perror_with_name (_("Unable to fetch FP/SIMD registers."));
577
578 for (regno = AARCH64_V0_REGNUM; regno <= AARCH64_V31_REGNUM; regno++)
579 regcache_raw_supply (regcache, regno,
580 &regs.vregs[regno - AARCH64_V0_REGNUM]);
581
582 regcache_raw_supply (regcache, AARCH64_FPSR_REGNUM, &regs.fpsr);
583 regcache_raw_supply (regcache, AARCH64_FPCR_REGNUM, &regs.fpcr);
584 }
585 }
586
587 /* Store to the current thread the valid fp/simd register
588 values in the GDB's register array. */
589
590 static void
591 store_fpregs_to_thread (const struct regcache *regcache)
592 {
593 int ret, tid;
594 elf_fpregset_t regs;
595 struct iovec iovec;
596 struct gdbarch *gdbarch = get_regcache_arch (regcache);
597
598 /* Make sure REGS can hold all VFP registers contents on both aarch64
599 and arm. */
600 gdb_static_assert (sizeof regs >= VFP_REGS_SIZE);
601 tid = get_thread_id (inferior_ptid);
602
603 iovec.iov_base = &regs;
604
605 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 32)
606 {
607 iovec.iov_len = VFP_REGS_SIZE;
608
609 ret = ptrace (PTRACE_GETREGSET, tid, NT_ARM_VFP, &iovec);
610 if (ret < 0)
611 perror_with_name (_("Unable to fetch VFP registers."));
612
613 aarch32_vfp_regcache_collect (regcache, (gdb_byte *) &regs, 32);
614 }
615 else
616 {
617 int regno;
618
619 iovec.iov_len = sizeof (regs);
620
621 ret = ptrace (PTRACE_GETREGSET, tid, NT_FPREGSET, &iovec);
622 if (ret < 0)
623 perror_with_name (_("Unable to fetch FP/SIMD registers."));
624
625 for (regno = AARCH64_V0_REGNUM; regno <= AARCH64_V31_REGNUM; regno++)
626 if (REG_VALID == regcache_register_status (regcache, regno))
627 regcache_raw_collect (regcache, regno,
628 (char *) &regs.vregs[regno - AARCH64_V0_REGNUM]);
629
630 if (REG_VALID == regcache_register_status (regcache, AARCH64_FPSR_REGNUM))
631 regcache_raw_collect (regcache, AARCH64_FPSR_REGNUM,
632 (char *) &regs.fpsr);
633 if (REG_VALID == regcache_register_status (regcache, AARCH64_FPCR_REGNUM))
634 regcache_raw_collect (regcache, AARCH64_FPCR_REGNUM,
635 (char *) &regs.fpcr);
636 }
637
638 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 32)
639 {
640 ret = ptrace (PTRACE_SETREGSET, tid, NT_ARM_VFP, &iovec);
641 if (ret < 0)
642 perror_with_name (_("Unable to store VFP registers."));
643 }
644 else
645 {
646 ret = ptrace (PTRACE_SETREGSET, tid, NT_FPREGSET, &iovec);
647 if (ret < 0)
648 perror_with_name (_("Unable to store FP/SIMD registers."));
649 }
650 }
651
652 /* Implement the "to_fetch_register" target_ops method. */
653
654 static void
655 aarch64_linux_fetch_inferior_registers (struct target_ops *ops,
656 struct regcache *regcache,
657 int regno)
658 {
659 if (regno == -1)
660 {
661 fetch_gregs_from_thread (regcache);
662 fetch_fpregs_from_thread (regcache);
663 }
664 else if (regno < AARCH64_V0_REGNUM)
665 fetch_gregs_from_thread (regcache);
666 else
667 fetch_fpregs_from_thread (regcache);
668 }
669
670 /* Implement the "to_store_register" target_ops method. */
671
672 static void
673 aarch64_linux_store_inferior_registers (struct target_ops *ops,
674 struct regcache *regcache,
675 int regno)
676 {
677 if (regno == -1)
678 {
679 store_gregs_to_thread (regcache);
680 store_fpregs_to_thread (regcache);
681 }
682 else if (regno < AARCH64_V0_REGNUM)
683 store_gregs_to_thread (regcache);
684 else
685 store_fpregs_to_thread (regcache);
686 }
687
688 /* Fill register REGNO (if it is a general-purpose register) in
689 *GREGSETP with the value in GDB's register array. If REGNO is -1,
690 do this for all registers. */
691
692 void
693 fill_gregset (const struct regcache *regcache,
694 gdb_gregset_t *gregsetp, int regno)
695 {
696 regcache_collect_regset (&aarch64_linux_gregset, regcache,
697 regno, (gdb_byte *) gregsetp,
698 AARCH64_LINUX_SIZEOF_GREGSET);
699 }
700
701 /* Fill GDB's register array with the general-purpose register values
702 in *GREGSETP. */
703
704 void
705 supply_gregset (struct regcache *regcache, const gdb_gregset_t *gregsetp)
706 {
707 regcache_supply_regset (&aarch64_linux_gregset, regcache, -1,
708 (const gdb_byte *) gregsetp,
709 AARCH64_LINUX_SIZEOF_GREGSET);
710 }
711
712 /* Fill register REGNO (if it is a floating-point register) in
713 *FPREGSETP with the value in GDB's register array. If REGNO is -1,
714 do this for all registers. */
715
716 void
717 fill_fpregset (const struct regcache *regcache,
718 gdb_fpregset_t *fpregsetp, int regno)
719 {
720 regcache_collect_regset (&aarch64_linux_fpregset, regcache,
721 regno, (gdb_byte *) fpregsetp,
722 AARCH64_LINUX_SIZEOF_FPREGSET);
723 }
724
725 /* Fill GDB's register array with the floating-point register values
726 in *FPREGSETP. */
727
728 void
729 supply_fpregset (struct regcache *regcache, const gdb_fpregset_t *fpregsetp)
730 {
731 regcache_supply_regset (&aarch64_linux_fpregset, regcache, -1,
732 (const gdb_byte *) fpregsetp,
733 AARCH64_LINUX_SIZEOF_FPREGSET);
734 }
735
736 /* Called when resuming a thread.
737 The hardware debug registers are updated when there is any change. */
738
739 static void
740 aarch64_linux_prepare_to_resume (struct lwp_info *lwp)
741 {
742 struct arch_lwp_info *info = lwp->arch_private;
743
744 /* NULL means this is the main thread still going through the shell,
745 or that no watchpoint has been set yet. In that case, there's
746 nothing to do. */
747 if (info == NULL)
748 return;
749
750 if (DR_HAS_CHANGED (info->dr_changed_bp)
751 || DR_HAS_CHANGED (info->dr_changed_wp))
752 {
753 int tid = ptid_get_lwp (lwp->ptid);
754 struct aarch64_debug_reg_state *state
755 = aarch64_get_debug_reg_state (ptid_get_pid (lwp->ptid));
756
757 if (show_debug_regs)
758 fprintf_unfiltered (gdb_stdlog, "prepare_to_resume thread %d\n", tid);
759
760 /* Watchpoints. */
761 if (DR_HAS_CHANGED (info->dr_changed_wp))
762 {
763 aarch64_linux_set_debug_regs (state, tid, 1);
764 DR_CLEAR_CHANGED (info->dr_changed_wp);
765 }
766
767 /* Breakpoints. */
768 if (DR_HAS_CHANGED (info->dr_changed_bp))
769 {
770 aarch64_linux_set_debug_regs (state, tid, 0);
771 DR_CLEAR_CHANGED (info->dr_changed_bp);
772 }
773 }
774 }
775
776 static void
777 aarch64_linux_new_thread (struct lwp_info *lp)
778 {
779 struct arch_lwp_info *info = XCNEW (struct arch_lwp_info);
780
781 /* Mark that all the hardware breakpoint/watchpoint register pairs
782 for this thread need to be initialized. */
783 DR_MARK_ALL_CHANGED (info->dr_changed_bp, aarch64_num_bp_regs);
784 DR_MARK_ALL_CHANGED (info->dr_changed_wp, aarch64_num_wp_regs);
785
786 lp->arch_private = info;
787 }
788
789 /* linux_nat_new_fork hook. */
790
791 static void
792 aarch64_linux_new_fork (struct lwp_info *parent, pid_t child_pid)
793 {
794 pid_t parent_pid;
795 struct aarch64_debug_reg_state *parent_state;
796 struct aarch64_debug_reg_state *child_state;
797
798 /* NULL means no watchpoint has ever been set in the parent. In
799 that case, there's nothing to do. */
800 if (parent->arch_private == NULL)
801 return;
802
803 /* GDB core assumes the child inherits the watchpoints/hw
804 breakpoints of the parent, and will remove them all from the
805 forked off process. Copy the debug registers mirrors into the
806 new process so that all breakpoints and watchpoints can be
807 removed together. */
808
809 parent_pid = ptid_get_pid (parent->ptid);
810 parent_state = aarch64_get_debug_reg_state (parent_pid);
811 child_state = aarch64_get_debug_reg_state (child_pid);
812 *child_state = *parent_state;
813 }
814 \f
815
816 /* Called by libthread_db. Returns a pointer to the thread local
817 storage (or its descriptor). */
818
819 ps_err_e
820 ps_get_thread_area (const struct ps_prochandle *ph,
821 lwpid_t lwpid, int idx, void **base)
822 {
823 struct iovec iovec;
824 uint64_t reg;
825
826 iovec.iov_base = &reg;
827 iovec.iov_len = sizeof (reg);
828
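  /* The NT_ARM_TLS regset exposes the thread pointer of LWPID
     (TPIDR_EL0 on AArch64) as a single 64-bit value.  */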
829 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
830 return PS_ERR;
831
832 /* IDX is the bias from the thread pointer to the beginning of the
833 thread descriptor. It has to be subtracted due to implementation
834 quirks in libthread_db. */
835 *base = (void *) (reg - idx);
836
837 return PS_OK;
838 }
839 \f
840
841 /* Get the hardware debug register capacity information. */
842
843 static void
844 aarch64_linux_get_debug_reg_capacity (void)
845 {
846 int tid;
847 struct iovec iov;
848 struct user_hwdebug_state dreg_state;
849
850 tid = get_thread_id (inferior_ptid);
851 iov.iov_base = &dreg_state;
852 iov.iov_len = sizeof (dreg_state);
853
854 /* Get hardware watchpoint register info. */
855 if (ptrace (PTRACE_GETREGSET, tid, NT_ARM_HW_WATCH, &iov) == 0
856 && AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8)
857 {
858 aarch64_num_wp_regs = AARCH64_DEBUG_NUM_SLOTS (dreg_state.dbg_info);
859 if (aarch64_num_wp_regs > AARCH64_HWP_MAX_NUM)
860 {
861 warning (_("Unexpected number of hardware watchpoint registers"
862 " reported by ptrace, got %d, expected %d."),
863 aarch64_num_wp_regs, AARCH64_HWP_MAX_NUM);
864 aarch64_num_wp_regs = AARCH64_HWP_MAX_NUM;
865 }
866 }
867 else
868 {
869 warning (_("Unable to determine the number of hardware watchpoints"
870 " available."));
871 aarch64_num_wp_regs = 0;
872 }
873
874 /* Get hardware breakpoint register info. */
875 if (ptrace (PTRACE_GETREGSET, tid, NT_ARM_HW_BREAK, &iov) == 0
876 && AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8)
877 {
878 aarch64_num_bp_regs = AARCH64_DEBUG_NUM_SLOTS (dreg_state.dbg_info);
879 if (aarch64_num_bp_regs > AARCH64_HBP_MAX_NUM)
880 {
881 warning (_("Unexpected number of hardware breakpoint registers"
882 " reported by ptrace, got %d, expected %d."),
883 aarch64_num_bp_regs, AARCH64_HBP_MAX_NUM);
884 aarch64_num_bp_regs = AARCH64_HBP_MAX_NUM;
885 }
886 }
887 else
888 {
889 warning (_("Unable to determine the number of hardware breakpoints"
890 " available."));
891 aarch64_num_bp_regs = 0;
892 }
893 }
894
895 static void (*super_post_startup_inferior) (struct target_ops *self,
896 ptid_t ptid);
897
898 /* Implement the "to_post_startup_inferior" target_ops method. */
899
900 static void
901 aarch64_linux_child_post_startup_inferior (struct target_ops *self,
902 ptid_t ptid)
903 {
904 aarch64_forget_process (ptid_get_pid (ptid));
905 aarch64_linux_get_debug_reg_capacity ();
906 super_post_startup_inferior (self, ptid);
907 }
908
909 extern struct target_desc *tdesc_arm_with_vfpv3;
910 extern struct target_desc *tdesc_arm_with_neon;
911
912 /* Implement the "to_read_description" target_ops method. */
913
914 static const struct target_desc *
915 aarch64_linux_read_description (struct target_ops *ops)
916 {
917 CORE_ADDR at_phent;
918
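  /* The program header size distinguishes a 64-bit inferior from a
     32-bit one: AT_PHENT is sizeof (Elf64_External_Phdr) (56 bytes)
     for AArch64 executables and sizeof (Elf32_External_Phdr)
     (32 bytes) for 32-bit ARM ones.  */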
919 if (target_auxv_search (ops, AT_PHENT, &at_phent) == 1)
920 {
921 if (at_phent == sizeof (Elf64_External_Phdr))
922 return tdesc_aarch64;
923 else
924 {
925 CORE_ADDR arm_hwcap = 0;
926
927 if (target_auxv_search (ops, AT_HWCAP, &arm_hwcap) != 1)
928 return ops->beneath->to_read_description (ops->beneath);
929
930 #ifndef COMPAT_HWCAP_VFP
931 #define COMPAT_HWCAP_VFP (1 << 6)
932 #endif
933 #ifndef COMPAT_HWCAP_NEON
934 #define COMPAT_HWCAP_NEON (1 << 12)
935 #endif
936 #ifndef COMPAT_HWCAP_VFPv3
937 #define COMPAT_HWCAP_VFPv3 (1 << 13)
938 #endif
939
940 if (arm_hwcap & COMPAT_HWCAP_VFP)
941 {
942 char *buf;
943 const struct target_desc *result = NULL;
944
945 if (arm_hwcap & COMPAT_HWCAP_NEON)
946 result = tdesc_arm_with_neon;
947 else if (arm_hwcap & COMPAT_HWCAP_VFPv3)
948 result = tdesc_arm_with_vfpv3;
949
950 return result;
951 }
952
953 return NULL;
954 }
955 }
956
957 return tdesc_aarch64;
958 }
959
960 /* Given the (potentially unaligned) watchpoint address in ADDR and
961 length in LEN, return the aligned address and aligned length in
962 *ALIGNED_ADDR_P and *ALIGNED_LEN_P, respectively. The returned
963 aligned address and length will be valid values to write to the
964 hardware watchpoint value and control registers.
965
966 The given watchpoint may get truncated if more than one hardware
967 register is needed to cover the watched region. *NEXT_ADDR_P
968 and *NEXT_LEN_P, if non-NULL, will return the address and length
969 of the remaining part of the watchpoint (which can be processed
970 by calling this routine again to generate another aligned address
971 and length pair.
972
973 See the comment above the function of the same name in
974 gdbserver/linux-aarch64-low.c for more information. */
975
976 static void
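/* A worked example (illustrative addresses): a request to watch 6 bytes
   at 0x1003 is first expanded to the aligned pair <0x1000, 8>, leaving
   <0x1008, 1> as the remainder; a second call then yields the aligned
   pair <0x1008, 1> with nothing left over, so the request consumes two
   hardware watchpoint registers.  */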
977 aarch64_align_watchpoint (CORE_ADDR addr, int len, CORE_ADDR *aligned_addr_p,
978 int *aligned_len_p, CORE_ADDR *next_addr_p,
979 int *next_len_p)
980 {
981 int aligned_len;
982 unsigned int offset;
983 CORE_ADDR aligned_addr;
984 const unsigned int alignment = AARCH64_HWP_ALIGNMENT;
985 const unsigned int max_wp_len = AARCH64_HWP_MAX_LEN_PER_REG;
986
987 /* As assumed by the algorithm. */
988 gdb_assert (alignment == max_wp_len);
989
990 if (len <= 0)
991 return;
992
993 /* Address to be put into the hardware watchpoint value register
994 must be aligned. */
995 offset = addr & (alignment - 1);
996 aligned_addr = addr - offset;
997
998 gdb_assert (offset >= 0 && offset < alignment);
999 gdb_assert (aligned_addr >= 0 && aligned_addr <= addr);
1000 gdb_assert (offset + len > 0);
1001
1002 if (offset + len >= max_wp_len)
1003 {
1004 /* Need more than one watchpoint register; truncate the region
1005 at the alignment boundary. */
1006 aligned_len = max_wp_len;
1007 len -= (max_wp_len - offset);
1008 addr += (max_wp_len - offset);
1009 gdb_assert ((addr & (alignment - 1)) == 0);
1010 }
1011 else
1012 {
1013 /* Find the smallest valid length that is large enough to
1014 accommodate this watchpoint. */
1015 static const unsigned char
1016 aligned_len_array[AARCH64_HWP_MAX_LEN_PER_REG] =
1017 { 1, 2, 4, 4, 8, 8, 8, 8 };
1018
1019 aligned_len = aligned_len_array[offset + len - 1];
1020 addr += len;
1021 len = 0;
1022 }
1023
1024 if (aligned_addr_p)
1025 *aligned_addr_p = aligned_addr;
1026 if (aligned_len_p)
1027 *aligned_len_p = aligned_len;
1028 if (next_addr_p)
1029 *next_addr_p = addr;
1030 if (next_len_p)
1031 *next_len_p = len;
1032 }
1033
1034 /* Returns the number of hardware watchpoints of type TYPE that we can
1035 set. Value is positive if we can set CNT watchpoints, zero if
1036 setting watchpoints of type TYPE is not supported, and negative if
1037 CNT is more than the maximum number of watchpoints of type TYPE
1038 that we can support. TYPE is one of bp_hardware_watchpoint,
1039 bp_read_watchpoint, bp_write_watchpoint, or bp_hardware_breakpoint.
1040 CNT is the number of such watchpoints used so far (including this
1041 one). OTHERTYPE is non-zero if other types of watchpoints are
1042 currently enabled.
1043
1044 We always return 1 here because we don't have enough information
1045 about possible overlap of addresses that they want to watch. As an
1046 extreme example, consider the case where all the watchpoints watch
1047 the same address and the same region length: then we can handle a
1048 virtually unlimited number of watchpoints, due to debug register
1049 sharing implemented via reference counts. */
1050
1051 static int
1052 aarch64_linux_can_use_hw_breakpoint (struct target_ops *self,
1053 int type, int cnt, int othertype)
1054 {
1055 return 1;
1056 }
1057
1058 /* ptrace expects control registers to be formatted as follows:
1059
1060    31                              13          5      3      1    0
1061    +--------------------------------+----------+------+------+----+
1062    |         RESERVED (SBZ)         |  LENGTH  | TYPE | PRIV | EN |
1063    +--------------------------------+----------+------+------+----+
1064
1065 The TYPE field is ignored for breakpoints. */
1066
1067 #define DR_CONTROL_ENABLED(ctrl) (((ctrl) & 0x1) == 1)
1068 #define DR_CONTROL_LENGTH(ctrl) (((ctrl) >> 5) & 0xff)
1069
1070 /* Utility function that returns the length in bytes of a watchpoint
1071 according to the content of a hardware debug control register CTRL.
1072 Note that the kernel currently only supports the following Byte
1073 Address Select (BAS) values: 0x1, 0x3, 0xf and 0xff, which means
1074 that for a hardware watchpoint, its valid length can only be 1
1075 byte, 2 bytes, 4 bytes or 8 bytes. */
1076
1077 static inline unsigned int
1078 aarch64_watchpoint_length (unsigned int ctrl)
1079 {
1080 switch (DR_CONTROL_LENGTH (ctrl))
1081 {
1082 case 0x01:
1083 return 1;
1084 case 0x03:
1085 return 2;
1086 case 0x0f:
1087 return 4;
1088 case 0xff:
1089 return 8;
1090 default:
1091 return 0;
1092 }
1093 }
1094
1095 /* Given the hardware breakpoint or watchpoint type TYPE and its
1096 length LEN, return the expected encoding for a hardware
1097 breakpoint/watchpoint control register. */
1098
1099 static unsigned int
1100 aarch64_point_encode_ctrl_reg (int type, int len)
1101 {
1102 unsigned int ctrl, ttype;
1103
1104 /* type */
1105 switch (type)
1106 {
1107 case hw_write:
1108 ttype = 2;
1109 break;
1110 case hw_read:
1111 ttype = 1;
1112 break;
1113 case hw_access:
1114 ttype = 3;
1115 break;
1116 case hw_execute:
1117 ttype = 0;
1118 break;
1119 default:
1120 perror_with_name (_("Unrecognized breakpoint/watchpoint type"));
1121 }
1122 ctrl = ttype << 3;
1123
1124 /* length bitmask */
1125 ctrl |= ((1 << len) - 1) << 5;
1126 /* enabled at el0 */
1127 ctrl |= (2 << 1) | 1;
1128
1129 return ctrl;
1130 }
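/* For example (illustrative values), a 4-byte write watchpoint encodes
   as 0x1f5: type 2 (hw_write) in bits 3-4, byte-address-select mask 0xf
   in bits 5-12, and the privilege/enable bits (2 << 1) | 1 in bits 0-2.
   An 8-byte write watchpoint encodes as 0x1ff5.  */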
1131
1132 /* Addresses to be written to the hardware breakpoint and watchpoint
1133 value registers need to be aligned; the alignment is 4-byte and
1134 8-byte respectively. The Linux kernel rejects any non-aligned address
1135 it receives from the related ptrace call. Furthermore, the kernel
1136 currently only supports the following Byte Address Select (BAS)
1137 values: 0x1, 0x3, 0xf and 0xff, which means that for a hardware
1138 watchpoint to be accepted by the kernel (via ptrace call), its
1139 valid length can only be 1 byte, 2 bytes, 4 bytes or 8 bytes.
1140 Despite these limitations, the unaligned watchpoint is supported in
1141 this port.
1142
1143 Return 0 for any non-compliant ADDR and/or LEN; return 1 otherwise. */
1144
1145 static int
1146 aarch64_point_is_aligned (int is_watchpoint, CORE_ADDR addr, int len)
1147 {
1148 unsigned int alignment = is_watchpoint ? AARCH64_HWP_ALIGNMENT
1149 : AARCH64_HBP_ALIGNMENT;
1150
1151 if (addr & (alignment - 1))
1152 return 0;
1153
1154 if (len != 8 && len != 4 && len != 2 && len != 1)
1155 return 0;
1156
1157 return 1;
1158 }
1159
1160 /* Record the insertion of one breakpoint/watchpoint, as represented
1161 by ADDR and CTRL, in the cached debug register state area *STATE. */
1162
1163 static int
1164 aarch64_dr_state_insert_one_point (struct aarch64_debug_reg_state *state,
1165 int type, CORE_ADDR addr, int len)
1166 {
1167 int i, idx, num_regs, is_watchpoint;
1168 unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
1169 CORE_ADDR *dr_addr_p;
1170
1171 /* Set up state pointers. */
1172 is_watchpoint = (type != hw_execute);
1173 gdb_assert (aarch64_point_is_aligned (is_watchpoint, addr, len));
1174 if (is_watchpoint)
1175 {
1176 num_regs = aarch64_num_wp_regs;
1177 dr_addr_p = state->dr_addr_wp;
1178 dr_ctrl_p = state->dr_ctrl_wp;
1179 dr_ref_count = state->dr_ref_count_wp;
1180 }
1181 else
1182 {
1183 num_regs = aarch64_num_bp_regs;
1184 dr_addr_p = state->dr_addr_bp;
1185 dr_ctrl_p = state->dr_ctrl_bp;
1186 dr_ref_count = state->dr_ref_count_bp;
1187 }
1188
1189 ctrl = aarch64_point_encode_ctrl_reg (type, len);
1190
1191 /* Find an existing or free register in our cache. */
1192 idx = -1;
1193 for (i = 0; i < num_regs; ++i)
1194 {
1195 if ((dr_ctrl_p[i] & 1) == 0)
1196 {
1197 gdb_assert (dr_ref_count[i] == 0);
1198 idx = i;
1199 /* no break; continue hunting for an existing one. */
1200 }
1201 else if (dr_addr_p[i] == addr && dr_ctrl_p[i] == ctrl)
1202 {
1203 gdb_assert (dr_ref_count[i] != 0);
1204 idx = i;
1205 break;
1206 }
1207 }
1208
1209 /* No space. */
1210 if (idx == -1)
1211 return -1;
1212
1213 /* Update our cache. */
1214 if ((dr_ctrl_p[idx] & 1) == 0)
1215 {
1216 /* new entry */
1217 dr_addr_p[idx] = addr;
1218 dr_ctrl_p[idx] = ctrl;
1219 dr_ref_count[idx] = 1;
1220 /* Notify the change. */
1221 aarch64_notify_debug_reg_change (state, is_watchpoint, idx);
1222 }
1223 else
1224 {
1225 /* existing entry */
1226 dr_ref_count[idx]++;
1227 }
1228
1229 return 0;
1230 }
1231
1232 /* Record the removal of one breakpoint/watchpoint, as represented by
1233 ADDR and CTRL, in the cached debug register state area *STATE. */
1234
1235 static int
1236 aarch64_dr_state_remove_one_point (struct aarch64_debug_reg_state *state,
1237 int type, CORE_ADDR addr, int len)
1238 {
1239 int i, num_regs, is_watchpoint;
1240 unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
1241 CORE_ADDR *dr_addr_p;
1242
1243 /* Set up state pointers. */
1244 is_watchpoint = (type != hw_execute);
1245 gdb_assert (aarch64_point_is_aligned (is_watchpoint, addr, len));
1246 if (is_watchpoint)
1247 {
1248 num_regs = aarch64_num_wp_regs;
1249 dr_addr_p = state->dr_addr_wp;
1250 dr_ctrl_p = state->dr_ctrl_wp;
1251 dr_ref_count = state->dr_ref_count_wp;
1252 }
1253 else
1254 {
1255 num_regs = aarch64_num_bp_regs;
1256 dr_addr_p = state->dr_addr_bp;
1257 dr_ctrl_p = state->dr_ctrl_bp;
1258 dr_ref_count = state->dr_ref_count_bp;
1259 }
1260
1261 ctrl = aarch64_point_encode_ctrl_reg (type, len);
1262
1263 /* Find the entry that matches the ADDR and CTRL. */
1264 for (i = 0; i < num_regs; ++i)
1265 if (dr_addr_p[i] == addr && dr_ctrl_p[i] == ctrl)
1266 {
1267 gdb_assert (dr_ref_count[i] != 0);
1268 break;
1269 }
1270
1271 /* Not found. */
1272 if (i == num_regs)
1273 return -1;
1274
1275 /* Clear our cache. */
1276 if (--dr_ref_count[i] == 0)
1277 {
1278 /* Clear the enable bit. */
1279 ctrl &= ~1;
1280 dr_addr_p[i] = 0;
1281 dr_ctrl_p[i] = ctrl;
1282 /* Notify the change. */
1283 aarch64_notify_debug_reg_change (state, is_watchpoint, i);
1284 }
1285
1286 return 0;
1287 }
1288
1289 /* Implement insertion and removal of a single breakpoint. */
1290
1291 static int
1292 aarch64_handle_breakpoint (int type, CORE_ADDR addr, int len, int is_insert)
1293 {
1294 struct aarch64_debug_reg_state *state;
1295
1296 /* The hardware breakpoint on AArch64 should always be 4-byte
1297 aligned. */
1298 if (!aarch64_point_is_aligned (0 /* is_watchpoint */ , addr, len))
1299 return -1;
1300
1301 state = aarch64_get_debug_reg_state (ptid_get_pid (inferior_ptid));
1302
1303 if (is_insert)
1304 return aarch64_dr_state_insert_one_point (state, type, addr, len);
1305 else
1306 return aarch64_dr_state_remove_one_point (state, type, addr, len);
1307 }
1308
1309 /* Insert a hardware-assisted breakpoint at BP_TGT->reqstd_address.
1310 Return 0 on success, -1 on failure. */
1311
1312 static int
1313 aarch64_linux_insert_hw_breakpoint (struct target_ops *self,
1314 struct gdbarch *gdbarch,
1315 struct bp_target_info *bp_tgt)
1316 {
1317 int ret;
1318 CORE_ADDR addr = bp_tgt->placed_address = bp_tgt->reqstd_address;
1319 const int len = 4;
1320 const int type = hw_execute;
1321
1322 if (show_debug_regs)
1323 fprintf_unfiltered
1324 (gdb_stdlog,
1325 "insert_hw_breakpoint on entry (addr=0x%08lx, len=%d))\n",
1326 (unsigned long) addr, len);
1327
1328 ret = aarch64_handle_breakpoint (type, addr, len, 1 /* is_insert */);
1329
1330 if (show_debug_regs)
1331 {
1332 struct aarch64_debug_reg_state *state
1333 = aarch64_get_debug_reg_state (ptid_get_pid (inferior_ptid));
1334
1335 aarch64_show_debug_reg_state (state,
1336 "insert_hw_breakpoint", addr, len, type);
1337 }
1338
1339 return ret;
1340 }
1341
1342 /* Remove a hardware-assisted breakpoint at BP_TGT->placed_address.
1343 Return 0 on success, -1 on failure. */
1344
1345 static int
1346 aarch64_linux_remove_hw_breakpoint (struct target_ops *self,
1347 struct gdbarch *gdbarch,
1348 struct bp_target_info *bp_tgt)
1349 {
1350 int ret;
1351 CORE_ADDR addr = bp_tgt->placed_address;
1352 const int len = 4;
1353 const int type = hw_execute;
1354
1355 if (show_debug_regs)
1356 fprintf_unfiltered
1357 (gdb_stdlog, "remove_hw_breakpoint on entry (addr=0x%08lx, len=%d))\n",
1358 (unsigned long) addr, len);
1359
1360 ret = aarch64_handle_breakpoint (type, addr, len, 0 /* is_insert */);
1361
1362 if (show_debug_regs)
1363 {
1364 struct aarch64_debug_reg_state *state
1365 = aarch64_get_debug_reg_state (ptid_get_pid (inferior_ptid));
1366
1367 aarch64_show_debug_reg_state (state,
1368 "remove_hw_watchpoint", addr, len, type);
1369 }
1370
1371 return ret;
1372 }
1373
1374 /* This is essentially the same as aarch64_handle_breakpoint, except
1375 that it handles an aligned watchpoint. */
1376
1377 static int
1378 aarch64_handle_aligned_watchpoint (int type, CORE_ADDR addr, int len,
1379 int is_insert)
1380 {
1381 struct aarch64_debug_reg_state *state
1382 = aarch64_get_debug_reg_state (ptid_get_pid (inferior_ptid));
1383
1384 if (is_insert)
1385 return aarch64_dr_state_insert_one_point (state, type, addr, len);
1386 else
1387 return aarch64_dr_state_remove_one_point (state, type, addr, len);
1388 }
1389
1390 /* Insert/remove unaligned watchpoint by calling
1391 aarch64_align_watchpoint repeatedly until the whole watched region,
1392 as represented by ADDR and LEN, has been properly aligned and ready
1393 to be written to one or more hardware watchpoint registers.
1394 IS_INSERT indicates whether this is an insertion or a deletion.
1395 Return 0 on success. */
1396
1397 static int
1398 aarch64_handle_unaligned_watchpoint (int type, CORE_ADDR addr, int len,
1399 int is_insert)
1400 {
1401 struct aarch64_debug_reg_state *state
1402 = aarch64_get_debug_reg_state (ptid_get_pid (inferior_ptid));
1403
1404 while (len > 0)
1405 {
1406 CORE_ADDR aligned_addr;
1407 int aligned_len, ret;
1408
1409 aarch64_align_watchpoint (addr, len, &aligned_addr, &aligned_len,
1410 &addr, &len);
1411
1412 if (is_insert)
1413 ret = aarch64_dr_state_insert_one_point (state, type, aligned_addr,
1414 aligned_len);
1415 else
1416 ret = aarch64_dr_state_remove_one_point (state, type, aligned_addr,
1417 aligned_len);
1418
1419 if (show_debug_regs)
1420 fprintf_unfiltered (gdb_stdlog,
1421 "handle_unaligned_watchpoint: is_insert: %d\n"
1422 " aligned_addr: 0x%08lx, aligned_len: %d\n"
1423 " next_addr: 0x%08lx, next_len: %d\n",
1424 is_insert, aligned_addr, aligned_len, addr, len);
1425
1426 if (ret != 0)
1427 return ret;
1428 }
1429
1430 return 0;
1431 }
1432
1433 /* Implements insertion and removal of a single watchpoint. */
1434
1435 static int
1436 aarch64_handle_watchpoint (int type, CORE_ADDR addr, int len, int is_insert)
1437 {
1438 if (aarch64_point_is_aligned (1 /* is_watchpoint */ , addr, len))
1439 return aarch64_handle_aligned_watchpoint (type, addr, len, is_insert);
1440 else
1441 return aarch64_handle_unaligned_watchpoint (type, addr, len, is_insert);
1442 }
1443
1444 /* Implement the "to_insert_watchpoint" target_ops method.
1445
1446 Insert a watchpoint to watch a memory region which starts at
1447 address ADDR and whose length is LEN bytes. Watch memory accesses
1448 of the type TYPE. Return 0 on success, -1 on failure. */
1449
1450 static int
1451 aarch64_linux_insert_watchpoint (struct target_ops *self,
1452 CORE_ADDR addr, int len, int type,
1453 struct expression *cond)
1454 {
1455 int ret;
1456
1457 if (show_debug_regs)
1458 fprintf_unfiltered (gdb_stdlog,
1459 "insert_watchpoint on entry (addr=0x%08lx, len=%d)\n",
1460 (unsigned long) addr, len);
1461
1462 gdb_assert (type != hw_execute);
1463
1464 ret = aarch64_handle_watchpoint (type, addr, len, 1 /* is_insert */);
1465
1466 if (show_debug_regs)
1467 {
1468 struct aarch64_debug_reg_state *state
1469 = aarch64_get_debug_reg_state (ptid_get_pid (inferior_ptid));
1470
1471 aarch64_show_debug_reg_state (state,
1472 "insert_watchpoint", addr, len, type);
1473 }
1474
1475 return ret;
1476 }
1477
1478 /* Implement the "to_remove_watchpoint" target_ops method.
1479 Remove a watchpoint that watched the memory region which starts at
1480 address ADDR, whose length is LEN bytes, and for accesses of the
1481 type TYPE. Return 0 on success, -1 on failure. */
1482
1483 static int
1484 aarch64_linux_remove_watchpoint (struct target_ops *self,
1485 CORE_ADDR addr, int len, int type,
1486 struct expression *cond)
1487 {
1488 int ret;
1489
1490 if (show_debug_regs)
1491 fprintf_unfiltered (gdb_stdlog,
1492 "remove_watchpoint on entry (addr=0x%08lx, len=%d)\n",
1493 (unsigned long) addr, len);
1494
1495 gdb_assert (type != hw_execute);
1496
1497 ret = aarch64_handle_watchpoint (type, addr, len, 0 /* is_insert */);
1498
1499 if (show_debug_regs)
1500 {
1501 struct aarch64_debug_reg_state *state
1502 = aarch64_get_debug_reg_state (ptid_get_pid (inferior_ptid));
1503
1504 aarch64_show_debug_reg_state (state,
1505 "remove_watchpoint", addr, len, type);
1506 }
1507
1508 return ret;
1509 }
1510
1511 /* Implement the "to_region_ok_for_hw_watchpoint" target_ops method. */
1512
1513 static int
1514 aarch64_linux_region_ok_for_hw_watchpoint (struct target_ops *self,
1515 CORE_ADDR addr, int len)
1516 {
1517 CORE_ADDR aligned_addr;
1518
1519 /* Cannot set watchpoints for zero or negative lengths. */
1520 if (len <= 0)
1521 return 0;
1522
1523 /* Must have hardware watchpoint debug register(s). */
1524 if (aarch64_num_wp_regs == 0)
1525 return 0;
1526
1527 /* We support unaligned watchpoint address and arbitrary length,
1528 as long as the size of the whole watched area after alignment
1529 doesn't exceed size of the total area that all watchpoint debug
1530 registers can watch cooperatively.
1531
1532 This is a very relaxed rule, but unfortunately there are
1533 limitations, e.g. false-positive hits, due to limited support of
1534 hardware debug registers in the kernel. See comment above
1535 aarch64_align_watchpoint for more information. */
1536
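  /* For example (illustrative figures), with 4 watchpoint registers of
     8 bytes each, a request for 30 bytes starting at 0x1003 aligns down
     to 0x1000 and would need to reach 0x1021, beyond the 0x1020 limit
     that the 4 registers can cover together, so it is rejected.  */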
1537 aligned_addr = addr & ~(AARCH64_HWP_MAX_LEN_PER_REG - 1);
1538 if (aligned_addr + aarch64_num_wp_regs * AARCH64_HWP_MAX_LEN_PER_REG
1539 < addr + len)
1540 return 0;
1541
1542 /* All tests passed so we are likely to be able to set the watchpoint.
1543 It is only 'likely' rather than certain because we do not check
1544 the current usage of the watchpoint registers, and
1545 there may not be enough registers available for this watchpoint.
1546 Ideally we should check the cached debug register state, however
1547 the checking is costly. */
1548 return 1;
1549 }
1550
1551 /* Implement the "to_stopped_data_address" target_ops method. */
1552
1553 static int
1554 aarch64_linux_stopped_data_address (struct target_ops *target,
1555 CORE_ADDR *addr_p)
1556 {
1557 siginfo_t siginfo;
1558 int i, tid;
1559 struct aarch64_debug_reg_state *state;
1560
1561 if (!linux_nat_get_siginfo (inferior_ptid, &siginfo))
1562 return 0;
1563
1564 /* This must be a hardware breakpoint or watchpoint trap. */
1565 if (siginfo.si_signo != SIGTRAP
1566 || (siginfo.si_code & 0xffff) != TRAP_HWBKPT)
1567 return 0;
1568
1569 /* Check if the address matches any watched address. */
1570 state = aarch64_get_debug_reg_state (ptid_get_pid (inferior_ptid));
1571 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
1572 {
1573 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
1574 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
1575 const CORE_ADDR addr_watch = state->dr_addr_wp[i];
1576
1577 if (state->dr_ref_count_wp[i]
1578 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
1579 && addr_trap >= addr_watch
1580 && addr_trap < addr_watch + len)
1581 {
1582 *addr_p = addr_trap;
1583 return 1;
1584 }
1585 }
1586
1587 return 0;
1588 }
1589
1590 /* Implement the "to_stopped_by_watchpoint" target_ops method. */
1591
1592 static int
1593 aarch64_linux_stopped_by_watchpoint (struct target_ops *ops)
1594 {
1595 CORE_ADDR addr;
1596
1597 return aarch64_linux_stopped_data_address (ops, &addr);
1598 }
1599
1600 /* Implement the "to_watchpoint_addr_within_range" target_ops method. */
1601
1602 static int
1603 aarch64_linux_watchpoint_addr_within_range (struct target_ops *target,
1604 CORE_ADDR addr,
1605 CORE_ADDR start, int length)
1606 {
1607 return start <= addr && start + length - 1 >= addr;
1608 }
1609
1610 /* Define AArch64 maintenance commands. */
1611
1612 static void
1613 add_show_debug_regs_command (void)
1614 {
1615 /* A maintenance command to enable printing the internal DRi mirror
1616 variables. */
1617 add_setshow_boolean_cmd ("show-debug-regs", class_maintenance,
1618 &show_debug_regs, _("\
1619 Set whether to show variables that mirror the AArch64 debug registers."), _("\
1620 Show whether to show variables that mirror the AArch64 debug registers."), _("\
1621 Use \"on\" to enable, \"off\" to disable.\n\
1622 If enabled, the debug registers values are shown when GDB inserts\n\
1623 or removes a hardware breakpoint or watchpoint, and when the inferior\n\
1624 triggers a breakpoint or watchpoint."),
1625 NULL,
1626 NULL,
1627 &maintenance_set_cmdlist,
1628 &maintenance_show_cmdlist);
1629 }
1630
1631 /* -Wmissing-prototypes. */
1632 void _initialize_aarch64_linux_nat (void);
1633
1634 void
1635 _initialize_aarch64_linux_nat (void)
1636 {
1637 struct target_ops *t;
1638
1639 /* Fill in the generic GNU/Linux methods. */
1640 t = linux_target ();
1641
1642 add_show_debug_regs_command ();
1643
1644 /* Add our register access methods. */
1645 t->to_fetch_registers = aarch64_linux_fetch_inferior_registers;
1646 t->to_store_registers = aarch64_linux_store_inferior_registers;
1647
1648 t->to_read_description = aarch64_linux_read_description;
1649
1650 t->to_can_use_hw_breakpoint = aarch64_linux_can_use_hw_breakpoint;
1651 t->to_insert_hw_breakpoint = aarch64_linux_insert_hw_breakpoint;
1652 t->to_remove_hw_breakpoint = aarch64_linux_remove_hw_breakpoint;
1653 t->to_region_ok_for_hw_watchpoint =
1654 aarch64_linux_region_ok_for_hw_watchpoint;
1655 t->to_insert_watchpoint = aarch64_linux_insert_watchpoint;
1656 t->to_remove_watchpoint = aarch64_linux_remove_watchpoint;
1657 t->to_stopped_by_watchpoint = aarch64_linux_stopped_by_watchpoint;
1658 t->to_stopped_data_address = aarch64_linux_stopped_data_address;
1659 t->to_watchpoint_addr_within_range =
1660 aarch64_linux_watchpoint_addr_within_range;
1661
1662 /* Override the GNU/Linux inferior startup hook. */
1663 super_post_startup_inferior = t->to_post_startup_inferior;
1664 t->to_post_startup_inferior = aarch64_linux_child_post_startup_inferior;
1665
1666 /* Register the target. */
1667 linux_nat_add_target (t);
1668 linux_nat_set_new_thread (t, aarch64_linux_new_thread);
1669 linux_nat_set_new_fork (t, aarch64_linux_new_fork);
1670 linux_nat_set_forget_process (t, aarch64_forget_process);
1671 linux_nat_set_prepare_to_resume (t, aarch64_linux_prepare_to_resume);
1672 }