[binutils-gdb.git] / gdbserver / linux-x86-low.cc
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2023 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/x86-xstate.h"
29 #include "nat/gdb_ptrace.h"
30
31 #ifdef __x86_64__
32 #include "nat/amd64-linux-siginfo.h"
33 #endif
34
35 #include "gdb_proc_service.h"
36 /* Don't include elf/common.h if linux/elf.h got included by
37 gdb_proc_service.h. */
38 #ifndef ELFMAG0
39 #include "elf/common.h"
40 #endif
41
42 #include "gdbsupport/agent.h"
43 #include "tdesc.h"
44 #include "tracepoint.h"
45 #include "ax.h"
46 #include "nat/linux-nat.h"
47 #include "nat/x86-linux.h"
48 #include "nat/x86-linux-dregs.h"
49 #include "linux-x86-tdesc.h"
50
51 #ifdef __x86_64__
52 static target_desc_up tdesc_amd64_linux_no_xml;
53 #endif
54 static target_desc_up tdesc_i386_linux_no_xml;
55
56
     57 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };	/* 5-byte "jmp rel32".  */
     58 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };	/* 4-byte "jmp rel16" (0x66 prefix).  */
59
60 /* Backward compatibility for gdb without XML support. */
61
62 static const char xmltarget_i386_linux_no_xml[] = "@<target>\
63 <architecture>i386</architecture>\
64 <osabi>GNU/Linux</osabi>\
65 </target>";
66
67 #ifdef __x86_64__
68 static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
69 <architecture>i386:x86-64</architecture>\
70 <osabi>GNU/Linux</osabi>\
71 </target>";
72 #endif
73
74 #include <sys/reg.h>
75 #include <sys/procfs.h>
76 #include <sys/uio.h>
77
78 #ifndef PTRACE_GET_THREAD_AREA
79 #define PTRACE_GET_THREAD_AREA 25
80 #endif
81
82 /* This definition comes from prctl.h, but some kernels may not have it. */
83 #ifndef PTRACE_ARCH_PRCTL
84 #define PTRACE_ARCH_PRCTL 30
85 #endif
86
87 /* The following definitions come from prctl.h, but may be absent
88 for certain configurations. */
89 #ifndef ARCH_GET_FS
90 #define ARCH_SET_GS 0x1001
91 #define ARCH_SET_FS 0x1002
92 #define ARCH_GET_FS 0x1003
93 #define ARCH_GET_GS 0x1004
94 #endif
95
96 /* Linux target op definitions for the x86 architecture.
97 This is initialized assuming an amd64 target.
98 'low_arch_setup' will correct it for i386 or amd64 targets. */
99
100 class x86_target : public linux_process_target
101 {
102 public:
103
104 const regs_info *get_regs_info () override;
105
106 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
107
108 bool supports_z_point_type (char z_type) override;
109
110 void process_qsupported (gdb::array_view<const char * const> features) override;
111
112 bool supports_tracepoints () override;
113
114 bool supports_fast_tracepoints () override;
115
116 int install_fast_tracepoint_jump_pad
117 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
118 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
119 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
120 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
121 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
122 char *err) override;
123
124 int get_min_fast_tracepoint_insn_len () override;
125
126 struct emit_ops *emit_ops () override;
127
128 int get_ipa_tdesc_idx () override;
129
130 protected:
131
132 void low_arch_setup () override;
133
134 bool low_cannot_fetch_register (int regno) override;
135
136 bool low_cannot_store_register (int regno) override;
137
138 bool low_supports_breakpoints () override;
139
140 CORE_ADDR low_get_pc (regcache *regcache) override;
141
142 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
143
144 int low_decr_pc_after_break () override;
145
146 bool low_breakpoint_at (CORE_ADDR pc) override;
147
148 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
149 int size, raw_breakpoint *bp) override;
150
151 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
152 int size, raw_breakpoint *bp) override;
153
154 bool low_stopped_by_watchpoint () override;
155
156 CORE_ADDR low_stopped_data_address () override;
157
158 /* collect_ptrace_register/supply_ptrace_register are not needed in the
159 native i386 case (no registers smaller than an xfer unit), and are not
160 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
161
162 /* Need to fix up i386 siginfo if host is amd64. */
163 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
164 int direction) override;
165
166 arch_process_info *low_new_process () override;
167
168 void low_delete_process (arch_process_info *info) override;
169
170 void low_new_thread (lwp_info *) override;
171
172 void low_delete_thread (arch_lwp_info *) override;
173
174 void low_new_fork (process_info *parent, process_info *child) override;
175
176 void low_prepare_to_resume (lwp_info *lwp) override;
177
178 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
179
180 bool low_supports_range_stepping () override;
181
182 bool low_supports_catch_syscall () override;
183
184 void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
185
186 private:
187
    188   /* Update the target description of all processes; a new GDB has
    189      connected, and it may or may not support xml target descriptions.  */
190 void update_xmltarget ();
191 };
192
193 /* The singleton target ops object. */
194
195 static x86_target the_x86_target;
196
197 /* Per-process arch-specific data we want to keep. */
198
199 struct arch_process_info
200 {
201 struct x86_debug_reg_state debug_reg_state;
202 };
203
204 #ifdef __x86_64__
205
206 /* Mapping between the general-purpose registers in `struct user'
207 format and GDB's register array layout.
208 Note that the transfer layout uses 64-bit regs. */
209 static /*const*/ int i386_regmap[] =
210 {
211 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
212 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
213 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
214 DS * 8, ES * 8, FS * 8, GS * 8
215 };
216
217 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
218
    219 /* So the code below doesn't have to care whether it's i386 or amd64.  */
220 #define ORIG_EAX ORIG_RAX
221 #define REGSIZE 8
222
223 static const int x86_64_regmap[] =
224 {
225 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
226 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
227 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
228 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
229 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
230 DS * 8, ES * 8, FS * 8, GS * 8,
231 -1, -1, -1, -1, -1, -1, -1, -1,
232 -1, -1, -1, -1, -1, -1, -1, -1,
233 -1, -1, -1, -1, -1, -1, -1, -1,
234 -1,
235 -1, -1, -1, -1, -1, -1, -1, -1,
236 ORIG_RAX * 8,
    237   21 * 8,  22 * 8,			/* fs_base, gs_base.  */
238 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
239 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
240 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
241 -1, -1, -1, -1, -1, -1, -1, -1,
242 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
243 -1, -1, -1, -1, -1, -1, -1, -1,
244 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
245 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
246 -1, -1, -1, -1, -1, -1, -1, -1,
247 -1, -1, -1, -1, -1, -1, -1, -1,
248 -1, -1, -1, -1, -1, -1, -1, -1,
249 -1 /* pkru */
250 };
251
252 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
253 #define X86_64_USER_REGS (GS + 1)
254
255 #else /* ! __x86_64__ */
256
257 /* Mapping between the general-purpose registers in `struct user'
258 format and GDB's register array layout. */
259 static /*const*/ int i386_regmap[] =
260 {
261 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
262 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
263 EIP * 4, EFL * 4, CS * 4, SS * 4,
264 DS * 4, ES * 4, FS * 4, GS * 4
265 };
266
267 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
268
269 #define REGSIZE 4
270
271 #endif
272
273 #ifdef __x86_64__
274
    275 /* Returns true if THREAD belongs to an x86-64 process, per the tdesc.  */
276
277 static int
278 is_64bit_tdesc (thread_info *thread)
279 {
280 struct regcache *regcache = get_thread_regcache (thread, 0);
281
282 return register_size (regcache->tdesc, 0) == 8;
283 }
284
285 #endif
286
287 \f
288 /* Called by libthread_db. */
289
290 ps_err_e
291 ps_get_thread_area (struct ps_prochandle *ph,
292 lwpid_t lwpid, int idx, void **base)
293 {
294 #ifdef __x86_64__
295 lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
296 gdb_assert (lwp != nullptr);
297 int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));
298
299 if (use_64bit)
300 {
301 switch (idx)
302 {
303 case FS:
304 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
305 return PS_OK;
306 break;
307 case GS:
308 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
309 return PS_OK;
310 break;
311 default:
312 return PS_BADADDR;
313 }
314 return PS_ERR;
315 }
316 #endif
317
318 {
319 unsigned int desc[4];
320
321 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
322 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
323 return PS_ERR;
324
325 /* Ensure we properly extend the value to 64-bits for x86_64. */
326 *base = (void *) (uintptr_t) desc[1];
327 return PS_OK;
328 }
329 }
330
331 /* Get the thread area address. This is used to recognize which
332 thread is which when tracing with the in-process agent library. We
333 don't read anything from the address, and treat it as opaque; it's
334 the address itself that we assume is unique per-thread. */
335
336 int
337 x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
338 {
339 lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
340 gdb_assert (lwp != nullptr);
341 #ifdef __x86_64__
342 int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));
343
344 if (use_64bit)
345 {
346 void *base;
347 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
348 {
349 *addr = (CORE_ADDR) (uintptr_t) base;
350 return 0;
351 }
352
353 return -1;
354 }
355 #endif
356
357 {
358 struct thread_info *thr = get_lwp_thread (lwp);
359 struct regcache *regcache = get_thread_regcache (thr, 1);
360 unsigned int desc[4];
361 ULONGEST gs = 0;
    362     const int reg_thread_area = 3; /* Low 3 selector bits (RPL/TI) shifted off to get the GDT index.  */
363 int idx;
364
365 collect_register_by_name (regcache, "gs", &gs);
366
367 idx = gs >> reg_thread_area;
368
369 if (ptrace (PTRACE_GET_THREAD_AREA,
370 lwpid_of (thr),
371 (void *) (long) idx, (unsigned long) &desc) < 0)
372 return -1;
373
374 *addr = desc[1];
375 return 0;
376 }
377 }
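
/* Illustration (editor's sketch, not part of the original file): the 32-bit
   branch above turns the %gs selector into a GDT index by shifting off its
   low three bits (the RPL and table-indicator bits).  For a hypothetical
   selector value of 0x33,

     idx = 0x33 >> 3;   => 6

   and PTRACE_GET_THREAD_AREA then fills DESC with that GDT entry, whose
   base address desc[1] is the thread-area address returned.  */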
378
379
380 \f
381 bool
382 x86_target::low_cannot_store_register (int regno)
383 {
384 #ifdef __x86_64__
385 if (is_64bit_tdesc (current_thread))
386 return false;
387 #endif
388
389 return regno >= I386_NUM_REGS;
390 }
391
392 bool
393 x86_target::low_cannot_fetch_register (int regno)
394 {
395 #ifdef __x86_64__
396 if (is_64bit_tdesc (current_thread))
397 return false;
398 #endif
399
400 return regno >= I386_NUM_REGS;
401 }
402
403 static void
404 collect_register_i386 (struct regcache *regcache, int regno, void *buf)
405 {
406 collect_register (regcache, regno, buf);
407
408 #ifdef __x86_64__
409 /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
410 space reserved in buf for the register is 8 bytes. Make sure the entire
411 reserved space is initialized. */
412
413 gdb_assert (register_size (regcache->tdesc, regno) == 4);
414
415 if (regno == RAX)
416 {
417 /* Sign extend EAX value to avoid potential syscall restart
418 problems.
419
420 See amd64_linux_collect_native_gregset() in
421 gdb/amd64-linux-nat.c for a detailed explanation. */
422 *(int64_t *) buf = *(int32_t *) buf;
423 }
424 else
425 {
426 /* Zero-extend. */
427 *(uint64_t *) buf = *(uint32_t *) buf;
428 }
429 #endif
430 }
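
/* Illustration (editor's sketch, not part of the original file): with a
   64-bit gdbserver debugging an -m32 inferior, a negative EAX value such as
   0xfffffe00 is sign-extended to 0xfffffffffffffe00 in its 8-byte regset
   slot, while the same bit pattern in any other register, say EBX, is
   zero-extended to 0x00000000fffffe00.  */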
431
432 static void
433 x86_fill_gregset (struct regcache *regcache, void *buf)
434 {
435 int i;
436
437 #ifdef __x86_64__
438 if (register_size (regcache->tdesc, 0) == 8)
439 {
440 for (i = 0; i < X86_64_NUM_REGS; i++)
441 if (x86_64_regmap[i] != -1)
442 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
443
444 return;
445 }
446 #endif
447
448 for (i = 0; i < I386_NUM_REGS; i++)
449 collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);
450
451 /* Handle ORIG_EAX, which is not in i386_regmap. */
452 collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
453 ((char *) buf) + ORIG_EAX * REGSIZE);
454 }
455
456 static void
457 x86_store_gregset (struct regcache *regcache, const void *buf)
458 {
459 int i;
460
461 #ifdef __x86_64__
462 if (register_size (regcache->tdesc, 0) == 8)
463 {
464 for (i = 0; i < X86_64_NUM_REGS; i++)
465 if (x86_64_regmap[i] != -1)
466 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
467
468 return;
469 }
470 #endif
471
472 for (i = 0; i < I386_NUM_REGS; i++)
473 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
474
475 supply_register_by_name (regcache, "orig_eax",
476 ((char *) buf) + ORIG_EAX * REGSIZE);
477 }
478
479 static void
480 x86_fill_fpregset (struct regcache *regcache, void *buf)
481 {
482 #ifdef __x86_64__
483 i387_cache_to_fxsave (regcache, buf);
484 #else
485 i387_cache_to_fsave (regcache, buf);
486 #endif
487 }
488
489 static void
490 x86_store_fpregset (struct regcache *regcache, const void *buf)
491 {
492 #ifdef __x86_64__
493 i387_fxsave_to_cache (regcache, buf);
494 #else
495 i387_fsave_to_cache (regcache, buf);
496 #endif
497 }
498
499 #ifndef __x86_64__
500
501 static void
502 x86_fill_fpxregset (struct regcache *regcache, void *buf)
503 {
504 i387_cache_to_fxsave (regcache, buf);
505 }
506
507 static void
508 x86_store_fpxregset (struct regcache *regcache, const void *buf)
509 {
510 i387_fxsave_to_cache (regcache, buf);
511 }
512
513 #endif
514
515 static void
516 x86_fill_xstateregset (struct regcache *regcache, void *buf)
517 {
518 i387_cache_to_xsave (regcache, buf);
519 }
520
521 static void
522 x86_store_xstateregset (struct regcache *regcache, const void *buf)
523 {
524 i387_xsave_to_cache (regcache, buf);
525 }
526
527 /* ??? The non-biarch i386 case stores all the i387 regs twice.
528 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
529 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
    530    doesn't work.  It would be nice to avoid the duplication in the case
    531    where it does work.  Maybe the arch_setup routine could check whether
    532    it works and update the supported regsets accordingly.  */
533
534 static struct regset_info x86_regsets[] =
535 {
536 #ifdef HAVE_PTRACE_GETREGS
537 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
538 GENERAL_REGS,
539 x86_fill_gregset, x86_store_gregset },
540 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
541 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
542 # ifndef __x86_64__
543 # ifdef HAVE_PTRACE_GETFPXREGS
544 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
545 EXTENDED_REGS,
546 x86_fill_fpxregset, x86_store_fpxregset },
547 # endif
548 # endif
549 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
550 FP_REGS,
551 x86_fill_fpregset, x86_store_fpregset },
552 #endif /* HAVE_PTRACE_GETREGS */
553 NULL_REGSET
554 };
555
556 bool
557 x86_target::low_supports_breakpoints ()
558 {
559 return true;
560 }
561
562 CORE_ADDR
563 x86_target::low_get_pc (regcache *regcache)
564 {
565 int use_64bit = register_size (regcache->tdesc, 0) == 8;
566
567 if (use_64bit)
568 {
569 uint64_t pc;
570
571 collect_register_by_name (regcache, "rip", &pc);
572 return (CORE_ADDR) pc;
573 }
574 else
575 {
576 uint32_t pc;
577
578 collect_register_by_name (regcache, "eip", &pc);
579 return (CORE_ADDR) pc;
580 }
581 }
582
583 void
584 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
585 {
586 int use_64bit = register_size (regcache->tdesc, 0) == 8;
587
588 if (use_64bit)
589 {
590 uint64_t newpc = pc;
591
592 supply_register_by_name (regcache, "rip", &newpc);
593 }
594 else
595 {
596 uint32_t newpc = pc;
597
598 supply_register_by_name (regcache, "eip", &newpc);
599 }
600 }
601
602 int
603 x86_target::low_decr_pc_after_break ()
604 {
605 return 1;
606 }
607
608 \f
    609 static const gdb_byte x86_breakpoint[] = { 0xCC };	/* The INT3 instruction.  */
610 #define x86_breakpoint_len 1
611
612 bool
613 x86_target::low_breakpoint_at (CORE_ADDR pc)
614 {
615 unsigned char c;
616
617 read_memory (pc, &c, 1);
618 if (c == 0xCC)
619 return true;
620
621 return false;
622 }
623 \f
624 /* Low-level function vector. */
625 struct x86_dr_low_type x86_dr_low =
626 {
627 x86_linux_dr_set_control,
628 x86_linux_dr_set_addr,
629 x86_linux_dr_get_addr,
630 x86_linux_dr_get_status,
631 x86_linux_dr_get_control,
632 sizeof (void *),
633 };
634 \f
635 /* Breakpoint/Watchpoint support. */
636
637 bool
638 x86_target::supports_z_point_type (char z_type)
639 {
640 switch (z_type)
641 {
642 case Z_PACKET_SW_BP:
643 case Z_PACKET_HW_BP:
644 case Z_PACKET_WRITE_WP:
645 case Z_PACKET_ACCESS_WP:
646 return true;
647 default:
648 return false;
649 }
650 }
651
652 int
653 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
654 int size, raw_breakpoint *bp)
655 {
656 struct process_info *proc = current_process ();
657
658 switch (type)
659 {
660 case raw_bkpt_type_hw:
661 case raw_bkpt_type_write_wp:
662 case raw_bkpt_type_access_wp:
663 {
664 enum target_hw_bp_type hw_type
665 = raw_bkpt_type_to_target_hw_bp_type (type);
666 struct x86_debug_reg_state *state
667 = &proc->priv->arch_private->debug_reg_state;
668
669 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
670 }
671
672 default:
673 /* Unsupported. */
674 return 1;
675 }
676 }
677
678 int
679 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
680 int size, raw_breakpoint *bp)
681 {
682 struct process_info *proc = current_process ();
683
684 switch (type)
685 {
686 case raw_bkpt_type_hw:
687 case raw_bkpt_type_write_wp:
688 case raw_bkpt_type_access_wp:
689 {
690 enum target_hw_bp_type hw_type
691 = raw_bkpt_type_to_target_hw_bp_type (type);
692 struct x86_debug_reg_state *state
693 = &proc->priv->arch_private->debug_reg_state;
694
695 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
696 }
697 default:
698 /* Unsupported. */
699 return 1;
700 }
701 }
702
703 bool
704 x86_target::low_stopped_by_watchpoint ()
705 {
706 struct process_info *proc = current_process ();
707 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
708 }
709
710 CORE_ADDR
711 x86_target::low_stopped_data_address ()
712 {
713 struct process_info *proc = current_process ();
714 CORE_ADDR addr;
715 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
716 &addr))
717 return addr;
718 return 0;
719 }
720 \f
721 /* Called when a new process is created. */
722
723 arch_process_info *
724 x86_target::low_new_process ()
725 {
726 struct arch_process_info *info = XCNEW (struct arch_process_info);
727
728 x86_low_init_dregs (&info->debug_reg_state);
729
730 return info;
731 }
732
733 /* Called when a process is being deleted. */
734
735 void
736 x86_target::low_delete_process (arch_process_info *info)
737 {
738 xfree (info);
739 }
740
741 void
742 x86_target::low_new_thread (lwp_info *lwp)
743 {
744 /* This comes from nat/. */
745 x86_linux_new_thread (lwp);
746 }
747
748 void
749 x86_target::low_delete_thread (arch_lwp_info *alwp)
750 {
751 /* This comes from nat/. */
752 x86_linux_delete_thread (alwp);
753 }
754
755 /* Target routine for new_fork. */
756
757 void
758 x86_target::low_new_fork (process_info *parent, process_info *child)
759 {
760 /* These are allocated by linux_add_process. */
761 gdb_assert (parent->priv != NULL
762 && parent->priv->arch_private != NULL);
763 gdb_assert (child->priv != NULL
764 && child->priv->arch_private != NULL);
765
766 /* Linux kernel before 2.6.33 commit
767 72f674d203cd230426437cdcf7dd6f681dad8b0d
768 will inherit hardware debug registers from parent
769 on fork/vfork/clone. Newer Linux kernels create such tasks with
770 zeroed debug registers.
771
772 GDB core assumes the child inherits the watchpoints/hw
773 breakpoints of the parent, and will remove them all from the
774 forked off process. Copy the debug registers mirrors into the
775 new process so that all breakpoints and watchpoints can be
776 removed together. The debug registers mirror will become zeroed
777 in the end before detaching the forked off process, thus making
778 this compatible with older Linux kernels too. */
779
780 *child->priv->arch_private = *parent->priv->arch_private;
781 }
782
783 void
784 x86_target::low_prepare_to_resume (lwp_info *lwp)
785 {
786 /* This comes from nat/. */
787 x86_linux_prepare_to_resume (lwp);
788 }
789
790 /* See nat/x86-dregs.h. */
791
792 struct x86_debug_reg_state *
793 x86_debug_reg_state (pid_t pid)
794 {
795 struct process_info *proc = find_process_pid (pid);
796
797 return &proc->priv->arch_private->debug_reg_state;
798 }
799 \f
800 /* When GDBSERVER is built as a 64-bit application on linux, the
801 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
802 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
803 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
804 conversion in-place ourselves. */
805
    806 /* Convert a ptrace/host siginfo object into/from the siginfo in the
    807    layout of the inferior's architecture.  Returns true if any
808 conversion was done; false otherwise. If DIRECTION is 1, then copy
809 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
810 INF. */
811
812 bool
813 x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
814 {
815 #ifdef __x86_64__
816 unsigned int machine;
817 int tid = lwpid_of (current_thread);
818 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
819
820 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
821 if (!is_64bit_tdesc (current_thread))
822 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
823 FIXUP_32);
824 /* No fixup for native x32 GDB. */
825 else if (!is_elf64 && sizeof (void *) == 8)
826 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
827 FIXUP_X32);
828 #endif
829
830 return false;
831 }
832 \f
833 static int use_xml;
834
835 /* Format of XSAVE extended state is:
836 struct
837 {
838 fxsave_bytes[0..463]
839 sw_usable_bytes[464..511]
840 xstate_hdr_bytes[512..575]
841 avx_bytes[576..831]
842 future_state etc
843 };
844
845 Same memory layout will be used for the coredump NT_X86_XSTATE
846 representing the XSAVE extended state registers.
847
    848    The first 8 bytes of sw_usable_bytes, i.e. bytes [464..471], hold the OS enabled
849 extended state mask, which is the same as the extended control register
850 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
851 together with the mask saved in the xstate_hdr_bytes to determine what
852 states the processor/OS supports and what state, used or initialized,
853 the process/thread is in. */
854 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
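
/* Illustration (editor's sketch, not part of the original file): when the
   XSAVE buffer is viewed as an array of uint64_t, the mask therefore lives
   at element 464 / 8 == 58, which is how it is fetched below:

     xcr0 = xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];
*/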
855
    856 /* Does the current host support the GETFPXREGS request?  The header file
    857    may or may not define it, and even if defined the kernel returns EIO on
    858    a pre-SSE processor.  -1 means "unknown, probe on first use"; 0 and 1 record the answer.  */
859 int have_ptrace_getfpxregs =
860 #ifdef HAVE_PTRACE_GETFPXREGS
861 -1
862 #else
863 0
864 #endif
865 ;
866
867 /* Get Linux/x86 target description from running target. */
868
869 static const struct target_desc *
870 x86_linux_read_description (void)
871 {
872 unsigned int machine;
873 int is_elf64;
874 int xcr0_features;
875 int tid;
876 static uint64_t xcr0;
877 static int xsave_len;
878 struct regset_info *regset;
879
880 tid = lwpid_of (current_thread);
881
882 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
883
884 if (sizeof (void *) == 4)
885 {
886 if (is_elf64 > 0)
887 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
888 #ifndef __x86_64__
889 else if (machine == EM_X86_64)
890 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
891 #endif
892 }
893
894 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
895 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
896 {
897 elf_fpxregset_t fpxregs;
898
899 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
900 {
901 have_ptrace_getfpxregs = 0;
902 have_ptrace_getregset = 0;
903 return i386_linux_read_description (X86_XSTATE_X87);
904 }
905 else
906 have_ptrace_getfpxregs = 1;
907 }
908 #endif
909
910 if (!use_xml)
911 {
912 /* Don't use XML. */
913 #ifdef __x86_64__
914 if (machine == EM_X86_64)
915 return tdesc_amd64_linux_no_xml.get ();
916 else
917 #endif
918 return tdesc_i386_linux_no_xml.get ();
919 }
920
921 if (have_ptrace_getregset == -1)
922 {
923 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
924 struct iovec iov;
925
926 iov.iov_base = xstateregs;
927 iov.iov_len = sizeof (xstateregs);
928
929 /* Check if PTRACE_GETREGSET works. */
930 if (ptrace (PTRACE_GETREGSET, tid,
931 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
932 have_ptrace_getregset = 0;
933 else
934 {
935 have_ptrace_getregset = 1;
936
937 /* Get XCR0 from XSAVE extended state. */
938 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
939 / sizeof (uint64_t))];
940
941 xsave_len = x86_xsave_length ();
942
943 /* Use PTRACE_GETREGSET if it is available. */
944 for (regset = x86_regsets;
945 regset->fill_function != NULL; regset++)
946 if (regset->get_request == PTRACE_GETREGSET)
947 regset->size = xsave_len;
948 else if (regset->type != GENERAL_REGS)
949 regset->size = 0;
950 }
951 }
952
953 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
954 xcr0_features = (have_ptrace_getregset
955 && (xcr0 & X86_XSTATE_ALL_MASK));
956
957 if (xcr0_features)
958 i387_set_xsave_mask (xcr0, xsave_len);
959
960 if (machine == EM_X86_64)
961 {
962 #ifdef __x86_64__
963 const target_desc *tdesc = NULL;
964
965 if (xcr0_features)
966 {
967 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
968 !is_elf64);
969 }
970
971 if (tdesc == NULL)
972 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
973 return tdesc;
974 #endif
975 }
976 else
977 {
978 const target_desc *tdesc = NULL;
979
980 if (xcr0_features)
981 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
982
983 if (tdesc == NULL)
984 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
985
986 return tdesc;
987 }
988
989 gdb_assert_not_reached ("failed to return tdesc");
990 }
991
    992 /* Update the target description of all processes; a new GDB has
    993    connected, and it may or may not support xml target descriptions.  */
994
995 void
996 x86_target::update_xmltarget ()
997 {
998 scoped_restore_current_thread restore_thread;
999
1000 /* Before changing the register cache's internal layout, flush the
1001 contents of the current valid caches back to the threads, and
1002 release the current regcache objects. */
1003 regcache_release ();
1004
1005 for_each_process ([this] (process_info *proc) {
1006 int pid = proc->pid;
1007
1008 /* Look up any thread of this process. */
1009 switch_to_thread (find_any_thread_of_pid (pid));
1010
1011 low_arch_setup ();
1012 });
1013 }
1014
1015 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1016 PTRACE_GETREGSET. */
1017
1018 void
1019 x86_target::process_qsupported (gdb::array_view<const char * const> features)
1020 {
1021 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1022 with "i386" in qSupported query, it supports x86 XML target
1023 descriptions. */
1024 use_xml = 0;
1025
1026 for (const char *feature : features)
1027 {
1028 if (startswith (feature, "xmlRegisters="))
1029 {
1030 char *copy = xstrdup (feature + 13);
1031
1032 char *saveptr;
1033 for (char *p = strtok_r (copy, ",", &saveptr);
1034 p != NULL;
1035 p = strtok_r (NULL, ",", &saveptr))
1036 {
1037 if (strcmp (p, "i386") == 0)
1038 {
1039 use_xml = 1;
1040 break;
1041 }
1042 }
1043
1044 free (copy);
1045 }
1046 }
1047
1048 update_xmltarget ();
1049 }
1050
1051 /* Common for x86/x86-64. */
1052
1053 static struct regsets_info x86_regsets_info =
1054 {
1055 x86_regsets, /* regsets */
1056 0, /* num_regsets */
1057 NULL, /* disabled_regsets */
1058 };
1059
1060 #ifdef __x86_64__
1061 static struct regs_info amd64_linux_regs_info =
1062 {
1063 NULL, /* regset_bitmap */
1064 NULL, /* usrregs_info */
1065 &x86_regsets_info
1066 };
1067 #endif
1068 static struct usrregs_info i386_linux_usrregs_info =
1069 {
1070 I386_NUM_REGS,
1071 i386_regmap,
1072 };
1073
1074 static struct regs_info i386_linux_regs_info =
1075 {
1076 NULL, /* regset_bitmap */
1077 &i386_linux_usrregs_info,
1078 &x86_regsets_info
1079 };
1080
1081 const regs_info *
1082 x86_target::get_regs_info ()
1083 {
1084 #ifdef __x86_64__
1085 if (is_64bit_tdesc (current_thread))
1086 return &amd64_linux_regs_info;
1087 else
1088 #endif
1089 return &i386_linux_regs_info;
1090 }
1091
1092 /* Initialize the target description for the architecture of the
1093 inferior. */
1094
1095 void
1096 x86_target::low_arch_setup ()
1097 {
1098 current_process ()->tdesc = x86_linux_read_description ();
1099 }
1100
1101 bool
1102 x86_target::low_supports_catch_syscall ()
1103 {
1104 return true;
1105 }
1106
   1107 /* Fill *SYSNO with the number of the syscall the inferior was trapped
   1108    on.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */
1109
1110 void
1111 x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
1112 {
1113 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1114
1115 if (use_64bit)
1116 {
1117 long l_sysno;
1118
1119 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1120 *sysno = (int) l_sysno;
1121 }
1122 else
1123 collect_register_by_name (regcache, "orig_eax", sysno);
1124 }
1125
1126 bool
1127 x86_target::supports_tracepoints ()
1128 {
1129 return true;
1130 }
1131
1132 static void
1133 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1134 {
1135 target_write_memory (*to, buf, len);
1136 *to += len;
1137 }
1138
1139 static int
1140 push_opcode (unsigned char *buf, const char *op)
1141 {
1142 unsigned char *buf_org = buf;
1143
1144 while (1)
1145 {
1146 char *endptr;
1147 unsigned long ul = strtoul (op, &endptr, 16);
1148
1149 if (endptr == op)
1150 break;
1151
1152 *buf++ = ul;
1153 op = endptr;
1154 }
1155
1156 return buf - buf_org;
1157 }
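
/* Illustration (editor's sketch, not part of the original file): push_opcode
   converts a string of hex byte values into raw bytes and returns how many
   it wrote.  For instance

     i += push_opcode (&buf[i], "48 89 e6");

   appends the three bytes 0x48 0x89 0xe6 (mov %rsp,%rsi) at buf[i] and
   advances I by 3, which is the pattern used throughout the jump-pad
   builders below.  */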
1158
1159 #ifdef __x86_64__
1160
1161 /* Build a jump pad that saves registers and calls a collection
   1162    function.  Writes the jump instruction that jumps to the jump pad into
   1163    JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   1164    tracepoint address.  */
1165
1166 static int
1167 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1168 CORE_ADDR collector,
1169 CORE_ADDR lockaddr,
1170 ULONGEST orig_size,
1171 CORE_ADDR *jump_entry,
1172 CORE_ADDR *trampoline,
1173 ULONGEST *trampoline_size,
1174 unsigned char *jjump_pad_insn,
1175 ULONGEST *jjump_pad_insn_size,
1176 CORE_ADDR *adjusted_insn_addr,
1177 CORE_ADDR *adjusted_insn_addr_end,
1178 char *err)
1179 {
1180 unsigned char buf[40];
1181 int i, offset;
1182 int64_t loffset;
1183
1184 CORE_ADDR buildaddr = *jump_entry;
1185
1186 /* Build the jump pad. */
1187
1188 /* First, do tracepoint data collection. Save registers. */
1189 i = 0;
1190 /* Need to ensure stack pointer saved first. */
1191 buf[i++] = 0x54; /* push %rsp */
1192 buf[i++] = 0x55; /* push %rbp */
1193 buf[i++] = 0x57; /* push %rdi */
1194 buf[i++] = 0x56; /* push %rsi */
1195 buf[i++] = 0x52; /* push %rdx */
1196 buf[i++] = 0x51; /* push %rcx */
1197 buf[i++] = 0x53; /* push %rbx */
1198 buf[i++] = 0x50; /* push %rax */
1199 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1200 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1201 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1202 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1203 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1204 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1205 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1206 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1207 buf[i++] = 0x9c; /* pushfq */
1208 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1209 buf[i++] = 0xbf;
1210 memcpy (buf + i, &tpaddr, 8);
1211 i += 8;
1212 buf[i++] = 0x57; /* push %rdi */
1213 append_insns (&buildaddr, i, buf);
1214
1215 /* Stack space for the collecting_t object. */
1216 i = 0;
1217 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1218 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1219 memcpy (buf + i, &tpoint, 8);
1220 i += 8;
1221 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1222 i += push_opcode (&buf[i],
1223 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1224 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1225 append_insns (&buildaddr, i, buf);
1226
1227 /* spin-lock. */
1228 i = 0;
1229 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1230 memcpy (&buf[i], (void *) &lockaddr, 8);
1231 i += 8;
1232 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1233 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1234 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1235 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1236 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1237 append_insns (&buildaddr, i, buf);
1238
1239 /* Set up the gdb_collect call. */
1240 /* At this point, (stack pointer + 0x18) is the base of our saved
1241 register block. */
1242
1243 i = 0;
1244 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1245 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1246
1247 /* tpoint address may be 64-bit wide. */
1248 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1249 memcpy (buf + i, &tpoint, 8);
1250 i += 8;
1251 append_insns (&buildaddr, i, buf);
1252
   1253   /* The collector function, being in the shared library, may be more
   1254      than 31 bits away from the jump pad.  */
1255 i = 0;
1256 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1257 memcpy (buf + i, &collector, 8);
1258 i += 8;
1259 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1260 append_insns (&buildaddr, i, buf);
1261
1262 /* Clear the spin-lock. */
1263 i = 0;
1264 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1265 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1266 memcpy (buf + i, &lockaddr, 8);
1267 i += 8;
1268 append_insns (&buildaddr, i, buf);
1269
1270 /* Remove stack that had been used for the collect_t object. */
1271 i = 0;
1272 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1273 append_insns (&buildaddr, i, buf);
1274
1275 /* Restore register state. */
1276 i = 0;
1277 buf[i++] = 0x48; /* add $0x8,%rsp */
1278 buf[i++] = 0x83;
1279 buf[i++] = 0xc4;
1280 buf[i++] = 0x08;
1281 buf[i++] = 0x9d; /* popfq */
1282 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1283 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1284 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1285 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1286 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1287 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1288 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1289 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1290 buf[i++] = 0x58; /* pop %rax */
1291 buf[i++] = 0x5b; /* pop %rbx */
1292 buf[i++] = 0x59; /* pop %rcx */
1293 buf[i++] = 0x5a; /* pop %rdx */
1294 buf[i++] = 0x5e; /* pop %rsi */
1295 buf[i++] = 0x5f; /* pop %rdi */
1296 buf[i++] = 0x5d; /* pop %rbp */
1297 buf[i++] = 0x5c; /* pop %rsp */
1298 append_insns (&buildaddr, i, buf);
1299
1300 /* Now, adjust the original instruction to execute in the jump
1301 pad. */
1302 *adjusted_insn_addr = buildaddr;
1303 relocate_instruction (&buildaddr, tpaddr);
1304 *adjusted_insn_addr_end = buildaddr;
1305
1306 /* Finally, write a jump back to the program. */
1307
1308 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1309 if (loffset > INT_MAX || loffset < INT_MIN)
1310 {
1311 sprintf (err,
1312 "E.Jump back from jump pad too far from tracepoint "
1313 "(offset 0x%" PRIx64 " > int32).", loffset);
1314 return 1;
1315 }
1316
1317 offset = (int) loffset;
1318 memcpy (buf, jump_insn, sizeof (jump_insn));
1319 memcpy (buf + 1, &offset, 4);
1320 append_insns (&buildaddr, sizeof (jump_insn), buf);
1321
1322 /* The jump pad is now built. Wire in a jump to our jump pad. This
1323 is always done last (by our caller actually), so that we can
1324 install fast tracepoints with threads running. This relies on
1325 the agent's atomic write support. */
1326 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1327 if (loffset > INT_MAX || loffset < INT_MIN)
1328 {
1329 sprintf (err,
1330 "E.Jump pad too far from tracepoint "
1331 "(offset 0x%" PRIx64 " > int32).", loffset);
1332 return 1;
1333 }
1334
1335 offset = (int) loffset;
1336
1337 memcpy (buf, jump_insn, sizeof (jump_insn));
1338 memcpy (buf + 1, &offset, 4);
1339 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1340 *jjump_pad_insn_size = sizeof (jump_insn);
1341
1342 /* Return the end address of our pad. */
1343 *jump_entry = buildaddr;
1344
1345 return 0;
1346 }
1347
1348 #endif /* __x86_64__ */
1349
1350 /* Build a jump pad that saves registers and calls a collection
   1351    function.  Writes the jump instruction that jumps to the jump pad into
   1352    JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   1353    tracepoint address.  */
1354
1355 static int
1356 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1357 CORE_ADDR collector,
1358 CORE_ADDR lockaddr,
1359 ULONGEST orig_size,
1360 CORE_ADDR *jump_entry,
1361 CORE_ADDR *trampoline,
1362 ULONGEST *trampoline_size,
1363 unsigned char *jjump_pad_insn,
1364 ULONGEST *jjump_pad_insn_size,
1365 CORE_ADDR *adjusted_insn_addr,
1366 CORE_ADDR *adjusted_insn_addr_end,
1367 char *err)
1368 {
1369 unsigned char buf[0x100];
1370 int i, offset;
1371 CORE_ADDR buildaddr = *jump_entry;
1372
1373 /* Build the jump pad. */
1374
1375 /* First, do tracepoint data collection. Save registers. */
1376 i = 0;
1377 buf[i++] = 0x60; /* pushad */
1378 buf[i++] = 0x68; /* push tpaddr aka $pc */
1379 *((int *)(buf + i)) = (int) tpaddr;
1380 i += 4;
1381 buf[i++] = 0x9c; /* pushf */
1382 buf[i++] = 0x1e; /* push %ds */
1383 buf[i++] = 0x06; /* push %es */
1384 buf[i++] = 0x0f; /* push %fs */
1385 buf[i++] = 0xa0;
1386 buf[i++] = 0x0f; /* push %gs */
1387 buf[i++] = 0xa8;
1388 buf[i++] = 0x16; /* push %ss */
1389 buf[i++] = 0x0e; /* push %cs */
1390 append_insns (&buildaddr, i, buf);
1391
1392 /* Stack space for the collecting_t object. */
1393 i = 0;
1394 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1395
1396 /* Build the object. */
1397 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1398 memcpy (buf + i, &tpoint, 4);
1399 i += 4;
1400 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1401
1402 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1403 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1404 append_insns (&buildaddr, i, buf);
1405
   1406   /* Grab the spin-lock.  Note this uses cmpxchg, which is not available on
   1407      the original i386.  If we cared about that, xchg could be used instead.  */
1408
1409 i = 0;
1410 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1411 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1412 %esp,<lockaddr> */
1413 memcpy (&buf[i], (void *) &lockaddr, 4);
1414 i += 4;
1415 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1416 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1417 append_insns (&buildaddr, i, buf);
1418
1419
1420 /* Set up arguments to the gdb_collect call. */
1421 i = 0;
1422 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1423 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1424 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1425 append_insns (&buildaddr, i, buf);
1426
1427 i = 0;
1428 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1429 append_insns (&buildaddr, i, buf);
1430
1431 i = 0;
1432 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1433 memcpy (&buf[i], (void *) &tpoint, 4);
1434 i += 4;
1435 append_insns (&buildaddr, i, buf);
1436
1437 buf[0] = 0xe8; /* call <reladdr> */
1438 offset = collector - (buildaddr + sizeof (jump_insn));
1439 memcpy (buf + 1, &offset, 4);
1440 append_insns (&buildaddr, 5, buf);
1441 /* Clean up after the call. */
1442 buf[0] = 0x83; /* add $0x8,%esp */
1443 buf[1] = 0xc4;
1444 buf[2] = 0x08;
1445 append_insns (&buildaddr, 3, buf);
1446
1447
1448 /* Clear the spin-lock. This would need the LOCK prefix on older
1449 broken archs. */
1450 i = 0;
1451 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1452 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1453 memcpy (buf + i, &lockaddr, 4);
1454 i += 4;
1455 append_insns (&buildaddr, i, buf);
1456
1457
1458 /* Remove stack that had been used for the collect_t object. */
1459 i = 0;
1460 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1461 append_insns (&buildaddr, i, buf);
1462
1463 i = 0;
1464 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1465 buf[i++] = 0xc4;
1466 buf[i++] = 0x04;
1467 buf[i++] = 0x17; /* pop %ss */
1468 buf[i++] = 0x0f; /* pop %gs */
1469 buf[i++] = 0xa9;
1470 buf[i++] = 0x0f; /* pop %fs */
1471 buf[i++] = 0xa1;
1472 buf[i++] = 0x07; /* pop %es */
1473 buf[i++] = 0x1f; /* pop %ds */
1474 buf[i++] = 0x9d; /* popf */
1475 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1476 buf[i++] = 0xc4;
1477 buf[i++] = 0x04;
1478 buf[i++] = 0x61; /* popad */
1479 append_insns (&buildaddr, i, buf);
1480
1481 /* Now, adjust the original instruction to execute in the jump
1482 pad. */
1483 *adjusted_insn_addr = buildaddr;
1484 relocate_instruction (&buildaddr, tpaddr);
1485 *adjusted_insn_addr_end = buildaddr;
1486
1487 /* Write the jump back to the program. */
1488 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1489 memcpy (buf, jump_insn, sizeof (jump_insn));
1490 memcpy (buf + 1, &offset, 4);
1491 append_insns (&buildaddr, sizeof (jump_insn), buf);
1492
1493 /* The jump pad is now built. Wire in a jump to our jump pad. This
1494 is always done last (by our caller actually), so that we can
1495 install fast tracepoints with threads running. This relies on
1496 the agent's atomic write support. */
1497 if (orig_size == 4)
1498 {
1499 /* Create a trampoline. */
1500 *trampoline_size = sizeof (jump_insn);
1501 if (!claim_trampoline_space (*trampoline_size, trampoline))
1502 {
1503 /* No trampoline space available. */
1504 strcpy (err,
1505 "E.Cannot allocate trampoline space needed for fast "
1506 "tracepoints on 4-byte instructions.");
1507 return 1;
1508 }
1509
1510 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1511 memcpy (buf, jump_insn, sizeof (jump_insn));
1512 memcpy (buf + 1, &offset, 4);
1513 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1514
1515 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1516 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1517 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1518 memcpy (buf + 2, &offset, 2);
1519 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1520 *jjump_pad_insn_size = sizeof (small_jump_insn);
1521 }
1522 else
1523 {
1524 /* Else use a 32-bit relative jump instruction. */
1525 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1526 memcpy (buf, jump_insn, sizeof (jump_insn));
1527 memcpy (buf + 1, &offset, 4);
1528 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1529 *jjump_pad_insn_size = sizeof (jump_insn);
1530 }
1531
1532 /* Return the end address of our pad. */
1533 *jump_entry = buildaddr;
1534
1535 return 0;
1536 }
1537
1538 bool
1539 x86_target::supports_fast_tracepoints ()
1540 {
1541 return true;
1542 }
1543
1544 int
1545 x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1546 CORE_ADDR tpaddr,
1547 CORE_ADDR collector,
1548 CORE_ADDR lockaddr,
1549 ULONGEST orig_size,
1550 CORE_ADDR *jump_entry,
1551 CORE_ADDR *trampoline,
1552 ULONGEST *trampoline_size,
1553 unsigned char *jjump_pad_insn,
1554 ULONGEST *jjump_pad_insn_size,
1555 CORE_ADDR *adjusted_insn_addr,
1556 CORE_ADDR *adjusted_insn_addr_end,
1557 char *err)
1558 {
1559 #ifdef __x86_64__
1560 if (is_64bit_tdesc (current_thread))
1561 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1562 collector, lockaddr,
1563 orig_size, jump_entry,
1564 trampoline, trampoline_size,
1565 jjump_pad_insn,
1566 jjump_pad_insn_size,
1567 adjusted_insn_addr,
1568 adjusted_insn_addr_end,
1569 err);
1570 #endif
1571
1572 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1573 collector, lockaddr,
1574 orig_size, jump_entry,
1575 trampoline, trampoline_size,
1576 jjump_pad_insn,
1577 jjump_pad_insn_size,
1578 adjusted_insn_addr,
1579 adjusted_insn_addr_end,
1580 err);
1581 }
1582
1583 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1584 architectures. */
1585
1586 int
1587 x86_target::get_min_fast_tracepoint_insn_len ()
1588 {
1589 static int warned_about_fast_tracepoints = 0;
1590
1591 #ifdef __x86_64__
1592 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1593 used for fast tracepoints. */
1594 if (is_64bit_tdesc (current_thread))
1595 return 5;
1596 #endif
1597
1598 if (agent_loaded_p ())
1599 {
1600 char errbuf[IPA_BUFSIZ];
1601
1602 errbuf[0] = '\0';
1603
1604 /* On x86, if trampolines are available, then 4-byte jump instructions
1605 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1606 with a 4-byte offset are used instead. */
1607 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1608 return 4;
1609 else
1610 {
   1611 	  /* GDB has no channel to explain to the user why a shorter fast
1612 tracepoint is not possible, but at least make GDBserver
1613 mention that something has gone awry. */
1614 if (!warned_about_fast_tracepoints)
1615 {
1616 warning ("4-byte fast tracepoints not available; %s", errbuf);
1617 warned_about_fast_tracepoints = 1;
1618 }
1619 return 5;
1620 }
1621 }
1622 else
1623 {
1624 /* Indicate that the minimum length is currently unknown since the IPA
1625 has not loaded yet. */
1626 return 0;
1627 }
1628 }
1629
1630 static void
1631 add_insns (unsigned char *start, int len)
1632 {
1633 CORE_ADDR buildaddr = current_insn_ptr;
1634
1635 threads_debug_printf ("Adding %d bytes of insn at %s",
1636 len, paddress (buildaddr));
1637
1638 append_insns (&buildaddr, len, start);
1639 current_insn_ptr = buildaddr;
1640 }
1641
1642 /* Our general strategy for emitting code is to avoid specifying raw
1643 bytes whenever possible, and instead copy a block of inline asm
1644 that is embedded in the function. This is a little messy, because
1645 we need to keep the compiler from discarding what looks like dead
1646 code, plus suppress various warnings. */
1647
1648 #define EMIT_ASM(NAME, INSNS) \
1649 do \
1650 { \
1651 extern unsigned char start_ ## NAME, end_ ## NAME; \
1652 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1653 __asm__ ("jmp end_" #NAME "\n" \
1654 "\t" "start_" #NAME ":" \
1655 "\t" INSNS "\n" \
1656 "\t" "end_" #NAME ":"); \
1657 } while (0)
1658
1659 #ifdef __x86_64__
1660
1661 #define EMIT_ASM32(NAME,INSNS) \
1662 do \
1663 { \
1664 extern unsigned char start_ ## NAME, end_ ## NAME; \
1665 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1666 __asm__ (".code32\n" \
1667 "\t" "jmp end_" #NAME "\n" \
1668 "\t" "start_" #NAME ":\n" \
1669 "\t" INSNS "\n" \
1670 "\t" "end_" #NAME ":\n" \
1671 ".code64\n"); \
1672 } while (0)
1673
1674 #else
1675
1676 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1677
1678 #endif
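
/* Illustration (editor's sketch, not part of the original file): a typical
   use of the macro above is

     EMIT_ASM (amd64_example,
	       "push %rax\n\t"
	       "pop %rax");

   The inline asm places the assembled bytes between the start_/end_ labels
   inside the emitting function's own text (the leading jmp skips over them
   at run time), and add_insns copies that byte range out to the buffer at
   current_insn_ptr.  */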
1679
1680 #ifdef __x86_64__
1681
1682 static void
1683 amd64_emit_prologue (void)
1684 {
1685 EMIT_ASM (amd64_prologue,
1686 "pushq %rbp\n\t"
1687 "movq %rsp,%rbp\n\t"
1688 "sub $0x20,%rsp\n\t"
1689 "movq %rdi,-8(%rbp)\n\t"
1690 "movq %rsi,-16(%rbp)");
1691 }
1692
1693
1694 static void
1695 amd64_emit_epilogue (void)
1696 {
1697 EMIT_ASM (amd64_epilogue,
1698 "movq -16(%rbp),%rdi\n\t"
1699 "movq %rax,(%rdi)\n\t"
1700 "xor %rax,%rax\n\t"
1701 "leave\n\t"
1702 "ret");
1703 }
1704
1705 static void
1706 amd64_emit_add (void)
1707 {
1708 EMIT_ASM (amd64_add,
1709 "add (%rsp),%rax\n\t"
1710 "lea 0x8(%rsp),%rsp");
1711 }
1712
1713 static void
1714 amd64_emit_sub (void)
1715 {
1716 EMIT_ASM (amd64_sub,
1717 "sub %rax,(%rsp)\n\t"
1718 "pop %rax");
1719 }
1720
1721 static void
1722 amd64_emit_mul (void)
1723 {
1724 emit_error = 1;
1725 }
1726
1727 static void
1728 amd64_emit_lsh (void)
1729 {
1730 emit_error = 1;
1731 }
1732
1733 static void
1734 amd64_emit_rsh_signed (void)
1735 {
1736 emit_error = 1;
1737 }
1738
1739 static void
1740 amd64_emit_rsh_unsigned (void)
1741 {
1742 emit_error = 1;
1743 }
1744
1745 static void
1746 amd64_emit_ext (int arg)
1747 {
1748 switch (arg)
1749 {
1750 case 8:
1751 EMIT_ASM (amd64_ext_8,
1752 "cbtw\n\t"
1753 "cwtl\n\t"
1754 "cltq");
1755 break;
1756 case 16:
1757 EMIT_ASM (amd64_ext_16,
1758 "cwtl\n\t"
1759 "cltq");
1760 break;
1761 case 32:
1762 EMIT_ASM (amd64_ext_32,
1763 "cltq");
1764 break;
1765 default:
1766 emit_error = 1;
1767 }
1768 }
1769
1770 static void
1771 amd64_emit_log_not (void)
1772 {
1773 EMIT_ASM (amd64_log_not,
1774 "test %rax,%rax\n\t"
1775 "sete %cl\n\t"
1776 "movzbq %cl,%rax");
1777 }
1778
1779 static void
1780 amd64_emit_bit_and (void)
1781 {
1782 EMIT_ASM (amd64_and,
1783 "and (%rsp),%rax\n\t"
1784 "lea 0x8(%rsp),%rsp");
1785 }
1786
1787 static void
1788 amd64_emit_bit_or (void)
1789 {
1790 EMIT_ASM (amd64_or,
1791 "or (%rsp),%rax\n\t"
1792 "lea 0x8(%rsp),%rsp");
1793 }
1794
1795 static void
1796 amd64_emit_bit_xor (void)
1797 {
1798 EMIT_ASM (amd64_xor,
1799 "xor (%rsp),%rax\n\t"
1800 "lea 0x8(%rsp),%rsp");
1801 }
1802
1803 static void
1804 amd64_emit_bit_not (void)
1805 {
1806 EMIT_ASM (amd64_bit_not,
1807 "xorq $0xffffffffffffffff,%rax");
1808 }
1809
1810 static void
1811 amd64_emit_equal (void)
1812 {
1813 EMIT_ASM (amd64_equal,
1814 "cmp %rax,(%rsp)\n\t"
1815 "je .Lamd64_equal_true\n\t"
1816 "xor %rax,%rax\n\t"
1817 "jmp .Lamd64_equal_end\n\t"
1818 ".Lamd64_equal_true:\n\t"
1819 "mov $0x1,%rax\n\t"
1820 ".Lamd64_equal_end:\n\t"
1821 "lea 0x8(%rsp),%rsp");
1822 }
1823
1824 static void
1825 amd64_emit_less_signed (void)
1826 {
1827 EMIT_ASM (amd64_less_signed,
1828 "cmp %rax,(%rsp)\n\t"
1829 "jl .Lamd64_less_signed_true\n\t"
1830 "xor %rax,%rax\n\t"
1831 "jmp .Lamd64_less_signed_end\n\t"
1832 ".Lamd64_less_signed_true:\n\t"
1833 "mov $1,%rax\n\t"
1834 ".Lamd64_less_signed_end:\n\t"
1835 "lea 0x8(%rsp),%rsp");
1836 }
1837
1838 static void
1839 amd64_emit_less_unsigned (void)
1840 {
1841 EMIT_ASM (amd64_less_unsigned,
1842 "cmp %rax,(%rsp)\n\t"
1843 "jb .Lamd64_less_unsigned_true\n\t"
1844 "xor %rax,%rax\n\t"
1845 "jmp .Lamd64_less_unsigned_end\n\t"
1846 ".Lamd64_less_unsigned_true:\n\t"
1847 "mov $1,%rax\n\t"
1848 ".Lamd64_less_unsigned_end:\n\t"
1849 "lea 0x8(%rsp),%rsp");
1850 }
1851
1852 static void
1853 amd64_emit_ref (int size)
1854 {
1855 switch (size)
1856 {
1857 case 1:
1858 EMIT_ASM (amd64_ref1,
1859 "movb (%rax),%al");
1860 break;
1861 case 2:
1862 EMIT_ASM (amd64_ref2,
1863 "movw (%rax),%ax");
1864 break;
1865 case 4:
1866 EMIT_ASM (amd64_ref4,
1867 "movl (%rax),%eax");
1868 break;
1869 case 8:
1870 EMIT_ASM (amd64_ref8,
1871 "movq (%rax),%rax");
1872 break;
1873 }
1874 }
1875
1876 static void
1877 amd64_emit_if_goto (int *offset_p, int *size_p)
1878 {
1879 EMIT_ASM (amd64_if_goto,
1880 "mov %rax,%rcx\n\t"
1881 "pop %rax\n\t"
1882 "cmp $0,%rcx\n\t"
1883 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1884 if (offset_p)
1885 *offset_p = 10;
1886 if (size_p)
1887 *size_p = 4;
1888 }
1889
1890 static void
1891 amd64_emit_goto (int *offset_p, int *size_p)
1892 {
1893 EMIT_ASM (amd64_goto,
1894 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1895 if (offset_p)
1896 *offset_p = 1;
1897 if (size_p)
1898 *size_p = 4;
1899 }
1900
1901 static void
1902 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1903 {
1904 int diff = (to - (from + size));
1905 unsigned char buf[sizeof (int)];
1906
1907 if (size != 4)
1908 {
1909 emit_error = 1;
1910 return;
1911 }
1912
1913 memcpy (buf, &diff, sizeof (int));
1914 target_write_memory (from, buf, sizeof (int));
1915 }
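
/* Illustration (editor's sketch, not part of the original file): the
   displacement patched in above is relative to the end of the 4-byte field,
   so for FROM == 0x1000, SIZE == 4 and TO == 0x1010 the value written is
   0x1010 - (0x1000 + 4) == 0xc.  */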
1916
1917 static void
1918 amd64_emit_const (LONGEST num)
1919 {
1920 unsigned char buf[16];
1921 int i;
1922 CORE_ADDR buildaddr = current_insn_ptr;
1923
1924 i = 0;
1925 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1926 memcpy (&buf[i], &num, sizeof (num));
1927 i += 8;
1928 append_insns (&buildaddr, i, buf);
1929 current_insn_ptr = buildaddr;
1930 }
1931
1932 static void
1933 amd64_emit_call (CORE_ADDR fn)
1934 {
1935 unsigned char buf[16];
1936 int i;
1937 CORE_ADDR buildaddr;
1938 LONGEST offset64;
1939
   1940   /* The destination function, being in the shared library, may be more
   1941      than 31 bits away from the compiled code pad.  */
1942
1943 buildaddr = current_insn_ptr;
1944
1945 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1946
1947 i = 0;
1948
1949 if (offset64 > INT_MAX || offset64 < INT_MIN)
1950 {
   1951       /* The offset is too large for a direct call, so call through a
   1952          register.  Use %r10: since it is call-clobbered, we don't
   1953          have to push/pop it.  */
1954 buf[i++] = 0x48; /* mov $fn,%r10 */
1955 buf[i++] = 0xba;
1956 memcpy (buf + i, &fn, 8);
1957 i += 8;
1958 buf[i++] = 0xff; /* callq *%r10 */
1959 buf[i++] = 0xd2;
1960 }
1961 else
1962 {
1963 int offset32 = offset64; /* we know we can't overflow here. */
1964
1965 buf[i++] = 0xe8; /* call <reladdr> */
1966 memcpy (buf + i, &offset32, 4);
1967 i += 4;
1968 }
1969
1970 append_insns (&buildaddr, i, buf);
1971 current_insn_ptr = buildaddr;
1972 }
1973
1974 static void
1975 amd64_emit_reg (int reg)
1976 {
1977 unsigned char buf[16];
1978 int i;
1979 CORE_ADDR buildaddr;
1980
1981 /* Assume raw_regs is still in %rdi. */
1982 buildaddr = current_insn_ptr;
1983 i = 0;
1984 buf[i++] = 0xbe; /* mov $<n>,%esi */
1985 memcpy (&buf[i], &reg, sizeof (reg));
1986 i += 4;
1987 append_insns (&buildaddr, i, buf);
1988 current_insn_ptr = buildaddr;
1989 amd64_emit_call (get_raw_reg_func_addr ());
1990 }
1991
1992 static void
1993 amd64_emit_pop (void)
1994 {
1995 EMIT_ASM (amd64_pop,
1996 "pop %rax");
1997 }
1998
1999 static void
2000 amd64_emit_stack_flush (void)
2001 {
2002 EMIT_ASM (amd64_stack_flush,
2003 "push %rax");
2004 }
2005
2006 static void
2007 amd64_emit_zero_ext (int arg)
2008 {
2009 switch (arg)
2010 {
2011 case 8:
2012 EMIT_ASM (amd64_zero_ext_8,
2013 "and $0xff,%rax");
2014 break;
2015 case 16:
2016 EMIT_ASM (amd64_zero_ext_16,
2017 "and $0xffff,%rax");
2018 break;
2019 case 32:
2020 EMIT_ASM (amd64_zero_ext_32,
2021 "mov $0xffffffff,%rcx\n\t"
2022 "and %rcx,%rax");
2023 break;
2024 default:
2025 emit_error = 1;
2026 }
2027 }
2028
2029 static void
2030 amd64_emit_swap (void)
2031 {
2032 EMIT_ASM (amd64_swap,
2033 "mov %rax,%rcx\n\t"
2034 "pop %rax\n\t"
2035 "push %rcx");
2036 }
2037
2038 static void
2039 amd64_emit_stack_adjust (int n)
2040 {
2041 unsigned char buf[16];
2042 int i;
2043 CORE_ADDR buildaddr = current_insn_ptr;
2044
2045 i = 0;
2046 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2047 buf[i++] = 0x8d;
2048 buf[i++] = 0x64;
2049 buf[i++] = 0x24;
2050 /* This only handles adjustments up to 16, but we don't expect any more. */
2051 buf[i++] = n * 8;
2052 append_insns (&buildaddr, i, buf);
2053 current_insn_ptr = buildaddr;
2054 }
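
/* Illustration (editor's sketch, not part of the original file): for N == 2
   the bytes emitted above are 48 8d 64 24 10, i.e. "lea 0x10(%rsp),%rsp",
   dropping two 8-byte stack slots.  Since N * 8 must fit in the single
   displacement byte, only small adjustments are representable, hence the
   comment above.  */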
2055
2056 /* FN's prototype is `LONGEST(*fn)(int)'. */
2057
2058 static void
2059 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2060 {
2061 unsigned char buf[16];
2062 int i;
2063 CORE_ADDR buildaddr;
2064
2065 buildaddr = current_insn_ptr;
2066 i = 0;
2067 buf[i++] = 0xbf; /* movl $<n>,%edi */
2068 memcpy (&buf[i], &arg1, sizeof (arg1));
2069 i += 4;
2070 append_insns (&buildaddr, i, buf);
2071 current_insn_ptr = buildaddr;
2072 amd64_emit_call (fn);
2073 }
2074
2075 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2076
2077 static void
2078 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2079 {
2080 unsigned char buf[16];
2081 int i;
2082 CORE_ADDR buildaddr;
2083
2084 buildaddr = current_insn_ptr;
2085 i = 0;
2086 buf[i++] = 0xbf; /* movl $<n>,%edi */
2087 memcpy (&buf[i], &arg1, sizeof (arg1));
2088 i += 4;
2089 append_insns (&buildaddr, i, buf);
2090 current_insn_ptr = buildaddr;
2091 EMIT_ASM (amd64_void_call_2_a,
2092 /* Save away a copy of the stack top. */
2093 "push %rax\n\t"
2094 /* Also pass top as the second argument. */
2095 "mov %rax,%rsi");
2096 amd64_emit_call (fn);
2097 EMIT_ASM (amd64_void_call_2_b,
2098 /* Restore the stack top; %rax may have been trashed. */
2099 "pop %rax");
2100 }
2101
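/* The comparison-goto emitters below compare the top two stack
   entries and emit a jump whose 32-bit displacement is left zeroed
   (the ".byte 0xe9, ..." placeholder).  *OFFSET_P and *SIZE_P report
   where that displacement lives in the emitted sequence, so the
   caller can patch in the real target later, presumably through the
   write_goto_address entry of the emit_ops vector.  */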
2102 static void
2103 amd64_emit_eq_goto (int *offset_p, int *size_p)
2104 {
2105 EMIT_ASM (amd64_eq,
2106 "cmp %rax,(%rsp)\n\t"
2107 "jne .Lamd64_eq_fallthru\n\t"
2108 "lea 0x8(%rsp),%rsp\n\t"
2109 "pop %rax\n\t"
2110 /* jmp, but don't trust the assembler to choose the right jump */
2111 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2112 ".Lamd64_eq_fallthru:\n\t"
2113 "lea 0x8(%rsp),%rsp\n\t"
2114 "pop %rax");
2115
2116 if (offset_p)
2117 *offset_p = 13;
2118 if (size_p)
2119 *size_p = 4;
2120 }
2121
2122 static void
2123 amd64_emit_ne_goto (int *offset_p, int *size_p)
2124 {
2125 EMIT_ASM (amd64_ne,
2126 "cmp %rax,(%rsp)\n\t"
2127 "je .Lamd64_ne_fallthru\n\t"
2128 "lea 0x8(%rsp),%rsp\n\t"
2129 "pop %rax\n\t"
2130 /* jmp, but don't trust the assembler to choose the right jump */
2131 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2132 ".Lamd64_ne_fallthru:\n\t"
2133 "lea 0x8(%rsp),%rsp\n\t"
2134 "pop %rax");
2135
2136 if (offset_p)
2137 *offset_p = 13;
2138 if (size_p)
2139 *size_p = 4;
2140 }
2141
2142 static void
2143 amd64_emit_lt_goto (int *offset_p, int *size_p)
2144 {
2145 EMIT_ASM (amd64_lt,
2146 "cmp %rax,(%rsp)\n\t"
2147 "jnl .Lamd64_lt_fallthru\n\t"
2148 "lea 0x8(%rsp),%rsp\n\t"
2149 "pop %rax\n\t"
2150 /* jmp, but don't trust the assembler to choose the right jump */
2151 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2152 ".Lamd64_lt_fallthru:\n\t"
2153 "lea 0x8(%rsp),%rsp\n\t"
2154 "pop %rax");
2155
2156 if (offset_p)
2157 *offset_p = 13;
2158 if (size_p)
2159 *size_p = 4;
2160 }
2161
2162 static void
2163 amd64_emit_le_goto (int *offset_p, int *size_p)
2164 {
2165 EMIT_ASM (amd64_le,
2166 "cmp %rax,(%rsp)\n\t"
2167 "jnle .Lamd64_le_fallthru\n\t"
2168 "lea 0x8(%rsp),%rsp\n\t"
2169 "pop %rax\n\t"
2170 /* jmp, but don't trust the assembler to choose the right jump */
2171 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2172 ".Lamd64_le_fallthru:\n\t"
2173 "lea 0x8(%rsp),%rsp\n\t"
2174 "pop %rax");
2175
2176 if (offset_p)
2177 *offset_p = 13;
2178 if (size_p)
2179 *size_p = 4;
2180 }
2181
2182 static void
2183 amd64_emit_gt_goto (int *offset_p, int *size_p)
2184 {
2185 EMIT_ASM (amd64_gt,
2186 "cmp %rax,(%rsp)\n\t"
2187 "jng .Lamd64_gt_fallthru\n\t"
2188 "lea 0x8(%rsp),%rsp\n\t"
2189 "pop %rax\n\t"
2190 /* jmp, but don't trust the assembler to choose the right jump */
2191 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2192 ".Lamd64_gt_fallthru:\n\t"
2193 "lea 0x8(%rsp),%rsp\n\t"
2194 "pop %rax");
2195
2196 if (offset_p)
2197 *offset_p = 13;
2198 if (size_p)
2199 *size_p = 4;
2200 }
2201
2202 static void
2203 amd64_emit_ge_goto (int *offset_p, int *size_p)
2204 {
2205 EMIT_ASM (amd64_ge,
2206 "cmp %rax,(%rsp)\n\t"
2207 "jnge .Lamd64_ge_fallthru\n\t"
2208 ".Lamd64_ge_jump:\n\t"
2209 "lea 0x8(%rsp),%rsp\n\t"
2210 "pop %rax\n\t"
2211 /* jmp, but don't trust the assembler to choose the right jump */
2212 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2213 ".Lamd64_ge_fallthru:\n\t"
2214 "lea 0x8(%rsp),%rsp\n\t"
2215 "pop %rax");
2216
2217 if (offset_p)
2218 *offset_p = 13;
2219 if (size_p)
2220 *size_p = 4;
2221 }
2222
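/* The emit_ops vector used for 64-bit inferiors; x86_target::emit_ops
   below selects between this and i386_emit_ops.  */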
2223 static emit_ops amd64_emit_ops =
2224 {
2225 amd64_emit_prologue,
2226 amd64_emit_epilogue,
2227 amd64_emit_add,
2228 amd64_emit_sub,
2229 amd64_emit_mul,
2230 amd64_emit_lsh,
2231 amd64_emit_rsh_signed,
2232 amd64_emit_rsh_unsigned,
2233 amd64_emit_ext,
2234 amd64_emit_log_not,
2235 amd64_emit_bit_and,
2236 amd64_emit_bit_or,
2237 amd64_emit_bit_xor,
2238 amd64_emit_bit_not,
2239 amd64_emit_equal,
2240 amd64_emit_less_signed,
2241 amd64_emit_less_unsigned,
2242 amd64_emit_ref,
2243 amd64_emit_if_goto,
2244 amd64_emit_goto,
2245 amd64_write_goto_address,
2246 amd64_emit_const,
2247 amd64_emit_call,
2248 amd64_emit_reg,
2249 amd64_emit_pop,
2250 amd64_emit_stack_flush,
2251 amd64_emit_zero_ext,
2252 amd64_emit_swap,
2253 amd64_emit_stack_adjust,
2254 amd64_emit_int_call_1,
2255 amd64_emit_void_call_2,
2256 amd64_emit_eq_goto,
2257 amd64_emit_ne_goto,
2258 amd64_emit_lt_goto,
2259 amd64_emit_le_goto,
2260 amd64_emit_gt_goto,
2261 amd64_emit_ge_goto
2262 };
2263
2264 #endif /* __x86_64__ */
2265
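/* The i386 emitters keep the 64-bit top-of-stack value split across
   %eax (low half) and %ebx (high half), with deeper stack entries
   stored as 8-byte pairs on the CPU stack.  The epilogue stores the
   final %eax/%ebx pair through the value pointer and returns zero.  */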
2266 static void
2267 i386_emit_prologue (void)
2268 {
2269 EMIT_ASM32 (i386_prologue,
2270 "push %ebp\n\t"
2271 "mov %esp,%ebp\n\t"
2272 "push %ebx");
2273 /* At this point, the raw regs base address is at 8(%ebp), and the
2274 value pointer is at 12(%ebp). */
2275 }
2276
2277 static void
2278 i386_emit_epilogue (void)
2279 {
2280 EMIT_ASM32 (i386_epilogue,
2281 "mov 12(%ebp),%ecx\n\t"
2282 "mov %eax,(%ecx)\n\t"
2283 "mov %ebx,0x4(%ecx)\n\t"
2284 "xor %eax,%eax\n\t"
2285 "pop %ebx\n\t"
2286 "pop %ebp\n\t"
2287 "ret");
2288 }
2289
2290 static void
2291 i386_emit_add (void)
2292 {
2293 EMIT_ASM32 (i386_add,
2294 "add (%esp),%eax\n\t"
2295 "adc 0x4(%esp),%ebx\n\t"
2296 "lea 0x8(%esp),%esp");
2297 }
2298
2299 static void
2300 i386_emit_sub (void)
2301 {
2302 EMIT_ASM32 (i386_sub,
2303 "subl %eax,(%esp)\n\t"
2304 "sbbl %ebx,4(%esp)\n\t"
2305 "pop %eax\n\t"
2306 "pop %ebx\n\t");
2307 }
2308
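/* 64-bit multiplication and shifts are not implemented for i386;
   setting emit_error makes the caller abandon compilation of this
   agent expression (presumably falling back to interpreting it).  */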
2309 static void
2310 i386_emit_mul (void)
2311 {
2312 emit_error = 1;
2313 }
2314
2315 static void
2316 i386_emit_lsh (void)
2317 {
2318 emit_error = 1;
2319 }
2320
2321 static void
2322 i386_emit_rsh_signed (void)
2323 {
2324 emit_error = 1;
2325 }
2326
2327 static void
2328 i386_emit_rsh_unsigned (void)
2329 {
2330 emit_error = 1;
2331 }
2332
2333 static void
2334 i386_emit_ext (int arg)
2335 {
2336 switch (arg)
2337 {
2338 case 8:
2339 EMIT_ASM32 (i386_ext_8,
2340 "cbtw\n\t"
2341 "cwtl\n\t"
2342 "movl %eax,%ebx\n\t"
2343 "sarl $31,%ebx");
2344 break;
2345 case 16:
2346 EMIT_ASM32 (i386_ext_16,
2347 "cwtl\n\t"
2348 "movl %eax,%ebx\n\t"
2349 "sarl $31,%ebx");
2350 break;
2351 case 32:
2352 EMIT_ASM32 (i386_ext_32,
2353 "movl %eax,%ebx\n\t"
2354 "sarl $31,%ebx");
2355 break;
2356 default:
2357 emit_error = 1;
2358 }
2359 }
2360
2361 static void
2362 i386_emit_log_not (void)
2363 {
2364 EMIT_ASM32 (i386_log_not,
2365 "or %ebx,%eax\n\t"
2366 "test %eax,%eax\n\t"
2367 "sete %cl\n\t"
2368 "xor %ebx,%ebx\n\t"
2369 "movzbl %cl,%eax");
2370 }
2371
2372 static void
2373 i386_emit_bit_and (void)
2374 {
2375 EMIT_ASM32 (i386_and,
2376 "and (%esp),%eax\n\t"
2377 "and 0x4(%esp),%ebx\n\t"
2378 "lea 0x8(%esp),%esp");
2379 }
2380
2381 static void
2382 i386_emit_bit_or (void)
2383 {
2384 EMIT_ASM32 (i386_or,
2385 "or (%esp),%eax\n\t"
2386 "or 0x4(%esp),%ebx\n\t"
2387 "lea 0x8(%esp),%esp");
2388 }
2389
2390 static void
2391 i386_emit_bit_xor (void)
2392 {
2393 EMIT_ASM32 (i386_xor,
2394 "xor (%esp),%eax\n\t"
2395 "xor 0x4(%esp),%ebx\n\t"
2396 "lea 0x8(%esp),%esp");
2397 }
2398
2399 static void
2400 i386_emit_bit_not (void)
2401 {
2402 EMIT_ASM32 (i386_bit_not,
2403 "xor $0xffffffff,%eax\n\t"
2404 "xor $0xffffffff,%ebx\n\t");
2405 }
2406
2407 static void
2408 i386_emit_equal (void)
2409 {
2410 EMIT_ASM32 (i386_equal,
2411 "cmpl %ebx,4(%esp)\n\t"
2412 "jne .Li386_equal_false\n\t"
2413 "cmpl %eax,(%esp)\n\t"
2414 "je .Li386_equal_true\n\t"
2415 ".Li386_equal_false:\n\t"
2416 "xor %eax,%eax\n\t"
2417 "jmp .Li386_equal_end\n\t"
2418 ".Li386_equal_true:\n\t"
2419 "mov $1,%eax\n\t"
2420 ".Li386_equal_end:\n\t"
2421 "xor %ebx,%ebx\n\t"
2422 "lea 0x8(%esp),%esp");
2423 }
2424
2425 static void
2426 i386_emit_less_signed (void)
2427 {
2428 EMIT_ASM32 (i386_less_signed,
2429 "cmpl %ebx,4(%esp)\n\t"
2430 "jl .Li386_less_signed_true\n\t"
2431 "jne .Li386_less_signed_false\n\t"
2432 "cmpl %eax,(%esp)\n\t"
2433 "jl .Li386_less_signed_true\n\t"
2434 ".Li386_less_signed_false:\n\t"
2435 "xor %eax,%eax\n\t"
2436 "jmp .Li386_less_signed_end\n\t"
2437 ".Li386_less_signed_true:\n\t"
2438 "mov $1,%eax\n\t"
2439 ".Li386_less_signed_end:\n\t"
2440 "xor %ebx,%ebx\n\t"
2441 "lea 0x8(%esp),%esp");
2442 }
2443
2444 static void
2445 i386_emit_less_unsigned (void)
2446 {
2447 EMIT_ASM32 (i386_less_unsigned,
2448 "cmpl %ebx,4(%esp)\n\t"
2449 "jb .Li386_less_unsigned_true\n\t"
2450 "jne .Li386_less_unsigned_false\n\t"
2451 "cmpl %eax,(%esp)\n\t"
2452 "jb .Li386_less_unsigned_true\n\t"
2453 ".Li386_less_unsigned_false:\n\t"
2454 "xor %eax,%eax\n\t"
2455 "jmp .Li386_less_unsigned_end\n\t"
2456 ".Li386_less_unsigned_true:\n\t"
2457 "mov $1,%eax\n\t"
2458 ".Li386_less_unsigned_end:\n\t"
2459 "xor %ebx,%ebx\n\t"
2460 "lea 0x8(%esp),%esp");
2461 }
2462
2463 static void
2464 i386_emit_ref (int size)
2465 {
2466 switch (size)
2467 {
2468 case 1:
2469 EMIT_ASM32 (i386_ref1,
2470 "movb (%eax),%al");
2471 break;
2472 case 2:
2473 EMIT_ASM32 (i386_ref2,
2474 "movw (%eax),%ax");
2475 break;
2476 case 4:
2477 EMIT_ASM32 (i386_ref4,
2478 "movl (%eax),%eax");
2479 break;
2480 case 8:
2481 EMIT_ASM32 (i386_ref8,
2482 "movl 4(%eax),%ebx\n\t"
2483 "movl (%eax),%eax");
2484 break;
2485 }
2486 }
2487
2488 static void
2489 i386_emit_if_goto (int *offset_p, int *size_p)
2490 {
2491 EMIT_ASM32 (i386_if_goto,
2492 "mov %eax,%ecx\n\t"
2493 "or %ebx,%ecx\n\t"
2494 "pop %eax\n\t"
2495 "pop %ebx\n\t"
2496 "cmpl $0,%ecx\n\t"
2497 /* Don't trust the assembler to choose the right jump */
2498 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2499
2500 if (offset_p)
2501 *offset_p = 11; /* be sure that this matches the sequence above */
2502 if (size_p)
2503 *size_p = 4;
2504 }
2505
2506 static void
2507 i386_emit_goto (int *offset_p, int *size_p)
2508 {
2509 EMIT_ASM32 (i386_goto,
2510 /* Don't trust the assembler to choose the right jump */
2511 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2512 if (offset_p)
2513 *offset_p = 1;
2514 if (size_p)
2515 *size_p = 4;
2516 }
2517
2518 static void
2519 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2520 {
2521 int diff = (to - (from + size));
2522 unsigned char buf[sizeof (int)];
2523
2524 /* We're only doing 4-byte sizes at the moment. */
2525 if (size != 4)
2526 {
2527 emit_error = 1;
2528 return;
2529 }
2530
2531 memcpy (buf, &diff, sizeof (int));
2532 target_write_memory (from, buf, sizeof (int));
2533 }
2534
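/* Emit `mov $<low>,%eax' plus either `mov $<high>,%ebx' or
   `xor %ebx,%ebx' to load the 64-bit constant NUM into the
   top-of-stack register pair.  */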
2535 static void
2536 i386_emit_const (LONGEST num)
2537 {
2538 unsigned char buf[16];
2539 int i, hi, lo;
2540 CORE_ADDR buildaddr = current_insn_ptr;
2541
2542 i = 0;
2543 buf[i++] = 0xb8; /* mov $<n>,%eax */
2544 lo = num & 0xffffffff;
2545 memcpy (&buf[i], &lo, sizeof (lo));
2546 i += 4;
2547 hi = ((num >> 32) & 0xffffffff);
2548 if (hi)
2549 {
2550 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2551 memcpy (&buf[i], &hi, sizeof (hi));
2552 i += 4;
2553 }
2554 else
2555 {
2556 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2557 }
2558 append_insns (&buildaddr, i, buf);
2559 current_insn_ptr = buildaddr;
2560 }
2561
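/* Emit a 32-bit relative call to FN.  Unlike the amd64 version, a
   near call always suffices, since i386 addresses fit in 32 bits.  */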
2562 static void
2563 i386_emit_call (CORE_ADDR fn)
2564 {
2565 unsigned char buf[16];
2566 int i, offset;
2567 CORE_ADDR buildaddr;
2568
2569 buildaddr = current_insn_ptr;
2570 i = 0;
2571 buf[i++] = 0xe8; /* call <reladdr> */
2572 offset = ((int) fn) - (buildaddr + 5);
2573 memcpy (buf + 1, &offset, 4);
2574 append_insns (&buildaddr, 5, buf);
2575 current_insn_ptr = buildaddr;
2576 }
2577
2578 static void
2579 i386_emit_reg (int reg)
2580 {
2581 unsigned char buf[16];
2582 int i;
2583 CORE_ADDR buildaddr;
2584
2585 EMIT_ASM32 (i386_reg_a,
2586 "sub $0x8,%esp");
2587 buildaddr = current_insn_ptr;
2588 i = 0;
2589 buf[i++] = 0xb8; /* mov $<n>,%eax */
2590 memcpy (&buf[i], &reg, sizeof (reg));
2591 i += 4;
2592 append_insns (&buildaddr, i, buf);
2593 current_insn_ptr = buildaddr;
2594 EMIT_ASM32 (i386_reg_b,
2595 "mov %eax,4(%esp)\n\t"
2596 "mov 8(%ebp),%eax\n\t"
2597 "mov %eax,(%esp)");
2598 i386_emit_call (get_raw_reg_func_addr ());
2599 EMIT_ASM32 (i386_reg_c,
2600 "xor %ebx,%ebx\n\t"
2601 "lea 0x8(%esp),%esp");
2602 }
2603
2604 static void
2605 i386_emit_pop (void)
2606 {
2607 EMIT_ASM32 (i386_pop,
2608 "pop %eax\n\t"
2609 "pop %ebx");
2610 }
2611
2612 static void
2613 i386_emit_stack_flush (void)
2614 {
2615 EMIT_ASM32 (i386_stack_flush,
2616 "push %ebx\n\t"
2617 "push %eax");
2618 }
2619
2620 static void
2621 i386_emit_zero_ext (int arg)
2622 {
2623 switch (arg)
2624 {
2625 case 8:
2626 EMIT_ASM32 (i386_zero_ext_8,
2627 "and $0xff,%eax\n\t"
2628 "xor %ebx,%ebx");
2629 break;
2630 case 16:
2631 EMIT_ASM32 (i386_zero_ext_16,
2632 "and $0xffff,%eax\n\t"
2633 "xor %ebx,%ebx");
2634 break;
2635 case 32:
2636 EMIT_ASM32 (i386_zero_ext_32,
2637 "xor %ebx,%ebx");
2638 break;
2639 default:
2640 emit_error = 1;
2641 }
2642 }
2643
2644 static void
2645 i386_emit_swap (void)
2646 {
2647 EMIT_ASM32 (i386_swap,
2648 "mov %eax,%ecx\n\t"
2649 "mov %ebx,%edx\n\t"
2650 "pop %eax\n\t"
2651 "pop %ebx\n\t"
2652 "push %edx\n\t"
2653 "push %ecx");
2654 }
2655
2656 static void
2657 i386_emit_stack_adjust (int n)
2658 {
2659 unsigned char buf[16];
2660 int i;
2661 CORE_ADDR buildaddr = current_insn_ptr;
2662
2663 i = 0;
2664 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2665 buf[i++] = 0x64;
2666 buf[i++] = 0x24;
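/* As on amd64, the displacement is a single signed byte, so only
   small adjustments are handled, which is all we expect.  */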
2667 buf[i++] = n * 8;
2668 append_insns (&buildaddr, i, buf);
2669 current_insn_ptr = buildaddr;
2670 }
2671
2672 /* FN's prototype is `LONGEST(*fn)(int)'. */
2673
2674 static void
2675 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2676 {
2677 unsigned char buf[16];
2678 int i;
2679 CORE_ADDR buildaddr;
2680
2681 EMIT_ASM32 (i386_int_call_1_a,
2682 /* Reserve a bit of stack space. */
2683 "sub $0x8,%esp");
2684 /* Put the one argument on the stack. */
2685 buildaddr = current_insn_ptr;
2686 i = 0;
2687 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2688 buf[i++] = 0x04;
2689 buf[i++] = 0x24;
2690 memcpy (&buf[i], &arg1, sizeof (arg1));
2691 i += 4;
2692 append_insns (&buildaddr, i, buf);
2693 current_insn_ptr = buildaddr;
2694 i386_emit_call (fn);
2695 EMIT_ASM32 (i386_int_call_1_c,
2696 "mov %edx,%ebx\n\t"
2697 "lea 0x8(%esp),%esp");
2698 }
2699
2700 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2701
2702 static void
2703 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2704 {
2705 unsigned char buf[16];
2706 int i;
2707 CORE_ADDR buildaddr;
2708
2709 EMIT_ASM32 (i386_void_call_2_a,
2710 /* Preserve %eax only; we don't have to worry about %ebx. */
2711 "push %eax\n\t"
2712 /* Reserve a bit of stack space for arguments. */
2713 "sub $0x10,%esp\n\t"
2714 /* Copy "top" to the second argument position. (Note that
2715 we can't assume the function won't scribble on its
2716 arguments, so don't try to restore from this.) */
2717 "mov %eax,4(%esp)\n\t"
2718 "mov %ebx,8(%esp)");
2719 /* Put the first argument on the stack. */
2720 buildaddr = current_insn_ptr;
2721 i = 0;
2722 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2723 buf[i++] = 0x04;
2724 buf[i++] = 0x24;
2725 memcpy (&buf[i], &arg1, sizeof (arg1));
2726 i += 4;
2727 append_insns (&buildaddr, i, buf);
2728 current_insn_ptr = buildaddr;
2729 i386_emit_call (fn);
2730 EMIT_ASM32 (i386_void_call_2_b,
2731 "lea 0x10(%esp),%esp\n\t"
2732 /* Restore original stack top. */
2733 "pop %eax");
2734 }
2735
2736
2737 static void
2738 i386_emit_eq_goto (int *offset_p, int *size_p)
2739 {
2740 EMIT_ASM32 (eq,
2741 /* Check the low half first; it is more likely to be the decider. */
2742 "cmpl %eax,(%esp)\n\t"
2743 "jne .Leq_fallthru\n\t"
2744 "cmpl %ebx,4(%esp)\n\t"
2745 "jne .Leq_fallthru\n\t"
2746 "lea 0x8(%esp),%esp\n\t"
2747 "pop %eax\n\t"
2748 "pop %ebx\n\t"
2749 /* jmp, but don't trust the assembler to choose the right jump */
2750 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2751 ".Leq_fallthru:\n\t"
2752 "lea 0x8(%esp),%esp\n\t"
2753 "pop %eax\n\t"
2754 "pop %ebx");
2755
2756 if (offset_p)
2757 *offset_p = 18;
2758 if (size_p)
2759 *size_p = 4;
2760 }
2761
2762 static void
2763 i386_emit_ne_goto (int *offset_p, int *size_p)
2764 {
2765 EMIT_ASM32 (ne,
2766 /* Check the low half first; it is more likely to be the decider. */
2767 "cmpl %eax,(%esp)\n\t"
2768 "jne .Lne_jump\n\t"
2769 "cmpl %ebx,4(%esp)\n\t"
2770 "je .Lne_fallthru\n\t"
2771 ".Lne_jump:\n\t"
2772 "lea 0x8(%esp),%esp\n\t"
2773 "pop %eax\n\t"
2774 "pop %ebx\n\t"
2775 /* jmp, but don't trust the assembler to choose the right jump */
2776 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2777 ".Lne_fallthru:\n\t"
2778 "lea 0x8(%esp),%esp\n\t"
2779 "pop %eax\n\t"
2780 "pop %ebx");
2781
2782 if (offset_p)
2783 *offset_p = 18;
2784 if (size_p)
2785 *size_p = 4;
2786 }
2787
2788 static void
2789 i386_emit_lt_goto (int *offset_p, int *size_p)
2790 {
2791 EMIT_ASM32 (lt,
2792 "cmpl %ebx,4(%esp)\n\t"
2793 "jl .Llt_jump\n\t"
2794 "jne .Llt_fallthru\n\t"
2795 "cmpl %eax,(%esp)\n\t"
2796 "jnl .Llt_fallthru\n\t"
2797 ".Llt_jump:\n\t"
2798 "lea 0x8(%esp),%esp\n\t"
2799 "pop %eax\n\t"
2800 "pop %ebx\n\t"
2801 /* jmp, but don't trust the assembler to choose the right jump */
2802 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2803 ".Llt_fallthru:\n\t"
2804 "lea 0x8(%esp),%esp\n\t"
2805 "pop %eax\n\t"
2806 "pop %ebx");
2807
2808 if (offset_p)
2809 *offset_p = 20;
2810 if (size_p)
2811 *size_p = 4;
2812 }
2813
2814 static void
2815 i386_emit_le_goto (int *offset_p, int *size_p)
2816 {
2817 EMIT_ASM32 (le,
2818 "cmpl %ebx,4(%esp)\n\t"
2819 "jle .Lle_jump\n\t"
2820 "jne .Lle_fallthru\n\t"
2821 "cmpl %eax,(%esp)\n\t"
2822 "jnle .Lle_fallthru\n\t"
2823 ".Lle_jump:\n\t"
2824 "lea 0x8(%esp),%esp\n\t"
2825 "pop %eax\n\t"
2826 "pop %ebx\n\t"
2827 /* jmp, but don't trust the assembler to choose the right jump */
2828 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2829 ".Lle_fallthru:\n\t"
2830 "lea 0x8(%esp),%esp\n\t"
2831 "pop %eax\n\t"
2832 "pop %ebx");
2833
2834 if (offset_p)
2835 *offset_p = 20;
2836 if (size_p)
2837 *size_p = 4;
2838 }
2839
2840 static void
2841 i386_emit_gt_goto (int *offset_p, int *size_p)
2842 {
2843 EMIT_ASM32 (gt,
2844 "cmpl %ebx,4(%esp)\n\t"
2845 "jg .Lgt_jump\n\t"
2846 "jne .Lgt_fallthru\n\t"
2847 "cmpl %eax,(%esp)\n\t"
2848 "jng .Lgt_fallthru\n\t"
2849 ".Lgt_jump:\n\t"
2850 "lea 0x8(%esp),%esp\n\t"
2851 "pop %eax\n\t"
2852 "pop %ebx\n\t"
2853 /* jmp, but don't trust the assembler to choose the right jump */
2854 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2855 ".Lgt_fallthru:\n\t"
2856 "lea 0x8(%esp),%esp\n\t"
2857 "pop %eax\n\t"
2858 "pop %ebx");
2859
2860 if (offset_p)
2861 *offset_p = 20;
2862 if (size_p)
2863 *size_p = 4;
2864 }
2865
2866 static void
2867 i386_emit_ge_goto (int *offset_p, int *size_p)
2868 {
2869 EMIT_ASM32 (ge,
2870 "cmpl %ebx,4(%esp)\n\t"
2871 "jge .Lge_jump\n\t"
2872 "jne .Lge_fallthru\n\t"
2873 "cmpl %eax,(%esp)\n\t"
2874 "jnge .Lge_fallthru\n\t"
2875 ".Lge_jump:\n\t"
2876 "lea 0x8(%esp),%esp\n\t"
2877 "pop %eax\n\t"
2878 "pop %ebx\n\t"
2879 /* jmp, but don't trust the assembler to choose the right jump */
2880 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2881 ".Lge_fallthru:\n\t"
2882 "lea 0x8(%esp),%esp\n\t"
2883 "pop %eax\n\t"
2884 "pop %ebx");
2885
2886 if (offset_p)
2887 *offset_p = 20;
2888 if (size_p)
2889 *size_p = 4;
2890 }
2891
2892 static emit_ops i386_emit_ops =
2893 {
2894 i386_emit_prologue,
2895 i386_emit_epilogue,
2896 i386_emit_add,
2897 i386_emit_sub,
2898 i386_emit_mul,
2899 i386_emit_lsh,
2900 i386_emit_rsh_signed,
2901 i386_emit_rsh_unsigned,
2902 i386_emit_ext,
2903 i386_emit_log_not,
2904 i386_emit_bit_and,
2905 i386_emit_bit_or,
2906 i386_emit_bit_xor,
2907 i386_emit_bit_not,
2908 i386_emit_equal,
2909 i386_emit_less_signed,
2910 i386_emit_less_unsigned,
2911 i386_emit_ref,
2912 i386_emit_if_goto,
2913 i386_emit_goto,
2914 i386_write_goto_address,
2915 i386_emit_const,
2916 i386_emit_call,
2917 i386_emit_reg,
2918 i386_emit_pop,
2919 i386_emit_stack_flush,
2920 i386_emit_zero_ext,
2921 i386_emit_swap,
2922 i386_emit_stack_adjust,
2923 i386_emit_int_call_1,
2924 i386_emit_void_call_2,
2925 i386_emit_eq_goto,
2926 i386_emit_ne_goto,
2927 i386_emit_lt_goto,
2928 i386_emit_le_goto,
2929 i386_emit_gt_goto,
2930 i386_emit_ge_goto
2931 };
2932
2933
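/* Implementation of target ops method "emit_ops".  Return the emit_ops
   vector matching the bitness of the current inferior.  */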
2934 emit_ops *
2935 x86_target::emit_ops ()
2936 {
2937 #ifdef __x86_64__
2938 if (is_64bit_tdesc (current_thread))
2939 return &amd64_emit_ops;
2940 else
2941 #endif
2942 return &i386_emit_ops;
2943 }
2944
2945 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2946
2947 const gdb_byte *
2948 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2949 {
2950 *size = x86_breakpoint_len;
2951 return x86_breakpoint;
2952 }
2953
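/* Implementation of linux target ops method "low_supports_range_stepping".  */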
2954 bool
2955 x86_target::low_supports_range_stepping ()
2956 {
2957 return true;
2958 }
2959
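/* Implementation of target ops method "get_ipa_tdesc_idx".  */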
2960 int
2961 x86_target::get_ipa_tdesc_idx ()
2962 {
2963 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2964 const struct target_desc *tdesc = regcache->tdesc;
2965
2966 #ifdef __x86_64__
2967 return amd64_get_ipa_tdesc_idx (tdesc);
2968 #endif
2969
2970 if (tdesc == tdesc_i386_linux_no_xml.get ())
2971 return X86_TDESC_SSE;
2972
2973 return i386_get_ipa_tdesc_idx (tdesc);
2974 }
2975
2976 /* The linux target ops object. */
2977
2978 linux_process_target *the_linux_target = &the_x86_target;
2979
2980 void
2981 initialize_low_arch (void)
2982 {
2983 /* Initialize the Linux target descriptions. */
2984 #ifdef __x86_64__
2985 tdesc_amd64_linux_no_xml = allocate_target_description ();
2986 copy_target_description (tdesc_amd64_linux_no_xml.get (),
2987 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2988 false));
2989 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2990 #endif
2991
2992 tdesc_i386_linux_no_xml = allocate_target_description ();
2993 copy_target_description (tdesc_i386_linux_no_xml.get (),
2994 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2995 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2996
2997 initialize_regsets_info (&x86_regsets_info);
2998 }