--- /dev/null
+/* Copyright (C) 2010 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+#include "defs.h"
+#include "ia64-tdep.h"
+#include "inferior.h"
+#include "inf-ttrace.h"
+#include "regcache.h"
+#include "solib-ia64-hpux.h"
+
+#include <ia64/sys/uregs.h>
+#include <sys/ttrace.h>
+#include <sys/wait.h>  /* For wait, used in ia64_hpux_mourn_inferior. */
+
+/* The offsets used with ttrace to read the value of the raw registers. */
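+/* This table is indexed by GDB's raw register number, following the
+   numbering defined in ia64-tdep.h: gr0-gr127 first, then fr0-fr127,
+   followed by the predicate, branch and application registers. An
+   entry of -1 marks a register that cannot be read via TT_LWP_RUREGS
+   (it is either unavailable, or handled specially elsewhere). */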
+
+static int u_offsets[] =
+{ /* Static General Registers. */
+ -1, __r1, __r2, __r3, __r4, __r5, __r6, __r7,
+ __r8, __r9, __r10, __r11, __r12, __r13, __r14, __r15,
+ __r16, __r17, __r18, __r19, __r20, __r21, __r22, __r23,
+ __r24, __r25, __r26, __r27, __r28, __r29, __r30, __r31,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+
+ /* Static Floating-Point Registers. */
+ -1, -1, __f2, __f3, __f4, __f5, __f6, __f7,
+ __f8, __f9, __f10, __f11, __f12, __f13, __f14, __f15,
+ __f16, __f17, __f18, __f19, __f20, __f21, __f22, __f23,
+ __f24, __f25, __f26, __f27, __f28, __f29, __f30, __f31,
+ __f32, __f33, __f34, __f35, __f36, __f37, __f38, __f39,
+ __f40, __f41, __f42, __f43, __f44, __f45, __f46, __f47,
+ __f48, __f49, __f50, __f51, __f52, __f53, __f54, __f55,
+ __f56, __f57, __f58, __f59, __f60, __f61, __f62, __f63,
+ __f64, __f65, __f66, __f67, __f68, __f69, __f70, __f71,
+ __f72, __f73, __f74, __f75, __f76, __f77, __f78, __f79,
+ __f80, __f81, __f82, __f83, __f84, __f85, __f86, __f87,
+ __f88, __f89, __f90, __f91, __f92, __f93, __f94, __f95,
+ __f96, __f97, __f98, __f99, __f100, __f101, __f102, __f103,
+ __f104, __f105, __f106, __f107, __f108, __f109, __f110, __f111,
+ __f112, __f113, __f114, __f115, __f116, __f117, __f118, __f119,
+ __f120, __f121, __f122, __f123, __f124, __f125, __f126, __f127,
+
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+
+ /* Branch Registers. */
+ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7,
+
+ /* Virtual frame pointer and virtual return address pointer. */
+ -1, -1,
+
+ /* Other registers. */
+ __pr, __ip, __cr_ipsr, __cfm,
+
+ /* Kernel registers. */
+ -1, -1, -1, -1,
+ -1, -1, -1, -1,
+
+ -1, -1, -1, -1, -1, -1, -1, -1,
+
+ /* Some application registers. */
+ __ar_rsc, __ar_bsp, __ar_bspstore, __ar_rnat,
+
+ -1,
+ -1, /* Not available: FCR, IA32 floating control register. */
+ -1, -1,
+
+ -1, /* Not available: EFLAG. */
+ -1, /* Not available: CSD. */
+ -1, /* Not available: SSD. */
+ -1, /* Not available: CFLG. */
+ -1, /* Not available: FSR. */
+ -1, /* Not available: FIR. */
+ -1, /* Not available: FDR. */
+ -1,
+ __ar_ccv, -1, -1, -1, __ar_unat, -1, -1, -1,
+ __ar_fpsr, -1, -1, -1,
+ -1, /* Not available: ITC. */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ __ar_pfs, __ar_lc, __ar_ec,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1
+ /* All following registers, starting with nat0, are handled as
+ pseudo registers, and hence are handled separately. */
+};
+
+/* Some registers have a fixed value and cannot be modified.
+   Store their values in static constant buffers that can be used
+   later to fill the register cache. */
+static const char r0_value[8] = {0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00};
+static const char f0_value[16] = {0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00};
+static const char f1_value[16] = {0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff,
+ 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00};
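+/* The f1 buffer above holds the ia64 register-format encoding of 1.0,
+   assuming the big-endian layout used on this platform: the 17-bit
+   biased exponent 0x0ffff ends at bytes 6-7, and bytes 8-15 hold the
+   64-bit significand 0x8000000000000000. */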
+
+/* The "to_wait" routine from the "inf-ttrace" layer. */
+
+static ptid_t (*super_to_wait) (struct target_ops *, ptid_t,
+ struct target_waitstatus *, int);
+
+/* The "to_wait" target_ops routine routine for ia64-hpux. */
+
+static ptid_t
+ia64_hpux_wait (struct target_ops *ops, ptid_t ptid,
+ struct target_waitstatus *ourstatus, int options)
+{
+ ptid_t new_ptid;
+
+ new_ptid = super_to_wait (ops, ptid, ourstatus, options);
+
+ /* If this is a DLD event (hard-coded breakpoint instruction
+ that was activated by the solib-ia64-hpux module), we need to
+ process it, and then resume the execution as if the event did
+ not happen. */
+ if (ourstatus->kind == TARGET_WAITKIND_STOPPED
+ && ourstatus->value.sig == TARGET_SIGNAL_TRAP
+ && ia64_hpux_at_dld_breakpoint_p (new_ptid))
+ {
+ ia64_hpux_handle_dld_breakpoint (new_ptid);
+
+ target_resume (new_ptid, 0, TARGET_SIGNAL_0);
+ ourstatus->kind = TARGET_WAITKIND_IGNORE;
+ }
+
+ return new_ptid;
+}
+
+/* Fetch the RNAT register and supply it to the REGCACHE. */
+
+static void
+ia64_hpux_fetch_rnat_register (struct regcache *regcache)
+{
+ CORE_ADDR addr;
+ gdb_byte buf[8];
+ int status;
+
+ /* The value of RNAT is stored at bsp|0x1f8, and must be read using
+ TT_LWP_RDRSEBS. */
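+  /* (The RSE saves an RNAT collection in the backing store every 63
+     register slots, at the address whose bits 3..8 are all ones, so
+     OR-ing 0x1f8 into BSP yields the address of the RNAT collection
+     slot that covers the current BSP.) */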
+
+ regcache_raw_read_unsigned (regcache, IA64_BSP_REGNUM, &addr);
+ addr |= 0x1f8;
+
+ status = ttrace (TT_LWP_RDRSEBS, ptid_get_pid (inferior_ptid),
+ ptid_get_lwp (inferior_ptid), addr, sizeof (buf),
+ (uintptr_t) buf);
+ if (status < 0)
+ error (_("failed to read RNAT register at %s"),
+	   paddress (get_regcache_arch (regcache), addr));
+
+ regcache_raw_supply (regcache, IA64_RNAT_REGNUM, buf);
+}
+
+/* Read the value of the register saved at OFFSET in the save_state_t
+ structure, and store its value in BUF. LEN is the size of the register
+ to be read. */
+
+static int
+ia64_hpux_read_register_from_save_state_t (int offset, gdb_byte *buf, int len)
+{
+ int status;
+
+ status = ttrace (TT_LWP_RUREGS, ptid_get_pid (inferior_ptid),
+ ptid_get_lwp (inferior_ptid), offset, len, (uintptr_t) buf);
+
+ return status;
+}
+
+/* Fetch register REGNUM from the inferior. */
+
+static void
+ia64_hpux_fetch_register (struct regcache *regcache, int regnum)
+{
+ struct gdbarch *gdbarch = get_regcache_arch (regcache);
+ int offset, len, status;
+ gdb_byte *buf;
+
+ if (regnum == IA64_GR0_REGNUM)
+ {
+ /* r0 is always 0. */
+ regcache_raw_supply (regcache, regnum, r0_value);
+ return;
+ }
+
+ if (regnum == IA64_FR0_REGNUM)
+ {
+ /* f0 is always 0.0. */
+ regcache_raw_supply (regcache, regnum, f0_value);
+ return;
+ }
+
+ if (regnum == IA64_FR1_REGNUM)
+ {
+ /* f1 is always 1.0. */
+ regcache_raw_supply (regcache, regnum, f1_value);
+ return;
+ }
+
+ if (regnum == IA64_RNAT_REGNUM)
+ {
+ ia64_hpux_fetch_rnat_register (regcache);
+ return;
+ }
+
+  /* Get the register location. If the register cannot be fetched,
+     then return now. */
+ offset = u_offsets[regnum];
+ if (offset == -1)
+ return;
+
+ len = register_size (gdbarch, regnum);
+ buf = alloca (len * sizeof (gdb_byte));
+ status = ia64_hpux_read_register_from_save_state_t (offset, buf, len);
+ if (status < 0)
+ warning (_("Failed to read register value for %s.\n"),
+ gdbarch_register_name (gdbarch, regnum));
+
+ regcache_raw_supply (regcache, regnum, buf);
+}
+
+/* The "to_fetch_registers" target_ops routine for ia64-hpux. */
+
+static void
+ia64_hpux_fetch_registers (struct target_ops *ops,
+ struct regcache *regcache, int regnum)
+{
+ if (regnum == -1)
+ for (regnum = 0;
+ regnum < gdbarch_num_regs (get_regcache_arch (regcache));
+ regnum++)
+ ia64_hpux_fetch_register (regcache, regnum);
+ else
+ ia64_hpux_fetch_register (regcache, regnum);
+}
+
+/* Write the register value stored in BUF at OFFSET in the save_state_t
+   structure. LEN is the size of the register in bytes.
+
+   Return the value from the corresponding ttrace call (a negative value
+   means that the operation failed). */
+
+static int
+ia64_hpux_write_register_to_save_state_t (int offset, gdb_byte *buf, int len)
+{
+ return ttrace (TT_LWP_WUREGS, ptid_get_pid (inferior_ptid),
+ ptid_get_lwp (inferior_ptid), offset, len, (uintptr_t) buf);
+}
+
+/* Store register REGNUM into the inferior. */
+
+static void
+ia64_hpux_store_register (const struct regcache *regcache, int regnum)
+{
+ struct gdbarch *gdbarch = get_regcache_arch (regcache);
+ int offset = u_offsets[regnum];
+ gdb_byte *buf;
+ int len, status;
+
+  /* If the register cannot be stored, then return now. */
+ if (offset == -1)
+ return;
+
+  /* We do not know how to store this register for now, so just ignore
+     any request to store it, to avoid an internal error. */
+ if (regnum == IA64_PSR_REGNUM)
+ return;
+
+ len = register_size (gdbarch, regnum);
+ buf = alloca (len * sizeof (gdb_byte));
+ regcache_raw_collect (regcache, regnum, buf);
+
+  status = ia64_hpux_write_register_to_save_state_t (offset, buf, len);
+
+ if (status < 0)
+ error (_("failed to write register value for %s.\n"),
+ gdbarch_register_name (gdbarch, regnum));
+}
+
+/* The "to_store_registers" target_ops routine for ia64-hpux. */
+
+static void
+ia64_hpux_store_registers (struct target_ops *ops,
+ struct regcache *regcache, int regnum)
+{
+ if (regnum == -1)
+ for (regnum = 0;
+ regnum < gdbarch_num_regs (get_regcache_arch (regcache));
+ regnum++)
+ ia64_hpux_store_register (regcache, regnum);
+ else
+ ia64_hpux_store_register (regcache, regnum);
+}
+
+/* The "xfer_partial" routine from the "inf-ttrace" target layer.
+ Ideally, we would like to use this routine for all transfer
+   requests, but this platform has a lot of special cases that
+ need to be handled manually. So we override this routine and
+ delegate back if we detect that we are not in a special case. */
+
+static LONGEST (*super_xfer_partial) (struct target_ops *, enum target_object,
+ const char *, gdb_byte *,
+ const gdb_byte *, ULONGEST, LONGEST);
+
+/* The "xfer_partial" routine for a memory region that is completely
+ outside of the backing-store region. */
+
+static LONGEST
+ia64_hpux_xfer_memory_no_bs (struct target_ops *ops, const char *annex,
+ gdb_byte *readbuf, const gdb_byte *writebuf,
+ CORE_ADDR addr, LONGEST len)
+{
+  /* Memory writes need to be aligned on 16-byte boundaries, at least
+     when writing in the text section. On the other hand, the size
+     of the buffer does not need to be a multiple of 16 bytes.
+
+     No such restriction applies when performing memory reads. */
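+  /* For example, a 4-byte write at ADDR 0x1003 becomes a 7-byte write
+     at ALIGNED_ADDR 0x1000: the three bytes at 0x1000-0x1002 are read
+     back first so that the aligned write below preserves them. */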
+
+ if (writebuf && addr & 0x0f)
+ {
+ const CORE_ADDR aligned_addr = addr & ~0x0f;
+ const int aligned_len = len + (addr - aligned_addr);
+ gdb_byte *aligned_buf = alloca (aligned_len * sizeof (gdb_byte));
+ LONGEST status;
+
+ /* Read the portion of memory between ALIGNED_ADDR and ADDR, so
+ that we can write it back during our aligned memory write. */
+ status = super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex,
+ aligned_buf /* read */,
+ NULL /* write */,
+ aligned_addr, addr - aligned_addr);
+ if (status <= 0)
+ return 0;
+ memcpy (aligned_buf + (addr - aligned_addr), writebuf, len);
+
+ return super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex,
+ NULL /* read */, aligned_buf /* write */,
+ aligned_addr, aligned_len);
+ }
+ else
+ /* Memory read or properly aligned memory write. */
+ return super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex, readbuf,
+ writebuf, addr, len);
+}
+
+/* Read LEN bytes at ADDR from memory, and store it in BUF. This memory
+ region is assumed to be inside the backing store.
+
+ Return zero if the operation failed. */
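+/* The backing store is accessed in aligned 8-byte doublewords, so the
+   loop below walks the region one doubleword at a time. For example,
+   reading 5 bytes at ADDR 0x1003 fetches the doubleword at 0x1000 and
+   copies out its bytes 3-7 (SKIP_LO = 3, SKIP_HI = 0). */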
+
+static int
+ia64_hpux_read_memory_bs (gdb_byte *buf, CORE_ADDR addr, int len)
+{
+ gdb_byte tmp_buf[8];
+ CORE_ADDR tmp_addr = addr & ~0x7;
+
+ while (tmp_addr < addr + len)
+ {
+ int status;
+ int skip_lo = 0;
+ int skip_hi = 0;
+
+ status = ttrace (TT_LWP_RDRSEBS, ptid_get_pid (inferior_ptid),
+ ptid_get_lwp (inferior_ptid), tmp_addr,
+ sizeof (tmp_buf), (uintptr_t) tmp_buf);
+ if (status < 0)
+ return 0;
+
+ if (tmp_addr < addr)
+ skip_lo = addr - tmp_addr;
+
+ if (tmp_addr + sizeof (tmp_buf) > addr + len)
+ skip_hi = (tmp_addr + sizeof (tmp_buf)) - (addr + len);
+
+ memcpy (buf + (tmp_addr + skip_lo - addr),
+ tmp_buf + skip_lo,
+ sizeof (tmp_buf) - skip_lo - skip_hi);
+
+ tmp_addr += sizeof (tmp_buf);
+ }
+
+ return 1;
+}
+
+/* Write LEN bytes from BUF in memory at ADDR. This memory region is assumed
+ to be inside the backing store.
+
+ Return zero if the operation failed. */
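+/* As with reads, writes go through aligned 8-byte doublewords; any
+   doubleword only partially covered by [ADDR, ADDR + LEN) is read in
+   first so that the bytes outside that range are preserved. */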
+
+static int
+ia64_hpux_write_memory_bs (const gdb_byte *buf, CORE_ADDR addr, int len)
+{
+ gdb_byte tmp_buf[8];
+ CORE_ADDR tmp_addr = addr & ~0x7;
+
+ while (tmp_addr < addr + len)
+ {
+ int status;
+ int lo = 0;
+ int hi = 7;
+
+      if (tmp_addr < addr || tmp_addr + sizeof (tmp_buf) > addr + len)
+        {
+          /* Part of the 8-byte region pointed to by TMP_ADDR needs to
+             be preserved, so read it in before we copy over the data
+             that needs to be changed. */
+          if (!ia64_hpux_read_memory_bs (tmp_buf, tmp_addr,
+                                         sizeof (tmp_buf)))
+            return 0;
+        }
+
+ if (tmp_addr < addr)
+ lo = addr - tmp_addr;
+
+ if (tmp_addr + sizeof (tmp_buf) > addr + len)
+ hi = addr - tmp_addr + len - 1;
+
+ memcpy (tmp_buf + lo, buf + tmp_addr - addr + lo, hi - lo + 1);
+
+ status = ttrace (TT_LWP_WRRSEBS, ptid_get_pid (inferior_ptid),
+ ptid_get_lwp (inferior_ptid), tmp_addr,
+ sizeof (tmp_buf), (uintptr_t) tmp_buf);
+ if (status < 0)
+ return 0;
+
+ tmp_addr += sizeof (tmp_buf);
+ }
+
+ return 1;
+}
+
+/* The "xfer_partial" routine for a memory region that is completely
+ inside of the backing-store region. */
+
+static LONGEST
+ia64_hpux_xfer_memory_bs (struct target_ops *ops, const char *annex,
+ gdb_byte *readbuf, const gdb_byte *writebuf,
+ CORE_ADDR addr, LONGEST len)
+{
+ int success;
+
+ if (readbuf)
+ success = ia64_hpux_read_memory_bs (readbuf, addr, len);
+ else
+ success = ia64_hpux_write_memory_bs (writebuf, addr, len);
+
+ if (success)
+ return len;
+ else
+ return 0;
+}
+
+/* The "xfer_partial" target_ops routine for ia64-hpux, in the case
+ where the requested object is TARGET_OBJECT_MEMORY. */
+
+static LONGEST
+ia64_hpux_xfer_memory (struct target_ops *ops, const char *annex,
+ gdb_byte *readbuf, const gdb_byte *writebuf,
+ CORE_ADDR addr, LONGEST len)
+{
+ CORE_ADDR bsp, bspstore;
+ CORE_ADDR start_addr, short_len;
+ int status = 0;
+
+  /* The backing-store region cannot be read/written by the standard memory
+     read/write operations. So we handle the memory region piecemeal:
+ (1) and (2) The regions before and after the backing-store region,
+ which can be treated as normal memory;
+ (3) The region inside the backing-store, which needs to be
+ read/written specially. */
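+  /* Schematically:
+
+         (1) normal memory  |  (3) backing store  |  (2) normal memory
+                        BSPSTORE                 BSP
+
+     Regions (1) and (2) go through ia64_hpux_xfer_memory_no_bs,
+     and region (3) through ia64_hpux_xfer_memory_bs. */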
+
+ regcache_raw_read_unsigned (get_current_regcache (), IA64_BSP_REGNUM, &bsp);
+ regcache_raw_read_unsigned (get_current_regcache (), IA64_BSPSTORE_REGNUM,
+ &bspstore);
+
+ /* 1. Memory region before BSPSTORE. */
+
+ if (addr < bspstore)
+ {
+ short_len = len;
+ if (addr + len > bspstore)
+ short_len = bspstore - addr;
+
+ status = ia64_hpux_xfer_memory_no_bs (ops, annex, readbuf, writebuf,
+ addr, short_len);
+ if (status <= 0)
+ return 0;
+ }
+
+ /* 2. Memory region after BSP. */
+
+ if (addr + len > bsp)
+ {
+ start_addr = addr;
+ if (start_addr < bsp)
+ start_addr = bsp;
+ short_len = len + addr - start_addr;
+
+ status = ia64_hpux_xfer_memory_no_bs
+ (ops, annex,
+ readbuf ? readbuf + (start_addr - addr) : NULL,
+ writebuf ? writebuf + (start_addr - addr) : NULL,
+ start_addr, short_len);
+ if (status <= 0)
+ return 0;
+ }
+
+ /* 3. Memory region between BSPSTORE and BSP. */
+
+  if (bspstore != bsp
+      && addr < bsp
+      && addr + len > bspstore)
+ {
+ start_addr = addr;
+ if (addr < bspstore)
+ start_addr = bspstore;
+ short_len = len + addr - start_addr;
+
+ if (start_addr + short_len > bsp)
+ short_len = bsp - start_addr;
+
+ gdb_assert (short_len > 0);
+
+ status = ia64_hpux_xfer_memory_bs
+ (ops, annex,
+ readbuf ? readbuf + (start_addr - addr) : NULL,
+ writebuf ? writebuf + (start_addr - addr) : NULL,
+ start_addr, short_len);
+ if (status < 0)
+ return 0;
+ }
+
+ return len;
+}
+
+/* The "to_xfer_partial" target_ops routine for ia64-hpux. */
+
+static LONGEST
+ia64_hpux_xfer_partial (struct target_ops *ops, enum target_object object,
+ const char *annex, gdb_byte *readbuf,
+ const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
+{
+ LONGEST val;
+
+ if (object == TARGET_OBJECT_MEMORY)
+ val = ia64_hpux_xfer_memory (ops, annex, readbuf, writebuf, offset, len);
+ else
+ val = super_xfer_partial (ops, object, annex, readbuf, writebuf, offset,
+ len);
+
+ return val;
+}
+
+/* The "to_can_use_hw_breakpoint" target_ops routine for ia64-hpux. */
+
+static int
+ia64_hpux_can_use_hw_breakpoint (int type, int cnt, int othertype)
+{
+ /* No hardware watchpoint/breakpoint support yet. */
+ return 0;
+}
+
+/* The "to_mourn_inferior" routine from the "inf-ttrace" target_ops layer. */
+
+static void (*super_mourn_inferior) (struct target_ops *);
+
+/* The "to_mourn_inferior" target_ops routine for ia64-hpux. */
+
+static void
+ia64_hpux_mourn_inferior (struct target_ops *ops)
+{
+ const int pid = ptid_get_pid (inferior_ptid);
+ int status;
+
+ super_mourn_inferior (ops);
+
+ /* On this platform, the process still exists even after we received
+ an exit event. Detaching from the process isn't sufficient either,
+ as it only turns the process into a zombie. So the only solution
+ we found is to kill it. */
+ ttrace (TT_PROC_EXIT, pid, 0, 0, 0, 0);
+ wait (&status);
+}
+
+/* Prevent warning from -Wmissing-prototypes. */
+void _initialize_ia64_hpux_nat (void);
+
+void
+_initialize_ia64_hpux_nat (void)
+{
+ struct target_ops *t;
+
+ t = inf_ttrace_target ();
+ super_to_wait = t->to_wait;
+ super_xfer_partial = t->to_xfer_partial;
+ super_mourn_inferior = t->to_mourn_inferior;
+
+ t->to_wait = ia64_hpux_wait;
+ t->to_fetch_registers = ia64_hpux_fetch_registers;
+ t->to_store_registers = ia64_hpux_store_registers;
+ t->to_xfer_partial = ia64_hpux_xfer_partial;
+ t->to_can_use_hw_breakpoint = ia64_hpux_can_use_hw_breakpoint;
+ t->to_mourn_inferior = ia64_hpux_mourn_inferior;
+ t->to_attach_no_wait = 1;
+
+ add_target (t);
+}
--- /dev/null
+/* Copyright (C) 2010 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+#include "defs.h"
+#include "ia64-tdep.h"
+#include "ia64-hpux-tdep.h"
+#include "solib-ia64-hpux.h"
+#include "solist.h"
+#include "solib.h"
+#include "target.h"
+#include "gdbtypes.h"
+#include "inferior.h"
+#include "gdbcore.h"
+#include "regcache.h"
+#include "opcode/ia64.h"
+#include "symfile.h"
+#include "objfiles.h"
+#include "elf-bfd.h"
+#include "exceptions.h"
+
+/* We need to define the following macro in order to get the complete
+   load_module_desc struct definition in dlfcn.h. Otherwise, it doesn't
+   match the size of the struct the loader provides us during load
+   events. */
+#define _LOAD_MODULE_DESC_EXT
+
+#include <sys/ttrace.h>
+#include <dlfcn.h>
+#include <elf.h>
+#include <service_mgr.h>
+
+/* The following include gives us access to the definition of type
+   load_info_t. */
+#include <crt0.h>
+
+/* The r32 pseudo-register number.
+
+ Like all stacked registers, r32 is treated as a pseudo-register,
+ because it is not always available for read/write via the ttrace
+ interface. */
+/* This is a bit of a hack, as we duplicate something hidden inside
+ ia64-tdep.c, but oh well... */
+#define IA64_R32_PSEUDO_REGNUM (IA64_NAT127_REGNUM + 2)
+
+/* Our struct so_list private data structure. */
+
+struct lm_info
+{
+ /* The shared library module descriptor. We extract this structure
+ from the loader at the time the shared library gets mapped. */
+ struct load_module_desc module_desc;
+
+ /* The text segment address as defined in the shared library object
+ (this is not the address where this segment got loaded). This
+ field is initially set to zero, and computed lazily. */
+ CORE_ADDR text_start;
+
+ /* The data segment address as defined in the shared library object
+ (this is not the address where this segment got loaded). This
+ field is initially set to zero, and computed lazily. */
+ CORE_ADDR data_start;
+};
+
+/* The list of shared libraries currently mapped by the inferior. */
+
+static struct so_list *so_list_head = NULL;
+
+/* Create a new so_list element. The result should be deallocated
+ when no longer in use. */
+
+static struct so_list *
+new_so_list (char *so_name, struct load_module_desc module_desc)
+{
+ struct so_list *new_so;
+
+ new_so = (struct so_list *) XZALLOC (struct so_list);
+ new_so->lm_info = (struct lm_info *) XZALLOC (struct lm_info);
+ new_so->lm_info->module_desc = module_desc;
+
+ strncpy (new_so->so_name, so_name, SO_NAME_MAX_PATH_SIZE - 1);
+ new_so->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
+ strcpy (new_so->so_original_name, new_so->so_name);
+
+ return new_so;
+}
+
+/* Return non-zero if the instruction at the current PC is a breakpoint
+   that is part of the dynamic loading process.
+
+ We identify such instructions by checking that the instruction at
+ the current pc is a break insn where no software breakpoint has been
+ inserted by us. We also verify that the operands have specific
+ known values, to be extra certain.
+
+ PTID is the ptid of the thread that should be checked, but this
+ function also assumes that inferior_ptid is already equal to PTID.
+ Ideally, we would like to avoid the requirement on inferior_ptid,
+ but many routines still use the inferior_ptid global to access
+ the relevant thread's register and memory. We still have the ptid
+ as parameter to be able to pass it to the routines that do take a ptid
+ - that way we avoid increasing explicit uses of the inferior_ptid
+ global. */
+
+static int
+ia64_hpux_at_dld_breakpoint_1_p (ptid_t ptid)
+{
+ struct regcache *regcache = get_thread_regcache (ptid);
+ CORE_ADDR pc = regcache_read_pc (regcache);
+ struct address_space *aspace = get_regcache_aspace (regcache);
+ ia64_insn t0, t1, slot[3], template, insn;
+ int slotnum;
+ bfd_byte bundle[16];
+
+ /* If this is a regular breakpoint, then it can not be a dld one. */
+ if (breakpoint_inserted_here_p (aspace, pc))
+ return 0;
+
+ slotnum = ((long) pc) & 0xf;
+ if (slotnum > 2)
+    internal_error (__FILE__, __LINE__,
+                    _("invalid slot (%d) for address %s"), slotnum,
+                    paddress (get_regcache_arch (regcache), pc));
+
+ pc -= (pc & 0xf);
+ read_memory (pc, bundle, sizeof (bundle));
+
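+  /* An ia64 bundle is 128 bits long: a 5-bit template field followed
+     by three 41-bit instruction slots. The extraction below discards
+     the low template bit, so TEMPLATE == 2 corresponds to the MLX/MLI
+     templates (0x04-0x05), where slots 1 and 2 form one long
+     instruction. */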
+  /* Bundles are always stored in little-endian byte order. */
+ t0 = bfd_getl64 (bundle);
+ t1 = bfd_getl64 (bundle + 8);
+ template = (t0 >> 1) & 0xf;
+ slot[0] = (t0 >> 5) & 0x1ffffffffffLL;
+ slot[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
+ slot[2] = (t1 >> 23) & 0x1ffffffffffLL;
+
+ if (template == 2 && slotnum == 1)
+ {
+      /* Skip the L slot in the MLI template. */
+ slotnum = 2;
+ }
+
+ insn = slot[slotnum];
+
+ return (insn == 0x1c0c9c0 /* break.i 0x070327 */
+ || insn == 0x3c0c9c0); /* break.i 0x0f0327 */
+}
+
+/* Same as ia64_hpux_at_dld_breakpoint_1_p above, with the following
+   differences: It temporarily sets inferior_ptid to PTID, and it also
+   catches and reports any exception being raised. */
+
+int
+ia64_hpux_at_dld_breakpoint_p (ptid_t ptid)
+{
+ struct gdb_exception e;
+ ptid_t saved_ptid = inferior_ptid;
+ int result = 0;
+
+ inferior_ptid = ptid;
+ TRY_CATCH (e, RETURN_MASK_ALL)
+ {
+ result = ia64_hpux_at_dld_breakpoint_1_p (ptid);
+ }
+ inferior_ptid = saved_ptid;
+ if (e.reason < 0)
+ warning (_("error while checking for dld breakpoint: %s"), e.message);
+
+ return result;
+}
+
+/* Handler for library load event: Read the information provided by
+ the loader, and then use it to read the shared library symbols. */
+
+static void
+ia64_hpux_handle_load_event (struct regcache *regcache)
+{
+ CORE_ADDR module_desc_addr;
+ ULONGEST module_desc_size;
+ CORE_ADDR so_path_addr;
+ char so_path[MAXPATHLEN];
+ struct load_module_desc module_desc;
+ struct so_list *new_so;
+
+  /* Extract the data provided by the loader, as follows:
+       - r33: address of the load_module_desc structure;
+       - r34: size of struct load_module_desc;
+       - r35: address of a string holding the shared library path. */
+ regcache_cooked_read_unsigned (regcache, IA64_R32_PSEUDO_REGNUM + 1,
+ &module_desc_addr);
+ regcache_cooked_read_unsigned (regcache, IA64_R32_PSEUDO_REGNUM + 2,
+ &module_desc_size);
+ regcache_cooked_read_unsigned (regcache, IA64_R32_PSEUDO_REGNUM + 3,
+ &so_path_addr);
+
+  if (module_desc_size != sizeof (struct load_module_desc))
+    warning (_("load_module_desc size (%ld) != size returned by kernel (%s)"),
+             (long) sizeof (struct load_module_desc),
+             pulongest (module_desc_size));
+
+ read_memory_string (so_path_addr, so_path, MAXPATHLEN);
+ read_memory (module_desc_addr, (gdb_byte *) &module_desc,
+ sizeof (module_desc));
+
+ /* Create a new so_list element and insert it at the start of our
+ so_list_head (we insert at the start of the list only because
+ it is less work compared to inserting it elsewhere). */
+ new_so = new_so_list (so_path, module_desc);
+ new_so->next = so_list_head;
+ so_list_head = new_so;
+}
+
+/* Update the value of the PC to point to the beginning of the next
+   instruction bundle. */
+
+static void
+ia64_hpux_move_pc_to_next_bundle (struct regcache *regcache)
+{
+ CORE_ADDR pc = regcache_read_pc (regcache);
+
+ pc -= pc & 0xf;
+ pc += 16;
+ ia64_write_pc (regcache, pc);
+}
+
+/* Handle loader events.
+
+ PTID is the ptid of the thread corresponding to the event being
+ handled. Similarly to ia64_hpux_at_dld_breakpoint_1_p, this
+ function assumes that inferior_ptid is set to PTID. */
+
+static void
+ia64_hpux_handle_dld_breakpoint_1 (ptid_t ptid)
+{
+ struct regcache *regcache = get_thread_regcache (ptid);
+ ULONGEST arg0;
+
+  /* The type of event is provided by the loader via r32. */
+ regcache_cooked_read_unsigned (regcache, IA64_R32_PSEUDO_REGNUM, &arg0);
+ switch (arg0)
+ {
+ case BREAK_DE_SVC_LOADED:
+ /* Currently, the only service loads are uld and dld,
+ so we shouldn't need to do anything. Just ignore. */
+ break;
+ case BREAK_DE_LIB_LOADED:
+ ia64_hpux_handle_load_event (regcache);
+      solib_add (NULL, 0, &current_target, auto_solib_add);
+ break;
+ case BREAK_DE_LIB_UNLOADED:
+ case BREAK_DE_LOAD_COMPLETE:
+ case BREAK_DE_BOR:
+ /* Ignore for now. */
+ break;
+ }
+
+ /* Now that we have handled the event, we can move the PC to
+ the next instruction bundle, past the break instruction. */
+ ia64_hpux_move_pc_to_next_bundle (regcache);
+}
+
+/* Same as ia64_hpux_handle_dld_breakpoint_1 above, with the following
+   differences: This function temporarily sets inferior_ptid to PTID,
+   and it also catches and reports any exception. */
+
+void
+ia64_hpux_handle_dld_breakpoint (ptid_t ptid)
+{
+ struct gdb_exception e;
+ ptid_t saved_ptid = inferior_ptid;
+
+ inferior_ptid = ptid;
+ TRY_CATCH (e, RETURN_MASK_ALL)
+ {
+ ia64_hpux_handle_dld_breakpoint_1 (ptid);
+ }
+ inferior_ptid = saved_ptid;
+ if (e.reason < 0)
+ warning (_("error detected while handling dld breakpoint: %s"), e.message);
+}
+
+/* Find the address of the code and data segments in ABFD, and update
+ TEXT_START and DATA_START accordingly. */
+
+static void
+ia64_hpux_find_start_vma (bfd *abfd, CORE_ADDR *text_start,
+ CORE_ADDR *data_start)
+{
+ Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
+ Elf64_Phdr phdr;
+ int i;
+
+ *text_start = 0;
+ *data_start = 0;
+
+ if (bfd_seek (abfd, i_ehdrp->e_phoff, SEEK_SET) == -1)
+ error (_("invalid program header offset in %s"), abfd->filename);
+
+ for (i = 0; i < i_ehdrp->e_phnum; i++)
+ {
+      if (bfd_bread ((PTR) &phdr, sizeof (phdr), abfd) != sizeof (phdr))
+ error (_("failed to read segment %d in %s"), i, abfd->filename);
+
+ if (phdr.p_flags & PF_X
+ && (*text_start == 0 || phdr.p_vaddr < *text_start))
+ *text_start = phdr.p_vaddr;
+
+ if (phdr.p_flags & PF_W
+ && (*data_start == 0 || phdr.p_vaddr < *data_start))
+ *data_start = phdr.p_vaddr;
+ }
+}
+
+/* The "relocate_section_addresses" target_so_ops routine for ia64-hpux. */
+
+static void
+ia64_hpux_relocate_section_addresses (struct so_list *so,
+ struct target_section *sec)
+{
+ CORE_ADDR offset = 0;
+
+ /* If we haven't computed the text & data segment addresses, do so now.
+ We do this here, because we now have direct access to the associated
+ bfd, whereas we would have had to open our own if we wanted to do it
+ while processing the library-load event. */
+ if (so->lm_info->text_start == 0 && so->lm_info->data_start == 0)
+ ia64_hpux_find_start_vma (sec->bfd, &so->lm_info->text_start,
+ &so->lm_info->data_start);
+
+ /* Determine the relocation offset based on which segment
+ the section belongs to. */
+ if ((so->lm_info->text_start < so->lm_info->data_start
+ && sec->addr < so->lm_info->data_start)
+ || (so->lm_info->text_start > so->lm_info->data_start
+ && sec->addr >= so->lm_info->text_start))
+ offset = so->lm_info->module_desc.text_base - so->lm_info->text_start;
+ else if ((so->lm_info->text_start < so->lm_info->data_start
+ && sec->addr >= so->lm_info->data_start)
+ || (so->lm_info->text_start > so->lm_info->data_start
+ && sec->addr < so->lm_info->text_start))
+ offset = so->lm_info->module_desc.data_base - so->lm_info->data_start;
+
+ /* And now apply the relocation. */
+ sec->addr += offset;
+ sec->endaddr += offset;
+
+ /* Best effort to set addr_high/addr_low. This is used only by
+ 'info sharedlibrary'. */
+ if (so->addr_low == 0 || sec->addr < so->addr_low)
+ so->addr_low = sec->addr;
+
+ if (so->addr_high == 0 || sec->endaddr > so->addr_high)
+ so->addr_high = sec->endaddr;
+}
+
+/* The "free_so" target_so_ops routine for ia64-hpux. */
+
+static void
+ia64_hpux_free_so (struct so_list *so)
+{
+ xfree (so->lm_info);
+}
+
+/* The "clear_solib" target_so_ops routine for ia64-hpux. */
+
+static void
+ia64_hpux_clear_solib (void)
+{
+ struct so_list *so;
+
+ while (so_list_head != NULL)
+ {
+ so = so_list_head;
+ so_list_head = so_list_head->next;
+
+ ia64_hpux_free_so (so);
+ xfree (so);
+ }
+}
+
+/* Assuming the inferior just stopped on an EXEC event, return
+ the address of the load_info_t structure. */
+
+static CORE_ADDR
+ia64_hpux_get_load_info_addr (void)
+{
+ struct type *data_ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
+ CORE_ADDR addr;
+ int status;
+
+ /* The address of the load_info_t structure is stored in the 4th
+ argument passed to the initial thread of the process (in other
+ words, in argv[3]). So get the address of these arguments,
+ and extract the 4th one. */
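+  /* Each slot in the argument vector is an 8-byte pointer on this
+     LP64 platform, hence the ADDR + 3 * 8 offset used below. */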
+ status = ttrace (TT_PROC_GET_ARGS, ptid_get_pid (inferior_ptid),
+ 0, (uintptr_t) &addr, sizeof (CORE_ADDR), 0);
+ if (status == -1 && errno)
+ perror_with_name (_("Unable to get argument list"));
+ return (read_memory_typed_address (addr + 3 * 8, data_ptr_type));
+}
+
+/* A structure used to aggregate some information extracted from
+ the dynamic section of the main executable. */
+
+struct dld_info
+{
+ long long dld_flags;
+ CORE_ADDR load_map;
+};
+
+/* Scan the ".dynamic" section referenced by ABFD and DYN_SECT,
+ and extract the information needed to fill in INFO. */
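+/* The section contents are an array of Elf64_Dyn entries, each made of
+   a 64-bit tag (d_tag) followed by a 64-bit value or address (d_un);
+   the parsing loop below walks the buffer one such entry at a time. */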
+
+static void
+ia64_hpux_read_dynamic_info (struct gdbarch *gdbarch, bfd *abfd,
+ asection *dyn_sect, struct dld_info *info)
+{
+ int sect_size;
+ char *buf;
+ char *buf_end;
+
+  /* Make sure that INFO always has initialized data, even if we fail
+     to read the contents of the .dynamic section. */
+ memset (info, 0, sizeof (struct dld_info));
+
+ sect_size = bfd_section_size (abfd, dyn_sect);
+ buf = alloca (sect_size);
+ buf_end = buf + sect_size;
+
+ if (bfd_seek (abfd, dyn_sect->filepos, SEEK_SET) != 0
+ || bfd_bread (buf, sect_size, abfd) != sect_size)
+ error (_("failed to read contents of .dynamic section"));
+
+ for (; buf < buf_end; buf += sizeof (Elf64_Dyn))
+ {
+ Elf64_Dyn *dynp = (Elf64_Dyn *) buf;
+ Elf64_Sxword d_tag;
+
+ d_tag = bfd_h_get_64 (abfd, &dynp->d_tag);
+ switch (d_tag)
+ {
+ case DT_HP_DLD_FLAGS:
+ info->dld_flags = bfd_h_get_64 (abfd, &dynp->d_un);
+ break;
+
+ case DT_HP_LOAD_MAP:
+ {
+ CORE_ADDR load_map_addr = bfd_h_get_64 (abfd, &dynp->d_un.d_ptr);
+
+            if (target_read_memory (load_map_addr,
+                                    (gdb_byte *) &info->load_map,
+                                    sizeof (info->load_map)) != 0)
+ error (_("failed to read load map at %s"),
+ paddress (gdbarch, load_map_addr));
+ }
+ break;
+ }
+ }
+}
+
+/* Wrapper around target_read_memory used with libdl. */
+
+static void *
+ia64_hpux_read_tgt_mem (void *buffer, uint64_t ptr, size_t bufsiz, int ident)
+{
+ if (target_read_memory (ptr, (gdb_byte *) buffer, bufsiz) != 0)
+ return 0;
+ else
+ return buffer;
+}
+
+/* Create a new so_list object for a shared library, and store that
+ new so_list object in our SO_LIST_HEAD list.
+
+ SO_INDEX is an index specifying the placement of the loaded shared
+ library in the dynamic loader's search list. Normally, this index
+ is strictly positive, but an index of -1 refers to the loader itself.
+
+   Return nonzero if the so_list object could be created. A zero
+   return value with a positive SO_INDEX normally means that there are
+   no more entries in the dynamic loader's search list at SO_INDEX or
+   beyond. */
+
+static int
+ia64_hpux_add_so_from_dld_info (struct dld_info info, int so_index)
+{
+ struct load_module_desc module_desc;
+ uint64_t so_handle;
+ char *so_path;
+ struct so_list *so;
+
+ so_handle = dlgetmodinfo (so_index, &module_desc, sizeof (module_desc),
+ ia64_hpux_read_tgt_mem, 0, info.load_map);
+
+ if (so_handle == 0)
+ /* No such entry. We probably reached the end of the list. */
+ return 0;
+
+ so_path = dlgetname (&module_desc, sizeof (module_desc),
+ ia64_hpux_read_tgt_mem, 0, info.load_map);
+ if (so_path == NULL)
+ {
+ /* Should never happen, but let's not crash if it does. */
+ warning (_("unable to get shared library name, symbols not loaded"));
+ return 0;
+ }
+
+  /* Create a new so_list element and insert it at the start of our list.
+     The order is not critically important, and inserting at the start
+     of the list is the least amount of work. */
+ so = new_so_list (so_path, module_desc);
+ so->next = so_list_head;
+ so_list_head = so;
+
+ return 1;
+}
+
+/* Assuming we just attached to a process, update our list of shared
+ libraries (SO_LIST_HEAD) as well as GDB's list. */
+
+static void
+ia64_hpux_solib_add_after_attach (void)
+{
+ bfd *abfd;
+ asection *dyn_sect;
+ struct dld_info info;
+ int i;
+
+ if (symfile_objfile == NULL)
+ return;
+
+ abfd = symfile_objfile->obfd;
+ dyn_sect = bfd_get_section_by_name (abfd, ".dynamic");
+
+ if (dyn_sect == NULL || bfd_section_size (abfd, dyn_sect) == 0)
+ return;
+
+ ia64_hpux_read_dynamic_info (get_objfile_arch (symfile_objfile), abfd,
+ dyn_sect, &info);
+
+ if ((info.dld_flags & DT_HP_DEBUG_PRIVATE) == 0)
+ {
+ warning (_(
+"The shared libraries were not privately mapped; setting a breakpoint\n\
+in a shared library will not work until you rerun the program.\n\
+Use the following command to enable debugging of shared libraries.\n\
+chatr +dbg enable a.out"));
+ }
+
+ /* Read the symbols of the dynamic loader (dld.so). */
+ ia64_hpux_add_so_from_dld_info (info, -1);
+
+ /* Read the symbols of all the other shared libraries. */
+ for (i = 1; ; i++)
+ if (!ia64_hpux_add_so_from_dld_info (info, i))
+ break; /* End of list. */
+
+ /* Resync the library list at the core level. */
+  solib_add (NULL, 1, &current_target, auto_solib_add);
+}
+
+/* The "create_inferior_hook" target_so_ops routine for ia64-hpux. */
+
+static void
+ia64_hpux_solib_create_inferior_hook (int from_tty)
+{
+ CORE_ADDR load_info_addr;
+ load_info_t load_info;
+
+ /* Initially, we were thinking about adding a check that the program
+ (accessible through symfile_objfile) was linked against some shared
+ libraries, by searching for a ".dynamic" section. However, could
+ this break in the case of a statically linked program that later
+ uses dlopen? Programs that are fully statically linked are very
+ rare, and we will worry about them when we encounter one that
+ causes trouble. */
+
+ /* Set the LI_TRACE flag in the load_info_t structure. This enables
+ notifications when shared libraries are being mapped. */
+ load_info_addr = ia64_hpux_get_load_info_addr ();
+ read_memory (load_info_addr, (gdb_byte *) &load_info, sizeof (load_info));
+ load_info.li_flags |= LI_TRACE;
+ write_memory (load_info_addr, (gdb_byte *) &load_info, sizeof (load_info));
+
+  /* If we just attached to our process, some shared libraries have
+     already been mapped. Find out which ones they are... */
+ if (current_inferior ()->attach_flag)
+ ia64_hpux_solib_add_after_attach ();
+}
+
+/* The "special_symbol_handling" target_so_ops routine for ia64-hpux. */
+
+static void
+ia64_hpux_special_symbol_handling (void)
+{
+ /* Nothing to do. */
+}
+
+/* The "current_sos" target_so_ops routine for ia64-hpux. */
+
+static struct so_list *
+ia64_hpux_current_sos (void)
+{
+ /* Return a deep copy of our own list. */
+ struct so_list *new_head = NULL, *prev_new_so = NULL;
+ struct so_list *our_so;
+
+ for (our_so = so_list_head; our_so != NULL; our_so = our_so->next)
+ {
+ struct so_list *new_so;
+
+ new_so = new_so_list (our_so->so_name, our_so->lm_info->module_desc);
+ if (prev_new_so != NULL)
+ prev_new_so->next = new_so;
+ prev_new_so = new_so;
+ if (new_head == NULL)
+ new_head = new_so;
+ }
+
+ return new_head;
+}
+
+/* The "open_symbol_file_object" target_so_ops routine for ia64-hpux. */
+
+static int
+ia64_hpux_open_symbol_file_object (void *from_ttyp)
+{
+ return 0;
+}
+
+/* The "in_dynsym_resolve_code" target_so_ops routine for ia64-hpux. */
+
+static int
+ia64_hpux_in_dynsym_resolve_code (CORE_ADDR pc)
+{
+ return 0;
+}
+
+/* If FADDR is the address of a function inside one of the shared
+ libraries, return the shared library linkage address. */
+
+CORE_ADDR
+ia64_hpux_get_solib_linkage_addr (CORE_ADDR faddr)
+{
+ struct so_list *so = so_list_head;
+
+ while (so != NULL)
+ {
+ struct load_module_desc module_desc = so->lm_info->module_desc;
+
+ if (module_desc.text_base <= faddr
+ && (module_desc.text_base + module_desc.text_size) > faddr)
+ return module_desc.linkage_ptr;
+
+ so = so->next;
+ }
+
+ return 0;
+}
+
+/* Create a new target_so_ops structure suitable for ia64-hpux, and
+ return its address. */
+
+static struct target_so_ops *
+ia64_hpux_target_so_ops (void)
+{
+ struct target_so_ops *ops = XZALLOC (struct target_so_ops);
+
+ ops->relocate_section_addresses = ia64_hpux_relocate_section_addresses;
+ ops->free_so = ia64_hpux_free_so;
+ ops->clear_solib = ia64_hpux_clear_solib;
+ ops->solib_create_inferior_hook = ia64_hpux_solib_create_inferior_hook;
+ ops->special_symbol_handling = ia64_hpux_special_symbol_handling;
+ ops->current_sos = ia64_hpux_current_sos;
+ ops->open_symbol_file_object = ia64_hpux_open_symbol_file_object;
+ ops->in_dynsym_resolve_code = ia64_hpux_in_dynsym_resolve_code;
+ ops->bfd_open = solib_bfd_open;
+
+ return ops;
+}
+
+/* Prevent warning from -Wmissing-prototypes. */
+void _initialize_solib_ia64_hpux (void);
+
+void
+_initialize_solib_ia64_hpux (void)
+{
+ ia64_hpux_so_ops = ia64_hpux_target_so_ops ();
+}