/* GNU/Linux/x86-64 specific low level interface, for the remote server
for GDB.
- Copyright (C) 2002-2020 Free Software Foundation, Inc.
+ Copyright (C) 2002-2024 Free Software Foundation, Inc.
This file is part of GDB.
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
+#include "nat/x86-xstate.h"
#include "nat/gdb_ptrace.h"
#ifdef __x86_64__
#include "linux-x86-tdesc.h"
#ifdef __x86_64__
-static struct target_desc *tdesc_amd64_linux_no_xml;
+static target_desc_up tdesc_amd64_linux_no_xml;
#endif
-static struct target_desc *tdesc_i386_linux_no_xml;
+static target_desc_up tdesc_i386_linux_no_xml;
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
/* Backward compatibility for gdb without XML support. */
-static const char *xmltarget_i386_linux_no_xml = "@<target>\
+static const char xmltarget_i386_linux_no_xml[] = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#ifdef __x86_64__
-static const char *xmltarget_amd64_linux_no_xml = "@<target>\
+static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
{
public:
- /* Update all the target description of all processes; a new GDB
- connected, and it may or not support xml target descriptions. */
- void update_xmltarget ();
-
const regs_info *get_regs_info () override;
+ const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
+
+ bool supports_z_point_type (char z_type) override;
+
+ void process_qsupported (gdb::array_view<const char * const> features) override;
+
+ bool supports_tracepoints () override;
+
+ bool supports_fast_tracepoints () override;
+
+ int install_fast_tracepoint_jump_pad
+ (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
+ CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
+ CORE_ADDR *trampoline, ULONGEST *trampoline_size,
+ unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
+ CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
+ char *err) override;
+
+ int get_min_fast_tracepoint_insn_len () override;
+
+ struct emit_ops *emit_ops () override;
+
+ int get_ipa_tdesc_idx () override;
+
protected:
void low_arch_setup () override;
CORE_ADDR low_get_pc (regcache *regcache) override;
void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
+
+ int low_decr_pc_after_break () override;
+
+ bool low_breakpoint_at (CORE_ADDR pc) override;
+
+ int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
+ int size, raw_breakpoint *bp) override;
+
+ int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
+ int size, raw_breakpoint *bp) override;
+
+ bool low_stopped_by_watchpoint () override;
+
+ CORE_ADDR low_stopped_data_address () override;
+
+ /* collect_ptrace_register/supply_ptrace_register are not needed in the
+ native i386 case (no registers smaller than an xfer unit), and are not
+ used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
+
+ /* Need to fix up i386 siginfo if host is amd64. */
+ bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
+ int direction) override;
+
+ arch_process_info *low_new_process () override;
+
+ void low_delete_process (arch_process_info *info) override;
+
+ void low_new_thread (lwp_info *) override;
+
+ void low_delete_thread (arch_lwp_info *) override;
+
+ void low_new_fork (process_info *parent, process_info *child) override;
+
+ void low_prepare_to_resume (lwp_info *lwp) override;
+
+ int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
+
+ bool low_supports_range_stepping () override;
+
+ bool low_supports_catch_syscall () override;
+
+ void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
+
+private:
+
+ /* Update the target descriptions of all processes; a new GDB
+ connected, and it may or may not support xml target descriptions. */
+ void update_xmltarget ();
};
/* The singleton target ops object. */
-1,
-1, -1, -1, -1, -1, -1, -1, -1,
ORIG_RAX * 8,
-#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
21 * 8, 22 * 8,
-#else
- -1, -1,
-#endif
-1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
-1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
-1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
#ifdef __x86_64__
-/* Returns true if the current inferior belongs to a x86-64 process,
- per the tdesc. */
+/* Returns true if THREAD belongs to an x86-64 process, per the tdesc. */
static int
-is_64bit_tdesc (void)
+is_64bit_tdesc (thread_info *thread)
{
- struct regcache *regcache = get_thread_regcache (current_thread, 0);
+ struct regcache *regcache = get_thread_regcache (thread, 0);
return register_size (regcache->tdesc, 0) == 8;
}
lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
- int use_64bit = is_64bit_tdesc ();
+ lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
+ gdb_assert (lwp != nullptr);
+ int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));
if (use_64bit)
{
don't read anything from the address, and treat it as opaque; it's
the address itself that we assume is unique per-thread. */
-static int
-x86_get_thread_area (int lwpid, CORE_ADDR *addr)
+int
+x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
+ lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
+ gdb_assert (lwp != nullptr);
#ifdef __x86_64__
- int use_64bit = is_64bit_tdesc ();
+ int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));
if (use_64bit)
{
#endif
{
- struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
struct thread_info *thr = get_lwp_thread (lwp);
struct regcache *regcache = get_thread_regcache (thr, 1);
unsigned int desc[4];
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
- if (is_64bit_tdesc ())
+ if (is_64bit_tdesc (current_thread))
return false;
#endif
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
- if (is_64bit_tdesc ())
+ if (is_64bit_tdesc (current_thread))
return false;
#endif
return regno >= I386_NUM_REGS;
}
+static void
+collect_register_i386 (struct regcache *regcache, int regno, void *buf)
+{
+ collect_register (regcache, regno, buf);
+
+#ifdef __x86_64__
+ /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
+ space reserved in buf for the register is 8 bytes. Make sure the entire
+ reserved space is initialized. */
+
+ gdb_assert (register_size (regcache->tdesc, regno) == 4);
+
+ if (regno == RAX)
+ {
+ /* Sign extend EAX value to avoid potential syscall restart
+ problems.
+
+ See amd64_linux_collect_native_gregset() in
+ gdb/amd64-linux-nat.c for a detailed explanation. */
+ *(int64_t *) buf = *(int32_t *) buf;
+ }
+ else
+ {
+ /* Zero-extend. */
+ *(uint64_t *) buf = *(uint32_t *) buf;
+ }
+#endif
+}
+
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
if (x86_64_regmap[i] != -1)
collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
-#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
- {
- unsigned long base;
- int lwpid = lwpid_of (current_thread);
-
- collect_register_by_name (regcache, "fs_base", &base);
- ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
-
- collect_register_by_name (regcache, "gs_base", &base);
- ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
- }
-#endif
-
return;
}
-
- /* 32-bit inferior registers need to be zero-extended.
- Callers would read uninitialized memory otherwise. */
- memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif
for (i = 0; i < I386_NUM_REGS; i++)
- collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
-
- collect_register_by_name (regcache, "orig_eax",
- ((char *) buf) + ORIG_EAX * REGSIZE);
-
-#ifdef __x86_64__
- /* Sign extend EAX value to avoid potential syscall restart
- problems.
-
- See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
- for a detailed explanation. */
- if (register_size (regcache->tdesc, 0) == 4)
- {
- void *ptr = ((gdb_byte *) buf
- + i386_regmap[find_regno (regcache->tdesc, "eax")]);
+ collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);
- *(int64_t *) ptr = *(int32_t *) ptr;
- }
-#endif
+ /* Handle ORIG_EAX, which is not in i386_regmap. */
+ collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
+ ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
if (x86_64_regmap[i] != -1)
supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
-#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
- {
- unsigned long base;
- int lwpid = lwpid_of (current_thread);
-
- if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
- supply_register_by_name (regcache, "fs_base", &base);
-
- if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
- supply_register_by_name (regcache, "gs_base", &base);
- }
-#endif
return;
}
#endif
supply_register_by_name (regcache, "eip", &newpc);
}
}
+
+int
+x86_target::low_decr_pc_after_break ()
+{
+ return 1;
+}
+
\f
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
-static int
-x86_breakpoint_at (CORE_ADDR pc)
+bool
+x86_target::low_breakpoint_at (CORE_ADDR pc)
{
unsigned char c;
- the_target->read_memory (pc, &c, 1);
+ read_memory (pc, &c, 1);
if (c == 0xCC)
- return 1;
+ return true;
- return 0;
+ return false;
}
\f
/* Low-level function vector. */
\f
/* Breakpoint/Watchpoint support. */
-static int
-x86_supports_z_point_type (char z_type)
+bool
+x86_target::supports_z_point_type (char z_type)
{
switch (z_type)
{
case Z_PACKET_HW_BP:
case Z_PACKET_WRITE_WP:
case Z_PACKET_ACCESS_WP:
- return 1;
+ return true;
default:
- return 0;
+ return false;
}
}
-static int
-x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
- int size, struct raw_breakpoint *bp)
+int
+x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
+ int size, raw_breakpoint *bp)
{
struct process_info *proc = current_process ();
}
}
-static int
-x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
- int size, struct raw_breakpoint *bp)
+int
+x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
+ int size, raw_breakpoint *bp)
{
struct process_info *proc = current_process ();
}
}
-static int
-x86_stopped_by_watchpoint (void)
+bool
+x86_target::low_stopped_by_watchpoint ()
{
struct process_info *proc = current_process ();
return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}
-static CORE_ADDR
-x86_stopped_data_address (void)
+CORE_ADDR
+x86_target::low_stopped_data_address ()
{
struct process_info *proc = current_process ();
CORE_ADDR addr;
\f
/* Called when a new process is created. */
-static struct arch_process_info *
-x86_linux_new_process (void)
+arch_process_info *
+x86_target::low_new_process ()
{
struct arch_process_info *info = XCNEW (struct arch_process_info);
/* Called when a process is being deleted. */
-static void
-x86_linux_delete_process (struct arch_process_info *info)
+void
+x86_target::low_delete_process (arch_process_info *info)
{
xfree (info);
}
-/* Target routine for linux_new_fork. */
+void
+x86_target::low_new_thread (lwp_info *lwp)
+{
+ /* This comes from nat/. */
+ x86_linux_new_thread (lwp);
+}
-static void
-x86_linux_new_fork (struct process_info *parent, struct process_info *child)
+void
+x86_target::low_delete_thread (arch_lwp_info *alwp)
+{
+ /* This comes from nat/. */
+ x86_linux_delete_thread (alwp);
+}
+
+/* Target routine for new_fork. */
+
+void
+x86_target::low_new_fork (process_info *parent, process_info *child)
{
/* These are allocated by linux_add_process. */
gdb_assert (parent->priv != NULL
*child->priv->arch_private = *parent->priv->arch_private;
}
+void
+x86_target::low_prepare_to_resume (lwp_info *lwp)
+{
+ /* This comes from nat/. */
+ x86_linux_prepare_to_resume (lwp);
+}
+
/* See nat/x86-dregs.h. */
struct x86_debug_reg_state *
from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
INF. */
-static int
-x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
+bool
+x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
unsigned int machine;
int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
/* Is the inferior 32-bit? If so, then fixup the siginfo object. */
- if (!is_64bit_tdesc ())
+ if (!is_64bit_tdesc (current_thread))
return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
FIXUP_32);
/* No fixup for native x32 GDB. */
FIXUP_X32);
#endif
- return 0;
+ return false;
}
\f
static int use_xml;
int xcr0_features;
int tid;
static uint64_t xcr0;
+ static int xsave_len;
struct regset_info *regset;
tid = lwpid_of (current_thread);
if (!use_xml)
{
- x86_xcr0 = X86_XSTATE_SSE_MASK;
-
/* Don't use XML. */
#ifdef __x86_64__
if (machine == EM_X86_64)
- return tdesc_amd64_linux_no_xml;
+ return tdesc_amd64_linux_no_xml.get ();
else
#endif
- return tdesc_i386_linux_no_xml;
+ return tdesc_i386_linux_no_xml.get ();
}
if (have_ptrace_getregset == -1)
xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
/ sizeof (uint64_t))];
+ xsave_len = x86_xsave_length ();
+
/* Use PTRACE_GETREGSET if it is available. */
for (regset = x86_regsets;
regset->fill_function != NULL; regset++)
if (regset->get_request == PTRACE_GETREGSET)
- regset->size = X86_XSTATE_SIZE (xcr0);
+ regset->size = xsave_len;
else if (regset->type != GENERAL_REGS)
regset->size = 0;
}
&& (xcr0 & X86_XSTATE_ALL_MASK));
if (xcr0_features)
- x86_xcr0 = xcr0;
+ i387_set_xsave_mask (xcr0, xsave_len);
if (machine == EM_X86_64)
{
void
x86_target::update_xmltarget ()
{
- struct thread_info *saved_thread = current_thread;
+ scoped_restore_current_thread restore_thread;
/* Before changing the register cache's internal layout, flush the
contents of the current valid caches back to the threads, and
int pid = proc->pid;
/* Look up any thread of this process. */
- current_thread = find_any_thread_of_pid (pid);
+ switch_to_thread (find_any_thread_of_pid (pid));
low_arch_setup ();
});
-
- current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=". Update the buffer size for
PTRACE_GETREGSET. */
-static void
-x86_linux_process_qsupported (char **features, int count)
+void
+x86_target::process_qsupported (gdb::array_view<const char * const> features)
{
- int i;
-
/* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
with "i386" in qSupported query, it supports x86 XML target
descriptions. */
use_xml = 0;
- for (i = 0; i < count; i++)
- {
- const char *feature = features[i];
+ for (const char *feature : features)
+ {
if (startswith (feature, "xmlRegisters="))
{
char *copy = xstrdup (feature + 13);
free (copy);
}
}
- the_x86_target.update_xmltarget ();
+
+ update_xmltarget ();
}
/* Common for x86/x86-64. */
x86_target::get_regs_info ()
{
#ifdef __x86_64__
- if (is_64bit_tdesc ())
+ if (is_64bit_tdesc (current_thread))
return &amd64_linux_regs_info;
else
#endif
current_process ()->tdesc = x86_linux_read_description ();
}
+bool
+x86_target::low_supports_catch_syscall ()
+{
+ return true;
+}
+
/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
code. This should only be called if LWP got a SYSCALL_SIGTRAP. */
-static void
-x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
+void
+x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
int use_64bit = register_size (regcache->tdesc, 0) == 8;
collect_register_by_name (regcache, "orig_eax", sysno);
}
-static int
-x86_supports_tracepoints (void)
+bool
+x86_target::supports_tracepoints ()
{
- return 1;
+ return true;
}
static void
return 0;
}
-static int
-x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
- CORE_ADDR collector,
- CORE_ADDR lockaddr,
- ULONGEST orig_size,
- CORE_ADDR *jump_entry,
- CORE_ADDR *trampoline,
- ULONGEST *trampoline_size,
- unsigned char *jjump_pad_insn,
- ULONGEST *jjump_pad_insn_size,
- CORE_ADDR *adjusted_insn_addr,
- CORE_ADDR *adjusted_insn_addr_end,
- char *err)
+bool
+x86_target::supports_fast_tracepoints ()
+{
+ return true;
+}
+
+int
+x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
+ CORE_ADDR tpaddr,
+ CORE_ADDR collector,
+ CORE_ADDR lockaddr,
+ ULONGEST orig_size,
+ CORE_ADDR *jump_entry,
+ CORE_ADDR *trampoline,
+ ULONGEST *trampoline_size,
+ unsigned char *jjump_pad_insn,
+ ULONGEST *jjump_pad_insn_size,
+ CORE_ADDR *adjusted_insn_addr,
+ CORE_ADDR *adjusted_insn_addr_end,
+ char *err)
{
#ifdef __x86_64__
- if (is_64bit_tdesc ())
+ if (is_64bit_tdesc (current_thread))
return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
collector, lockaddr,
orig_size, jump_entry,
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
architectures. */
-static int
-x86_get_min_fast_tracepoint_insn_len (void)
+int
+x86_target::get_min_fast_tracepoint_insn_len ()
{
static int warned_about_fast_tracepoints = 0;
#ifdef __x86_64__
/* On x86-64, 5-byte jump instructions with a 4-byte offset are always
used for fast tracepoints. */
- if (is_64bit_tdesc ())
+ if (is_64bit_tdesc (current_thread))
return 5;
#endif
{
CORE_ADDR buildaddr = current_insn_ptr;
- if (debug_threads)
- debug_printf ("Adding %d bytes of insn at %s\n",
- len, paddress (buildaddr));
+ threads_debug_printf ("Adding %d bytes of insn at %s",
+ len, paddress (buildaddr));
append_insns (&buildaddr, len, start);
current_insn_ptr = buildaddr;
*size_p = 4;
}
-struct emit_ops amd64_emit_ops =
+static emit_ops amd64_emit_ops =
{
amd64_emit_prologue,
amd64_emit_epilogue,
*size_p = 4;
}
-struct emit_ops i386_emit_ops =
+static emit_ops i386_emit_ops =
{
i386_emit_prologue,
i386_emit_epilogue,
};
-static struct emit_ops *
-x86_emit_ops (void)
+emit_ops *
+x86_target::emit_ops ()
{
#ifdef __x86_64__
- if (is_64bit_tdesc ())
+ if (is_64bit_tdesc (current_thread))
return &amd64_emit_ops;
else
#endif
return &i386_emit_ops;
}
-/* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
+/* Implementation of target ops method "sw_breakpoint_from_kind". */
-static const gdb_byte *
-x86_sw_breakpoint_from_kind (int kind, int *size)
+const gdb_byte *
+x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
*size = x86_breakpoint_len;
return x86_breakpoint;
}
-static int
-x86_supports_range_stepping (void)
-{
- return 1;
-}
-
-/* Implementation of linux_target_ops method "supports_hardware_single_step".
- */
-
-static int
-x86_supports_hardware_single_step (void)
+bool
+x86_target::low_supports_range_stepping ()
{
- return 1;
+ return true;
}
-static int
-x86_get_ipa_tdesc_idx (void)
+int
+x86_target::get_ipa_tdesc_idx ()
{
struct regcache *regcache = get_thread_regcache (current_thread, 0);
const struct target_desc *tdesc = regcache->tdesc;
return amd64_get_ipa_tdesc_idx (tdesc);
#endif
- if (tdesc == tdesc_i386_linux_no_xml)
+ if (tdesc == tdesc_i386_linux_no_xml.get ())
return X86_TDESC_SSE;
return i386_get_ipa_tdesc_idx (tdesc);
}
-/* This is initialized assuming an amd64 target.
- x86_arch_setup will correct it for i386 or amd64 targets. */
-
-struct linux_target_ops the_low_target =
-{
- NULL, /* breakpoint_kind_from_pc */
- x86_sw_breakpoint_from_kind,
- NULL,
- 1,
- x86_breakpoint_at,
- x86_supports_z_point_type,
- x86_insert_point,
- x86_remove_point,
- x86_stopped_by_watchpoint,
- x86_stopped_data_address,
- /* collect_ptrace_register/supply_ptrace_register are not needed in the
- native i386 case (no registers smaller than an xfer unit), and are not
- used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
- NULL,
- NULL,
- /* need to fix up i386 siginfo if host is amd64 */
- x86_siginfo_fixup,
- x86_linux_new_process,
- x86_linux_delete_process,
- x86_linux_new_thread,
- x86_linux_delete_thread,
- x86_linux_new_fork,
- x86_linux_prepare_to_resume,
- x86_linux_process_qsupported,
- x86_supports_tracepoints,
- x86_get_thread_area,
- x86_install_fast_tracepoint_jump_pad,
- x86_emit_ops,
- x86_get_min_fast_tracepoint_insn_len,
- x86_supports_range_stepping,
- NULL, /* breakpoint_kind_from_current_state */
- x86_supports_hardware_single_step,
- x86_get_syscall_trapinfo,
- x86_get_ipa_tdesc_idx,
-};
-
/* The linux target ops object. */
linux_process_target *the_linux_target = &the_x86_target;
/* Initialize the Linux target descriptions. */
#ifdef __x86_64__
tdesc_amd64_linux_no_xml = allocate_target_description ();
- copy_target_description (tdesc_amd64_linux_no_xml,
+ copy_target_description (tdesc_amd64_linux_no_xml.get (),
amd64_linux_read_description (X86_XSTATE_SSE_MASK,
false));
tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
tdesc_i386_linux_no_xml = allocate_target_description ();
- copy_target_description (tdesc_i386_linux_no_xml,
+ copy_target_description (tdesc_i386_linux_no_xml.get (),
i386_linux_read_description (X86_XSTATE_SSE_MASK));
tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;