00001 /* GNU/Linux/x86-64 specific low level interface, for the remote server
00002    for GDB.
00003    Copyright (C) 2002-2013 Free Software Foundation, Inc.
00004 
00005    This file is part of GDB.
00006 
00007    This program is free software; you can redistribute it and/or modify
00008    it under the terms of the GNU General Public License as published by
00009    the Free Software Foundation; either version 3 of the License, or
00010    (at your option) any later version.
00011 
00012    This program is distributed in the hope that it will be useful,
00013    but WITHOUT ANY WARRANTY; without even the implied warranty of
00014    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00015    GNU General Public License for more details.
00016 
00017    You should have received a copy of the GNU General Public License
00018    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
00019 
00020 #include <stddef.h>
00021 #include <signal.h>
00022 #include <limits.h>
00023 #include <inttypes.h>
00024 #include "server.h"
00025 #include "linux-low.h"
00026 #include "i387-fp.h"
00027 #include "i386-low.h"
00028 #include "i386-xstate.h"
00029 #include "elf/common.h"
00030 
00031 #include "gdb_proc_service.h"
00032 #include "agent.h"
00033 #include "tdesc.h"
00034 #include "tracepoint.h"
00035 #include "ax.h"
00036 
00037 #ifdef __x86_64__
00038 /* Defined in auto-generated file amd64-linux.c.  */
00039 void init_registers_amd64_linux (void);
00040 extern const struct target_desc *tdesc_amd64_linux;
00041 
00042 /* Defined in auto-generated file amd64-avx-linux.c.  */
00043 void init_registers_amd64_avx_linux (void);
00044 extern const struct target_desc *tdesc_amd64_avx_linux;
00045 
00046 /* Defined in auto-generated file x32-linux.c.  */
00047 void init_registers_x32_linux (void);
00048 extern const struct target_desc *tdesc_x32_linux;
00049 
00050 /* Defined in auto-generated file x32-avx-linux.c.  */
00051 void init_registers_x32_avx_linux (void);
00052 extern const struct target_desc *tdesc_x32_avx_linux;
00053 #endif
00054 
00055 /* Defined in auto-generated file i386-linux.c.  */
00056 void init_registers_i386_linux (void);
00057 extern const struct target_desc *tdesc_i386_linux;
00058 
00059 /* Defined in auto-generated file i386-mmx-linux.c.  */
00060 void init_registers_i386_mmx_linux (void);
00061 extern const struct target_desc *tdesc_i386_mmx_linux;
00062 
00063 /* Defined in auto-generated file i386-avx-linux.c.  */
00064 void init_registers_i386_avx_linux (void);
00065 extern const struct target_desc *tdesc_i386_avx_linux;
00066 
00067 #ifdef __x86_64__
00068 static struct target_desc *tdesc_amd64_linux_no_xml;
00069 #endif
00070 static struct target_desc *tdesc_i386_linux_no_xml;
00071 
00072 
00073 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
00074 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
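/* Templates for the jumps patched in by the fast tracepoint code:
   JUMP_INSN is the 5-byte "jmp rel32" (opcode 0xe9) whose 32-bit
   displacement is filled in before use; SMALL_JUMP_INSN is the 4-byte
   operand-size-prefixed "jmp rel16" form.  */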
00075 
00076 /* Backward compatibility for gdb without XML support.  */
00077 
00078 static const char *xmltarget_i386_linux_no_xml = "@<target>\
00079 <architecture>i386</architecture>\
00080 <osabi>GNU/Linux</osabi>\
00081 </target>";
00082 
00083 #ifdef __x86_64__
00084 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
00085 <architecture>i386:x86-64</architecture>\
00086 <osabi>GNU/Linux</osabi>\
00087 </target>";
00088 #endif
00089 
00090 #include <sys/reg.h>
00091 #include <sys/procfs.h>
00092 #include <sys/ptrace.h>
00093 #include <sys/uio.h>
00094 
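/* Fallback definitions for the regset ptrace requests used below;
   older kernel headers may not provide them.  The values match the
   Linux UAPI request numbers.  */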
00095 #ifndef PTRACE_GETREGSET
00096 #define PTRACE_GETREGSET        0x4204
00097 #endif
00098 
00099 #ifndef PTRACE_SETREGSET
00100 #define PTRACE_SETREGSET        0x4205
00101 #endif
00102 
00103 
00104 #ifndef PTRACE_GET_THREAD_AREA
00105 #define PTRACE_GET_THREAD_AREA 25
00106 #endif
00107 
00108 /* This definition comes from prctl.h, but some kernels may not have it.  */
00109 #ifndef PTRACE_ARCH_PRCTL
00110 #define PTRACE_ARCH_PRCTL      30
00111 #endif
00112 
00113 /* The following definitions come from prctl.h, but may be absent
00114    for certain configurations.  */
00115 #ifndef ARCH_GET_FS
00116 #define ARCH_SET_GS 0x1001
00117 #define ARCH_SET_FS 0x1002
00118 #define ARCH_GET_FS 0x1003
00119 #define ARCH_GET_GS 0x1004
00120 #endif
00121 
00122 /* Per-process arch-specific data we want to keep.  */
00123 
00124 struct arch_process_info
00125 {
00126   struct i386_debug_reg_state debug_reg_state;
00127 };
00128 
00129 /* Per-thread arch-specific data we want to keep.  */
00130 
00131 struct arch_lwp_info
00132 {
00133   /* Non-zero if our copy differs from what's recorded in the thread.  */
00134   int debug_registers_changed;
00135 };
00136 
00137 #ifdef __x86_64__
00138 
00139 /* Mapping between the general-purpose registers in `struct user'
00140    format and GDB's register array layout.
00141    Note that the transfer layout uses 64-bit regs.  */
00142 static /*const*/ int i386_regmap[] = 
00143 {
00144   RAX * 8, RCX * 8, RDX * 8, RBX * 8,
00145   RSP * 8, RBP * 8, RSI * 8, RDI * 8,
00146   RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
00147   DS * 8, ES * 8, FS * 8, GS * 8
00148 };
00149 
00150 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
00151 
00152 /* So that the code below doesn't have to care whether it's i386 or amd64.  */
00153 #define ORIG_EAX ORIG_RAX
00154 
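/* Likewise for amd64.  A -1 entry marks a register that is not part
   of this general-purpose layout; such registers are transferred
   through the FP and xstate regsets instead, and
   x86_fill_gregset/x86_store_gregset skip them.  */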
00155 static const int x86_64_regmap[] =
00156 {
00157   RAX * 8, RBX * 8, RCX * 8, RDX * 8,
00158   RSI * 8, RDI * 8, RBP * 8, RSP * 8,
00159   R8 * 8, R9 * 8, R10 * 8, R11 * 8,
00160   R12 * 8, R13 * 8, R14 * 8, R15 * 8,
00161   RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
00162   DS * 8, ES * 8, FS * 8, GS * 8,
00163   -1, -1, -1, -1, -1, -1, -1, -1,
00164   -1, -1, -1, -1, -1, -1, -1, -1,
00165   -1, -1, -1, -1, -1, -1, -1, -1,
00166   -1, -1, -1, -1, -1, -1, -1, -1, -1,
00167   ORIG_RAX * 8
00168 };
00169 
00170 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
00171 
00172 #else /* ! __x86_64__ */
00173 
00174 /* Mapping between the general-purpose registers in `struct user'
00175    format and GDB's register array layout.  */
00176 static /*const*/ int i386_regmap[] = 
00177 {
00178   EAX * 4, ECX * 4, EDX * 4, EBX * 4,
00179   UESP * 4, EBP * 4, ESI * 4, EDI * 4,
00180   EIP * 4, EFL * 4, CS * 4, SS * 4,
00181   DS * 4, ES * 4, FS * 4, GS * 4
00182 };
00183 
00184 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
00185 
00186 #endif
00187 
00188 #ifdef __x86_64__
00189 
00190 /* Returns true if the current inferior belongs to an x86-64 process,
00191    per the tdesc.  */
00192 
00193 static int
00194 is_64bit_tdesc (void)
00195 {
00196   struct regcache *regcache = get_thread_regcache (current_inferior, 0);
00197 
00198   return register_size (regcache->tdesc, 0) == 8;
00199 }
00200 
00201 #endif
00202 
00203 
00204 /* Called by libthread_db.  */
00205 
00206 ps_err_e
00207 ps_get_thread_area (const struct ps_prochandle *ph,
00208                     lwpid_t lwpid, int idx, void **base)
00209 {
00210 #ifdef __x86_64__
00211   int use_64bit = is_64bit_tdesc ();
00212 
00213   if (use_64bit)
00214     {
00215       switch (idx)
00216         {
00217         case FS:
00218           if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
00219             return PS_OK;
00220           break;
00221         case GS:
00222           if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
00223             return PS_OK;
00224           break;
00225         default:
00226           return PS_BADADDR;
00227         }
00228       return PS_ERR;
00229     }
00230 #endif
00231 
00232   {
00233     unsigned int desc[4];
00234 
00235     if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
00236                 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
00237       return PS_ERR;
00238 
00239     /* Ensure we properly extend the value to 64-bits for x86_64.  */
00240     *base = (void *) (uintptr_t) desc[1];
00241     return PS_OK;
00242   }
00243 }
00244 
00245 /* Get the thread area address.  This is used to recognize which
00246    thread is which when tracing with the in-process agent library.  We
00247    don't read anything from the address, and treat it as opaque; it's
00248    the address itself that we assume is unique per-thread.  */
00249 
00250 static int
00251 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
00252 {
00253 #ifdef __x86_64__
00254   int use_64bit = is_64bit_tdesc ();
00255 
00256   if (use_64bit)
00257     {
00258       void *base;
00259       if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
00260         {
00261           *addr = (CORE_ADDR) (uintptr_t) base;
00262           return 0;
00263         }
00264 
00265       return -1;
00266     }
00267 #endif
00268 
00269   {
00270     struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
00271     struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
00272     unsigned int desc[4];
00273     ULONGEST gs = 0;
00274     const int reg_thread_area = 3; /* Shift off the selector's RPL/TI bits to get the GDT entry index.  */
00275     int idx;
00276 
00277     collect_register_by_name (regcache, "gs", &gs);
00278 
00279     idx = gs >> reg_thread_area;
00280 
00281     if (ptrace (PTRACE_GET_THREAD_AREA,
00282                 lwpid_of (lwp),
00283                 (void *) (long) idx, (unsigned long) &desc) < 0)
00284       return -1;
00285 
00286     *addr = desc[1];
00287     return 0;
00288   }
00289 }
00290 
00291 
00292 
00293 static int
00294 x86_cannot_store_register (int regno)
00295 {
00296 #ifdef __x86_64__
00297   if (is_64bit_tdesc ())
00298     return 0;
00299 #endif
00300 
00301   return regno >= I386_NUM_REGS;
00302 }
00303 
00304 static int
00305 x86_cannot_fetch_register (int regno)
00306 {
00307 #ifdef __x86_64__
00308   if (is_64bit_tdesc ())
00309     return 0;
00310 #endif
00311 
00312   return regno >= I386_NUM_REGS;
00313 }
00314 
00315 static void
00316 x86_fill_gregset (struct regcache *regcache, void *buf)
00317 {
00318   int i;
00319 
00320 #ifdef __x86_64__
00321   if (register_size (regcache->tdesc, 0) == 8)
00322     {
00323       for (i = 0; i < X86_64_NUM_REGS; i++)
00324         if (x86_64_regmap[i] != -1)
00325           collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
00326       return;
00327     }
00328 #endif
00329 
00330   for (i = 0; i < I386_NUM_REGS; i++)
00331     collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
00332 
00333   collect_register_by_name (regcache, "orig_eax",
00334                             ((char *) buf) + ORIG_EAX * 4);
00335 }
00336 
00337 static void
00338 x86_store_gregset (struct regcache *regcache, const void *buf)
00339 {
00340   int i;
00341 
00342 #ifdef __x86_64__
00343   if (register_size (regcache->tdesc, 0) == 8)
00344     {
00345       for (i = 0; i < X86_64_NUM_REGS; i++)
00346         if (x86_64_regmap[i] != -1)
00347           supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
00348       return;
00349     }
00350 #endif
00351 
00352   for (i = 0; i < I386_NUM_REGS; i++)
00353     supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
00354 
00355   supply_register_by_name (regcache, "orig_eax",
00356                            ((char *) buf) + ORIG_EAX * 4);
00357 }
00358 
00359 static void
00360 x86_fill_fpregset (struct regcache *regcache, void *buf)
00361 {
00362 #ifdef __x86_64__
00363   i387_cache_to_fxsave (regcache, buf);
00364 #else
00365   i387_cache_to_fsave (regcache, buf);
00366 #endif
00367 }
00368 
00369 static void
00370 x86_store_fpregset (struct regcache *regcache, const void *buf)
00371 {
00372 #ifdef __x86_64__
00373   i387_fxsave_to_cache (regcache, buf);
00374 #else
00375   i387_fsave_to_cache (regcache, buf);
00376 #endif
00377 }
00378 
00379 #ifndef __x86_64__
00380 
00381 static void
00382 x86_fill_fpxregset (struct regcache *regcache, void *buf)
00383 {
00384   i387_cache_to_fxsave (regcache, buf);
00385 }
00386 
00387 static void
00388 x86_store_fpxregset (struct regcache *regcache, const void *buf)
00389 {
00390   i387_fxsave_to_cache (regcache, buf);
00391 }
00392 
00393 #endif
00394 
00395 static void
00396 x86_fill_xstateregset (struct regcache *regcache, void *buf)
00397 {
00398   i387_cache_to_xsave (regcache, buf);
00399 }
00400 
00401 static void
00402 x86_store_xstateregset (struct regcache *regcache, const void *buf)
00403 {
00404   i387_xsave_to_cache (regcache, buf);
00405 }
00406 
00407 /* ??? The non-biarch i386 case stores all the i387 regs twice.
00408    Once in i387_.*fsave.* and once in i387_.*fxsave.*.
00409    This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
00410    doesn't work.  It would be nice to avoid the duplication in the case
00411    where it does work.  Maybe the arch_setup routine could check whether
00412    it works and update the supported regsets accordingly.  */
00413 
00414 static struct regset_info x86_regsets[] =
00415 {
00416 #ifdef HAVE_PTRACE_GETREGS
00417   { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
00418     GENERAL_REGS,
00419     x86_fill_gregset, x86_store_gregset },
00420   { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
00421     EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
00422 # ifndef __x86_64__
00423 #  ifdef HAVE_PTRACE_GETFPXREGS
00424   { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
00425     EXTENDED_REGS,
00426     x86_fill_fpxregset, x86_store_fpxregset },
00427 #  endif
00428 # endif
00429   { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
00430     FP_REGS,
00431     x86_fill_fpregset, x86_store_fpregset },
00432 #endif /* HAVE_PTRACE_GETREGS */
00433   { 0, 0, 0, -1, -1, NULL, NULL }
00434 };
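/* The entry with a NULL fill function terminates the table.  The
   PTRACE_GETREGSET entry starts out with size 0;
   x86_linux_read_description fills in its real size once the kernel
   is known to support the request.  */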
00435 
00436 static CORE_ADDR
00437 x86_get_pc (struct regcache *regcache)
00438 {
00439   int use_64bit = register_size (regcache->tdesc, 0) == 8;
00440 
00441   if (use_64bit)
00442     {
00443       unsigned long pc;
00444       collect_register_by_name (regcache, "rip", &pc);
00445       return (CORE_ADDR) pc;
00446     }
00447   else
00448     {
00449       unsigned int pc;
00450       collect_register_by_name (regcache, "eip", &pc);
00451       return (CORE_ADDR) pc;
00452     }
00453 }
00454 
00455 static void
00456 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
00457 {
00458   int use_64bit = register_size (regcache->tdesc, 0) == 8;
00459 
00460   if (use_64bit)
00461     {
00462       unsigned long newpc = pc;
00463       supply_register_by_name (regcache, "rip", &newpc);
00464     }
00465   else
00466     {
00467       unsigned int newpc = pc;
00468       supply_register_by_name (regcache, "eip", &newpc);
00469     }
00470 }
00471 
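/* The x86 software breakpoint is the one-byte "int3" instruction,
   0xCC.  */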
00472 static const unsigned char x86_breakpoint[] = { 0xCC };
00473 #define x86_breakpoint_len 1
00474 
00475 static int
00476 x86_breakpoint_at (CORE_ADDR pc)
00477 {
00478   unsigned char c;
00479 
00480   (*the_target->read_memory) (pc, &c, 1);
00481   if (c == 0xCC)
00482     return 1;
00483 
00484   return 0;
00485 }
00486 
00487 /* Support for debug registers.  */
00488 
00489 static unsigned long
00490 x86_linux_dr_get (ptid_t ptid, int regnum)
00491 {
00492   int tid;
00493   unsigned long value;
00494 
00495   tid = ptid_get_lwp (ptid);
00496 
00497   errno = 0;
00498   value = ptrace (PTRACE_PEEKUSER, tid,
00499                   offsetof (struct user, u_debugreg[regnum]), 0);
00500   if (errno != 0)
00501     error ("Couldn't read debug register");
00502 
00503   return value;
00504 }
00505 
00506 static void
00507 x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
00508 {
00509   int tid;
00510 
00511   tid = ptid_get_lwp (ptid);
00512 
00513   errno = 0;
00514   ptrace (PTRACE_POKEUSER, tid,
00515           offsetof (struct user, u_debugreg[regnum]), value);
00516   if (errno != 0)
00517     error ("Couldn't write debug register");
00518 }
00519 
00520 static int
00521 update_debug_registers_callback (struct inferior_list_entry *entry,
00522                                  void *pid_p)
00523 {
00524   struct lwp_info *lwp = (struct lwp_info *) entry;
00525   int pid = *(int *) pid_p;
00526 
00527   /* Only update the threads of this process.  */
00528   if (pid_of (lwp) == pid)
00529     {
00530       /* The actual update is done later, just before resuming the lwp;
00531          here we just mark that the registers need updating.  */
00532       lwp->arch_private->debug_registers_changed = 1;
00533 
00534       /* If the lwp isn't stopped, force it to momentarily pause, so
00535          we can update its debug registers.  */
00536       if (!lwp->stopped)
00537         linux_stop_lwp (lwp);
00538     }
00539 
00540   return 0;
00541 }
00542 
00543 /* Update the inferior's debug register REGNUM from STATE.  */
00544 
00545 void
00546 i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
00547 {
00548   /* Only update the threads of this process.  */
00549   int pid = pid_of (get_thread_lwp (current_inferior));
00550 
00551   if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
00552     fatal ("Invalid debug register %d", regnum);
00553 
00554   find_inferior (&all_lwps, update_debug_registers_callback, &pid);
00555 }
00556 
00557 /* Return the inferior's debug register REGNUM.  */
00558 
00559 CORE_ADDR
00560 i386_dr_low_get_addr (int regnum)
00561 {
00562   struct lwp_info *lwp = get_thread_lwp (current_inferior);
00563   ptid_t ptid = ptid_of (lwp);
00564 
00565   /* DR6 and DR7 are retrieved by other means.  */
00566   gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
00567 
00568   return x86_linux_dr_get (ptid, regnum);
00569 }
00570 
00571 /* Update the inferior's DR7 debug control register from STATE.  */
00572 
00573 void
00574 i386_dr_low_set_control (const struct i386_debug_reg_state *state)
00575 {
00576   /* Only update the threads of this process.  */
00577   int pid = pid_of (get_thread_lwp (current_inferior));
00578 
00579   find_inferior (&all_lwps, update_debug_registers_callback, &pid);
00580 }
00581 
00582 /* Return the inferior's DR7 debug control register.  */
00583 
00584 unsigned
00585 i386_dr_low_get_control (void)
00586 {
00587   struct lwp_info *lwp = get_thread_lwp (current_inferior);
00588   ptid_t ptid = ptid_of (lwp);
00589 
00590   return x86_linux_dr_get (ptid, DR_CONTROL);
00591 }
00592 
00593 /* Return the value of the DR6 debug status register from the
00594    inferior.  */
00595 
00596 unsigned
00597 i386_dr_low_get_status (void)
00598 {
00599   struct lwp_info *lwp = get_thread_lwp (current_inferior);
00600   ptid_t ptid = ptid_of (lwp);
00601 
00602   return x86_linux_dr_get (ptid, DR_STATUS);
00603 }
00604 
00605 /* Breakpoint/Watchpoint support.  */
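/* In the two functions below, TYPE is the Z-packet code from GDB's
   remote protocol: '0' software breakpoint, '1' hardware breakpoint,
   '2' write watchpoint, '3' read watchpoint, '4' access
   watchpoint.  */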
00606 
00607 static int
00608 x86_insert_point (char type, CORE_ADDR addr, int len)
00609 {
00610   struct process_info *proc = current_process ();
00611   switch (type)
00612     {
00613     case '0': /* software-breakpoint */
00614       {
00615         int ret;
00616 
00617         ret = prepare_to_access_memory ();
00618         if (ret)
00619           return -1;
00620         ret = set_gdb_breakpoint_at (addr);
00621         done_accessing_memory ();
00622         return ret;
00623       }
00624     case '1': /* hardware-breakpoint */
00625     case '2': /* write watchpoint */
00626     case '3': /* read watchpoint */
00627     case '4': /* access watchpoint */
00628       return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
00629                                          type, addr, len);
00630 
00631     default:
00632       /* Unsupported.  */
00633       return 1;
00634     }
00635 }
00636 
00637 static int
00638 x86_remove_point (char type, CORE_ADDR addr, int len)
00639 {
00640   struct process_info *proc = current_process ();
00641   switch (type)
00642     {
00643     case '0': /* software-breakpoint */
00644       {
00645         int ret;
00646 
00647         ret = prepare_to_access_memory ();
00648         if (ret)
00649           return -1;
00650         ret = delete_gdb_breakpoint_at (addr);
00651         done_accessing_memory ();
00652         return ret;
00653       }
00654     case '1': /* hardware-breakpoint */
00655     case '2': /* write watchpoint */
00656     case '3': /* read watchpoint */
00657     case '4': /* access watchpoint */
00658       return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
00659                                          type, addr, len);
00660     default:
00661       /* Unsupported.  */
00662       return 1;
00663     }
00664 }
00665 
00666 static int
00667 x86_stopped_by_watchpoint (void)
00668 {
00669   struct process_info *proc = current_process ();
00670   return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
00671 }
00672 
00673 static CORE_ADDR
00674 x86_stopped_data_address (void)
00675 {
00676   struct process_info *proc = current_process ();
00677   CORE_ADDR addr;
00678   if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
00679                                      &addr))
00680     return addr;
00681   return 0;
00682 }
00683 
00684 /* Called when a new process is created.  */
00685 
00686 static struct arch_process_info *
00687 x86_linux_new_process (void)
00688 {
00689   struct arch_process_info *info = xcalloc (1, sizeof (*info));
00690 
00691   i386_low_init_dregs (&info->debug_reg_state);
00692 
00693   return info;
00694 }
00695 
00696 /* Called when a new thread is detected.  */
00697 
00698 static struct arch_lwp_info *
00699 x86_linux_new_thread (void)
00700 {
00701   struct arch_lwp_info *info = xcalloc (1, sizeof (*info));
00702 
00703   info->debug_registers_changed = 1;
00704 
00705   return info;
00706 }
00707 
00708 /* Called when resuming a thread.
00709    If the debug regs have changed, update the thread's copies.  */
00710 
00711 static void
00712 x86_linux_prepare_to_resume (struct lwp_info *lwp)
00713 {
00714   ptid_t ptid = ptid_of (lwp);
00715   int clear_status = 0;
00716 
00717   if (lwp->arch_private->debug_registers_changed)
00718     {
00719       int i;
00720       int pid = ptid_get_pid (ptid);
00721       struct process_info *proc = find_process_pid (pid);
00722       struct i386_debug_reg_state *state
00723         = &proc->private->arch_private->debug_reg_state;
00724 
00725       for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
00726         if (state->dr_ref_count[i] > 0)
00727           {
00728             x86_linux_dr_set (ptid, i, state->dr_mirror[i]);
00729 
00730             /* If we're setting a watchpoint, any change the inferior
00731                had done itself to the debug registers needs to be
00732                discarded, otherwise, i386_low_stopped_data_address can
00733                get confused.  */
00734             clear_status = 1;
00735           }
00736 
00737       x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);
00738 
00739       lwp->arch_private->debug_registers_changed = 0;
00740     }
00741 
00742   if (clear_status || lwp->stopped_by_watchpoint)
00743     x86_linux_dr_set (ptid, DR_STATUS, 0);
00744 }
00745 
00746 /* When GDBSERVER is built as a 64-bit application on linux, the
00747    PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
00748    debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
00749    as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
00750    conversion in-place ourselves.  */
00751 
00752 /* These types below (compat_*) define a siginfo type that is layout
00753    compatible with the siginfo type exported by the 32-bit userspace
00754    support.  */
00755 
00756 #ifdef __x86_64__
00757 
00758 typedef int compat_int_t;
00759 typedef unsigned int compat_uptr_t;
00760 
00761 typedef int compat_time_t;
00762 typedef int compat_timer_t;
00763 typedef int compat_clock_t;
00764 
00765 struct compat_timeval
00766 {
00767   compat_time_t tv_sec;
00768   int tv_usec;
00769 };
00770 
00771 typedef union compat_sigval
00772 {
00773   compat_int_t sival_int;
00774   compat_uptr_t sival_ptr;
00775 } compat_sigval_t;
00776 
00777 typedef struct compat_siginfo
00778 {
00779   int si_signo;
00780   int si_errno;
00781   int si_code;
00782 
00783   union
00784   {
00785     int _pad[((128 / sizeof (int)) - 3)];
00786 
00787     /* kill() */
00788     struct
00789     {
00790       unsigned int _pid;
00791       unsigned int _uid;
00792     } _kill;
00793 
00794     /* POSIX.1b timers */
00795     struct
00796     {
00797       compat_timer_t _tid;
00798       int _overrun;
00799       compat_sigval_t _sigval;
00800     } _timer;
00801 
00802     /* POSIX.1b signals */
00803     struct
00804     {
00805       unsigned int _pid;
00806       unsigned int _uid;
00807       compat_sigval_t _sigval;
00808     } _rt;
00809 
00810     /* SIGCHLD */
00811     struct
00812     {
00813       unsigned int _pid;
00814       unsigned int _uid;
00815       int _status;
00816       compat_clock_t _utime;
00817       compat_clock_t _stime;
00818     } _sigchld;
00819 
00820     /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
00821     struct
00822     {
00823       unsigned int _addr;
00824     } _sigfault;
00825 
00826     /* SIGPOLL */
00827     struct
00828     {
00829       int _band;
00830       int _fd;
00831     } _sigpoll;
00832   } _sifields;
00833 } compat_siginfo_t;
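/* The 128-byte _pad above mirrors the kernel's SI_MAX_SIZE padding,
   so compat_siginfo_t has the same size as the host siginfo_t;
   x86_siginfo_fixup below checks exactly that.  */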
00834 
00835 /* For x32, clock_t in _sigchld is 64-bit but only 4-byte aligned.  */
00836 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;
00837 
00838 typedef struct compat_x32_siginfo
00839 {
00840   int si_signo;
00841   int si_errno;
00842   int si_code;
00843 
00844   union
00845   {
00846     int _pad[((128 / sizeof (int)) - 3)];
00847 
00848     /* kill() */
00849     struct
00850     {
00851       unsigned int _pid;
00852       unsigned int _uid;
00853     } _kill;
00854 
00855     /* POSIX.1b timers */
00856     struct
00857     {
00858       compat_timer_t _tid;
00859       int _overrun;
00860       compat_sigval_t _sigval;
00861     } _timer;
00862 
00863     /* POSIX.1b signals */
00864     struct
00865     {
00866       unsigned int _pid;
00867       unsigned int _uid;
00868       compat_sigval_t _sigval;
00869     } _rt;
00870 
00871     /* SIGCHLD */
00872     struct
00873     {
00874       unsigned int _pid;
00875       unsigned int _uid;
00876       int _status;
00877       compat_x32_clock_t _utime;
00878       compat_x32_clock_t _stime;
00879     } _sigchld;
00880 
00881     /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
00882     struct
00883     {
00884       unsigned int _addr;
00885     } _sigfault;
00886 
00887     /* SIGPOLL */
00888     struct
00889     {
00890       int _band;
00891       int _fd;
00892     } _sigpoll;
00893   } _sifields;
00894 } compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
00895 
00896 #define cpt_si_pid _sifields._kill._pid
00897 #define cpt_si_uid _sifields._kill._uid
00898 #define cpt_si_timerid _sifields._timer._tid
00899 #define cpt_si_overrun _sifields._timer._overrun
00900 #define cpt_si_status _sifields._sigchld._status
00901 #define cpt_si_utime _sifields._sigchld._utime
00902 #define cpt_si_stime _sifields._sigchld._stime
00903 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
00904 #define cpt_si_addr _sifields._sigfault._addr
00905 #define cpt_si_band _sifields._sigpoll._band
00906 #define cpt_si_fd _sifields._sigpoll._fd
00907 
00908 /* glibc at least up to 2.3.2 doesn't have si_timerid or si_overrun.
00909    In their place are si_timer1 and si_timer2.  */
00910 #ifndef si_timerid
00911 #define si_timerid si_timer1
00912 #endif
00913 #ifndef si_overrun
00914 #define si_overrun si_timer2
00915 #endif
00916 
00917 static void
00918 compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
00919 {
00920   memset (to, 0, sizeof (*to));
00921 
00922   to->si_signo = from->si_signo;
00923   to->si_errno = from->si_errno;
00924   to->si_code = from->si_code;
00925 
00926   if (to->si_code == SI_TIMER)
00927     {
00928       to->cpt_si_timerid = from->si_timerid;
00929       to->cpt_si_overrun = from->si_overrun;
00930       to->cpt_si_ptr = (intptr_t) from->si_ptr;
00931     }
00932   else if (to->si_code == SI_USER)
00933     {
00934       to->cpt_si_pid = from->si_pid;
00935       to->cpt_si_uid = from->si_uid;
00936     }
00937   else if (to->si_code < 0)
00938     {
00939       to->cpt_si_pid = from->si_pid;
00940       to->cpt_si_uid = from->si_uid;
00941       to->cpt_si_ptr = (intptr_t) from->si_ptr;
00942     }
00943   else
00944     {
00945       switch (to->si_signo)
00946         {
00947         case SIGCHLD:
00948           to->cpt_si_pid = from->si_pid;
00949           to->cpt_si_uid = from->si_uid;
00950           to->cpt_si_status = from->si_status;
00951           to->cpt_si_utime = from->si_utime;
00952           to->cpt_si_stime = from->si_stime;
00953           break;
00954         case SIGILL:
00955         case SIGFPE:
00956         case SIGSEGV:
00957         case SIGBUS:
00958           to->cpt_si_addr = (intptr_t) from->si_addr;
00959           break;
00960         case SIGPOLL:
00961           to->cpt_si_band = from->si_band;
00962           to->cpt_si_fd = from->si_fd;
00963           break;
00964         default:
00965           to->cpt_si_pid = from->si_pid;
00966           to->cpt_si_uid = from->si_uid;
00967           to->cpt_si_ptr = (intptr_t) from->si_ptr;
00968           break;
00969         }
00970     }
00971 }
00972 
00973 static void
00974 siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
00975 {
00976   memset (to, 0, sizeof (*to));
00977 
00978   to->si_signo = from->si_signo;
00979   to->si_errno = from->si_errno;
00980   to->si_code = from->si_code;
00981 
00982   if (to->si_code == SI_TIMER)
00983     {
00984       to->si_timerid = from->cpt_si_timerid;
00985       to->si_overrun = from->cpt_si_overrun;
00986       to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
00987     }
00988   else if (to->si_code == SI_USER)
00989     {
00990       to->si_pid = from->cpt_si_pid;
00991       to->si_uid = from->cpt_si_uid;
00992     }
00993   else if (to->si_code < 0)
00994     {
00995       to->si_pid = from->cpt_si_pid;
00996       to->si_uid = from->cpt_si_uid;
00997       to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
00998     }
00999   else
01000     {
01001       switch (to->si_signo)
01002         {
01003         case SIGCHLD:
01004           to->si_pid = from->cpt_si_pid;
01005           to->si_uid = from->cpt_si_uid;
01006           to->si_status = from->cpt_si_status;
01007           to->si_utime = from->cpt_si_utime;
01008           to->si_stime = from->cpt_si_stime;
01009           break;
01010         case SIGILL:
01011         case SIGFPE:
01012         case SIGSEGV:
01013         case SIGBUS:
01014           to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
01015           break;
01016         case SIGPOLL:
01017           to->si_band = from->cpt_si_band;
01018           to->si_fd = from->cpt_si_fd;
01019           break;
01020         default:
01021           to->si_pid = from->cpt_si_pid;
01022           to->si_uid = from->cpt_si_uid;
01023           to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
01024           break;
01025         }
01026     }
01027 }
01028 
01029 static void
01030 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
01031                                  siginfo_t *from)
01032 {
01033   memset (to, 0, sizeof (*to));
01034 
01035   to->si_signo = from->si_signo;
01036   to->si_errno = from->si_errno;
01037   to->si_code = from->si_code;
01038 
01039   if (to->si_code == SI_TIMER)
01040     {
01041       to->cpt_si_timerid = from->si_timerid;
01042       to->cpt_si_overrun = from->si_overrun;
01043       to->cpt_si_ptr = (intptr_t) from->si_ptr;
01044     }
01045   else if (to->si_code == SI_USER)
01046     {
01047       to->cpt_si_pid = from->si_pid;
01048       to->cpt_si_uid = from->si_uid;
01049     }
01050   else if (to->si_code < 0)
01051     {
01052       to->cpt_si_pid = from->si_pid;
01053       to->cpt_si_uid = from->si_uid;
01054       to->cpt_si_ptr = (intptr_t) from->si_ptr;
01055     }
01056   else
01057     {
01058       switch (to->si_signo)
01059         {
01060         case SIGCHLD:
01061           to->cpt_si_pid = from->si_pid;
01062           to->cpt_si_uid = from->si_uid;
01063           to->cpt_si_status = from->si_status;
01064           to->cpt_si_utime = from->si_utime;
01065           to->cpt_si_stime = from->si_stime;
01066           break;
01067         case SIGILL:
01068         case SIGFPE:
01069         case SIGSEGV:
01070         case SIGBUS:
01071           to->cpt_si_addr = (intptr_t) from->si_addr;
01072           break;
01073         case SIGPOLL:
01074           to->cpt_si_band = from->si_band;
01075           to->cpt_si_fd = from->si_fd;
01076           break;
01077         default:
01078           to->cpt_si_pid = from->si_pid;
01079           to->cpt_si_uid = from->si_uid;
01080           to->cpt_si_ptr = (intptr_t) from->si_ptr;
01081           break;
01082         }
01083     }
01084 }
01085 
01086 static void
01087 siginfo_from_compat_x32_siginfo (siginfo_t *to,
01088                                  compat_x32_siginfo_t *from)
01089 {
01090   memset (to, 0, sizeof (*to));
01091 
01092   to->si_signo = from->si_signo;
01093   to->si_errno = from->si_errno;
01094   to->si_code = from->si_code;
01095 
01096   if (to->si_code == SI_TIMER)
01097     {
01098       to->si_timerid = from->cpt_si_timerid;
01099       to->si_overrun = from->cpt_si_overrun;
01100       to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
01101     }
01102   else if (to->si_code == SI_USER)
01103     {
01104       to->si_pid = from->cpt_si_pid;
01105       to->si_uid = from->cpt_si_uid;
01106     }
01107   else if (to->si_code < 0)
01108     {
01109       to->si_pid = from->cpt_si_pid;
01110       to->si_uid = from->cpt_si_uid;
01111       to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
01112     }
01113   else
01114     {
01115       switch (to->si_signo)
01116         {
01117         case SIGCHLD:
01118           to->si_pid = from->cpt_si_pid;
01119           to->si_uid = from->cpt_si_uid;
01120           to->si_status = from->cpt_si_status;
01121           to->si_utime = from->cpt_si_utime;
01122           to->si_stime = from->cpt_si_stime;
01123           break;
01124         case SIGILL:
01125         case SIGFPE:
01126         case SIGSEGV:
01127         case SIGBUS:
01128           to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
01129           break;
01130         case SIGPOLL:
01131           to->si_band = from->cpt_si_band;
01132           to->si_fd = from->cpt_si_fd;
01133           break;
01134         default:
01135           to->si_pid = from->cpt_si_pid;
01136           to->si_uid = from->cpt_si_uid;
01137           to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
01138           break;
01139         }
01140     }
01141 }
01142 
01143 #endif /* __x86_64__ */
01144 
01145 /* Convert a native/host siginfo object into/from the siginfo in the
01146    layout of the inferior's architecture.  Returns true if any
01147    conversion was done; false otherwise.  If DIRECTION is 1, then copy
01148    from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
01149    INF.  */
01150 
01151 static int
01152 x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
01153 {
01154 #ifdef __x86_64__
01155   unsigned int machine;
01156   int tid = lwpid_of (get_thread_lwp (current_inferior));
01157   int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
01158 
01159   /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
01160   if (!is_64bit_tdesc ())
01161     {
01162       if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
01163         fatal ("unexpected difference in siginfo");
01164 
01165       if (direction == 0)
01166         compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
01167       else
01168         siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);
01169 
01170       return 1;
01171     }
01172   /* No fixup for native x32 GDB.  */
01173   else if (!is_elf64 && sizeof (void *) == 8)
01174     {
01175       if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
01176         fatal ("unexpected difference in siginfo");
01177 
01178       if (direction == 0)
01179         compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
01180                                          native);
01181       else
01182         siginfo_from_compat_x32_siginfo (native,
01183                                          (struct compat_x32_siginfo *) inf);
01184 
01185       return 1;
01186     }
01187 #endif
01188 
01189   return 0;
01190 }
01191 
01192 static int use_xml;
01193 
01194 /* Format of XSAVE extended state is:
01195         struct
01196         {
01197           fxsave_bytes[0..463]
01198           sw_usable_bytes[464..511]
01199           xstate_hdr_bytes[512..575]
01200           avx_bytes[576..831]
01201           future_state etc
01202         };
01203 
01204   Same memory layout will be used for the coredump NT_X86_XSTATE
01205   representing the XSAVE extended state registers.
01206 
01207   The first 8 bytes of sw_usable_bytes [464..471] hold the OS-enabled
01208   extended state mask, which is the same as the extended control register
01209   0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
01210   together with the mask saved in the xstate_hdr_bytes to determine what
01211   states the processor/OS supports and what state, used or initialized,
01212   the process/thread is in.  */
01213 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
01214 
01215 /* Does the current host support the GETFPXREGS request?  The header
01216    file may or may not define it, and even if it is defined, the
01217    kernel will return EIO if it's running on a pre-SSE processor.  */
01218 int have_ptrace_getfpxregs =
01219 #ifdef HAVE_PTRACE_GETFPXREGS
01220   -1
01221 #else
01222   0
01223 #endif
01224 ;
01225 
01226 /* Does the current host support PTRACE_GETREGSET?  */
01227 static int have_ptrace_getregset = -1;
01228 
01229 /* Get Linux/x86 target description from running target.  */
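/* The description is chosen by probing the inferior: its ELF class
   and machine; on 32-bit hosts, whether PTRACE_GETFPXREGS works;
   whether GDB negotiated XML support; whether PTRACE_GETREGSET can
   fetch the XSAVE area; and finally whether the XCR0 value read from
   that area reports AVX.  */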
01230 
01231 static const struct target_desc *
01232 x86_linux_read_description (void)
01233 {
01234   unsigned int machine;
01235   int is_elf64;
01236   int avx;
01237   int tid;
01238   static uint64_t xcr0;
01239   struct regset_info *regset;
01240 
01241   tid = lwpid_of (get_thread_lwp (current_inferior));
01242 
01243   is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
01244 
01245   if (sizeof (void *) == 4)
01246     {
01247       if (is_elf64 > 0)
01248        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
01249 #ifndef __x86_64__
01250       else if (machine == EM_X86_64)
01251        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
01252 #endif
01253     }
01254 
01255 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
01256   if (machine == EM_386 && have_ptrace_getfpxregs == -1)
01257     {
01258       elf_fpxregset_t fpxregs;
01259 
01260       if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
01261         {
01262           have_ptrace_getfpxregs = 0;
01263           have_ptrace_getregset = 0;
01264           return tdesc_i386_mmx_linux;
01265         }
01266       else
01267         have_ptrace_getfpxregs = 1;
01268     }
01269 #endif
01270 
01271   if (!use_xml)
01272     {
01273       x86_xcr0 = I386_XSTATE_SSE_MASK;
01274 
01275       /* Don't use XML.  */
01276 #ifdef __x86_64__
01277       if (machine == EM_X86_64)
01278         return tdesc_amd64_linux_no_xml;
01279       else
01280 #endif
01281         return tdesc_i386_linux_no_xml;
01282     }
01283 
01284   if (have_ptrace_getregset == -1)
01285     {
01286       uint64_t xstateregs[(I386_XSTATE_SSE_SIZE / sizeof (uint64_t))];
01287       struct iovec iov;
01288 
01289       iov.iov_base = xstateregs;
01290       iov.iov_len = sizeof (xstateregs);
01291 
01292       /* Check if PTRACE_GETREGSET works.  */
01293       if (ptrace (PTRACE_GETREGSET, tid,
01294                   (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
01295         have_ptrace_getregset = 0;
01296       else
01297         {
01298           have_ptrace_getregset = 1;
01299 
01300           /* Get XCR0 from XSAVE extended state.  */
01301           xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
01302                              / sizeof (uint64_t))];
01303 
01304           /* Use PTRACE_GETREGSET if it is available.  */
01305           for (regset = x86_regsets;
01306                regset->fill_function != NULL; regset++)
01307             if (regset->get_request == PTRACE_GETREGSET)
01308               regset->size = I386_XSTATE_SIZE (xcr0);
01309             else if (regset->type != GENERAL_REGS)
01310               regset->size = 0;
01311         }
01312     }
01313 
01314   /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
01315   avx = (have_ptrace_getregset
01316          && (xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK);
01317 
01318   /* AVX is the highest feature we support.  */
01319   if (avx)
01320     x86_xcr0 = xcr0;
01321 
01322   if (machine == EM_X86_64)
01323     {
01324 #ifdef __x86_64__
01325       if (avx)
01326         {
01327           if (!is_elf64)
01328             return tdesc_x32_avx_linux;
01329           else
01330             return tdesc_amd64_avx_linux;
01331         }
01332       else
01333         {
01334           if (!is_elf64)
01335             return tdesc_x32_linux;
01336           else
01337             return tdesc_amd64_linux;
01338         }
01339 #endif
01340     }
01341   else
01342     {
01343       if (avx)
01344         return tdesc_i386_avx_linux;
01345       else
01346         return tdesc_i386_linux;
01347     }
01348 
01349   gdb_assert_not_reached ("failed to return tdesc");
01350 }
01351 
01352 /* Callback for find_inferior.  Stops iteration when a thread with a
01353    given PID is found.  */
01354 
01355 static int
01356 same_process_callback (struct inferior_list_entry *entry, void *data)
01357 {
01358   int pid = *(int *) data;
01359 
01360   return (ptid_get_pid (entry->id) == pid);
01361 }
01362 
01363 /* Callback for for_each_inferior.  Calls the arch_setup routine for
01364    each process.  */
01365 
01366 static void
01367 x86_arch_setup_process_callback (struct inferior_list_entry *entry)
01368 {
01369   int pid = ptid_get_pid (entry->id);
01370 
01371   /* Look up any thread of this process.  */
01372   current_inferior
01373     = (struct thread_info *) find_inferior (&all_threads,
01374                                             same_process_callback, &pid);
01375 
01376   the_low_target.arch_setup ();
01377 }
01378 
01379 /* Update the target descriptions of all processes; a new GDB has
01380    connected, and it may or may not support XML target descriptions.  */
01381 
01382 static void
01383 x86_linux_update_xmltarget (void)
01384 {
01385   struct thread_info *save_inferior = current_inferior;
01386 
01387   /* Before changing the register cache's internal layout, flush the
01388      contents of the current valid caches back to the threads, and
01389      release the current regcache objects.  */
01390   regcache_release ();
01391 
01392   for_each_inferior (&all_processes, x86_arch_setup_process_callback);
01393 
01394   current_inferior = save_inferior;
01395 }
01396 
01397 /* Process qSupported query, "xmlRegisters=".  Update the buffer size for
01398    PTRACE_GETREGSET.  */
01399 
01400 static void
01401 x86_linux_process_qsupported (const char *query)
01402 {
01403   /* Assume no XML support unless told otherwise.  If GDB sends
01404      "xmlRegisters=" with "i386" in its qSupported query, it supports
01405      x86 XML target descriptions.  */
01406   use_xml = 0;
01407   if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
01408     {
01409       char *copy = xstrdup (query + 13);
01410       char *p;
01411 
01412       for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
01413         {
01414           if (strcmp (p, "i386") == 0)
01415             {
01416               use_xml = 1;
01417               break;
01418             }
01419         } 
01420 
01421       free (copy);
01422     }
01423 
01424   x86_linux_update_xmltarget ();
01425 }
01426 
01427 /* Common for x86/x86-64.  */
01428 
01429 static struct regsets_info x86_regsets_info =
01430   {
01431     x86_regsets, /* regsets */
01432     0, /* num_regsets */
01433     NULL, /* disabled_regsets */
01434   };
01435 
01436 #ifdef __x86_64__
01437 static struct regs_info amd64_linux_regs_info =
01438   {
01439     NULL, /* regset_bitmap */
01440     NULL, /* usrregs_info */
01441     &x86_regsets_info
01442   };
01443 #endif
01444 static struct usrregs_info i386_linux_usrregs_info =
01445   {
01446     I386_NUM_REGS,
01447     i386_regmap,
01448   };
01449 
01450 static struct regs_info i386_linux_regs_info =
01451   {
01452     NULL, /* regset_bitmap */
01453     &i386_linux_usrregs_info,
01454     &x86_regsets_info
01455   };
01456 
01457 const struct regs_info *
01458 x86_linux_regs_info (void)
01459 {
01460 #ifdef __x86_64__
01461   if (is_64bit_tdesc ())
01462     return &amd64_linux_regs_info;
01463   else
01464 #endif
01465     return &i386_linux_regs_info;
01466 }
01467 
01468 /* Initialize the target description for the architecture of the
01469    inferior.  */
01470 
01471 static void
01472 x86_arch_setup (void)
01473 {
01474   current_process ()->tdesc = x86_linux_read_description ();
01475 }
01476 
01477 static int
01478 x86_supports_tracepoints (void)
01479 {
01480   return 1;
01481 }
01482 
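/* Copy LEN bytes from BUF into the inferior at *TO, then advance *TO
   past them.  */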
01483 static void
01484 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
01485 {
01486   write_inferior_memory (*to, buf, len);
01487   *to += len;
01488 }
01489 
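/* Assemble the space-separated string of hex bytes OP into BUF and
   return the number of bytes written; e.g. push_opcode (buf,
   "48 89 e6") stores 0x48 0x89 0xe6 and returns 3.  */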
01490 static int
01491 push_opcode (unsigned char *buf, char *op)
01492 {
01493   unsigned char *buf_org = buf;
01494 
01495   while (1)
01496     {
01497       char *endptr;
01498       unsigned long ul = strtoul (op, &endptr, 16);
01499 
01500       if (endptr == op)
01501         break;
01502 
01503       *buf++ = ul;
01504       op = endptr;
01505     }
01506 
01507   return buf - buf_org;
01508 }
01509 
01510 #ifdef __x86_64__
01511 
01512 /* Build a jump pad that saves registers and calls a collection
01513    function.  Writes a jump instruction to the jump pad into
01514    JJUMP_PAD_INSN.  The caller is responsible for writing it in at
01515    the tracepoint address.  */
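/* The pad built here (and by the i386 variant further below) has this
   shape: save the general-purpose registers and flags; reserve a
   collecting_t on the stack and take the spin lock at LOCKADDR; call
   COLLECTOR with TPOINT and the saved-register block as arguments;
   release the lock and restore the registers; execute the relocated
   original instruction; finally jump back to TPADDR + ORIG_SIZE.  */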
01516 
01517 static int
01518 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
01519                                         CORE_ADDR collector,
01520                                         CORE_ADDR lockaddr,
01521                                         ULONGEST orig_size,
01522                                         CORE_ADDR *jump_entry,
01523                                         CORE_ADDR *trampoline,
01524                                         ULONGEST *trampoline_size,
01525                                         unsigned char *jjump_pad_insn,
01526                                         ULONGEST *jjump_pad_insn_size,
01527                                         CORE_ADDR *adjusted_insn_addr,
01528                                         CORE_ADDR *adjusted_insn_addr_end,
01529                                         char *err)
01530 {
01531   unsigned char buf[40];
01532   int i, offset;
01533   int64_t loffset;
01534 
01535   CORE_ADDR buildaddr = *jump_entry;
01536 
01537   /* Build the jump pad.  */
01538 
01539   /* First, do tracepoint data collection.  Save registers.  */
01540   i = 0;
01541   /* Need to ensure stack pointer saved first.  */
01542   buf[i++] = 0x54; /* push %rsp */
01543   buf[i++] = 0x55; /* push %rbp */
01544   buf[i++] = 0x57; /* push %rdi */
01545   buf[i++] = 0x56; /* push %rsi */
01546   buf[i++] = 0x52; /* push %rdx */
01547   buf[i++] = 0x51; /* push %rcx */
01548   buf[i++] = 0x53; /* push %rbx */
01549   buf[i++] = 0x50; /* push %rax */
01550   buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
01551   buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
01552   buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
01553   buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
01554   buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
01555   buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
01556   buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
01557   buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
01558   buf[i++] = 0x9c; /* pushfq */
01559   buf[i++] = 0x48; /* movabs <addr>,%rdi */
01560   buf[i++] = 0xbf;
01561   *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
01562   i += sizeof (unsigned long);
01563   buf[i++] = 0x57; /* push %rdi */
01564   append_insns (&buildaddr, i, buf);
01565 
01566   /* Stack space for the collecting_t object.  */
01567   i = 0;
01568   i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
01569   i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
01570   memcpy (buf + i, &tpoint, 8);
01571   i += 8;
01572   i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
01573   i += push_opcode (&buf[i],
01574                     "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
01575   i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
01576   append_insns (&buildaddr, i, buf);
01577 
01578   /* spin-lock.  */
01579   i = 0;
01580   i += push_opcode (&buf[i], "48 be");          /* movabs <lockaddr>,%rsi */
01581   memcpy (&buf[i], (void *) &lockaddr, 8);
01582   i += 8;
01583   i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
01584   i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
01585   i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
01586   i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
01587   i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
01588   append_insns (&buildaddr, i, buf);
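  /* The sequence above spins until it atomically swings the lock word
     at LOCKADDR from 0 to the address of our collecting_t (held in
     %rcx).  */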
01589 
01590   /* Set up the gdb_collect call.  */
01591   /* At this point, (stack pointer + 0x18) is the base of our saved
01592      register block.  */
01593 
01594   i = 0;
01595   i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
01596   i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */
01597 
01598   /* tpoint address may be 64-bit wide.  */
01599   i += push_opcode (&buf[i], "48 bf");          /* movabs <addr>,%rdi */
01600   memcpy (buf + i, &tpoint, 8);
01601   i += 8;
01602   append_insns (&buildaddr, i, buf);
01603 
01604   /* The collector function, being in a shared library, may be more
01605      than 31 bits away from the jump pad.  */
01606   i = 0;
01607   i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
01608   memcpy (buf + i, &collector, 8);
01609   i += 8;
01610   i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
01611   append_insns (&buildaddr, i, buf);
01612 
01613   /* Clear the spin-lock.  */
01614   i = 0;
01615   i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
01616   i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
01617   memcpy (buf + i, &lockaddr, 8);
01618   i += 8;
01619   append_insns (&buildaddr, i, buf);
01620 
01621   /* Remove the stack space used for the collecting_t object.  */
01622   i = 0;
01623   i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
01624   append_insns (&buildaddr, i, buf);
01625 
01626   /* Restore register state.  */
01627   i = 0;
01628   buf[i++] = 0x48; /* add $0x8,%rsp */
01629   buf[i++] = 0x83;
01630   buf[i++] = 0xc4;
01631   buf[i++] = 0x08;
01632   buf[i++] = 0x9d; /* popfq */
01633   buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
01634   buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
01635   buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
01636   buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
01637   buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
01638   buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
01639   buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
01640   buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
01641   buf[i++] = 0x58; /* pop %rax */
01642   buf[i++] = 0x5b; /* pop %rbx */
01643   buf[i++] = 0x59; /* pop %rcx */
01644   buf[i++] = 0x5a; /* pop %rdx */
01645   buf[i++] = 0x5e; /* pop %rsi */
01646   buf[i++] = 0x5f; /* pop %rdi */
01647   buf[i++] = 0x5d; /* pop %rbp */
01648   buf[i++] = 0x5c; /* pop %rsp */
01649   append_insns (&buildaddr, i, buf);
01650 
01651   /* Now, adjust the original instruction to execute in the jump
01652      pad.  */
01653   *adjusted_insn_addr = buildaddr;
01654   relocate_instruction (&buildaddr, tpaddr);
01655   *adjusted_insn_addr_end = buildaddr;
01656 
01657   /* Finally, write a jump back to the program.  */
01658 
01659   loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
01660   if (loffset > INT_MAX || loffset < INT_MIN)
01661     {
01662       sprintf (err,
01663                "E.Jump back from jump pad too far from tracepoint "
01664                "(offset 0x%" PRIx64 " > int32).", loffset);
01665       return 1;
01666     }
01667 
01668   offset = (int) loffset;
01669   memcpy (buf, jump_insn, sizeof (jump_insn));
01670   memcpy (buf + 1, &offset, 4);
01671   append_insns (&buildaddr, sizeof (jump_insn), buf);
01672 
01673   /* The jump pad is now built.  Wire in a jump to our jump pad.  This
01674      is always done last (by our caller actually), so that we can
01675      install fast tracepoints with threads running.  This relies on
01676      the agent's atomic write support.  */
01677   loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
01678   if (loffset > INT_MAX || loffset < INT_MIN)
01679     {
01680       sprintf (err,
01681                "E.Jump pad too far from tracepoint "
01682                "(offset 0x%" PRIx64 " > int32).", loffset);
01683       return 1;
01684     }
01685 
01686   offset = (int) loffset;
01687 
01688   memcpy (buf, jump_insn, sizeof (jump_insn));
01689   memcpy (buf + 1, &offset, 4);
01690   memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
01691   *jjump_pad_insn_size = sizeof (jump_insn);
01692 
01693   /* Return the end address of our pad.  */
01694   *jump_entry = buildaddr;
01695 
01696   return 0;
01697 }
01698 
01699 #endif /* __x86_64__ */
01700 
01701 /* Build a jump pad that saves registers and calls a collection
01702    function.  Writes a jump instruction to the jump pad into
01703    JJUMP_PAD_INSN.  The caller is responsible for writing it in at
01704    the tracepoint address.  */
01705 
01706 static int
01707 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
01708                                        CORE_ADDR collector,
01709                                        CORE_ADDR lockaddr,
01710                                        ULONGEST orig_size,
01711                                        CORE_ADDR *jump_entry,
01712                                        CORE_ADDR *trampoline,
01713                                        ULONGEST *trampoline_size,
01714                                        unsigned char *jjump_pad_insn,
01715                                        ULONGEST *jjump_pad_insn_size,
01716                                        CORE_ADDR *adjusted_insn_addr,
01717                                        CORE_ADDR *adjusted_insn_addr_end,
01718                                        char *err)
01719 {
01720   unsigned char buf[0x100];
01721   int i, offset;
01722   CORE_ADDR buildaddr = *jump_entry;
01723 
01724   /* Build the jump pad.  */
01725 
01726   /* First, do tracepoint data collection.  Save registers.  */
01727   i = 0;
01728   buf[i++] = 0x60; /* pushad */
01729   buf[i++] = 0x68; /* push tpaddr aka $pc */
01730   *((int *)(buf + i)) = (int) tpaddr;
01731   i += 4;
01732   buf[i++] = 0x9c; /* pushf */
01733   buf[i++] = 0x1e; /* push %ds */
01734   buf[i++] = 0x06; /* push %es */
01735   buf[i++] = 0x0f; /* push %fs */
01736   buf[i++] = 0xa0;
01737   buf[i++] = 0x0f; /* push %gs */
01738   buf[i++] = 0xa8;
01739   buf[i++] = 0x16; /* push %ss */
01740   buf[i++] = 0x0e; /* push %cs */
01741   append_insns (&buildaddr, i, buf);
01742 
01743   /* Stack space for the collecting_t object.  */
01744   i = 0;
01745   i += push_opcode (&buf[i], "83 ec 08");       /* sub    $0x8,%esp */
01746 
01747   /* Build the object.  */
01748   i += push_opcode (&buf[i], "b8");             /* mov    <tpoint>,%eax */
01749   memcpy (buf + i, &tpoint, 4);
01750   i += 4;
01751   i += push_opcode (&buf[i], "89 04 24");          /* mov %eax,(%esp) */
01752 
01753   i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
01754   i += push_opcode (&buf[i], "89 44 24 04");       /* mov %eax,0x4(%esp) */
01755   append_insns (&buildaddr, i, buf);
01756 
01757   /* Spin-lock.  Note this uses cmpxchg, which is not available on the
01758      original i386.  If we cared about that, we could use xchg instead.  */
01759 
01760   i = 0;
01761   i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
01762   i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
01763                                                    %esp,<lockaddr> */
01764   memcpy (&buf[i], (void *) &lockaddr, 4);
01765   i += 4;
01766   i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
01767   i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
01768   append_insns (&buildaddr, i, buf);
01769 
01770 
01771   /* Set up arguments to the gdb_collect call.  */
01772   i = 0;
01773   i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
01774   i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
01775   i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
01776   append_insns (&buildaddr, i, buf);
01777 
01778   i = 0;
01779   i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
01780   append_insns (&buildaddr, i, buf);
01781 
01782   i = 0;
01783   i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
01784   memcpy (&buf[i], (void *) &tpoint, 4);
01785   i += 4;
01786   append_insns (&buildaddr, i, buf);
01787 
01788   buf[0] = 0xe8; /* call <reladdr> */
01789   offset = collector - (buildaddr + sizeof (jump_insn));
01790   memcpy (buf + 1, &offset, 4);
01791   append_insns (&buildaddr, 5, buf);
01792   /* Clean up after the call.  */
01793   buf[0] = 0x83; /* add $0x8,%esp */
01794   buf[1] = 0xc4;
01795   buf[2] = 0x08;
01796   append_insns (&buildaddr, 3, buf);
01797 
01798 
01799   /* Clear the spin-lock.  This would need the LOCK prefix on older
01800      broken archs.  */
01801   i = 0;
01802   i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
01803   i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
01804   memcpy (buf + i, &lockaddr, 4);
01805   i += 4;
01806   append_insns (&buildaddr, i, buf);
01807 
01808 
01809   /* Remove the stack space used for the collecting_t object.  */
01810   i = 0;
01811   i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
01812   append_insns (&buildaddr, i, buf);
01813 
01814   i = 0;
01815   buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
01816   buf[i++] = 0xc4;
01817   buf[i++] = 0x04;
01818   buf[i++] = 0x17; /* pop %ss */
01819   buf[i++] = 0x0f; /* pop %gs */
01820   buf[i++] = 0xa9;
01821   buf[i++] = 0x0f; /* pop %fs */
01822   buf[i++] = 0xa1;
01823   buf[i++] = 0x07; /* pop %es */
01824   buf[i++] = 0x1f; /* pop %ds */
01825   buf[i++] = 0x9d; /* popf */
01826   buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
01827   buf[i++] = 0xc4;
01828   buf[i++] = 0x04;
01829   buf[i++] = 0x61; /* popad */
01830   append_insns (&buildaddr, i, buf);
01831 
01832   /* Now, adjust the original instruction to execute in the jump
01833      pad.  */
01834   *adjusted_insn_addr = buildaddr;
01835   relocate_instruction (&buildaddr, tpaddr);
01836   *adjusted_insn_addr_end = buildaddr;
01837 
01838   /* Write the jump back to the program.  */
01839   offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
01840   memcpy (buf, jump_insn, sizeof (jump_insn));
01841   memcpy (buf + 1, &offset, 4);
01842   append_insns (&buildaddr, sizeof (jump_insn), buf);
01843 
01844   /* The jump pad is now built.  Wire in a jump to our jump pad.  This
01845      is always done last (by our caller actually), so that we can
01846      install fast tracepoints with threads running.  This relies on
01847      the agent's atomic write support.  */
01848   if (orig_size == 4)
01849     {
01850       /* Create a trampoline.  */
01851       *trampoline_size = sizeof (jump_insn);
01852       if (!claim_trampoline_space (*trampoline_size, trampoline))
01853         {
01854           /* No trampoline space available.  */
01855           strcpy (err,
01856                   "E.Cannot allocate trampoline space needed for fast "
01857                   "tracepoints on 4-byte instructions.");
01858           return 1;
01859         }
01860 
01861       offset = *jump_entry - (*trampoline + sizeof (jump_insn));
01862       memcpy (buf, jump_insn, sizeof (jump_insn));
01863       memcpy (buf + 1, &offset, 4);
01864       write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
01865 
01866       /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
01867       offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
01868       memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
01869       memcpy (buf + 2, &offset, 2);
01870       memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
01871       *jjump_pad_insn_size = sizeof (small_jump_insn);
01872     }
01873   else
01874     {
01875       /* Else use a 32-bit relative jump instruction.  */
01876       offset = *jump_entry - (tpaddr + sizeof (jump_insn));
01877       memcpy (buf, jump_insn, sizeof (jump_insn));
01878       memcpy (buf + 1, &offset, 4);
01879       memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
01880       *jjump_pad_insn_size = sizeof (jump_insn);
01881     }
01882 
01883   /* Return the end address of our pad.  */
01884   *jump_entry = buildaddr;
01885 
01886   return 0;
01887 }
01888 
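/* Illustrative sketch, not part of the build: the 4-byte jump used
   above when only a 4-byte instruction can be overwritten.  The 0x66
   operand-size prefix turns jmp rel32 into jmp rel16; with a 16-bit
   operand the CPU truncates %eip, so the trampoline this jump lands
   on must be reachable with a 16-bit %eip (claim_trampoline_space
   hands out space from a buffer reserved low in the address space).
   The trampoline itself then holds a full 5-byte rel32 jump to the
   pad.  */
#if 0
static void
encode_jump_rel16 (unsigned char buf[4], CORE_ADDR insn_addr,
                   CORE_ADDR trampoline)
{
  unsigned short disp
    = (unsigned short) (trampoline - (insn_addr + 4));

  buf[0] = 0x66;                /* operand-size override prefix */
  buf[1] = 0xe9;                /* jmp rel16 */
  memcpy (buf + 2, &disp, 2);   /* little-endian 16-bit displacement */
}
#endif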
01889 static int
01890 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
01891                                       CORE_ADDR collector,
01892                                       CORE_ADDR lockaddr,
01893                                       ULONGEST orig_size,
01894                                       CORE_ADDR *jump_entry,
01895                                       CORE_ADDR *trampoline,
01896                                       ULONGEST *trampoline_size,
01897                                       unsigned char *jjump_pad_insn,
01898                                       ULONGEST *jjump_pad_insn_size,
01899                                       CORE_ADDR *adjusted_insn_addr,
01900                                       CORE_ADDR *adjusted_insn_addr_end,
01901                                       char *err)
01902 {
01903 #ifdef __x86_64__
01904   if (is_64bit_tdesc ())
01905     return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
01906                                                    collector, lockaddr,
01907                                                    orig_size, jump_entry,
01908                                                    trampoline, trampoline_size,
01909                                                    jjump_pad_insn,
01910                                                    jjump_pad_insn_size,
01911                                                    adjusted_insn_addr,
01912                                                    adjusted_insn_addr_end,
01913                                                    err);
01914 #endif
01915 
01916   return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
01917                                                 collector, lockaddr,
01918                                                 orig_size, jump_entry,
01919                                                 trampoline, trampoline_size,
01920                                                 jjump_pad_insn,
01921                                                 jjump_pad_insn_size,
01922                                                 adjusted_insn_addr,
01923                                                 adjusted_insn_addr_end,
01924                                                 err);
01925 }
01926 
01927 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
01928    architectures.  */
01929 
01930 static int
01931 x86_get_min_fast_tracepoint_insn_len (void)
01932 {
01933   static int warned_about_fast_tracepoints = 0;
01934 
01935 #ifdef __x86_64__
01936   /*  On x86-64, 5-byte jump instructions with a 4-byte offset are always
01937       used for fast tracepoints.  */
01938   if (is_64bit_tdesc ())
01939     return 5;
01940 #endif
01941 
01942   if (agent_loaded_p ())
01943     {
01944       char errbuf[IPA_BUFSIZ];
01945 
01946       errbuf[0] = '\0';
01947 
01948       /* On x86, if trampolines are available, then 4-byte jump instructions
01949          with a 2-byte offset may be used, otherwise 5-byte jump instructions
01950          with a 4-byte offset are used instead.  */
01951       if (have_fast_tracepoint_trampoline_buffer (errbuf))
01952         return 4;
01953       else
01954         {
01955           /* GDB has no channel to explain to the user why a shorter
01956              fast tracepoint is not possible, but at least make
01957              GDBserver mention that something has gone awry.  */
01958           if (!warned_about_fast_tracepoints)
01959             {
01960               warning ("4-byte fast tracepoints not available; %s\n", errbuf);
01961               warned_about_fast_tracepoints = 1;
01962             }
01963           return 5;
01964         }
01965     }
01966   else
01967     {
01968       /* Indicate that the minimum length is currently unknown since the IPA
01969          has not loaded yet.  */
01970       return 0;
01971     }
01972 }
01973 
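/* Illustrative sketch, not part of the build: how a caller might act
   on the value returned above.  0 means "unknown until the in-process
   agent is loaded", 4 means a rel16 jump through a trampoline fits,
   and 5 means a full rel32 jump is required.  The helper name and its
   use are hypothetical.  */
#if 0
static int
can_fast_tracepoint_over (int insn_len)
{
  int min_len = x86_get_min_fast_tracepoint_insn_len ();

  /* The instruction shadowed by the jump must be at least as long as
     the jump we plan to write over it.  */
  return min_len != 0 && insn_len >= min_len;
}
#endif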
01974 static void
01975 add_insns (unsigned char *start, int len)
01976 {
01977   CORE_ADDR buildaddr = current_insn_ptr;
01978 
01979   if (debug_threads)
01980     fprintf (stderr, "Adding %d bytes of insn at %s\n",
01981              len, paddress (buildaddr));
01982 
01983   append_insns (&buildaddr, len, start);
01984   current_insn_ptr = buildaddr;
01985 }
01986 
01987 /* Our general strategy for emitting code is to avoid specifying raw
01988    bytes whenever possible, and instead copy a block of inline asm
01989    that is embedded in the function.  This is a little messy, because
01990    we need to keep the compiler from discarding what looks like dead
01991    code, plus suppress various warnings.  */
01992 
01993 #define EMIT_ASM(NAME, INSNS)                                           \
01994   do                                                                    \
01995     {                                                                   \
01996       extern unsigned char start_ ## NAME, end_ ## NAME;                \
01997       add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);     \
01998       __asm__ ("jmp end_" #NAME "\n"                                    \
01999                "\t" "start_" #NAME ":"                                  \
02000                "\t" INSNS "\n"                                          \
02001                "\t" "end_" #NAME ":");                                  \
02002     } while (0)
02003 
02004 #ifdef __x86_64__
02005 
02006 #define EMIT_ASM32(NAME,INSNS)                                          \
02007   do                                                                    \
02008     {                                                                   \
02009       extern unsigned char start_ ## NAME, end_ ## NAME;                \
02010       add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);     \
02011       __asm__ (".code32\n"                                              \
02012                "\t" "jmp end_" #NAME "\n"                               \
02013                "\t" "start_" #NAME ":\n"                                \
02014                "\t" INSNS "\n"                                          \
02015                "\t" "end_" #NAME ":\n"                                  \
02016                ".code64\n");                                            \
02017     } while (0)
02018 
02019 #else
02020 
02021 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
02022 
02023 #endif
02024 
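/* For illustration, not part of the build: a hypothetical
   EMIT_ASM (sample, "pop %rax") expands to roughly the following.
   The inline asm never executes here -- control flow jumps straight
   over it -- but the bytes assembled between the two labels end up in
   this function's body, and add_insns copies them into the inferior's
   jump pad.  */
#if 0
do
  {
    extern unsigned char start_sample, end_sample;
    add_insns (&start_sample, &end_sample - &start_sample);
    __asm__ ("jmp end_sample\n"
             "\t" "start_sample:" "\t" "pop %rax\n"
             "\t" "end_sample:");
  } while (0);
#endif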
02025 #ifdef __x86_64__
02026 
02027 static void
02028 amd64_emit_prologue (void)
02029 {
02030   EMIT_ASM (amd64_prologue,
02031             "pushq %rbp\n\t"
02032             "movq %rsp,%rbp\n\t"
02033             "sub $0x20,%rsp\n\t"
02034             "movq %rdi,-8(%rbp)\n\t"
02035             "movq %rsi,-16(%rbp)");
02036 }
02037 
02038 
02039 static void
02040 amd64_emit_epilogue (void)
02041 {
02042   EMIT_ASM (amd64_epilogue,
02043             "movq -16(%rbp),%rdi\n\t"
02044             "movq %rax,(%rdi)\n\t"
02045             "xor %rax,%rax\n\t"
02046             "leave\n\t"
02047             "ret");
02048 }
02049 
02050 static void
02051 amd64_emit_add (void)
02052 {
02053   EMIT_ASM (amd64_add,
02054             "add (%rsp),%rax\n\t"
02055             "lea 0x8(%rsp),%rsp");
02056 }
02057 
02058 static void
02059 amd64_emit_sub (void)
02060 {
02061   EMIT_ASM (amd64_sub,
02062             "sub %rax,(%rsp)\n\t"
02063             "pop %rax");
02064 }
02065 
02066 static void
02067 amd64_emit_mul (void)
02068 {
02069   emit_error = 1;
02070 }
02071 
02072 static void
02073 amd64_emit_lsh (void)
02074 {
02075   emit_error = 1;
02076 }
02077 
02078 static void
02079 amd64_emit_rsh_signed (void)
02080 {
02081   emit_error = 1;
02082 }
02083 
02084 static void
02085 amd64_emit_rsh_unsigned (void)
02086 {
02087   emit_error = 1;
02088 }
02089 
02090 static void
02091 amd64_emit_ext (int arg)
02092 {
02093   switch (arg)
02094     {
02095     case 8:
02096       EMIT_ASM (amd64_ext_8,
02097                 "cbtw\n\t"
02098                 "cwtl\n\t"
02099                 "cltq");
02100       break;
02101     case 16:
02102       EMIT_ASM (amd64_ext_16,
02103                 "cwtl\n\t"
02104                 "cltq");
02105       break;
02106     case 32:
02107       EMIT_ASM (amd64_ext_32,
02108                 "cltq");
02109       break;
02110     default:
02111       emit_error = 1;
02112     }
02113 }
02114 
02115 static void
02116 amd64_emit_log_not (void)
02117 {
02118   EMIT_ASM (amd64_log_not,
02119             "test %rax,%rax\n\t"
02120             "sete %cl\n\t"
02121             "movzbq %cl,%rax");
02122 }
02123 
02124 static void
02125 amd64_emit_bit_and (void)
02126 {
02127   EMIT_ASM (amd64_and,
02128             "and (%rsp),%rax\n\t"
02129             "lea 0x8(%rsp),%rsp");
02130 }
02131 
02132 static void
02133 amd64_emit_bit_or (void)
02134 {
02135   EMIT_ASM (amd64_or,
02136             "or (%rsp),%rax\n\t"
02137             "lea 0x8(%rsp),%rsp");
02138 }
02139 
02140 static void
02141 amd64_emit_bit_xor (void)
02142 {
02143   EMIT_ASM (amd64_xor,
02144             "xor (%rsp),%rax\n\t"
02145             "lea 0x8(%rsp),%rsp");
02146 }
02147 
02148 static void
02149 amd64_emit_bit_not (void)
02150 {
02151   EMIT_ASM (amd64_bit_not,
02152             "xorq $0xffffffffffffffff,%rax");
02153 }
02154 
02155 static void
02156 amd64_emit_equal (void)
02157 {
02158   EMIT_ASM (amd64_equal,
02159             "cmp %rax,(%rsp)\n\t"
02160             "je .Lamd64_equal_true\n\t"
02161             "xor %rax,%rax\n\t"
02162             "jmp .Lamd64_equal_end\n\t"
02163             ".Lamd64_equal_true:\n\t"
02164             "mov $0x1,%rax\n\t"
02165             ".Lamd64_equal_end:\n\t"
02166             "lea 0x8(%rsp),%rsp");
02167 }
02168 
02169 static void
02170 amd64_emit_less_signed (void)
02171 {
02172   EMIT_ASM (amd64_less_signed,
02173             "cmp %rax,(%rsp)\n\t"
02174             "jl .Lamd64_less_signed_true\n\t"
02175             "xor %rax,%rax\n\t"
02176             "jmp .Lamd64_less_signed_end\n\t"
02177             ".Lamd64_less_signed_true:\n\t"
02178             "mov $1,%rax\n\t"
02179             ".Lamd64_less_signed_end:\n\t"
02180             "lea 0x8(%rsp),%rsp");
02181 }
02182 
02183 static void
02184 amd64_emit_less_unsigned (void)
02185 {
02186   EMIT_ASM (amd64_less_unsigned,
02187             "cmp %rax,(%rsp)\n\t"
02188             "jb .Lamd64_less_unsigned_true\n\t"
02189             "xor %rax,%rax\n\t"
02190             "jmp .Lamd64_less_unsigned_end\n\t"
02191             ".Lamd64_less_unsigned_true:\n\t"
02192             "mov $1,%rax\n\t"
02193             ".Lamd64_less_unsigned_end:\n\t"
02194             "lea 0x8(%rsp),%rsp");
02195 }
02196 
02197 static void
02198 amd64_emit_ref (int size)
02199 {
02200   switch (size)
02201     {
02202     case 1:
02203       EMIT_ASM (amd64_ref1,
02204                 "movb (%rax),%al");
02205       break;
02206     case 2:
02207       EMIT_ASM (amd64_ref2,
02208                 "movw (%rax),%ax");
02209       break;
02210     case 4:
02211       EMIT_ASM (amd64_ref4,
02212                 "movl (%rax),%eax");
02213       break;
02214     case 8:
02215       EMIT_ASM (amd64_ref8,
02216                 "movq (%rax),%rax");
02217       break;
02218     }
02219 }
02220 
02221 static void
02222 amd64_emit_if_goto (int *offset_p, int *size_p)
02223 {
02224   EMIT_ASM (amd64_if_goto,
02225             "mov %rax,%rcx\n\t"
02226             "pop %rax\n\t"
02227             "cmp $0,%rcx\n\t"
02228             ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
02229   if (offset_p)
02230     *offset_p = 10;
02231   if (size_p)
02232     *size_p = 4;
02233 }
02234 
02235 static void
02236 amd64_emit_goto (int *offset_p, int *size_p)
02237 {
02238   EMIT_ASM (amd64_goto,
02239             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
02240   if (offset_p)
02241     *offset_p = 1;
02242   if (size_p)
02243     *size_p = 4;
02244 }
02245 
02246 static void
02247 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
02248 {
02249   int diff = (to - (from + size));
02250   unsigned char buf[sizeof (int)];
02251 
02252   if (size != 4)
02253     {
02254       emit_error = 1;
02255       return;
02256     }
02257 
02258   memcpy (buf, &diff, sizeof (int));
02259   write_inferior_memory (from, buf, sizeof (int));
02260 }
02261 
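/* Illustrative sketch, not part of the build: how the bytecode
   compiler is expected to use the pair above.  amd64_emit_goto lays
   down "e9 00 00 00 00" and reports (via *OFFSET_P and *SIZE_P) that
   a 4-byte displacement lives 1 byte in; once the destination is
   known, amd64_write_goto_address patches the displacement relative
   to the end of the 5-byte instruction.  */
#if 0
static void
emit_forward_goto_example (CORE_ADDR destination)
{
  int offset, size;
  CORE_ADDR goto_insn = current_insn_ptr;

  amd64_emit_goto (&offset, &size);     /* placeholder jump */
  /* ... emit intervening code, eventually learning DESTINATION ... */
  amd64_write_goto_address (goto_insn + offset, destination, size);
}
#endif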
02262 static void
02263 amd64_emit_const (LONGEST num)
02264 {
02265   unsigned char buf[16];
02266   int i;
02267   CORE_ADDR buildaddr = current_insn_ptr;
02268 
02269   i = 0;
02270   buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
02271   memcpy (&buf[i], &num, sizeof (num));
02272   i += 8;
02273   append_insns (&buildaddr, i, buf);
02274   current_insn_ptr = buildaddr;
02275 }
02276 
02277 static void
02278 amd64_emit_call (CORE_ADDR fn)
02279 {
02280   unsigned char buf[16];
02281   int i;
02282   CORE_ADDR buildaddr;
02283   LONGEST offset64;
02284 
02285   /* The destination function lives in the shared library and may be
02286      more than 31 bits away from the compiled code pad.  */
02287 
02288   buildaddr = current_insn_ptr;
02289 
02290   offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
02291 
02292   i = 0;
02293 
02294   if (offset64 > INT_MAX || offset64 < INT_MIN)
02295     {
02296       /* Offset is too large for a call.  Use an indirect callq via
02297          a register.  The bytes below encode %rdx, which is
02298          call-clobbered, so we don't have to push/pop it.  */
02299       buf[i++] = 0x48; /* movabs $fn,%rdx */
02300       buf[i++] = 0xba;
02301       memcpy (buf + i, &fn, 8);
02302       i += 8;
02303       buf[i++] = 0xff; /* callq *%rdx */
02304       buf[i++] = 0xd2;
02305     }
02306   else
02307     {
02308       int offset32 = offset64; /* we know we can't overflow here.  */
02309       buf[i++] = 0xe8; /* call <reladdr> */
02310       memcpy (buf + i, &offset32, 4); i += 4;
02311     }
02312 
02313   append_insns (&buildaddr, i, buf);
02314   current_insn_ptr = buildaddr;
02315 }
02316 
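/* For illustration, not part of the build: the two call encodings the
   function above can produce.  When the target is within +/-2GB a
   single 5-byte relative call suffices; otherwise the full 64-bit
   address is materialized in a register first:

     e8 xx xx xx xx                     call   rel32
   vs.
     48 ba xx xx xx xx xx xx xx xx      movabs $fn,%rdx
     ff d2                              callq  *%rdx  */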
02317 static void
02318 amd64_emit_reg (int reg)
02319 {
02320   unsigned char buf[16];
02321   int i;
02322   CORE_ADDR buildaddr;
02323 
02324   /* Assume raw_regs is still in %rdi.  */
02325   buildaddr = current_insn_ptr;
02326   i = 0;
02327   buf[i++] = 0xbe; /* mov $<n>,%esi */
02328   memcpy (&buf[i], &reg, sizeof (reg));
02329   i += 4;
02330   append_insns (&buildaddr, i, buf);
02331   current_insn_ptr = buildaddr;
02332   amd64_emit_call (get_raw_reg_func_addr ());
02333 }
02334 
02335 static void
02336 amd64_emit_pop (void)
02337 {
02338   EMIT_ASM (amd64_pop,
02339             "pop %rax");
02340 }
02341 
02342 static void
02343 amd64_emit_stack_flush (void)
02344 {
02345   EMIT_ASM (amd64_stack_flush,
02346             "push %rax");
02347 }
02348 
02349 static void
02350 amd64_emit_zero_ext (int arg)
02351 {
02352   switch (arg)
02353     {
02354     case 8:
02355       EMIT_ASM (amd64_zero_ext_8,
02356                 "and $0xff,%rax");
02357       break;
02358     case 16:
02359       EMIT_ASM (amd64_zero_ext_16,
02360                 "and $0xffff,%rax");
02361       break;
02362     case 32:
02363       EMIT_ASM (amd64_zero_ext_32,
02364                 "mov $0xffffffff,%rcx\n\t"
02365                 "and %rcx,%rax");
02366       break;
02367     default:
02368       emit_error = 1;
02369     }
02370 }
02371 
02372 static void
02373 amd64_emit_swap (void)
02374 {
02375   EMIT_ASM (amd64_swap,
02376             "mov %rax,%rcx\n\t"
02377             "pop %rax\n\t"
02378             "push %rcx");
02379 }
02380 
02381 static void
02382 amd64_emit_stack_adjust (int n)
02383 {
02384   unsigned char buf[16];
02385   int i;
02386   CORE_ADDR buildaddr = current_insn_ptr;
02387 
02388   i = 0;
02389   buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
02390   buf[i++] = 0x8d;
02391   buf[i++] = 0x64;
02392   buf[i++] = 0x24;
02393   /* This only handles adjustments up to 15 (n * 8 must fit in a signed 8-bit displacement), but we don't expect any more.  */
02394   buf[i++] = n * 8;
02395   append_insns (&buildaddr, i, buf);
02396   current_insn_ptr = buildaddr;
02397 }
02398 
02399 /* FN's prototype is `LONGEST(*fn)(int)'.  */
02400 
02401 static void
02402 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
02403 {
02404   unsigned char buf[16];
02405   int i;
02406   CORE_ADDR buildaddr;
02407 
02408   buildaddr = current_insn_ptr;
02409   i = 0;
02410   buf[i++] = 0xbf; /* movl $<n>,%edi */
02411   memcpy (&buf[i], &arg1, sizeof (arg1));
02412   i += 4;
02413   append_insns (&buildaddr, i, buf);
02414   current_insn_ptr = buildaddr;
02415   amd64_emit_call (fn);
02416 }
02417 
02418 /* FN's prototype is `void(*fn)(int,LONGEST)'.  */
02419 
02420 static void
02421 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
02422 {
02423   unsigned char buf[16];
02424   int i;
02425   CORE_ADDR buildaddr;
02426 
02427   buildaddr = current_insn_ptr;
02428   i = 0;
02429   buf[i++] = 0xbf; /* movl $<n>,%edi */
02430   memcpy (&buf[i], &arg1, sizeof (arg1));
02431   i += 4;
02432   append_insns (&buildaddr, i, buf);
02433   current_insn_ptr = buildaddr;
02434   EMIT_ASM (amd64_void_call_2_a,
02435             /* Save away a copy of the stack top.  */
02436             "push %rax\n\t"
02437             /* Also pass top as the second argument.  */
02438             "mov %rax,%rsi");
02439   amd64_emit_call (fn);
02440   EMIT_ASM (amd64_void_call_2_b,
02441             /* Restore the stack top; %rax may have been trashed.  */
02442             "pop %rax");
02443 }
02444 
02445 void
02446 amd64_emit_eq_goto (int *offset_p, int *size_p)
02447 {
02448   EMIT_ASM (amd64_eq,
02449             "cmp %rax,(%rsp)\n\t"
02450             "jne .Lamd64_eq_fallthru\n\t"
02451             "lea 0x8(%rsp),%rsp\n\t"
02452             "pop %rax\n\t"
02453             /* jmp, but don't trust the assembler to choose the right jump */
02454             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
02455             ".Lamd64_eq_fallthru:\n\t"
02456             "lea 0x8(%rsp),%rsp\n\t"
02457             "pop %rax");
02458 
02459   if (offset_p)
02460     *offset_p = 13;
02461   if (size_p)
02462     *size_p = 4;
02463 }
02464 
02465 void
02466 amd64_emit_ne_goto (int *offset_p, int *size_p)
02467 {
02468   EMIT_ASM (amd64_ne,
02469             "cmp %rax,(%rsp)\n\t"
02470             "je .Lamd64_ne_fallthru\n\t"
02471             "lea 0x8(%rsp),%rsp\n\t"
02472             "pop %rax\n\t"
02473             /* jmp, but don't trust the assembler to choose the right jump */
02474             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
02475             ".Lamd64_ne_fallthru:\n\t"
02476             "lea 0x8(%rsp),%rsp\n\t"
02477             "pop %rax");
02478 
02479   if (offset_p)
02480     *offset_p = 13;
02481   if (size_p)
02482     *size_p = 4;
02483 }
02484 
02485 void
02486 amd64_emit_lt_goto (int *offset_p, int *size_p)
02487 {
02488   EMIT_ASM (amd64_lt,
02489             "cmp %rax,(%rsp)\n\t"
02490             "jnl .Lamd64_lt_fallthru\n\t"
02491             "lea 0x8(%rsp),%rsp\n\t"
02492             "pop %rax\n\t"
02493             /* jmp, but don't trust the assembler to choose the right jump */
02494             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
02495             ".Lamd64_lt_fallthru:\n\t"
02496             "lea 0x8(%rsp),%rsp\n\t"
02497             "pop %rax");
02498 
02499   if (offset_p)
02500     *offset_p = 13;
02501   if (size_p)
02502     *size_p = 4;
02503 }
02504 
02505 void
02506 amd64_emit_le_goto (int *offset_p, int *size_p)
02507 {
02508   EMIT_ASM (amd64_le,
02509             "cmp %rax,(%rsp)\n\t"
02510             "jnle .Lamd64_le_fallthru\n\t"
02511             "lea 0x8(%rsp),%rsp\n\t"
02512             "pop %rax\n\t"
02513             /* jmp, but don't trust the assembler to choose the right jump */
02514             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
02515             ".Lamd64_le_fallthru:\n\t"
02516             "lea 0x8(%rsp),%rsp\n\t"
02517             "pop %rax");
02518 
02519   if (offset_p)
02520     *offset_p = 13;
02521   if (size_p)
02522     *size_p = 4;
02523 }
02524 
02525 void
02526 amd64_emit_gt_goto (int *offset_p, int *size_p)
02527 {
02528   EMIT_ASM (amd64_gt,
02529             "cmp %rax,(%rsp)\n\t"
02530             "jng .Lamd64_gt_fallthru\n\t"
02531             "lea 0x8(%rsp),%rsp\n\t"
02532             "pop %rax\n\t"
02533             /* jmp, but don't trust the assembler to choose the right jump */
02534             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
02535             ".Lamd64_gt_fallthru:\n\t"
02536             "lea 0x8(%rsp),%rsp\n\t"
02537             "pop %rax");
02538 
02539   if (offset_p)
02540     *offset_p = 13;
02541   if (size_p)
02542     *size_p = 4;
02543 }
02544 
02545 void
02546 amd64_emit_ge_goto (int *offset_p, int *size_p)
02547 {
02548   EMIT_ASM (amd64_ge,
02549             "cmp %rax,(%rsp)\n\t"
02550             "jnge .Lamd64_ge_fallthru\n\t"
02551             ".Lamd64_ge_jump:\n\t"
02552             "lea 0x8(%rsp),%rsp\n\t"
02553             "pop %rax\n\t"
02554             /* jmp, but don't trust the assembler to choose the right jump */
02555             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
02556             ".Lamd64_ge_fallthru:\n\t"
02557             "lea 0x8(%rsp),%rsp\n\t"
02558             "pop %rax");
02559 
02560   if (offset_p)
02561     *offset_p = 13;
02562   if (size_p)
02563     *size_p = 4;
02564 }
02565 
02566 struct emit_ops amd64_emit_ops =
02567   {
02568     amd64_emit_prologue,
02569     amd64_emit_epilogue,
02570     amd64_emit_add,
02571     amd64_emit_sub,
02572     amd64_emit_mul,
02573     amd64_emit_lsh,
02574     amd64_emit_rsh_signed,
02575     amd64_emit_rsh_unsigned,
02576     amd64_emit_ext,
02577     amd64_emit_log_not,
02578     amd64_emit_bit_and,
02579     amd64_emit_bit_or,
02580     amd64_emit_bit_xor,
02581     amd64_emit_bit_not,
02582     amd64_emit_equal,
02583     amd64_emit_less_signed,
02584     amd64_emit_less_unsigned,
02585     amd64_emit_ref,
02586     amd64_emit_if_goto,
02587     amd64_emit_goto,
02588     amd64_write_goto_address,
02589     amd64_emit_const,
02590     amd64_emit_call,
02591     amd64_emit_reg,
02592     amd64_emit_pop,
02593     amd64_emit_stack_flush,
02594     amd64_emit_zero_ext,
02595     amd64_emit_swap,
02596     amd64_emit_stack_adjust,
02597     amd64_emit_int_call_1,
02598     amd64_emit_void_call_2,
02599     amd64_emit_eq_goto,
02600     amd64_emit_ne_goto,
02601     amd64_emit_lt_goto,
02602     amd64_emit_le_goto,
02603     amd64_emit_gt_goto,
02604     amd64_emit_ge_goto
02605   };
02606 
02607 #endif /* __x86_64__ */
02608 
02609 static void
02610 i386_emit_prologue (void)
02611 {
02612   EMIT_ASM32 (i386_prologue,
02613             "push %ebp\n\t"
02614             "mov %esp,%ebp\n\t"
02615             "push %ebx");
02616   /* At this point, the raw regs base address is at 8(%ebp), and the
02617      value pointer is at 12(%ebp).  */
02618 }
02619 
02620 static void
02621 i386_emit_epilogue (void)
02622 {
02623   EMIT_ASM32 (i386_epilogue,
02624             "mov 12(%ebp),%ecx\n\t"
02625             "mov %eax,(%ecx)\n\t"
02626             "mov %ebx,0x4(%ecx)\n\t"
02627             "xor %eax,%eax\n\t"
02628             "pop %ebx\n\t"
02629             "pop %ebp\n\t"
02630             "ret");
02631 }
02632 
02633 static void
02634 i386_emit_add (void)
02635 {
02636   EMIT_ASM32 (i386_add,
02637             "add (%esp),%eax\n\t"
02638             "adc 0x4(%esp),%ebx\n\t"
02639             "lea 0x8(%esp),%esp");
02640 }
02641 
02642 static void
02643 i386_emit_sub (void)
02644 {
02645   EMIT_ASM32 (i386_sub,
02646             "subl %eax,(%esp)\n\t"
02647             "sbbl %ebx,4(%esp)\n\t"
02648             "pop %eax\n\t"
02649             "pop %ebx\n\t");
02650 }
02651 
02652 static void
02653 i386_emit_mul (void)
02654 {
02655   emit_error = 1;
02656 }
02657 
02658 static void
02659 i386_emit_lsh (void)
02660 {
02661   emit_error = 1;
02662 }
02663 
02664 static void
02665 i386_emit_rsh_signed (void)
02666 {
02667   emit_error = 1;
02668 }
02669 
02670 static void
02671 i386_emit_rsh_unsigned (void)
02672 {
02673   emit_error = 1;
02674 }
02675 
02676 static void
02677 i386_emit_ext (int arg)
02678 {
02679   switch (arg)
02680     {
02681     case 8:
02682       EMIT_ASM32 (i386_ext_8,
02683                 "cbtw\n\t"
02684                 "cwtl\n\t"
02685                 "movl %eax,%ebx\n\t"
02686                 "sarl $31,%ebx");
02687       break;
02688     case 16:
02689       EMIT_ASM32 (i386_ext_16,
02690                 "cwtl\n\t"
02691                 "movl %eax,%ebx\n\t"
02692                 "sarl $31,%ebx");
02693       break;
02694     case 32:
02695       EMIT_ASM32 (i386_ext_32,
02696                 "movl %eax,%ebx\n\t"
02697                 "sarl $31,%ebx");
02698       break;
02699     default:
02700       emit_error = 1;
02701     }
02702 }
02703 
02704 static void
02705 i386_emit_log_not (void)
02706 {
02707   EMIT_ASM32 (i386_log_not,
02708             "or %ebx,%eax\n\t"
02709             "test %eax,%eax\n\t"
02710             "sete %cl\n\t"
02711             "xor %ebx,%ebx\n\t"
02712             "movzbl %cl,%eax");
02713 }
02714 
02715 static void
02716 i386_emit_bit_and (void)
02717 {
02718   EMIT_ASM32 (i386_and,
02719             "and (%esp),%eax\n\t"
02720             "and 0x4(%esp),%ebx\n\t"
02721             "lea 0x8(%esp),%esp");
02722 }
02723 
02724 static void
02725 i386_emit_bit_or (void)
02726 {
02727   EMIT_ASM32 (i386_or,
02728             "or (%esp),%eax\n\t"
02729             "or 0x4(%esp),%ebx\n\t"
02730             "lea 0x8(%esp),%esp");
02731 }
02732 
02733 static void
02734 i386_emit_bit_xor (void)
02735 {
02736   EMIT_ASM32 (i386_xor,
02737             "xor (%esp),%eax\n\t"
02738             "xor 0x4(%esp),%ebx\n\t"
02739             "lea 0x8(%esp),%esp");
02740 }
02741 
02742 static void
02743 i386_emit_bit_not (void)
02744 {
02745   EMIT_ASM32 (i386_bit_not,
02746             "xor $0xffffffff,%eax\n\t"
02747             "xor $0xffffffff,%ebx\n\t");
02748 }
02749 
02750 static void
02751 i386_emit_equal (void)
02752 {
02753   EMIT_ASM32 (i386_equal,
02754             "cmpl %ebx,4(%esp)\n\t"
02755             "jne .Li386_equal_false\n\t"
02756             "cmpl %eax,(%esp)\n\t"
02757             "je .Li386_equal_true\n\t"
02758             ".Li386_equal_false:\n\t"
02759             "xor %eax,%eax\n\t"
02760             "jmp .Li386_equal_end\n\t"
02761             ".Li386_equal_true:\n\t"
02762             "mov $1,%eax\n\t"
02763             ".Li386_equal_end:\n\t"
02764             "xor %ebx,%ebx\n\t"
02765             "lea 0x8(%esp),%esp");
02766 }
02767 
02768 static void
02769 i386_emit_less_signed (void)
02770 {
02771   EMIT_ASM32 (i386_less_signed,
02772             "cmpl %ebx,4(%esp)\n\t"
02773             "jl .Li386_less_signed_true\n\t"
02774             "jne .Li386_less_signed_false\n\t"
02775             "cmpl %eax,(%esp)\n\t"
02776             "jl .Li386_less_signed_true\n\t"
02777             ".Li386_less_signed_false:\n\t"
02778             "xor %eax,%eax\n\t"
02779             "jmp .Li386_less_signed_end\n\t"
02780             ".Li386_less_signed_true:\n\t"
02781             "mov $1,%eax\n\t"
02782             ".Li386_less_signed_end:\n\t"
02783             "xor %ebx,%ebx\n\t"
02784             "lea 0x8(%esp),%esp");
02785 }
02786 
02787 static void
02788 i386_emit_less_unsigned (void)
02789 {
02790   EMIT_ASM32 (i386_less_unsigned,
02791             "cmpl %ebx,4(%esp)\n\t"
02792             "jb .Li386_less_unsigned_true\n\t"
02793             "jne .Li386_less_unsigned_false\n\t"
02794             "cmpl %eax,(%esp)\n\t"
02795             "jb .Li386_less_unsigned_true\n\t"
02796             ".Li386_less_unsigned_false:\n\t"
02797             "xor %eax,%eax\n\t"
02798             "jmp .Li386_less_unsigned_end\n\t"
02799             ".Li386_less_unsigned_true:\n\t"
02800             "mov $1,%eax\n\t"
02801             ".Li386_less_unsigned_end:\n\t"
02802             "xor %ebx,%ebx\n\t"
02803             "lea 0x8(%esp),%esp");
02804 }
02805 
02806 static void
02807 i386_emit_ref (int size)
02808 {
02809   switch (size)
02810     {
02811     case 1:
02812       EMIT_ASM32 (i386_ref1,
02813                 "movb (%eax),%al");
02814       break;
02815     case 2:
02816       EMIT_ASM32 (i386_ref2,
02817                 "movw (%eax),%ax");
02818       break;
02819     case 4:
02820       EMIT_ASM32 (i386_ref4,
02821                 "movl (%eax),%eax");
02822       break;
02823     case 8:
02824       EMIT_ASM32 (i386_ref8,
02825                 "movl 4(%eax),%ebx\n\t"
02826                 "movl (%eax),%eax");
02827       break;
02828     }
02829 }
02830 
02831 static void
02832 i386_emit_if_goto (int *offset_p, int *size_p)
02833 {
02834   EMIT_ASM32 (i386_if_goto,
02835             "mov %eax,%ecx\n\t"
02836             "or %ebx,%ecx\n\t"
02837             "pop %eax\n\t"
02838             "pop %ebx\n\t"
02839             "cmpl $0,%ecx\n\t"
02840             /* Don't trust the assembler to choose the right jump */
02841             ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
02842 
02843   if (offset_p)
02844     *offset_p = 11; /* be sure that this matches the sequence above */
02845   if (size_p)
02846     *size_p = 4;
02847 }
02848 
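/* For illustration, not part of the build: where the 11 above comes
   from.  As GCC typically assembles the sequence:

     89 c1        mov  %eax,%ecx    (2 bytes)
     09 d9        or   %ebx,%ecx    (2 bytes)
     58           pop  %eax         (1 byte)
     5b           pop  %ebx         (1 byte)
     83 f9 00     cmpl $0,%ecx      (3 bytes)
     0f 85 ...    jne  rel32        (2 opcode bytes)

   2+2+1+1+3+2 = 11, so the 4-byte displacement to be patched starts
   11 bytes into the block, which is what *OFFSET_P reports.  */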
02849 static void
02850 i386_emit_goto (int *offset_p, int *size_p)
02851 {
02852   EMIT_ASM32 (i386_goto,
02853             /* Don't trust the assembler to choose the right jump */
02854             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
02855   if (offset_p)
02856     *offset_p = 1;
02857   if (size_p)
02858     *size_p = 4;
02859 }
02860 
02861 static void
02862 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
02863 {
02864   int diff = (to - (from + size));
02865   unsigned char buf[sizeof (int)];
02866 
02867   /* We're only doing 4-byte sizes at the moment.  */
02868   if (size != 4)
02869     {
02870       emit_error = 1;
02871       return;
02872     }
02873 
02874   memcpy (buf, &diff, sizeof (int));
02875   write_inferior_memory (from, buf, sizeof (int));
02876 }
02877 
02878 static void
02879 i386_emit_const (LONGEST num)
02880 {
02881   unsigned char buf[16];
02882   int i, hi, lo;
02883   CORE_ADDR buildaddr = current_insn_ptr;
02884 
02885   i = 0;
02886   buf[i++] = 0xb8; /* mov $<n>,%eax */
02887   lo = num & 0xffffffff;
02888   memcpy (&buf[i], &lo, sizeof (lo));
02889   i += 4;
02890   hi = ((num >> 32) & 0xffffffff);
02891   if (hi)
02892     {
02893       buf[i++] = 0xbb; /* mov $<n>,%ebx */
02894       memcpy (&buf[i], &hi, sizeof (hi));
02895       i += 4;
02896     }
02897   else
02898     {
02899       buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
02900     }
02901   append_insns (&buildaddr, i, buf);
02902   current_insn_ptr = buildaddr;
02903 }
02904 
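/* For illustration, not part of the build: the i386 emitters model the
   64-bit top-of-stack value as the %ebx:%eax register pair (high:low).
   For num = 0x100000002 the function above therefore emits:

     b8 02 00 00 00     mov $0x2,%eax    (low half)
     bb 01 00 00 00     mov $0x1,%ebx    (high half)

   while a value whose high half is zero gets the shorter
   "31 db" (xor %ebx,%ebx) instead of the second mov.  */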
02905 static void
02906 i386_emit_call (CORE_ADDR fn)
02907 {
02908   unsigned char buf[16];
02909   int i, offset;
02910   CORE_ADDR buildaddr;
02911 
02912   buildaddr = current_insn_ptr;
02913   i = 0;
02914   buf[i++] = 0xe8; /* call <reladdr> */
02915   offset = ((int) fn) - (buildaddr + 5);
02916   memcpy (buf + 1, &offset, 4);
02917   append_insns (&buildaddr, 5, buf);
02918   current_insn_ptr = buildaddr;
02919 }
02920 
02921 static void
02922 i386_emit_reg (int reg)
02923 {
02924   unsigned char buf[16];
02925   int i;
02926   CORE_ADDR buildaddr;
02927 
02928   EMIT_ASM32 (i386_reg_a,
02929             "sub $0x8,%esp");
02930   buildaddr = current_insn_ptr;
02931   i = 0;
02932   buf[i++] = 0xb8; /* mov $<n>,%eax */
02933   memcpy (&buf[i], &reg, sizeof (reg));
02934   i += 4;
02935   append_insns (&buildaddr, i, buf);
02936   current_insn_ptr = buildaddr;
02937   EMIT_ASM32 (i386_reg_b,
02938             "mov %eax,4(%esp)\n\t"
02939             "mov 8(%ebp),%eax\n\t"
02940             "mov %eax,(%esp)");
02941   i386_emit_call (get_raw_reg_func_addr ());
02942   EMIT_ASM32 (i386_reg_c,
02943             "xor %ebx,%ebx\n\t"
02944             "lea 0x8(%esp),%esp");
02945 }
02946 
02947 static void
02948 i386_emit_pop (void)
02949 {
02950   EMIT_ASM32 (i386_pop,
02951             "pop %eax\n\t"
02952             "pop %ebx");
02953 }
02954 
02955 static void
02956 i386_emit_stack_flush (void)
02957 {
02958   EMIT_ASM32 (i386_stack_flush,
02959             "push %ebx\n\t"
02960             "push %eax");
02961 }
02962 
02963 static void
02964 i386_emit_zero_ext (int arg)
02965 {
02966   switch (arg)
02967     {
02968     case 8:
02969       EMIT_ASM32 (i386_zero_ext_8,
02970                 "and $0xff,%eax\n\t"
02971                 "xor %ebx,%ebx");
02972       break;
02973     case 16:
02974       EMIT_ASM32 (i386_zero_ext_16,
02975                 "and $0xffff,%eax\n\t"
02976                 "xor %ebx,%ebx");
02977       break;
02978     case 32:
02979       EMIT_ASM32 (i386_zero_ext_32,
02980                 "xor %ebx,%ebx");
02981       break;
02982     default:
02983       emit_error = 1;
02984     }
02985 }
02986 
02987 static void
02988 i386_emit_swap (void)
02989 {
02990   EMIT_ASM32 (i386_swap,
02991             "mov %eax,%ecx\n\t"
02992             "mov %ebx,%edx\n\t"
02993             "pop %eax\n\t"
02994             "pop %ebx\n\t"
02995             "push %edx\n\t"
02996             "push %ecx");
02997 }
02998 
02999 static void
03000 i386_emit_stack_adjust (int n)
03001 {
03002   unsigned char buf[16];
03003   int i;
03004   CORE_ADDR buildaddr = current_insn_ptr;
03005 
03006   i = 0;
03007   buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
03008   buf[i++] = 0x64;
03009   buf[i++] = 0x24;
03010   buf[i++] = n * 8;
03011   append_insns (&buildaddr, i, buf);
03012   current_insn_ptr = buildaddr;
03013 }
03014 
03015 /* FN's prototype is `LONGEST(*fn)(int)'.  */
03016 
03017 static void
03018 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
03019 {
03020   unsigned char buf[16];
03021   int i;
03022   CORE_ADDR buildaddr;
03023 
03024   EMIT_ASM32 (i386_int_call_1_a,
03025             /* Reserve a bit of stack space.  */
03026             "sub $0x8,%esp");
03027   /* Put the one argument on the stack.  */
03028   buildaddr = current_insn_ptr;
03029   i = 0;
03030   buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
03031   buf[i++] = 0x04;
03032   buf[i++] = 0x24;
03033   memcpy (&buf[i], &arg1, sizeof (arg1));
03034   i += 4;
03035   append_insns (&buildaddr, i, buf);
03036   current_insn_ptr = buildaddr;
03037   i386_emit_call (fn);
03038   EMIT_ASM32 (i386_int_call_1_c,
03039             "mov %edx,%ebx\n\t"
03040             "lea 0x8(%esp),%esp");
03041 }
03042 
03043 /* FN's prototype is `void(*fn)(int,LONGEST)'.  */
03044 
03045 static void
03046 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
03047 {
03048   unsigned char buf[16];
03049   int i;
03050   CORE_ADDR buildaddr;
03051 
03052   EMIT_ASM32 (i386_void_call_2_a,
03053             /* Preserve %eax only; we don't have to worry about %ebx.  */
03054             "push %eax\n\t"
03055             /* Reserve a bit of stack space for arguments.  */
03056             "sub $0x10,%esp\n\t"
03057             /* Copy "top" to the second argument position.  (Note that
03058                we can't assume the function won't scribble on its
03059                arguments, so don't try to restore from this.)  */
03060             "mov %eax,4(%esp)\n\t"
03061             "mov %ebx,8(%esp)");
03062   /* Put the first argument on the stack.  */
03063   buildaddr = current_insn_ptr;
03064   i = 0;
03065   buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
03066   buf[i++] = 0x04;
03067   buf[i++] = 0x24;
03068   memcpy (&buf[i], &arg1, sizeof (arg1));
03069   i += 4;
03070   append_insns (&buildaddr, i, buf);
03071   current_insn_ptr = buildaddr;
03072   i386_emit_call (fn);
03073   EMIT_ASM32 (i386_void_call_2_b,
03074             "lea 0x10(%esp),%esp\n\t"
03075             /* Restore original stack top.  */
03076             "pop %eax");
03077 }
03078 
03079 
03080 void
03081 i386_emit_eq_goto (int *offset_p, int *size_p)
03082 {
03083   EMIT_ASM32 (eq,
03084               /* Check the low half first; it's more likely to be the decider.  */
03085               "cmpl %eax,(%esp)\n\t"
03086               "jne .Leq_fallthru\n\t"
03087               "cmpl %ebx,4(%esp)\n\t"
03088               "jne .Leq_fallthru\n\t"
03089               "lea 0x8(%esp),%esp\n\t"
03090               "pop %eax\n\t"
03091               "pop %ebx\n\t"
03092               /* jmp, but don't trust the assembler to choose the right jump */
03093               ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
03094               ".Leq_fallthru:\n\t"
03095               "lea 0x8(%esp),%esp\n\t"
03096               "pop %eax\n\t"
03097               "pop %ebx");
03098 
03099   if (offset_p)
03100     *offset_p = 18;
03101   if (size_p)
03102     *size_p = 4;
03103 }
03104 
03105 void
03106 i386_emit_ne_goto (int *offset_p, int *size_p)
03107 {
03108   EMIT_ASM32 (ne,
03109               /* Check the low half first; it's more likely to be the decider.  */
03110               "cmpl %eax,(%esp)\n\t"
03111               "jne .Lne_jump\n\t"
03112               "cmpl %ebx,4(%esp)\n\t"
03113               "je .Lne_fallthru\n\t"
03114               ".Lne_jump:\n\t"
03115               "lea 0x8(%esp),%esp\n\t"
03116               "pop %eax\n\t"
03117               "pop %ebx\n\t"
03118               /* jmp, but don't trust the assembler to choose the right jump */
03119               ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
03120               ".Lne_fallthru:\n\t"
03121               "lea 0x8(%esp),%esp\n\t"
03122               "pop %eax\n\t"
03123               "pop %ebx");
03124 
03125   if (offset_p)
03126     *offset_p = 18;
03127   if (size_p)
03128     *size_p = 4;
03129 }
03130 
03131 void
03132 i386_emit_lt_goto (int *offset_p, int *size_p)
03133 {
03134   EMIT_ASM32 (lt,
03135               "cmpl %ebx,4(%esp)\n\t"
03136               "jl .Llt_jump\n\t"
03137               "jne .Llt_fallthru\n\t"
03138               "cmpl %eax,(%esp)\n\t"
03139               "jnl .Llt_fallthru\n\t"
03140               ".Llt_jump:\n\t"
03141               "lea 0x8(%esp),%esp\n\t"
03142               "pop %eax\n\t"
03143               "pop %ebx\n\t"
03144               /* jmp, but don't trust the assembler to choose the right jump */
03145               ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
03146               ".Llt_fallthru:\n\t"
03147               "lea 0x8(%esp),%esp\n\t"
03148               "pop %eax\n\t"
03149               "pop %ebx");
03150 
03151   if (offset_p)
03152     *offset_p = 20;
03153   if (size_p)
03154     *size_p = 4;
03155 }
03156 
03157 void
03158 i386_emit_le_goto (int *offset_p, int *size_p)
03159 {
03160   EMIT_ASM32 (le,
03161               "cmpl %ebx,4(%esp)\n\t"
03162               "jle .Lle_jump\n\t"
03163               "jne .Lle_fallthru\n\t"
03164               "cmpl %eax,(%esp)\n\t"
03165               "jnle .Lle_fallthru\n\t"
03166               ".Lle_jump:\n\t"
03167               "lea 0x8(%esp),%esp\n\t"
03168               "pop %eax\n\t"
03169               "pop %ebx\n\t"
03170               /* jmp, but don't trust the assembler to choose the right jump */
03171               ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
03172               ".Lle_fallthru:\n\t"
03173               "lea 0x8(%esp),%esp\n\t"
03174               "pop %eax\n\t"
03175               "pop %ebx");
03176 
03177   if (offset_p)
03178     *offset_p = 20;
03179   if (size_p)
03180     *size_p = 4;
03181 }
03182 
03183 void
03184 i386_emit_gt_goto (int *offset_p, int *size_p)
03185 {
03186   EMIT_ASM32 (gt,
03187               "cmpl %ebx,4(%esp)\n\t"
03188               "jg .Lgt_jump\n\t"
03189               "jne .Lgt_fallthru\n\t"
03190               "cmpl %eax,(%esp)\n\t"
03191               "jng .Lgt_fallthru\n\t"
03192               ".Lgt_jump:\n\t"
03193               "lea 0x8(%esp),%esp\n\t"
03194               "pop %eax\n\t"
03195               "pop %ebx\n\t"
03196               /* jmp, but don't trust the assembler to choose the right jump */
03197               ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
03198               ".Lgt_fallthru:\n\t"
03199               "lea 0x8(%esp),%esp\n\t"
03200               "pop %eax\n\t"
03201               "pop %ebx");
03202 
03203   if (offset_p)
03204     *offset_p = 20;
03205   if (size_p)
03206     *size_p = 4;
03207 }
03208 
03209 void
03210 i386_emit_ge_goto (int *offset_p, int *size_p)
03211 {
03212   EMIT_ASM32 (ge,
03213               "cmpl %ebx,4(%esp)\n\t"
03214               "jge .Lge_jump\n\t"
03215               "jne .Lge_fallthru\n\t"
03216               "cmpl %eax,(%esp)\n\t"
03217               "jnge .Lge_fallthru\n\t"
03218               ".Lge_jump:\n\t"
03219               "lea 0x8(%esp),%esp\n\t"
03220               "pop %eax\n\t"
03221               "pop %ebx\n\t"
03222               /* jmp, but don't trust the assembler to choose the right jump */
03223               ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
03224               ".Lge_fallthru:\n\t"
03225               "lea 0x8(%esp),%esp\n\t"
03226               "pop %eax\n\t"
03227               "pop %ebx");
03228 
03229   if (offset_p)
03230     *offset_p = 20;
03231   if (size_p)
03232     *size_p = 4;
03233 }
03234 
03235 struct emit_ops i386_emit_ops =
03236   {
03237     i386_emit_prologue,
03238     i386_emit_epilogue,
03239     i386_emit_add,
03240     i386_emit_sub,
03241     i386_emit_mul,
03242     i386_emit_lsh,
03243     i386_emit_rsh_signed,
03244     i386_emit_rsh_unsigned,
03245     i386_emit_ext,
03246     i386_emit_log_not,
03247     i386_emit_bit_and,
03248     i386_emit_bit_or,
03249     i386_emit_bit_xor,
03250     i386_emit_bit_not,
03251     i386_emit_equal,
03252     i386_emit_less_signed,
03253     i386_emit_less_unsigned,
03254     i386_emit_ref,
03255     i386_emit_if_goto,
03256     i386_emit_goto,
03257     i386_write_goto_address,
03258     i386_emit_const,
03259     i386_emit_call,
03260     i386_emit_reg,
03261     i386_emit_pop,
03262     i386_emit_stack_flush,
03263     i386_emit_zero_ext,
03264     i386_emit_swap,
03265     i386_emit_stack_adjust,
03266     i386_emit_int_call_1,
03267     i386_emit_void_call_2,
03268     i386_emit_eq_goto,
03269     i386_emit_ne_goto,
03270     i386_emit_lt_goto,
03271     i386_emit_le_goto,
03272     i386_emit_gt_goto,
03273     i386_emit_ge_goto
03274   };
03275 
03276 
03277 static struct emit_ops *
03278 x86_emit_ops (void)
03279 {
03280 #ifdef __x86_64__
03281   if (is_64bit_tdesc ())
03282     return &amd64_emit_ops;
03283   else
03284 #endif
03285     return &i386_emit_ops;
03286 }
03287 
03288 static int
03289 x86_supports_range_stepping (void)
03290 {
03291   return 1;
03292 }
03293 
03294 /* This is initialized assuming an amd64 target.
03295    x86_arch_setup will correct it for i386 or amd64 targets.  */
03296 
03297 struct linux_target_ops the_low_target =
03298 {
03299   x86_arch_setup,
03300   x86_linux_regs_info,
03301   x86_cannot_fetch_register,
03302   x86_cannot_store_register,
03303   NULL, /* fetch_register */
03304   x86_get_pc,
03305   x86_set_pc,
03306   x86_breakpoint,
03307   x86_breakpoint_len,
03308   NULL, /* breakpoint_reinsert_addr */
03309   1, /* decr_pc_after_break */
03310   x86_breakpoint_at,
03311   x86_insert_point,
03312   x86_remove_point,
03313   x86_stopped_by_watchpoint,
03314   x86_stopped_data_address,
03315   /* collect_ptrace_register/supply_ptrace_register are not needed in the
03316      native i386 case (no registers smaller than an xfer unit), and are not
03317      used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
03318   NULL,
03319   NULL,
03320   /* need to fix up i386 siginfo if host is amd64 */
03321   x86_siginfo_fixup,
03322   x86_linux_new_process,
03323   x86_linux_new_thread,
03324   x86_linux_prepare_to_resume,
03325   x86_linux_process_qsupported,
03326   x86_supports_tracepoints,
03327   x86_get_thread_area,
03328   x86_install_fast_tracepoint_jump_pad,
03329   x86_emit_ops,
03330   x86_get_min_fast_tracepoint_insn_len,
03331   x86_supports_range_stepping,
03332 };
03333 
03334 void
03335 initialize_low_arch (void)
03336 {
03337   /* Initialize the Linux target descriptions.  */
03338 #ifdef __x86_64__
03339   init_registers_amd64_linux ();
03340   init_registers_amd64_avx_linux ();
03341   init_registers_x32_linux ();
03342   init_registers_x32_avx_linux ();
03343 
03344   tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
03345   copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
03346   tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
03347 #endif
03348   init_registers_i386_linux ();
03349   init_registers_i386_mmx_linux ();
03350   init_registers_i386_avx_linux ();
03351 
03352   tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
03353   copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
03354   tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
03355 
03356   initialize_regsets_info (&x86_regsets_info);
03357 }