GDB (API)
/home/stan/gdb/src/gdb/amd64-tdep.c
Go to the documentation of this file.
00001 /* Target-dependent code for AMD64.
00002 
00003    Copyright (C) 2001-2013 Free Software Foundation, Inc.
00004 
00005    Contributed by Jiri Smid, SuSE Labs.
00006 
00007    This file is part of GDB.
00008 
00009    This program is free software; you can redistribute it and/or modify
00010    it under the terms of the GNU General Public License as published by
00011    the Free Software Foundation; either version 3 of the License, or
00012    (at your option) any later version.
00013 
00014    This program is distributed in the hope that it will be useful,
00015    but WITHOUT ANY WARRANTY; without even the implied warranty of
00016    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00017    GNU General Public License for more details.
00018 
00019    You should have received a copy of the GNU General Public License
00020    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
00021 
00022 #include "defs.h"
00023 #include "opcode/i386.h"
00024 #include "dis-asm.h"
00025 #include "arch-utils.h"
00026 #include "block.h"
00027 #include "dummy-frame.h"
00028 #include "frame.h"
00029 #include "frame-base.h"
00030 #include "frame-unwind.h"
00031 #include "inferior.h"
00032 #include "gdbcmd.h"
00033 #include "gdbcore.h"
00034 #include "objfiles.h"
00035 #include "regcache.h"
00036 #include "regset.h"
00037 #include "symfile.h"
00038 #include "disasm.h"
00039 #include "gdb_assert.h"
00040 #include "exceptions.h"
00041 #include "amd64-tdep.h"
00042 #include "i387-tdep.h"
00043 
00044 #include "features/i386/amd64.c"
00045 #include "features/i386/amd64-avx.c"
00046 #include "features/i386/x32.c"
00047 #include "features/i386/x32-avx.c"
00048 
00049 #include "ax.h"
00050 #include "ax-gdb.h"
00051 
00052 /* Note that the AMD64 architecture was previously known as x86-64.
00053    The latter is (forever) engraved into the canonical system name as
00054    returned by config.guess, and used as the name for the AMD64 port
00055    of GNU/Linux.  The BSD's have renamed their ports to amd64; they
00056    don't like to shout.  For GDB we prefer the amd64_-prefix over the
00057    x86_64_-prefix since it's so much easier to type.  */
00058 
00059 /* Register information.  */
00060 
/* Names of the raw (non-pseudo) AMD64 registers, indexed by GDB
   register number.  The numbering comments below tie the layout to the
   AMD64_*_REGNUM constants — presumably those in amd64-tdep.h; verify
   against that header.  */
static const char *amd64_register_names[] = 
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};
00078 
/* Names of the full 256-bit YMM pseudo registers (AVX).  These combine
   the raw %xmmN registers with the %ymmNh upper halves below.  */
static const char *amd64_ymm_names[] = 
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};
00086 
/* Names of the raw registers holding the upper 128 bits of the YMM
   registers, as supplied by the target's XSAVE state.  */
static const char *amd64_ymmh_names[] = 
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};
00094 
00095 /* DWARF Register Number Mapping as defined in the System V psABI,
00096    section 3.6.  */
00097 
00098 static int amd64_dwarf_regmap[] =
00099 {
00100   /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
00101   AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
00102   AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
00103   AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
00104 
00105   /* Frame Pointer Register RBP.  */
00106   AMD64_RBP_REGNUM,
00107 
00108   /* Stack Pointer Register RSP.  */
00109   AMD64_RSP_REGNUM,
00110 
00111   /* Extended Integer Registers 8 - 15.  */
00112   AMD64_R8_REGNUM,              /* %r8 */
00113   AMD64_R9_REGNUM,              /* %r9 */
00114   AMD64_R10_REGNUM,             /* %r10 */
00115   AMD64_R11_REGNUM,             /* %r11 */
00116   AMD64_R12_REGNUM,             /* %r12 */
00117   AMD64_R13_REGNUM,             /* %r13 */
00118   AMD64_R14_REGNUM,             /* %r14 */
00119   AMD64_R15_REGNUM,             /* %r15 */
00120 
00121   /* Return Address RA.  Mapped to RIP.  */
00122   AMD64_RIP_REGNUM,
00123 
00124   /* SSE Registers 0 - 7.  */
00125   AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
00126   AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
00127   AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
00128   AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
00129 
00130   /* Extended SSE Registers 8 - 15.  */
00131   AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
00132   AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
00133   AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
00134   AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
00135 
00136   /* Floating Point Registers 0-7.  */
00137   AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
00138   AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
00139   AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
00140   AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
00141   
00142   /* Control and Status Flags Register.  */
00143   AMD64_EFLAGS_REGNUM,
00144 
00145   /* Selector Registers.  */
00146   AMD64_ES_REGNUM,
00147   AMD64_CS_REGNUM,
00148   AMD64_SS_REGNUM,
00149   AMD64_DS_REGNUM,
00150   AMD64_FS_REGNUM,
00151   AMD64_GS_REGNUM,
00152   -1,
00153   -1,
00154 
00155   /* Segment Base Address Registers.  */
00156   -1,
00157   -1,
00158   -1,
00159   -1,
00160 
00161   /* Special Selector Registers.  */
00162   -1,
00163   -1,
00164 
00165   /* Floating Point Control Registers.  */
00166   AMD64_MXCSR_REGNUM,
00167   AMD64_FCTRL_REGNUM,
00168   AMD64_FSTAT_REGNUM
00169 };
00170 
00171 static const int amd64_dwarf_regmap_len =
00172   (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
00173 
00174 /* Convert DWARF register number REG to the appropriate register
00175    number used by GDB.  */
00176 
00177 static int
00178 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
00179 {
00180   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
00181   int ymm0_regnum = tdep->ymm0_regnum;
00182   int regnum = -1;
00183 
00184   if (reg >= 0 && reg < amd64_dwarf_regmap_len)
00185     regnum = amd64_dwarf_regmap[reg];
00186 
00187   if (regnum == -1)
00188     warning (_("Unmapped DWARF Register #%d encountered."), reg);
00189   else if (ymm0_regnum >= 0
00190            && i386_xmm_regnum_p (gdbarch, regnum))
00191     regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
00192 
00193   return regnum;
00194 }
00195 
00196 /* Map architectural register numbers to gdb register numbers.  */
00197 
/* Map from the architectural (instruction-encoding) GPR numbers, as
   used in ModRM/SIB/REX fields, to GDB register numbers.  Note the
   encoding order differs from GDB's (e.g. %rcx is arch register 1 but
   not GDB register 1).  */
static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

/* Number of entries in the architectural register map above.  */
static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
00220 
00221 /* Convert architectural register number REG to the appropriate register
00222    number used by GDB.  */
00223 
/* Convert architectural register number REG (0..15, as encoded in
   instructions) to the GDB register number.  REG outside that range
   indicates a decoder bug, hence the assert rather than a warning.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}
00231 
00232 /* Register names for byte pseudo-registers.  */
00233 
/* Register names for byte pseudo-registers.  The first sixteen are the
   low bytes of the sixteen GPRs; the final four are the legacy
   high-byte registers of %rax/%rbx/%rcx/%rdx.  */

static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers; indices at or beyond this in
   amd64_byte_names are the high-byte registers AH/BH/CH/DH.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16
00243 
00244 /* Register names for word pseudo-registers.  */
00245 
/* Register names for word (16-bit) pseudo-registers.  The empty string
   in slot 7 deliberately leaves the %sp position unnamed — presumably
   so GDB does not expose a 16-bit stack-pointer alias; confirm against
   the i386 pseudo-register code before changing.  */

static const char *amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "", 
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};
00251 
00252 /* Register names for dword pseudo-registers.  */
00253 
/* Register names for dword (32-bit) pseudo-registers, including "eip"
   as the 32-bit view of %rip.  */

static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp", 
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};
00260 
00261 /* Return the name of register REGNUM.  */
00262 
00263 static const char *
00264 amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
00265 {
00266   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
00267   if (i386_byte_regnum_p (gdbarch, regnum))
00268     return amd64_byte_names[regnum - tdep->al_regnum];
00269   else if (i386_ymm_regnum_p (gdbarch, regnum))
00270     return amd64_ymm_names[regnum - tdep->ymm0_regnum];
00271   else if (i386_word_regnum_p (gdbarch, regnum))
00272     return amd64_word_names[regnum - tdep->ax_regnum];
00273   else if (i386_dword_regnum_p (gdbarch, regnum))
00274     return amd64_dword_names[regnum - tdep->eax_regnum];
00275   else
00276     return i386_pseudo_register_name (gdbarch, regnum);
00277 }
00278 
/* Implement the "pseudo_register_read_value" gdbarch method: build a
   struct value for pseudo register REGNUM from the raw register that
   backs it.  Bytes whose raw register could not be read are marked
   unavailable rather than failing the whole read.  */

static struct value *
amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
                                  struct regcache *regcache,
                                  int regnum)
{
  gdb_byte raw_buf[MAX_REGISTER_SIZE];
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum register_status status;
  struct value *result_value;
  gdb_byte *buf;

  /* Allocate the value up front and fill its contents buffer in
     place.  */
  result_value = allocate_value (register_type (gdbarch, regnum));
  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;
  buf = value_contents_raw (result_value);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      /* Extract (always little endian).  */
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
        {
          /* Special handling for AH, BH, CH, DH.  These live in bits
             8-15 of the first four GPRs, hence the "raw_buf + 1".  */
          status = regcache_raw_read (regcache,
                                      gpnum - AMD64_NUM_LOWER_BYTE_REGS,
                                      raw_buf);
          if (status == REG_VALID)
            memcpy (buf, raw_buf + 1, 1);
          else
            mark_value_bytes_unavailable (result_value, 0,
                                          TYPE_LENGTH (value_type (result_value)));
        }
      else
        {
          /* Low byte of the corresponding GPR.  */
          status = regcache_raw_read (regcache, gpnum, raw_buf);
          if (status == REG_VALID)
            memcpy (buf, raw_buf, 1);
          else
            mark_value_bytes_unavailable (result_value, 0,
                                          TYPE_LENGTH (value_type (result_value)));
        }
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;
      /* Extract (always little endian): low 4 bytes of the GPR.  */
      status = regcache_raw_read (regcache, gpnum, raw_buf);
      if (status == REG_VALID)
        memcpy (buf, raw_buf, 4);
      else
        mark_value_bytes_unavailable (result_value, 0,
                                      TYPE_LENGTH (value_type (result_value)));
    }
  else
    /* Word, YMM and any remaining pseudo registers are handled by the
       generic i386 code.  */
    i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
                                          result_value);

  return result_value;
}
00339 
/* Implement the "pseudo_register_write" gdbarch method: store BUF into
   pseudo register REGNUM by read-modify-writing the raw register that
   backs it, so the untouched bytes of the raw register are preserved.
   NOTE(review): unlike the read path, the regcache_raw_read status is
   not checked here — presumably a write implies readable registers;
   confirm.  */

static void
amd64_pseudo_register_write (struct gdbarch *gdbarch,
                             struct regcache *regcache,
                             int regnum, const gdb_byte *buf)
{
  gdb_byte raw_buf[MAX_REGISTER_SIZE];
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
        {
          /* Read ... AH, BH, CH, DH.  */
          regcache_raw_read (regcache,
                             gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
          /* ... Modify ... (always little endian).  High-byte
             registers occupy byte 1 of the GPR.  */
          memcpy (raw_buf + 1, buf, 1);
          /* ... Write.  */
          regcache_raw_write (regcache,
                              gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
        }
      else
        {
          /* Read ...  */
          regcache_raw_read (regcache, gpnum, raw_buf);
          /* ... Modify ... (always little endian).  */
          memcpy (raw_buf, buf, 1);
          /* ... Write.  */
          regcache_raw_write (regcache, gpnum, raw_buf);
        }
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      /* Read ...  */
      regcache_raw_read (regcache, gpnum, raw_buf);
      /* ... Modify ... (always little endian): replace the low 4
         bytes, keeping the upper half of the 64-bit GPR.  */
      memcpy (raw_buf, buf, 4);
      /* ... Write.  */
      regcache_raw_write (regcache, gpnum, raw_buf);
    }
  else
    /* All other pseudo registers are handled by the generic i386
       code.  */
    i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
}
00387 
00388 
00389 
00390 /* Register classes as defined in the psABI.  */
00391 
/* Argument/return-value classes from the System V psABI
   classification algorithm (one class per "eightbyte").  */
enum amd64_reg_class
{
  AMD64_INTEGER,	/* Passed in a general-purpose register.  */
  AMD64_SSE,		/* Passed in (the low half of) an SSE register.  */
  AMD64_SSEUP,		/* Upper half of the preceding SSE eightbyte.  */
  AMD64_X87,		/* Returned on the x87 stack (long double).  */
  AMD64_X87UP,		/* Exponent+padding half of an x87 value.  */
  AMD64_COMPLEX_X87,	/* complex long double, %st0/%st1.  */
  AMD64_NO_CLASS,	/* Empty/padding; identity for merging.  */
  AMD64_MEMORY		/* Passed in memory.  */
};
00403 
00404 /* Return the union class of CLASS1 and CLASS2.  See the psABI for
00405    details.  */
00406 
00407 static enum amd64_reg_class
00408 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
00409 {
00410   /* Rule (a): If both classes are equal, this is the resulting class.  */
00411   if (class1 == class2)
00412     return class1;
00413 
00414   /* Rule (b): If one of the classes is NO_CLASS, the resulting class
00415      is the other class.  */
00416   if (class1 == AMD64_NO_CLASS)
00417     return class2;
00418   if (class2 == AMD64_NO_CLASS)
00419     return class1;
00420 
00421   /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
00422   if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
00423     return AMD64_MEMORY;
00424 
00425   /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
00426   if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
00427     return AMD64_INTEGER;
00428 
00429   /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
00430      MEMORY is used as class.  */
00431   if (class1 == AMD64_X87 || class1 == AMD64_X87UP
00432       || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
00433       || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
00434     return AMD64_MEMORY;
00435 
00436   /* Rule (f): Otherwise class SSE is used.  */
00437   return AMD64_SSE;
00438 }
00439 
00440 static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);
00441 
00442 /* Return non-zero if TYPE is a non-POD structure or union type.  */
00443 
00444 static int
00445 amd64_non_pod_p (struct type *type)
00446 {
00447   /* ??? A class with a base class certainly isn't POD, but does this
00448      catch all non-POD structure types?  */
00449   if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
00450     return 1;
00451 
00452   return 0;
00453 }
00454 
00455 /* Classify TYPE according to the rules for aggregate (structures and
00456    arrays) and union types, and store the result in CLASS.  */
00457 
/* Classify the aggregate (struct, union or array) TYPE per the psABI
   and store the per-eightbyte classes in CLASS[0] (bytes 0-7) and
   CLASS[1] (bytes 8-15).  NOTE(review): the "unaligned fields" part of
   rule 1 quoted below is not actually checked by this code — confirm
   whether that matters for the types GDB sees.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type))
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered. The resulting class is
        calculated according to the classes of the fields in the
        eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, class);
      /* If the array spills into the second eightbyte but the element
         classification only set class[0], the second eightbyte holds
         more elements of the same class.  */
      if (TYPE_LENGTH (type) > 8 && class[1] == AMD64_NO_CLASS)
        class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
                  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
        {
          struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
          /* Eightbyte in which the field starts...  */
          int pos = TYPE_FIELD_BITPOS (type, i) / 64;
          enum amd64_reg_class subclass[2];
          int bitsize = TYPE_FIELD_BITSIZE (type, i);
          int endpos;

          /* A zero bitsize means "not a bit-field": use the full size
             of the field's type.  */
          if (bitsize == 0)
            bitsize = TYPE_LENGTH (subtype) * 8;
          /* ...and eightbyte in which it ends.  */
          endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;

          /* Ignore static fields.  */
          if (field_is_static (&TYPE_FIELD (type, i)))
            continue;

          gdb_assert (pos == 0 || pos == 1);

          amd64_classify (subtype, subclass);
          class[pos] = amd64_merge_classes (class[pos], subclass[0]);
          if (bitsize <= 64 && pos == 0 && endpos == 1)
            /* This is a bit of an odd case:  We have a field that would
               normally fit in one of the two eightbytes, except that
               it is placed in a way that this field straddles them.
               This has been seen with a structure containing an array.

               The ABI is a bit unclear in this case, but we assume that
               this field's class (stored in subclass[0]) must also be merged
               into class[1].  In other words, our field has a piece stored
               in the second eight-byte, and thus its class applies to
               the second eight-byte as well.

               In the case where the field length exceeds 8 bytes,
               it should not be necessary to merge the field class
               into class[1].  As LEN > 8, subclass[1] is necessarily
               different from AMD64_NO_CLASS.  If subclass[1] is equal
               to subclass[0], then the normal class[1]/subclass[1]
               merging will take care of everything.  For subclass[1]
               to be different from subclass[0], I can only see the case
               where we have a SSE/SSEUP or X87/X87UP pair, which both
               use up all 16 bytes of the aggregate, and are already
               handled just fine (because each portion sits on its own
               8-byte).  */
            class[1] = amd64_merge_classes (class[1], subclass[0]);
          /* A field starting in the first eightbyte may itself occupy
             both; fold its second-eightbyte class into class[1].  */
          if (pos == 0)
            class[1] = amd64_merge_classes (class[1], subclass[1]);
        }
    }

  /* 4. Then a post merger cleanup is done:  */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}
00558 
00559 /* Classify TYPE, and store the result in CLASS.  */
00560 
/* Classify TYPE per the psABI and store the result in CLASS[0]/CLASS[1]
   (one class per eightbyte).  Types not matched by any branch below
   (e.g. unsupported sizes) keep the NO_CLASS initialization.  The
   branch order matters: each test also checks LEN, so e.g. a 16-byte
   float is x87 long double while a 4/8-byte one is SSE.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class class[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  class[0] = class[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    class[0] = AMD64_INTEGER;

  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
           && (len == 4 || len == 8))
    /* FIXME: __m64 .  */
    class[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;

  /* Arguments of complex T where T is one of the types float or
     double get treated as if they are implemented as:

     struct complexT {
       T real;
       T imag;
     };  */
  else if (code == TYPE_CODE_COMPLEX && len == 8)
    /* complex float: both 4-byte parts share one SSE eightbyte.  */
    class[0] = AMD64_SSE;
  else if (code == TYPE_CODE_COMPLEX && len == 16)
    /* complex double: one 8-byte part per eightbyte.  */
    class[0] = class[1] = AMD64_SSE;

  /* A variable of type complex long double is classified as type
     COMPLEX_X87.  */
  else if (code == TYPE_CODE_COMPLEX && len == 32)
    class[0] = AMD64_COMPLEX_X87;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
           || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, class);
}
00623 
/* Implement the "return_value" gdbarch method: determine how a value
   of TYPE is returned per the psABI, and read it into READBUF or write
   it from WRITEBUF (exactly one may be non-NULL).  Returns the GDB
   return-value convention describing where the value lives.  */

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct value *function,
                    struct type *type, struct regcache *regcache,
                    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  /* Return registers per class, in allocation order.  */
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (class[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has
         returned.  */

      if (readbuf)
        {
          ULONGEST addr;

          regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
          read_memory (addr, readbuf, TYPE_LENGTH (type));
        }

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  /* 8. If the class is COMPLEX_X87, the real part of the value is
        returned in %st0 and the imaginary part in %st1.  */
  if (class[0] == AMD64_COMPLEX_X87)
    {
      if (readbuf)
        {
          /* Each part is a 16-byte long double slot.  */
          regcache_raw_read (regcache, AMD64_ST0_REGNUM, readbuf);
          regcache_raw_read (regcache, AMD64_ST1_REGNUM, readbuf + 16);
        }

      if (writebuf)
        {
          i387_return_value (gdbarch, regcache);
          regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf);
          regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16);

          /* Fix up the tag word such that both %st(0) and %st(1) are
             marked as valid.  */
          regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
        }

      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  /* Transfer one eightbyte per iteration; I indexes the eightbyte.  */
  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
        {
        case AMD64_INTEGER:
          /* 3. If the class is INTEGER, the next available register
             of the sequence %rax, %rdx is used.  */
          regnum = integer_regnum[integer_reg++];
          break;

        case AMD64_SSE:
          /* 4. If the class is SSE, the next available SSE register
             of the sequence %xmm0, %xmm1 is used.  */
          regnum = sse_regnum[sse_reg++];
          break;

        case AMD64_SSEUP:
          /* 5. If the class is SSEUP, the eightbyte is passed in the
             upper half of the last used SSE register.  */
          gdb_assert (sse_reg > 0);
          regnum = sse_regnum[sse_reg - 1];
          offset = 8;
          break;

        case AMD64_X87:
          /* 6. If the class is X87, the value is returned on the X87
             stack in %st0 as 80-bit x87 number.  */
          regnum = AMD64_ST0_REGNUM;
          if (writebuf)
            i387_return_value (gdbarch, regcache);
          break;

        case AMD64_X87UP:
          /* 7. If the class is X87UP, the value is returned together
             with the previous X87 value in %st0.  Only the 2-byte
             exponent remains; setting LEN to 2 transfers those bytes
             at offset 8 and makes this the final iteration.  */
          gdb_assert (i > 0 && class[0] == AMD64_X87);
          regnum = AMD64_ST0_REGNUM;
          offset = 8;
          len = 2;
          break;

        case AMD64_NO_CLASS:
          continue;

        default:
          gdb_assert (!"Unexpected register class.");
        }

      gdb_assert (regnum != -1);

      if (readbuf)
        regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
                                readbuf + i * 8);
      if (writebuf)
        regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
                                 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
00756 
00757 
00758 static CORE_ADDR
00759 amd64_push_arguments (struct regcache *regcache, int nargs,
00760                       struct value **args, CORE_ADDR sp, int struct_return)
00761 {
00762   static int integer_regnum[] =
00763   {
00764     AMD64_RDI_REGNUM,           /* %rdi */
00765     AMD64_RSI_REGNUM,           /* %rsi */
00766     AMD64_RDX_REGNUM,           /* %rdx */
00767     AMD64_RCX_REGNUM,           /* %rcx */
00768     AMD64_R8_REGNUM,            /* %r8 */
00769     AMD64_R9_REGNUM             /* %r9 */
00770   };
00771   static int sse_regnum[] =
00772   {
00773     /* %xmm0 ... %xmm7 */
00774     AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
00775     AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
00776     AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
00777     AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
00778   };
00779   struct value **stack_args = alloca (nargs * sizeof (struct value *));
00780   int num_stack_args = 0;
00781   int num_elements = 0;
00782   int element = 0;
00783   int integer_reg = 0;
00784   int sse_reg = 0;
00785   int i;
00786 
00787   /* Reserve a register for the "hidden" argument.  */
00788   if (struct_return)
00789     integer_reg++;
00790 
00791   for (i = 0; i < nargs; i++)
00792     {
00793       struct type *type = value_type (args[i]);
00794       int len = TYPE_LENGTH (type);
00795       enum amd64_reg_class class[2];
00796       int needed_integer_regs = 0;
00797       int needed_sse_regs = 0;
00798       int j;
00799 
00800       /* Classify argument.  */
00801       amd64_classify (type, class);
00802 
00803       /* Calculate the number of integer and SSE registers needed for
00804          this argument.  */
00805       for (j = 0; j < 2; j++)
00806         {
00807           if (class[j] == AMD64_INTEGER)
00808             needed_integer_regs++;
00809           else if (class[j] == AMD64_SSE)
00810             needed_sse_regs++;
00811         }
00812 
00813       /* Check whether enough registers are available, and if the
00814          argument should be passed in registers at all.  */
00815       if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
00816           || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
00817           || (needed_integer_regs == 0 && needed_sse_regs == 0))
00818         {
00819           /* The argument will be passed on the stack.  */
00820           num_elements += ((len + 7) / 8);
00821           stack_args[num_stack_args++] = args[i];
00822         }
00823       else
00824         {
00825           /* The argument will be passed in registers.  */
00826           const gdb_byte *valbuf = value_contents (args[i]);
00827           gdb_byte buf[8];
00828 
00829           gdb_assert (len <= 16);
00830 
00831           for (j = 0; len > 0; j++, len -= 8)
00832             {
00833               int regnum = -1;
00834               int offset = 0;
00835 
00836               switch (class[j])
00837                 {
00838                 case AMD64_INTEGER:
00839                   regnum = integer_regnum[integer_reg++];
00840                   break;
00841 
00842                 case AMD64_SSE:
00843                   regnum = sse_regnum[sse_reg++];
00844                   break;
00845 
00846                 case AMD64_SSEUP:
00847                   gdb_assert (sse_reg > 0);
00848                   regnum = sse_regnum[sse_reg - 1];
00849                   offset = 8;
00850                   break;
00851 
00852                 default:
00853                   gdb_assert (!"Unexpected register class.");
00854                 }
00855 
00856               gdb_assert (regnum != -1);
00857               memset (buf, 0, sizeof buf);
00858               memcpy (buf, valbuf + j * 8, min (len, 8));
00859               regcache_raw_write_part (regcache, regnum, offset, 8, buf);
00860             }
00861         }
00862     }
00863 
00864   /* Allocate space for the arguments on the stack.  */
00865   sp -= num_elements * 8;
00866 
00867   /* The psABI says that "The end of the input argument area shall be
00868      aligned on a 16 byte boundary."  */
00869   sp &= ~0xf;
00870 
00871   /* Write out the arguments to the stack.  */
00872   for (i = 0; i < num_stack_args; i++)
00873     {
00874       struct type *type = value_type (stack_args[i]);
00875       const gdb_byte *valbuf = value_contents (stack_args[i]);
00876       int len = TYPE_LENGTH (type);
00877 
00878       write_memory (sp + element * 8, valbuf, len);
00879       element += ((len + 7) / 8);
00880     }
00881 
00882   /* The psABI says that "For calls that may call functions that use
00883      varargs or stdargs (prototype-less calls or calls to functions
00884      containing ellipsis (...) in the declaration) %al is used as
00885      hidden argument to specify the number of SSE registers used.  */
00886   regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
00887   return sp; 
00888 }
00889 
00890 static CORE_ADDR
00891 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
00892                        struct regcache *regcache, CORE_ADDR bp_addr,
00893                        int nargs, struct value **args,  CORE_ADDR sp,
00894                        int struct_return, CORE_ADDR struct_addr)
00895 {
00896   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
00897   gdb_byte buf[8];
00898 
00899   /* Pass arguments.  */
00900   sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
00901 
00902   /* Pass "hidden" argument".  */
00903   if (struct_return)
00904     {
00905       store_unsigned_integer (buf, 8, byte_order, struct_addr);
00906       regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
00907     }
00908 
00909   /* Store return address.  */
00910   sp -= 8;
00911   store_unsigned_integer (buf, 8, byte_order, bp_addr);
00912   write_memory (sp, buf, 8);
00913 
00914   /* Finally, update the stack pointer...  */
00915   store_unsigned_integer (buf, 8, byte_order, sp);
00916   regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
00917 
00918   /* ...and fake a frame pointer.  */
00919   regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
00920 
00921   return sp + 16;
00922 }
00923 
00924 /* Displaced instruction handling.  */
00925 
/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.
   Filled in by amd64_get_insn_details; all offsets are byte offsets
   into RAW_INSN from the start of the instruction.  */

struct amd64_insn
{
  /* The number of opcode bytes (1, 2 or 3).  */
  int opcode_len;
  /* The offset of the rex prefix or -1 if not present.  */
  int rex_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  Not owned: points into the caller's buffer
     (see amd64_get_insn_details).  */
  gdb_byte *raw_insn;
};
00943 
/* State carried from amd64_displaced_step_copy_insn to
   amd64_displaced_step_fixup for one displaced instruction.  */

struct displaced_step_closure
{
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.
     TMP_USED is non-zero when TMP_REGNO/TMP_SAVE are valid (set by
     fixup_riprel).  */
  int tmp_used;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */
  int max_len;

  /* The possibly modified insn.
     This is a variable-length field.  */
  gdb_byte insn_buf[1];
};
00961 
00962 /* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
00963    ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
00964    at which point delete these in favor of libopcodes' versions).  */
00965 
/* For each one-byte opcode, 1 if a ModRM byte follows the opcode,
   else 0.  Indexed by the opcode byte itself.  */

static const unsigned char onebyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
};
00988 
/* Likewise for two-byte opcodes (those following a 0x0f escape),
   indexed by the second opcode byte.  */

static const unsigned char twobyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
};
01011 
01012 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
01013 
01014 static int
01015 rex_prefix_p (gdb_byte pfx)
01016 {
01017   return REX_PREFIX_P (pfx);
01018 }
01019 
01020 /* Skip the legacy instruction prefixes in INSN.
01021    We assume INSN is properly sentineled so we don't have to worry
01022    about falling off the end of the buffer.  */
01023 
01024 static gdb_byte *
01025 amd64_skip_prefixes (gdb_byte *insn)
01026 {
01027   while (1)
01028     {
01029       switch (*insn)
01030         {
01031         case DATA_PREFIX_OPCODE:
01032         case ADDR_PREFIX_OPCODE:
01033         case CS_PREFIX_OPCODE:
01034         case DS_PREFIX_OPCODE:
01035         case ES_PREFIX_OPCODE:
01036         case FS_PREFIX_OPCODE:
01037         case GS_PREFIX_OPCODE:
01038         case SS_PREFIX_OPCODE:
01039         case LOCK_PREFIX_OPCODE:
01040         case REPE_PREFIX_OPCODE:
01041         case REPNE_PREFIX_OPCODE:
01042           ++insn;
01043           continue;
01044         default:
01045           break;
01046         }
01047       break;
01048     }
01049 
01050   return insn;
01051 }
01052 
01053 /* Return an integer register (other than RSP) that is unused as an input
01054    operand in INSN.
01055    In order to not require adding a rex prefix if the insn doesn't already
01056    have one, the result is restricted to RAX ... RDI, sans RSP.
01057    The register numbering of the result follows architecture ordering,
01058    e.g. RDI = 7.  */
01059 
01060 static int
01061 amd64_get_unused_input_int_reg (const struct amd64_insn *details)
01062 {
01063   /* 1 bit for each reg */
01064   int used_regs_mask = 0;
01065 
01066   /* There can be at most 3 int regs used as inputs in an insn, and we have
01067      7 to choose from (RAX ... RDI, sans RSP).
01068      This allows us to take a conservative approach and keep things simple.
01069      E.g. By avoiding RAX, we don't have to specifically watch for opcodes
01070      that implicitly specify RAX.  */
01071 
01072   /* Avoid RAX.  */
01073   used_regs_mask |= 1 << EAX_REG_NUM;
01074   /* Similarily avoid RDX, implicit operand in divides.  */
01075   used_regs_mask |= 1 << EDX_REG_NUM;
01076   /* Avoid RSP.  */
01077   used_regs_mask |= 1 << ESP_REG_NUM;
01078 
01079   /* If the opcode is one byte long and there's no ModRM byte,
01080      assume the opcode specifies a register.  */
01081   if (details->opcode_len == 1 && details->modrm_offset == -1)
01082     used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
01083 
01084   /* Mark used regs in the modrm/sib bytes.  */
01085   if (details->modrm_offset != -1)
01086     {
01087       int modrm = details->raw_insn[details->modrm_offset];
01088       int mod = MODRM_MOD_FIELD (modrm);
01089       int reg = MODRM_REG_FIELD (modrm);
01090       int rm = MODRM_RM_FIELD (modrm);
01091       int have_sib = mod != 3 && rm == 4;
01092 
01093       /* Assume the reg field of the modrm byte specifies a register.  */
01094       used_regs_mask |= 1 << reg;
01095 
01096       if (have_sib)
01097         {
01098           int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
01099           int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
01100           used_regs_mask |= 1 << base;
01101           used_regs_mask |= 1 << idx;
01102         }
01103       else
01104         {
01105           used_regs_mask |= 1 << rm;
01106         }
01107     }
01108 
01109   gdb_assert (used_regs_mask < 256);
01110   gdb_assert (used_regs_mask != 255);
01111 
01112   /* Finally, find a free reg.  */
01113   {
01114     int i;
01115 
01116     for (i = 0; i < 8; ++i)
01117       {
01118         if (! (used_regs_mask & (1 << i)))
01119           return i;
01120       }
01121 
01122     /* We shouldn't get here.  */
01123     internal_error (__FILE__, __LINE__, _("unable to find free reg"));
01124   }
01125 }
01126 
/* Extract the details of INSN that we need.
   DETAILS->raw_insn aliases INSN (no copy is made); the offsets
   recorded are relative to INSN's first byte.  INSN must be
   sentineled (see amd64_skip_prefixes).  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  /* Mark everything "not present" until discovered below.  */
  details->opcode_len = -1;
  details->rex_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX instruction prefix.  */
  if (rex_prefix_p (*insn))
    {
      details->rex_offset = insn - start;
      ++insn;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      /* NOTE(review): for the three-byte escapes below, need_modrm is
         taken from the twobyte table entry of the *second* byte, not
         from the third opcode byte -- confirm this is intended for
         the insns displaced stepping cares about.  */
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
        {
        case 0x24:
        case 0x25:
        case 0x38:
        case 0x3a:
        case 0x7a:
        case 0x7b:
          ++insn;
          details->opcode_len = 3;
          break;
        default:
          details->opcode_len = 2;
          break;
        }
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      /* The ModRM byte immediately follows the last opcode byte.  */
      ++insn;
      details->modrm_offset = insn - start;
    }
}
01190 
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.
   The chosen base register's original value is saved in DSC so
   amd64_displaced_step_fixup can restore it.  */

static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
              CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int32_t disp;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.
     NOTE(review): DISP is extracted but not otherwise used; the
     original disp32 bytes are deliberately left in place.  */
  disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
                                          dsc->max_len, from);
  /* %rip refers to the address of the *following* instruction.  */
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  /* Save the scratch register's value for the fixup phase.  */
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp: clear mod and rm, then
     select mod = 2 (register + disp32) with rm = the scratch register.
     The displacement bytes are unchanged.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  /* Load the scratch register with the %rip value the original insn
     would have seen.  */
  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
                        "displaced: using temp reg %d, old value %s, new value %s\n",
                        dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
                        paddress (gdbarch, rip_base));
}
01250 
01251 static void
01252 fixup_displaced_copy (struct gdbarch *gdbarch,
01253                       struct displaced_step_closure *dsc,
01254                       CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
01255 {
01256   const struct amd64_insn *details = &dsc->insn_details;
01257 
01258   if (details->modrm_offset != -1)
01259     {
01260       gdb_byte modrm = details->raw_insn[details->modrm_offset];
01261 
01262       if ((modrm & 0xc7) == 0x05)
01263         {
01264           /* The insn uses rip-relative addressing.
01265              Deal with it.  */
01266           fixup_riprel (gdbarch, dsc, from, to, regs);
01267         }
01268     }
01269 }
01270 
/* Copy the instruction at FROM to TO, adjusting it as needed to run
   from TO, and return a closure describing the copy.  The closure is
   allocated with xmalloc; presumably the displaced-stepping core
   frees it -- verify against the gdbarch contract.  */

struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
                                CORE_ADDR from, CORE_ADDR to,
                                struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  /* The closure's insn_buf is a flexible trailing array; allocate
     room for the insn plus the sentinel bytes.  */
  struct displaced_step_closure *dsc =
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->tmp_used = 0;
  dsc->max_len = len + fixup_sentinel_space;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  /* Install the (possibly modified) copy at the scratch location.  */
  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
                          paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}
01322 
01323 static int
01324 amd64_absolute_jmp_p (const struct amd64_insn *details)
01325 {
01326   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
01327 
01328   if (insn[0] == 0xff)
01329     {
01330       /* jump near, absolute indirect (/4) */
01331       if ((insn[1] & 0x38) == 0x20)
01332         return 1;
01333 
01334       /* jump far, absolute indirect (/5) */
01335       if ((insn[1] & 0x38) == 0x28)
01336         return 1;
01337     }
01338 
01339   return 0;
01340 }
01341 
01342 static int
01343 amd64_absolute_call_p (const struct amd64_insn *details)
01344 {
01345   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
01346 
01347   if (insn[0] == 0xff)
01348     {
01349       /* Call near, absolute indirect (/2) */
01350       if ((insn[1] & 0x38) == 0x10)
01351         return 1;
01352 
01353       /* Call far, absolute indirect (/3) */
01354       if ((insn[1] & 0x38) == 0x18)
01355         return 1;
01356     }
01357 
01358   return 0;
01359 }
01360 
01361 static int
01362 amd64_ret_p (const struct amd64_insn *details)
01363 {
01364   /* NOTE: gcc can emit "repz ; ret".  */
01365   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
01366 
01367   switch (insn[0])
01368     {
01369     case 0xc2: /* ret near, pop N bytes */
01370     case 0xc3: /* ret near */
01371     case 0xca: /* ret far, pop N bytes */
01372     case 0xcb: /* ret far */
01373     case 0xcf: /* iret */
01374       return 1;
01375 
01376     default:
01377       return 0;
01378     }
01379 }
01380 
01381 static int
01382 amd64_call_p (const struct amd64_insn *details)
01383 {
01384   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
01385 
01386   if (amd64_absolute_call_p (details))
01387     return 1;
01388 
01389   /* call near, relative */
01390   if (insn[0] == 0xe8)
01391     return 1;
01392 
01393   return 0;
01394 }
01395 
01396 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
01397    length in bytes.  Otherwise, return zero.  */
01398 
01399 static int
01400 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
01401 {
01402   const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
01403 
01404   if (insn[0] == 0x0f && insn[1] == 0x05)
01405     {
01406       *lengthp = 2;
01407       return 1;
01408     }
01409 
01410   return 0;
01411 }
01412 
01413 /* Fix up the state of registers and memory after having single-stepped
01414    a displaced instruction.  */
01415 
01416 void
01417 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
01418                             struct displaced_step_closure *dsc,
01419                             CORE_ADDR from, CORE_ADDR to,
01420                             struct regcache *regs)
01421 {
01422   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
01423   /* The offset we applied to the instruction's address.  */
01424   ULONGEST insn_offset = to - from;
01425   gdb_byte *insn = dsc->insn_buf;
01426   const struct amd64_insn *insn_details = &dsc->insn_details;
01427 
01428   if (debug_displaced)
01429     fprintf_unfiltered (gdb_stdlog,
01430                         "displaced: fixup (%s, %s), "
01431                         "insn = 0x%02x 0x%02x ...\n",
01432                         paddress (gdbarch, from), paddress (gdbarch, to),
01433                         insn[0], insn[1]);
01434 
01435   /* If we used a tmp reg, restore it.  */
01436 
01437   if (dsc->tmp_used)
01438     {
01439       if (debug_displaced)
01440         fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
01441                             dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
01442       regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
01443     }
01444 
01445   /* The list of issues to contend with here is taken from
01446      resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
01447      Yay for Free Software!  */
01448 
01449   /* Relocate the %rip back to the program's instruction stream,
01450      if necessary.  */
01451 
01452   /* Except in the case of absolute or indirect jump or call
01453      instructions, or a return instruction, the new rip is relative to
01454      the displaced instruction; make it relative to the original insn.
01455      Well, signal handler returns don't need relocation either, but we use the
01456      value of %rip to recognize those; see below.  */
01457   if (! amd64_absolute_jmp_p (insn_details)
01458       && ! amd64_absolute_call_p (insn_details)
01459       && ! amd64_ret_p (insn_details))
01460     {
01461       ULONGEST orig_rip;
01462       int insn_len;
01463 
01464       regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
01465 
01466       /* A signal trampoline system call changes the %rip, resuming
01467          execution of the main program after the signal handler has
01468          returned.  That makes them like 'return' instructions; we
01469          shouldn't relocate %rip.
01470 
01471          But most system calls don't, and we do need to relocate %rip.
01472 
01473          Our heuristic for distinguishing these cases: if stepping
01474          over the system call instruction left control directly after
01475          the instruction, the we relocate --- control almost certainly
01476          doesn't belong in the displaced copy.  Otherwise, we assume
01477          the instruction has put control where it belongs, and leave
01478          it unrelocated.  Goodness help us if there are PC-relative
01479          system calls.  */
01480       if (amd64_syscall_p (insn_details, &insn_len)
01481           && orig_rip != to + insn_len
01482           /* GDB can get control back after the insn after the syscall.
01483              Presumably this is a kernel bug.
01484              Fixup ensures its a nop, we add one to the length for it.  */
01485           && orig_rip != to + insn_len + 1)
01486         {
01487           if (debug_displaced)
01488             fprintf_unfiltered (gdb_stdlog,
01489                                 "displaced: syscall changed %%rip; "
01490                                 "not relocating\n");
01491         }
01492       else
01493         {
01494           ULONGEST rip = orig_rip - insn_offset;
01495 
01496           /* If we just stepped over a breakpoint insn, we don't backup
01497              the pc on purpose; this is to match behaviour without
01498              stepping.  */
01499 
01500           regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
01501 
01502           if (debug_displaced)
01503             fprintf_unfiltered (gdb_stdlog,
01504                                 "displaced: "
01505                                 "relocated %%rip from %s to %s\n",
01506                                 paddress (gdbarch, orig_rip),
01507                                 paddress (gdbarch, rip));
01508         }
01509     }
01510 
01511   /* If the instruction was PUSHFL, then the TF bit will be set in the
01512      pushed value, and should be cleared.  We'll leave this for later,
01513      since GDB already messes up the TF flag when stepping over a
01514      pushfl.  */
01515 
01516   /* If the instruction was a call, the return address now atop the
01517      stack is the address following the copied instruction.  We need
01518      to make it the address following the original instruction.  */
01519   if (amd64_call_p (insn_details))
01520     {
01521       ULONGEST rsp;
01522       ULONGEST retaddr;
01523       const ULONGEST retaddr_len = 8;
01524 
01525       regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
01526       retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
01527       retaddr = (retaddr - insn_offset) & 0xffffffffUL;
01528       write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
01529 
01530       if (debug_displaced)
01531         fprintf_unfiltered (gdb_stdlog,
01532                             "displaced: relocated return addr at %s "
01533                             "to %s\n",
01534                             paddress (gdbarch, rsp),
01535                             paddress (gdbarch, retaddr));
01536     }
01537 }
01538 
01539 /* If the instruction INSN uses RIP-relative addressing, return the
01540    offset into the raw INSN where the displacement to be adjusted is
01541    found.  Returns 0 if the instruction doesn't use RIP-relative
01542    addressing.  */
01543 
01544 static int
01545 rip_relative_offset (struct amd64_insn *insn)
01546 {
01547   if (insn->modrm_offset != -1)
01548     {
01549       gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
01550 
01551       if ((modrm & 0xc7) == 0x05)
01552         {
01553           /* The displacement is found right after the ModRM byte.  */
01554           return insn->modrm_offset + 1;
01555         }
01556     }
01557 
01558   return 0;
01559 }
01560 
01561 static void
01562 append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
01563 {
01564   target_write_memory (*to, buf, len);
01565   *to += len;
01566 }
01567 
01568 static void
01569 amd64_relocate_instruction (struct gdbarch *gdbarch,
01570                             CORE_ADDR *to, CORE_ADDR oldloc)
01571 {
01572   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
01573   int len = gdbarch_max_insn_length (gdbarch);
01574   /* Extra space for sentinels.  */
01575   int fixup_sentinel_space = len;
01576   gdb_byte *buf = xmalloc (len + fixup_sentinel_space);
01577   struct amd64_insn insn_details;
01578   int offset = 0;
01579   LONGEST rel32, newrel;
01580   gdb_byte *insn;
01581   int insn_length;
01582 
01583   read_memory (oldloc, buf, len);
01584 
01585   /* Set up the sentinel space so we don't have to worry about running
01586      off the end of the buffer.  An excessive number of leading prefixes
01587      could otherwise cause this.  */
01588   memset (buf + len, 0, fixup_sentinel_space);
01589 
01590   insn = buf;
01591   amd64_get_insn_details (insn, &insn_details);
01592 
01593   insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
01594 
01595   /* Skip legacy instruction prefixes.  */
01596   insn = amd64_skip_prefixes (insn);
01597 
01598   /* Adjust calls with 32-bit relative addresses as push/jump, with
01599      the address pushed being the location where the original call in
01600      the user program would return to.  */
01601   if (insn[0] == 0xe8)
01602     {
01603       gdb_byte push_buf[16];
01604       unsigned int ret_addr;
01605 
01606       /* Where "ret" in the original code will return to.  */
01607       ret_addr = oldloc + insn_length;
01608       push_buf[0] = 0x68; /* pushq $...  */
01609       store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
01610       /* Push the push.  */
01611       append_insns (to, 5, push_buf);
01612 
01613       /* Convert the relative call to a relative jump.  */
01614       insn[0] = 0xe9;
01615 
01616       /* Adjust the destination offset.  */
01617       rel32 = extract_signed_integer (insn + 1, 4, byte_order);
01618       newrel = (oldloc - *to) + rel32;
01619       store_signed_integer (insn + 1, 4, byte_order, newrel);
01620 
01621       if (debug_displaced)
01622         fprintf_unfiltered (gdb_stdlog,
01623                             "Adjusted insn rel32=%s at %s to"
01624                             " rel32=%s at %s\n",
01625                             hex_string (rel32), paddress (gdbarch, oldloc),
01626                             hex_string (newrel), paddress (gdbarch, *to));
01627 
01628       /* Write the adjusted jump into its displaced location.  */
01629       append_insns (to, 5, insn);
01630       return;
01631     }
01632 
01633   offset = rip_relative_offset (&insn_details);
01634   if (!offset)
01635     {
01636       /* Adjust jumps with 32-bit relative addresses.  Calls are
01637          already handled above.  */
01638       if (insn[0] == 0xe9)
01639         offset = 1;
01640       /* Adjust conditional jumps.  */
01641       else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
01642         offset = 2;
01643     }
01644 
01645   if (offset)
01646     {
01647       rel32 = extract_signed_integer (insn + offset, 4, byte_order);
01648       newrel = (oldloc - *to) + rel32;
01649       store_signed_integer (insn + offset, 4, byte_order, newrel);
01650       if (debug_displaced)
01651         fprintf_unfiltered (gdb_stdlog,
01652                             "Adjusted insn rel32=%s at %s to"
01653                             " rel32=%s at %s\n",
01654                             hex_string (rel32), paddress (gdbarch, oldloc),
01655                             hex_string (newrel), paddress (gdbarch, *to));
01656     }
01657 
01658   /* Write the adjusted instruction into its displaced location.  */
01659   append_insns (to, insn_length, buf);
01660 }
01661 
01662 
/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS    AMD64_NUM_GREGS

/* Per-frame cache of prologue-analysis results (see
   amd64_init_frame_cache for field invariants).  */

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  /* Presumably non-zero once BASE has been computed -- verify against
     the unwinders that set it.  */
  int base_p;
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  Start out as offsets and are later converted to
     addresses; -1 means "not saved" (see amd64_init_frame_cache).  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};
01682 
01683 /* Initialize a frame cache.  */
01684 
01685 static void
01686 amd64_init_frame_cache (struct amd64_frame_cache *cache)
01687 {
01688   int i;
01689 
01690   /* Base address.  */
01691   cache->base = 0;
01692   cache->base_p = 0;
01693   cache->sp_offset = -8;
01694   cache->pc = 0;
01695 
01696   /* Saved registers.  We initialize these to -1 since zero is a valid
01697      offset (that's where %rbp is supposed to be stored).
01698      The values start out as being offsets, and are later converted to
01699      addresses (at which point -1 is interpreted as an address, still meaning
01700      "invalid").  */
01701   for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
01702     cache->saved_regs[i] = -1;
01703   cache->saved_sp = 0;
01704   cache->saved_sp_reg = -1;
01705 
01706   /* Frameless until proven otherwise.  */
01707   cache->frameless_p = 1;
01708 }
01709 
01710 /* Allocate and initialize a frame cache.  */
01711 
01712 static struct amd64_frame_cache *
01713 amd64_alloc_frame_cache (void)
01714 {
01715   struct amd64_frame_cache *cache;
01716 
01717   cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
01718   amd64_init_frame_cache (cache);
01719   return cache;
01720 }
01721 
/* GCC 4.4 and later can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
                           struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

        1. Use a caller-saved saved register:

                leaq  8(%rsp), %reg
                andq  $-XXX, %rsp
                pushq -8(%reg)

        2. Use a callee-saved saved register:

                pushq %reg
                leaq  16(%rsp), %reg
                andq  $-XXX, %rsp
                pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
     
        0x48 0x83 0xe4 0xf0                     andq $-16, %rsp
        0x48 0x81 0xe4 0x00 0xff 0xff 0xff      andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  The 0xfb mask accepts a REX prefix of
     0x48 (REX.W) or 0x4c (REX.WR).  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d          /* lea opcode.  */
      && buf[3] == 0x24          /* SIB byte: base %rsp, no index.  */
      && buf[4] == 0x8)          /* Displacement 8.  */
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
        return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
        reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
         has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
        offset = 0;              /* pushq without REX prefix.  */
      else if ((buf[0] & 0xf6) == 0x40
               && (buf[1] & 0xf8) == 0x50)
        {
          /* Check the REX.B bit.  */
          if ((buf[0] & 1) != 0)
            reg = 8;

          offset = 1;
        }
      else
        return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
          || buf[offset + 1] != 0x8d
          || buf[offset + 3] != 0x24
          || buf[offset + 4] != 0x10)
        return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
        return pc;
      
      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
        r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
        return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  /* Remember where the `and' is, then skip its 4- or 7-byte encoding.  */
  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
           && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
        r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Only record the scratch register if execution has already passed
     the `and' that clobbered %rsp.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return min (pc + offset + 2, current_pc);
}
01876 
/* Similar to amd64_analyze_stack_align for x32, where the `lea' and
   the final `push' may use 32-bit addressing (an addr32 0x67 prefix)
   and the `and' may operate on %esp instead of %rsp.  */

static CORE_ADDR
amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
                               struct amd64_frame_cache *cache) 
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

        1. Use a caller-saved saved register:

                leaq  8(%rsp), %reg
                andq  $-XXX, %rsp
                pushq -8(%reg)

           or

                [addr32] leal  8(%rsp), %reg
                andl  $-XXX, %esp
                [addr32] pushq -8(%reg)

        2. Use a callee-saved saved register:

                pushq %reg
                leaq  16(%rsp), %reg
                andq  $-XXX, %rsp
                pushq -8(%reg)

           or

                pushq %reg
                [addr32] leal  16(%rsp), %reg
                andl  $-XXX, %esp
                [addr32] pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
     
        0x48 0x83 0xe4 0xf0                     andq $-16, %rsp
        0x48 0x81 0xe4 0x00 0xff 0xff 0xff      andq $-256, %rsp

     "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
     
        0x83 0xe4 0xf0                  andl $-16, %esp
        0x81 0xe4 0x00 0xff 0xff 0xff   andl $-256, %esp
   */

  gdb_byte buf[19];
  int reg, r;
  int offset, offset_and;

  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Skip optional addr32 prefix.  */
  offset = buf[0] == 0x67 ? 1 : 0;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg".  The 0xfb
     mask accepts a REX prefix with or without REX.R, with REX.W
     (0x48/0x4c, leaq) or without it (0x40/0x44, leal).  */
  if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
      && buf[offset + 1] == 0x8d       /* lea opcode.  */
      && buf[offset + 3] == 0x24       /* SIB byte: base %rsp.  */
      && buf[offset + 4] == 0x8)       /* Displacement 8.  */
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
        return pc;

      /* REG has register number.  */
      reg = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
        reg += 8;

      offset += 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
         has to be "pushq %reg".  */
      reg = 0;
      if ((buf[offset] & 0xf6) == 0x40
          && (buf[offset + 1] & 0xf8) == 0x50)
        {
          /* Check the REX.B bit.  */
          if ((buf[offset] & 1) != 0)
            reg = 8;

          offset += 1;
        }
      else if ((buf[offset] & 0xf8) != 0x50)
        return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* Skip optional addr32 prefix.  */
      if (buf[offset] == 0x67)
        offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg" or
         "leal 16(%rsp), %reg".  */
      if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
          || buf[offset + 1] != 0x8d
          || buf[offset + 3] != 0x24
          || buf[offset + 4] != 0x10)
        return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
        return pc;
      
      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
        r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
        return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction may be "andq $-XXX, %rsp" or
     "andl $-XXX, %esp".  When there is no 0x48 (REX.W) prefix, back up
     one byte so the opcode and ModRM checks below can index the `and'
     as if the prefix were present.  */
  if (buf[offset] != 0x48)
    offset--;

  if (buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  /* Remember where the `and' is, then skip its encoding.  */
  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* Skip optional addr32 prefix.  */
  if (buf[offset] == 0x67)
    offset++;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
           && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
        r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Only record the scratch register if execution has already passed
     the `and' that clobbered the stack pointer.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return min (pc + offset + 2, current_pc);
}
02058 
/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5 (or 0x48 0x8b 0xec)

   or (for the X32 ABI):

      pushq %rbp        0x55
      movl %esp, %ebp   0x89 0xe5 (or 0x8b 0xec)

   Any function that doesn't start with one of these sequences will be
   assumed to have no prologue and thus no valid frame pointer in
   %rbp.  */

static CORE_ADDR
amd64_analyze_prologue (struct gdbarch *gdbarch,
                        CORE_ADDR pc, CORE_ADDR current_pc,
                        struct amd64_frame_cache *cache)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* There are two variations of movq %rsp, %rbp.  */
  static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
  static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
  /* Ditto for movl %esp, %ebp.  */
  static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
  static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };

  gdb_byte buf[3];
  gdb_byte op;

  if (current_pc <= pc)
    return current_pc;

  /* First recognize any stack-realignment sequence, which may precede
     the standard prologue.  */
  if (gdbarch_ptr_bit (gdbarch) == 32)
    pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
  else
    pc = amd64_analyze_stack_align (pc, current_pc, cache);

  op = read_memory_unsigned_integer (pc, 1, byte_order);

  if (op == 0x55)               /* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
         starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
        return current_pc;

      read_memory (pc + 1, buf, 3);

      /* Check for `movq %rsp, %rbp'.  */
      if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
          || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
        {
          /* OK, we actually have a frame.  */
          cache->frameless_p = 0;
          return pc + 4;
        }

      /* For X32, also check for `movl %esp, %ebp'.  */
      if (gdbarch_ptr_bit (gdbarch) == 32)
        {
          if (memcmp (buf, mov_esp_ebp_1, 2) == 0
              || memcmp (buf, mov_esp_ebp_2, 2) == 0)
            {
              /* OK, we actually have a frame.  */
              cache->frameless_p = 0;
              return pc + 3;
            }
        }

      return pc + 1;
    }

  return pc;
}
02142 
/* Work around false termination of prologue - GCC PR debug/48827.

   START_PC is the first instruction of a function, PC is its minimal already
   determined advanced address.  Function returns PC if it has nothing to do.

   84 c0                test   %al,%al
   74 23                je     after
   <-- here is 0 lines advance - the false prologue end marker.
   0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
   0f 29 4d 80          movaps %xmm1,-0x80(%rbp)
   0f 29 55 90          movaps %xmm2,-0x70(%rbp)
   0f 29 5d a0          movaps %xmm3,-0x60(%rbp)
   0f 29 65 b0          movaps %xmm4,-0x50(%rbp)
   0f 29 6d c0          movaps %xmm5,-0x40(%rbp)
   0f 29 75 d0          movaps %xmm6,-0x30(%rbp)
   0f 29 7d e0          movaps %xmm7,-0x20(%rbp)
   after:  */

static CORE_ADDR
amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
{
  struct symtab_and_line start_pc_sal, next_sal;
  gdb_byte buf[4 + 8 * 7];
  int offset, xmmreg;

  if (pc == start_pc)
    return pc;

  /* Only apply the workaround to GCC-compiled code.  NOTE(review):
     the `< 6' presumably compares the 4.x minor version, restricting
     this to GCC >= 4.6 — confirm against producer_is_gcc_ge_4.  */
  start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
  if (start_pc_sal.symtab == NULL
      || producer_is_gcc_ge_4 (start_pc_sal.symtab->producer) < 6
      || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
    return pc;

  /* The false marker is a line entry with the same line number as the
     function's first line.  */
  next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
  if (next_sal.line != start_pc_sal.line)
    return pc;

  /* START_PC can be from overlayed memory, ignored here.  */
  if (target_read_memory (next_sal.pc - 4, buf, sizeof (buf)) != 0)
    return pc;

  /* test %al,%al */
  if (buf[0] != 0x84 || buf[1] != 0xc0)
    return pc;
  /* je AFTER */
  if (buf[2] != 0x74)
    return pc;

  offset = 4;
  for (xmmreg = 0; xmmreg < 8; xmmreg++)
    {
      /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
      if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
          || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
        return pc;

      /* 0b01?????? */
      if ((buf[offset + 2] & 0xc0) == 0x40)
        {
          /* 8-bit displacement.  */
          offset += 4;
        }
      /* 0b10?????? */
      else if ((buf[offset + 2] & 0xc0) == 0x80)
        {
          /* 32-bit displacement.  */
          offset += 7;
        }
      else
        return pc;
    }

  /* je AFTER: the branch displacement must land exactly past the
     movaps block we just scanned.  */
  if (offset - 4 != buf[3])
    return pc;

  return next_sal.end;
}
02222 
02223 /* Return PC of first real instruction.  */
02224 
02225 static CORE_ADDR
02226 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
02227 {
02228   struct amd64_frame_cache cache;
02229   CORE_ADDR pc;
02230   CORE_ADDR func_addr;
02231 
02232   if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
02233     {
02234       CORE_ADDR post_prologue_pc
02235         = skip_prologue_using_sal (gdbarch, func_addr);
02236       struct symtab *s = find_pc_symtab (func_addr);
02237 
02238       /* Clang always emits a line note before the prologue and another
02239          one after.  We trust clang to emit usable line notes.  */
02240       if (post_prologue_pc
02241           && (s != NULL
02242               && s->producer != NULL
02243               && strncmp (s->producer, "clang ", sizeof ("clang ") - 1) == 0))
02244         return max (start_pc, post_prologue_pc);
02245     }
02246 
02247   amd64_init_frame_cache (&cache);
02248   pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
02249                                &cache);
02250   if (cache.frameless_p)
02251     return start_pc;
02252 
02253   return amd64_skip_xmm_prologue (pc, start_pc);
02254 }
02255 
02256 
02257 /* Normal frames.  */
02258 
/* Fill in CACHE for THIS_FRAME by analyzing the function's prologue
   and reading the frame's registers.  May throw if registers or
   memory are unavailable; the caller handles that.  */

static void
amd64_frame_cache_1 (struct frame_info *this_frame,
                     struct amd64_frame_cache *cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];
  int i;

  cache->pc = get_frame_func (this_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
                            cache);

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
         function, or somewhere half-way its prologue, the function's
         frame probably hasn't been fully setup yet.  Try to
         reconstruct the base address for the stack frame by looking
         at the stack pointer.  For truly "frameless" functions this
         might work too.  */

      if (cache->saved_sp_reg != -1)
        {
          /* Stack pointer has been saved.  */
          get_frame_register (this_frame, cache->saved_sp_reg, buf);
          cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);

          /* We're halfway aligning the stack.  */
          cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
          cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;

          /* This will be added back below.  */
          cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
        }
      else
        {
          get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
          cache->base = extract_unsigned_integer (buf, 8, byte_order)
                        + cache->sp_offset;
        }
    }
  else
    {
      /* A normal frame: %rbp holds the frame base.  */
      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  If we're halfway aligning the stack, %rip is handled
     differently (see above).  */
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  cache->base_p = 1;
}
02327 
/* Memoizing wrapper around amd64_frame_cache_1: allocate the cache in
   *THIS_CACHE on first use and tolerate unavailable registers/memory
   (the cache is then left with base_p == 0).  */

static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  volatile struct gdb_exception ex;
  struct amd64_frame_cache *cache;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      amd64_frame_cache_1 (this_frame, cache);
    }
  /* Swallow only NOT_AVAILABLE_ERROR; re-throw anything else.  */
  if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
    throw_exception (ex);

  return cache;
}
02349 
02350 static enum unwind_stop_reason
02351 amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
02352                                 void **this_cache)
02353 {
02354   struct amd64_frame_cache *cache =
02355     amd64_frame_cache (this_frame, this_cache);
02356 
02357   if (!cache->base_p)
02358     return UNWIND_UNAVAILABLE;
02359 
02360   /* This marks the outermost frame.  */
02361   if (cache->base == 0)
02362     return UNWIND_OUTERMOST;
02363 
02364   return UNWIND_NO_REASON;
02365 }
02366 
02367 static void
02368 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
02369                      struct frame_id *this_id)
02370 {
02371   struct amd64_frame_cache *cache =
02372     amd64_frame_cache (this_frame, this_cache);
02373 
02374   if (!cache->base_p)
02375     return;
02376 
02377   /* This marks the outermost frame.  */
02378   if (cache->base == 0)
02379     return;
02380 
02381   (*this_id) = frame_id_build (cache->base + 16, cache->pc);
02382 }
02383 
02384 static struct value *
02385 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
02386                            int regnum)
02387 {
02388   struct gdbarch *gdbarch = get_frame_arch (this_frame);
02389   struct amd64_frame_cache *cache =
02390     amd64_frame_cache (this_frame, this_cache);
02391 
02392   gdb_assert (regnum >= 0);
02393 
02394   if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
02395     return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
02396 
02397   if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
02398     return frame_unwind_got_memory (this_frame, regnum,
02399                                     cache->saved_regs[regnum]);
02400 
02401   return frame_unwind_got_register (this_frame, regnum, regnum);
02402 }
02403 
/* Unwinder for normal amd64 frames, backed by amd64_frame_cache.  */

static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_unwind_stop_reason,
  amd64_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  default_frame_sniffer
};
02413 
/* Generate a bytecode expression to get the value of the saved PC.  */

static void
amd64_gen_return_address (struct gdbarch *gdbarch,
                          struct agent_expr *ax, struct axs_value *value,
                          CORE_ADDR scope)
{
  /* The following sequence assumes the traditional use of the base
     register.  */
  ax_reg (ax, AMD64_RBP_REGNUM);        /* Push %rbp ...  */
  ax_const_l (ax, 8);                   /* ... push the constant 8 ...  */
  ax_simple (ax, aop_add);              /* ... and add: %rbp + 8.  */
  /* The computed value is the memory address holding the return %rip.  */
  value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
  value->kind = axs_lvalue_memory;
}
02429 
02430 
02431 /* Signal trampolines.  */
02432 
02433 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
02434    64-bit variants.  This would require using identical frame caches
02435    on both platforms.  */
02436 
/* Build (and memoize in *THIS_CACHE) a frame cache for a signal
   trampoline frame, locating saved registers via the target's
   sigcontext_addr handler and sc_reg_offset table.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  volatile struct gdb_exception ex;
  struct amd64_frame_cache *cache;
  CORE_ADDR addr;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      /* The frame base is derived directly from %rsp.  */
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;

      /* Saved registers live at fixed offsets from the sigcontext.  */
      addr = tdep->sigcontext_addr (this_frame);
      gdb_assert (tdep->sc_reg_offset);
      gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
      for (i = 0; i < tdep->sc_num_regs; i++)
        if (tdep->sc_reg_offset[i] != -1)
          cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

      cache->base_p = 1;
    }
  /* Swallow only NOT_AVAILABLE_ERROR; re-throw anything else.  */
  if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
    throw_exception (ex);

  *this_cache = cache;
  return cache;
}
02474 
02475 static enum unwind_stop_reason
02476 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
02477                                          void **this_cache)
02478 {
02479   struct amd64_frame_cache *cache =
02480     amd64_sigtramp_frame_cache (this_frame, this_cache);
02481 
02482   if (!cache->base_p)
02483     return UNWIND_UNAVAILABLE;
02484 
02485   return UNWIND_NO_REASON;
02486 }
02487 
02488 static void
02489 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
02490                               void **this_cache, struct frame_id *this_id)
02491 {
02492   struct amd64_frame_cache *cache =
02493     amd64_sigtramp_frame_cache (this_frame, this_cache);
02494 
02495   if (!cache->base_p)
02496     return;
02497 
02498   (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
02499 }
02500 
/* Implement the "prev_register" frame_unwind callback for signal
   trampoline frames.  */

static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
                                    void **this_cache, int regnum)
{
  /* Populate the sigtramp cache if this is the first access, then
     defer to the normal-frame register unwinder.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);

  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
02510 
02511 static int
02512 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
02513                               struct frame_info *this_frame,
02514                               void **this_cache)
02515 {
02516   struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
02517 
02518   /* We shouldn't even bother if we don't have a sigcontext_addr
02519      handler.  */
02520   if (tdep->sigcontext_addr == NULL)
02521     return 0;
02522 
02523   if (tdep->sigtramp_p != NULL)
02524     {
02525       if (tdep->sigtramp_p (this_frame))
02526         return 1;
02527     }
02528 
02529   if (tdep->sigtramp_start != 0)
02530     {
02531       CORE_ADDR pc = get_frame_pc (this_frame);
02532 
02533       gdb_assert (tdep->sigtramp_end != 0);
02534       if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
02535         return 1;
02536     }
02537 
02538   return 0;
02539 }
02540 
/* Unwinder for amd64 signal trampoline frames.  */

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_unwind_stop_reason,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  NULL,
  amd64_sigtramp_frame_sniffer
};
02550 
02551 
02552 static CORE_ADDR
02553 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
02554 {
02555   struct amd64_frame_cache *cache =
02556     amd64_frame_cache (this_frame, this_cache);
02557 
02558   return cache->base;
02559 }
02560 
/* Frame base handler for normal amd64 frames; the same address serves
   as frame, arg and locals base.  */

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};
02568 
02569 /* Normal frames, but in a function epilogue.  */
02570 
02571 /* The epilogue is defined here as the 'ret' instruction, which will
02572    follow any instruction such as 'leave' or 'pop %ebp' that destroys
02573    the function's stack frame.  */
02574 
02575 static int
02576 amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
02577 {
02578   gdb_byte insn;
02579   struct symtab *symtab;
02580 
02581   symtab = find_pc_symtab (pc);
02582   if (symtab && symtab->epilogue_unwind_valid)
02583     return 0;
02584 
02585   if (target_read_memory (pc, &insn, 1))
02586     return 0;   /* Can't read memory at pc.  */
02587 
02588   if (insn != 0xc3)     /* 'ret' instruction.  */
02589     return 0;
02590 
02591   return 1;
02592 }
02593 
/* Return non-zero if THIS_FRAME should be handled by the epilogue
   unwinder: only the innermost frame can be stopped in an epilogue.  */

static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
                              struct frame_info *this_frame,
                              void **this_prologue_cache)
{
  if (frame_relative_level (this_frame) != 0)
    return 0;

  return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
                                       get_frame_pc (this_frame));
}
02605 
/* Build (and memoize in *THIS_CACHE) a frame cache for a frame
   stopped in a function epilogue, where %rbp has already been
   popped and only %rsp locates the frame.  */

static struct amd64_frame_cache *
amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  volatile struct gdb_exception ex;
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Cache base will be %rsp plus cache->sp_offset (-8).  */
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8,
                                              byte_order) + cache->sp_offset;

      /* Cache pc will be this frame's PC.  */
      cache->pc = get_frame_pc (this_frame);

      /* The saved %rsp will be at cache->base plus 16.  */
      cache->saved_sp = cache->base + 16;

      /* The saved %rip will be at cache->base plus 8.  */
      cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;

      cache->base_p = 1;
    }
  /* Swallow only NOT_AVAILABLE_ERROR; re-throw anything else.  */
  if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
    throw_exception (ex);

  return cache;
}
02644 
02645 static enum unwind_stop_reason
02646 amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
02647                                          void **this_cache)
02648 {
02649   struct amd64_frame_cache *cache
02650     = amd64_epilogue_frame_cache (this_frame, this_cache);
02651 
02652   if (!cache->base_p)
02653     return UNWIND_UNAVAILABLE;
02654 
02655   return UNWIND_NO_REASON;
02656 }
02657 
02658 static void
02659 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
02660                               void **this_cache,
02661                               struct frame_id *this_id)
02662 {
02663   struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
02664                                                                this_cache);
02665 
02666   if (!cache->base_p)
02667     return;
02668 
02669   (*this_id) = frame_id_build (cache->base + 8, cache->pc);
02670 }
02671 
/* Unwinder used for the innermost frame when the PC is inside a
   function epilogue; prepended in amd64_init_abi so it supersedes the
   prologue-based unwinders there.  */

static const struct frame_unwind amd64_epilogue_frame_unwind =
{
  NORMAL_FRAME,
  amd64_epilogue_frame_unwind_stop_reason,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,				/* unwind_data */
  amd64_epilogue_frame_sniffer
};
02681 
02682 static struct frame_id
02683 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
02684 {
02685   CORE_ADDR fp;
02686 
02687   fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
02688 
02689   return frame_id_build (fp + 16, get_frame_pc (this_frame));
02690 }
02691 
02692 /* 16 byte align the SP per frame requirements.  */
02693 
02694 static CORE_ADDR
02695 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
02696 {
02697   return sp & -(CORE_ADDR)16;
02698 }
02699 
02700 
02701 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
02702    in the floating-point register set REGSET to register cache
02703    REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */
02704 
02705 static void
02706 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
02707                        int regnum, const void *fpregs, size_t len)
02708 {
02709   const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
02710 
02711   gdb_assert (len == tdep->sizeof_fpregset);
02712   amd64_supply_fxsave (regcache, regnum, fpregs);
02713 }
02714 
02715 /* Collect register REGNUM from the register cache REGCACHE and store
02716    it in the buffer specified by FPREGS and LEN as described by the
02717    floating-point register set REGSET.  If REGNUM is -1, do this for
02718    all registers in REGSET.  */
02719 
02720 static void
02721 amd64_collect_fpregset (const struct regset *regset,
02722                         const struct regcache *regcache,
02723                         int regnum, void *fpregs, size_t len)
02724 {
02725   const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
02726 
02727   gdb_assert (len == tdep->sizeof_fpregset);
02728   amd64_collect_fxsave (regcache, regnum, fpregs);
02729 }
02730 
/* Similar to amd64_supply_fpregset, but use XSAVE extended state.
   Note that LEN is not validated here; amd64_supply_xsave works from
   the XSAVE header itself.  */

static void
amd64_supply_xstateregset (const struct regset *regset,
                           struct regcache *regcache, int regnum,
                           const void *xstateregs, size_t len)
{
  amd64_supply_xsave (regcache, regnum, xstateregs);
}
02740 
/* Similar to amd64_collect_fpregset, but use XSAVE extended state.
   The final argument 1 requests gcore-style collection (see
   amd64_collect_xsave).  */

static void
amd64_collect_xstateregset (const struct regset *regset,
                            const struct regcache *regcache,
                            int regnum, void *xstateregs, size_t len)
{
  amd64_collect_xsave (regcache, regnum, xstateregs, 1);
}
02750 
02751 /* Return the appropriate register set for the core section identified
02752    by SECT_NAME and SECT_SIZE.  */
02753 
02754 static const struct regset *
02755 amd64_regset_from_core_section (struct gdbarch *gdbarch,
02756                                 const char *sect_name, size_t sect_size)
02757 {
02758   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
02759 
02760   if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
02761     {
02762       if (tdep->fpregset == NULL)
02763         tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
02764                                        amd64_collect_fpregset);
02765 
02766       return tdep->fpregset;
02767     }
02768 
02769   if (strcmp (sect_name, ".reg-xstate") == 0)
02770     {
02771       if (tdep->xstateregset == NULL)
02772         tdep->xstateregset = regset_alloc (gdbarch,
02773                                            amd64_supply_xstateregset,
02774                                            amd64_collect_xstateregset);
02775 
02776       return tdep->xstateregset;
02777     }
02778 
02779   return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
02780 }
02781 
02782 
02783 /* Figure out where the longjmp will land.  Slurp the jmp_buf out of
02784    %rdi.  We expect its value to be a pointer to the jmp_buf structure
02785    from which we extract the address that we will land at.  This
02786    address is copied into PC.  This routine returns non-zero on
02787    success.  */
02788 
02789 static int
02790 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
02791 {
02792   gdb_byte buf[8];
02793   CORE_ADDR jb_addr;
02794   struct gdbarch *gdbarch = get_frame_arch (frame);
02795   int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
02796   int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
02797 
02798   /* If JB_PC_OFFSET is -1, we have no way to find out where the
02799      longjmp will land.  */
02800   if (jb_pc_offset == -1)
02801     return 0;
02802 
02803   get_frame_register (frame, AMD64_RDI_REGNUM, buf);
02804   jb_addr= extract_typed_address
02805             (buf, builtin_type (gdbarch)->builtin_data_ptr);
02806   if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
02807     return 0;
02808 
02809   *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
02810 
02811   return 1;
02812 }
02813 
/* Register map installed as tdep->record_regmap in amd64_init_abi for
   use by the process record/replay machinery.  NOTE(review): the
   entry order presumably matches the i386 record code's register
   indices — verify against i386-tdep record support.  */

static const int amd64_record_regmap[] =
{
  AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
  AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
  AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
  AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
  AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
};
02823 
/* Install the common 64-bit AMD64 ABI settings on GDBARCH.  Called by
   every AMD64 OS-ABI handler, and reused (with ILP32 overrides) by
   amd64_x32_init_abi below.  */

void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* Fall back to the built-in amd64 description when the target did
     not supply one of its own.  */
  if (! tdesc_has_registers (tdesc))
    tdesc = tdesc_amd64;
  tdep->tdesc = tdesc;

  tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
  tdep->register_names = amd64_register_names;

  /* Wire up the upper YMM halves only when the target description
     advertises AVX support.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
    {
      tdep->ymmh_register_names = amd64_ymmh_names;
      tdep->num_ymm_regs = 16;
      tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
    }

  /* Pseudo-register counts: al..r15b + spl/bpl/sil/dil variants (20
     byte registers), 16 word and 16 dword registers.  */
  tdep->num_byte_regs = 20;
  tdep->num_word_regs = 16;
  tdep->num_dword_regs = 16;
  /* Avoid wiring in the MMX registers for now.  */
  tdep->num_mmx_regs = 0;

  set_gdbarch_pseudo_register_read_value (gdbarch,
                                          amd64_pseudo_register_read_value);
  set_gdbarch_pseudo_register_write (gdbarch,
                                     amd64_pseudo_register_write);

  set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinder.  This unwinder is
     appended to the list first, so that it supercedes the other
     unwinders in function epilogues.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  /* If we have a register mapping, enable the generic core file support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
                                          amd64_regset_from_core_section);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);

  set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);

  set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);

  /* SystemTap variables and functions.  */
  set_gdbarch_stap_integer_prefix (gdbarch, "$");
  set_gdbarch_stap_register_prefix (gdbarch, "%");
  set_gdbarch_stap_register_indirection_prefix (gdbarch, "(");
  set_gdbarch_stap_register_indirection_suffix (gdbarch, ")");
  set_gdbarch_stap_is_single_operand (gdbarch,
                                      i386_stap_is_single_operand);
  set_gdbarch_stap_parse_special_token (gdbarch,
                                        i386_stap_parse_special_token);
}
02943 
02944 
02945 static struct type *
02946 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
02947 {
02948   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
02949 
02950   switch (regnum - tdep->eax_regnum)
02951     {
02952     case AMD64_RBP_REGNUM:      /* %ebp */
02953     case AMD64_RSP_REGNUM:      /* %esp */
02954       return builtin_type (gdbarch)->builtin_data_ptr;
02955     case AMD64_RIP_REGNUM:      /* %eip */
02956       return builtin_type (gdbarch)->builtin_func_ptr;
02957     }
02958 
02959   return i386_pseudo_register_type (gdbarch, regnum);
02960 }
02961 
/* Install the x32 (ILP32 on AMD64) ABI settings on GDBARCH: first the
   full 64-bit setup, then the 32-bit long/pointer overrides.  */

void
amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;

  amd64_init_abi (info, gdbarch);

  /* Replace the default description chosen by amd64_init_abi with the
     x32 one when the target did not supply its own.  */
  if (! tdesc_has_registers (tdesc))
    tdesc = tdesc_x32;
  tdep->tdesc = tdesc;

  /* x32 exposes %eip as an extra dword pseudo register.  */
  tdep->num_dword_regs = 17;
  set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);

  /* ILP32: long and pointers are 32 bits wide.  */
  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_ptr_bit (gdbarch, 32);
}
02980 
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_amd64_tdep (void);

/* Module initializer: register the built-in target descriptions used
   as defaults by amd64_init_abi and amd64_x32_init_abi.  */

void
_initialize_amd64_tdep (void)
{
  initialize_tdesc_amd64 ();
  initialize_tdesc_amd64_avx ();
  initialize_tdesc_x32 ();
  initialize_tdesc_x32_avx ();
}
02992 
02993 
02994 /* The 64-bit FXSAVE format differs from the 32-bit format in the
02995    sense that the instruction pointer and data pointer are simply
02996    64-bit offsets into the code segment and the data segment instead
02997    of a selector offset pair.  The functions below store the upper 32
02998    bits of these pointers (instead of just the 16-bits of the segment
02999    selector).  */
03000 
03001 /* Fill register REGNUM in REGCACHE with the appropriate
03002    floating-point or SSE register value from *FXSAVE.  If REGNUM is
03003    -1, do this for all registers.  This function masks off any of the
03004    reserved bits in *FXSAVE.  */
03005 
03006 void
03007 amd64_supply_fxsave (struct regcache *regcache, int regnum,
03008                      const void *fxsave)
03009 {
03010   struct gdbarch *gdbarch = get_regcache_arch (regcache);
03011   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
03012 
03013   i387_supply_fxsave (regcache, regnum, fxsave);
03014 
03015   if (fxsave
03016       && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
03017     {
03018       const gdb_byte *regs = fxsave;
03019 
03020       if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
03021         regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
03022       if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
03023         regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
03024     }
03025 }
03026 
03027 /* Similar to amd64_supply_fxsave, but use XSAVE extended state.  */
03028 
03029 void
03030 amd64_supply_xsave (struct regcache *regcache, int regnum,
03031                     const void *xsave)
03032 {
03033   struct gdbarch *gdbarch = get_regcache_arch (regcache);
03034   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
03035 
03036   i387_supply_xsave (regcache, regnum, xsave);
03037 
03038   if (xsave
03039       && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
03040     {
03041       const gdb_byte *regs = xsave;
03042 
03043       if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
03044         regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
03045                              regs + 12);
03046       if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
03047         regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
03048                              regs + 20);
03049     }
03050 }
03051 
03052 /* Fill register REGNUM (if it is a floating-point or SSE register) in
03053    *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
03054    all registers.  This function doesn't touch any of the reserved
03055    bits in *FXSAVE.  */
03056 
03057 void
03058 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
03059                       void *fxsave)
03060 {
03061   struct gdbarch *gdbarch = get_regcache_arch (regcache);
03062   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
03063   gdb_byte *regs = fxsave;
03064 
03065   i387_collect_fxsave (regcache, regnum, fxsave);
03066 
03067   if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
03068     {
03069       if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
03070         regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
03071       if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
03072         regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
03073     }
03074 }
03075 
03076 /* Similar to amd64_collect_fxsave, but use XSAVE extended state.  */
03077 
03078 void
03079 amd64_collect_xsave (const struct regcache *regcache, int regnum,
03080                      void *xsave, int gcore)
03081 {
03082   struct gdbarch *gdbarch = get_regcache_arch (regcache);
03083   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
03084   gdb_byte *regs = xsave;
03085 
03086   i387_collect_xsave (regcache, regnum, xsave, gcore);
03087 
03088   if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
03089     {
03090       if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
03091         regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
03092                               regs + 12);
03093       if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
03094         regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
03095                               regs + 20);
03096     }
03097 }
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Defines