GDB (API)
/* SPU target-dependent code for GDB, the GNU debugger.
   Copyright (C) 2006-2013 Free Software Foundation, Inc.

   Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
   Based on a port by Sid Manning <sid@us.ibm.com>.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "arch-utils.h"
#include "gdbtypes.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "gdb_string.h"
#include "gdb_assert.h"
#include "frame.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "symtab.h"
#include "symfile.h"
#include "value.h"
#include "inferior.h"
#include "dis-asm.h"
#include "objfiles.h"
#include "language.h"
#include "regcache.h"
#include "reggroups.h"
#include "floatformat.h"
#include "block.h"
#include "observer.h"
#include "infcall.h"
#include "dwarf2.h"
#include "exceptions.h"
#include "spu-tdep.h"


/* The list of available "set spu " and "show spu " commands.  */
static struct cmd_list_element *setspucmdlist = NULL;
static struct cmd_list_element *showspucmdlist = NULL;

/* Whether to stop for new SPE contexts.  */
static int spu_stop_on_load_p = 0;
/* Whether to automatically flush the SW-managed cache.  */
static int spu_auto_flush_cache_p = 1;


/* The tdep structure.  */
struct gdbarch_tdep
{
  /* The spufs ID identifying our address space.  */
  int id;

  /* SPU-specific vector type.  */
  struct type *spu_builtin_type_vec128;
};


/* SPU-specific vector type.  */
static struct type *
spu_builtin_type_vec128 (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (!tdep->spu_builtin_type_vec128)
    {
      const struct builtin_type *bt = builtin_type (gdbarch);
      struct type *t;

      t = arch_composite_type (gdbarch,
                               "__spu_builtin_type_vec128", TYPE_CODE_UNION);
      append_composite_type_field (t, "uint128", bt->builtin_int128);
      append_composite_type_field (t, "v2_int64",
                                   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "v4_int32",
                                   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "v8_int16",
                                   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "v16_int8",
                                   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "v2_double",
                                   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (t, "v4_float",
                                   init_vector_type (bt->builtin_float, 4));

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "spu_builtin_type_vec128";

      tdep->spu_builtin_type_vec128 = t;
    }

  return tdep->spu_builtin_type_vec128;
}
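
/* For reference only (an illustrative sketch, not part of the original
   source): the composite type built above corresponds roughly to the
   following C-level union, with member names taken from the
   append_composite_type_field calls:

       union __spu_builtin_type_vec128
       {
         uint128_t uint128;
         int64_t   v2_int64[2];
         int32_t   v4_int32[4];
         int16_t   v8_int16[8];
         int8_t    v16_int8[16];
         double    v2_double[2];
         float     v4_float[4];
       };

   Since spu_register_type below returns this type for all 128 GPRs, an
   expression such as "print $r3.v4_int32" displays the four word slots
   of register 3.  */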

/* The list of available "info spu " commands.  */
static struct cmd_list_element *infospucmdlist = NULL;

/* Registers.  */

static const char *
spu_register_name (struct gdbarch *gdbarch, int reg_nr)
{
  static char *register_names[] =
    {
      "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
      "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
      "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
      "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
      "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
      "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
      "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
      "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
      "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
      "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
      "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
      "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
      "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
      "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
    };

  if (reg_nr < 0)
    return NULL;
  if (reg_nr >= sizeof register_names / sizeof *register_names)
    return NULL;

  return register_names[reg_nr];
}

static struct type *
spu_register_type (struct gdbarch *gdbarch, int reg_nr)
{
  if (reg_nr < SPU_NUM_GPRS)
    return spu_builtin_type_vec128 (gdbarch);

  switch (reg_nr)
    {
    case SPU_ID_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_PC_REGNUM:
      return builtin_type (gdbarch)->builtin_func_ptr;

    case SPU_SP_REGNUM:
      return builtin_type (gdbarch)->builtin_data_ptr;

    case SPU_FPSCR_REGNUM:
      return builtin_type (gdbarch)->builtin_uint128;

    case SPU_SRR0_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_LSLR_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_DECR_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_DECR_STATUS_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}

/* Pseudo registers for preferred slots - stack pointer.  */

static enum register_status
spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
                              gdb_byte *buf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum register_status status;
  gdb_byte reg[32];
  char annex[32];
  ULONGEST id;
  ULONGEST ul;

  status = regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
  if (status != REG_VALID)
    return status;
  xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
  memset (reg, 0, sizeof reg);
  target_read (&current_target, TARGET_OBJECT_SPU, annex,
               reg, 0, sizeof reg);

  ul = strtoulst ((char *) reg, NULL, 16);
  store_unsigned_integer (buf, 4, byte_order, ul);
  return REG_VALID;
}

static enum register_status
spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
                          int regnum, gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;
  enum register_status status;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      status = regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      if (status != REG_VALID)
        return status;
      memcpy (buf, reg, 4);
      return status;

    case SPU_FPSCR_REGNUM:
      status = regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      if (status != REG_VALID)
        return status;
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      return status;

    case SPU_SRR0_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "srr0", buf);

    case SPU_LSLR_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "lslr", buf);

    case SPU_DECR_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "decr", buf);

    case SPU_DECR_STATUS_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "decr_status", buf);

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}

static void
spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
                               const gdb_byte *buf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  char reg[32];
  char annex[32];
  ULONGEST id;

  regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
  xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
  xsnprintf (reg, sizeof reg, "0x%s",
             phex_nz (extract_unsigned_integer (buf, 4, byte_order), 4));
  target_write (&current_target, TARGET_OBJECT_SPU, annex,
                (gdb_byte *) reg, 0, strlen (reg));
}
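
/* Illustration (not part of the original source): the helpers above talk
   to the kernel spufs via the TARGET_OBJECT_SPU object, using an annex of
   the form "<spufs-id>/<file>".  For SPU context 3, reading the "srr0"
   pseudo register reads the annex "3/srr0" and parses the ASCII hex
   string returned by the kernel (strtoulst above); writing formats the
   new value as a "0x..." string with phex_nz before writing it back.
   The FPSCR register transfers the raw 16-byte "fpcr" file directly.  */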

static void
spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
                           int regnum, const gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      memcpy (reg, buf, 4);
      regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
      break;

    case SPU_FPSCR_REGNUM:
      regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      break;

    case SPU_SRR0_REGNUM:
      spu_pseudo_register_write_spu (regcache, "srr0", buf);
      break;

    case SPU_LSLR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "lslr", buf);
      break;

    case SPU_DECR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr", buf);
      break;

    case SPU_DECR_STATUS_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr_status", buf);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}

/* Value conversion -- access scalar values at the preferred slot.  */

static struct value *
spu_value_from_register (struct type *type, int regnum,
                         struct frame_info *frame)
{
  struct value *value = default_value_from_register (type, regnum, frame);
  int len = TYPE_LENGTH (type);

  if (regnum < SPU_NUM_GPRS && len < 16)
    {
      int preferred_slot = len < 4 ? 4 - len : 0;
      set_value_offset (value, preferred_slot);
    }

  return value;
}

/* Register groups.  */

static int
spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
                         struct reggroup *group)
{
  /* Registers displayed via 'info regs'.  */
  if (group == general_reggroup)
    return 1;

  /* Registers displayed via 'info float'.  */
  if (group == float_reggroup)
    return 0;

  /* Registers that need to be saved/restored in order to
     push or pop frames.  */
  if (group == save_reggroup || group == restore_reggroup)
    return 1;

  return default_register_reggroup_p (gdbarch, regnum, group);
}


/* Address handling.  */

static int
spu_gdbarch_id (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int id = tdep->id;

  /* The objfile architecture of a standalone SPU executable does not
     provide an SPU ID.  Retrieve it from the objfile's relocated
     address range in this special case.  */
  if (id == -1
      && symfile_objfile && symfile_objfile->obfd
      && bfd_get_arch (symfile_objfile->obfd) == bfd_arch_spu
      && symfile_objfile->sections != symfile_objfile->sections_end)
    id = SPUADDR_SPU (obj_section_addr (symfile_objfile->sections));

  return id;
}

static int
spu_address_class_type_flags (int byte_size, int dwarf2_addr_class)
{
  if (dwarf2_addr_class == 1)
    return TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
  else
    return 0;
}

static const char *
spu_address_class_type_flags_to_name (struct gdbarch *gdbarch, int type_flags)
{
  if (type_flags & TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1)
    return "__ea";
  else
    return NULL;
}

static int
spu_address_class_name_to_type_flags (struct gdbarch *gdbarch,
                                      const char *name, int *type_flags_ptr)
{
  if (strcmp (name, "__ea") == 0)
    {
      *type_flags_ptr = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
      return 1;
    }
  else
    return 0;
}

static void
spu_address_to_pointer (struct gdbarch *gdbarch,
                        struct type *type, gdb_byte *buf, CORE_ADDR addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  store_unsigned_integer (buf, TYPE_LENGTH (type), byte_order,
                          SPUADDR_ADDR (addr));
}

static CORE_ADDR
spu_pointer_to_address (struct gdbarch *gdbarch,
                        struct type *type, const gdb_byte *buf)
{
  int id = spu_gdbarch_id (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  ULONGEST addr
    = extract_unsigned_integer (buf, TYPE_LENGTH (type), byte_order);

  /* Do not convert __ea pointers.  */
  if (TYPE_ADDRESS_CLASS_1 (type))
    return addr;

  return addr ? SPUADDR (id, addr) : 0;
}

static CORE_ADDR
spu_integer_to_address (struct gdbarch *gdbarch,
                        struct type *type, const gdb_byte *buf)
{
  int id = spu_gdbarch_id (gdbarch);
  ULONGEST addr = unpack_long (type, buf);

  return SPUADDR (id, addr);
}
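
/* Illustration (not from the original file): GDB keeps an SPU local-store
   address and the owning spufs context ID together in a single CORE_ADDR.
   The SPUADDR, SPUADDR_SPU and SPUADDR_ADDR macros used above come from
   spu-tdep.h; the exact bit layout is defined there.  Conceptually, for a
   pointer value 0x1000 read in context 3:

       CORE_ADDR a = SPUADDR (3, 0x1000);
       SPUADDR_SPU (a)  == 3        -- the spufs context ID
       SPUADDR_ADDR (a) == 0x1000   -- the 32-bit local-store offset

   Pointers qualified with the __ea address class refer to PowerPC
   effective addresses and are deliberately left unconverted.  */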

/* Decoding SPU instructions.  */

enum
  {
    op_lqd = 0x34,
    op_lqx = 0x3c4,
    op_lqa = 0x61,
    op_lqr = 0x67,
    op_stqd = 0x24,
    op_stqx = 0x144,
    op_stqa = 0x41,
    op_stqr = 0x47,

    op_il = 0x081,
    op_ila = 0x21,
    op_a = 0x0c0,
    op_ai = 0x1c,

    op_selb = 0x8,

    op_br = 0x64,
    op_bra = 0x60,
    op_brsl = 0x66,
    op_brasl = 0x62,
    op_brnz = 0x42,
    op_brz = 0x40,
    op_brhnz = 0x46,
    op_brhz = 0x44,
    op_bi = 0x1a8,
    op_bisl = 0x1a9,
    op_biz = 0x128,
    op_binz = 0x129,
    op_bihz = 0x12a,
    op_bihnz = 0x12b,
  };

static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      return 1;
    }

  return 0;
}

static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  if ((insn >> 28) == op)
    {
      *rt = (insn >> 21) & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      *rc = insn & 127;
      return 1;
    }

  return 0;
}

static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
      return 1;
    }

  return 0;
}

static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  if ((insn >> 24) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
      return 1;
    }

  return 0;
}

static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  if ((insn >> 23) == op)
    {
      *rt = insn & 127;
      *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
      return 1;
    }

  return 0;
}

static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  if ((insn >> 25) == op)
    {
      *rt = insn & 127;
      *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
      return 1;
    }

  return 0;
}

static int
is_branch (unsigned int insn, int *offset, int *reg)
{
  int rt, i7, i16;

  if (is_ri16 (insn, op_br, &rt, &i16)
      || is_ri16 (insn, op_brsl, &rt, &i16)
      || is_ri16 (insn, op_brnz, &rt, &i16)
      || is_ri16 (insn, op_brz, &rt, &i16)
      || is_ri16 (insn, op_brhnz, &rt, &i16)
      || is_ri16 (insn, op_brhz, &rt, &i16))
    {
      *reg = SPU_PC_REGNUM;
      *offset = i16 << 2;
      return 1;
    }

  if (is_ri16 (insn, op_bra, &rt, &i16)
      || is_ri16 (insn, op_brasl, &rt, &i16))
    {
      *reg = -1;
      *offset = i16 << 2;
      return 1;
    }

  if (is_ri7 (insn, op_bi, &rt, reg, &i7)
      || is_ri7 (insn, op_bisl, &rt, reg, &i7)
      || is_ri7 (insn, op_biz, &rt, reg, &i7)
      || is_ri7 (insn, op_binz, &rt, reg, &i7)
      || is_ri7 (insn, op_bihz, &rt, reg, &i7)
      || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
    {
      *offset = 0;
      return 1;
    }

  return 0;
}
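
/* Worked example (illustrative sketch, not part of the original file):
   the RI10 format packs an 8-bit major opcode, a signed 10-bit immediate,
   RA and RT.  The common prologue instruction "ai $sp,$sp,-32" assembles
   to 0x1CF80081 and decodes as shown below.  The function itself is never
   called; it only documents the decoder interface.  */

static void
spu_decode_example (void)
{
  int rt, ra, imm;

  /* 0x1CF80081: opcode 0x1c (ai), immediate field 0x3e0 (= -32),
     RA = 1, RT = 1 -- i.e. allocate a 32-byte frame on $sp.  */
  if (is_ri10 (0x1CF80081, op_ai, &rt, &ra, &imm))
    gdb_assert (rt == 1 && ra == 1 && imm == -32);
}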

/* Prolog parsing.  */

struct spu_prologue_data
{
  /* Stack frame size.  -1 if analysis was unsuccessful.  */
  int size;

  /* How to find the CFA.  The CFA is equal to SP at function entry.  */
  int cfa_reg;
  int cfa_offset;

  /* Offset relative to CFA where a register is saved.  -1 if invalid.  */
  int reg_offset[SPU_NUM_GPRS];
};

static CORE_ADDR
spu_analyze_prologue (struct gdbarch *gdbarch,
                      CORE_ADDR start_pc, CORE_ADDR end_pc,
                      struct spu_prologue_data *data)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int found_sp = 0;
  int found_fp = 0;
  int found_lr = 0;
  int found_bc = 0;
  int reg_immed[SPU_NUM_GPRS];
  gdb_byte buf[16];
  CORE_ADDR prolog_pc = start_pc;
  CORE_ADDR pc;
  int i;


  /* Initialize DATA to default values.  */
  data->size = -1;

  data->cfa_reg = SPU_RAW_SP_REGNUM;
  data->cfa_offset = 0;

  for (i = 0; i < SPU_NUM_GPRS; i++)
    data->reg_offset[i] = -1;

  /* Set up REG_IMMED array.  This is non-zero for a register if we know its
     preferred slot currently holds this immediate value.  */
  for (i = 0; i < SPU_NUM_GPRS; i++)
    reg_immed[i] = 0;

  /* Scan instructions until the first branch.

     The following instructions are important prolog components:

       - The first instruction to set up the stack pointer.
       - The first instruction to set up the frame pointer.
       - The first instruction to save the link register.
       - The first instruction to save the backchain.

     We return the instruction after the latest of these four,
     or the incoming PC if none is found.  The first instruction
     to set up the stack pointer also defines the frame size.

     Note that instructions saving incoming arguments to their stack
     slots are not counted as important, because they are hard to
     identify with certainty.  This should not matter much, because
     arguments are relevant only in code compiled with debug data,
     and in such code the GDB core will advance until the first source
     line anyway, using SAL data.

     For purposes of stack unwinding, we analyze the following types
     of instructions in addition:

       - Any instruction adding to the current frame pointer.
       - Any instruction loading an immediate constant into a register.
       - Any instruction storing a register onto the stack.

     These are used to compute the CFA and REG_OFFSET output.  */

  for (pc = start_pc; pc < end_pc; pc += 4)
    {
      unsigned int insn;
      int rt, ra, rb, rc, immed;

      if (target_read_memory (pc, buf, 4))
        break;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      /* AI is the typical instruction to set up a stack frame.
         It is also used to initialize the frame pointer.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
        {
          if (rt == data->cfa_reg && ra == data->cfa_reg)
            data->cfa_offset -= immed;

          if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
              && !found_sp)
            {
              found_sp = 1;
              prolog_pc = pc + 4;

              data->size = -immed;
            }
          else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
                   && !found_fp)
            {
              found_fp = 1;
              prolog_pc = pc + 4;

              data->cfa_reg = SPU_FP_REGNUM;
              data->cfa_offset -= immed;
            }
        }

      /* A is used to set up stack frames of size >= 512 bytes.
         If we have tracked the contents of the addend register,
         we can handle this as well.  */
      else if (is_rr (insn, op_a, &rt, &ra, &rb))
        {
          if (rt == data->cfa_reg && ra == data->cfa_reg)
            {
              if (reg_immed[rb] != 0)
                data->cfa_offset -= reg_immed[rb];
              else
                data->cfa_reg = -1;  /* We don't know the CFA any more.  */
            }

          if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
              && !found_sp)
            {
              found_sp = 1;
              prolog_pc = pc + 4;

              if (reg_immed[rb] != 0)
                data->size = -reg_immed[rb];
            }
        }

      /* We need to track IL and ILA used to load immediate constants
         in case they are later used as input to an A instruction.  */
      else if (is_ri16 (insn, op_il, &rt, &immed))
        {
          reg_immed[rt] = immed;

          if (rt == SPU_RAW_SP_REGNUM && !found_sp)
            found_sp = 1;
        }

      else if (is_ri18 (insn, op_ila, &rt, &immed))
        {
          reg_immed[rt] = immed & 0x3ffff;

          if (rt == SPU_RAW_SP_REGNUM && !found_sp)
            found_sp = 1;
        }

      /* STQD is used to save registers to the stack.  */
      else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
        {
          if (ra == data->cfa_reg)
            data->reg_offset[rt] = data->cfa_offset - (immed << 4);

          if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
              && !found_lr)
            {
              found_lr = 1;
              prolog_pc = pc + 4;
            }

          if (ra == SPU_RAW_SP_REGNUM
              && (found_sp? immed == 0 : rt == SPU_RAW_SP_REGNUM)
              && !found_bc)
            {
              found_bc = 1;
              prolog_pc = pc + 4;
            }
        }

      /* _start uses SELB to set up the stack pointer.  */
      else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
        {
          if (rt == SPU_RAW_SP_REGNUM && !found_sp)
            found_sp = 1;
        }

      /* We terminate if we find a branch.  */
      else if (is_branch (insn, &immed, &ra))
        break;
    }


  /* If we successfully parsed until here, and didn't find any instruction
     modifying SP, we assume we have a frameless function.  */
  if (!found_sp)
    data->size = 0;

  /* Return cooked instead of raw SP.  */
  if (data->cfa_reg == SPU_RAW_SP_REGNUM)
    data->cfa_reg = SPU_SP_REGNUM;

  return prolog_pc;
}

/* Return the first instruction after the prologue starting at PC.  */
static CORE_ADDR
spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  struct spu_prologue_data data;
  return spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
}

/* Return the frame pointer in use at address PC.  */
static void
spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
                           int *reg, LONGEST *offset)
{
  struct spu_prologue_data data;
  spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);

  if (data.size != -1 && data.cfa_reg != -1)
    {
      /* The 'frame pointer' address is CFA minus frame size.  */
      *reg = data.cfa_reg;
      *offset = data.cfa_offset - data.size;
    }
  else
    {
      /* ??? We don't really know ...  */
      *reg = SPU_SP_REGNUM;
      *offset = 0;
    }
}

/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.

   1) scan forward from the point of execution:
      a) If you find an instruction that modifies the stack pointer
         or transfers control (except a return), execution is not in
         an epilogue, return.
      b) Stop scanning if you find a return instruction or reach the
         end of the function or reach the hard limit for the size of
         an epilogue.
   2) scan backward from the point of execution:
      a) If you find an instruction that modifies the stack pointer,
         execution *is* in an epilogue, return.
      b) Stop scanning if you reach an instruction that transfers
         control or the beginning of the function or reach the hard
         limit for the size of an epilogue.  */

static int
spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
  bfd_byte buf[4];
  unsigned int insn;
  int rt, ra, rb, immed;

  /* Find the search limits based on function boundaries and hard limit.
     We assume the epilogue can be up to 64 instructions long.  */

  const int spu_max_epilogue_size = 64 * 4;

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  if (pc - func_start < spu_max_epilogue_size)
    epilogue_start = func_start;
  else
    epilogue_start = pc - spu_max_epilogue_size;

  if (func_end - pc < spu_max_epilogue_size)
    epilogue_end = func_end;
  else
    epilogue_end = pc + spu_max_epilogue_size;

  /* Scan forward until next 'bi $0'.  */

  for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
        return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
        {
          if (immed == 0 && ra == SPU_LR_REGNUM)
            break;

          return 0;
        }

      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
          || is_rr (insn, op_a, &rt, &ra, &rb)
          || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
        {
          if (rt == SPU_RAW_SP_REGNUM)
            return 0;
        }
    }

  if (scan_pc >= epilogue_end)
    return 0;

  /* Scan backward until adjustment to stack pointer (R1).  */

  for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
        return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
        return 0;

      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
          || is_rr (insn, op_a, &rt, &ra, &rb)
          || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
        {
          if (rt == SPU_RAW_SP_REGNUM)
            return 1;
        }
    }

  return 0;
}
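
/* Illustration (not part of the original file): the stack layout assumed
   by the unwinder below.  The stack grows downward and all slots are
   16 bytes wide:

       SP  +  0   back chain = SP value at function entry (the CFA)
       SP  + 16   link-register save area used by this function's callees
       ...        locals / outgoing arguments
       CFA +  0   caller's back chain
       CFA + 16   where this function saved its own link register

   spu_frame_unwind_cache follows the back chain stored at SP and expects
   the saved link register 16 bytes above it; spu_push_dummy_call further
   down allocates the same 32-byte frame header ("sp -= 32") before
   storing the back chain.  */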

/* Normal stack frames.  */

struct spu_unwind_cache
{
  CORE_ADDR func;
  CORE_ADDR frame_base;
  CORE_ADDR local_base;

  struct trad_frame_saved_reg *saved_regs;
};

static struct spu_unwind_cache *
spu_frame_unwind_cache (struct frame_info *this_frame,
                        void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct spu_unwind_cache *info;
  struct spu_prologue_data data;
  CORE_ADDR id = tdep->id;
  gdb_byte buf[16];

  if (*this_prologue_cache)
    return *this_prologue_cache;

  info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
  *this_prologue_cache = info;
  info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  info->frame_base = 0;
  info->local_base = 0;

  /* Find the start of the current function, and analyze its prologue.  */
  info->func = get_frame_func (this_frame);
  if (info->func == 0)
    {
      /* Fall back to using the current PC as frame ID.  */
      info->func = get_frame_pc (this_frame);
      data.size = -1;
    }
  else
    spu_analyze_prologue (gdbarch, info->func, get_frame_pc (this_frame),
                          &data);

  /* If successful, use prologue analysis data.  */
  if (data.size != -1 && data.cfa_reg != -1)
    {
      CORE_ADDR cfa;
      int i;

      /* Determine CFA via unwound CFA_REG plus CFA_OFFSET.  */
      get_frame_register (this_frame, data.cfa_reg, buf);
      cfa = extract_unsigned_integer (buf, 4, byte_order) + data.cfa_offset;
      cfa = SPUADDR (id, cfa);

      /* Call-saved register slots.  */
      for (i = 0; i < SPU_NUM_GPRS; i++)
        if (i == SPU_LR_REGNUM
            || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
          if (data.reg_offset[i] != -1)
            info->saved_regs[i].addr = cfa - data.reg_offset[i];

      /* Frame bases.  */
      info->frame_base = cfa;
      info->local_base = cfa - data.size;
    }

  /* Otherwise, fall back to reading the backchain link.  */
  else
    {
      CORE_ADDR reg;
      LONGEST backchain;
      ULONGEST lslr;
      int status;

      /* Get local store limit.  */
      lslr = get_frame_register_unsigned (this_frame, SPU_LSLR_REGNUM);
      if (!lslr)
        lslr = (ULONGEST) -1;

      /* Get the backchain.  */
      reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
      status = safe_read_memory_integer (SPUADDR (id, reg), 4, byte_order,
                                         &backchain);

      /* A zero backchain terminates the frame chain.  Also, sanity
         check against the local store size limit.  */
      if (status && backchain > 0 && backchain <= lslr)
        {
          /* Assume the link register is saved into its slot.  */
          if (backchain + 16 <= lslr)
            info->saved_regs[SPU_LR_REGNUM].addr = SPUADDR (id,
                                                            backchain + 16);

          /* Frame bases.  */
          info->frame_base = SPUADDR (id, backchain);
          info->local_base = SPUADDR (id, reg);
        }
    }

  /* If we didn't find a frame, we cannot determine SP / return address.  */
  if (info->frame_base == 0)
    return info;

  /* The previous SP is equal to the CFA.  */
  trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM,
                        SPUADDR_ADDR (info->frame_base));

  /* Read full contents of the unwound link register in order to
     be able to determine the return address.  */
  if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
    target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
  else
    get_frame_register (this_frame, SPU_LR_REGNUM, buf);

  /* Normally, the return address is contained in the slot 0 of the
     link register, and slots 1-3 are zero.  For an overlay return,
     slot 0 contains the address of the overlay manager return stub,
     slot 1 contains the partition number of the overlay section to
     be returned to, and slot 2 contains the return address within
     that section.  Return the latter address in that case.  */
  if (extract_unsigned_integer (buf + 8, 4, byte_order) != 0)
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
                          extract_unsigned_integer (buf + 8, 4, byte_order));
  else
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
                          extract_unsigned_integer (buf, 4, byte_order));

  return info;
}

static void
spu_frame_this_id (struct frame_info *this_frame,
                   void **this_prologue_cache, struct frame_id *this_id)
{
  struct spu_unwind_cache *info =
    spu_frame_unwind_cache (this_frame, this_prologue_cache);

  if (info->frame_base == 0)
    return;

  *this_id = frame_id_build (info->frame_base, info->func);
}

static struct value *
spu_frame_prev_register (struct frame_info *this_frame,
                         void **this_prologue_cache, int regnum)
{
  struct spu_unwind_cache *info
    = spu_frame_unwind_cache (this_frame, this_prologue_cache);

  /* Special-case the stack pointer.  */
  if (regnum == SPU_RAW_SP_REGNUM)
    regnum = SPU_SP_REGNUM;

  return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
}

static const struct frame_unwind spu_frame_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  spu_frame_this_id,
  spu_frame_prev_register,
  NULL,
  default_frame_sniffer
};

static CORE_ADDR
spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
{
  struct spu_unwind_cache *info
    = spu_frame_unwind_cache (this_frame, this_cache);
  return info->local_base;
}

static const struct frame_base spu_frame_base = {
  &spu_frame_unwind,
  spu_frame_base_address,
  spu_frame_base_address,
  spu_frame_base_address
};

static CORE_ADDR
spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
  /* Mask off interrupt enable bit.  */
  return SPUADDR (tdep->id, pc & -4);
}

static CORE_ADDR
spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR sp = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
  return SPUADDR (tdep->id, sp);
}

static CORE_ADDR
spu_read_pc (struct regcache *regcache)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_regcache_arch (regcache));
  ULONGEST pc;
  regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
  /* Mask off interrupt enable bit.  */
  return SPUADDR (tdep->id, pc & -4);
}

static void
spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  /* Keep interrupt enabled state unchanged.  */
  ULONGEST old_pc;

  regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
  regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
                                  (SPUADDR_ADDR (pc) & -4) | (old_pc & 3));
}

/* Cell/B.E. cross-architecture unwinder support.  */

struct spu2ppu_cache
{
  struct frame_id frame_id;
  struct regcache *regcache;
};

static struct gdbarch *
spu2ppu_prev_arch (struct frame_info *this_frame, void **this_cache)
{
  struct spu2ppu_cache *cache = *this_cache;
  return get_regcache_arch (cache->regcache);
}

static void
spu2ppu_this_id (struct frame_info *this_frame,
                 void **this_cache, struct frame_id *this_id)
{
  struct spu2ppu_cache *cache = *this_cache;
  *this_id = cache->frame_id;
}

static struct value *
spu2ppu_prev_register (struct frame_info *this_frame,
                       void **this_cache, int regnum)
{
  struct spu2ppu_cache *cache = *this_cache;
  struct gdbarch *gdbarch = get_regcache_arch (cache->regcache);
  gdb_byte *buf;

  buf = alloca (register_size (gdbarch, regnum));
  regcache_cooked_read (cache->regcache, regnum, buf);
  return frame_unwind_got_bytes (this_frame, regnum, buf);
}

static int
spu2ppu_sniffer (const struct frame_unwind *self,
                 struct frame_info *this_frame, void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR base, func, backchain;
  gdb_byte buf[4];

  if (gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_spu)
    return 0;

  base = get_frame_sp (this_frame);
  func = get_frame_pc (this_frame);
  if (target_read_memory (base, buf, 4))
    return 0;
  backchain = extract_unsigned_integer (buf, 4, byte_order);

  if (!backchain)
    {
      struct frame_info *fi;

      struct spu2ppu_cache *cache
        = FRAME_OBSTACK_CALLOC (1, struct spu2ppu_cache);

      cache->frame_id = frame_id_build (base + 16, func);

      for (fi = get_next_frame (this_frame); fi; fi = get_next_frame (fi))
        if (gdbarch_bfd_arch_info (get_frame_arch (fi))->arch != bfd_arch_spu)
          break;

      if (fi)
        {
          cache->regcache = frame_save_as_regcache (fi);
          *this_prologue_cache = cache;
          return 1;
        }
      else
        {
          struct regcache *regcache;
          regcache = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
          cache->regcache = regcache_dup (regcache);
          *this_prologue_cache = cache;
          return 1;
        }
    }

  return 0;
}

static void
spu2ppu_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct spu2ppu_cache *cache = this_cache;
  regcache_xfree (cache->regcache);
}

static const struct frame_unwind spu2ppu_unwind = {
  ARCH_FRAME,
  default_frame_unwind_stop_reason,
  spu2ppu_this_id,
  spu2ppu_prev_register,
  NULL,
  spu2ppu_sniffer,
  spu2ppu_dealloc_cache,
  spu2ppu_prev_arch,
};


/* Function calling convention.  */

static CORE_ADDR
spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  return sp & ~15;
}

static CORE_ADDR
spu_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp, CORE_ADDR funaddr,
                     struct value **args, int nargs, struct type *value_type,
                     CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
                     struct regcache *regcache)
{
  /* Allocate space sufficient for a breakpoint, keeping the stack aligned.  */
  sp = (sp - 4) & ~15;
  /* Store the address of that breakpoint.  */
  *bp_addr = sp;
  /* The call starts at the callee's entry point.  */
  *real_pc = funaddr;

  return sp;
}

static int
spu_scalar_value_p (struct type *type)
{
  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_INT:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
      return TYPE_LENGTH (type) <= 16;

    default:
      return 0;
    }
}

static void
spu_value_to_regcache (struct regcache *regcache, int regnum,
                       struct type *type, const gdb_byte *in)
{
  int len = TYPE_LENGTH (type);

  if (spu_scalar_value_p (type))
    {
      int preferred_slot = len < 4 ? 4 - len : 0;
      regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
    }
  else
    {
      while (len >= 16)
        {
          regcache_cooked_write (regcache, regnum++, in);
          in += 16;
          len -= 16;
        }

      if (len > 0)
        regcache_cooked_write_part (regcache, regnum, 0, len, in);
    }
}

static void
spu_regcache_to_value (struct regcache *regcache, int regnum,
                       struct type *type, gdb_byte *out)
{
  int len = TYPE_LENGTH (type);

  if (spu_scalar_value_p (type))
    {
      int preferred_slot = len < 4 ? 4 - len : 0;
      regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
    }
  else
    {
      while (len >= 16)
        {
          regcache_cooked_read (regcache, regnum++, out);
          out += 16;
          len -= 16;
        }

      if (len > 0)
        regcache_cooked_read_part (regcache, regnum, 0, len, out);
    }
}

static CORE_ADDR
spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                     struct regcache *regcache, CORE_ADDR bp_addr,
                     int nargs, struct value **args, CORE_ADDR sp,
                     int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR sp_delta;
  int i;
  int regnum = SPU_ARG1_REGNUM;
  int stack_arg = -1;
  gdb_byte buf[16];

  /* Set the return address.  */
  memset (buf, 0, sizeof buf);
  store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (bp_addr));
  regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);

  /* If STRUCT_RETURN is true, then the struct return address (in
     STRUCT_ADDR) will consume the first argument-passing register.
     Both adjust the register count and store that value.  */
  if (struct_return)
    {
      memset (buf, 0, sizeof buf);
      store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (struct_addr));
      regcache_cooked_write (regcache, regnum++, buf);
    }

  /* Fill in argument registers.  */
  for (i = 0; i < nargs; i++)
    {
      struct value *arg = args[i];
      struct type *type = check_typedef (value_type (arg));
      const gdb_byte *contents = value_contents (arg);
      int n_regs = align_up (TYPE_LENGTH (type), 16) / 16;

      /* If the argument doesn't wholly fit into registers, it and
         all subsequent arguments go to the stack.  */
      if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
        {
          stack_arg = i;
          break;
        }

      spu_value_to_regcache (regcache, regnum, type, contents);
      regnum += n_regs;
    }

  /* Overflow arguments go to the stack.  */
  if (stack_arg != -1)
    {
      CORE_ADDR ap;

      /* Allocate all required stack size.  */
      for (i = stack_arg; i < nargs; i++)
        {
          struct type *type = check_typedef (value_type (args[i]));
          sp -= align_up (TYPE_LENGTH (type), 16);
        }

      /* Fill in stack arguments.  */
      ap = sp;
      for (i = stack_arg; i < nargs; i++)
        {
          struct value *arg = args[i];
          struct type *type = check_typedef (value_type (arg));
          int len = TYPE_LENGTH (type);
          int preferred_slot;

          if (spu_scalar_value_p (type))
            preferred_slot = len < 4 ? 4 - len : 0;
          else
            preferred_slot = 0;

          target_write_memory (ap + preferred_slot, value_contents (arg), len);
          ap += align_up (TYPE_LENGTH (type), 16);
        }
    }

  /* Allocate stack frame header.  */
  sp -= 32;

  /* Store stack back chain.  */
  regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
  target_write_memory (sp, buf, 16);

  /* Finally, update all slots of the SP register.  */
  sp_delta = sp - extract_unsigned_integer (buf, 4, byte_order);
  for (i = 0; i < 4; i++)
    {
      CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4, byte_order);
      store_unsigned_integer (buf + 4*i, 4, byte_order, sp_slot + sp_delta);
    }
  regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);

  return sp;
}

static struct frame_id
spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
  CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
  return frame_id_build (SPUADDR (tdep->id, sp), SPUADDR (tdep->id, pc & -4));
}
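
/* Illustration (not part of the original file): scalars are passed and
   returned in the "preferred slot", i.e. right-justified within the first
   4 bytes of a 16-byte register, so for spu_value_to_regcache above:

       len == 1 (char)   -> written at byte offset 3 of the register
       len == 2 (short)  -> written at byte offset 2
       len >= 4          -> written starting at byte offset 0

   Aggregates larger than 16 bytes simply occupy consecutive argument
   registers, 16 bytes each, as the loop in spu_value_to_regcache shows.  */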

/* Function return value access.  */

static enum return_value_convention
spu_return_value (struct gdbarch *gdbarch, struct value *function,
                  struct type *type, struct regcache *regcache,
                  gdb_byte *out, const gdb_byte *in)
{
  struct type *func_type = function ? value_type (function) : NULL;
  enum return_value_convention rvc;
  int opencl_vector = 0;

  if (func_type)
    {
      func_type = check_typedef (func_type);

      if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
        func_type = check_typedef (TYPE_TARGET_TYPE (func_type));

      if (TYPE_CODE (func_type) == TYPE_CODE_FUNC
          && TYPE_CALLING_CONVENTION (func_type) == DW_CC_GDB_IBM_OpenCL
          && TYPE_CODE (type) == TYPE_CODE_ARRAY
          && TYPE_VECTOR (type))
        opencl_vector = 1;
    }

  if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
    rvc = RETURN_VALUE_REGISTER_CONVENTION;
  else
    rvc = RETURN_VALUE_STRUCT_CONVENTION;

  if (in)
    {
      switch (rvc)
        {
        case RETURN_VALUE_REGISTER_CONVENTION:
          if (opencl_vector && TYPE_LENGTH (type) == 2)
            regcache_cooked_write_part (regcache, SPU_ARG1_REGNUM, 2, 2, in);
          else
            spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
          break;

        case RETURN_VALUE_STRUCT_CONVENTION:
          error (_("Cannot set function return value."));
          break;
        }
    }
  else if (out)
    {
      switch (rvc)
        {
        case RETURN_VALUE_REGISTER_CONVENTION:
          if (opencl_vector && TYPE_LENGTH (type) == 2)
            regcache_cooked_read_part (regcache, SPU_ARG1_REGNUM, 2, 2, out);
          else
            spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
          break;

        case RETURN_VALUE_STRUCT_CONVENTION:
          error (_("Function return value unknown."));
          break;
        }
    }

  return rvc;
}


/* Breakpoints.  */

static const gdb_byte *
spu_breakpoint_from_pc (struct gdbarch *gdbarch,
                        CORE_ADDR * pcptr, int *lenptr)
{
  static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };

  *lenptr = sizeof breakpoint;
  return breakpoint;
}
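
/* Note (illustrative, not in the original file): the four bytes above
   appear to be the big-endian encoding of the SPU "stop 0x3fff"
   instruction, used here as the breakpoint instruction.  Since every SPU
   instruction is exactly 4 bytes and naturally aligned, *PCPTR needs no
   adjustment and is left untouched.  */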

static int
spu_memory_remove_breakpoint (struct gdbarch *gdbarch,
                              struct bp_target_info *bp_tgt)
{
  /* We work around a problem in combined Cell/B.E. debugging here.  Consider
     that in a combined application, we have some breakpoints inserted in SPU
     code, and now the application forks (on the PPU side).  GDB common code
     will assume that the fork system call copied all breakpoints into the new
     process' address space, and that all those copies now need to be removed
     (see breakpoint.c:detach_breakpoints).

     While this is certainly true for PPU side breakpoints, it is not true
     for SPU side breakpoints.  fork will clone the SPU context file
     descriptors, so that all the existing SPU contexts are in fact accessible
     in the new process.  However, the contents of the SPU contexts themselves
     are *not* cloned.  Therefore the effect of detach_breakpoints is to
     remove SPU breakpoints from the *original* SPU context's local store
     -- this is not the correct behaviour.

     The workaround is to check whether the PID we are asked to remove this
     breakpoint from (i.e. ptid_get_pid (inferior_ptid)) is different from the
     PID of the current inferior (i.e. current_inferior ()->pid).  This is only
     true in the context of detach_breakpoints.  If so, we simply do nothing.

     [ Note that for the fork child process, it does not matter if breakpoints
     remain inserted, because those SPU contexts are not runnable anyway --
     the Linux kernel allows only the original process to invoke spu_run.  ]  */

  if (ptid_get_pid (inferior_ptid) != current_inferior ()->pid)
    return 0;

  return default_memory_remove_breakpoint (gdbarch, bp_tgt);
}


/* Software single-stepping support.  */

static int
spu_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR pc, next_pc;
  unsigned int insn;
  int offset, reg;
  gdb_byte buf[4];
  ULONGEST lslr;

  pc = get_frame_pc (frame);

  if (target_read_memory (pc, buf, 4))
    return 1;
  insn = extract_unsigned_integer (buf, 4, byte_order);

  /* Get local store limit.  */
  lslr = get_frame_register_unsigned (frame, SPU_LSLR_REGNUM);
  if (!lslr)
    lslr = (ULONGEST) -1;

  /* Next sequential instruction is at PC + 4, except if the current
     instruction is a PPE-assisted call, in which case it is at PC + 8.
     Wrap around LS limit to be on the safe side.  */
  if ((insn & 0xffffff00) == 0x00002100)
    next_pc = (SPUADDR_ADDR (pc) + 8) & lslr;
  else
    next_pc = (SPUADDR_ADDR (pc) + 4) & lslr;

  insert_single_step_breakpoint (gdbarch,
                                 aspace, SPUADDR (SPUADDR_SPU (pc), next_pc));

  if (is_branch (insn, &offset, &reg))
    {
      CORE_ADDR target = offset;

      if (reg == SPU_PC_REGNUM)
        target += SPUADDR_ADDR (pc);
      else if (reg != -1)
        {
          int optim, unavail;

          if (get_frame_register_bytes (frame, reg, 0, 4, buf,
                                        &optim, &unavail))
            target += extract_unsigned_integer (buf, 4, byte_order) & -4;
          else
            {
              if (optim)
                error (_("Could not determine address of "
                         "single-step breakpoint."));
              if (unavail)
                throw_error (NOT_AVAILABLE_ERROR,
                             _("Could not determine address of "
                               "single-step breakpoint."));
            }
        }

      target = target & lslr;
      if (target != next_pc)
        insert_single_step_breakpoint (gdbarch, aspace,
                                       SPUADDR (SPUADDR_SPU (pc), target));
    }

  return 1;
}


/* Longjmp support.  */

static int
spu_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[4];
  CORE_ADDR jb_addr;
  int optim, unavail;

  /* Jump buffer is pointed to by the argument register $r3.  */
  if (!get_frame_register_bytes (frame, SPU_ARG1_REGNUM, 0, 4, buf,
                                 &optim, &unavail))
    return 0;

  jb_addr = extract_unsigned_integer (buf, 4, byte_order);
  if (target_read_memory (SPUADDR (tdep->id, jb_addr), buf, 4))
    return 0;

  *pc = extract_unsigned_integer (buf, 4, byte_order);
  *pc = SPUADDR (tdep->id, *pc);
  return 1;
}

/* Disassembler.  */

struct spu_dis_asm_data
{
  struct gdbarch *gdbarch;
  int id;
};

static void
spu_dis_asm_print_address (bfd_vma addr, struct disassemble_info *info)
{
  struct spu_dis_asm_data *data = info->application_data;
  print_address (data->gdbarch, SPUADDR (data->id, addr), info->stream);
}

static int
gdb_print_insn_spu (bfd_vma memaddr, struct disassemble_info *info)
{
  /* The opcodes disassembler does 18-bit address arithmetic.  Make
     sure the SPU ID encoded in the high bits is added back when we
     call print_address.  */
  struct disassemble_info spu_info = *info;
  struct spu_dis_asm_data data;
  data.gdbarch = info->application_data;
  data.id = SPUADDR_SPU (memaddr);

  spu_info.application_data = &data;
  spu_info.print_address_func = spu_dis_asm_print_address;
  return print_insn_spu (memaddr, &spu_info);
}


/* Target overlays for the SPU overlay manager.

   See the documentation of simple_overlay_update for how the
   interface is supposed to work.

   Data structures used by the overlay manager:

   struct ovly_table
     {
       u32 vma;
       u32 size;
       u32 pos;
       u32 buf;
     } _ovly_table[];          -- one entry per overlay section

   struct ovly_buf_table
     {
       u32 mapped;
     } _ovly_buf_table[];      -- one entry per overlay buffer

   _ovly_table should never change.

   Both tables are aligned to a 16-byte boundary, the symbols
   _ovly_table and _ovly_buf_table are of type STT_OBJECT and their
   size set to the size of the respective array.  buf in _ovly_table is
   an index into _ovly_buf_table.

   mapped is an index into _ovly_table.  Both the mapped and buf indices
   start from one to reference the first entry in their respective
   tables.  */

/* Using the per-objfile private data mechanism, we store for each
   objfile an array of "struct spu_overlay_table" structures, one
   for each obj_section of the objfile.  This structure holds two
   fields, MAPPED_PTR and MAPPED_VAL.  If MAPPED_PTR is zero, this
   is *not* an overlay section.  If it is non-zero, it represents
   a target address.  The overlay section is mapped iff the target
   integer at this location equals MAPPED_VAL.  */

static const struct objfile_data *spu_overlay_data;

struct spu_overlay_table
{
  CORE_ADDR mapped_ptr;
  CORE_ADDR mapped_val;
};
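
/* Illustration (not part of the original file): putting the two comments
   above together, overlay section number N (counting from 1) that was
   assigned overlay buffer B is currently mapped exactly when

       _ovly_buf_table[B - 1].mapped == N

   spu_get_overlay_table below caches the address of that word as
   MAPPED_PTR and the expected value N as MAPPED_VAL, so that
   spu_overlay_update_osect only needs a single 4-byte memory read per
   section to refresh its mapped state.  */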

/* Retrieve the overlay table for OBJFILE.  If not already cached, read
   the _ovly_table data structure from the target and initialize the
   spu_overlay_table data structure from it.  */
static struct spu_overlay_table *
spu_get_overlay_table (struct objfile *objfile)
{
  enum bfd_endian byte_order = bfd_big_endian (objfile->obfd)?
                               BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
  struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
  CORE_ADDR ovly_table_base, ovly_buf_table_base;
  unsigned ovly_table_size, ovly_buf_table_size;
  struct spu_overlay_table *tbl;
  struct obj_section *osect;
  gdb_byte *ovly_table;
  int i;

  tbl = objfile_data (objfile, spu_overlay_data);
  if (tbl)
    return tbl;

  ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
  if (!ovly_table_msym)
    return NULL;

  ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table",
                                               NULL, objfile);
  if (!ovly_buf_table_msym)
    return NULL;

  ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
  ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);

  ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
  ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);

  ovly_table = xmalloc (ovly_table_size);
  read_memory (ovly_table_base, ovly_table, ovly_table_size);

  tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
                        objfile->sections_end - objfile->sections,
                        struct spu_overlay_table);

  for (i = 0; i < ovly_table_size / 16; i++)
    {
      CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0,
                                                4, byte_order);
      CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4,
                                                 4, byte_order);
      CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8,
                                                4, byte_order);
      CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12,
                                                4, byte_order);

      if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
        continue;

      ALL_OBJFILE_OSECTIONS (objfile, osect)
        if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
            && pos == osect->the_bfd_section->filepos)
          {
            int ndx = osect - objfile->sections;
            tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
            tbl[ndx].mapped_val = i + 1;
            break;
          }
    }

  xfree (ovly_table);
  set_objfile_data (objfile, spu_overlay_data, tbl);
  return tbl;
}

/* Read _ovly_buf_table entry from the target to determine whether
   OSECT is currently mapped, and update the mapped state.  */
static void
spu_overlay_update_osect (struct obj_section *osect)
{
  enum bfd_endian byte_order = bfd_big_endian (osect->objfile->obfd)?
                               BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
  struct spu_overlay_table *ovly_table;
  CORE_ADDR id, val;

  ovly_table = spu_get_overlay_table (osect->objfile);
  if (!ovly_table)
    return;

  ovly_table += osect - osect->objfile->sections;
  if (ovly_table->mapped_ptr == 0)
    return;

  id = SPUADDR_SPU (obj_section_addr (osect));
  val = read_memory_unsigned_integer (SPUADDR (id, ovly_table->mapped_ptr),
                                      4, byte_order);
  osect->ovly_mapped = (val == ovly_table->mapped_val);
}
*/ 01847 else 01848 { 01849 struct objfile *objfile; 01850 01851 ALL_OBJSECTIONS (objfile, osect) 01852 if (section_is_overlay (osect)) 01853 spu_overlay_update_osect (osect); 01854 } 01855 } 01856 01857 /* Whenever a new objfile is loaded, read the target's _ovly_table. 01858 If there is one, go through all sections and make sure for non- 01859 overlay sections LMA equals VMA, while for overlay sections LMA 01860 is larger than SPU_OVERLAY_LMA. */ 01861 static void 01862 spu_overlay_new_objfile (struct objfile *objfile) 01863 { 01864 struct spu_overlay_table *ovly_table; 01865 struct obj_section *osect; 01866 01867 /* If we've already touched this file, do nothing. */ 01868 if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL) 01869 return; 01870 01871 /* Consider only SPU objfiles. */ 01872 if (bfd_get_arch (objfile->obfd) != bfd_arch_spu) 01873 return; 01874 01875 /* Check if this objfile has overlays. */ 01876 ovly_table = spu_get_overlay_table (objfile); 01877 if (!ovly_table) 01878 return; 01879 01880 /* Now go and fiddle with all the LMAs. */ 01881 ALL_OBJFILE_OSECTIONS (objfile, osect) 01882 { 01883 bfd *obfd = objfile->obfd; 01884 asection *bsect = osect->the_bfd_section; 01885 int ndx = osect - objfile->sections; 01886 01887 if (ovly_table[ndx].mapped_ptr == 0) 01888 bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect); 01889 else 01890 bfd_section_lma (obfd, bsect) = SPU_OVERLAY_LMA + bsect->filepos; 01891 } 01892 } 01893 01894 01895 /* Insert temporary breakpoint on "main" function of newly loaded 01896 SPE context OBJFILE. */ 01897 static void 01898 spu_catch_start (struct objfile *objfile) 01899 { 01900 struct minimal_symbol *minsym; 01901 struct symtab *symtab; 01902 CORE_ADDR pc; 01903 char buf[32]; 01904 01905 /* Do this only if requested by "set spu stop-on-load on". */ 01906 if (!spu_stop_on_load_p) 01907 return; 01908 01909 /* Consider only SPU objfiles. */ 01910 if (!objfile || bfd_get_arch (objfile->obfd) != bfd_arch_spu) 01911 return; 01912 01913 /* The main objfile is handled differently. */ 01914 if (objfile == symfile_objfile) 01915 return; 01916 01917 /* There can be multiple symbols named "main". Search for the 01918 "main" in *this* objfile. */ 01919 minsym = lookup_minimal_symbol ("main", NULL, objfile); 01920 if (!minsym) 01921 return; 01922 01923 /* If we have debugging information, try to use it -- this 01924 will allow us to properly skip the prologue. */ 01925 pc = SYMBOL_VALUE_ADDRESS (minsym); 01926 symtab = find_pc_sect_symtab (pc, SYMBOL_OBJ_SECTION (objfile, minsym)); 01927 if (symtab != NULL) 01928 { 01929 struct blockvector *bv = BLOCKVECTOR (symtab); 01930 struct block *block = BLOCKVECTOR_BLOCK (bv, GLOBAL_BLOCK); 01931 struct symbol *sym; 01932 struct symtab_and_line sal; 01933 01934 sym = lookup_block_symbol (block, "main", VAR_DOMAIN); 01935 if (sym) 01936 { 01937 fixup_symbol_section (sym, objfile); 01938 sal = find_function_start_sal (sym, 1); 01939 pc = sal.pc; 01940 } 01941 } 01942 01943 /* Use a numerical address for the set_breakpoint command to avoid having 01944 the breakpoint re-set incorrectly. 
*/ 01945 xsnprintf (buf, sizeof buf, "*%s", core_addr_to_string (pc)); 01946 create_breakpoint (get_objfile_arch (objfile), buf /* arg */, 01947 NULL /* cond_string */, -1 /* thread */, 01948 NULL /* extra_string */, 01949 0 /* parse_condition_and_thread */, 1 /* tempflag */, 01950 bp_breakpoint /* type_wanted */, 01951 0 /* ignore_count */, 01952 AUTO_BOOLEAN_FALSE /* pending_break_support */, 01953 &bkpt_breakpoint_ops /* ops */, 0 /* from_tty */, 01954 1 /* enabled */, 0 /* internal */, 0); 01955 } 01956 01957 01958 /* Look up OBJFILE loaded into FRAME's SPU context. */ 01959 static struct objfile * 01960 spu_objfile_from_frame (struct frame_info *frame) 01961 { 01962 struct gdbarch *gdbarch = get_frame_arch (frame); 01963 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); 01964 struct objfile *obj; 01965 01966 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu) 01967 return NULL; 01968 01969 ALL_OBJFILES (obj) 01970 { 01971 if (obj->sections != obj->sections_end 01972 && SPUADDR_SPU (obj_section_addr (obj->sections)) == tdep->id) 01973 return obj; 01974 } 01975 01976 return NULL; 01977 } 01978 01979 /* Flush cache for ea pointer access if available. */ 01980 static void 01981 flush_ea_cache (void) 01982 { 01983 struct minimal_symbol *msymbol; 01984 struct objfile *obj; 01985 01986 if (!has_stack_frames ()) 01987 return; 01988 01989 obj = spu_objfile_from_frame (get_current_frame ()); 01990 if (obj == NULL) 01991 return; 01992 01993 /* Lookup inferior function __cache_flush. */ 01994 msymbol = lookup_minimal_symbol ("__cache_flush", NULL, obj); 01995 if (msymbol != NULL) 01996 { 01997 struct type *type; 01998 CORE_ADDR addr; 01999 02000 type = objfile_type (obj)->builtin_void; 02001 type = lookup_function_type (type); 02002 type = lookup_pointer_type (type); 02003 addr = SYMBOL_VALUE_ADDRESS (msymbol); 02004 02005 call_function_by_hand (value_from_pointer (type, addr), 0, NULL); 02006 } 02007 } 02008 02009 /* This handler is called when the inferior has stopped. If it is stopped in 02010 SPU architecture then flush the ea cache if used. */ 02011 static void 02012 spu_attach_normal_stop (struct bpstats *bs, int print_frame) 02013 { 02014 if (!spu_auto_flush_cache_p) 02015 return; 02016 02017 /* Temporarily reset spu_auto_flush_cache_p to avoid recursively 02018 re-entering this function when __cache_flush stops. */ 02019 spu_auto_flush_cache_p = 0; 02020 flush_ea_cache (); 02021 spu_auto_flush_cache_p = 1; 02022 } 02023 02024 02025 /* "info spu" commands. 
 */

static void
info_spu_event_command (char *args, int from_tty)
{
  struct frame_info *frame = get_selected_frame (NULL);
  ULONGEST event_status = 0;
  ULONGEST event_mask = 0;
  struct cleanup *chain;
  gdb_byte buf[100];
  char annex[32];
  LONGEST len;
  int id;

  if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
    error (_("\"info spu\" is only supported on the SPU architecture."));

  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);

  xsnprintf (annex, sizeof annex, "%d/event_status", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
                     buf, 0, (sizeof (buf) - 1));
  if (len <= 0)
    error (_("Could not read event_status."));
  buf[len] = '\0';
  event_status = strtoulst ((char *) buf, NULL, 16);

  xsnprintf (annex, sizeof annex, "%d/event_mask", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
                     buf, 0, (sizeof (buf) - 1));
  if (len <= 0)
    error (_("Could not read event_mask."));
  buf[len] = '\0';
  event_mask = strtoulst ((char *) buf, NULL, 16);

  chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoEvent");

  if (ui_out_is_mi_like_p (current_uiout))
    {
      ui_out_field_fmt (current_uiout, "event_status",
                        "0x%s", phex_nz (event_status, 4));
      ui_out_field_fmt (current_uiout, "event_mask",
                        "0x%s", phex_nz (event_mask, 4));
    }
  else
    {
      printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
      printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask, 4));
    }

  do_cleanups (chain);
}

static void
info_spu_signal_command (char *args, int from_tty)
{
  struct frame_info *frame = get_selected_frame (NULL);
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  ULONGEST signal1 = 0;
  ULONGEST signal1_type = 0;
  int signal1_pending = 0;
  ULONGEST signal2 = 0;
  ULONGEST signal2_type = 0;
  int signal2_pending = 0;
  struct cleanup *chain;
  char annex[32];
  gdb_byte buf[100];
  LONGEST len;
  int id;

  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
    error (_("\"info spu\" is only supported on the SPU architecture."));

  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);

  xsnprintf (annex, sizeof annex, "%d/signal1", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
  if (len < 0)
    error (_("Could not read signal1."));
  else if (len == 4)
    {
      signal1 = extract_unsigned_integer (buf, 4, byte_order);
      signal1_pending = 1;
    }

  xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
                     buf, 0, (sizeof (buf) - 1));
  if (len <= 0)
    error (_("Could not read signal1_type."));
  buf[len] = '\0';
  signal1_type = strtoulst ((char *) buf, NULL, 16);

  xsnprintf (annex, sizeof annex, "%d/signal2", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
  if (len < 0)
    error (_("Could not read signal2."));
  else if (len == 4)
    {
      signal2 = extract_unsigned_integer (buf, 4, byte_order);
      signal2_pending = 1;
    }

  xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
                     buf, 0, (sizeof (buf) - 1));
  if (len <= 0)
    error (_("Could not read signal2_type."));
  buf[len] = '\0';
  signal2_type = strtoulst ((char *) buf, NULL, 16);

  chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoSignal");

  if (ui_out_is_mi_like_p (current_uiout))
    {
      ui_out_field_int (current_uiout, "signal1_pending", signal1_pending);
      ui_out_field_fmt (current_uiout, "signal1", "0x%s", phex_nz (signal1, 4));
      ui_out_field_int (current_uiout, "signal1_type", signal1_type);
      ui_out_field_int (current_uiout, "signal2_pending", signal2_pending);
      ui_out_field_fmt (current_uiout, "signal2", "0x%s", phex_nz (signal2, 4));
      ui_out_field_int (current_uiout, "signal2_type", signal2_type);
    }
  else
    {
      if (signal1_pending)
        printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
      else
        printf_filtered (_("Signal 1 not pending "));

      if (signal1_type)
        printf_filtered (_("(Type Or)\n"));
      else
        printf_filtered (_("(Type Overwrite)\n"));

      if (signal2_pending)
        printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
      else
        printf_filtered (_("Signal 2 not pending "));

      if (signal2_type)
        printf_filtered (_("(Type Or)\n"));
      else
        printf_filtered (_("(Type Overwrite)\n"));
    }

  do_cleanups (chain);
}

static void
info_spu_mailbox_list (gdb_byte *buf, int nr, enum bfd_endian byte_order,
                       const char *field, const char *msg)
{
  struct cleanup *chain;
  int i;

  if (nr <= 0)
    return;

  chain = make_cleanup_ui_out_table_begin_end (current_uiout, 1, nr, "mbox");

  ui_out_table_header (current_uiout, 32, ui_left, field, msg);
  ui_out_table_body (current_uiout);

  for (i = 0; i < nr; i++)
    {
      struct cleanup *val_chain;
      ULONGEST val;
      val_chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "mbox");
      val = extract_unsigned_integer (buf + 4*i, 4, byte_order);
      ui_out_field_fmt (current_uiout, field, "0x%s", phex (val, 4));
      do_cleanups (val_chain);

      if (!ui_out_is_mi_like_p (current_uiout))
        printf_filtered ("\n");
    }

  do_cleanups (chain);
}

static void
info_spu_mailbox_command (char *args, int from_tty)
{
  struct frame_info *frame = get_selected_frame (NULL);
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct cleanup *chain;
  char annex[32];
  gdb_byte buf[1024];
  LONGEST len;
  int id;

  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
    error (_("\"info spu\" is only supported on the SPU architecture."));

  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);

  chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoMailbox");

  xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
                     buf, 0, sizeof buf);
  if (len < 0)
    error (_("Could not read mbox_info."));

  info_spu_mailbox_list (buf, len / 4, byte_order,
                         "mbox", "SPU Outbound Mailbox");

  xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
                     buf, 0, sizeof buf);
  if (len < 0)
    error (_("Could not read ibox_info."));

  info_spu_mailbox_list (buf, len / 4, byte_order,
                         "ibox", "SPU Outbound Interrupt Mailbox");

  xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
                     buf, 0, sizeof buf);
  if (len < 0)
    error (_("Could not read wbox_info."));

  info_spu_mailbox_list (buf, len / 4, byte_order,
                         "wbox", "SPU Inbound Mailbox");

  do_cleanups (chain);
}

static ULONGEST
spu_mfc_get_bitfield (ULONGEST word, int first, int last)
{
  ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
  return (word >> (63 - last)) & mask;
}

static void
info_spu_dma_cmdlist (gdb_byte *buf, int nr, enum bfd_endian byte_order)
{
  static char *spu_mfc_opcode[256] =
    {
    /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
             "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
    /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
             "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
    /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
    /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
             "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
    /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

  int *seq = alloca (nr * sizeof (int));
  int done = 0;
  struct cleanup *chain;
  int i, j;


  /* Determine sequence in which to display (valid) entries.  */
  for (i = 0; i < nr; i++)
    {
      /* Search for the first valid entry all of whose
         dependencies are met.
*/ 02311 for (j = 0; j < nr; j++) 02312 { 02313 ULONGEST mfc_cq_dw3; 02314 ULONGEST dependencies; 02315 02316 if (done & (1 << (nr - 1 - j))) 02317 continue; 02318 02319 mfc_cq_dw3 02320 = extract_unsigned_integer (buf + 32*j + 24,8, byte_order); 02321 if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16)) 02322 continue; 02323 02324 dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1); 02325 if ((dependencies & done) != dependencies) 02326 continue; 02327 02328 seq[i] = j; 02329 done |= 1 << (nr - 1 - j); 02330 break; 02331 } 02332 02333 if (j == nr) 02334 break; 02335 } 02336 02337 nr = i; 02338 02339 02340 chain = make_cleanup_ui_out_table_begin_end (current_uiout, 10, nr, 02341 "dma_cmd"); 02342 02343 ui_out_table_header (current_uiout, 7, ui_left, "opcode", "Opcode"); 02344 ui_out_table_header (current_uiout, 3, ui_left, "tag", "Tag"); 02345 ui_out_table_header (current_uiout, 3, ui_left, "tid", "TId"); 02346 ui_out_table_header (current_uiout, 3, ui_left, "rid", "RId"); 02347 ui_out_table_header (current_uiout, 18, ui_left, "ea", "EA"); 02348 ui_out_table_header (current_uiout, 7, ui_left, "lsa", "LSA"); 02349 ui_out_table_header (current_uiout, 7, ui_left, "size", "Size"); 02350 ui_out_table_header (current_uiout, 7, ui_left, "lstaddr", "LstAddr"); 02351 ui_out_table_header (current_uiout, 7, ui_left, "lstsize", "LstSize"); 02352 ui_out_table_header (current_uiout, 1, ui_left, "error_p", "E"); 02353 02354 ui_out_table_body (current_uiout); 02355 02356 for (i = 0; i < nr; i++) 02357 { 02358 struct cleanup *cmd_chain; 02359 ULONGEST mfc_cq_dw0; 02360 ULONGEST mfc_cq_dw1; 02361 ULONGEST mfc_cq_dw2; 02362 int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id; 02363 int list_lsa, list_size, mfc_lsa, mfc_size; 02364 ULONGEST mfc_ea; 02365 int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p; 02366 02367 /* Decode contents of MFC Command Queue Context Save/Restore Registers. 02368 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1. 
*/ 02369 02370 mfc_cq_dw0 02371 = extract_unsigned_integer (buf + 32*seq[i], 8, byte_order); 02372 mfc_cq_dw1 02373 = extract_unsigned_integer (buf + 32*seq[i] + 8, 8, byte_order); 02374 mfc_cq_dw2 02375 = extract_unsigned_integer (buf + 32*seq[i] + 16, 8, byte_order); 02376 02377 list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14); 02378 list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26); 02379 mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34); 02380 mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39); 02381 list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40); 02382 rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43); 02383 tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46); 02384 02385 mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12 02386 | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36); 02387 02388 mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13); 02389 mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24); 02390 noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37); 02391 qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38); 02392 ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39); 02393 cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40); 02394 02395 cmd_chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "cmd"); 02396 02397 if (spu_mfc_opcode[mfc_cmd_opcode]) 02398 ui_out_field_string (current_uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]); 02399 else 02400 ui_out_field_int (current_uiout, "opcode", mfc_cmd_opcode); 02401 02402 ui_out_field_int (current_uiout, "tag", mfc_cmd_tag); 02403 ui_out_field_int (current_uiout, "tid", tclass_id); 02404 ui_out_field_int (current_uiout, "rid", rclass_id); 02405 02406 if (ea_valid_p) 02407 ui_out_field_fmt (current_uiout, "ea", "0x%s", phex (mfc_ea, 8)); 02408 else 02409 ui_out_field_skip (current_uiout, "ea"); 02410 02411 ui_out_field_fmt (current_uiout, "lsa", "0x%05x", mfc_lsa << 4); 02412 if (qw_valid_p) 02413 ui_out_field_fmt (current_uiout, "size", "0x%05x", mfc_size << 4); 02414 else 02415 ui_out_field_fmt (current_uiout, "size", "0x%05x", mfc_size); 02416 02417 if (list_valid_p) 02418 { 02419 ui_out_field_fmt (current_uiout, "lstaddr", "0x%05x", list_lsa << 3); 02420 ui_out_field_fmt (current_uiout, "lstsize", "0x%05x", list_size << 3); 02421 } 02422 else 02423 { 02424 ui_out_field_skip (current_uiout, "lstaddr"); 02425 ui_out_field_skip (current_uiout, "lstsize"); 02426 } 02427 02428 if (cmd_error_p) 02429 ui_out_field_string (current_uiout, "error_p", "*"); 02430 else 02431 ui_out_field_skip (current_uiout, "error_p"); 02432 02433 do_cleanups (cmd_chain); 02434 02435 if (!ui_out_is_mi_like_p (current_uiout)) 02436 printf_filtered ("\n"); 02437 } 02438 02439 do_cleanups (chain); 02440 } 02441 02442 static void 02443 info_spu_dma_command (char *args, int from_tty) 02444 { 02445 struct frame_info *frame = get_selected_frame (NULL); 02446 struct gdbarch *gdbarch = get_frame_arch (frame); 02447 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); 02448 ULONGEST dma_info_type; 02449 ULONGEST dma_info_mask; 02450 ULONGEST dma_info_status; 02451 ULONGEST dma_info_stall_and_notify; 02452 ULONGEST dma_info_atomic_command_status; 02453 struct cleanup *chain; 02454 char annex[32]; 02455 gdb_byte buf[1024]; 02456 LONGEST len; 02457 int id; 02458 02459 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu) 02460 error (_("\"info spu\" is only supported on the SPU architecture.")); 02461 02462 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM); 02463 02464 xsnprintf 
 (annex, sizeof annex, "%d/dma_info", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
                     buf, 0, 40 + 16 * 32);
  if (len <= 0)
    error (_("Could not read dma_info."));

  dma_info_type
    = extract_unsigned_integer (buf, 8, byte_order);
  dma_info_mask
    = extract_unsigned_integer (buf + 8, 8, byte_order);
  dma_info_status
    = extract_unsigned_integer (buf + 16, 8, byte_order);
  dma_info_stall_and_notify
    = extract_unsigned_integer (buf + 24, 8, byte_order);
  dma_info_atomic_command_status
    = extract_unsigned_integer (buf + 32, 8, byte_order);

  chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoDMA");

  if (ui_out_is_mi_like_p (current_uiout))
    {
      ui_out_field_fmt (current_uiout, "dma_info_type", "0x%s",
                        phex_nz (dma_info_type, 4));
      ui_out_field_fmt (current_uiout, "dma_info_mask", "0x%s",
                        phex_nz (dma_info_mask, 4));
      ui_out_field_fmt (current_uiout, "dma_info_status", "0x%s",
                        phex_nz (dma_info_status, 4));
      ui_out_field_fmt (current_uiout, "dma_info_stall_and_notify", "0x%s",
                        phex_nz (dma_info_stall_and_notify, 4));
      ui_out_field_fmt (current_uiout, "dma_info_atomic_command_status", "0x%s",
                        phex_nz (dma_info_atomic_command_status, 4));
    }
  else
    {
      const char *query_msg = _("no query pending");

      if (dma_info_type & 4)
        switch (dma_info_type & 3)
          {
          case 1: query_msg = _("'any' query pending"); break;
          case 2: query_msg = _("'all' query pending"); break;
          default: query_msg = _("undefined query type"); break;
          }

      printf_filtered (_("Tag-Group Status 0x%s\n"),
                       phex (dma_info_status, 4));
      printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
                       phex (dma_info_mask, 4), query_msg);
      printf_filtered (_("Stall-and-Notify 0x%s\n"),
                       phex (dma_info_stall_and_notify, 4));
      printf_filtered (_("Atomic Cmd Status 0x%s\n"),
                       phex (dma_info_atomic_command_status, 4));
      printf_filtered ("\n");
    }

  info_spu_dma_cmdlist (buf + 40, 16, byte_order);
  do_cleanups (chain);
}

static void
info_spu_proxydma_command (char *args, int from_tty)
{
  struct frame_info *frame = get_selected_frame (NULL);
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  ULONGEST dma_info_type;
  ULONGEST dma_info_mask;
  ULONGEST dma_info_status;
  struct cleanup *chain;
  char annex[32];
  gdb_byte buf[1024];
  LONGEST len;
  int id;

  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
    error (_("\"info spu\" is only supported on the SPU architecture."));

  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);

  xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
                     buf, 0, 24 + 8 * 32);
  if (len <= 0)
    error (_("Could not read proxydma_info."));

  dma_info_type = extract_unsigned_integer (buf, 8, byte_order);
  dma_info_mask = extract_unsigned_integer (buf + 8, 8, byte_order);
  dma_info_status = extract_unsigned_integer (buf + 16, 8, byte_order);

  chain = make_cleanup_ui_out_tuple_begin_end (current_uiout,
                                               "SPUInfoProxyDMA");

  if (ui_out_is_mi_like_p (current_uiout))
    {
      ui_out_field_fmt (current_uiout,
"proxydma_info_type", "0x%s", 02559 phex_nz (dma_info_type, 4)); 02560 ui_out_field_fmt (current_uiout, "proxydma_info_mask", "0x%s", 02561 phex_nz (dma_info_mask, 4)); 02562 ui_out_field_fmt (current_uiout, "proxydma_info_status", "0x%s", 02563 phex_nz (dma_info_status, 4)); 02564 } 02565 else 02566 { 02567 const char *query_msg; 02568 02569 switch (dma_info_type & 3) 02570 { 02571 case 0: query_msg = _("no query pending"); break; 02572 case 1: query_msg = _("'any' query pending"); break; 02573 case 2: query_msg = _("'all' query pending"); break; 02574 default: query_msg = _("undefined query type"); break; 02575 } 02576 02577 printf_filtered (_("Tag-Group Status 0x%s\n"), 02578 phex (dma_info_status, 4)); 02579 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"), 02580 phex (dma_info_mask, 4), query_msg); 02581 printf_filtered ("\n"); 02582 } 02583 02584 info_spu_dma_cmdlist (buf + 24, 8, byte_order); 02585 do_cleanups (chain); 02586 } 02587 02588 static void 02589 info_spu_command (char *args, int from_tty) 02590 { 02591 printf_unfiltered (_("\"info spu\" must be followed by " 02592 "the name of an SPU facility.\n")); 02593 help_list (infospucmdlist, "info spu ", -1, gdb_stdout); 02594 } 02595 02596 02597 /* Root of all "set spu "/"show spu " commands. */ 02598 02599 static void 02600 show_spu_command (char *args, int from_tty) 02601 { 02602 help_list (showspucmdlist, "show spu ", all_commands, gdb_stdout); 02603 } 02604 02605 static void 02606 set_spu_command (char *args, int from_tty) 02607 { 02608 help_list (setspucmdlist, "set spu ", all_commands, gdb_stdout); 02609 } 02610 02611 static void 02612 show_spu_stop_on_load (struct ui_file *file, int from_tty, 02613 struct cmd_list_element *c, const char *value) 02614 { 02615 fprintf_filtered (file, _("Stopping for new SPE threads is %s.\n"), 02616 value); 02617 } 02618 02619 static void 02620 show_spu_auto_flush_cache (struct ui_file *file, int from_tty, 02621 struct cmd_list_element *c, const char *value) 02622 { 02623 fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"), 02624 value); 02625 } 02626 02627 02628 /* Set up gdbarch struct. */ 02629 02630 static struct gdbarch * 02631 spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) 02632 { 02633 struct gdbarch *gdbarch; 02634 struct gdbarch_tdep *tdep; 02635 int id = -1; 02636 02637 /* Which spufs ID was requested as address space? */ 02638 if (info.tdep_info) 02639 id = *(int *)info.tdep_info; 02640 /* For objfile architectures of SPU solibs, decode the ID from the name. 02641 This assumes the filename convention employed by solib-spu.c. */ 02642 else if (info.abfd) 02643 { 02644 char *name = strrchr (info.abfd->filename, '@'); 02645 if (name) 02646 sscanf (name, "@0x%*x <%d>", &id); 02647 } 02648 02649 /* Find a candidate among extant architectures. */ 02650 for (arches = gdbarch_list_lookup_by_info (arches, &info); 02651 arches != NULL; 02652 arches = gdbarch_list_lookup_by_info (arches->next, &info)) 02653 { 02654 tdep = gdbarch_tdep (arches->gdbarch); 02655 if (tdep && tdep->id == id) 02656 return arches->gdbarch; 02657 } 02658 02659 /* None found, so create a new architecture. */ 02660 tdep = XCALLOC (1, struct gdbarch_tdep); 02661 tdep->id = id; 02662 gdbarch = gdbarch_alloc (&info, tdep); 02663 02664 /* Disassembler. */ 02665 set_gdbarch_print_insn (gdbarch, gdb_print_insn_spu); 02666 02667 /* Registers. 
*/ 02668 set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS); 02669 set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS); 02670 set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM); 02671 set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM); 02672 set_gdbarch_read_pc (gdbarch, spu_read_pc); 02673 set_gdbarch_write_pc (gdbarch, spu_write_pc); 02674 set_gdbarch_register_name (gdbarch, spu_register_name); 02675 set_gdbarch_register_type (gdbarch, spu_register_type); 02676 set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read); 02677 set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write); 02678 set_gdbarch_value_from_register (gdbarch, spu_value_from_register); 02679 set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p); 02680 02681 /* Data types. */ 02682 set_gdbarch_char_signed (gdbarch, 0); 02683 set_gdbarch_ptr_bit (gdbarch, 32); 02684 set_gdbarch_addr_bit (gdbarch, 32); 02685 set_gdbarch_short_bit (gdbarch, 16); 02686 set_gdbarch_int_bit (gdbarch, 32); 02687 set_gdbarch_long_bit (gdbarch, 32); 02688 set_gdbarch_long_long_bit (gdbarch, 64); 02689 set_gdbarch_float_bit (gdbarch, 32); 02690 set_gdbarch_double_bit (gdbarch, 64); 02691 set_gdbarch_long_double_bit (gdbarch, 64); 02692 set_gdbarch_float_format (gdbarch, floatformats_ieee_single); 02693 set_gdbarch_double_format (gdbarch, floatformats_ieee_double); 02694 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double); 02695 02696 /* Address handling. */ 02697 set_gdbarch_address_to_pointer (gdbarch, spu_address_to_pointer); 02698 set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address); 02699 set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address); 02700 set_gdbarch_address_class_type_flags (gdbarch, spu_address_class_type_flags); 02701 set_gdbarch_address_class_type_flags_to_name 02702 (gdbarch, spu_address_class_type_flags_to_name); 02703 set_gdbarch_address_class_name_to_type_flags 02704 (gdbarch, spu_address_class_name_to_type_flags); 02705 02706 02707 /* Inferior function calls. */ 02708 set_gdbarch_call_dummy_location (gdbarch, ON_STACK); 02709 set_gdbarch_frame_align (gdbarch, spu_frame_align); 02710 set_gdbarch_frame_red_zone_size (gdbarch, 2000); 02711 set_gdbarch_push_dummy_code (gdbarch, spu_push_dummy_code); 02712 set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call); 02713 set_gdbarch_dummy_id (gdbarch, spu_dummy_id); 02714 set_gdbarch_return_value (gdbarch, spu_return_value); 02715 02716 /* Frame handling. */ 02717 set_gdbarch_inner_than (gdbarch, core_addr_lessthan); 02718 frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind); 02719 frame_base_set_default (gdbarch, &spu_frame_base); 02720 set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc); 02721 set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp); 02722 set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer); 02723 set_gdbarch_frame_args_skip (gdbarch, 0); 02724 set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue); 02725 set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p); 02726 02727 /* Cell/B.E. cross-architecture unwinder support. */ 02728 frame_unwind_prepend_unwinder (gdbarch, &spu2ppu_unwind); 02729 02730 /* Breakpoints. 
*/ 02731 set_gdbarch_decr_pc_after_break (gdbarch, 4); 02732 set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc); 02733 set_gdbarch_memory_remove_breakpoint (gdbarch, spu_memory_remove_breakpoint); 02734 set_gdbarch_cannot_step_breakpoint (gdbarch, 1); 02735 set_gdbarch_software_single_step (gdbarch, spu_software_single_step); 02736 set_gdbarch_get_longjmp_target (gdbarch, spu_get_longjmp_target); 02737 02738 /* Overlays. */ 02739 set_gdbarch_overlay_update (gdbarch, spu_overlay_update); 02740 02741 return gdbarch; 02742 } 02743 02744 /* Provide a prototype to silence -Wmissing-prototypes. */ 02745 extern initialize_file_ftype _initialize_spu_tdep; 02746 02747 void 02748 _initialize_spu_tdep (void) 02749 { 02750 register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init); 02751 02752 /* Add ourselves to objfile event chain. */ 02753 observer_attach_new_objfile (spu_overlay_new_objfile); 02754 spu_overlay_data = register_objfile_data (); 02755 02756 /* Install spu stop-on-load handler. */ 02757 observer_attach_new_objfile (spu_catch_start); 02758 02759 /* Add ourselves to normal_stop event chain. */ 02760 observer_attach_normal_stop (spu_attach_normal_stop); 02761 02762 /* Add root prefix command for all "set spu"/"show spu" commands. */ 02763 add_prefix_cmd ("spu", no_class, set_spu_command, 02764 _("Various SPU specific commands."), 02765 &setspucmdlist, "set spu ", 0, &setlist); 02766 add_prefix_cmd ("spu", no_class, show_spu_command, 02767 _("Various SPU specific commands."), 02768 &showspucmdlist, "show spu ", 0, &showlist); 02769 02770 /* Toggle whether or not to add a temporary breakpoint at the "main" 02771 function of new SPE contexts. */ 02772 add_setshow_boolean_cmd ("stop-on-load", class_support, 02773 &spu_stop_on_load_p, _("\ 02774 Set whether to stop for new SPE threads."), 02775 _("\ 02776 Show whether to stop for new SPE threads."), 02777 _("\ 02778 Use \"on\" to give control to the user when a new SPE thread\n\ 02779 enters its \"main\" function.\n\ 02780 Use \"off\" to disable stopping for new SPE threads."), 02781 NULL, 02782 show_spu_stop_on_load, 02783 &setspucmdlist, &showspucmdlist); 02784 02785 /* Toggle whether or not to automatically flush the software-managed 02786 cache whenever SPE execution stops. */ 02787 add_setshow_boolean_cmd ("auto-flush-cache", class_support, 02788 &spu_auto_flush_cache_p, _("\ 02789 Set whether to automatically flush the software-managed cache."), 02790 _("\ 02791 Show whether to automatically flush the software-managed cache."), 02792 _("\ 02793 Use \"on\" to automatically flush the software-managed cache\n\ 02794 whenever SPE execution stops.\n\ 02795 Use \"off\" to never automatically flush the software-managed cache."), 02796 NULL, 02797 show_spu_auto_flush_cache, 02798 &setspucmdlist, &showspucmdlist); 02799 02800 /* Add root prefix command for all "info spu" commands. */ 02801 add_prefix_cmd ("spu", class_info, info_spu_command, 02802 _("Various SPU specific commands."), 02803 &infospucmdlist, "info spu ", 0, &infolist); 02804 02805 /* Add various "info spu" commands. 
*/ 02806 add_cmd ("event", class_info, info_spu_event_command, 02807 _("Display SPU event facility status.\n"), 02808 &infospucmdlist); 02809 add_cmd ("signal", class_info, info_spu_signal_command, 02810 _("Display SPU signal notification facility status.\n"), 02811 &infospucmdlist); 02812 add_cmd ("mailbox", class_info, info_spu_mailbox_command, 02813 _("Display SPU mailbox facility status.\n"), 02814 &infospucmdlist); 02815 add_cmd ("dma", class_info, info_spu_dma_command, 02816 _("Display MFC DMA status.\n"), 02817 &infospucmdlist); 02818 add_cmd ("proxydma", class_info, info_spu_proxydma_command, 02819 _("Display MFC Proxy-DMA status.\n"), 02820 &infospucmdlist); 02821 }
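

/* Standalone illustrative sketch (not part of spu-tdep.c or the GDB API).
   The disassembler wrapper and overlay code above combine a spufs context
   ID with a 32-bit local-store address via the SPUADDR / SPUADDR_SPU macros
   from spu-tdep.h, which are not shown in this listing.  The helpers below
   are hypothetical stand-ins that assume the ID lives in the upper 32 bits
   of a 64-bit address, consistent with the "SPU ID encoded in the high
   bits" comment in gdb_print_insn_spu; the exact macro definitions may
   differ.  */

#include <stdint.h>
#include <stdio.h>

/* Pack a spufs ID into the upper 32 bits, keep the local-store offset
   in the lower 32 bits (assumed encoding).  */
static uint64_t
example_spu_addr (int id, uint32_t lsa)
{
  return ((uint64_t) (uint32_t) id << 32) | lsa;
}

static int
example_spu_addr_id (uint64_t addr)
{
  return (int) (addr >> 32);
}

static uint32_t
example_spu_addr_lsa (uint64_t addr)
{
  return (uint32_t) addr;
}

int
main (void)
{
  uint64_t addr = example_spu_addr (5, 0x12a0);
  printf ("packed 0x%llx -> id %d, local-store offset 0x%x\n",
          (unsigned long long) addr,
          example_spu_addr_id (addr),
          example_spu_addr_lsa (addr));
  return 0;
}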
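

/* Standalone illustrative sketch (not part of spu-tdep.c).  The overlay
   manager description above documents _ovly_table as an array of 16-byte
   big-endian records { vma, size, pos, buf } and _ovly_buf_table as an
   array of 32-bit "mapped" words; a section is mapped iff the word at its
   mapped_ptr equals its 1-based _ovly_table index.  The sketch parses such
   raw blobs on the host side; the byte buffers in main are hand-made
   example data standing in for memory that spu_get_overlay_table and
   spu_overlay_update_osect would read from the target.  */

#include <stdint.h>
#include <stdio.h>

/* Read a big-endian 32-bit word, as extract_unsigned_integer does
   for a big-endian SPU objfile.  */
static uint32_t
get_be32 (const unsigned char *p)
{
  return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16)
         | ((uint32_t) p[2] << 8) | p[3];
}

/* Walk _ovly_table and report which overlays are currently mapped.
   The real code additionally matches vma/pos against the objfile's
   sections; that step is omitted here.  */
static void
dump_overlay_state (const unsigned char *ovly_table, size_t ovly_table_size,
                    const unsigned char *buf_table, size_t buf_table_size)
{
  size_t i;

  for (i = 0; i < ovly_table_size / 16; i++)
    {
      uint32_t vma = get_be32 (ovly_table + 16 * i + 0);
      uint32_t size = get_be32 (ovly_table + 16 * i + 4);
      uint32_t buf = get_be32 (ovly_table + 16 * i + 12);

      if (buf == 0 || (buf - 1) * 4 >= buf_table_size)
        continue;  /* No buffer, or index out of range.  */

      uint32_t mapped = get_be32 (buf_table + (buf - 1) * 4);
      printf ("overlay %zu: vma 0x%x size 0x%x buffer %u -> %s\n",
              i, vma, size, buf,
              mapped == (uint32_t) (i + 1) ? "mapped" : "not mapped");
    }
}

int
main (void)
{
  /* Two overlays sharing buffer 1; the buffer currently holds entry 0
     (mapped word == 1).  */
  unsigned char table[32] = {
    0,0,0x40,0, 0,0,0x10,0, 0,0,0x20,0, 0,0,0,1,   /* vma, size, pos, buf */
    0,0,0x40,0, 0,0,0x10,0, 0,0,0x30,0, 0,0,0,1,
  };
  unsigned char bufs[4] = { 0,0,0,1 };

  dump_overlay_state (table, sizeof table, bufs, sizeof bufs);
  return 0;
}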
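

/* Standalone illustrative sketch (not part of spu-tdep.c).  The "info spu"
   commands read small spufs files through target_read with an annex of the
   form "<id>/<file>" and parse the ASCII hex contents with GDB's strtoulst.
   Below, snprintf stands in for xsnprintf, strtoull for strtoulst, and a
   string literal for the data a real target read would return; the sample
   value "0x11" is invented.  */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
{
  /* Build the annex exactly as the xsnprintf calls above do,
     e.g. "1/event_status" for spufs context 1.  */
  int id = 1;
  char annex[32];
  snprintf (annex, sizeof annex, "%d/event_status", id);

  /* Stand-in for the buffer that
     target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, ...)
     would fill with ASCII hex text.  */
  const char *fake_spufs_text = "0x11";
  char buf[100];
  size_t len = strlen (fake_spufs_text);

  memcpy (buf, fake_spufs_text, len);
  buf[len] = '\0';   /* The real code NUL-terminates before parsing.  */

  unsigned long long event_status = strtoull (buf, NULL, 16);
  printf ("annex \"%s\" -> Event Status 0x%llx\n", annex, event_status);
  return 0;
}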
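

/* Standalone illustrative sketch (not part of spu-tdep.c).
   spu_mfc_get_bitfield uses the Cell documentation's big-endian bit
   numbering: bit 0 is the most significant bit of the 64-bit word and
   FIRST..LAST is inclusive.  The sketch repeats that helper and applies it
   the way info_spu_dma_cmdlist does, pulling the opcode (bits 27..34 of
   dw0) and reassembling the effective address from dw1 bits 0..51 and dw2
   bits 25..36.  The sample dword values are made up.  */

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as spu_mfc_get_bitfield.  */
static uint64_t
mfc_get_bitfield (uint64_t word, int first, int last)
{
  uint64_t mask = ~(~(uint64_t) 0 << (last - first + 1));
  return (word >> (63 - last)) & mask;
}

int
main (void)
{
  /* Made-up register images of one MFC command-queue entry.  */
  uint64_t dw0 = 0x0000001050a00000ULL;
  uint64_t dw1 = 0x0000012345678000ULL;
  uint64_t dw2 = 0x0000000000abc000ULL;

  unsigned opcode = (unsigned) mfc_get_bitfield (dw0, 27, 34);
  unsigned tag = (unsigned) mfc_get_bitfield (dw0, 35, 39);

  /* Effective address split across dw1 (upper part) and dw2 (low bits),
     combined as in info_spu_dma_cmdlist.  */
  uint64_t ea = mfc_get_bitfield (dw1, 0, 51) << 12
                | mfc_get_bitfield (dw2, 25, 36);

  printf ("opcode 0x%x tag %u ea 0x%llx\n",
          opcode, tag, (unsigned long long) ea);
  return 0;
}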
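

/* Standalone illustrative sketch (not part of spu-tdep.c).
   info_spu_dma_cmdlist orders the NR queue entries so that each entry is
   displayed only after the entries it depends on, tracking already-emitted
   entries in a DONE bitmask in which entry J owns bit (NR - 1 - J).  This
   is a minimal version of that selection loop over synthetic validity and
   dependency data.  */

#include <stdio.h>

#define NR 4

int
main (void)
{
  /* Synthetic data: entry 2 depends on entry 0, entry 3 depends on
     entries 0 and 2, entry 1 is not valid.  */
  int valid[NR] = { 1, 0, 1, 1 };
  unsigned deps[NR] = { 0x0, 0x0, 0x8, 0xa };  /* bit (NR-1-j) marks entry j */
  int seq[NR];
  unsigned done = 0;
  int i, j, n;

  for (i = 0; i < NR; i++)
    {
      /* Pick the first valid, not-yet-emitted entry whose dependencies
         are all in DONE -- the same SEQ/DONE scheme as the listing.  */
      for (j = 0; j < NR; j++)
        {
          if (done & (1u << (NR - 1 - j)))
            continue;
          if (!valid[j])
            continue;
          if ((deps[j] & done) != deps[j])
            continue;

          seq[i] = j;
          done |= 1u << (NR - 1 - j);
          break;
        }
      if (j == NR)
        break;
    }

  n = i;
  for (i = 0; i < n; i++)
    printf ("display slot %d -> queue entry %d\n", i, seq[i]);
  return 0;
}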
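

/* Standalone illustrative sketch (not part of spu-tdep.c).
   spu_gdbarch_init recovers the spufs ID of an SPU solib objfile from its
   BFD filename, which solib-spu.c is assumed to form roughly as
   "<path>@0x<address> <id>" (matching the sscanf pattern "@0x%*x <%d>"
   in the listing).  The example filename below is invented.  */

#include <stdio.h>
#include <string.h>

int
main (void)
{
  const char *filename = "/usr/bin/my_spe_prog@0x1001a500 <7>";
  int id = -1;

  /* Same decode as spu_gdbarch_init: find the last '@' and parse the
     trailing " <id>" part, skipping the hex address.  */
  const char *name = strrchr (filename, '@');
  if (name)
    sscanf (name, "@0x%*x <%d>", &id);

  printf ("decoded spufs id: %d\n", id);
  return 0;
}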