GDB (API)
|
/* DWARF 2 Expression Evaluator.

   Copyright (C) 2001-2013 Free Software Foundation, Inc.

   Contributed by Daniel Berlin (dan@dberlin.org)

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "symtab.h"
#include "gdbtypes.h"
#include "value.h"
#include "gdbcore.h"
#include "dwarf2.h"
#include "dwarf2expr.h"
#include "gdb_assert.h"

/* Local prototypes.  */

static void execute_stack_op (struct dwarf_expr_context *,
			      const gdb_byte *, const gdb_byte *);

/* Cookie for gdbarch data.  */

static struct gdbarch_data *dwarf_arch_cookie;

/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  /* Lazily-created signed integer types for 2-, 4- and 8-byte
     addresses, in that order.  See dwarf_expr_address_type.  */
  struct type *dw_types[3];
};

/* Allocate and fill in dwarf_gdbarch_types for an arch; registered as
   the init function for DWARF_ARCH_COOKIE.  */

static void *
dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
{
  struct dwarf_gdbarch_types *types
    = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);

  /* The types themselves are lazily initialized.  */

  return types;
}

/* Return the type used for DWARF operations where the type is
   unspecified in the DWARF spec.  Only certain sizes are
   supported; an unsupported CTX->addr_size throws an error.  */

static struct type *
dwarf_expr_address_type (struct dwarf_expr_context *ctx)
{
  struct dwarf_gdbarch_types *types = gdbarch_data (ctx->gdbarch,
						    dwarf_arch_cookie);
  int ndx;

  /* Map the context's address size onto an index into DW_TYPES.  */
  if (ctx->addr_size == 2)
    ndx = 0;
  else if (ctx->addr_size == 4)
    ndx = 1;
  else if (ctx->addr_size == 8)
    ndx = 2;
  else
    error (_("Unsupported address size in DWARF expressions: %d bits"),
	   8 * ctx->addr_size);

  /* Create the type on first use; it lives on the gdbarch obstack.  */
  if (types->dw_types[ndx] == NULL)
    types->dw_types[ndx]
      = arch_integer_type (ctx->gdbarch,
			   8 * ctx->addr_size,
			   0, "<signed DWARF address type>");

  return types->dw_types[ndx];
}

/* Create a new context for the expression evaluator.  The caller owns
   the result and must release it with free_dwarf_expr_context, or via
   make_cleanup_free_dwarf_expr_context.  */

struct dwarf_expr_context *
new_dwarf_expr_context (void)
{
  struct dwarf_expr_context *retval;

  retval = xcalloc (1, sizeof (struct dwarf_expr_context));
  retval->stack_len = 0;
  /* Start with room for a few entries; dwarf_expr_grow_stack enlarges
     the stack on demand.  */
  retval->stack_allocated = 10;
  retval->stack = xmalloc (retval->stack_allocated
			   * sizeof (struct dwarf_stack_value));
  retval->num_pieces = 0;
  retval->pieces = 0;
  retval->max_recursion_depth = 0x100;
  return retval;
}

/* Release the memory allocated to CTX.  */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}

/* Helper for make_cleanup_free_dwarf_expr_context.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context (arg);
}

/* Return a cleanup that calls free_dwarf_expr_context.
*/ 00128 00129 struct cleanup * 00130 make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx) 00131 { 00132 return make_cleanup (free_dwarf_expr_context_cleanup, ctx); 00133 } 00134 00135 /* Expand the memory allocated to CTX's stack to contain at least 00136 NEED more elements than are currently used. */ 00137 00138 static void 00139 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need) 00140 { 00141 if (ctx->stack_len + need > ctx->stack_allocated) 00142 { 00143 size_t newlen = ctx->stack_len + need + 10; 00144 00145 ctx->stack = xrealloc (ctx->stack, 00146 newlen * sizeof (struct dwarf_stack_value)); 00147 ctx->stack_allocated = newlen; 00148 } 00149 } 00150 00151 /* Push VALUE onto CTX's stack. */ 00152 00153 static void 00154 dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value, 00155 int in_stack_memory) 00156 { 00157 struct dwarf_stack_value *v; 00158 00159 dwarf_expr_grow_stack (ctx, 1); 00160 v = &ctx->stack[ctx->stack_len++]; 00161 v->value = value; 00162 v->in_stack_memory = in_stack_memory; 00163 } 00164 00165 /* Push VALUE onto CTX's stack. */ 00166 00167 void 00168 dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value, 00169 int in_stack_memory) 00170 { 00171 dwarf_expr_push (ctx, 00172 value_from_ulongest (dwarf_expr_address_type (ctx), value), 00173 in_stack_memory); 00174 } 00175 00176 /* Pop the top item off of CTX's stack. */ 00177 00178 static void 00179 dwarf_expr_pop (struct dwarf_expr_context *ctx) 00180 { 00181 if (ctx->stack_len <= 0) 00182 error (_("dwarf expression stack underflow")); 00183 ctx->stack_len--; 00184 } 00185 00186 /* Retrieve the N'th item on CTX's stack. 
*/ 00187 00188 struct value * 00189 dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n) 00190 { 00191 if (ctx->stack_len <= n) 00192 error (_("Asked for position %d of stack, " 00193 "stack only has %d elements on it."), 00194 n, ctx->stack_len); 00195 return ctx->stack[ctx->stack_len - (1 + n)].value; 00196 } 00197 00198 /* Require that TYPE be an integral type; throw an exception if not. */ 00199 00200 static void 00201 dwarf_require_integral (struct type *type) 00202 { 00203 if (TYPE_CODE (type) != TYPE_CODE_INT 00204 && TYPE_CODE (type) != TYPE_CODE_CHAR 00205 && TYPE_CODE (type) != TYPE_CODE_BOOL) 00206 error (_("integral type expected in DWARF expression")); 00207 } 00208 00209 /* Return the unsigned form of TYPE. TYPE is necessarily an integral 00210 type. */ 00211 00212 static struct type * 00213 get_unsigned_type (struct gdbarch *gdbarch, struct type *type) 00214 { 00215 switch (TYPE_LENGTH (type)) 00216 { 00217 case 1: 00218 return builtin_type (gdbarch)->builtin_uint8; 00219 case 2: 00220 return builtin_type (gdbarch)->builtin_uint16; 00221 case 4: 00222 return builtin_type (gdbarch)->builtin_uint32; 00223 case 8: 00224 return builtin_type (gdbarch)->builtin_uint64; 00225 default: 00226 error (_("no unsigned variant found for type, while evaluating " 00227 "DWARF expression")); 00228 } 00229 } 00230 00231 /* Return the signed form of TYPE. TYPE is necessarily an integral 00232 type. 
   */

static struct type *
get_signed_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_int8;
    case 2:
      return builtin_type (gdbarch)->builtin_int16;
    case 4:
      return builtin_type (gdbarch)->builtin_int32;
    case 8:
      return builtin_type (gdbarch)->builtin_int64;
    default:
      error (_("no signed variant found for type, while evaluating "
	       "DWARF expression"));
    }
}

/* Retrieve the N'th item on CTX's stack, converted to an address.
   Throws an error if the item is not of integral type.  */

CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
				     TYPE_LENGTH (value_type (result_val)),
				     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      gdb_byte *buf = alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
						 value_type (result_val));

      /* NOTE(review): the buffer is sized by CTX->addr_size while
	 INT_TYPE comes from the stack value's own type; these
	 presumably agree here -- confirm whether values of other
	 widths can reach this point.  */
      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}

/* Retrieve the in_stack_memory flag of the N'th item on CTX's stack.
   */

int
dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
	     "stack only has %d elements on it."),
	   n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
}

/* Return true if the expression stack is empty.  */

static int
dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
{
  return ctx->stack_len == 0;
}

/* Add a new piece to CTX's piece list.  The piece's location kind is
   taken from CTX->location; its payload comes from the context or the
   top of the stack, depending on that kind.  */
static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  ctx->num_pieces++;

  ctx->pieces = xrealloc (ctx->pieces,
			  (ctx->num_pieces
			   * sizeof (struct dwarf_expr_piece)));

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  if (p->location == DWARF_VALUE_LITERAL)
    {
      /* Literal pieces carry their bytes directly in the context.  */
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      /* An empty stack means this piece's location was not described
	 at all, i.e. it was optimized out.  */
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
	 a somewhat strange approach, but this lets us avoid setting
	 the location to DWARF_VALUE_MEMORY in all the individual
	 cases in the evaluator.  */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      /* CTX->len holds the referred-to DIE's section offset here; see
	 the DW_OP_GNU_implicit_pointer handling in execute_stack_op.  */
      p->v.ptr.die.sect_off = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    {
      p->v.value = dwarf_expr_fetch (ctx, 0);
    }
}

/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
		 size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}

/* Helper to read a uleb128 value or throw an error.  */

const gdb_byte *
safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
		   uint64_t *r)
{
  buf = gdb_read_uleb128 (buf, buf_end, r);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
  return buf;
}

/* Helper to read a sleb128 value or throw an error.
*/ 00384 00385 const gdb_byte * 00386 safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end, 00387 int64_t *r) 00388 { 00389 buf = gdb_read_sleb128 (buf, buf_end, r); 00390 if (buf == NULL) 00391 error (_("DWARF expression error: ran off end of buffer reading sleb128 value")); 00392 return buf; 00393 } 00394 00395 const gdb_byte * 00396 safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end) 00397 { 00398 buf = gdb_skip_leb128 (buf, buf_end); 00399 if (buf == NULL) 00400 error (_("DWARF expression error: ran off end of buffer reading leb128 value")); 00401 return buf; 00402 } 00403 00404 00405 /* Check that the current operator is either at the end of an 00406 expression, or that it is followed by a composition operator. */ 00407 00408 void 00409 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end, 00410 const char *op_name) 00411 { 00412 /* It seems like DW_OP_GNU_uninit should be handled here. However, 00413 it doesn't seem to make sense for DW_OP_*_value, and it was not 00414 checked at the other place that this function is called. */ 00415 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece) 00416 error (_("DWARF-2 expression error: `%s' operations must be " 00417 "used either alone or in conjunction with DW_OP_piece " 00418 "or DW_OP_bit_piece."), 00419 op_name); 00420 } 00421 00422 /* Return true iff the types T1 and T2 are "the same". This only does 00423 checks that might reasonably be needed to compare DWARF base 00424 types. */ 00425 00426 static int 00427 base_types_equal_p (struct type *t1, struct type *t2) 00428 { 00429 if (TYPE_CODE (t1) != TYPE_CODE (t2)) 00430 return 0; 00431 if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2)) 00432 return 0; 00433 return TYPE_LENGTH (t1) == TYPE_LENGTH (t2); 00434 } 00435 00436 /* A convenience function to call get_base_type on CTX and return the 00437 result. DIE is the DIE whose type we need. 
   SIZE is non-zero if this function should verify that the resulting
   type has the correct size.  */

static struct type *
dwarf_get_base_type (struct dwarf_expr_context *ctx, cu_offset die, int size)
{
  struct type *result;

  if (ctx->funcs->get_base_type)
    {
      result = ctx->funcs->get_base_type (ctx, die);
      if (result == NULL)
	error (_("Could not find type for DW_OP_GNU_const_type"));
      if (size != 0 && TYPE_LENGTH (result) != size)
	error (_("DW_OP_GNU_const_type has different sizes for type and data"));
    }
  else
    /* Anything will do.  */
    result = builtin_type (ctx->gdbarch)->builtin_int;

  return result;
}

/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1.  */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  uint64_t dwarf_reg;

  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      /* DW_OP_reg0..reg31 encode the register in the opcode itself
	 and must be the only byte in the block.  */
      if (buf_end - buf != 1)
	return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_GNU_regval_type)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
      /* Skip the type DIE offset operand; only the register number
	 matters here.  */
      buf = gdb_skip_leb128 (buf, buf_end);
      if (buf == NULL)
	return -1;
    }
  else if (*buf == DW_OP_regx)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
    }
  else
    return -1;
  /* Reject trailing bytes, and register numbers that do not survive
     the narrowing to the int return value.  */
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}

/* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
   DW_OP_deref* return the DWARF register number.  Otherwise return -1.
00504 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the 00505 size from DW_OP_deref_size. */ 00506 00507 int 00508 dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end, 00509 CORE_ADDR *deref_size_return) 00510 { 00511 uint64_t dwarf_reg; 00512 int64_t offset; 00513 00514 if (buf_end <= buf) 00515 return -1; 00516 00517 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31) 00518 { 00519 dwarf_reg = *buf - DW_OP_breg0; 00520 buf++; 00521 if (buf >= buf_end) 00522 return -1; 00523 } 00524 else if (*buf == DW_OP_bregx) 00525 { 00526 buf++; 00527 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg); 00528 if (buf == NULL) 00529 return -1; 00530 if ((int) dwarf_reg != dwarf_reg) 00531 return -1; 00532 } 00533 else 00534 return -1; 00535 00536 buf = gdb_read_sleb128 (buf, buf_end, &offset); 00537 if (buf == NULL) 00538 return -1; 00539 if (offset != 0) 00540 return -1; 00541 00542 if (*buf == DW_OP_deref) 00543 { 00544 buf++; 00545 *deref_size_return = -1; 00546 } 00547 else if (*buf == DW_OP_deref_size) 00548 { 00549 buf++; 00550 if (buf >= buf_end) 00551 return -1; 00552 *deref_size_return = *buf++; 00553 } 00554 else 00555 return -1; 00556 00557 if (buf != buf_end) 00558 return -1; 00559 00560 return dwarf_reg; 00561 } 00562 00563 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill 00564 in FB_OFFSET_RETURN with the X offset and return 1. Otherwise return 0. 
   */

int
dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
			  CORE_ADDR *fb_offset_return)
{
  int64_t fb_offset;

  if (buf_end <= buf)
    return 0;

  if (*buf != DW_OP_fbreg)
    return 0;
  buf++;

  buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
  if (buf == NULL)
    return 0;
  *fb_offset_return = fb_offset;
  /* Fail if there are trailing bytes, or if the offset did not
     survive the round trip through CORE_ADDR.  */
  if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
    return 0;

  return 1;
}

/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH.  */

int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
			  const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  uint64_t dwarf_reg;
  int64_t sp_offset;

  if (buf_end <= buf)
    return 0;
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
	return 0;
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return 0;
    }

  /* NOTE(review): DWARF_REG is implicitly narrowed here; a register
     number wider than int would be truncated rather than rejected --
     confirm whether a range check (as in dwarf_block_to_dwarf_reg) is
     wanted.  */
  if (gdbarch_dwarf2_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
  if (buf == NULL)
    return 0;
  *sp_offset_return = sp_offset;
  /* Fail if there are trailing bytes, or if the offset did not
     survive the round trip through CORE_ADDR.  */
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}

/* The engine for the expression evaluator.  Using the context in CTX,
   evaluate the expression between OP_PTR and OP_END.
*/ 00633 00634 static void 00635 execute_stack_op (struct dwarf_expr_context *ctx, 00636 const gdb_byte *op_ptr, const gdb_byte *op_end) 00637 { 00638 enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch); 00639 /* Old-style "untyped" DWARF values need special treatment in a 00640 couple of places, specifically DW_OP_mod and DW_OP_shr. We need 00641 a special type for these values so we can distinguish them from 00642 values that have an explicit type, because explicitly-typed 00643 values do not need special treatment. This special type must be 00644 different (in the `==' sense) from any base type coming from the 00645 CU. */ 00646 struct type *address_type = dwarf_expr_address_type (ctx); 00647 00648 ctx->location = DWARF_VALUE_MEMORY; 00649 ctx->initialized = 1; /* Default is initialized. */ 00650 00651 if (ctx->recursion_depth > ctx->max_recursion_depth) 00652 error (_("DWARF-2 expression error: Loop detected (%d)."), 00653 ctx->recursion_depth); 00654 ctx->recursion_depth++; 00655 00656 while (op_ptr < op_end) 00657 { 00658 enum dwarf_location_atom op = *op_ptr++; 00659 ULONGEST result; 00660 /* Assume the value is not in stack memory. 00661 Code that knows otherwise sets this to 1. 00662 Some arithmetic on stack addresses can probably be assumed to still 00663 be a stack address, but we skip this complication for now. 00664 This is just an optimization, so it's always ok to punt 00665 and leave this as 0. */ 00666 int in_stack_memory = 0; 00667 uint64_t uoffset, reg; 00668 int64_t offset; 00669 struct value *result_val = NULL; 00670 00671 /* The DWARF expression might have a bug causing an infinite 00672 loop. In that case, quitting is the only way out. 
*/ 00673 QUIT; 00674 00675 switch (op) 00676 { 00677 case DW_OP_lit0: 00678 case DW_OP_lit1: 00679 case DW_OP_lit2: 00680 case DW_OP_lit3: 00681 case DW_OP_lit4: 00682 case DW_OP_lit5: 00683 case DW_OP_lit6: 00684 case DW_OP_lit7: 00685 case DW_OP_lit8: 00686 case DW_OP_lit9: 00687 case DW_OP_lit10: 00688 case DW_OP_lit11: 00689 case DW_OP_lit12: 00690 case DW_OP_lit13: 00691 case DW_OP_lit14: 00692 case DW_OP_lit15: 00693 case DW_OP_lit16: 00694 case DW_OP_lit17: 00695 case DW_OP_lit18: 00696 case DW_OP_lit19: 00697 case DW_OP_lit20: 00698 case DW_OP_lit21: 00699 case DW_OP_lit22: 00700 case DW_OP_lit23: 00701 case DW_OP_lit24: 00702 case DW_OP_lit25: 00703 case DW_OP_lit26: 00704 case DW_OP_lit27: 00705 case DW_OP_lit28: 00706 case DW_OP_lit29: 00707 case DW_OP_lit30: 00708 case DW_OP_lit31: 00709 result = op - DW_OP_lit0; 00710 result_val = value_from_ulongest (address_type, result); 00711 break; 00712 00713 case DW_OP_addr: 00714 result = extract_unsigned_integer (op_ptr, 00715 ctx->addr_size, byte_order); 00716 op_ptr += ctx->addr_size; 00717 /* Some versions of GCC emit DW_OP_addr before 00718 DW_OP_GNU_push_tls_address. In this case the value is an 00719 index, not an address. We don't support things like 00720 branching between the address and the TLS op. 
*/ 00721 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address) 00722 result += ctx->offset; 00723 result_val = value_from_ulongest (address_type, result); 00724 break; 00725 00726 case DW_OP_GNU_addr_index: 00727 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset); 00728 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset); 00729 result += ctx->offset; 00730 result_val = value_from_ulongest (address_type, result); 00731 break; 00732 case DW_OP_GNU_const_index: 00733 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset); 00734 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset); 00735 result_val = value_from_ulongest (address_type, result); 00736 break; 00737 00738 case DW_OP_const1u: 00739 result = extract_unsigned_integer (op_ptr, 1, byte_order); 00740 result_val = value_from_ulongest (address_type, result); 00741 op_ptr += 1; 00742 break; 00743 case DW_OP_const1s: 00744 result = extract_signed_integer (op_ptr, 1, byte_order); 00745 result_val = value_from_ulongest (address_type, result); 00746 op_ptr += 1; 00747 break; 00748 case DW_OP_const2u: 00749 result = extract_unsigned_integer (op_ptr, 2, byte_order); 00750 result_val = value_from_ulongest (address_type, result); 00751 op_ptr += 2; 00752 break; 00753 case DW_OP_const2s: 00754 result = extract_signed_integer (op_ptr, 2, byte_order); 00755 result_val = value_from_ulongest (address_type, result); 00756 op_ptr += 2; 00757 break; 00758 case DW_OP_const4u: 00759 result = extract_unsigned_integer (op_ptr, 4, byte_order); 00760 result_val = value_from_ulongest (address_type, result); 00761 op_ptr += 4; 00762 break; 00763 case DW_OP_const4s: 00764 result = extract_signed_integer (op_ptr, 4, byte_order); 00765 result_val = value_from_ulongest (address_type, result); 00766 op_ptr += 4; 00767 break; 00768 case DW_OP_const8u: 00769 result = extract_unsigned_integer (op_ptr, 8, byte_order); 00770 result_val = value_from_ulongest (address_type, result); 00771 op_ptr += 8; 00772 break; 00773 case 
DW_OP_const8s: 00774 result = extract_signed_integer (op_ptr, 8, byte_order); 00775 result_val = value_from_ulongest (address_type, result); 00776 op_ptr += 8; 00777 break; 00778 case DW_OP_constu: 00779 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset); 00780 result = uoffset; 00781 result_val = value_from_ulongest (address_type, result); 00782 break; 00783 case DW_OP_consts: 00784 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset); 00785 result = offset; 00786 result_val = value_from_ulongest (address_type, result); 00787 break; 00788 00789 /* The DW_OP_reg operations are required to occur alone in 00790 location expressions. */ 00791 case DW_OP_reg0: 00792 case DW_OP_reg1: 00793 case DW_OP_reg2: 00794 case DW_OP_reg3: 00795 case DW_OP_reg4: 00796 case DW_OP_reg5: 00797 case DW_OP_reg6: 00798 case DW_OP_reg7: 00799 case DW_OP_reg8: 00800 case DW_OP_reg9: 00801 case DW_OP_reg10: 00802 case DW_OP_reg11: 00803 case DW_OP_reg12: 00804 case DW_OP_reg13: 00805 case DW_OP_reg14: 00806 case DW_OP_reg15: 00807 case DW_OP_reg16: 00808 case DW_OP_reg17: 00809 case DW_OP_reg18: 00810 case DW_OP_reg19: 00811 case DW_OP_reg20: 00812 case DW_OP_reg21: 00813 case DW_OP_reg22: 00814 case DW_OP_reg23: 00815 case DW_OP_reg24: 00816 case DW_OP_reg25: 00817 case DW_OP_reg26: 00818 case DW_OP_reg27: 00819 case DW_OP_reg28: 00820 case DW_OP_reg29: 00821 case DW_OP_reg30: 00822 case DW_OP_reg31: 00823 if (op_ptr != op_end 00824 && *op_ptr != DW_OP_piece 00825 && *op_ptr != DW_OP_bit_piece 00826 && *op_ptr != DW_OP_GNU_uninit) 00827 error (_("DWARF-2 expression error: DW_OP_reg operations must be " 00828 "used either alone or in conjunction with DW_OP_piece " 00829 "or DW_OP_bit_piece.")); 00830 00831 result = op - DW_OP_reg0; 00832 result_val = value_from_ulongest (address_type, result); 00833 ctx->location = DWARF_VALUE_REGISTER; 00834 break; 00835 00836 case DW_OP_regx: 00837 op_ptr = safe_read_uleb128 (op_ptr, op_end, ®); 00838 dwarf_expr_require_composition (op_ptr, op_end, 
"DW_OP_regx"); 00839 00840 result = reg; 00841 result_val = value_from_ulongest (address_type, result); 00842 ctx->location = DWARF_VALUE_REGISTER; 00843 break; 00844 00845 case DW_OP_implicit_value: 00846 { 00847 uint64_t len; 00848 00849 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len); 00850 if (op_ptr + len > op_end) 00851 error (_("DW_OP_implicit_value: too few bytes available.")); 00852 ctx->len = len; 00853 ctx->data = op_ptr; 00854 ctx->location = DWARF_VALUE_LITERAL; 00855 op_ptr += len; 00856 dwarf_expr_require_composition (op_ptr, op_end, 00857 "DW_OP_implicit_value"); 00858 } 00859 goto no_push; 00860 00861 case DW_OP_stack_value: 00862 ctx->location = DWARF_VALUE_STACK; 00863 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value"); 00864 goto no_push; 00865 00866 case DW_OP_GNU_implicit_pointer: 00867 { 00868 int64_t len; 00869 00870 if (ctx->ref_addr_size == -1) 00871 error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer " 00872 "is not allowed in frame context")); 00873 00874 /* The referred-to DIE of sect_offset kind. */ 00875 ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size, 00876 byte_order); 00877 op_ptr += ctx->ref_addr_size; 00878 00879 /* The byte offset into the data. 
*/ 00880 op_ptr = safe_read_sleb128 (op_ptr, op_end, &len); 00881 result = (ULONGEST) len; 00882 result_val = value_from_ulongest (address_type, result); 00883 00884 ctx->location = DWARF_VALUE_IMPLICIT_POINTER; 00885 dwarf_expr_require_composition (op_ptr, op_end, 00886 "DW_OP_GNU_implicit_pointer"); 00887 } 00888 break; 00889 00890 case DW_OP_breg0: 00891 case DW_OP_breg1: 00892 case DW_OP_breg2: 00893 case DW_OP_breg3: 00894 case DW_OP_breg4: 00895 case DW_OP_breg5: 00896 case DW_OP_breg6: 00897 case DW_OP_breg7: 00898 case DW_OP_breg8: 00899 case DW_OP_breg9: 00900 case DW_OP_breg10: 00901 case DW_OP_breg11: 00902 case DW_OP_breg12: 00903 case DW_OP_breg13: 00904 case DW_OP_breg14: 00905 case DW_OP_breg15: 00906 case DW_OP_breg16: 00907 case DW_OP_breg17: 00908 case DW_OP_breg18: 00909 case DW_OP_breg19: 00910 case DW_OP_breg20: 00911 case DW_OP_breg21: 00912 case DW_OP_breg22: 00913 case DW_OP_breg23: 00914 case DW_OP_breg24: 00915 case DW_OP_breg25: 00916 case DW_OP_breg26: 00917 case DW_OP_breg27: 00918 case DW_OP_breg28: 00919 case DW_OP_breg29: 00920 case DW_OP_breg30: 00921 case DW_OP_breg31: 00922 { 00923 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset); 00924 result = (ctx->funcs->read_reg) (ctx->baton, op - DW_OP_breg0); 00925 result += offset; 00926 result_val = value_from_ulongest (address_type, result); 00927 } 00928 break; 00929 case DW_OP_bregx: 00930 { 00931 op_ptr = safe_read_uleb128 (op_ptr, op_end, ®); 00932 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset); 00933 result = (ctx->funcs->read_reg) (ctx->baton, reg); 00934 result += offset; 00935 result_val = value_from_ulongest (address_type, result); 00936 } 00937 break; 00938 case DW_OP_fbreg: 00939 { 00940 const gdb_byte *datastart; 00941 size_t datalen; 00942 unsigned int before_stack_len; 00943 00944 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset); 00945 /* Rather than create a whole new context, we simply 00946 record the stack length before execution, then reset it 00947 
afterwards, effectively erasing whatever the recursive 00948 call put there. */ 00949 before_stack_len = ctx->stack_len; 00950 /* FIXME: cagney/2003-03-26: This code should be using 00951 get_frame_base_address(), and then implement a dwarf2 00952 specific this_base method. */ 00953 (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen); 00954 dwarf_expr_eval (ctx, datastart, datalen); 00955 if (ctx->location == DWARF_VALUE_MEMORY) 00956 result = dwarf_expr_fetch_address (ctx, 0); 00957 else if (ctx->location == DWARF_VALUE_REGISTER) 00958 result = (ctx->funcs->read_reg) (ctx->baton, 00959 value_as_long (dwarf_expr_fetch (ctx, 0))); 00960 else 00961 error (_("Not implemented: computing frame " 00962 "base using explicit value operator")); 00963 result = result + offset; 00964 result_val = value_from_ulongest (address_type, result); 00965 in_stack_memory = 1; 00966 ctx->stack_len = before_stack_len; 00967 ctx->location = DWARF_VALUE_MEMORY; 00968 } 00969 break; 00970 00971 case DW_OP_dup: 00972 result_val = dwarf_expr_fetch (ctx, 0); 00973 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0); 00974 break; 00975 00976 case DW_OP_drop: 00977 dwarf_expr_pop (ctx); 00978 goto no_push; 00979 00980 case DW_OP_pick: 00981 offset = *op_ptr++; 00982 result_val = dwarf_expr_fetch (ctx, offset); 00983 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset); 00984 break; 00985 00986 case DW_OP_swap: 00987 { 00988 struct dwarf_stack_value t1, t2; 00989 00990 if (ctx->stack_len < 2) 00991 error (_("Not enough elements for " 00992 "DW_OP_swap. 
Need 2, have %d."), 00993 ctx->stack_len); 00994 t1 = ctx->stack[ctx->stack_len - 1]; 00995 t2 = ctx->stack[ctx->stack_len - 2]; 00996 ctx->stack[ctx->stack_len - 1] = t2; 00997 ctx->stack[ctx->stack_len - 2] = t1; 00998 goto no_push; 00999 } 01000 01001 case DW_OP_over: 01002 result_val = dwarf_expr_fetch (ctx, 1); 01003 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1); 01004 break; 01005 01006 case DW_OP_rot: 01007 { 01008 struct dwarf_stack_value t1, t2, t3; 01009 01010 if (ctx->stack_len < 3) 01011 error (_("Not enough elements for " 01012 "DW_OP_rot. Need 3, have %d."), 01013 ctx->stack_len); 01014 t1 = ctx->stack[ctx->stack_len - 1]; 01015 t2 = ctx->stack[ctx->stack_len - 2]; 01016 t3 = ctx->stack[ctx->stack_len - 3]; 01017 ctx->stack[ctx->stack_len - 1] = t2; 01018 ctx->stack[ctx->stack_len - 2] = t3; 01019 ctx->stack[ctx->stack_len - 3] = t1; 01020 goto no_push; 01021 } 01022 01023 case DW_OP_deref: 01024 case DW_OP_deref_size: 01025 case DW_OP_GNU_deref_type: 01026 { 01027 int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++); 01028 gdb_byte *buf = alloca (addr_size); 01029 CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0); 01030 struct type *type; 01031 01032 dwarf_expr_pop (ctx); 01033 01034 if (op == DW_OP_GNU_deref_type) 01035 { 01036 cu_offset type_die; 01037 01038 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset); 01039 type_die.cu_off = uoffset; 01040 type = dwarf_get_base_type (ctx, type_die, 0); 01041 } 01042 else 01043 type = address_type; 01044 01045 (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size); 01046 01047 /* If the size of the object read from memory is different 01048 from the type length, we need to zero-extend it. 
*/ 01049 if (TYPE_LENGTH (type) != addr_size) 01050 { 01051 ULONGEST result = 01052 extract_unsigned_integer (buf, addr_size, byte_order); 01053 01054 buf = alloca (TYPE_LENGTH (type)); 01055 store_unsigned_integer (buf, TYPE_LENGTH (type), 01056 byte_order, result); 01057 } 01058 01059 result_val = value_from_contents_and_address (type, buf, addr); 01060 break; 01061 } 01062 01063 case DW_OP_abs: 01064 case DW_OP_neg: 01065 case DW_OP_not: 01066 case DW_OP_plus_uconst: 01067 { 01068 /* Unary operations. */ 01069 result_val = dwarf_expr_fetch (ctx, 0); 01070 dwarf_expr_pop (ctx); 01071 01072 switch (op) 01073 { 01074 case DW_OP_abs: 01075 if (value_less (result_val, 01076 value_zero (value_type (result_val), not_lval))) 01077 result_val = value_neg (result_val); 01078 break; 01079 case DW_OP_neg: 01080 result_val = value_neg (result_val); 01081 break; 01082 case DW_OP_not: 01083 dwarf_require_integral (value_type (result_val)); 01084 result_val = value_complement (result_val); 01085 break; 01086 case DW_OP_plus_uconst: 01087 dwarf_require_integral (value_type (result_val)); 01088 result = value_as_long (result_val); 01089 op_ptr = safe_read_uleb128 (op_ptr, op_end, ®); 01090 result += reg; 01091 result_val = value_from_ulongest (address_type, result); 01092 break; 01093 } 01094 } 01095 break; 01096 01097 case DW_OP_and: 01098 case DW_OP_div: 01099 case DW_OP_minus: 01100 case DW_OP_mod: 01101 case DW_OP_mul: 01102 case DW_OP_or: 01103 case DW_OP_plus: 01104 case DW_OP_shl: 01105 case DW_OP_shr: 01106 case DW_OP_shra: 01107 case DW_OP_xor: 01108 case DW_OP_le: 01109 case DW_OP_ge: 01110 case DW_OP_eq: 01111 case DW_OP_lt: 01112 case DW_OP_gt: 01113 case DW_OP_ne: 01114 { 01115 /* Binary operations. */ 01116 struct value *first, *second; 01117 01118 second = dwarf_expr_fetch (ctx, 0); 01119 dwarf_expr_pop (ctx); 01120 01121 first = dwarf_expr_fetch (ctx, 0); 01122 dwarf_expr_pop (ctx); 01123 01124 if (! 
base_types_equal_p (value_type (first), value_type (second))) 01125 error (_("Incompatible types on DWARF stack")); 01126 01127 switch (op) 01128 { 01129 case DW_OP_and: 01130 dwarf_require_integral (value_type (first)); 01131 dwarf_require_integral (value_type (second)); 01132 result_val = value_binop (first, second, BINOP_BITWISE_AND); 01133 break; 01134 case DW_OP_div: 01135 result_val = value_binop (first, second, BINOP_DIV); 01136 break; 01137 case DW_OP_minus: 01138 result_val = value_binop (first, second, BINOP_SUB); 01139 break; 01140 case DW_OP_mod: 01141 { 01142 int cast_back = 0; 01143 struct type *orig_type = value_type (first); 01144 01145 /* We have to special-case "old-style" untyped values 01146 -- these must have mod computed using unsigned 01147 math. */ 01148 if (orig_type == address_type) 01149 { 01150 struct type *utype 01151 = get_unsigned_type (ctx->gdbarch, orig_type); 01152 01153 cast_back = 1; 01154 first = value_cast (utype, first); 01155 second = value_cast (utype, second); 01156 } 01157 /* Note that value_binop doesn't handle float or 01158 decimal float here. This seems unimportant. 
*/ 01159 result_val = value_binop (first, second, BINOP_MOD); 01160 if (cast_back) 01161 result_val = value_cast (orig_type, result_val); 01162 } 01163 break; 01164 case DW_OP_mul: 01165 result_val = value_binop (first, second, BINOP_MUL); 01166 break; 01167 case DW_OP_or: 01168 dwarf_require_integral (value_type (first)); 01169 dwarf_require_integral (value_type (second)); 01170 result_val = value_binop (first, second, BINOP_BITWISE_IOR); 01171 break; 01172 case DW_OP_plus: 01173 result_val = value_binop (first, second, BINOP_ADD); 01174 break; 01175 case DW_OP_shl: 01176 dwarf_require_integral (value_type (first)); 01177 dwarf_require_integral (value_type (second)); 01178 result_val = value_binop (first, second, BINOP_LSH); 01179 break; 01180 case DW_OP_shr: 01181 dwarf_require_integral (value_type (first)); 01182 dwarf_require_integral (value_type (second)); 01183 if (!TYPE_UNSIGNED (value_type (first))) 01184 { 01185 struct type *utype 01186 = get_unsigned_type (ctx->gdbarch, value_type (first)); 01187 01188 first = value_cast (utype, first); 01189 } 01190 01191 result_val = value_binop (first, second, BINOP_RSH); 01192 /* Make sure we wind up with the same type we started 01193 with. */ 01194 if (value_type (result_val) != value_type (second)) 01195 result_val = value_cast (value_type (second), result_val); 01196 break; 01197 case DW_OP_shra: 01198 dwarf_require_integral (value_type (first)); 01199 dwarf_require_integral (value_type (second)); 01200 if (TYPE_UNSIGNED (value_type (first))) 01201 { 01202 struct type *stype 01203 = get_signed_type (ctx->gdbarch, value_type (first)); 01204 01205 first = value_cast (stype, first); 01206 } 01207 01208 result_val = value_binop (first, second, BINOP_RSH); 01209 /* Make sure we wind up with the same type we started 01210 with. 
*/ 01211 if (value_type (result_val) != value_type (second)) 01212 result_val = value_cast (value_type (second), result_val); 01213 break; 01214 case DW_OP_xor: 01215 dwarf_require_integral (value_type (first)); 01216 dwarf_require_integral (value_type (second)); 01217 result_val = value_binop (first, second, BINOP_BITWISE_XOR); 01218 break; 01219 case DW_OP_le: 01220 /* A <= B is !(B < A). */ 01221 result = ! value_less (second, first); 01222 result_val = value_from_ulongest (address_type, result); 01223 break; 01224 case DW_OP_ge: 01225 /* A >= B is !(A < B). */ 01226 result = ! value_less (first, second); 01227 result_val = value_from_ulongest (address_type, result); 01228 break; 01229 case DW_OP_eq: 01230 result = value_equal (first, second); 01231 result_val = value_from_ulongest (address_type, result); 01232 break; 01233 case DW_OP_lt: 01234 result = value_less (first, second); 01235 result_val = value_from_ulongest (address_type, result); 01236 break; 01237 case DW_OP_gt: 01238 /* A > B is B < A. */ 01239 result = value_less (second, first); 01240 result_val = value_from_ulongest (address_type, result); 01241 break; 01242 case DW_OP_ne: 01243 result = ! value_equal (first, second); 01244 result_val = value_from_ulongest (address_type, result); 01245 break; 01246 default: 01247 internal_error (__FILE__, __LINE__, 01248 _("Can't be reached.")); 01249 } 01250 } 01251 break; 01252 01253 case DW_OP_call_frame_cfa: 01254 result = (ctx->funcs->get_frame_cfa) (ctx->baton); 01255 result_val = value_from_ulongest (address_type, result); 01256 in_stack_memory = 1; 01257 break; 01258 01259 case DW_OP_GNU_push_tls_address: 01260 /* Variable is at a constant offset in the thread-local 01261 storage block into the objfile for the current thread and 01262 the dynamic linker module containing this expression. Here 01263 we return returns the offset from that base. 
The top of the 01264 stack has the offset from the beginning of the thread 01265 control block at which the variable is located. Nothing 01266 should follow this operator, so the top of stack would be 01267 returned. */ 01268 result = value_as_long (dwarf_expr_fetch (ctx, 0)); 01269 dwarf_expr_pop (ctx); 01270 result = (ctx->funcs->get_tls_address) (ctx->baton, result); 01271 result_val = value_from_ulongest (address_type, result); 01272 break; 01273 01274 case DW_OP_skip: 01275 offset = extract_signed_integer (op_ptr, 2, byte_order); 01276 op_ptr += 2; 01277 op_ptr += offset; 01278 goto no_push; 01279 01280 case DW_OP_bra: 01281 { 01282 struct value *val; 01283 01284 offset = extract_signed_integer (op_ptr, 2, byte_order); 01285 op_ptr += 2; 01286 val = dwarf_expr_fetch (ctx, 0); 01287 dwarf_require_integral (value_type (val)); 01288 if (value_as_long (val) != 0) 01289 op_ptr += offset; 01290 dwarf_expr_pop (ctx); 01291 } 01292 goto no_push; 01293 01294 case DW_OP_nop: 01295 goto no_push; 01296 01297 case DW_OP_piece: 01298 { 01299 uint64_t size; 01300 01301 /* Record the piece. */ 01302 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size); 01303 add_piece (ctx, 8 * size, 0); 01304 01305 /* Pop off the address/regnum, and reset the location 01306 type. */ 01307 if (ctx->location != DWARF_VALUE_LITERAL 01308 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT) 01309 dwarf_expr_pop (ctx); 01310 ctx->location = DWARF_VALUE_MEMORY; 01311 } 01312 goto no_push; 01313 01314 case DW_OP_bit_piece: 01315 { 01316 uint64_t size, offset; 01317 01318 /* Record the piece. */ 01319 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size); 01320 op_ptr = safe_read_uleb128 (op_ptr, op_end, &offset); 01321 add_piece (ctx, size, offset); 01322 01323 /* Pop off the address/regnum, and reset the location 01324 type. 
*/ 01325 if (ctx->location != DWARF_VALUE_LITERAL 01326 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT) 01327 dwarf_expr_pop (ctx); 01328 ctx->location = DWARF_VALUE_MEMORY; 01329 } 01330 goto no_push; 01331 01332 case DW_OP_GNU_uninit: 01333 if (op_ptr != op_end) 01334 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always " 01335 "be the very last op.")); 01336 01337 ctx->initialized = 0; 01338 goto no_push; 01339 01340 case DW_OP_call2: 01341 { 01342 cu_offset offset; 01343 01344 offset.cu_off = extract_unsigned_integer (op_ptr, 2, byte_order); 01345 op_ptr += 2; 01346 ctx->funcs->dwarf_call (ctx, offset); 01347 } 01348 goto no_push; 01349 01350 case DW_OP_call4: 01351 { 01352 cu_offset offset; 01353 01354 offset.cu_off = extract_unsigned_integer (op_ptr, 4, byte_order); 01355 op_ptr += 4; 01356 ctx->funcs->dwarf_call (ctx, offset); 01357 } 01358 goto no_push; 01359 01360 case DW_OP_GNU_entry_value: 01361 { 01362 uint64_t len; 01363 CORE_ADDR deref_size; 01364 union call_site_parameter_u kind_u; 01365 01366 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len); 01367 if (op_ptr + len > op_end) 01368 error (_("DW_OP_GNU_entry_value: too few bytes available.")); 01369 01370 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len); 01371 if (kind_u.dwarf_reg != -1) 01372 { 01373 op_ptr += len; 01374 ctx->funcs->push_dwarf_reg_entry_value (ctx, 01375 CALL_SITE_PARAMETER_DWARF_REG, 01376 kind_u, 01377 -1 /* deref_size */); 01378 goto no_push; 01379 } 01380 01381 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr, 01382 op_ptr + len, 01383 &deref_size); 01384 if (kind_u.dwarf_reg != -1) 01385 { 01386 if (deref_size == -1) 01387 deref_size = ctx->addr_size; 01388 op_ptr += len; 01389 ctx->funcs->push_dwarf_reg_entry_value (ctx, 01390 CALL_SITE_PARAMETER_DWARF_REG, 01391 kind_u, deref_size); 01392 goto no_push; 01393 } 01394 01395 error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is " 01396 "supported only for single DW_OP_reg* " 01397 "or 
for DW_OP_breg*(0)+DW_OP_deref*")); 01398 } 01399 01400 case DW_OP_GNU_parameter_ref: 01401 { 01402 union call_site_parameter_u kind_u; 01403 01404 kind_u.param_offset.cu_off = extract_unsigned_integer (op_ptr, 4, 01405 byte_order); 01406 op_ptr += 4; 01407 ctx->funcs->push_dwarf_reg_entry_value (ctx, 01408 CALL_SITE_PARAMETER_PARAM_OFFSET, 01409 kind_u, 01410 -1 /* deref_size */); 01411 } 01412 goto no_push; 01413 01414 case DW_OP_GNU_const_type: 01415 { 01416 cu_offset type_die; 01417 int n; 01418 const gdb_byte *data; 01419 struct type *type; 01420 01421 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset); 01422 type_die.cu_off = uoffset; 01423 n = *op_ptr++; 01424 data = op_ptr; 01425 op_ptr += n; 01426 01427 type = dwarf_get_base_type (ctx, type_die, n); 01428 result_val = value_from_contents (type, data); 01429 } 01430 break; 01431 01432 case DW_OP_GNU_regval_type: 01433 { 01434 cu_offset type_die; 01435 struct type *type; 01436 01437 op_ptr = safe_read_uleb128 (op_ptr, op_end, ®); 01438 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset); 01439 type_die.cu_off = uoffset; 01440 01441 type = dwarf_get_base_type (ctx, type_die, 0); 01442 result = (ctx->funcs->read_reg) (ctx->baton, reg); 01443 result_val = value_from_ulongest (address_type, result); 01444 result_val = value_from_contents (type, 01445 value_contents_all (result_val)); 01446 } 01447 break; 01448 01449 case DW_OP_GNU_convert: 01450 case DW_OP_GNU_reinterpret: 01451 { 01452 cu_offset type_die; 01453 struct type *type; 01454 01455 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset); 01456 type_die.cu_off = uoffset; 01457 01458 if (type_die.cu_off == 0) 01459 type = address_type; 01460 else 01461 type = dwarf_get_base_type (ctx, type_die, 0); 01462 01463 result_val = dwarf_expr_fetch (ctx, 0); 01464 dwarf_expr_pop (ctx); 01465 01466 if (op == DW_OP_GNU_convert) 01467 result_val = value_cast (type, result_val); 01468 else if (type == value_type (result_val)) 01469 { 01470 /* Nothing. 
*/ 01471 } 01472 else if (TYPE_LENGTH (type) 01473 != TYPE_LENGTH (value_type (result_val))) 01474 error (_("DW_OP_GNU_reinterpret has wrong size")); 01475 else 01476 result_val 01477 = value_from_contents (type, 01478 value_contents_all (result_val)); 01479 } 01480 break; 01481 01482 default: 01483 error (_("Unhandled dwarf expression opcode 0x%x"), op); 01484 } 01485 01486 /* Most things push a result value. */ 01487 gdb_assert (result_val != NULL); 01488 dwarf_expr_push (ctx, result_val, in_stack_memory); 01489 no_push: 01490 ; 01491 } 01492 01493 /* To simplify our main caller, if the result is an implicit 01494 pointer, then make a pieced value. This is ok because we can't 01495 have implicit pointers in contexts where pieces are invalid. */ 01496 if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER) 01497 add_piece (ctx, 8 * ctx->addr_size, 0); 01498 01499 abort_expression: 01500 ctx->recursion_depth--; 01501 gdb_assert (ctx->recursion_depth >= 0); 01502 } 01503 01504 /* Stub dwarf_expr_context_funcs.get_frame_base implementation. */ 01505 01506 void 01507 ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length) 01508 { 01509 error (_("%s is invalid in this context"), "DW_OP_fbreg"); 01510 } 01511 01512 /* Stub dwarf_expr_context_funcs.get_frame_cfa implementation. */ 01513 01514 CORE_ADDR 01515 ctx_no_get_frame_cfa (void *baton) 01516 { 01517 error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa"); 01518 } 01519 01520 /* Stub dwarf_expr_context_funcs.get_frame_pc implementation. */ 01521 01522 CORE_ADDR 01523 ctx_no_get_frame_pc (void *baton) 01524 { 01525 error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer"); 01526 } 01527 01528 /* Stub dwarf_expr_context_funcs.get_tls_address implementation. 
*/ 01529 01530 CORE_ADDR 01531 ctx_no_get_tls_address (void *baton, CORE_ADDR offset) 01532 { 01533 error (_("%s is invalid in this context"), "DW_OP_GNU_push_tls_address"); 01534 } 01535 01536 /* Stub dwarf_expr_context_funcs.dwarf_call implementation. */ 01537 01538 void 01539 ctx_no_dwarf_call (struct dwarf_expr_context *ctx, cu_offset die_offset) 01540 { 01541 error (_("%s is invalid in this context"), "DW_OP_call*"); 01542 } 01543 01544 /* Stub dwarf_expr_context_funcs.get_base_type implementation. */ 01545 01546 struct type * 01547 ctx_no_get_base_type (struct dwarf_expr_context *ctx, cu_offset die) 01548 { 01549 error (_("Support for typed DWARF is not supported in this context")); 01550 } 01551 01552 /* Stub dwarf_expr_context_funcs.push_dwarf_block_entry_value 01553 implementation. */ 01554 01555 void 01556 ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx, 01557 enum call_site_parameter_kind kind, 01558 union call_site_parameter_u kind_u, 01559 int deref_size) 01560 { 01561 internal_error (__FILE__, __LINE__, 01562 _("Support for DW_OP_GNU_entry_value is unimplemented")); 01563 } 01564 01565 /* Stub dwarf_expr_context_funcs.get_addr_index implementation. */ 01566 01567 CORE_ADDR 01568 ctx_no_get_addr_index (void *baton, unsigned int index) 01569 { 01570 error (_("%s is invalid in this context"), "DW_OP_GNU_addr_index"); 01571 } 01572 01573 /* Provide a prototype to silence -Wmissing-prototypes. */ 01574 extern initialize_file_ftype _initialize_dwarf2expr; 01575 01576 void 01577 _initialize_dwarf2expr (void) 01578 { 01579 dwarf_arch_cookie 01580 = gdbarch_data_register_post_init (dwarf_gdbarch_types_init); 01581 }