GDB (API)
|
00001 /* GNU/Linux native-dependent code common to multiple platforms. 00002 00003 Copyright (C) 2001-2013 Free Software Foundation, Inc. 00004 00005 This file is part of GDB. 00006 00007 This program is free software; you can redistribute it and/or modify 00008 it under the terms of the GNU General Public License as published by 00009 the Free Software Foundation; either version 3 of the License, or 00010 (at your option) any later version. 00011 00012 This program is distributed in the hope that it will be useful, 00013 but WITHOUT ANY WARRANTY; without even the implied warranty of 00014 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 00015 GNU General Public License for more details. 00016 00017 You should have received a copy of the GNU General Public License 00018 along with this program. If not, see <http://www.gnu.org/licenses/>. */ 00019 00020 #include "defs.h" 00021 #include "inferior.h" 00022 #include "target.h" 00023 #include "nat/linux-nat.h" 00024 #include "nat/linux-waitpid.h" 00025 #include "gdb_string.h" 00026 #include "gdb_wait.h" 00027 #include "gdb_assert.h" 00028 #ifdef HAVE_TKILL_SYSCALL 00029 #include <unistd.h> 00030 #include <sys/syscall.h> 00031 #endif 00032 #include <sys/ptrace.h> 00033 #include "linux-nat.h" 00034 #include "linux-ptrace.h" 00035 #include "linux-procfs.h" 00036 #include "linux-fork.h" 00037 #include "gdbthread.h" 00038 #include "gdbcmd.h" 00039 #include "regcache.h" 00040 #include "regset.h" 00041 #include "inf-child.h" 00042 #include "inf-ptrace.h" 00043 #include "auxv.h" 00044 #include <sys/procfs.h> /* for elf_gregset etc. */ 00045 #include "elf-bfd.h" /* for elfcore_write_* */ 00046 #include "gregset.h" /* for gregset */ 00047 #include "gdbcore.h" /* for get_exec_file */ 00048 #include <ctype.h> /* for isdigit */ 00049 #include "gdbthread.h" /* for struct thread_info etc. 
*/ 00050 #include "gdb_stat.h" /* for struct stat */ 00051 #include <fcntl.h> /* for O_RDONLY */ 00052 #include "inf-loop.h" 00053 #include "event-loop.h" 00054 #include "event-top.h" 00055 #include <pwd.h> 00056 #include <sys/types.h> 00057 #include "gdb_dirent.h" 00058 #include "xml-support.h" 00059 #include "terminal.h" 00060 #include <sys/vfs.h> 00061 #include "solib.h" 00062 #include "linux-osdata.h" 00063 #include "linux-tdep.h" 00064 #include "symfile.h" 00065 #include "agent.h" 00066 #include "tracepoint.h" 00067 #include "exceptions.h" 00068 #include "linux-ptrace.h" 00069 #include "buffer.h" 00070 #include "target-descriptions.h" 00071 #include "filestuff.h" 00072 00073 #ifndef SPUFS_MAGIC 00074 #define SPUFS_MAGIC 0x23c9b64e 00075 #endif 00076 00077 #ifdef HAVE_PERSONALITY 00078 # include <sys/personality.h> 00079 # if !HAVE_DECL_ADDR_NO_RANDOMIZE 00080 # define ADDR_NO_RANDOMIZE 0x0040000 00081 # endif 00082 #endif /* HAVE_PERSONALITY */ 00083 00084 /* This comment documents high-level logic of this file. 00085 00086 Waiting for events in sync mode 00087 =============================== 00088 00089 When waiting for an event in a specific thread, we just use waitpid, passing 00090 the specific pid, and not passing WNOHANG. 00091 00092 When waiting for an event in all threads, waitpid is not quite good. Prior to 00093 version 2.4, Linux can either wait for event in main thread, or in secondary 00094 threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might 00095 miss an event. The solution is to use non-blocking waitpid, together with 00096 sigsuspend. First, we use non-blocking waitpid to get an event in the main 00097 process, if any. Second, we use non-blocking waitpid with the __WCLONED 00098 flag to check for events in cloned processes. If nothing is found, we use 00099 sigsuspend to wait for SIGCHLD. 
When SIGCHLD arrives, it means something 00100 happened to a child process -- and SIGCHLD will be delivered both for events 00101 in main debugged process and in cloned processes. As soon as we know there's 00102 an event, we get back to calling nonblocking waitpid with and without 00103 __WCLONED. 00104 00105 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls, 00106 so that we don't miss a signal. If SIGCHLD arrives in between, when it's 00107 blocked, the signal becomes pending and sigsuspend immediately 00108 notices it and returns. 00109 00110 Waiting for events in async mode 00111 ================================ 00112 00113 In async mode, GDB should always be ready to handle both user input 00114 and target events, so neither blocking waitpid nor sigsuspend are 00115 viable options. Instead, we should asynchronously notify the GDB main 00116 event loop whenever there's an unprocessed event from the target. We 00117 detect asynchronous target events by handling SIGCHLD signals. To 00118 notify the event loop about target events, the self-pipe trick is used 00119 --- a pipe is registered as waitable event source in the event loop, 00120 the event loop select/poll's on the read end of this pipe (as well on 00121 other event sources, e.g., stdin), and the SIGCHLD handler writes a 00122 byte to this pipe. This is more portable than relying on 00123 pselect/ppoll, since on kernels that lack those syscalls, libc 00124 emulates them with select/poll+sigprocmask, and that is racy 00125 (a.k.a. plain broken). 00126 00127 Obviously, if we fail to notify the event loop if there's a target 00128 event, it's bad. OTOH, if we notify the event loop when there's no 00129 event from the target, linux_nat_wait will detect that there's no real 00130 event to report, and return event of type TARGET_WAITKIND_IGNORE. 00131 This is mostly harmless, but it will waste time and is better avoided. 
00132 00133 The main design point is that every time GDB is outside linux-nat.c, 00134 we have a SIGCHLD handler installed that is called when something 00135 happens to the target and notifies the GDB event loop. Whenever GDB 00136 core decides to handle the event, and calls into linux-nat.c, we 00137 process things as in sync mode, except that the we never block in 00138 sigsuspend. 00139 00140 While processing an event, we may end up momentarily blocked in 00141 waitpid calls. Those waitpid calls, while blocking, are guarantied to 00142 return quickly. E.g., in all-stop mode, before reporting to the core 00143 that an LWP hit a breakpoint, all LWPs are stopped by sending them 00144 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported. 00145 Note that this is different from blocking indefinitely waiting for the 00146 next event --- here, we're already handling an event. 00147 00148 Use of signals 00149 ============== 00150 00151 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another 00152 signal is not entirely significant; we just need for a signal to be delivered, 00153 so that we can intercept it. SIGSTOP's advantage is that it can not be 00154 blocked. A disadvantage is that it is not a real-time signal, so it can only 00155 be queued once; we do not keep track of other sources of SIGSTOP. 00156 00157 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't 00158 use them, because they have special behavior when the signal is generated - 00159 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL 00160 kills the entire thread group. 00161 00162 A delivered SIGSTOP would stop the entire thread group, not just the thread we 00163 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and 00164 cancel it (by PTRACE_CONT without passing SIGSTOP). 00165 00166 We could use a real-time signal instead. 
This would solve those problems; we
   could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
   But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
   generates it, and there are races with trying to find a signal that is not
   blocked.  */

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* The method to call, if any, when a new fork is attached.  */
static linux_nat_new_fork_ftype *linux_nat_new_fork;

/* The method to call, if any, when a process is no longer
   attached.  */
static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (siginfo_t *,
				       gdb_byte *,
				       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.
*/
static LONGEST (*super_xfer_partial) (struct target_ops *,
				      enum target_object,
				      const char *, gdb_byte *,
				      const gdb_byte *,
				      ULONGEST, LONGEST);

/* Nonzero enables debug output for this module.  */
static unsigned int debug_linux_nat;

/* "show" callback reporting the current DEBUG_LINUX_NAT setting.  */
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

/* Node of a singly-linked list recording a (pid, wait status) pair.  */
struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  /* Drain until read reports nothing left; retry on EINTR.  */
  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long we end
     up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.
*/ 00269 } 00270 00271 static void linux_nat_async (void (*callback) 00272 (enum inferior_event_type event_type, 00273 void *context), 00274 void *context); 00275 static int kill_lwp (int lwpid, int signo); 00276 00277 static int stop_callback (struct lwp_info *lp, void *data); 00278 00279 static void block_child_signals (sigset_t *prev_mask); 00280 static void restore_child_signals_mask (sigset_t *prev_mask); 00281 00282 struct lwp_info; 00283 static struct lwp_info *add_lwp (ptid_t ptid); 00284 static void purge_lwp_list (int pid); 00285 static void delete_lwp (ptid_t ptid); 00286 static struct lwp_info *find_lwp_pid (ptid_t ptid); 00287 00288 00289 /* Trivial list manipulation functions to keep track of a list of 00290 new stopped processes. */ 00291 static void 00292 add_to_pid_list (struct simple_pid_list **listp, int pid, int status) 00293 { 00294 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list)); 00295 00296 new_pid->pid = pid; 00297 new_pid->status = status; 00298 new_pid->next = *listp; 00299 *listp = new_pid; 00300 } 00301 00302 static int 00303 in_pid_list_p (struct simple_pid_list *list, int pid) 00304 { 00305 struct simple_pid_list *p; 00306 00307 for (p = list; p != NULL; p = p->next) 00308 if (p->pid == pid) 00309 return 1; 00310 return 0; 00311 } 00312 00313 static int 00314 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp) 00315 { 00316 struct simple_pid_list **p; 00317 00318 for (p = listp; *p != NULL; p = &(*p)->next) 00319 if ((*p)->pid == pid) 00320 { 00321 struct simple_pid_list *next = (*p)->next; 00322 00323 *statusp = (*p)->status; 00324 xfree (*p); 00325 *p = next; 00326 return 1; 00327 } 00328 return 0; 00329 } 00330 00331 /* Initialize ptrace warnings and check for supported ptrace 00332 features given PID. 
*/

static void
linux_init_ptrace (pid_t pid)
{
  /* Turn on ptrace event reporting for PID and emit any one-time
     warnings about ptrace limitations on this system.  */
  linux_enable_event_reporting (pid);
  linux_ptrace_init_warnings ();
}

/* target_ops hook: run after attaching to PID.  */

static void
linux_child_post_attach (int pid)
{
  linux_init_ptrace (pid);
}

/* target_ops hook: run after spawning a new inferior.  */

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_init_ptrace (ptid_get_pid (ptid));
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Call delete_lwp with prototype compatible for make_cleanup.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = lp_voidp;

  delete_lwp (lp->ptid);
}

/* target_ops hook: decide which side of a fork/vfork to follow.
   FOLLOW_CHILD nonzero means follow the child; DETACH_FORK nonzero
   means detach from whichever side is not followed.  Returns 1 to
   cancel the resume (the foreground-vfork case below), 0 on
   success.  */

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child,
			 int detach_fork)
{
  int has_vforked;
  int parent_pid, child_pid;

  has_vforked = (inferior_thread ()->pending_follow.kind
		 == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (inferior_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (inferior_ptid);
  child_pid
    = ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      return 1;
    }

  if (! follow_child)
    {
      struct lwp_info *child_lp = NULL;

      /* We're already attached to the parent, by default.  */

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  struct cleanup *old_chain;

	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
	    }

	  if (info_verbose || debug_linux_nat)
	    {
	      target_terminal_ours ();
	      fprintf_filtered (gdb_stdlog,
				"Detaching after fork from "
				"child process %d.\n",
				child_pid);
	    }

	  /* Temporarily switch inferior_ptid to the child so the LWP
	     bookkeeping and the PTRACE_DETACH below address it.  */
	  old_chain = save_inferior_ptid ();
	  inferior_ptid = ptid_build (child_pid, child_pid, 0);

	  child_lp = add_lwp (inferior_ptid);
	  child_lp->stopped = 1;
	  child_lp->last_resume_kind = resume_stop;
	  make_cleanup (delete_lwp_cleanup, child_lp);

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (child_lp);
	  ptrace (PTRACE_DETACH, child_pid, 0, 0);

	  do_cleanups (old_chain);
	}
      else
	{
	  struct inferior *parent_inf, *child_inf;
	  struct cleanup *old_chain;

	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_pid);

	  parent_inf = current_inferior ();
	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);
	  child_inf->gdbarch = parent_inf->gdbarch;
	  copy_inferior_target_desc_info (child_inf, parent_inf);

	  old_chain = save_inferior_ptid ();
	  save_current_program_space ();

	  inferior_ptid = ptid_build (child_pid, child_pid, 0);
	  add_thread (inferior_ptid);
	  child_lp = add_lwp (inferior_ptid);
	  child_lp->stopped = 1;
	  child_lp->last_resume_kind = resume_stop;
	  child_inf->symfile_flags = SYMFILE_NO_READ;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = 0;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = 0;
	    }
	  else
	    {
	      child_inf->aspace = new_address_space ();
	      child_inf->pspace = add_program_space (child_inf->aspace);
	      child_inf->removable = 1;
	      set_current_program_space (child_inf->pspace);
	      clone_program_space (child_inf->pspace, parent_inf->pspace);

	      /* Let the shared library layer (solib-svr4) learn about
		 this new process, relocate the cloned exec, pull in
		 shared libraries, and install the solib event
		 breakpoint.  If a "cloned-VM" event was propagated
		 better throughout the core, this wouldn't be
		 required.  */
	      solib_create_inferior_hook (0);
	    }

	  /* Let the thread_db layer learn about this new process.  */
	  check_for_thread_db ();

	  do_cleanups (old_chain);
	}

      if (has_vforked)
	{
	  struct lwp_info *parent_lp;
	  struct inferior *parent_inf;

	  parent_inf = current_inferior ();

	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when does
	     the child stops using the parent's address space.  */
	  parent_inf->waiting_for_vfork_done = detach_fork;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;

	  parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
	  gdb_assert (linux_supports_tracefork () >= 0);

	  if (linux_supports_tracevforkdone ())
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: waiting for VFORK_DONE on %d\n",
				    parent_pid);
	      parent_lp->stopped = 1;

	      /* We'll handle the VFORK_DONE event like any other
		 event, in target_wait.  */
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: no VFORK_DONE "
				    "support, sleeping a bit\n");

	      usleep (10000);

	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
		 and leave it pending.  The next linux_nat_resume call
		 will notice a pending event, and bypasses actually
		 resuming the inferior.  */
	      parent_lp->status = 0;
	      parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
	      parent_lp->stopped = 1;

	      /* If we're in async mode, need to tell the event loop
		 there's something here to process.  */
	      if (target_can_async_p ())
		async_file_mark ();
	    }
	}
    }
  else
    {
      struct inferior *parent_inf, *child_inf;
      struct lwp_info *child_lp;
      struct program_space *parent_pspace;

      if (info_verbose || debug_linux_nat)
	{
	  target_terminal_ours ();
	  if (has_vforked)
	    fprintf_filtered (gdb_stdlog,
			      _("Attaching after process %d "
				"vfork to child process %d.\n"),
			      parent_pid, child_pid);
	  else
	    fprintf_filtered (gdb_stdlog,
			      _("Attaching after process %d "
				"fork to child process %d.\n"),
			      parent_pid, child_pid);
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_pid);

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
	 child exits or execs.  At child exec or exit time we can
	 remove the old breakpoints from the parent and detach or
	 resume debugging it.  Otherwise, detach the parent now; we'll
	 want to reuse it's program/address spaces, but we can't set
	 them to the child before removing breakpoints from the
	 parent, otherwise, the breakpoints module could decide to
	 remove breakpoints from the wrong process (since they'd be
	 assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == NULL);
	  gdb_assert (parent_inf->vfork_child == NULL);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = 0;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	  parent_inf->waiting_for_vfork_done = 0;
	}
      else if (detach_fork)
	target_detach (NULL, 0);

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
	 this new thread, before cloning the program space, and
	 informing the solib layer about this new process.  */

      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      add_thread (inferior_ptid);
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* If this is a vfork child, then the address-space is shared
	 with the parent.  If we detached from the parent, then we can
	 reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
	{
	  child_inf->pspace = parent_pspace;
	  child_inf->aspace = child_inf->pspace->aspace;
	}
      else
	{
	  child_inf->aspace = new_address_space ();
	  child_inf->pspace = add_program_space (child_inf->aspace);
	  child_inf->removable = 1;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  set_current_program_space (child_inf->pspace);
	  clone_program_space (child_inf->pspace, parent_pspace);

	  /* Let the shared library layer (solib-svr4) learn about
	     this new process, relocate the cloned exec, pull in
	     shared libraries, and install the solib event breakpoint.
	     If a "cloned-VM" event was propagated better throughout
	     the core, this wouldn't be required.  */
	  solib_create_inferior_hook (0);
	}

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  return 0;
}


/* Catchpoint hooks: return 0 on success, nonzero if the kernel's
   ptrace fork/vfork/exec event reporting is unavailable.  */

static int
linux_child_insert_fork_catchpoint (int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_fork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_vfork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_exec_catchpoint (int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_exec_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
				    int table_size, int *table)
{
  if (!linux_supports_tracesysgood ())
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly well possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".
Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     when debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */

/* List of known LWPs.  */
struct lwp_info *lwp_list;


/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make that sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.
*/
static void
linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      /* PASS_SIGNALS is indexed by GDB signal number, so translate
	 each host signal before looking it up.  */
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < numsigs && pass_signals[target_signo])
	sigaddset (&pass_mask, signo);
    }
}



/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);


/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  NOTE: the result points to a static buffer, so it
   is only valid until the next call.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    {
      if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
	snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
		  strsignal (SIGTRAP));
      else
	snprintf (buf, sizeof (buf), "%s (stopped)",
		  strsignal (WSTOPSIG (status)));
    }
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
	      strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Destroy and free LP.  */

static void
lwp_free (struct lwp_info *lp)
{
  xfree (lp->arch_private);
  xfree (lp);
}

/* Remove all LWPs belonging to PID from the lwp list.
*/

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      /* Fetch the successor up front, since LP may be freed.  */
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
	{
	  /* Unlink LP, fixing up either the list head or the
	     previous node's next pointer.  */
	  if (lp == lwp_list)
	    lwp_list = lp->next;
	  else
	    lpprev->next = lp->next;

	  lwp_free (lp);
	}
      else
	lpprev = lp;
    }
}

/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (ptid_lwp_p (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->last_resume_kind = resume_continue;
  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  /* Push onto the front of the global LWP list.  */
  lp->next = lwp_list;
  lwp_list = lp;

  return lp;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be
   stopped.
*/ 00960 00961 static struct lwp_info * 00962 add_lwp (ptid_t ptid) 00963 { 00964 struct lwp_info *lp; 00965 00966 lp = add_initial_lwp (ptid); 00967 00968 /* Let the arch specific bits know about this new thread. Current 00969 clients of this callback take the opportunity to install 00970 watchpoints in the new thread. We don't do this for the first 00971 thread though. See add_initial_lwp. */ 00972 if (linux_nat_new_thread != NULL) 00973 linux_nat_new_thread (lp); 00974 00975 return lp; 00976 } 00977 00978 /* Remove the LWP specified by PID from the list. */ 00979 00980 static void 00981 delete_lwp (ptid_t ptid) 00982 { 00983 struct lwp_info *lp, *lpprev; 00984 00985 lpprev = NULL; 00986 00987 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next) 00988 if (ptid_equal (lp->ptid, ptid)) 00989 break; 00990 00991 if (!lp) 00992 return; 00993 00994 if (lpprev) 00995 lpprev->next = lp->next; 00996 else 00997 lwp_list = lp->next; 00998 00999 lwp_free (lp); 01000 } 01001 01002 /* Return a pointer to the structure describing the LWP corresponding 01003 to PID. If no corresponding LWP could be found, return NULL. */ 01004 01005 static struct lwp_info * 01006 find_lwp_pid (ptid_t ptid) 01007 { 01008 struct lwp_info *lp; 01009 int lwp; 01010 01011 if (ptid_lwp_p (ptid)) 01012 lwp = ptid_get_lwp (ptid); 01013 else 01014 lwp = ptid_get_pid (ptid); 01015 01016 for (lp = lwp_list; lp; lp = lp->next) 01017 if (lwp == ptid_get_lwp (lp->ptid)) 01018 return lp; 01019 01020 return NULL; 01021 } 01022 01023 /* Call CALLBACK with its second argument set to DATA for every LWP in 01024 the list. If CALLBACK returns 1 for a particular LWP, return a 01025 pointer to the structure describing that LWP immediately. 01026 Otherwise return NULL. 
*/

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   int (*callback) (struct lwp_info *, void *),
		   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      /* Fetch the link before invoking CALLBACK, which may delete
	 LP from the list.  */
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
	{
	  if ((*callback) (lp, data))
	    return lp;
	}
    }

  return NULL;
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  /* Discard every LWP of the current inferior before installing the
     single LWP of the checkpoint we are switching to.  */
  purge_lwp_list (ptid_get_pid (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP: announce it (if thread
   events are being printed), remove it from the core thread list,
   and drop it from our LWP list.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.
*/

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = ptid_get_lwp (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
			    pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      /* The LWP stopped with some other signal before our SIGSTOP
	 was delivered; record that fact for the caller.  */
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existance, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwpid;

  gdb_assert (ptid_lwp_p (ptid));

  lp = find_lwp_pid (ptid);
  lwpid = ptid_get_lwp (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (lwpid != ptid_get_pid (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  if (linux_supports_tracefork ())
	    {
	      /* If we haven't stopped all threads when we get here,
		 we may have seen a thread listed in thread_db's list,
		 but not processed the PTRACE_EVENT_CLONE yet.  If
		 that's the case, ignore this new thread, and let
		 normal event handling discover it later.  */
	      if (in_pid_list_p (stopped_pids, lwpid))
		{
		  /* We've already seen this thread stop, but we
		     haven't seen the PTRACE_EVENT_CLONE extended
		     event yet.  */
		  return 0;
		}
	      else
		{
		  int new_pid;
		  int status;

		  /* See if we've got a stop for this new child
		     pending.  If so, we're already attached.  */
		  new_pid = my_waitpid (lwpid, &status, WNOHANG);
		  if (new_pid == -1 && errno == ECHILD)
		    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
		  if (new_pid != -1)
		    {
		      if (WIFSTOPPED (status))
			add_to_pid_list (&stopped_pids, lwpid, status);
		      return 1;
		    }
		}
	    }

	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
	return 1;

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* The thread stopped with a real signal rather than our
	     SIGSTOP; keep the status around so it is reported to the
	     core later.  */
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (ptid_get_lwp (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }
  else
    {
      /* We assume that the LWP representing the original process is
	 already stopped.  Mark it as stopped in the data structure
	 that the GNU/linux ptrace layer uses to keep track of
	 threads.  Note that this won't have already been done since
	 the main thread will have, we assume, been stopped by an
	 attach from a different layer.  */
      if (lp == NULL)
	lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  lp->last_resume_kind = resume_stop;
  return 0;
}

/* Implement the to_create_inferior target method: start EXEC_FILE
   with arguments ALLARGS and environment ENV, optionally with
   address space randomization disabled for the child.  */

static void
linux_nat_create_inferior (struct target_ops *ops,
			   char *exec_file, char *allargs, char **env,
			   int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      /* personality (0xffffffff) is the documented way to query the
	 current personality without changing it.  */
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning (_("Error disabling address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      /* Restore the personality so only the child runs without
	 randomization, not GDB itself or later children.  */
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning (_("Error restoring address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}

/* Implement the to_attach target method: attach to the process
   identified by ARGS, decorating the main thread with lwp info and
   caching its initial stop status.  */

static void
linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;
  volatile struct gdb_exception ex;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (0, NULL);

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      linux_ops->to_attach (ops, args, from_tty);
    }
  if (ex.reason < 0)
    {
      /* Attach failed; augment the error message with any
	 Linux-specific diagnostics (e.g. ptrace scope restrictions)
	 before re-throwing.  */
      pid_t pid = parse_pid_to_attach (args);
      struct buffer buffer;
      char *message, *buffer_s;

      message = xstrdup (ex.message);
      make_cleanup (xfree, message);

      buffer_init (&buffer);
      linux_ptrace_attach_warnings (pid, &buffer);

      buffer_grow_str0 (&buffer, "");
      buffer_s = buffer_finish (&buffer);
      make_cleanup (xfree, buffer_s);

      throw_error (ex.error, "%s%s", buffer_s, message);
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_build (ptid_get_pid (inferior_ptid),
		     ptid_get_pid (inferior_ptid),
		     0);
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      /* The process died before we could stop it; report why.  */
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal_ours ();
	  target_mourn_inferior ();
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal_ours ();
	  target_mourn_inferior ();

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) ptid_get_lwp (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) ptid_get_pid (lp->ptid), status_to_str (status));

  lp->status = status;

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

/* Get pending status of LP.  On return, *STATUS is 0 if LP has no
   pending signal that should be passed on, otherwise a wait status
   encoding that signal.  Always returns 0.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0;	/* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      struct target_waitstatus last;
      ptid_t last_ptid;

      /* In all-stop, only the thread that last reported an event has
	 a meaningful stop_signal.  */
      get_last_target_status (&last_ptid, &last);

      if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
	{
	  struct thread_info *tp = find_thread_ptid (lp->ptid);

	  signo = tp->suspend.stop_signal;
	}
    }

  *status = 0;

  if (signo == GDB_SIGNAL_0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has no pending signal\n",
			    target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s had signal %s, "
			    "but it is in no pass state\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (gdb_signal_to_host (signo));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has pending signal %s\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }

  return 0;
}

/* iterate_over_lwps callback: detach from LP with PTRACE_DETACH,
   passing along any pending signal, except for the LWP whose id
   equals the process id (the main thread), which is detached later
   by linux_nat_detach itself.  Always returns 0 so iteration
   continues.  */

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      if (linux_nat_prepare_to_resume != NULL)
	linux_nat_prepare_to_resume (lp);
      errno = 0;
      if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}

/* Implement the to_detach target method: stop every LWP, detach from
   all of them, then detach from (or switch away from, in the
   multi-fork case) the main thread.  */

static void
linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
{
  int pid;
  int status;
  struct lwp_info *main_lwp;

  pid = ptid_get_pid (inferior_ptid);

  /* Don't unregister from the event loop, as there may be other
     inferiors running. */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);

  iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));

  /* Pass on any pending signal for the last LWP.  */
  if ((args == NULL || *args == '\0')
      && get_pending_status (main_lwp, &status) != -1
      && WIFSTOPPED (status))
    {
      /* Put the signal number in ARGS so that inf_ptrace_detach will
	 pass it along with PTRACE_DETACH.  */
      args = alloca (8);
      sprintf (args, "%d", (int) WSTOPSIG (status));
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LND: Sending signal %s to %s\n",
			    args,
			    target_pid_to_str (main_lwp->ptid));
    }

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (main_lwp);
  delete_lwp (main_lwp->ptid);

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (args, from_tty);
    }
  else
    linux_ops->to_detach (ops, args, from_tty);
}

/* Resume LP.  If STEP, single-step it; deliver SIGNO (GDB_SIGNAL_0
   for none).  LWPs that are not stopped, are vfork parents, or have
   a pending event are left alone.  */

static void
resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  if (lp->stopped)
    {
      struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));

      if (inf->vfork_child != NULL)
	{
	  /* A vfork parent must stay stopped until the child execs
	     or exits.  */
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming %s (vfork parent)\n",
				target_pid_to_str (lp->ptid));
	}
      else if (lp->status == 0
	       && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Resuming sibling %s, %s, %s\n",
				target_pid_to_str (lp->ptid),
				(signo != GDB_SIGNAL_0
				 ? strsignal (gdb_signal_to_host (signo))
				 : "0"),
				step ? "step" : "resume");

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (lp);
	  linux_ops->to_resume (linux_ops,
				pid_to_ptid (ptid_get_lwp (lp->ptid)),
				step, signo);
	  lp->stopped = 0;
	  lp->step = step;
	  lp->stopped_by_watchpoint = 0;
	}
      else
	{
	  /* LP has a pending event the core hasn't seen yet; leave
	     it stopped so the event is not lost.  */
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming sibling %s (has pending)\n",
				target_pid_to_str (lp->ptid));
	}
    }
  else
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RC: Not resuming sibling %s (not stopped)\n",
			    target_pid_to_str (lp->ptid));
    }
}

/* Resume LWP, with the last stop signal, if it is in pass state.  */

static int
linux_nat_resume_callback (struct lwp_info *lp, void *data)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  if (lp->stopped)
    {
      struct thread_info *thread;

      thread = find_thread_ptid (lp->ptid);
      if (thread != NULL)
	{
	  if (signal_pass_state (thread->suspend.stop_signal))
	    signo = thread->suspend.stop_signal;
	  /* Consume the signal: it is being handed back to the
	     inferior (or dropped) now.  */
	  thread->suspend.stop_signal = GDB_SIGNAL_0;
	}
    }

  resume_lwp (lp, 0, signo);
  return 0;
}

/* iterate_over_lwps callback: mark LP as not resumed.  */

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  lp->last_resume_kind = resume_stop;
  return 0;
}

/* iterate_over_lwps callback: mark LP as resumed.  */

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  lp->last_resume_kind = resume_continue;
  return 0;
}

/* Implement the to_resume target method: resume (or step) the LWPs
   matching PTID, delivering SIGNO to the event thread.  If the event
   thread already has a pending status, short-circuit and report it
   instead of resuming.  */

static void
linux_nat_resume (struct target_ops *ops,
		  ptid_t ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;
  int resume_many;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"),
			target_pid_to_str (inferior_ptid));

  /* A specific PTID means `step only this process id'.  */
  resume_many = (ptid_equal (minus_one_ptid, ptid)
		 || ptid_is_pid (ptid));

  /* Mark the lwps we're resuming as resumed.  */
  iterate_over_lwps (ptid, resume_set_callback, NULL);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->step = step;
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLR: Short circuiting for status 0x%x\n",
			    lp->status);

      if (target_can_async_p ())
	{
	  target_async (inferior_event_handler, 0);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  /* Mark LWP as not stopped to prevent it from being continued by
     linux_nat_resume_callback.  */
  lp->stopped = 0;

  if (resume_many)
    iterate_over_lwps (ptid, linux_nat_resume_callback, NULL);

  /* Convert to something the lower layer understands.  */
  ptid = pid_to_ptid (ptid_get_lwp (lp->ptid));

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (lp);
  linux_ops->to_resume (linux_ops, ptid, step, signo);
  lp->stopped_by_watchpoint = 0;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"));

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

/* Send a signal to an LWP.  */

static int
kill_lwp (int lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef HAVE_TKILL_SYSCALL
  {
    /* Remember a failed tkill so we only probe for it once.  */
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return ret;
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}

/* Handle a GNU/Linux syscall trap wait response.
   If we see a syscall
   event, check if the core is interested in it: if not, ignore the
   event, and keep waiting; otherwise, we need to toggle the LWP's
   syscall entry/exit status, since the ptrace event itself doesn't
   indicate it, and report the trap to higher layers.  */

static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later when the thread is resumed, a new syscall
	 entry event.  If we didn't do this (and returned 0), we'd
	 leave a syscall entry pending, and our caller, by using
	 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
	 itself.  Later, when the user re-resumes this LWP, we'd see
	 another syscall entry event and we'd mistake it for a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring syscall %d "
			    "for LWP %ld (stopping threads), "
			    "resuming with PTRACE_CONT for SIGSTOP\n",
			    syscall_number,
			    ptid_get_lwp (lp->ptid));

      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
      return 1;
    }

  if (catch_syscall_enabled ())
    {
      /* Always update the entry/return state, even if this particular
	 syscall isn't interesting to the core now.  In async mode,
	 the user could install a new catchpoint for this syscall
	 between syscall enter/return, and we'll need to know to
	 report a syscall return if that happens.  */
      lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			   ? TARGET_WAITKIND_SYSCALL_RETURN
			   : TARGET_WAITKIND_SYSCALL_ENTRY);

      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  ourstatus->kind = lp->syscall_state;
	  ourstatus->value.syscall_number = syscall_number;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHST: stopping for %s of syscall %d"
				" for LWP %ld\n",
				lp->syscall_state
				== TARGET_WAITKIND_SYSCALL_ENTRY
				? "entry" : "return",
				syscall_number,
				ptid_get_lwp (lp->ptid));
	  return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring %s of syscall %d "
			    "for LWP %ld\n",
			    lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			    ? "entry" : "return",
			    syscall_number,
			    ptid_get_lwp (lp->ptid));
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	   PT_STEP still resumes executing the syscall and reports a
	   syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	   single-stepped this thread, then this event can't be a
	   syscall enter.  If we last single-stepped this thread, this
	   has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: caught syscall event "
			    "with no syscall catchpoints."
			    " %d for LWP %ld, ignoring\n",
			    syscall_number,
			    ptid_get_lwp (lp->ptid));
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  /* Note that gdbarch_get_syscall_number may access registers, hence
     fill a regcache.  */
  registers_changed ();
  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (lp);
  linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
			lp->step, GDB_SIGNAL_0);
  return 1;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  This function returns non-zero if the
   event should be ignored and we should wait again.  If STOPPING is
   true, the new LWP remains stopped, otherwise it is continued.
*/

static int
linux_handle_extended_wait (struct lwp_info *lp, int status,
			    int stopping)
{
  int pid = ptid_get_lwp (lp->ptid);
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  /* The ptrace event code is carried in the upper 16 bits of the
     wait status.  */
  int event = status >> 16;

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      /* Retrieve the new child's PID from the event message.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */
	  ret = my_waitpid (new_pid, &status,
			    (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
	  if (ret == -1)
	    perror_with_name (_("waiting for new child"));
	  else if (ret != new_pid)
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected PID %d"), ret);
	  else if (!WIFSTOPPED (status))
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected status 0x%x"), status);
	}

      ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  /* The arch-specific native code may need to know about new
	     forks even if those end up never mapped to an
	     inferior.  */
	  if (linux_nat_new_fork != NULL)
	    linux_nat_new_fork (lp, new_pid);
	}

      if (event == PTRACE_EVENT_FORK
	  && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
	{
	  /* Handle checkpointing by linux-fork.c here as a special
	     case.  We don't want the follow-fork-mode or 'catch fork'
	     to interfere with this.  */

	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ptid_build (new_pid, new_pid, 0));

	  /* Retain child fork in ptrace (stopped) state.  */
	  if (!find_fork_pid (new_pid))
	    add_fork (new_pid);

	  /* Report as spurious, so that infrun doesn't want to follow
	     this fork.  We're actually doing an infcall in
	     linux-fork.c.  */
	  ourstatus->kind = TARGET_WAITKIND_SPURIOUS;

	  /* Report the stop to the core.  */
	  return 0;
	}

      if (event == PTRACE_EVENT_FORK)
	ourstatus->kind = TARGET_WAITKIND_FORKED;
      else if (event == PTRACE_EVENT_VFORK)
	ourstatus->kind = TARGET_WAITKIND_VFORKED;
      else
	{
	  /* PTRACE_EVENT_CLONE: a new thread.  Handled entirely
	     within this layer; the core is not told about it.  */
	  struct lwp_info *new_lp;

	  ourstatus->kind = TARGET_WAITKIND_IGNORE;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got clone event "
				"from LWP %d, new child is LWP %ld\n",
				pid, new_pid);

	  new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
	  new_lp->cloned = 1;
	  new_lp->stopped = 1;

	  if (WSTOPSIG (status) != SIGSTOP)
	    {
	      /* This can happen if someone starts sending signals to
		 the new thread before it gets a chance to run, which
		 have a lower number than SIGSTOP (e.g. SIGUSR1).
		 This is an unlikely case, and harder to handle for
		 fork / vfork than for clone, so we do not try - but
		 we handle it for clone events here.  We'll send
		 the other signal on to the thread below.  */

	      new_lp->signalled = 1;
	    }
	  else
	    {
	      struct thread_info *tp;

	      /* When we stop for an event in some other thread, and
		 pull the thread list just as this thread has cloned,
		 we'll have seen the new thread in the thread_db list
		 before handling the CLONE event (glibc's
		 pthread_create adds the new thread to the thread list
		 before clone'ing, and has the kernel fill in the
		 thread's tid on the clone call with
		 CLONE_PARENT_SETTID).  If that happened, and the core
		 had requested the new thread to stop, we'll have
		 killed it with SIGSTOP.  But since SIGSTOP is not an
		 RT signal, it can only be queued once.  We need to be
		 careful to not resume the LWP if we wanted it to
		 stop.  In that case, we'll leave the SIGSTOP pending.
		 It will later be reported as GDB_SIGNAL_0.  */
	      tp = find_thread_ptid (new_lp->ptid);
	      if (tp != NULL && tp->stop_requested)
		new_lp->last_resume_kind = resume_stop;
	      else
		status = 0;
	    }

	  if (non_stop)
	    {
	      /* Add the new thread to GDB's lists as soon as possible
		 so that:

		 1) the frontend doesn't have to wait for a stop to
		 display them, and,

		 2) we tag it with the correct running state.  */

	      /* If the thread_db layer is active, let it know about
		 this new thread, and add it to GDB's list.  */
	      if (!thread_db_attach_lwp (new_lp->ptid))
		{
		  /* We're not using thread_db.  Add it to GDB's
		     list.  */
		  target_post_attach (ptid_get_lwp (new_lp->ptid));
		  add_thread (new_lp->ptid);
		}

	      if (!stopping)
		{
		  set_running (new_lp->ptid, 1);
		  set_executing (new_lp->ptid, 1);
		  /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
		     resume_stop.  */
		  new_lp->last_resume_kind = resume_continue;
		}
	    }

	  if (status != 0)
	    {
	      /* We created NEW_LP so it cannot yet contain STATUS.  */
	      gdb_assert (new_lp->status == 0);

	      /* Save the wait status to report later.  */
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LHEW: waitpid of new LWP %ld, "
				    "saving status %s\n",
				    (long) ptid_get_lwp (new_lp->ptid),
				    status_to_str (status));
	      new_lp->status = status;
	    }

	  /* Note the need to use the low target ops to resume, to
	     handle resuming with PT_SYSCALL if we have syscall
	     catchpoints.  */
	  if (!stopping)
	    {
	      new_lp->resumed = 1;

	      if (status == 0)
		{
		  gdb_assert (new_lp->last_resume_kind == resume_continue);
		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"LHEW: resuming new LWP %ld\n",
					ptid_get_lwp (new_lp->ptid));
		  if (linux_nat_prepare_to_resume != NULL)
		    linux_nat_prepare_to_resume (new_lp);
		  linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
					0, GDB_SIGNAL_0);
		  new_lp->stopped = 0;
		}
	    }

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: resuming parent LWP %d\n", pid);
	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (lp);
	  linux_ops->to_resume (linux_ops,
				pid_to_ptid (ptid_get_lwp (lp->ptid)),
				0, GDB_SIGNAL_0);

	  /* The clone event was fully consumed here; tell the caller
	     to wait again.  */
	  return 1;
	}

      return 0;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHEW: Got exec event from LWP %ld\n",
			    ptid_get_lwp (lp->ptid));

      ourstatus->kind = TARGET_WAITKIND_EXECD;
      ourstatus->value.execd_pathname
	= xstrdup (linux_child_pid_to_exec_file (pid));

      /* Report the exec to the core.  */
      return 0;
    }

  if (event == PTRACE_EVENT_VFORK_DONE)
    {
      if (current_inferior ()->waiting_for_vfork_done)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got expected PTRACE_EVENT_"
				"VFORK_DONE from LWP %ld: stopping\n",
				ptid_get_lwp (lp->ptid));

	  ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
	  return 0;
	}

      /* Nobody is waiting for this vfork-done; resume the LWP and
	 ignore the event.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHEW: Got PTRACE_EVENT_VFORK_DONE "
			    "from LWP %ld: resuming\n",
			    ptid_get_lwp (lp->ptid));
      ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
      return 1;
    }

  internal_error (__FILE__, __LINE__,
		  _("unknown ptrace event %d"), event);
}

/* Wait for LP to stop.  Returns the wait status, or 0 if the LWP has
   exited.  */

static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status = 0;
  int thread_dead = 0;
  sigset_t prev_mask;

  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below.  */
  block_child_signals (&prev_mask);

  for (;;)
    {
      /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
	 was right and we should just call sigsuspend.  */

      pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, WNOHANG);
      if (pid == -1 && errno == ECHILD)
	pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WCLONE | WNOHANG);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because, for some vendor 2.4 kernels with NPTL
	     support backported, there won't be an exit event unless
	     it is the main thread.  2.6 kernels will report an exit
	     event for each thread that exits, as expected.  */
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
				target_pid_to_str (lp->ptid));
	}
      if (pid != 0)
	break;

      /* Bugs 10970, 12702.
	 Thread group leader may have exited in which case we'll lock up in
	 waitpid if there are other threads, even if they are all zombies too.
	 Basically, we're not supposed to use waitpid this way.
	 __WCLONE is not applicable for the leader so we can't use that.
	 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
	 process; it gets ESRCH both for the zombie and for running processes.

	 As a workaround, check if we're waiting for the thread group leader and
	 if it's a zombie, and avoid calling waitpid if it is.

	 This is racy, what if the tgl becomes a zombie right after we check?
	 Therefore always use WNOHANG with sigsuspend - it is equivalent to
	 waiting waitpid but linux_proc_pid_is_zombie is safe this way.  */

      if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
	  && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
	{
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"WL: Thread group leader %s vanished.\n",
				target_pid_to_str (lp->ptid));
	  break;
	}

      /* Wait for next SIGCHLD and try again.  This may let SIGCHLD handlers
	 get invoked despite our caller had them intentionally blocked by
	 block_child_signals.  This is sensitive only to the loop of
	 linux_nat_wait_1 and there if we get called my_waitpid gets called
	 again before it gets to sigsuspend so we can safely let the handlers
	 get executed here.  */

      sigsuspend (&suspend_mask);
    }

  restore_child_signals_mask (&prev_mask);

  if (!thread_dead)
    {
      gdb_assert (pid == ptid_get_lwp (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "WL: waitpid %s received %s\n",
			      target_pid_to_str (lp->ptid),
			      status_to_str (status));
	}

      /* Check if the thread has exited.  */
      if (WIFEXITED (status) || WIFSIGNALED (status))
	{
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
				target_pid_to_str (lp->ptid));
	}
    }

  if (thread_dead)
    {
      exit_lwp (lp);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 1))
	return wait_lwp (lp);
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "WL: Handling extended status 0x%06x\n",
			    status);
      if (linux_handle_extended_wait (lp, status, 1))
	return wait_lwp (lp);
    }

  return status;
}

/* Send a SIGSTOP to LP.
*/

static int
stop_callback (struct lwp_info *lp, void *data)
{
  /* Only signal the LWP if it isn't already stopped and we haven't
     already sent it a SIGSTOP that is still in flight.  */
  if (!lp->stopped && !lp->signalled)
    {
      int ret;

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "SC: kill %s **<SIGSTOP>**\n",
			      target_pid_to_str (lp->ptid));
	}
      errno = 0;
      ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "SC: lwp kill %d %s\n",
			      ret,
			      errno ? safe_strerror (errno) : "ERRNO-OK");
	}

      lp->signalled = 1;
      gdb_assert (lp->status == 0);
    }

  /* Iterator callback: always return 0 so iteration continues over
     all LWPs.  */
  return 0;
}

/* Request a stop on LWP.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  stop_callback (lwp, NULL);
}

/* Return non-zero if LWP PID has a pending SIGINT.  */

static int
linux_nat_has_pending_sigint (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);

  /* A pending SIGINT that is also ignored will never be delivered,
     so don't count it.  */
  if (sigismember (&pending, SIGINT)
      && !sigismember (&ignored, SIGINT))
    return 1;

  return 0;
}

/* Set a flag in LP indicating that we should ignore its next SIGINT.  */

static int
set_ignore_sigint (struct lwp_info *lp, void *data)
{
  /* If a thread has a pending SIGINT, consume it; otherwise, set a
     flag to consume the next one.  */
  if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
      && WSTOPSIG (lp->status) == SIGINT)
    lp->status = 0;
  else
    lp->ignore_sigint = 1;

  return 0;
}

/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
   This function is called after we know the LWP has stopped; if the LWP
   stopped before the expected SIGINT was delivered, then it will never have
   arrived.
   Also, if the signal was delivered to a shared queue and consumed
   by a different thread, it will never be delivered to this LWP.  */

static void
maybe_clear_ignore_sigint (struct lwp_info *lp)
{
  if (!lp->ignore_sigint)
    return;

  /* The SIGINT we expected never arrived (or was stolen by another
     thread); forget about it so we don't swallow a future one.  */
  if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "MCIS: Clearing bogus flag for %s\n",
			    target_pid_to_str (lp->ptid));
      lp->ignore_sigint = 0;
    }
}

/* Fetch the possible triggered data watchpoint info and store it in
   LP.

   On some archs, like x86, that use debug registers to set
   watchpoints, it's possible that the way to know which watched
   address trapped, is to check the register that is used to select
   which address to watch.  Problem is, between setting the watchpoint
   and reading back which data address trapped, the user may change
   the set of watchpoints, and, as a consequence, GDB changes the
   debug registers in the inferior.  To avoid reading back a stale
   stopped-data-address when that happens, we cache in LP the fact
   that a watchpoint trapped, and the corresponding data address, as
   soon as we see LP stop with a SIGTRAP.  If GDB changes the debug
   registers meanwhile, we have the cached data we can rely on.
*/

static void
save_sigtrap (struct lwp_info *lp)
{
  struct cleanup *old_chain;

  /* If the low target can't tell watchpoint stops apart, just record
     "not stopped by watchpoint".  */
  if (linux_ops->to_stopped_by_watchpoint == NULL)
    {
      lp->stopped_by_watchpoint = 0;
      return;
    }

  /* The to_stopped_* methods operate on inferior_ptid; temporarily
     point it at LP, restoring it via the cleanup below.  */
  old_chain = save_inferior_ptid ();
  inferior_ptid = lp->ptid;

  lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();

  if (lp->stopped_by_watchpoint)
    {
      if (linux_ops->to_stopped_data_address != NULL)
	lp->stopped_data_address_p =
	  linux_ops->to_stopped_data_address (&current_target,
					      &lp->stopped_data_address);
      else
	lp->stopped_data_address_p = 0;
    }

  do_cleanups (old_chain);
}

/* See save_sigtrap.  */

static int
linux_nat_stopped_by_watchpoint (void)
{
  struct lwp_info *lp = find_lwp_pid (inferior_ptid);

  gdb_assert (lp != NULL);

  return lp->stopped_by_watchpoint;
}

static int
linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
{
  struct lwp_info *lp = find_lwp_pid (inferior_ptid);

  gdb_assert (lp != NULL);

  *addr_p = lp->stopped_data_address;

  /* Return non-zero only if the cached data address is valid.  */
  return lp->stopped_data_address_p;
}

/* Commonly any breakpoint / watchpoint generate only SIGTRAP.  */

static int
sigtrap_is_event (int status)
{
  return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
}

/* SIGTRAP-like events recognizer.  */

static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;

/* Check for SIGTRAP-like events in LP.  */

static int
linux_nat_lp_status_is_event (struct lwp_info *lp)
{
  /* We check for lp->waitstatus in addition to lp->status, because we can
     have pending process exits recorded in lp->status
     and W_EXITCODE(0,0) == 0.  We should probably have an additional
     lp->status_p flag.  */

  return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
	  && linux_nat_status_is_event (lp->status));
}

/* Set alternative SIGTRAP-like events recognizer.  If
   breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
   applied.  */

void
linux_nat_set_status_is_event (struct target_ops *t,
			       int (*status_is_event) (int status))
{
  linux_nat_status_is_event = status_is_event;
}

/* Wait until LP is stopped.  */

static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
  struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));

  /* If this is a vfork parent, bail out, it is not going to report
     any SIGSTOP until the vfork is done with.  */
  if (inf->vfork_child != NULL)
    return 0;

  if (!lp->stopped)
    {
      int status;

      status = wait_lwp (lp);
      /* A zero status means the LWP exited while we waited.  */
      if (status == 0)
	return 0;

      if (lp->ignore_sigint && WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGINT)
	{
	  lp->ignore_sigint = 0;

	  /* Discard this SIGINT and keep waiting for the SIGSTOP we
	     really expect.  */
	  errno = 0;
	  ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_CONT %s, 0, 0 (%s) "
				"(discarding SIGINT)\n",
				target_pid_to_str (lp->ptid),
				errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp, NULL);
	}

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* The thread was stopped with a signal other than SIGSTOP.  */

	  save_sigtrap (lp);

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"SWC: Pending event %s in %s\n",
				status_to_str ((int) status),
				target_pid_to_str (lp->ptid));

	  /* Save the sigtrap event.  */
	  lp->status = status;
	  gdb_assert (!lp->stopped);
	  gdb_assert (lp->signalled);
	  lp->stopped = 1;
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch, so
	     there's no SIGSTOP pending.  */

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"SWC: Delayed SIGSTOP caught for %s.\n",
				target_pid_to_str (lp->ptid));

	  lp->stopped = 1;

	  /* Reset SIGNALLED only after the stop_wait_callback call
	     above as it does gdb_assert on SIGNALLED.  */
	  lp->signalled = 0;
	}
    }

  return 0;
}

/* Return non-zero if LP has a wait status pending.  */

static int
status_callback (struct lwp_info *lp, void *data)
{
  /* Only report a pending wait status if we pretend that this has
     indeed been resumed.  */
  if (!lp->resumed)
    return 0;

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
	 or a pending process exit.  Note that `W_EXITCODE(0,0) ==
	 0', so a clean process exit can not be stored pending in
	 lp->status, it is indistinguishable from
	 no-pending-status.  */
      return 1;
    }

  if (lp->status != 0)
    return 1;

  return 0;
}

/* Return non-zero if LP isn't stopped.  */

static int
running_callback (struct lwp_info *lp, void *data)
{
  return (!lp->stopped
	  || ((lp->status != 0
	       || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
	      && lp->resumed));
}

/* Count the LWP's that have had events.  */

static int
count_events_callback (struct lwp_info *lp, void *data)
{
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending.  */
  if (lp->resumed && linux_nat_lp_status_is_event (lp))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
{
  if (lp->last_resume_kind == resume_step
      && lp->status != 0)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event.  */

static int
select_event_lwp_callback (struct lwp_info *lp, void *data)
{
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (lp->resumed && linux_nat_lp_status_is_event (lp))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoint (struct lwp_info *lp)
{
  /* Arrange for a breakpoint to be hit again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     this LWP, and this breakpoint will trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  struct regcache *regcache = get_thread_regcache (lp->ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR pc;

  pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
  if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "CB: Push back breakpoint for %s\n",
			    target_pid_to_str (lp->ptid));

      /* Back up the PC if necessary.  */
      if (gdbarch_decr_pc_after_break (gdbarch))
	regcache_write_pc (regcache, pc);

      return 1;
    }
  return 0;
}

static int
cancel_breakpoints_callback (struct lwp_info *lp, void *data)
{
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (linux_nat_lp_status_is_event (lp)
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status = 0;

  return 0;
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Record the wait status for the original LWP.  */
  (*orig_lp)->status = *status;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp = iterate_over_lwps (filter,
				select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "SEL: Select single-step %s\n",
			    target_pid_to_str (event_lp->ptid));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      iterate_over_lwps (filter, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_linux_nat && num_events > 1)
	fprintf_unfiltered (gdb_stdlog,
			    "SEL: Found %d SIGTRAP events, selecting #%d\n",
			    num_events, random_selector);

      event_lp = iterate_over_lwps (filter,
				    select_event_lwp_callback,
				    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP.  */
  (*orig_lp)->status = 0;
}

/* Return non-zero if LP has been resumed.  */

static int
resumed_callback (struct lwp_info *lp, void *data)
{
  return lp->resumed;
}

/* Stop an active thread, verify it still exists, then resume it.  If
   the thread ends up with a pending status, then it is not resumed,
   and *DATA (really a pointer to int), is set.  */

static int
stop_and_resume_callback (struct lwp_info *lp, void *data)
{
  int *new_pending_p = data;

  if (!lp->stopped)
    {
      ptid_t ptid = lp->ptid;

      stop_callback (lp, NULL);
      stop_wait_callback (lp, NULL);

      /* Resume if the lwp still exists, and the core wanted it
	 running.  */
      lp = find_lwp_pid (ptid);
      if (lp != NULL)
	{
	  if (lp->last_resume_kind == resume_stop
	      && lp->status == 0)
	    {
	      /* The core wanted the LWP to stop.  Even if it stopped
		 cleanly (with SIGSTOP), leave the event pending.  */
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SARC: core wanted LWP %ld stopped "
				    "(leaving SIGSTOP pending)\n",
				    ptid_get_lwp (lp->ptid));
	      lp->status = W_STOPCODE (SIGSTOP);
	    }

	  if (lp->status == 0)
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SARC: re-resuming LWP %ld\n",
				    ptid_get_lwp (lp->ptid));
	      resume_lwp (lp, lp->step, GDB_SIGNAL_0);
	    }
	  else
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SARC: not re-resuming LWP %ld "
				    "(has pending)\n",
				    ptid_get_lwp (lp->ptid));
	      if (new_pending_p)
		*new_pending_p = 1;
	    }
	}
    }
  return 0;
}

/* Check if we should go on and pass this event to common code.
   Return the affected lwp if we are, or NULL otherwise.  If we stop
   all lwps temporarily, we may end up with new pending events in some
   other lwp.  In that case set *NEW_PENDING_P to true.  */

static struct lwp_info *
linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
{
  struct lwp_info *lp;

  *new_pending_p = 0;

  lp = find_lwp_pid (pid_to_ptid (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists.  The non-leader
     thread changes its tid to the tgid.  */

  if (WIFSTOPPED (status) && lp == NULL
      && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
    {
      /* A multi-thread exec after we had seen the leader exiting.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Re-adding thread group leader LWP %d.\n",
			    lwpid);

      lp = add_lwp (ptid_build (lwpid, lwpid, 0));
      lp->stopped = 1;
      lp->resumed = 1;
      add_thread (lp->ptid);
    }

  if (WIFSTOPPED (status) && !lp)
    {
      /* An LWP we know nothing about yet stopped; stash the status
	 for when its fork/vfork/clone event arrives.  */
      add_to_pid_list (&stopped_pids, lwpid, status);
      return NULL;
    }

  /* Make sure we don't report an event for the exit of an LWP not in
     our list, i.e. not part of the current process.  This can happen
     if we detach from a program we originally forked and then it
     exits.  */
  if (!WIFSTOPPED (status) && !lp)
    return NULL;

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 0))
	return NULL;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Handling extended status 0x%06x\n",
			    status);
      if (linux_handle_extended_wait (lp, status, 0))
	return NULL;
    }

  if (linux_nat_status_is_event (status))
    save_sigtrap (lp);

  /* Check if the thread has exited.  */
  if ((WIFEXITED (status) || WIFSIGNALED (status))
      && num_lwps (ptid_get_pid (lp->ptid)) > 1)
    {
      /* If this is the main thread, we must stop all threads and verify
	 if they are still alive.  This is because in the nptl thread model
	 on Linux 2.4, there is no signal issued for exiting LWPs
	 other than the main thread.  We only get the main thread exit
	 signal once all child threads have already exited.  If we
	 stop all the threads and use the stop_wait_callback to check
	 if they have exited we can determine whether this signal
	 should be ignored or whether it means the end of the debugged
	 application, regardless of which threading model is being
	 used.  */
      if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
	{
	  lp->stopped = 1;
	  iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
			     stop_and_resume_callback, new_pending_p);
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
	{
	  /* If there is at least one more LWP, then the exit signal
	     was not the end of the debugged application and should be
	     ignored.  */
	  exit_lwp (lp);
	  return NULL;
	}
    }

  /* Check if the current LWP has previously exited.  In the nptl
     thread model, LWPs other than the main thread do not issue
     signals when they exit so we must check whenever the thread has
     stopped.  A similar check is made in stop_wait_callback().  */
  if (num_lwps (ptid_get_pid (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
    {
      ptid_t ptid = pid_to_ptid (ptid_get_pid (lp->ptid));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      exit_lwp (lp);

      /* Make sure there is at least one thread running.  */
      gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));

      /* Discard the event.  */
      return NULL;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGSTOP caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      lp->signalled = 0;

      if (lp->last_resume_kind != resume_stop)
	{
	  /* This is a delayed SIGSTOP.  */

	  registers_changed ();

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (lp);
	  linux_ops->to_resume (linux_ops,
				pid_to_ptid (ptid_get_lwp (lp->ptid)),
				lp->step, GDB_SIGNAL_0);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
				lp->step ?
				"PTRACE_SINGLESTEP" : "PTRACE_CONT",
				target_pid_to_str (lp->ptid));

	  lp->stopped = 0;
	  gdb_assert (lp->resumed);

	  /* Discard the event.  */
	  return NULL;
	}
    }

  /* Make sure we don't report a SIGINT that we have already displayed
     for another thread.  */
  if (lp->ignore_sigint
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGINT caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      /* This is a delayed SIGINT.  */
      lp->ignore_sigint = 0;

      registers_changed ();
      if (linux_nat_prepare_to_resume != NULL)
	linux_nat_prepare_to_resume (lp);
      linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
			    lp->step, GDB_SIGNAL_0);
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s %s, 0, 0 (discard SIGINT)\n",
			    lp->step ?
			    "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			    target_pid_to_str (lp->ptid));

      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return NULL;
    }

  /* An interesting event.  */
  gdb_assert (lp);
  lp->status = status;
  return lp;
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct inferior *inf;

  ALL_INFERIORS (inf)
    {
      struct lwp_info *leader_lp;

      if (inf->pid == 0)
	continue;

      leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && num_lwps (inf->pid) > 1
	  && linux_proc_pid_is_zombie (inf->pid))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"CZL: Thread group leader %d zombie "
				"(it exited, or another thread execd).\n",
				inf->pid);

	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
03164 Until we reap all other threads except the leader and the 03165 execing thread, the leader will be zombie, and the 03166 execing thread will be in `D (disc sleep)'. As soon as 03167 all other threads are reaped, the execing thread changes 03168 it's tid to the tgid, and the previous (zombie) leader 03169 vanishes, giving place to the "new" leader. We could try 03170 distinguishing the exit and exec cases, by waiting once 03171 more, and seeing if something comes out, but it doesn't 03172 sound useful. The previous leader _does_ go away, and 03173 we'll re-add the new one once we see the exec event 03174 (which is just the same as what would happen if the 03175 previous leader did exit voluntarily before some other 03176 thread execs). */ 03177 03178 if (debug_linux_nat) 03179 fprintf_unfiltered (gdb_stdlog, 03180 "CZL: Thread group leader %d vanished.\n", 03181 inf->pid); 03182 exit_lwp (leader_lp); 03183 } 03184 } 03185 } 03186 03187 static ptid_t 03188 linux_nat_wait_1 (struct target_ops *ops, 03189 ptid_t ptid, struct target_waitstatus *ourstatus, 03190 int target_options) 03191 { 03192 static sigset_t prev_mask; 03193 enum resume_kind last_resume_kind; 03194 struct lwp_info *lp; 03195 int status; 03196 03197 if (debug_linux_nat) 03198 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n"); 03199 03200 /* The first time we get here after starting a new inferior, we may 03201 not have added it to the LWP list yet - this is the earliest 03202 moment at which we know its PID. */ 03203 if (ptid_is_pid (inferior_ptid)) 03204 { 03205 /* Upgrade the main thread's ptid. */ 03206 thread_change_ptid (inferior_ptid, 03207 ptid_build (ptid_get_pid (inferior_ptid), 03208 ptid_get_pid (inferior_ptid), 0)); 03209 03210 lp = add_initial_lwp (inferior_ptid); 03211 lp->resumed = 1; 03212 } 03213 03214 /* Make sure SIGCHLD is blocked until the sigsuspend below. 
/* The bulk of the target_wait implementation: block (or poll, with
   TARGET_WNOHANG) until some LWP matching PTID reports an interesting
   event, filter it, in all-stop mode stop all other LWPs and select
   the event thread, and fill in OURSTATUS for the core.  Returns the
   ptid of the thread the event belongs to, or minus_one_ptid when
   nothing is reported.  Helper for linux_nat_wait.  */

static ptid_t
linux_nat_wait_1 (struct target_ops *ops,
		  ptid_t ptid, struct target_waitstatus *ourstatus,
		  int target_options)
{
  static sigset_t prev_mask;
  enum resume_kind last_resume_kind;
  struct lwp_info *lp;
  int status;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");

  /* The first time we get here after starting a new inferior, we may
     not have added it to the LWP list yet - this is the earliest
     moment at which we know its PID.  */
  if (ptid_is_pid (inferior_ptid))
    {
      /* Upgrade the main thread's ptid.  */
      thread_change_ptid (inferior_ptid,
			  ptid_build (ptid_get_pid (inferior_ptid),
				      ptid_get_pid (inferior_ptid), 0));

      lp = add_initial_lwp (inferior_ptid);
      lp->resumed = 1;
    }

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  */
  block_child_signals (&prev_mask);

retry:
  lp = NULL;
  status = 0;

  /* First check if there is a LWP with a wait status pending.  */
  if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
    {
      /* Any LWP in the PTID group that's been resumed will do.  */
      lp = iterate_over_lwps (ptid, status_callback, NULL);
      if (lp)
	{
	  if (debug_linux_nat && lp->status)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: Using pending wait status %s for %s.\n",
				status_to_str (lp->status),
				target_pid_to_str (lp->ptid));
	}
    }
  else if (ptid_lwp_p (ptid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Waiting for specific LWP %s.\n",
			    target_pid_to_str (ptid));

      /* We have a specific LWP to check.  */
      lp = find_lwp_pid (ptid);
      gdb_assert (lp);

      if (debug_linux_nat && lp->status)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Using pending wait status %s for %s.\n",
			    status_to_str (lp->status),
			    target_pid_to_str (lp->ptid));

      /* We check for lp->waitstatus in addition to lp->status,
	 because we can have pending process exits recorded in
	 lp->status and W_EXITCODE(0,0) == 0.  We should probably have
	 an additional lp->status_p flag.  */
      if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
	lp = NULL;
    }

  if (!target_can_async_p ())
    {
      /* Causes SIGINT to be passed on to the attached process.  */
      set_sigint_trap ();
    }

  /* But if we don't find a pending event, we'll have to wait.  */

  while (lp == NULL)
    {
      pid_t lwpid;

      /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */

      errno = 0;
      lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
      if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
	lwpid = my_waitpid (-1, &status, WNOHANG);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNW: waitpid(-1, ...) returned %d, %s\n",
			    lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (lwpid > 0)
	{
	  /* If this is true, then we paused LWPs momentarily, and may
	     now have pending events to handle.  */
	  int new_pending;

	  if (debug_linux_nat)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "LLW: waitpid %ld received %s\n",
				  (long) lwpid, status_to_str (status));
	    }

	  lp = linux_nat_filter_event (lwpid, status, &new_pending);

	  /* STATUS is now no longer valid, use LP->STATUS instead.  */
	  status = 0;

	  /* The event belongs to an LWP we were not asked to wait
	     for; record it on the LWP and keep looking.  */
	  if (lp && !ptid_match (lp->ptid, ptid))
	    {
	      gdb_assert (lp->resumed);

	      if (debug_linux_nat)
		fprintf (stderr,
			 "LWP %ld got an event %06x, leaving pending.\n",
			 ptid_get_lwp (lp->ptid), lp->status);

	      if (WIFSTOPPED (lp->status))
		{
		  if (WSTOPSIG (lp->status) != SIGSTOP)
		    {
		      /* Cancel breakpoint hits.  The breakpoint may
			 be removed before we fetch events from this
			 process to report to the core.  It is best
			 not to assume the moribund breakpoints
			 heuristic always handles these cases --- it
			 could be too many events go through to the
			 core before this one is handled.  All-stop
			 always cancels breakpoint hits in all
			 threads.  */
		      if (non_stop
			  && linux_nat_lp_status_is_event (lp)
			  && cancel_breakpoint (lp))
			{
			  /* Throw away the SIGTRAP.  */
			  lp->status = 0;

			  if (debug_linux_nat)
			    fprintf (stderr,
				     "LLW: LWP %ld hit a breakpoint while"
				     " waiting for another process;"
				     " cancelled it\n",
				     ptid_get_lwp (lp->ptid));
			}
		      lp->stopped = 1;
		    }
		  else
		    {
		      lp->stopped = 1;
		      lp->signalled = 0;
		    }
		}
	      else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
		{
		  if (debug_linux_nat)
		    fprintf (stderr,
			     "Process %ld exited while stopping LWPs\n",
			     ptid_get_lwp (lp->ptid));

		  /* This was the last lwp in the process.  Since
		     events are serialized to GDB core, and we can't
		     report this one right now, but GDB core and the
		     other target layers will want to be notified
		     about the exit code/signal, leave the status
		     pending for the next time we're able to report
		     it.  */

		  /* Prevent trying to stop this thread again.  We'll
		     never try to resume it because it has a pending
		     status.  */
		  lp->stopped = 1;

		  /* Dead LWP's aren't expected to report a pending
		     sigstop.  */
		  lp->signalled = 0;

		  /* Store the pending event in the waitstatus as
		     well, because W_EXITCODE(0,0) == 0.  */
		  store_waitstatus (&lp->waitstatus, lp->status);
		}

	      /* Keep looking.  */
	      lp = NULL;
	    }

	  if (new_pending)
	    {
	      /* Some LWP now has a pending event.  Go all the way
		 back to check it.  */
	      goto retry;
	    }

	  if (lp)
	    {
	      /* We got an event to report to the core.  */
	      break;
	    }

	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left, bail.  We'd be stuck
	 forever in the sigsuspend call below otherwise.  */
      if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");

	  ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;

	  if (!target_can_async_p ())
	    clear_sigint_trap ();

	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* No interesting event to report to the core.  */

      if (target_options & TARGET_WNOHANG)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");

	  ourstatus->kind = TARGET_WAITKIND_IGNORE;
	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* We shouldn't end up here unless we want to try again.  */
      gdb_assert (lp == NULL);

      /* Block until we get an event reported with SIGCHLD.  */
      sigsuspend (&suspend_mask);
    }

  if (!target_can_async_p ())
    clear_sigint_trap ();

  gdb_assert (lp);

  status = lp->status;
  lp->status = 0;

  /* Don't report signals that GDB isn't interested in, such as
     signals that are neither printed nor stopped upon.  Stopping all
     threads can be a bit time-consuming so if we want decent
     performance with heavily multi-threaded programs, especially when
     they're using a high frequency timer, we'd better avoid it if we
     can.  */

  if (WIFSTOPPED (status))
    {
      enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));

      /* When using hardware single-step, we need to report every signal.
	 Otherwise, signals in pass_mask may be short-circuited.  */
      if (!lp->step
	  && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
	{
	  /* FIXME: kettenis/2001-06-06: Should we resume all threads
	     here?  It is not clear we should.  GDB may not expect
	     other threads to run.  On the other hand, not resuming
	     newly attached threads may cause an unwanted delay in
	     getting them running.  */
	  registers_changed ();
	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (lp);
	  linux_ops->to_resume (linux_ops,
				pid_to_ptid (ptid_get_lwp (lp->ptid)),
				lp->step, signo);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: %s %s, %s (preempt 'handle')\n",
				lp->step ?
				"PTRACE_SINGLESTEP" : "PTRACE_CONT",
				target_pid_to_str (lp->ptid),
				(signo != GDB_SIGNAL_0
				 ? strsignal (gdb_signal_to_host (signo))
				 : "0"));
	  lp->stopped = 0;
	  goto retry;
	}

      if (!non_stop)
	{
	  /* Only do the below in all-stop, as we currently use SIGINT
	     to implement target_stop (see linux_nat_stop) in
	     non-stop.  */
	  if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
	    {
	      /* If ^C/BREAK is typed at the tty/console, SIGINT gets
		 forwarded to the entire process group, that is, all LWPs
		 will receive it - unless they're using CLONE_THREAD to
		 share signals.  Since we only want to report it once, we
		 mark it as ignored for all LWPs except this one.  */
	      iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
				 set_ignore_sigint, NULL);
	      lp->ignore_sigint = 0;
	    }
	  else
	    maybe_clear_ignore_sigint (lp);
	}
    }

  /* This LWP is stopped now.  */
  lp->stopped = 1;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
			status_to_str (status), target_pid_to_str (lp->ptid));

  if (!non_stop)
    {
      /* Now stop all other LWP's ...  */
      iterate_over_lwps (minus_one_ptid, stop_callback, NULL);

      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
	select_event_lwp (ptid, &lp, &status);

      /* Now that we've selected our final event LWP, cancel any
	 breakpoints in other LWPs that have hit a GDB breakpoint.
	 See the comment in cancel_breakpoints_callback to find out
	 why.  */
      iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);

      /* We'll need this to determine whether to report a SIGSTOP as
	 TARGET_WAITKIND_0.  Need to take a copy because
	 resume_clear_callback clears it.  */
      last_resume_kind = lp->last_resume_kind;

      /* In all-stop, from the core's perspective, all LWPs are now
	 stopped until a new resume action is sent over.  */
      iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
    }
  else
    {
      /* See above.  */
      last_resume_kind = lp->last_resume_kind;
      resume_clear_callback (lp, NULL);
    }

  if (linux_nat_status_is_event (status))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: trap ptid is %s.\n",
			    target_pid_to_str (lp->ptid));
    }

  /* A previously-recorded extended waitstatus (fork/exec/etc.) takes
     precedence over the raw wait status.  */
  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      *ourstatus = lp->waitstatus;
      lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    store_waitstatus (ourstatus, status);

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");

  restore_child_signals_mask (&prev_mask);

  if (last_resume_kind == resume_stop
      && ourstatus->kind == TARGET_WAITKIND_STOPPED
      && WSTOPSIG (status) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with
	 target_stop, and it stopped cleanly, so report as SIG0.  The
	 use of SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = GDB_SIGNAL_0;
    }

  if (ourstatus->kind == TARGET_WAITKIND_EXITED
      || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
    lp->core = -1;
  else
    lp->core = linux_common_core_of_thread (lp->ptid);

  return lp->ptid;
}
*/ 03603 03604 static int 03605 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data) 03606 { 03607 ptid_t *wait_ptid_p = data; 03608 03609 if (lp->stopped 03610 && lp->resumed 03611 && lp->status == 0 03612 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE) 03613 { 03614 struct regcache *regcache = get_thread_regcache (lp->ptid); 03615 struct gdbarch *gdbarch = get_regcache_arch (regcache); 03616 CORE_ADDR pc = regcache_read_pc (regcache); 03617 03618 gdb_assert (is_executing (lp->ptid)); 03619 03620 /* Don't bother if there's a breakpoint at PC that we'd hit 03621 immediately, and we're not waiting for this LWP. */ 03622 if (!ptid_match (lp->ptid, *wait_ptid_p)) 03623 { 03624 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc)) 03625 return 0; 03626 } 03627 03628 if (debug_linux_nat) 03629 fprintf_unfiltered (gdb_stdlog, 03630 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n", 03631 target_pid_to_str (lp->ptid), 03632 paddress (gdbarch, pc), 03633 lp->step); 03634 03635 registers_changed (); 03636 if (linux_nat_prepare_to_resume != NULL) 03637 linux_nat_prepare_to_resume (lp); 03638 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)), 03639 lp->step, GDB_SIGNAL_0); 03640 lp->stopped = 0; 03641 lp->stopped_by_watchpoint = 0; 03642 } 03643 03644 return 0; 03645 } 03646 03647 static ptid_t 03648 linux_nat_wait (struct target_ops *ops, 03649 ptid_t ptid, struct target_waitstatus *ourstatus, 03650 int target_options) 03651 { 03652 ptid_t event_ptid; 03653 03654 if (debug_linux_nat) 03655 { 03656 char *options_string; 03657 03658 options_string = target_options_to_string (target_options); 03659 fprintf_unfiltered (gdb_stdlog, 03660 "linux_nat_wait: [%s], [%s]\n", 03661 target_pid_to_str (ptid), 03662 options_string); 03663 xfree (options_string); 03664 } 03665 03666 /* Flush the async file first. 
*/ 03667 if (target_can_async_p ()) 03668 async_file_flush (); 03669 03670 /* Resume LWPs that are currently stopped without any pending status 03671 to report, but are resumed from the core's perspective. LWPs get 03672 in this state if we find them stopping at a time we're not 03673 interested in reporting the event (target_wait on a 03674 specific_process, for example, see linux_nat_wait_1), and 03675 meanwhile the event became uninteresting. Don't bother resuming 03676 LWPs we're not going to wait for if they'd stop immediately. */ 03677 if (non_stop) 03678 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid); 03679 03680 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options); 03681 03682 /* If we requested any event, and something came out, assume there 03683 may be more. If we requested a specific lwp or process, also 03684 assume there may be more. */ 03685 if (target_can_async_p () 03686 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE 03687 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED) 03688 || !ptid_equal (ptid, minus_one_ptid))) 03689 async_file_mark (); 03690 03691 /* Get ready for the next event. */ 03692 if (target_can_async_p ()) 03693 target_async (inferior_event_handler, 0); 03694 03695 return event_ptid; 03696 } 03697 03698 static int 03699 kill_callback (struct lwp_info *lp, void *data) 03700 { 03701 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */ 03702 03703 errno = 0; 03704 kill (ptid_get_lwp (lp->ptid), SIGKILL); 03705 if (debug_linux_nat) 03706 fprintf_unfiltered (gdb_stdlog, 03707 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n", 03708 target_pid_to_str (lp->ptid), 03709 errno ? safe_strerror (errno) : "OK"); 03710 03711 /* Some kernels ignore even SIGKILL for processes under ptrace. 
*/ 03712 03713 errno = 0; 03714 ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0); 03715 if (debug_linux_nat) 03716 fprintf_unfiltered (gdb_stdlog, 03717 "KC: PTRACE_KILL %s, 0, 0 (%s)\n", 03718 target_pid_to_str (lp->ptid), 03719 errno ? safe_strerror (errno) : "OK"); 03720 03721 return 0; 03722 } 03723 03724 static int 03725 kill_wait_callback (struct lwp_info *lp, void *data) 03726 { 03727 pid_t pid; 03728 03729 /* We must make sure that there are no pending events (delayed 03730 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current 03731 program doesn't interfere with any following debugging session. */ 03732 03733 /* For cloned processes we must check both with __WCLONE and 03734 without, since the exit status of a cloned process isn't reported 03735 with __WCLONE. */ 03736 if (lp->cloned) 03737 { 03738 do 03739 { 03740 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WCLONE); 03741 if (pid != (pid_t) -1) 03742 { 03743 if (debug_linux_nat) 03744 fprintf_unfiltered (gdb_stdlog, 03745 "KWC: wait %s received unknown.\n", 03746 target_pid_to_str (lp->ptid)); 03747 /* The Linux kernel sometimes fails to kill a thread 03748 completely after PTRACE_KILL; that goes from the stop 03749 point in do_fork out to the one in 03750 get_signal_to_deliever and waits again. So kill it 03751 again. */ 03752 kill_callback (lp, NULL); 03753 } 03754 } 03755 while (pid == ptid_get_lwp (lp->ptid)); 03756 03757 gdb_assert (pid == -1 && errno == ECHILD); 03758 } 03759 03760 do 03761 { 03762 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, 0); 03763 if (pid != (pid_t) -1) 03764 { 03765 if (debug_linux_nat) 03766 fprintf_unfiltered (gdb_stdlog, 03767 "KWC: wait %s received unk.\n", 03768 target_pid_to_str (lp->ptid)); 03769 /* See the call to kill_callback above. 
*/ 03770 kill_callback (lp, NULL); 03771 } 03772 } 03773 while (pid == ptid_get_lwp (lp->ptid)); 03774 03775 gdb_assert (pid == -1 && errno == ECHILD); 03776 return 0; 03777 } 03778 03779 static void 03780 linux_nat_kill (struct target_ops *ops) 03781 { 03782 struct target_waitstatus last; 03783 ptid_t last_ptid; 03784 int status; 03785 03786 /* If we're stopped while forking and we haven't followed yet, 03787 kill the other task. We need to do this first because the 03788 parent will be sleeping if this is a vfork. */ 03789 03790 get_last_target_status (&last_ptid, &last); 03791 03792 if (last.kind == TARGET_WAITKIND_FORKED 03793 || last.kind == TARGET_WAITKIND_VFORKED) 03794 { 03795 ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0); 03796 wait (&status); 03797 03798 /* Let the arch-specific native code know this process is 03799 gone. */ 03800 linux_nat_forget_process (ptid_get_pid (last.value.related_pid)); 03801 } 03802 03803 if (forks_exist_p ()) 03804 linux_fork_killall (); 03805 else 03806 { 03807 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid)); 03808 03809 /* Stop all threads before killing them, since ptrace requires 03810 that the thread is stopped to sucessfully PTRACE_KILL. */ 03811 iterate_over_lwps (ptid, stop_callback, NULL); 03812 /* ... and wait until all of them have reported back that 03813 they're no longer running. */ 03814 iterate_over_lwps (ptid, stop_wait_callback, NULL); 03815 03816 /* Kill all LWP's ... */ 03817 iterate_over_lwps (ptid, kill_callback, NULL); 03818 03819 /* ... and wait until we've flushed all events. */ 03820 iterate_over_lwps (ptid, kill_wait_callback, NULL); 03821 } 03822 03823 target_mourn_inferior (); 03824 } 03825 03826 static void 03827 linux_nat_mourn_inferior (struct target_ops *ops) 03828 { 03829 int pid = ptid_get_pid (inferior_ptid); 03830 03831 purge_lwp_list (pid); 03832 03833 if (! forks_exist_p ()) 03834 /* Normal case, no other forks available. 
*/ 03835 linux_ops->to_mourn_inferior (ops); 03836 else 03837 /* Multi-fork case. The current inferior_ptid has exited, but 03838 there are other viable forks to debug. Delete the exiting 03839 one and context-switch to the first available. */ 03840 linux_fork_mourn_inferior (); 03841 03842 /* Let the arch-specific native code know this process is gone. */ 03843 linux_nat_forget_process (pid); 03844 } 03845 03846 /* Convert a native/host siginfo object, into/from the siginfo in the 03847 layout of the inferiors' architecture. */ 03848 03849 static void 03850 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction) 03851 { 03852 int done = 0; 03853 03854 if (linux_nat_siginfo_fixup != NULL) 03855 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction); 03856 03857 /* If there was no callback, or the callback didn't do anything, 03858 then just do a straight memcpy. */ 03859 if (!done) 03860 { 03861 if (direction == 1) 03862 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t)); 03863 else 03864 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t)); 03865 } 03866 } 03867 03868 static LONGEST 03869 linux_xfer_siginfo (struct target_ops *ops, enum target_object object, 03870 const char *annex, gdb_byte *readbuf, 03871 const gdb_byte *writebuf, ULONGEST offset, LONGEST len) 03872 { 03873 int pid; 03874 siginfo_t siginfo; 03875 gdb_byte inf_siginfo[sizeof (siginfo_t)]; 03876 03877 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO); 03878 gdb_assert (readbuf || writebuf); 03879 03880 pid = ptid_get_lwp (inferior_ptid); 03881 if (pid == 0) 03882 pid = ptid_get_pid (inferior_ptid); 03883 03884 if (offset > sizeof (siginfo)) 03885 return -1; 03886 03887 errno = 0; 03888 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo); 03889 if (errno != 0) 03890 return -1; 03891 03892 /* When GDB is built as a 64-bit application, ptrace writes into 03893 SIGINFO an object with 64-bit layout. 
Since debugging a 32-bit 03894 inferior with a 64-bit GDB should look the same as debugging it 03895 with a 32-bit GDB, we need to convert it. GDB core always sees 03896 the converted layout, so any read/write will have to be done 03897 post-conversion. */ 03898 siginfo_fixup (&siginfo, inf_siginfo, 0); 03899 03900 if (offset + len > sizeof (siginfo)) 03901 len = sizeof (siginfo) - offset; 03902 03903 if (readbuf != NULL) 03904 memcpy (readbuf, inf_siginfo + offset, len); 03905 else 03906 { 03907 memcpy (inf_siginfo + offset, writebuf, len); 03908 03909 /* Convert back to ptrace layout before flushing it out. */ 03910 siginfo_fixup (&siginfo, inf_siginfo, 1); 03911 03912 errno = 0; 03913 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo); 03914 if (errno != 0) 03915 return -1; 03916 } 03917 03918 return len; 03919 } 03920 03921 static LONGEST 03922 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object, 03923 const char *annex, gdb_byte *readbuf, 03924 const gdb_byte *writebuf, 03925 ULONGEST offset, LONGEST len) 03926 { 03927 struct cleanup *old_chain; 03928 LONGEST xfer; 03929 03930 if (object == TARGET_OBJECT_SIGNAL_INFO) 03931 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf, 03932 offset, len); 03933 03934 /* The target is connected but no live inferior is selected. Pass 03935 this request down to a lower stratum (e.g., the executable 03936 file). 
*/ 03937 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid)) 03938 return 0; 03939 03940 old_chain = save_inferior_ptid (); 03941 03942 if (ptid_lwp_p (inferior_ptid)) 03943 inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid)); 03944 03945 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf, 03946 offset, len); 03947 03948 do_cleanups (old_chain); 03949 return xfer; 03950 } 03951 03952 static int 03953 linux_thread_alive (ptid_t ptid) 03954 { 03955 int err, tmp_errno; 03956 03957 gdb_assert (ptid_lwp_p (ptid)); 03958 03959 /* Send signal 0 instead of anything ptrace, because ptracing a 03960 running thread errors out claiming that the thread doesn't 03961 exist. */ 03962 err = kill_lwp (ptid_get_lwp (ptid), 0); 03963 tmp_errno = errno; 03964 if (debug_linux_nat) 03965 fprintf_unfiltered (gdb_stdlog, 03966 "LLTA: KILL(SIG0) %s (%s)\n", 03967 target_pid_to_str (ptid), 03968 err ? safe_strerror (tmp_errno) : "OK"); 03969 03970 if (err != 0) 03971 return 0; 03972 03973 return 1; 03974 } 03975 03976 static int 03977 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid) 03978 { 03979 return linux_thread_alive (ptid); 03980 } 03981 03982 static char * 03983 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid) 03984 { 03985 static char buf[64]; 03986 03987 if (ptid_lwp_p (ptid) 03988 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid) 03989 || num_lwps (ptid_get_pid (ptid)) > 1)) 03990 { 03991 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid)); 03992 return buf; 03993 } 03994 03995 return normal_pid_to_str (ptid); 03996 } 03997 03998 static char * 03999 linux_nat_thread_name (struct thread_info *thr) 04000 { 04001 int pid = ptid_get_pid (thr->ptid); 04002 long lwp = ptid_get_lwp (thr->ptid); 04003 #define FORMAT "/proc/%d/task/%ld/comm" 04004 char buf[sizeof (FORMAT) + 30]; 04005 FILE *comm_file; 04006 char *result = NULL; 04007 04008 snprintf (buf, sizeof (buf), FORMAT, pid, lwp); 04009 comm_file = 
gdb_fopen_cloexec (buf, "r"); 04010 if (comm_file) 04011 { 04012 /* Not exported by the kernel, so we define it here. */ 04013 #define COMM_LEN 16 04014 static char line[COMM_LEN + 1]; 04015 04016 if (fgets (line, sizeof (line), comm_file)) 04017 { 04018 char *nl = strchr (line, '\n'); 04019 04020 if (nl) 04021 *nl = '\0'; 04022 if (*line != '\0') 04023 result = line; 04024 } 04025 04026 fclose (comm_file); 04027 } 04028 04029 #undef COMM_LEN 04030 #undef FORMAT 04031 04032 return result; 04033 } 04034 04035 /* Accepts an integer PID; Returns a string representing a file that 04036 can be opened to get the symbols for the child process. */ 04037 04038 static char * 04039 linux_child_pid_to_exec_file (int pid) 04040 { 04041 char *name1, *name2; 04042 04043 name1 = xmalloc (PATH_MAX); 04044 name2 = xmalloc (PATH_MAX); 04045 make_cleanup (xfree, name1); 04046 make_cleanup (xfree, name2); 04047 memset (name2, 0, PATH_MAX); 04048 04049 sprintf (name1, "/proc/%d/exe", pid); 04050 if (readlink (name1, name2, PATH_MAX - 1) > 0) 04051 return name2; 04052 else 04053 return name1; 04054 } 04055 04056 /* Records the thread's register state for the corefile note 04057 section. 
*/

static char *
linux_nat_collect_thread_registers (const struct regcache *regcache,
				    ptid_t ptid, bfd *obfd,
				    char *note_data, int *note_size,
				    enum gdb_signal stop_signal)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  const struct regset *regset;
  int core_regset_p;
  gdb_gregset_t gregs;
  gdb_fpregset_t fpregs;

  core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);

  /* Prefer the gdbarch-provided ".reg" regset collector; fall back
     to the traditional fill_gregset when the arch doesn't supply
     one.  */
  if (core_regset_p
      && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
						     sizeof (gregs)))
	 != NULL && regset->collect_regset != NULL)
    regset->collect_regset (regset, regcache, -1, &gregs, sizeof (gregs));
  else
    fill_gregset (regcache, &gregs, -1);

  note_data = (char *) elfcore_write_prstatus
		(obfd, note_data, note_size, ptid_get_lwp (ptid),
		 gdb_signal_to_host (stop_signal), &gregs);

  /* Likewise for the floating-point registers (".reg2" section).  */
  if (core_regset_p
      && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
						     sizeof (fpregs)))
	 != NULL && regset->collect_regset != NULL)
    regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs));
  else
    fill_fpregset (regcache, &fpregs, -1);

  note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size,
					      &fpregs, sizeof (fpregs));

  return note_data;
}

/* Fills the "to_make_corefile_note" target vector.  Builds the note
   section for a corefile, and returns it in a malloc buffer.  */

static char *
linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
{
  /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
     converted to gdbarch_core_regset_sections, this function can go away.  */
  return linux_make_corefile_notes (target_gdbarch (), obfd, note_size,
				    linux_nat_collect_thread_registers);
}

/* Implement the to_xfer_partial interface for memory reads using the /proc
   filesystem.  Because we can use a single read() call for /proc, this
   can be much more efficient than banging away at PTRACE_PEEKTEXT,
   but it doesn't support writes.  */

static LONGEST
linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
			 const char *annex, gdb_byte *readbuf,
			 const gdb_byte *writebuf,
			 ULONGEST offset, LONGEST len)
{
  LONGEST ret;
  int fd;
  char filename[64];

  /* Only memory reads are supported here; writes and other objects
     fall through to other methods.  */
  if (object != TARGET_OBJECT_MEMORY || !readbuf)
    return 0;

  /* Don't bother for one word.  */
  if (len < 3 * sizeof (long))
    return 0;

  /* We could keep this file open and cache it - possibly one per
     thread.  That requires some juggling, but is even faster.  */
  sprintf (filename, "/proc/%d/mem", ptid_get_pid (inferior_ptid));
  fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
  if (fd == -1)
    return 0;

  /* If pread64 is available, use it.  It's faster if the kernel
     supports it (only one syscall), and it's 64-bit safe even on
     32-bit platforms (for instance, SPARC debugging a SPARC64
     application).  */
#ifdef HAVE_PREAD64
  if (pread64 (fd, readbuf, len, offset) != len)
#else
  if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
#endif
    ret = 0;
  else
    ret = len;

  close (fd);
  return ret;
}


/* Enumerate spufs IDs for process PID.
*/ 04159 static LONGEST 04160 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len) 04161 { 04162 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ()); 04163 LONGEST pos = 0; 04164 LONGEST written = 0; 04165 char path[128]; 04166 DIR *dir; 04167 struct dirent *entry; 04168 04169 xsnprintf (path, sizeof path, "/proc/%d/fd", pid); 04170 dir = opendir (path); 04171 if (!dir) 04172 return -1; 04173 04174 rewinddir (dir); 04175 while ((entry = readdir (dir)) != NULL) 04176 { 04177 struct stat st; 04178 struct statfs stfs; 04179 int fd; 04180 04181 fd = atoi (entry->d_name); 04182 if (!fd) 04183 continue; 04184 04185 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd); 04186 if (stat (path, &st) != 0) 04187 continue; 04188 if (!S_ISDIR (st.st_mode)) 04189 continue; 04190 04191 if (statfs (path, &stfs) != 0) 04192 continue; 04193 if (stfs.f_type != SPUFS_MAGIC) 04194 continue; 04195 04196 if (pos >= offset && pos + 4 <= offset + len) 04197 { 04198 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd); 04199 written += 4; 04200 } 04201 pos += 4; 04202 } 04203 04204 closedir (dir); 04205 return written; 04206 } 04207 04208 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU 04209 object type, using the /proc file system. */ 04210 static LONGEST 04211 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object, 04212 const char *annex, gdb_byte *readbuf, 04213 const gdb_byte *writebuf, 04214 ULONGEST offset, LONGEST len) 04215 { 04216 char buf[128]; 04217 int fd = 0; 04218 int ret = -1; 04219 int pid = ptid_get_pid (inferior_ptid); 04220 04221 if (!annex) 04222 { 04223 if (!readbuf) 04224 return -1; 04225 else 04226 return spu_enumerate_spu_ids (pid, readbuf, offset, len); 04227 } 04228 04229 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex); 04230 fd = gdb_open_cloexec (buf, writebuf? 
O_WRONLY : O_RDONLY, 0); 04231 if (fd <= 0) 04232 return -1; 04233 04234 if (offset != 0 04235 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset) 04236 { 04237 close (fd); 04238 return 0; 04239 } 04240 04241 if (writebuf) 04242 ret = write (fd, writebuf, (size_t) len); 04243 else if (readbuf) 04244 ret = read (fd, readbuf, (size_t) len); 04245 04246 close (fd); 04247 return ret; 04248 } 04249 04250 04251 /* Parse LINE as a signal set and add its set bits to SIGS. */ 04252 04253 static void 04254 add_line_to_sigset (const char *line, sigset_t *sigs) 04255 { 04256 int len = strlen (line) - 1; 04257 const char *p; 04258 int signum; 04259 04260 if (line[len] != '\n') 04261 error (_("Could not parse signal set: %s"), line); 04262 04263 p = line; 04264 signum = len * 4; 04265 while (len-- > 0) 04266 { 04267 int digit; 04268 04269 if (*p >= '0' && *p <= '9') 04270 digit = *p - '0'; 04271 else if (*p >= 'a' && *p <= 'f') 04272 digit = *p - 'a' + 10; 04273 else 04274 error (_("Could not parse signal set: %s"), line); 04275 04276 signum -= 4; 04277 04278 if (digit & 1) 04279 sigaddset (sigs, signum + 1); 04280 if (digit & 2) 04281 sigaddset (sigs, signum + 2); 04282 if (digit & 4) 04283 sigaddset (sigs, signum + 3); 04284 if (digit & 8) 04285 sigaddset (sigs, signum + 4); 04286 04287 p++; 04288 } 04289 } 04290 04291 /* Find process PID's pending signals from /proc/pid/status and set 04292 SIGS to match. 
*/

void
linux_proc_pending_signals (int pid, sigset_t *pending,
			    sigset_t *blocked, sigset_t *ignored)
{
  FILE *procfile;
  char buffer[PATH_MAX], fname[PATH_MAX];
  struct cleanup *cleanup;

  sigemptyset (pending);
  sigemptyset (blocked);
  sigemptyset (ignored);
  sprintf (fname, "/proc/%d/status", pid);
  procfile = gdb_fopen_cloexec (fname, "r");
  if (procfile == NULL)
    error (_("Could not open %s"), fname);
  cleanup = make_cleanup_fclose (procfile);

  while (fgets (buffer, PATH_MAX, procfile) != NULL)
    {
      /* Normal queued signals are on the SigPnd line in the status
	 file.  However, 2.6 kernels also have a "shared" pending
	 queue for delivering signals to a thread group, so check for
	 a ShdPnd line also.

	 Unfortunately some Red Hat kernels include the shared pending
	 queue but not the ShdPnd status field.  */

      if (strncmp (buffer, "SigPnd:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, pending);
      else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, pending);
      else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, blocked);
      else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, ignored);
    }

  do_cleanups (cleanup);
}

/* Implement to_xfer_partial for TARGET_OBJECT_OSDATA by delegating
   to the common /proc-based implementation.  */

static LONGEST
linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
		       const char *annex, gdb_byte *readbuf,
		       const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  gdb_assert (object == TARGET_OBJECT_OSDATA);

  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}

/* to_xfer_partial method installed over the inherited one: dispatches
   AUXV, OSDATA and SPU objects to their specialized handlers, tries
   the fast /proc path for memory, and otherwise falls back to the
   saved lower-layer method.  */

static LONGEST
linux_xfer_partial (struct target_ops *ops, enum target_object object,
		    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  LONGEST xfer;

  if (object == TARGET_OBJECT_AUXV)
    return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
			     offset, len);

  if (object == TARGET_OBJECT_OSDATA)
    return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
				  offset, len);

  if (object == TARGET_OBJECT_SPU)
    return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
				offset, len);

  /* GDB calculates all the addresses in possibly larger width of the address.
     Address width needs to be masked before its final use - either by
     linux_proc_xfer_partial or inf_ptrace_xfer_partial.

     Compare ADDR_BIT first to avoid a compiler warning on shift overflow.  */

  if (object == TARGET_OBJECT_MEMORY)
    {
      int addr_bit = gdbarch_addr_bit (target_gdbarch ());

      if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
	offset &= ((ULONGEST) 1 << addr_bit) - 1;
    }

  xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
				  offset, len);
  if (xfer != 0)
    return xfer;

  return super_xfer_partial (ops, object, annex, readbuf, writebuf,
			     offset, len);
}

/* Cleanup callback that resumes all threads matched by the ptid
   pointed to by ARG.  */

static void
cleanup_target_stop (void *arg)
{
  ptid_t *ptid = (ptid_t *) arg;

  gdb_assert (arg != NULL);

  /* Unpause all */
  target_resume (*ptid, 0, GDB_SIGNAL_0);
}

/* Implement to_static_tracepoint_markers_by_strid: stop the inferior
   and query the in-process agent for static tracepoint markers,
   keeping those whose string id matches STRID (or all of them when
   STRID is NULL).  */

static VEC(static_tracepoint_marker_p) *
linux_child_static_tracepoint_markers_by_strid (const char *strid)
{
  char s[IPA_CMD_BUF_SIZE];
  struct cleanup *old_chain;
  int pid = ptid_get_pid (inferior_ptid);
  VEC(static_tracepoint_marker_p) *markers = NULL;
  struct static_tracepoint_marker *marker = NULL;
  char *p = s;
  ptid_t ptid = ptid_build (pid, 0, 0);

  /* Pause all */
  target_stop (ptid);

  memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
  s[sizeof ("qTfSTM")] = 0;

  agent_run_command (pid, s, strlen (s) + 1);

  old_chain = make_cleanup (free_current_marker, &marker);
  make_cleanup (cleanup_target_stop, &ptid);

  /* Each reply starting with 'm' carries one or more comma-separated
     marker definitions; keep asking (qTsSTM) until the agent replies
     there are no more.  */
  while (*p++ == 'm')
    {
      if (marker == NULL)
	marker = XCNEW (struct static_tracepoint_marker);

      do
	{
	  parse_static_tracepoint_marker_definition (p, &p, marker);

	  if (strid == NULL || strcmp (strid, marker->str_id) == 0)
	    {
	      /* Ownership of MARKER moves into the vector; a fresh one
		 is allocated on the next iteration.  */
	      VEC_safe_push (static_tracepoint_marker_p,
			     markers, marker);
	      marker = NULL;
	    }
	  else
	    {
	      release_static_tracepoint_marker (marker);
	      memset (marker, 0, sizeof (*marker));
	    }
	}
      while (*p++ == ',');	/* comma-separated list */

      memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
      s[sizeof ("qTsSTM")] = 0;
      agent_run_command (pid, s, strlen (s) + 1);
      p = s;
    }

  do_cleanups (old_chain);

  return markers;
}

/* Create a prototype generic GNU/Linux target.  The client can override
   it with local methods.
*/

static void
linux_target_install_ops (struct target_ops *t)
{
  /* Hook the GNU/Linux-specific child-handling and catchpoint
     methods over the generic ptrace target T.  */
  t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
  t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
  t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
  t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
  t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
  t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
  t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
  t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
  t->to_post_startup_inferior = linux_child_post_startup_inferior;
  t->to_post_attach = linux_child_post_attach;
  t->to_follow_fork = linux_child_follow_fork;
  t->to_make_corefile_notes = linux_nat_make_corefile_notes;

  /* Remember the inherited to_xfer_partial so linux_xfer_partial can
     fall back to it.  */
  super_xfer_partial = t->to_xfer_partial;
  t->to_xfer_partial = linux_xfer_partial;

  t->to_static_tracepoint_markers_by_strid
    = linux_child_static_tracepoint_markers_by_strid;
}

/* Return a generic GNU/Linux target vector based on the plain ptrace
   target, with the Linux-specific methods installed.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *t;

  t = inf_ptrace_target ();
  linux_target_install_ops (t);

  return t;
}

/* Like linux_target, but based on the "traditional" ptrace target;
   REGISTER_U_OFFSET maps registers to offsets in the inferior's
   `struct user' area.  */

struct target_ops *
linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
{
  struct target_ops *t;

  t = inf_ptrace_trad_target (register_u_offset);
  linux_target_install_ops (t);

  return t;
}

/* target_is_async_p implementation.  */

static int
linux_nat_is_async_p (void)
{
  /* NOTE: palves 2008-03-21: We're only async when the user requests
     it explicitly with the "set target-async" command.
     Someday, linux will always be async.  */
  return target_async_permitted;
}

/* target_can_async_p implementation.
*/

static int
linux_nat_can_async_p (void)
{
  /* NOTE: palves 2008-03-21: We're only async when the user requests
     it explicitly with the "set target-async" command.
     Someday, linux will always be async.  */
  return target_async_permitted;
}

/* target_supports_non_stop implementation.  */

static int
linux_nat_supports_non_stop (void)
{
  return 1;
}

/* True if we want to support multi-process.  To be removed when GDB
   supports multi-exec.  */

int linux_multi_process = 1;

/* target_supports_multi_process implementation.  */

static int
linux_nat_supports_multi_process (void)
{
  return linux_multi_process;
}

/* target_supports_disable_randomization implementation: true only
   when the personality syscall was available at build time.  */

static int
linux_nat_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}

/* Whether GDB (as opposed to the inferior) currently owns the
   terminal while in async mode.  */

static int async_terminal_is_ours = 1;

/* target_terminal_inferior implementation.  */

static void
linux_nat_terminal_inferior (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      terminal_inferior ();
      return;
    }

  terminal_inferior ();

  /* Calls to target_terminal_*() are meant to be idempotent.  */
  if (!async_terminal_is_ours)
    return;

  /* Stop listening for stdin events while the inferior owns the
     terminal, and arrange for SIGINT to reach the inferior.  */
  delete_file_handler (input_fd);
  async_terminal_is_ours = 0;
  set_sigint_trap ();
}

/* target_terminal_ours implementation.  */

static void
linux_nat_terminal_ours (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      terminal_ours ();
      return;
    }

  /* GDB should never give the terminal to the inferior if the
     inferior is running in the background (run&, continue&, etc.),
     but claiming it sure should.  */
  terminal_ours ();

  if (async_terminal_is_ours)
    return;

  clear_sigint_trap ();
  add_file_handler (input_fd, stdin_event_handler, 0);
  async_terminal_is_ours = 1;
}

/* Callback (and its context) registered by linux_nat_async, invoked
   when a target event is reported.  */

static void (*async_client_callback) (enum inferior_event_type event_type,
				      void *context);
static void *async_client_context;

/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when any child changes state, and notify the
   event-loop; it allows us to use sigsuspend in linux_nat_wait_1
   above to wait for the arrival of a SIGCHLD.  */

static void
sigchld_handler (int signo)
{
  /* Preserve errno across the handler: only async-signal-safe work
     is done here.  */
  int old_errno = errno;

  if (debug_linux_nat)
    ui_file_write_async_safe (gdb_stdlog,
			      "sigchld\n", sizeof ("sigchld\n") - 1);

  if (signo == SIGCHLD
      && linux_nat_event_pipe[0] != -1)
    async_file_mark (); /* Let the event loop know that there are
			   events to handle.  */

  errno = old_errno;
}

/* Callback registered with the target events file descriptor.  */

static void
handle_target_event (int error, gdb_client_data client_data)
{
  (*async_client_callback) (INF_REG_EVENT, async_client_context);
}

/* Create/destroy the target events pipe.  Returns previous state.  */

static int
linux_async_pipe (int enable)
{
  int previous = (linux_nat_event_pipe[0] != -1);

  if (previous != enable)
    {
      sigset_t prev_mask;

      /* Block child signals while we create/destroy the pipe, as
	 their handler writes to it.
*/
      block_child_signals (&prev_mask);

      if (enable)
	{
	  if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
	    internal_error (__FILE__, __LINE__,
			    "creating event pipe failed.");

	  /* The pipe is written to from a signal handler; make both
	     ends non-blocking so nothing can ever wedge on it.  */
	  fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
	}
      else
	{
	  close (linux_nat_event_pipe[0]);
	  close (linux_nat_event_pipe[1]);
	  linux_nat_event_pipe[0] = -1;
	  linux_nat_event_pipe[1] = -1;
	}

      restore_child_signals_mask (&prev_mask);
    }

  return previous;
}

/* target_async implementation.  A non-NULL CALLBACK enables async
   mode; NULL disables it.  */

static void
linux_nat_async (void (*callback) (enum inferior_event_type event_type,
				   void *context), void *context)
{
  if (callback != NULL)
    {
      /* Enable: remember the client callback and start listening on
	 the event pipe.  */
      async_client_callback = callback;
      async_client_context = context;
      if (!linux_async_pipe (1))
	{
	  add_file_handler (linux_nat_event_pipe[0],
			    handle_target_event, NULL);
	  /* There may be pending events to handle.  Tell the event loop
	     to poll them.  */
	  async_file_mark ();
	}
    }
  else
    {
      /* Disable: unregister from the event loop and tear the pipe
	 down.  */
      async_client_callback = callback;
      async_client_context = context;
      delete_file_handler (linux_nat_event_pipe[0]);
      linux_async_pipe (0);
    }
  return;
}

/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
   event came out.
*/

static int
linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
{
  if (!lwp->stopped)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNSL: running -> suspending %s\n",
			    target_pid_to_str (lwp->ptid));


      if (lwp->last_resume_kind == resume_stop)
	{
	  /* A stop is already in flight for this LWP; don't send
	     another SIGSTOP.  */
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"linux-nat: already stopping LWP %ld at "
				"GDB's request\n",
				ptid_get_lwp (lwp->ptid));
	  return 0;
	}

      stop_callback (lwp, NULL);
      lwp->last_resume_kind = resume_stop;
    }
  else
    {
      /* Already known to be stopped; do nothing.  */

      if (debug_linux_nat)
	{
	  if (find_thread_ptid (lwp->ptid)->stop_requested)
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/stop_requested %s\n",
				target_pid_to_str (lwp->ptid));
	  else
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/no "
				"stop_requested yet %s\n",
				target_pid_to_str (lwp->ptid));
	}
    }
  return 0;
}

/* target_stop implementation.  In non-stop mode stop each LWP
   matching PTID individually; otherwise delegate to the lower
   layer.  */

static void
linux_nat_stop (ptid_t ptid)
{
  if (non_stop)
    iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
  else
    linux_ops->to_stop (ptid);
}

/* target_close implementation.  */

static void
linux_nat_close (void)
{
  /* Unregister from the event loop.  */
  if (linux_nat_is_async_p ())
    linux_nat_async (NULL, 0);

  if (linux_ops->to_close)
    linux_ops->to_close ();
}

/* When requests are passed down from the linux-nat layer to the
   single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
   used.  The address space pointer is stored in the inferior object,
   but the common code that is passed such ptid can't tell whether
   lwpid is a "main" process id or not (it assumes so).  We reverse
   look up the "main" process id from the lwp here.
*/

static struct address_space *
linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
{
  struct lwp_info *lwp;
  struct inferior *inf;
  int pid;

  /* NOTE(review): this first assignment is immediately overwritten by
     both branches below; it looks redundant.  */
  pid = ptid_get_lwp (ptid);
  if (ptid_get_lwp (ptid) == 0)
    {
      /* An (lwpid,0,0) ptid.  Look up the lwp object to get at the
	 tgid.  */
      lwp = find_lwp_pid (ptid);
      pid = ptid_get_pid (lwp->ptid);
    }
  else
    {
      /* A (pid,lwpid,0) ptid.  */
      pid = ptid_get_pid (ptid);
    }

  inf = find_inferior_pid (pid);
  gdb_assert (inf != NULL);
  return inf->aspace;
}

/* Return the cached value of the processor core for thread PTID.  */

static int
linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
{
  struct lwp_info *info = find_lwp_pid (ptid);

  if (info)
    return info->core;
  return -1;
}

/* Make T the GNU/Linux native target: layer the multi-threaded
   methods from this file over the single-threaded methods T already
   provides, then register it.  */

void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.
*/
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_pass_signals = linux_nat_pass_signals;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_thread_name = linux_nat_thread_name;
  t->to_has_thread_control = tc_schedlock;
  t->to_thread_address_space = linux_nat_thread_address_space;
  t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
  t->to_stopped_data_address = linux_nat_stopped_data_address;

  /* Methods for async support.  */
  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_supports_non_stop = linux_nat_supports_non_stop;
  t->to_async = linux_nat_async;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;
  t->to_close = linux_nat_close;

  /* Methods for non-stop support.  */
  t->to_stop = linux_nat_stop;

  t->to_supports_multi_process = linux_nat_supports_multi_process;

  t->to_supports_disable_randomization
    = linux_nat_supports_disable_randomization;

  t->to_core_of_thread = linux_nat_core_of_thread;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will set at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */

  add_target (t);
}

/* Register a method to call whenever a new thread is attached.
*/
void
linux_nat_set_new_thread (struct target_ops *t,
			  void (*new_thread) (struct lwp_info *))
{
  /* Save the pointer.  We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T.  */
  linux_nat_new_thread = new_thread;
}

/* See declaration in linux-nat.h.  */

void
linux_nat_set_new_fork (struct target_ops *t,
			linux_nat_new_fork_ftype *new_fork)
{
  /* Save the pointer.  */
  linux_nat_new_fork = new_fork;
}

/* See declaration in linux-nat.h.  */

void
linux_nat_set_forget_process (struct target_ops *t,
			      linux_nat_forget_process_ftype *fn)
{
  /* Save the pointer.  */
  linux_nat_forget_process_hook = fn;
}

/* See declaration in linux-nat.h.  Invoke the registered
   forget-process hook, if any, for PID.  */

void
linux_nat_forget_process (pid_t pid)
{
  if (linux_nat_forget_process_hook != NULL)
    linux_nat_forget_process_hook (pid);
}

/* Register a method that converts a siginfo object between the layout
   that ptrace returns, and the layout in the architecture of the
   inferior.  */
void
linux_nat_set_siginfo_fixup (struct target_ops *t,
			     int (*siginfo_fixup) (siginfo_t *,
						   gdb_byte *,
						   int))
{
  /* Save the pointer.  */
  linux_nat_siginfo_fixup = siginfo_fixup;
}

/* Register a method to call prior to resuming a thread.  */

void
linux_nat_set_prepare_to_resume (struct target_ops *t,
				 void (*prepare_to_resume) (struct lwp_info *))
{
  /* Save the pointer.  */
  linux_nat_prepare_to_resume = prepare_to_resume;
}

/* See linux-nat.h.
*/

int
linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
{
  int pid;

  /* Prefer the LWP id; fall back to the process id for a plain
     (pid,0,0) ptid.  */
  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
  if (errno != 0)
    {
      /* On failure, zero the caller's buffer and report failure.  */
      memset (siginfo, 0, sizeof (*siginfo));
      return 0;
    }
  return 1;
}

/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_linux_nat;

void
_initialize_linux_nat (void)
{
  add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
			     &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			     NULL,
			     show_debug_linux_nat,
			     &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);
}


/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
   the GNU/Linux Threads library and therefore doesn't really belong
   here.  */

/* Read variable NAME in the target and return its value if found.
   Otherwise return zero.  It is assumed that the type of the variable
   is `int'.
*/

static int
get_signo (const char *name)
{
  struct minimal_symbol *ms;
  int signo;

  ms = lookup_minimal_symbol (name, NULL, NULL);
  if (ms == NULL)
    return 0;

  if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
			  sizeof (signo)) != 0)
    return 0;

  return signo;
}

/* Return the set of signals used by the threads library in *SET.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;

  sigemptyset (&blocked_mask);
  sigemptyset (set);

  /* Ask the inferior's threads library which signals it uses, by
     reading its well-known variables.  */
  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some legacy
     cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
     not provide any way for the debugger to query the signal numbers -
     fortunately they don't change!  */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}