GDBserver
/home/stan/gdb/src/gdb/gdbserver/linux-low.c
Go to the documentation of this file.
00001 /* Low level interface to ptrace, for the remote server for GDB.
00002    Copyright (C) 1995-2013 Free Software Foundation, Inc.
00003 
00004    This file is part of GDB.
00005 
00006    This program is free software; you can redistribute it and/or modify
00007    it under the terms of the GNU General Public License as published by
00008    the Free Software Foundation; either version 3 of the License, or
00009    (at your option) any later version.
00010 
00011    This program is distributed in the hope that it will be useful,
00012    but WITHOUT ANY WARRANTY; without even the implied warranty of
00013    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00014    GNU General Public License for more details.
00015 
00016    You should have received a copy of the GNU General Public License
00017    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
00018 
00019 #include "server.h"
00020 #include "linux-low.h"
00021 #include "linux-osdata.h"
00022 #include "agent.h"
00023 
00024 #include "nat/linux-nat.h"
00025 #include "nat/linux-waitpid.h"
00026 #include "gdb_wait.h"
00027 #include <stdio.h>
00028 #include <sys/ptrace.h>
00029 #include "linux-ptrace.h"
00030 #include "linux-procfs.h"
00031 #include <signal.h>
00032 #include <sys/ioctl.h>
00033 #include <fcntl.h>
00034 #include <string.h>
00035 #include <stdlib.h>
00036 #include <unistd.h>
00037 #include <errno.h>
00038 #include <sys/syscall.h>
00039 #include <sched.h>
00040 #include <ctype.h>
00041 #include <pwd.h>
00042 #include <sys/types.h>
00043 #include <dirent.h>
00044 #include "gdb_stat.h"
00045 #include <sys/vfs.h>
00046 #include <sys/uio.h>
00047 #include "filestuff.h"
00048 #include "tracepoint.h"
00049 #include "hostio.h"
00050 #ifndef ELFMAG0
00051 /* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
00052    then ELFMAG0 will have been defined.  If it didn't get included by
00053    gdb_proc_service.h then including it will likely introduce a duplicate
00054    definition of elf_fpregset_t.  */
00055 #include <elf.h>
00056 #endif
00057 
00058 #ifndef SPUFS_MAGIC
00059 #define SPUFS_MAGIC 0x23c9b64e
00060 #endif
00061 
00062 #ifdef HAVE_PERSONALITY
00063 # include <sys/personality.h>
00064 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
00065 #  define ADDR_NO_RANDOMIZE 0x0040000
00066 # endif
00067 #endif
00068 
00069 #ifndef O_LARGEFILE
00070 #define O_LARGEFILE 0
00071 #endif
00072 
00073 #ifndef W_STOPCODE
00074 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
00075 #endif
00076 
00077 /* This is the kernel's hard limit.  Not to be confused with
00078    SIGRTMIN.  */
00079 #ifndef __SIGRTMIN
00080 #define __SIGRTMIN 32
00081 #endif
00082 
00083 /* Some targets did not define these ptrace constants from the start,
00084    so gdbserver defines them locally here.  In the future, these may
00085    be removed after they are added to asm/ptrace.h.  */
00086 #if !(defined(PT_TEXT_ADDR) \
00087       || defined(PT_DATA_ADDR) \
00088       || defined(PT_TEXT_END_ADDR))
00089 #if defined(__mcoldfire__)
00090 /* These are still undefined in 3.10 kernels.  */
00091 #define PT_TEXT_ADDR 49*4
00092 #define PT_DATA_ADDR 50*4
00093 #define PT_TEXT_END_ADDR  51*4
00094 /* BFIN already defines these since at least 2.6.32 kernels.  */
00095 #elif defined(BFIN)
00096 #define PT_TEXT_ADDR 220
00097 #define PT_TEXT_END_ADDR 224
00098 #define PT_DATA_ADDR 228
00099 /* These are still undefined in 3.10 kernels.  */
00100 #elif defined(__TMS320C6X__)
00101 #define PT_TEXT_ADDR     (0x10000*4)
00102 #define PT_DATA_ADDR     (0x10004*4)
00103 #define PT_TEXT_END_ADDR (0x10008*4)
00104 #endif
00105 #endif
00106 
00107 #ifdef HAVE_LINUX_BTRACE
00108 # include "linux-btrace.h"
00109 #endif
00110 
00111 #ifndef HAVE_ELF32_AUXV_T
00112 /* Copied from glibc's elf.h.  */
00113 typedef struct
00114 {
00115   uint32_t a_type;              /* Entry type */
00116   union
00117     {
00118       uint32_t a_val;           /* Integer value */
00119       /* We use to have pointer elements added here.  We cannot do that,
00120          though, since it does not work when using 32-bit definitions
00121          on 64-bit platforms and vice versa.  */
00122     } a_un;
00123 } Elf32_auxv_t;
00124 #endif
00125 
00126 #ifndef HAVE_ELF64_AUXV_T
00127 /* Copied from glibc's elf.h.  */
00128 typedef struct
00129 {
00130   uint64_t a_type;              /* Entry type */
00131   union
00132     {
00133       uint64_t a_val;           /* Integer value */
00134       /* We use to have pointer elements added here.  We cannot do that,
00135          though, since it does not work when using 32-bit definitions
00136          on 64-bit platforms and vice versa.  */
00137     } a_un;
00138 } Elf64_auxv_t;
00139 #endif
00140 
00141 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
00142    representation of the thread ID.
00143 
00144    ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
00145    the same as the LWP ID.
00146 
00147    ``all_processes'' is keyed by the "overall process ID", which
00148    GNU/Linux calls tgid, "thread group ID".  */
00149 
00150 struct inferior_list all_lwps;
00151 
00152 /* A list of all unknown processes which receive stop signals.  Some
00153    other process will presumably claim each of these as forked
00154    children momentarily.  */
00155 
00156 struct simple_pid_list
00157 {
00158   /* The process ID.  */
00159   int pid;
00160 
00161   /* The status as reported by waitpid.  */
00162   int status;
00163 
00164   /* Next in chain.  */
00165   struct simple_pid_list *next;
00166 };
00167 struct simple_pid_list *stopped_pids;
00168 
00169 /* Trivial list manipulation functions to keep track of a list of new
00170    stopped processes.  */
00171 
00172 static void
00173 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
00174 {
00175   struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
00176 
00177   new_pid->pid = pid;
00178   new_pid->status = status;
00179   new_pid->next = *listp;
00180   *listp = new_pid;
00181 }
00182 
00183 static int
00184 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
00185 {
00186   struct simple_pid_list **p;
00187 
00188   for (p = listp; *p != NULL; p = &(*p)->next)
00189     if ((*p)->pid == pid)
00190       {
00191         struct simple_pid_list *next = (*p)->next;
00192 
00193         *statusp = (*p)->status;
00194         xfree (*p);
00195         *p = next;
00196         return 1;
00197       }
00198   return 0;
00199 }
00200 
00201 enum stopping_threads_kind
00202   {
00203     /* Not stopping threads presently.  */
00204     NOT_STOPPING_THREADS,
00205 
00206     /* Stopping threads.  */
00207     STOPPING_THREADS,
00208 
00209     /* Stopping and suspending threads.  */
00210     STOPPING_AND_SUSPENDING_THREADS
00211   };
00212 
00213 /* This is set while stop_all_lwps is in effect.  */
00214 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
00215 
00216 /* FIXME make into a target method?  */
00217 int using_threads = 1;
00218 
00219 /* True if we're presently stabilizing threads (moving them out of
00220    jump pads).  */
00221 static int stabilizing_threads;
00222 
00223 static void linux_resume_one_lwp (struct lwp_info *lwp,
00224                                   int step, int signal, siginfo_t *info);
00225 static void linux_resume (struct thread_resume *resume_info, size_t n);
00226 static void stop_all_lwps (int suspend, struct lwp_info *except);
00227 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
00228 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
00229 static void *add_lwp (ptid_t ptid);
00230 static int linux_stopped_by_watchpoint (void);
00231 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00232 static void proceed_all_lwps (void);
00233 static int finish_step_over (struct lwp_info *lwp);
00234 static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
00235 static int kill_lwp (unsigned long lwpid, int signo);
00236 
00237 /* True if the low target can hardware single-step.  Such targets
00238    don't need a BREAKPOINT_REINSERT_ADDR callback.  */
00239 
00240 static int
00241 can_hardware_single_step (void)
00242 {
00243   return (the_low_target.breakpoint_reinsert_addr == NULL);
00244 }
00245 
00246 /* True if the low target supports memory breakpoints.  If so, we'll
00247    have a GET_PC implementation.  */
00248 
00249 static int
00250 supports_breakpoints (void)
00251 {
00252   return (the_low_target.get_pc != NULL);
00253 }
00254 
00255 /* Returns true if this target can support fast tracepoints.  This
00256    does not mean that the in-process agent has been loaded in the
00257    inferior.  */
00258 
00259 static int
00260 supports_fast_tracepoints (void)
00261 {
00262   return the_low_target.install_fast_tracepoint_jump_pad != NULL;
00263 }
00264 
00265 /* True if LWP is stopped in its stepping range.  */
00266 
00267 static int
00268 lwp_in_step_range (struct lwp_info *lwp)
00269 {
00270   CORE_ADDR pc = lwp->stop_pc;
00271 
00272   return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
00273 }
00274 
00275 struct pending_signals
00276 {
00277   int signal;
00278   siginfo_t info;
00279   struct pending_signals *prev;
00280 };
00281 
00282 /* The read/write ends of the pipe registered as waitable file in the
00283    event loop.  */
00284 static int linux_event_pipe[2] = { -1, -1 };
00285 
00286 /* True if we're currently in async mode.  */
00287 #define target_is_async_p() (linux_event_pipe[0] != -1)
00288 
00289 static void send_sigstop (struct lwp_info *lwp);
00290 static void wait_for_sigstop (struct inferior_list_entry *entry);
00291 
00292 /* Return non-zero if HEADER is a 64-bit ELF file.  */
00293 
00294 static int
00295 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
00296 {
00297   if (header->e_ident[EI_MAG0] == ELFMAG0
00298       && header->e_ident[EI_MAG1] == ELFMAG1
00299       && header->e_ident[EI_MAG2] == ELFMAG2
00300       && header->e_ident[EI_MAG3] == ELFMAG3)
00301     {
00302       *machine = header->e_machine;
00303       return header->e_ident[EI_CLASS] == ELFCLASS64;
00304 
00305     }
00306   *machine = EM_NONE;
00307   return -1;
00308 }
00309 
00310 /* Return non-zero if FILE is a 64-bit ELF file,
00311    zero if the file is not a 64-bit ELF file,
00312    and -1 if the file is not accessible or doesn't exist.  */
00313 
00314 static int
00315 elf_64_file_p (const char *file, unsigned int *machine)
00316 {
00317   Elf64_Ehdr header;
00318   int fd;
00319 
00320   fd = open (file, O_RDONLY);
00321   if (fd < 0)
00322     return -1;
00323 
00324   if (read (fd, &header, sizeof (header)) != sizeof (header))
00325     {
00326       close (fd);
00327       return 0;
00328     }
00329   close (fd);
00330 
00331   return elf_64_header_p (&header, machine);
00332 }
00333 
00334 /* Accepts an integer PID; Returns true if the executable PID is
00335    running is a 64-bit ELF file..  */
00336 
00337 int
00338 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
00339 {
00340   char file[PATH_MAX];
00341 
00342   sprintf (file, "/proc/%d/exe", pid);
00343   return elf_64_file_p (file, machine);
00344 }
00345 
00346 static void
00347 delete_lwp (struct lwp_info *lwp)
00348 {
00349   remove_thread (get_lwp_thread (lwp));
00350   remove_inferior (&all_lwps, &lwp->head);
00351   free (lwp->arch_private);
00352   free (lwp);
00353 }
00354 
00355 /* Add a process to the common process list, and set its private
00356    data.  */
00357 
00358 static struct process_info *
00359 linux_add_process (int pid, int attached)
00360 {
00361   struct process_info *proc;
00362 
00363   proc = add_process (pid, attached);
00364   proc->private = xcalloc (1, sizeof (*proc->private));
00365 
00366   /* Set the arch when the first LWP stops.  */
00367   proc->private->new_inferior = 1;
00368 
00369   if (the_low_target.new_process != NULL)
00370     proc->private->arch_private = the_low_target.new_process ();
00371 
00372   return proc;
00373 }
00374 
00375 /* Handle a GNU/Linux extended wait response.  If we see a clone
00376    event, we need to add the new LWP to our list (and not report the
00377    trap to higher layers).  */
00378 
00379 static void
00380 handle_extended_wait (struct lwp_info *event_child, int wstat)
00381 {
00382   int event = wstat >> 16;
00383   struct lwp_info *new_lwp;
00384 
00385   if (event == PTRACE_EVENT_CLONE)
00386     {
00387       ptid_t ptid;
00388       unsigned long new_pid;
00389       int ret, status;
00390 
00391       ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), (PTRACE_TYPE_ARG3) 0,
00392               &new_pid);
00393 
00394       /* If we haven't already seen the new PID stop, wait for it now.  */
00395       if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
00396         {
00397           /* The new child has a pending SIGSTOP.  We can't affect it until it
00398              hits the SIGSTOP, but we're already attached.  */
00399 
00400           ret = my_waitpid (new_pid, &status, __WALL);
00401 
00402           if (ret == -1)
00403             perror_with_name ("waiting for new child");
00404           else if (ret != new_pid)
00405             warning ("wait returned unexpected PID %d", ret);
00406           else if (!WIFSTOPPED (status))
00407             warning ("wait returned unexpected status 0x%x", status);
00408         }
00409 
00410       ptid = ptid_build (pid_of (event_child), new_pid, 0);
00411       new_lwp = (struct lwp_info *) add_lwp (ptid);
00412       add_thread (ptid, new_lwp);
00413 
00414       /* Either we're going to immediately resume the new thread
00415          or leave it stopped.  linux_resume_one_lwp is a nop if it
00416          thinks the thread is currently running, so set this first
00417          before calling linux_resume_one_lwp.  */
00418       new_lwp->stopped = 1;
00419 
00420      /* If we're suspending all threads, leave this one suspended
00421         too.  */
00422       if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
00423         new_lwp->suspended = 1;
00424 
00425       /* Normally we will get the pending SIGSTOP.  But in some cases
00426          we might get another signal delivered to the group first.
00427          If we do get another signal, be sure not to lose it.  */
00428       if (WSTOPSIG (status) == SIGSTOP)
00429         {
00430           if (stopping_threads != NOT_STOPPING_THREADS)
00431             new_lwp->stop_pc = get_stop_pc (new_lwp);
00432           else
00433             linux_resume_one_lwp (new_lwp, 0, 0, NULL);
00434         }
00435       else
00436         {
00437           new_lwp->stop_expected = 1;
00438 
00439           if (stopping_threads != NOT_STOPPING_THREADS)
00440             {
00441               new_lwp->stop_pc = get_stop_pc (new_lwp);
00442               new_lwp->status_pending_p = 1;
00443               new_lwp->status_pending = status;
00444             }
00445           else
00446             /* Pass the signal on.  This is what GDB does - except
00447                shouldn't we really report it instead?  */
00448             linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
00449         }
00450 
00451       /* Always resume the current thread.  If we are stopping
00452          threads, it will have a pending SIGSTOP; we may as well
00453          collect it now.  */
00454       linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
00455     }
00456 }
00457 
00458 /* Return the PC as read from the regcache of LWP, without any
00459    adjustment.  */
00460 
00461 static CORE_ADDR
00462 get_pc (struct lwp_info *lwp)
00463 {
00464   struct thread_info *saved_inferior;
00465   struct regcache *regcache;
00466   CORE_ADDR pc;
00467 
00468   if (the_low_target.get_pc == NULL)
00469     return 0;
00470 
00471   saved_inferior = current_inferior;
00472   current_inferior = get_lwp_thread (lwp);
00473 
00474   regcache = get_thread_regcache (current_inferior, 1);
00475   pc = (*the_low_target.get_pc) (regcache);
00476 
00477   if (debug_threads)
00478     fprintf (stderr, "pc is 0x%lx\n", (long) pc);
00479 
00480   current_inferior = saved_inferior;
00481   return pc;
00482 }
00483 
00484 /* This function should only be called if LWP got a SIGTRAP.
00485    The SIGTRAP could mean several things.
00486 
00487    On i386, where decr_pc_after_break is non-zero:
00488    If we were single-stepping this process using PTRACE_SINGLESTEP,
00489    we will get only the one SIGTRAP (even if the instruction we
00490    stepped over was a breakpoint).  The value of $eip will be the
00491    next instruction.
00492    If we continue the process using PTRACE_CONT, we will get a
00493    SIGTRAP when we hit a breakpoint.  The value of $eip will be
00494    the instruction after the breakpoint (i.e. needs to be
00495    decremented).  If we report the SIGTRAP to GDB, we must also
00496    report the undecremented PC.  If we cancel the SIGTRAP, we
00497    must resume at the decremented PC.
00498 
00499    (Presumably, not yet tested) On a non-decr_pc_after_break machine
00500    with hardware or kernel single-step:
00501    If we single-step over a breakpoint instruction, our PC will
00502    point at the following instruction.  If we continue and hit a
00503    breakpoint instruction, our PC will point at the breakpoint
00504    instruction.  */
00505 
00506 static CORE_ADDR
00507 get_stop_pc (struct lwp_info *lwp)
00508 {
00509   CORE_ADDR stop_pc;
00510 
00511   if (the_low_target.get_pc == NULL)
00512     return 0;
00513 
00514   stop_pc = get_pc (lwp);
00515 
00516   if (WSTOPSIG (lwp->last_status) == SIGTRAP
00517       && !lwp->stepping
00518       && !lwp->stopped_by_watchpoint
00519       && lwp->last_status >> 16 == 0)
00520     stop_pc -= the_low_target.decr_pc_after_break;
00521 
00522   if (debug_threads)
00523     fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
00524 
00525   return stop_pc;
00526 }
00527 
00528 static void *
00529 add_lwp (ptid_t ptid)
00530 {
00531   struct lwp_info *lwp;
00532 
00533   lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
00534   memset (lwp, 0, sizeof (*lwp));
00535 
00536   lwp->head.id = ptid;
00537 
00538   if (the_low_target.new_thread != NULL)
00539     lwp->arch_private = the_low_target.new_thread ();
00540 
00541   add_inferior_to_list (&all_lwps, &lwp->head);
00542 
00543   return lwp;
00544 }
00545 
00546 /* Start an inferior process and returns its pid.
00547    ALLARGS is a vector of program-name and args. */
00548 
00549 static int
00550 linux_create_inferior (char *program, char **allargs)
00551 {
00552 #ifdef HAVE_PERSONALITY
00553   int personality_orig = 0, personality_set = 0;
00554 #endif
00555   struct lwp_info *new_lwp;
00556   int pid;
00557   ptid_t ptid;
00558 
00559 #ifdef HAVE_PERSONALITY
00560   if (disable_randomization)
00561     {
00562       errno = 0;
00563       personality_orig = personality (0xffffffff);
00564       if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
00565         {
00566           personality_set = 1;
00567           personality (personality_orig | ADDR_NO_RANDOMIZE);
00568         }
00569       if (errno != 0 || (personality_set
00570                          && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
00571         warning ("Error disabling address space randomization: %s",
00572                  strerror (errno));
00573     }
00574 #endif
00575 
00576 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
00577   pid = vfork ();
00578 #else
00579   pid = fork ();
00580 #endif
00581   if (pid < 0)
00582     perror_with_name ("fork");
00583 
00584   if (pid == 0)
00585     {
00586       close_most_fds ();
00587       ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
00588 
00589 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
00590       signal (__SIGRTMIN + 1, SIG_DFL);
00591 #endif
00592 
00593       setpgid (0, 0);
00594 
00595       /* If gdbserver is connected to gdb via stdio, redirect the inferior's
00596          stdout to stderr so that inferior i/o doesn't corrupt the connection.
00597          Also, redirect stdin to /dev/null.  */
00598       if (remote_connection_is_stdio ())
00599         {
00600           close (0);
00601           open ("/dev/null", O_RDONLY);
00602           dup2 (2, 1);
00603           if (write (2, "stdin/stdout redirected\n",
00604                      sizeof ("stdin/stdout redirected\n") - 1) < 0)
00605             {
00606               /* Errors ignored.  */;
00607             }
00608         }
00609 
00610       execv (program, allargs);
00611       if (errno == ENOENT)
00612         execvp (program, allargs);
00613 
00614       fprintf (stderr, "Cannot exec %s: %s.\n", program,
00615                strerror (errno));
00616       fflush (stderr);
00617       _exit (0177);
00618     }
00619 
00620 #ifdef HAVE_PERSONALITY
00621   if (personality_set)
00622     {
00623       errno = 0;
00624       personality (personality_orig);
00625       if (errno != 0)
00626         warning ("Error restoring address space randomization: %s",
00627                  strerror (errno));
00628     }
00629 #endif
00630 
00631   linux_add_process (pid, 0);
00632 
00633   ptid = ptid_build (pid, pid, 0);
00634   new_lwp = add_lwp (ptid);
00635   add_thread (ptid, new_lwp);
00636   new_lwp->must_set_ptrace_flags = 1;
00637 
00638   return pid;
00639 }
00640 
00641 /* Attach to an inferior process.  */
00642 
00643 static void
00644 linux_attach_lwp_1 (unsigned long lwpid, int initial)
00645 {
00646   ptid_t ptid;
00647   struct lwp_info *new_lwp;
00648 
00649   if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
00650       != 0)
00651     {
00652       struct buffer buffer;
00653 
00654       if (!initial)
00655         {
00656           /* If we fail to attach to an LWP, just warn.  */
00657           fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
00658                    strerror (errno), errno);
00659           fflush (stderr);
00660           return;
00661         }
00662 
00663       /* If we fail to attach to a process, report an error.  */
00664       buffer_init (&buffer);
00665       linux_ptrace_attach_warnings (lwpid, &buffer);
00666       buffer_grow_str0 (&buffer, "");
00667       error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
00668              lwpid, strerror (errno), errno);
00669     }
00670 
00671   if (initial)
00672     /* If lwp is the tgid, we handle adding existing threads later.
00673        Otherwise we just add lwp without bothering about any other
00674        threads.  */
00675     ptid = ptid_build (lwpid, lwpid, 0);
00676   else
00677     {
00678       /* Note that extracting the pid from the current inferior is
00679          safe, since we're always called in the context of the same
00680          process as this new thread.  */
00681       int pid = pid_of (get_thread_lwp (current_inferior));
00682       ptid = ptid_build (pid, lwpid, 0);
00683     }
00684 
00685   new_lwp = (struct lwp_info *) add_lwp (ptid);
00686   add_thread (ptid, new_lwp);
00687 
00688   /* We need to wait for SIGSTOP before being able to make the next
00689      ptrace call on this LWP.  */
00690   new_lwp->must_set_ptrace_flags = 1;
00691 
00692   if (linux_proc_pid_is_stopped (lwpid))
00693     {
00694       if (debug_threads)
00695         fprintf (stderr,
00696                  "Attached to a stopped process\n");
00697 
00698       /* The process is definitely stopped.  It is in a job control
00699          stop, unless the kernel predates the TASK_STOPPED /
00700          TASK_TRACED distinction, in which case it might be in a
00701          ptrace stop.  Make sure it is in a ptrace stop; from there we
00702          can kill it, signal it, et cetera.
00703 
00704          First make sure there is a pending SIGSTOP.  Since we are
00705          already attached, the process can not transition from stopped
00706          to running without a PTRACE_CONT; so we know this signal will
00707          go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
00708          probably already in the queue (unless this kernel is old
00709          enough to use TASK_STOPPED for ptrace stops); but since
00710          SIGSTOP is not an RT signal, it can only be queued once.  */
00711       kill_lwp (lwpid, SIGSTOP);
00712 
00713       /* Finally, resume the stopped process.  This will deliver the
00714          SIGSTOP (or a higher priority signal, just like normal
00715          PTRACE_ATTACH), which we'll catch later on.  */
00716       ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
00717     }
00718 
00719   /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
00720      brings it to a halt.
00721 
00722      There are several cases to consider here:
00723 
00724      1) gdbserver has already attached to the process and is being notified
00725         of a new thread that is being created.
00726         In this case we should ignore that SIGSTOP and resume the
00727         process.  This is handled below by setting stop_expected = 1,
00728         and the fact that add_thread sets last_resume_kind ==
00729         resume_continue.
00730 
00731      2) This is the first thread (the process thread), and we're attaching
00732         to it via attach_inferior.
00733         In this case we want the process thread to stop.
00734         This is handled by having linux_attach set last_resume_kind ==
00735         resume_stop after we return.
00736 
00737         If the pid we are attaching to is also the tgid, we attach to and
00738         stop all the existing threads.  Otherwise, we attach to pid and
00739         ignore any other threads in the same group as this pid.
00740 
00741      3) GDB is connecting to gdbserver and is requesting an enumeration of all
00742         existing threads.
00743         In this case we want the thread to stop.
00744         FIXME: This case is currently not properly handled.
00745         We should wait for the SIGSTOP but don't.  Things work apparently
00746         because enough time passes between when we ptrace (ATTACH) and when
00747         gdb makes the next ptrace call on the thread.
00748 
00749      On the other hand, if we are currently trying to stop all threads, we
00750      should treat the new thread as if we had sent it a SIGSTOP.  This works
00751      because we are guaranteed that the add_lwp call above added us to the
00752      end of the list, and so the new thread has not yet reached
00753      wait_for_sigstop (but will).  */
00754   new_lwp->stop_expected = 1;
00755 }
00756 
00757 void
00758 linux_attach_lwp (unsigned long lwpid)
00759 {
00760   linux_attach_lwp_1 (lwpid, 0);
00761 }
00762 
00763 /* Attach to PID.  If PID is the tgid, attach to it and all
00764    of its threads.  */
00765 
00766 static int
00767 linux_attach (unsigned long pid)
00768 {
00769   /* Attach to PID.  We will check for other threads
00770      soon.  */
00771   linux_attach_lwp_1 (pid, 1);
00772   linux_add_process (pid, 1);
00773 
00774   if (!non_stop)
00775     {
00776       struct thread_info *thread;
00777 
00778      /* Don't ignore the initial SIGSTOP if we just attached to this
00779         process.  It will be collected by wait shortly.  */
00780       thread = find_thread_ptid (ptid_build (pid, pid, 0));
00781       thread->last_resume_kind = resume_stop;
00782     }
00783 
00784   if (linux_proc_get_tgid (pid) == pid)
00785     {
00786       DIR *dir;
00787       char pathname[128];
00788 
00789       sprintf (pathname, "/proc/%ld/task", pid);
00790 
00791       dir = opendir (pathname);
00792 
00793       if (!dir)
00794         {
00795           fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
00796           fflush (stderr);
00797         }
00798       else
00799         {
00800           /* At this point we attached to the tgid.  Scan the task for
00801              existing threads.  */
00802           unsigned long lwp;
00803           int new_threads_found;
00804           int iterations = 0;
00805           struct dirent *dp;
00806 
00807           while (iterations < 2)
00808             {
00809               new_threads_found = 0;
00810               /* Add all the other threads.  While we go through the
00811                  threads, new threads may be spawned.  Cycle through
00812                  the list of threads until we have done two iterations without
00813                  finding new threads.  */
00814               while ((dp = readdir (dir)) != NULL)
00815                 {
00816                   /* Fetch one lwp.  */
00817                   lwp = strtoul (dp->d_name, NULL, 10);
00818 
00819                   /* Is this a new thread?  */
00820                   if (lwp
00821                       && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
00822                     {
00823                       linux_attach_lwp_1 (lwp, 0);
00824                       new_threads_found++;
00825 
00826                       if (debug_threads)
00827                         fprintf (stderr, "\
00828 Found and attached to new lwp %ld\n", lwp);
00829                     }
00830                 }
00831 
00832               if (!new_threads_found)
00833                 iterations++;
00834               else
00835                 iterations = 0;
00836 
00837               rewinddir (dir);
00838             }
00839           closedir (dir);
00840         }
00841     }
00842 
00843   return 0;
00844 }
00845 
00846 struct counter
00847 {
00848   int pid;
00849   int count;
00850 };
00851 
00852 static int
00853 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
00854 {
00855   struct counter *counter = args;
00856 
00857   if (ptid_get_pid (entry->id) == counter->pid)
00858     {
00859       if (++counter->count > 1)
00860         return 1;
00861     }
00862 
00863   return 0;
00864 }
00865 
00866 static int
00867 last_thread_of_process_p (struct thread_info *thread)
00868 {
00869   ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
00870   int pid = ptid_get_pid (ptid);
00871   struct counter counter = { pid , 0 };
00872 
00873   return (find_inferior (&all_threads,
00874                          second_thread_of_pid_p, &counter) == NULL);
00875 }
00876 
00877 /* Kill LWP.  */
00878 
00879 static void
00880 linux_kill_one_lwp (struct lwp_info *lwp)
00881 {
00882   int pid = lwpid_of (lwp);
00883 
00884   /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
00885      there is no signal context, and ptrace(PTRACE_KILL) (or
00886      ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
00887      ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
00888      alternative is to kill with SIGKILL.  We only need one SIGKILL
00889      per process, not one for each thread.  But since we still support
00890      linuxthreads, and we also support debugging programs using raw
00891      clone without CLONE_THREAD, we send one for each thread.  For
00892      years, we used PTRACE_KILL only, so we're being a bit paranoid
00893      about some old kernels where PTRACE_KILL might work better
00894      (dubious if there are any such, but that's why it's paranoia), so
00895      we try SIGKILL first, PTRACE_KILL second, and so we're fine
00896      everywhere.  */
00897 
00898   errno = 0;
00899   kill (pid, SIGKILL);
00900   if (debug_threads)
00901     fprintf (stderr,
00902              "LKL:  kill (SIGKILL) %s, 0, 0 (%s)\n",
00903              target_pid_to_str (ptid_of (lwp)),
00904              errno ? strerror (errno) : "OK");
00905 
00906   errno = 0;
00907   ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
00908   if (debug_threads)
00909     fprintf (stderr,
00910              "LKL:  PTRACE_KILL %s, 0, 0 (%s)\n",
00911              target_pid_to_str (ptid_of (lwp)),
00912              errno ? strerror (errno) : "OK");
00913 }
00914 
00915 /* Callback for `find_inferior'.  Kills an lwp of a given process,
00916    except the leader.  */
00917 
00918 static int
00919 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
00920 {
00921   struct thread_info *thread = (struct thread_info *) entry;
00922   struct lwp_info *lwp = get_thread_lwp (thread);
00923   int wstat;
00924   int pid = * (int *) args;
00925 
00926   if (ptid_get_pid (entry->id) != pid)
00927     return 0;
00928 
00929   /* We avoid killing the first thread here, because of a Linux kernel (at
00930      least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
00931      the children get a chance to be reaped, it will remain a zombie
00932      forever.  */
00933 
00934   if (lwpid_of (lwp) == pid)
00935     {
00936       if (debug_threads)
00937         fprintf (stderr, "lkop: is last of process %s\n",
00938                  target_pid_to_str (entry->id));
00939       return 0;
00940     }
00941 
00942   do
00943     {
00944       linux_kill_one_lwp (lwp);
00945 
00946       /* Make sure it died.  The loop is most likely unnecessary.  */
00947       pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
00948     } while (pid > 0 && WIFSTOPPED (wstat));
00949 
00950   return 0;
00951 }
00952 
00953 static int
00954 linux_kill (int pid)
00955 {
00956   struct process_info *process;
00957   struct lwp_info *lwp;
00958   int wstat;
00959   int lwpid;
00960 
00961   process = find_process_pid (pid);
00962   if (process == NULL)
00963     return -1;
00964 
00965   /* If we're killing a running inferior, make sure it is stopped
00966      first, as PTRACE_KILL will not work otherwise.  */
00967   stop_all_lwps (0, NULL);
00968 
00969   find_inferior (&all_threads, kill_one_lwp_callback , &pid);
00970 
00971   /* See the comment in linux_kill_one_lwp.  We did not kill the first
00972      thread in the list, so do so now.  */
00973   lwp = find_lwp_pid (pid_to_ptid (pid));
00974 
00975   if (lwp == NULL)
00976     {
00977       if (debug_threads)
00978         fprintf (stderr, "lk_1: cannot find lwp %ld, for pid: %d\n",
00979                  lwpid_of (lwp), pid);
00980     }
00981   else
00982     {
00983       if (debug_threads)
00984         fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
00985                  lwpid_of (lwp), pid);
00986 
00987       do
00988         {
00989           linux_kill_one_lwp (lwp);
00990 
00991           /* Make sure it died.  The loop is most likely unnecessary.  */
00992           lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
00993         } while (lwpid > 0 && WIFSTOPPED (wstat));
00994     }
00995 
00996   the_target->mourn (process);
00997 
00998   /* Since we presently can only stop all lwps of all processes, we
00999      need to unstop lwps of other processes.  */
01000   unstop_all_lwps (0, NULL);
01001   return 0;
01002 }
01003 
01004 /* Get pending signal of THREAD, for detaching purposes.  This is the
01005    signal the thread last stopped for, which we need to deliver to the
01006    thread when detaching, otherwise, it'd be suppressed/lost.  */
01007 
01008 static int
01009 get_detach_signal (struct thread_info *thread)
01010 {
01011   enum gdb_signal signo = GDB_SIGNAL_0;
01012   int status;
01013   struct lwp_info *lp = get_thread_lwp (thread);
01014 
01015   if (lp->status_pending_p)
01016     status = lp->status_pending;
01017   else
01018     {
01019       /* If the thread had been suspended by gdbserver, and it stopped
01020          cleanly, then it'll have stopped with SIGSTOP.  But we don't
01021          want to deliver that SIGSTOP.  */
01022       if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
01023           || thread->last_status.value.sig == GDB_SIGNAL_0)
01024         return 0;
01025 
01026       /* Otherwise, we may need to deliver the signal we
01027          intercepted.  */
01028       status = lp->last_status;
01029     }
01030 
01031   if (!WIFSTOPPED (status))
01032     {
01033       if (debug_threads)
01034         fprintf (stderr,
01035                  "GPS: lwp %s hasn't stopped: no pending signal\n",
01036                  target_pid_to_str (ptid_of (lp)));
01037       return 0;
01038     }
01039 
01040   /* Extended wait statuses aren't real SIGTRAPs.  */
01041   if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
01042     {
01043       if (debug_threads)
01044         fprintf (stderr,
01045                  "GPS: lwp %s had stopped with extended "
01046                  "status: no pending signal\n",
01047                  target_pid_to_str (ptid_of (lp)));
01048       return 0;
01049     }
01050 
01051   signo = gdb_signal_from_host (WSTOPSIG (status));
01052 
01053   if (program_signals_p && !program_signals[signo])
01054     {
01055       if (debug_threads)
01056         fprintf (stderr,
01057                  "GPS: lwp %s had signal %s, but it is in nopass state\n",
01058                  target_pid_to_str (ptid_of (lp)),
01059                  gdb_signal_to_string (signo));
01060       return 0;
01061     }
01062   else if (!program_signals_p
01063            /* If we have no way to know which signals GDB does not
01064               want to have passed to the program, assume
01065               SIGTRAP/SIGINT, which is GDB's default.  */
01066            && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
01067     {
01068       if (debug_threads)
01069         fprintf (stderr,
01070                  "GPS: lwp %s had signal %s, "
01071                  "but we don't know if we should pass it.  Default to not.\n",
01072                  target_pid_to_str (ptid_of (lp)),
01073                  gdb_signal_to_string (signo));
01074       return 0;
01075     }
01076   else
01077     {
01078       if (debug_threads)
01079         fprintf (stderr,
01080                  "GPS: lwp %s has pending signal %s: delivering it.\n",
01081                  target_pid_to_str (ptid_of (lp)),
01082                  gdb_signal_to_string (signo));
01083 
01084       return WSTOPSIG (status);
01085     }
01086 }
01087 
01088 static int
01089 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
01090 {
01091   struct thread_info *thread = (struct thread_info *) entry;
01092   struct lwp_info *lwp = get_thread_lwp (thread);
01093   int pid = * (int *) args;
01094   int sig;
01095 
01096   if (ptid_get_pid (entry->id) != pid)
01097     return 0;
01098 
01099   /* If there is a pending SIGSTOP, get rid of it.  */
01100   if (lwp->stop_expected)
01101     {
01102       if (debug_threads)
01103         fprintf (stderr,
01104                  "Sending SIGCONT to %s\n",
01105                  target_pid_to_str (ptid_of (lwp)));
01106 
01107       kill_lwp (lwpid_of (lwp), SIGCONT);
01108       lwp->stop_expected = 0;
01109     }
01110 
01111   /* Flush any pending changes to the process's registers.  */
01112   regcache_invalidate_thread (get_lwp_thread (lwp));
01113 
01114   /* Pass on any pending signal for this thread.  */
01115   sig = get_detach_signal (thread);
01116 
01117   /* Finally, let it resume.  */
01118   if (the_low_target.prepare_to_resume != NULL)
01119     the_low_target.prepare_to_resume (lwp);
01120   if (ptrace (PTRACE_DETACH, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
01121               (PTRACE_TYPE_ARG4) (long) sig) < 0)
01122     error (_("Can't detach %s: %s"),
01123            target_pid_to_str (ptid_of (lwp)),
01124            strerror (errno));
01125 
01126   delete_lwp (lwp);
01127   return 0;
01128 }
01129 
01130 static int
01131 linux_detach (int pid)
01132 {
01133   struct process_info *process;
01134 
01135   process = find_process_pid (pid);
01136   if (process == NULL)
01137     return -1;
01138 
01139   /* Stop all threads before detaching.  First, ptrace requires that
01140      the thread is stopped to sucessfully detach.  Second, thread_db
01141      may need to uninstall thread event breakpoints from memory, which
01142      only works with a stopped process anyway.  */
01143   stop_all_lwps (0, NULL);
01144 
01145 #ifdef USE_THREAD_DB
01146   thread_db_detach (process);
01147 #endif
01148 
01149   /* Stabilize threads (move out of jump pads).  */
01150   stabilize_threads ();
01151 
01152   find_inferior (&all_threads, linux_detach_one_lwp, &pid);
01153 
01154   the_target->mourn (process);
01155 
01156   /* Since we presently can only stop all lwps of all processes, we
01157      need to unstop lwps of other processes.  */
01158   unstop_all_lwps (0, NULL);
01159   return 0;
01160 }
01161 
01162 /* Remove all LWPs that belong to process PROC from the lwp list.  */
01163 
01164 static int
01165 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
01166 {
01167   struct lwp_info *lwp = (struct lwp_info *) entry;
01168   struct process_info *process = proc;
01169 
01170   if (pid_of (lwp) == pid_of (process))
01171     delete_lwp (lwp);
01172 
01173   return 0;
01174 }
01175 
01176 static void
01177 linux_mourn (struct process_info *process)
01178 {
01179   struct process_info_private *priv;
01180 
01181 #ifdef USE_THREAD_DB
01182   thread_db_mourn (process);
01183 #endif
01184 
01185   find_inferior (&all_lwps, delete_lwp_callback, process);
01186 
01187   /* Freeing all private data.  */
01188   priv = process->private;
01189   free (priv->arch_private);
01190   free (priv);
01191   process->private = NULL;
01192 
01193   remove_process (process);
01194 }
01195 
01196 static void
01197 linux_join (int pid)
01198 {
01199   int status, ret;
01200 
01201   do {
01202     ret = my_waitpid (pid, &status, 0);
01203     if (WIFEXITED (status) || WIFSIGNALED (status))
01204       break;
01205   } while (ret != -1 || errno != ECHILD);
01206 }
01207 
01208 /* Return nonzero if the given thread is still alive.  */
01209 static int
01210 linux_thread_alive (ptid_t ptid)
01211 {
01212   struct lwp_info *lwp = find_lwp_pid (ptid);
01213 
01214   /* We assume we always know if a thread exits.  If a whole process
01215      exited but we still haven't been able to report it to GDB, we'll
01216      hold on to the last lwp of the dead process.  */
01217   if (lwp != NULL)
01218     return !lwp->dead;
01219   else
01220     return 0;
01221 }
01222 
01223 /* Return 1 if this lwp has an interesting status pending.  */
01224 static int
01225 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
01226 {
01227   struct lwp_info *lwp = (struct lwp_info *) entry;
01228   ptid_t ptid = * (ptid_t *) arg;
01229   struct thread_info *thread;
01230 
01231   /* Check if we're only interested in events from a specific process
01232      or its lwps.  */
01233   if (!ptid_equal (minus_one_ptid, ptid)
01234       && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
01235     return 0;
01236 
01237   thread = get_lwp_thread (lwp);
01238 
01239   /* If we got a `vCont;t', but we haven't reported a stop yet, do
01240      report any status pending the LWP may have.  */
01241   if (thread->last_resume_kind == resume_stop
01242       && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
01243     return 0;
01244 
01245   return lwp->status_pending_p;
01246 }
01247 
01248 static int
01249 same_lwp (struct inferior_list_entry *entry, void *data)
01250 {
01251   ptid_t ptid = *(ptid_t *) data;
01252   int lwp;
01253 
01254   if (ptid_get_lwp (ptid) != 0)
01255     lwp = ptid_get_lwp (ptid);
01256   else
01257     lwp = ptid_get_pid (ptid);
01258 
01259   if (ptid_get_lwp (entry->id) == lwp)
01260     return 1;
01261 
01262   return 0;
01263 }
01264 
01265 struct lwp_info *
01266 find_lwp_pid (ptid_t ptid)
01267 {
01268   return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
01269 }
01270 
01271 static struct lwp_info *
01272 linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
01273 {
01274   int ret;
01275   int to_wait_for = -1;
01276   struct lwp_info *child = NULL;
01277 
01278   if (debug_threads)
01279     fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
01280 
01281   if (ptid_equal (ptid, minus_one_ptid))
01282     to_wait_for = -1;                   /* any child */
01283   else
01284     to_wait_for = ptid_get_lwp (ptid);  /* this lwp only */
01285 
01286   options |= __WALL;
01287 
01288 retry:
01289 
01290   ret = my_waitpid (to_wait_for, wstatp, options);
01291   if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
01292     return NULL;
01293   else if (ret == -1)
01294     perror_with_name ("waitpid");
01295 
01296   if (debug_threads
01297       && (!WIFSTOPPED (*wstatp)
01298           || (WSTOPSIG (*wstatp) != 32
01299               && WSTOPSIG (*wstatp) != 33)))
01300     fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
01301 
01302   child = find_lwp_pid (pid_to_ptid (ret));
01303 
01304   /* If we didn't find a process, one of two things presumably happened:
01305      - A process we started and then detached from has exited.  Ignore it.
01306      - A process we are controlling has forked and the new child's stop
01307      was reported to us by the kernel.  Save its PID.  */
01308   if (child == NULL && WIFSTOPPED (*wstatp))
01309     {
01310       add_to_pid_list (&stopped_pids, ret, *wstatp);
01311       goto retry;
01312     }
01313   else if (child == NULL)
01314     goto retry;
01315 
01316   child->stopped = 1;
01317 
01318   child->last_status = *wstatp;
01319 
01320   if (WIFSTOPPED (*wstatp))
01321     {
01322       struct process_info *proc;
01323 
01324       /* Architecture-specific setup after inferior is running.  This
01325          needs to happen after we have attached to the inferior and it
01326          is stopped for the first time, but before we access any
01327          inferior registers.  */
01328       proc = find_process_pid (pid_of (child));
01329       if (proc->private->new_inferior)
01330         {
01331           struct thread_info *saved_inferior;
01332 
01333           saved_inferior = current_inferior;
01334           current_inferior = get_lwp_thread (child);
01335 
01336           the_low_target.arch_setup ();
01337 
01338           current_inferior = saved_inferior;
01339 
01340           proc->private->new_inferior = 0;
01341         }
01342     }
01343 
01344   /* Fetch the possibly triggered data watchpoint info and store it in
01345      CHILD.
01346 
01347      On some archs, like x86, that use debug registers to set
01348      watchpoints, it's possible that the way to know which watched
01349      address trapped, is to check the register that is used to select
01350      which address to watch.  Problem is, between setting the
01351      watchpoint and reading back which data address trapped, the user
01352      may change the set of watchpoints, and, as a consequence, GDB
01353      changes the debug registers in the inferior.  To avoid reading
01354      back a stale stopped-data-address when that happens, we cache in
01355      LP the fact that a watchpoint trapped, and the corresponding data
01356      address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
01357      changes the debug registers meanwhile, we have the cached data we
01358      can rely on.  */
01359 
01360   if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
01361     {
01362       if (the_low_target.stopped_by_watchpoint == NULL)
01363         {
01364           child->stopped_by_watchpoint = 0;
01365         }
01366       else
01367         {
01368           struct thread_info *saved_inferior;
01369 
01370           saved_inferior = current_inferior;
01371           current_inferior = get_lwp_thread (child);
01372 
01373           child->stopped_by_watchpoint
01374             = the_low_target.stopped_by_watchpoint ();
01375 
01376           if (child->stopped_by_watchpoint)
01377             {
01378               if (the_low_target.stopped_data_address != NULL)
01379                 child->stopped_data_address
01380                   = the_low_target.stopped_data_address ();
01381               else
01382                 child->stopped_data_address = 0;
01383             }
01384 
01385           current_inferior = saved_inferior;
01386         }
01387     }
01388 
01389   /* Store the STOP_PC, with adjustment applied.  This depends on the
01390      architecture being defined already (so that CHILD has a valid
01391      regcache), and on LAST_STATUS being set (to check for SIGTRAP or
01392      not).  */
01393   if (WIFSTOPPED (*wstatp))
01394     child->stop_pc = get_stop_pc (child);
01395 
01396   if (debug_threads
01397       && WIFSTOPPED (*wstatp)
01398       && the_low_target.get_pc != NULL)
01399     {
01400       struct thread_info *saved_inferior = current_inferior;
01401       struct regcache *regcache;
01402       CORE_ADDR pc;
01403 
01404       current_inferior = get_lwp_thread (child);
01405       regcache = get_thread_regcache (current_inferior, 1);
01406       pc = (*the_low_target.get_pc) (regcache);
01407       fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
01408       current_inferior = saved_inferior;
01409     }
01410 
01411   return child;
01412 }
01413 
01414 /* This function should only be called if the LWP got a SIGTRAP.
01415 
01416    Handle any tracepoint steps or hits.  Return true if a tracepoint
01417    event was handled, 0 otherwise.  */
01418 
01419 static int
01420 handle_tracepoints (struct lwp_info *lwp)
01421 {
01422   struct thread_info *tinfo = get_lwp_thread (lwp);
01423   int tpoint_related_event = 0;
01424 
01425   /* If this tracepoint hit causes a tracing stop, we'll immediately
01426      uninsert tracepoints.  To do this, we temporarily pause all
01427      threads, unpatch away, and then unpause threads.  We need to make
01428      sure the unpausing doesn't resume LWP too.  */
01429   lwp->suspended++;
01430 
01431   /* And we need to be sure that any all-threads-stopping doesn't try
01432      to move threads out of the jump pads, as it could deadlock the
01433      inferior (LWP could be in the jump pad, maybe even holding the
01434      lock.)  */
01435 
01436   /* Do any necessary step collect actions.  */
01437   tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
01438 
01439   tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
01440 
01441   /* See if we just hit a tracepoint and do its main collect
01442      actions.  */
01443   tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
01444 
01445   lwp->suspended--;
01446 
01447   gdb_assert (lwp->suspended == 0);
01448   gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
01449 
01450   if (tpoint_related_event)
01451     {
01452       if (debug_threads)
01453         fprintf (stderr, "got a tracepoint event\n");
01454       return 1;
01455     }
01456 
01457   return 0;
01458 }
01459 
01460 /* Convenience wrapper.  Returns true if LWP is presently collecting a
01461    fast tracepoint.  */
01462 
01463 static int
01464 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
01465                                   struct fast_tpoint_collect_status *status)
01466 {
01467   CORE_ADDR thread_area;
01468 
01469   if (the_low_target.get_thread_area == NULL)
01470     return 0;
01471 
01472   /* Get the thread area address.  This is used to recognize which
01473      thread is which when tracing with the in-process agent library.
01474      We don't read anything from the address, and treat it as opaque;
01475      it's the address itself that we assume is unique per-thread.  */
01476   if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
01477     return 0;
01478 
01479   return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
01480 }
01481 
01482 /* The reason we resume in the caller, is because we want to be able
01483    to pass lwp->status_pending as WSTAT, and we need to clear
01484    status_pending_p before resuming, otherwise, linux_resume_one_lwp
01485    refuses to resume.  */
01486 
01487 static int
01488 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
01489 {
01490   struct thread_info *saved_inferior;
01491 
01492   saved_inferior = current_inferior;
01493   current_inferior = get_lwp_thread (lwp);
01494 
01495   if ((wstat == NULL
01496        || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
01497       && supports_fast_tracepoints ()
01498       && agent_loaded_p ())
01499     {
01500       struct fast_tpoint_collect_status status;
01501       int r;
01502 
01503       if (debug_threads)
01504         fprintf (stderr, "\
01505 Checking whether LWP %ld needs to move out of the jump pad.\n",
01506                  lwpid_of (lwp));
01507 
01508       r = linux_fast_tracepoint_collecting (lwp, &status);
01509 
01510       if (wstat == NULL
01511           || (WSTOPSIG (*wstat) != SIGILL
01512               && WSTOPSIG (*wstat) != SIGFPE
01513               && WSTOPSIG (*wstat) != SIGSEGV
01514               && WSTOPSIG (*wstat) != SIGBUS))
01515         {
01516           lwp->collecting_fast_tracepoint = r;
01517 
01518           if (r != 0)
01519             {
01520               if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
01521                 {
01522                   /* Haven't executed the original instruction yet.
01523                      Set breakpoint there, and wait till it's hit,
01524                      then single-step until exiting the jump pad.  */
01525                   lwp->exit_jump_pad_bkpt
01526                     = set_breakpoint_at (status.adjusted_insn_addr, NULL);
01527                 }
01528 
01529               if (debug_threads)
01530                 fprintf (stderr, "\
01531 Checking whether LWP %ld needs to move out of the jump pad...it does\n",
01532                  lwpid_of (lwp));
01533               current_inferior = saved_inferior;
01534 
01535               return 1;
01536             }
01537         }
01538       else
01539         {
01540           /* If we get a synchronous signal while collecting, *and*
01541              while executing the (relocated) original instruction,
01542              reset the PC to point at the tpoint address, before
01543              reporting to GDB.  Otherwise, it's an IPA lib bug: just
01544              report the signal to GDB, and pray for the best.  */
01545 
01546           lwp->collecting_fast_tracepoint = 0;
01547 
01548           if (r != 0
01549               && (status.adjusted_insn_addr <= lwp->stop_pc
01550                   && lwp->stop_pc < status.adjusted_insn_addr_end))
01551             {
01552               siginfo_t info;
01553               struct regcache *regcache;
01554 
01555               /* The si_addr on a few signals references the address
01556                  of the faulting instruction.  Adjust that as
01557                  well.  */
01558               if ((WSTOPSIG (*wstat) == SIGILL
01559                    || WSTOPSIG (*wstat) == SIGFPE
01560                    || WSTOPSIG (*wstat) == SIGBUS
01561                    || WSTOPSIG (*wstat) == SIGSEGV)
01562                   && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp),
01563                              (PTRACE_TYPE_ARG3) 0, &info) == 0
01564                   /* Final check just to make sure we don't clobber
01565                      the siginfo of non-kernel-sent signals.  */
01566                   && (uintptr_t) info.si_addr == lwp->stop_pc)
01567                 {
01568                   info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
01569                   ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp),
01570                           (PTRACE_TYPE_ARG3) 0, &info);
01571                 }
01572 
01573               regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
01574               (*the_low_target.set_pc) (regcache, status.tpoint_addr);
01575               lwp->stop_pc = status.tpoint_addr;
01576 
01577               /* Cancel any fast tracepoint lock this thread was
01578                  holding.  */
01579               force_unlock_trace_buffer ();
01580             }
01581 
01582           if (lwp->exit_jump_pad_bkpt != NULL)
01583             {
01584               if (debug_threads)
01585                 fprintf (stderr,
01586                          "Cancelling fast exit-jump-pad: removing bkpt. "
01587                          "stopping all threads momentarily.\n");
01588 
01589               stop_all_lwps (1, lwp);
01590               cancel_breakpoints ();
01591 
01592               delete_breakpoint (lwp->exit_jump_pad_bkpt);
01593               lwp->exit_jump_pad_bkpt = NULL;
01594 
01595               unstop_all_lwps (1, lwp);
01596 
01597               gdb_assert (lwp->suspended >= 0);
01598             }
01599         }
01600     }
01601 
01602   if (debug_threads)
01603     fprintf (stderr, "\
01604 Checking whether LWP %ld needs to move out of the jump pad...no\n",
01605              lwpid_of (lwp));
01606 
01607   current_inferior = saved_inferior;
01608   return 0;
01609 }
01610 
01611 /* Enqueue one signal in the "signals to report later when out of the
01612    jump pad" list.  */
01613 
01614 static void
01615 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
01616 {
01617   struct pending_signals *p_sig;
01618 
01619   if (debug_threads)
01620     fprintf (stderr, "\
01621 Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
01622 
01623   if (debug_threads)
01624     {
01625       struct pending_signals *sig;
01626 
01627       for (sig = lwp->pending_signals_to_report;
01628            sig != NULL;
01629            sig = sig->prev)
01630         fprintf (stderr,
01631                  "   Already queued %d\n",
01632                  sig->signal);
01633 
01634       fprintf (stderr, "   (no more currently queued signals)\n");
01635     }
01636 
01637   /* Don't enqueue non-RT signals if they are already in the deferred
01638      queue.  (SIGSTOP being the easiest signal to see ending up here
01639      twice)  */
01640   if (WSTOPSIG (*wstat) < __SIGRTMIN)
01641     {
01642       struct pending_signals *sig;
01643 
01644       for (sig = lwp->pending_signals_to_report;
01645            sig != NULL;
01646            sig = sig->prev)
01647         {
01648           if (sig->signal == WSTOPSIG (*wstat))
01649             {
01650               if (debug_threads)
01651                 fprintf (stderr,
01652                          "Not requeuing already queued non-RT signal %d"
01653                          " for LWP %ld\n",
01654                          sig->signal,
01655                          lwpid_of (lwp));
01656               return;
01657             }
01658         }
01659     }
01660 
01661   p_sig = xmalloc (sizeof (*p_sig));
01662   p_sig->prev = lwp->pending_signals_to_report;
01663   p_sig->signal = WSTOPSIG (*wstat);
01664   memset (&p_sig->info, 0, sizeof (siginfo_t));
01665   ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
01666           &p_sig->info);
01667 
01668   lwp->pending_signals_to_report = p_sig;
01669 }
01670 
01671 /* Dequeue one signal from the "signals to report later when out of
01672    the jump pad" list.  */
01673 
01674 static int
01675 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
01676 {
01677   if (lwp->pending_signals_to_report != NULL)
01678     {
01679       struct pending_signals **p_sig;
01680 
01681       p_sig = &lwp->pending_signals_to_report;
01682       while ((*p_sig)->prev != NULL)
01683         p_sig = &(*p_sig)->prev;
01684 
01685       *wstat = W_STOPCODE ((*p_sig)->signal);
01686       if ((*p_sig)->info.si_signo != 0)
01687         ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
01688                 &(*p_sig)->info);
01689       free (*p_sig);
01690       *p_sig = NULL;
01691 
01692       if (debug_threads)
01693         fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
01694                  WSTOPSIG (*wstat), lwpid_of (lwp));
01695 
01696       if (debug_threads)
01697         {
01698           struct pending_signals *sig;
01699 
01700           for (sig = lwp->pending_signals_to_report;
01701                sig != NULL;
01702                sig = sig->prev)
01703             fprintf (stderr,
01704                      "   Still queued %d\n",
01705                      sig->signal);
01706 
01707           fprintf (stderr, "   (no more queued signals)\n");
01708         }
01709 
01710       return 1;
01711     }
01712 
01713   return 0;
01714 }
01715 
01716 /* Arrange for a breakpoint to be hit again later.  We don't keep the
01717    SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
01718    will handle the current event, eventually we will resume this LWP,
01719    and this breakpoint will trap again.  */
01720 
01721 static int
01722 cancel_breakpoint (struct lwp_info *lwp)
01723 {
01724   struct thread_info *saved_inferior;
01725 
01726   /* There's nothing to do if we don't support breakpoints.  */
01727   if (!supports_breakpoints ())
01728     return 0;
01729 
01730   /* breakpoint_at reads from current inferior.  */
01731   saved_inferior = current_inferior;
01732   current_inferior = get_lwp_thread (lwp);
01733 
01734   if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
01735     {
01736       if (debug_threads)
01737         fprintf (stderr,
01738                  "CB: Push back breakpoint for %s\n",
01739                  target_pid_to_str (ptid_of (lwp)));
01740 
01741       /* Back up the PC if necessary.  */
01742       if (the_low_target.decr_pc_after_break)
01743         {
01744           struct regcache *regcache
01745             = get_thread_regcache (current_inferior, 1);
01746           (*the_low_target.set_pc) (regcache, lwp->stop_pc);
01747         }
01748 
01749       current_inferior = saved_inferior;
01750       return 1;
01751     }
01752   else
01753     {
01754       if (debug_threads)
01755         fprintf (stderr,
01756                  "CB: No breakpoint found at %s for [%s]\n",
01757                  paddress (lwp->stop_pc),
01758                  target_pid_to_str (ptid_of (lwp)));
01759     }
01760 
01761   current_inferior = saved_inferior;
01762   return 0;
01763 }
01764 
01765 /* When the event-loop is doing a step-over, this points at the thread
01766    being stepped.  */
01767 ptid_t step_over_bkpt;
01768 
01769 /* Wait for an event from child PID.  If PID is -1, wait for any
01770    child.  Store the stop status through the status pointer WSTAT.
01771    OPTIONS is passed to the waitpid call.  Return 0 if no child stop
01772    event was found and OPTIONS contains WNOHANG.  Return the PID of
01773    the stopped child otherwise.  */
01774 
01775 static int
01776 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
01777 {
01778   struct lwp_info *event_child, *requested_child;
01779   ptid_t wait_ptid;
01780 
01781   event_child = NULL;
01782   requested_child = NULL;
01783 
01784   /* Check for a lwp with a pending status.  */
01785 
01786   if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
01787     {
01788       event_child = (struct lwp_info *)
01789         find_inferior (&all_lwps, status_pending_p_callback, &ptid);
01790       if (debug_threads && event_child)
01791         fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
01792     }
01793   else
01794     {
01795       requested_child = find_lwp_pid (ptid);
01796 
01797       if (stopping_threads == NOT_STOPPING_THREADS
01798           && requested_child->status_pending_p
01799           && requested_child->collecting_fast_tracepoint)
01800         {
01801           enqueue_one_deferred_signal (requested_child,
01802                                        &requested_child->status_pending);
01803           requested_child->status_pending_p = 0;
01804           requested_child->status_pending = 0;
01805           linux_resume_one_lwp (requested_child, 0, 0, NULL);
01806         }
01807 
01808       if (requested_child->suspended
01809           && requested_child->status_pending_p)
01810         fatal ("requesting an event out of a suspended child?");
01811 
01812       if (requested_child->status_pending_p)
01813         event_child = requested_child;
01814     }
01815 
01816   if (event_child != NULL)
01817     {
01818       if (debug_threads)
01819         fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
01820                  lwpid_of (event_child), event_child->status_pending);
01821       *wstat = event_child->status_pending;
01822       event_child->status_pending_p = 0;
01823       event_child->status_pending = 0;
01824       current_inferior = get_lwp_thread (event_child);
01825       return lwpid_of (event_child);
01826     }
01827 
01828   if (ptid_is_pid (ptid))
01829     {
01830       /* A request to wait for a specific tgid.  This is not possible
01831          with waitpid, so instead, we wait for any child, and leave
01832          children we're not interested in right now with a pending
01833          status to report later.  */
01834       wait_ptid = minus_one_ptid;
01835     }
01836   else
01837     wait_ptid = ptid;
01838 
01839   /* We only enter this loop if no process has a pending wait status.  Thus
01840      any action taken in response to a wait status inside this loop is
01841      responding as soon as we detect the status, not after any pending
01842      events.  */
01843   while (1)
01844     {
01845       event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
01846 
01847       if ((options & WNOHANG) && event_child == NULL)
01848         {
01849           if (debug_threads)
01850             fprintf (stderr, "WNOHANG set, no event found\n");
01851           return 0;
01852         }
01853 
01854       if (event_child == NULL)
01855         error ("event from unknown child");
01856 
01857       if (ptid_is_pid (ptid)
01858           && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
01859         {
01860           if (! WIFSTOPPED (*wstat))
01861             mark_lwp_dead (event_child, *wstat);
01862           else
01863             {
01864               event_child->status_pending_p = 1;
01865               event_child->status_pending = *wstat;
01866             }
01867           continue;
01868         }
01869 
01870       current_inferior = get_lwp_thread (event_child);
01871 
01872       /* Check for thread exit.  */
01873       if (! WIFSTOPPED (*wstat))
01874         {
01875           if (debug_threads)
01876             fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
01877 
01878           /* If the last thread is exiting, just return.  */
01879           if (last_thread_of_process_p (current_inferior))
01880             {
01881               if (debug_threads)
01882                 fprintf (stderr, "LWP %ld is last lwp of process\n",
01883                          lwpid_of (event_child));
01884               return lwpid_of (event_child);
01885             }
01886 
01887           if (!non_stop)
01888             {
01889               current_inferior = (struct thread_info *) all_threads.head;
01890               if (debug_threads)
01891                 fprintf (stderr, "Current inferior is now %ld\n",
01892                          lwpid_of (get_thread_lwp (current_inferior)));
01893             }
01894           else
01895             {
01896               current_inferior = NULL;
01897               if (debug_threads)
01898                 fprintf (stderr, "Current inferior is now <NULL>\n");
01899             }
01900 
01901           /* If we were waiting for this particular child to do something...
01902              well, it did something.  */
01903           if (requested_child != NULL)
01904             {
01905               int lwpid = lwpid_of (event_child);
01906 
01907               /* Cancel the step-over operation --- the thread that
01908                  started it is gone.  */
01909               if (finish_step_over (event_child))
01910                 unstop_all_lwps (1, event_child);
01911               delete_lwp (event_child);
01912               return lwpid;
01913             }
01914 
01915           delete_lwp (event_child);
01916 
01917           /* Wait for a more interesting event.  */
01918           continue;
01919         }
01920 
01921       if (event_child->must_set_ptrace_flags)
01922         {
01923           linux_enable_event_reporting (lwpid_of (event_child));
01924           event_child->must_set_ptrace_flags = 0;
01925         }
01926 
01927       if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
01928           && *wstat >> 16 != 0)
01929         {
01930           handle_extended_wait (event_child, *wstat);
01931           continue;
01932         }
01933 
01934       if (WIFSTOPPED (*wstat)
01935           && WSTOPSIG (*wstat) == SIGSTOP
01936           && event_child->stop_expected)
01937         {
01938           int should_stop;
01939 
01940           if (debug_threads)
01941             fprintf (stderr, "Expected stop.\n");
01942           event_child->stop_expected = 0;
01943 
01944           should_stop = (current_inferior->last_resume_kind == resume_stop
01945                          || stopping_threads != NOT_STOPPING_THREADS);
01946 
01947           if (!should_stop)
01948             {
01949               linux_resume_one_lwp (event_child,
01950                                     event_child->stepping, 0, NULL);
01951               continue;
01952             }
01953         }
01954 
01955       return lwpid_of (event_child);
01956     }
01957 
01958   /* NOTREACHED */
01959   return 0;
01960 }
01961 
01962 /* Count the LWP's that have had events.  */
01963 
01964 static int
01965 count_events_callback (struct inferior_list_entry *entry, void *data)
01966 {
01967   struct lwp_info *lp = (struct lwp_info *) entry;
01968   struct thread_info *thread = get_lwp_thread (lp);
01969   int *count = data;
01970 
01971   gdb_assert (count != NULL);
01972 
01973   /* Count only resumed LWPs that have a SIGTRAP event pending that
01974      should be reported to GDB.  */
01975   if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
01976       && thread->last_resume_kind != resume_stop
01977       && lp->status_pending_p
01978       && WIFSTOPPED (lp->status_pending)
01979       && WSTOPSIG (lp->status_pending) == SIGTRAP
01980       && !breakpoint_inserted_here (lp->stop_pc))
01981     (*count)++;
01982 
01983   return 0;
01984 }
01985 
01986 /* Select the LWP (if any) that is currently being single-stepped.  */
01987 
01988 static int
01989 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
01990 {
01991   struct lwp_info *lp = (struct lwp_info *) entry;
01992   struct thread_info *thread = get_lwp_thread (lp);
01993 
01994   if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
01995       && thread->last_resume_kind == resume_step
01996       && lp->status_pending_p)
01997     return 1;
01998   else
01999     return 0;
02000 }
02001 
02002 /* Select the Nth LWP that has had a SIGTRAP event that should be
02003    reported to GDB.  */
02004 
02005 static int
02006 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
02007 {
02008   struct lwp_info *lp = (struct lwp_info *) entry;
02009   struct thread_info *thread = get_lwp_thread (lp);
02010   int *selector = data;
02011 
02012   gdb_assert (selector != NULL);
02013 
02014   /* Select only resumed LWPs that have a SIGTRAP event pending. */
02015   if (thread->last_resume_kind != resume_stop
02016       && thread->last_status.kind == TARGET_WAITKIND_IGNORE
02017       && lp->status_pending_p
02018       && WIFSTOPPED (lp->status_pending)
02019       && WSTOPSIG (lp->status_pending) == SIGTRAP
02020       && !breakpoint_inserted_here (lp->stop_pc))
02021     if ((*selector)-- == 0)
02022       return 1;
02023 
02024   return 0;
02025 }
02026 
02027 static int
02028 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
02029 {
02030   struct lwp_info *lp = (struct lwp_info *) entry;
02031   struct thread_info *thread = get_lwp_thread (lp);
02032   struct lwp_info *event_lp = data;
02033 
02034   /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
02035   if (lp == event_lp)
02036     return 0;
02037 
02038   /* If a LWP other than the LWP that we're reporting an event for has
02039      hit a GDB breakpoint (as opposed to some random trap signal),
02040      then just arrange for it to hit it again later.  We don't keep
02041      the SIGTRAP status and don't forward the SIGTRAP signal to the
02042      LWP.  We will handle the current event, eventually we will resume
02043      all LWPs, and this one will get its breakpoint trap again.
02044 
02045      If we do not do this, then we run the risk that the user will
02046      delete or disable the breakpoint, but the LWP will have already
02047      tripped on it.  */
02048 
02049   if (thread->last_resume_kind != resume_stop
02050       && thread->last_status.kind == TARGET_WAITKIND_IGNORE
02051       && lp->status_pending_p
02052       && WIFSTOPPED (lp->status_pending)
02053       && WSTOPSIG (lp->status_pending) == SIGTRAP
02054       && !lp->stepping
02055       && !lp->stopped_by_watchpoint
02056       && cancel_breakpoint (lp))
02057     /* Throw away the SIGTRAP.  */
02058     lp->status_pending_p = 0;
02059 
02060   return 0;
02061 }
02062 
02063 static void
02064 linux_cancel_breakpoints (void)
02065 {
02066   find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
02067 }
02068 
02069 /* Select one LWP out of those that have events pending.  */
02070 
02071 static void
02072 select_event_lwp (struct lwp_info **orig_lp)
02073 {
02074   int num_events = 0;
02075   int random_selector;
02076   struct lwp_info *event_lp;
02077 
02078   /* Give preference to any LWP that is being single-stepped.  */
02079   event_lp
02080     = (struct lwp_info *) find_inferior (&all_lwps,
02081                                          select_singlestep_lwp_callback, NULL);
02082   if (event_lp != NULL)
02083     {
02084       if (debug_threads)
02085         fprintf (stderr,
02086                  "SEL: Select single-step %s\n",
02087                  target_pid_to_str (ptid_of (event_lp)));
02088     }
02089   else
02090     {
02091       /* No single-stepping LWP.  Select one at random, out of those
02092          which have had SIGTRAP events.  */
02093 
02094       /* First see how many SIGTRAP events we have.  */
02095       find_inferior (&all_lwps, count_events_callback, &num_events);
02096 
02097       /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
02098       random_selector = (int)
02099         ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
02100 
02101       if (debug_threads && num_events > 1)
02102         fprintf (stderr,
02103                  "SEL: Found %d SIGTRAP events, selecting #%d\n",
02104                  num_events, random_selector);
02105 
02106       event_lp = (struct lwp_info *) find_inferior (&all_lwps,
02107                                                     select_event_lwp_callback,
02108                                                     &random_selector);
02109     }
02110 
02111   if (event_lp != NULL)
02112     {
02113       /* Switch the event LWP.  */
02114       *orig_lp = event_lp;
02115     }
02116 }
02117 
02118 /* Decrement the suspend count of an LWP.  */
02119 
02120 static int
02121 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
02122 {
02123   struct lwp_info *lwp = (struct lwp_info *) entry;
02124 
02125   /* Ignore EXCEPT.  */
02126   if (lwp == except)
02127     return 0;
02128 
02129   lwp->suspended--;
02130 
02131   gdb_assert (lwp->suspended >= 0);
02132   return 0;
02133 }
02134 
02135 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
02136    NULL.  */
02137 
02138 static void
02139 unsuspend_all_lwps (struct lwp_info *except)
02140 {
02141   find_inferior (&all_lwps, unsuspend_one_lwp, except);
02142 }
02143 
02144 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
02145 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
02146                                        void *data);
02147 static int lwp_running (struct inferior_list_entry *entry, void *data);
02148 static ptid_t linux_wait_1 (ptid_t ptid,
02149                             struct target_waitstatus *ourstatus,
02150                             int target_options);
02151 
02152 /* Stabilize threads (move out of jump pads).
02153 
02154    If a thread is midway collecting a fast tracepoint, we need to
02155    finish the collection and move it out of the jump pad before
02156    reporting the signal.
02157 
02158    This avoids recursion while collecting (when a signal arrives
02159    midway, and the signal handler itself collects), which would trash
02160    the trace buffer.  In case the user set a breakpoint in a signal
02161    handler, this avoids the backtrace showing the jump pad, etc..
02162    Most importantly, there are certain things we can't do safely if
02163    threads are stopped in a jump pad (or in its callee's).  For
02164    example:
02165 
02166      - starting a new trace run.  A thread still collecting the
02167    previous run, could trash the trace buffer when resumed.  The trace
02168    buffer control structures would have been reset but the thread had
02169    no way to tell.  The thread could even midway memcpy'ing to the
02170    buffer, which would mean that when resumed, it would clobber the
02171    trace buffer that had been set for a new run.
02172 
02173      - we can't rewrite/reuse the jump pads for new tracepoints
02174    safely.  Say you do tstart while a thread is stopped midway while
02175    collecting.  When the thread is later resumed, it finishes the
02176    collection, and returns to the jump pad, to execute the original
02177    instruction that was under the tracepoint jump at the time the
02178    older run had been started.  If the jump pad had been rewritten
02179    since for something else in the new run, the thread would now
02180    execute the wrong / random instructions.  */
02181 
02182 static void
02183 linux_stabilize_threads (void)
02184 {
02185   struct thread_info *save_inferior;
02186   struct lwp_info *lwp_stuck;
02187 
02188   lwp_stuck
02189     = (struct lwp_info *) find_inferior (&all_lwps,
02190                                          stuck_in_jump_pad_callback, NULL);
02191   if (lwp_stuck != NULL)
02192     {
02193       if (debug_threads)
02194         fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
02195                  lwpid_of (lwp_stuck));
02196       return;
02197     }
02198 
02199   save_inferior = current_inferior;
02200 
02201   stabilizing_threads = 1;
02202 
02203   /* Kick 'em all.  */
02204   for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
02205 
02206   /* Loop until all are stopped out of the jump pads.  */
02207   while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
02208     {
02209       struct target_waitstatus ourstatus;
02210       struct lwp_info *lwp;
02211       int wstat;
02212 
02213       /* Note that we go through the full wait even loop.  While
02214          moving threads out of jump pad, we need to be able to step
02215          over internal breakpoints and such.  */
02216       linux_wait_1 (minus_one_ptid, &ourstatus, 0);
02217 
02218       if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
02219         {
02220           lwp = get_thread_lwp (current_inferior);
02221 
02222           /* Lock it.  */
02223           lwp->suspended++;
02224 
02225           if (ourstatus.value.sig != GDB_SIGNAL_0
02226               || current_inferior->last_resume_kind == resume_stop)
02227             {
02228               wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
02229               enqueue_one_deferred_signal (lwp, &wstat);
02230             }
02231         }
02232     }
02233 
02234   find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
02235 
02236   stabilizing_threads = 0;
02237 
02238   current_inferior = save_inferior;
02239 
02240   if (debug_threads)
02241     {
02242       lwp_stuck
02243         = (struct lwp_info *) find_inferior (&all_lwps,
02244                                          stuck_in_jump_pad_callback, NULL);
02245       if (lwp_stuck != NULL)
02246         fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
02247                  lwpid_of (lwp_stuck));
02248     }
02249 }
02250 
02251 /* Wait for process, returns status.  */
02252 
02253 static ptid_t
02254 linux_wait_1 (ptid_t ptid,
02255               struct target_waitstatus *ourstatus, int target_options)
02256 {
02257   int w;
02258   struct lwp_info *event_child;
02259   int options;
02260   int pid;
02261   int step_over_finished;
02262   int bp_explains_trap;
02263   int maybe_internal_trap;
02264   int report_to_gdb;
02265   int trace_event;
02266   int in_step_range;
02267 
02268   /* Translate generic target options into linux options.  */
02269   options = __WALL;
02270   if (target_options & TARGET_WNOHANG)
02271     options |= WNOHANG;
02272 
02273 retry:
02274   bp_explains_trap = 0;
02275   trace_event = 0;
02276   in_step_range = 0;
02277   ourstatus->kind = TARGET_WAITKIND_IGNORE;
02278 
02279   /* If we were only supposed to resume one thread, only wait for
02280      that thread - if it's still alive.  If it died, however - which
02281      can happen if we're coming from the thread death case below -
02282      then we need to make sure we restart the other threads.  We could
02283      pick a thread at random or restart all; restarting all is less
02284      arbitrary.  */
02285   if (!non_stop
02286       && !ptid_equal (cont_thread, null_ptid)
02287       && !ptid_equal (cont_thread, minus_one_ptid))
02288     {
02289       struct thread_info *thread;
02290 
02291       thread = (struct thread_info *) find_inferior_id (&all_threads,
02292                                                         cont_thread);
02293 
02294       /* No stepping, no signal - unless one is pending already, of course.  */
02295       if (thread == NULL)
02296         {
02297           struct thread_resume resume_info;
02298           resume_info.thread = minus_one_ptid;
02299           resume_info.kind = resume_continue;
02300           resume_info.sig = 0;
02301           linux_resume (&resume_info, 1);
02302         }
02303       else
02304         ptid = cont_thread;
02305     }
02306 
02307   if (ptid_equal (step_over_bkpt, null_ptid))
02308     pid = linux_wait_for_event (ptid, &w, options);
02309   else
02310     {
02311       if (debug_threads)
02312         fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
02313                  target_pid_to_str (step_over_bkpt));
02314       pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
02315     }
02316 
02317   if (pid == 0) /* only if TARGET_WNOHANG */
02318     return null_ptid;
02319 
02320   event_child = get_thread_lwp (current_inferior);
02321 
02322   /* If we are waiting for a particular child, and it exited,
02323      linux_wait_for_event will return its exit status.  Similarly if
02324      the last child exited.  If this is not the last child, however,
02325      do not report it as exited until there is a 'thread exited' response
02326      available in the remote protocol.  Instead, just wait for another event.
02327      This should be safe, because if the thread crashed we will already
02328      have reported the termination signal to GDB; that should stop any
02329      in-progress stepping operations, etc.
02330 
02331      Report the exit status of the last thread to exit.  This matches
02332      LinuxThreads' behavior.  */
02333 
02334   if (last_thread_of_process_p (current_inferior))
02335     {
02336       if (WIFEXITED (w) || WIFSIGNALED (w))
02337         {
02338           if (WIFEXITED (w))
02339             {
02340               ourstatus->kind = TARGET_WAITKIND_EXITED;
02341               ourstatus->value.integer = WEXITSTATUS (w);
02342 
02343               if (debug_threads)
02344                 fprintf (stderr,
02345                          "\nChild exited with retcode = %x \n",
02346                          WEXITSTATUS (w));
02347             }
02348           else
02349             {
02350               ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
02351               ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
02352 
02353               if (debug_threads)
02354                 fprintf (stderr,
02355                          "\nChild terminated with signal = %x \n",
02356                          WTERMSIG (w));
02357 
02358             }
02359 
02360           return ptid_of (event_child);
02361         }
02362     }
02363   else
02364     {
02365       if (!WIFSTOPPED (w))
02366         goto retry;
02367     }
02368 
02369   /* If this event was not handled before, and is not a SIGTRAP, we
02370      report it.  SIGILL and SIGSEGV are also treated as traps in case
02371      a breakpoint is inserted at the current PC.  If this target does
02372      not support internal breakpoints at all, we also report the
02373      SIGTRAP without further processing; it's of no concern to us.  */
02374   maybe_internal_trap
02375     = (supports_breakpoints ()
02376        && (WSTOPSIG (w) == SIGTRAP
02377            || ((WSTOPSIG (w) == SIGILL
02378                 || WSTOPSIG (w) == SIGSEGV)
02379                && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
02380 
02381   if (maybe_internal_trap)
02382     {
02383       /* Handle anything that requires bookkeeping before deciding to
02384          report the event or continue waiting.  */
02385 
02386       /* First check if we can explain the SIGTRAP with an internal
02387          breakpoint, or if we should possibly report the event to GDB.
02388          Do this before anything that may remove or insert a
02389          breakpoint.  */
02390       bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
02391 
02392       /* We have a SIGTRAP, possibly a step-over dance has just
02393          finished.  If so, tweak the state machine accordingly,
02394          reinsert breakpoints and delete any reinsert (software
02395          single-step) breakpoints.  */
02396       step_over_finished = finish_step_over (event_child);
02397 
02398       /* Now invoke the callbacks of any internal breakpoints there.  */
02399       check_breakpoints (event_child->stop_pc);
02400 
02401       /* Handle tracepoint data collecting.  This may overflow the
02402          trace buffer, and cause a tracing stop, removing
02403          breakpoints.  */
02404       trace_event = handle_tracepoints (event_child);
02405 
02406       if (bp_explains_trap)
02407         {
02408           /* If we stepped or ran into an internal breakpoint, we've
02409              already handled it.  So next time we resume (from this
02410              PC), we should step over it.  */
02411           if (debug_threads)
02412             fprintf (stderr, "Hit a gdbserver breakpoint.\n");
02413 
02414           if (breakpoint_here (event_child->stop_pc))
02415             event_child->need_step_over = 1;
02416         }
02417     }
02418   else
02419     {
02420       /* We have some other signal, possibly a step-over dance was in
02421          progress, and it should be cancelled too.  */
02422       step_over_finished = finish_step_over (event_child);
02423     }
02424 
02425   /* We have all the data we need.  Either report the event to GDB, or
02426      resume threads and keep waiting for more.  */
02427 
02428   /* If we're collecting a fast tracepoint, finish the collection and
02429      move out of the jump pad before delivering a signal.  See
02430      linux_stabilize_threads.  */
02431 
02432   if (WIFSTOPPED (w)
02433       && WSTOPSIG (w) != SIGTRAP
02434       && supports_fast_tracepoints ()
02435       && agent_loaded_p ())
02436     {
02437       if (debug_threads)
02438         fprintf (stderr,
02439                  "Got signal %d for LWP %ld.  Check if we need "
02440                  "to defer or adjust it.\n",
02441                  WSTOPSIG (w), lwpid_of (event_child));
02442 
02443       /* Allow debugging the jump pad itself.  */
02444       if (current_inferior->last_resume_kind != resume_step
02445           && maybe_move_out_of_jump_pad (event_child, &w))
02446         {
02447           enqueue_one_deferred_signal (event_child, &w);
02448 
02449           if (debug_threads)
02450             fprintf (stderr,
02451                      "Signal %d for LWP %ld deferred (in jump pad)\n",
02452                      WSTOPSIG (w), lwpid_of (event_child));
02453 
02454           linux_resume_one_lwp (event_child, 0, 0, NULL);
02455           goto retry;
02456         }
02457     }
02458 
02459   if (event_child->collecting_fast_tracepoint)
02460     {
02461       if (debug_threads)
02462         fprintf (stderr, "\
02463 LWP %ld was trying to move out of the jump pad (%d).  \
02464 Check if we're already there.\n",
02465                  lwpid_of (event_child),
02466                  event_child->collecting_fast_tracepoint);
02467 
02468       trace_event = 1;
02469 
02470       event_child->collecting_fast_tracepoint
02471         = linux_fast_tracepoint_collecting (event_child, NULL);
02472 
02473       if (event_child->collecting_fast_tracepoint != 1)
02474         {
02475           /* No longer need this breakpoint.  */
02476           if (event_child->exit_jump_pad_bkpt != NULL)
02477             {
02478               if (debug_threads)
02479                 fprintf (stderr,
02480                          "No longer need exit-jump-pad bkpt; removing it."
02481                          "stopping all threads momentarily.\n");
02482 
02483               /* Other running threads could hit this breakpoint.
02484                  We don't handle moribund locations like GDB does,
02485                  instead we always pause all threads when removing
02486                  breakpoints, so that any step-over or
02487                  decr_pc_after_break adjustment is always taken
02488                  care of while the breakpoint is still
02489                  inserted.  */
02490               stop_all_lwps (1, event_child);
02491               cancel_breakpoints ();
02492 
02493               delete_breakpoint (event_child->exit_jump_pad_bkpt);
02494               event_child->exit_jump_pad_bkpt = NULL;
02495 
02496               unstop_all_lwps (1, event_child);
02497 
02498               gdb_assert (event_child->suspended >= 0);
02499             }
02500         }
02501 
02502       if (event_child->collecting_fast_tracepoint == 0)
02503         {
02504           if (debug_threads)
02505             fprintf (stderr,
02506                      "fast tracepoint finished "
02507                      "collecting successfully.\n");
02508 
02509           /* We may have a deferred signal to report.  */
02510           if (dequeue_one_deferred_signal (event_child, &w))
02511             {
02512               if (debug_threads)
02513                 fprintf (stderr, "dequeued one signal.\n");
02514             }
02515           else
02516             {
02517               if (debug_threads)
02518                 fprintf (stderr, "no deferred signals.\n");
02519 
02520               if (stabilizing_threads)
02521                 {
02522                   ourstatus->kind = TARGET_WAITKIND_STOPPED;
02523                   ourstatus->value.sig = GDB_SIGNAL_0;
02524                   return ptid_of (event_child);
02525                 }
02526             }
02527         }
02528     }
02529 
02530   /* Check whether GDB would be interested in this event.  */
02531 
02532   /* If GDB is not interested in this signal, don't stop other
02533      threads, and don't report it to GDB.  Just resume the inferior
02534      right away.  We do this for threading-related signals as well as
02535      any that GDB specifically requested we ignore.  But never ignore
02536      SIGSTOP if we sent it ourselves, and do not ignore signals when
02537      stepping - they may require special handling to skip the signal
02538      handler.  */
02539   /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
02540      thread library?  */
02541   if (WIFSTOPPED (w)
02542       && current_inferior->last_resume_kind != resume_step
02543       && (
02544 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
02545           (current_process ()->private->thread_db != NULL
02546            && (WSTOPSIG (w) == __SIGRTMIN
02547                || WSTOPSIG (w) == __SIGRTMIN + 1))
02548           ||
02549 #endif
02550           (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
02551            && !(WSTOPSIG (w) == SIGSTOP
02552                 && current_inferior->last_resume_kind == resume_stop))))
02553     {
02554       siginfo_t info, *info_p;
02555 
02556       if (debug_threads)
02557         fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
02558                  WSTOPSIG (w), lwpid_of (event_child));
02559 
02560       if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child),
02561                   (PTRACE_TYPE_ARG3) 0, &info) == 0)
02562         info_p = &info;
02563       else
02564         info_p = NULL;
02565       linux_resume_one_lwp (event_child, event_child->stepping,
02566                             WSTOPSIG (w), info_p);
02567       goto retry;
02568     }
02569 
02570   /* Note that all addresses are always "out of the step range" when
02571      there's no range to begin with.  */
02572   in_step_range = lwp_in_step_range (event_child);
02573 
02574   /* If GDB wanted this thread to single step, and the thread is out
02575      of the step range, we always want to report the SIGTRAP, and let
02576      GDB handle it.  Watchpoints should always be reported.  So should
02577      signals we can't explain.  A SIGTRAP we can't explain could be a
02578      GDB breakpoint --- we may or not support Z0 breakpoints.  If we
02579      do, we're be able to handle GDB breakpoints on top of internal
02580      breakpoints, by handling the internal breakpoint and still
02581      reporting the event to GDB.  If we don't, we're out of luck, GDB
02582      won't see the breakpoint hit.  */
02583   report_to_gdb = (!maybe_internal_trap
02584                    || (current_inferior->last_resume_kind == resume_step
02585                        && !in_step_range)
02586                    || event_child->stopped_by_watchpoint
02587                    || (!step_over_finished && !in_step_range
02588                        && !bp_explains_trap && !trace_event)
02589                    || (gdb_breakpoint_here (event_child->stop_pc)
02590                        && gdb_condition_true_at_breakpoint (event_child->stop_pc)
02591                        && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
02592 
02593   run_breakpoint_commands (event_child->stop_pc);
02594 
02595   /* We found no reason GDB would want us to stop.  We either hit one
02596      of our own breakpoints, or finished an internal step GDB
02597      shouldn't know about.  */
02598   if (!report_to_gdb)
02599     {
02600       if (debug_threads)
02601         {
02602           if (bp_explains_trap)
02603             fprintf (stderr, "Hit a gdbserver breakpoint.\n");
02604           if (step_over_finished)
02605             fprintf (stderr, "Step-over finished.\n");
02606           if (trace_event)
02607             fprintf (stderr, "Tracepoint event.\n");
02608           if (lwp_in_step_range (event_child))
02609             fprintf (stderr, "Range stepping pc 0x%s [0x%s, 0x%s).\n",
02610                      paddress (event_child->stop_pc),
02611                      paddress (event_child->step_range_start),
02612                      paddress (event_child->step_range_end));
02613         }
02614 
02615       /* We're not reporting this breakpoint to GDB, so apply the
02616          decr_pc_after_break adjustment to the inferior's regcache
02617          ourselves.  */
02618 
02619       if (the_low_target.set_pc != NULL)
02620         {
02621           struct regcache *regcache
02622             = get_thread_regcache (get_lwp_thread (event_child), 1);
02623           (*the_low_target.set_pc) (regcache, event_child->stop_pc);
02624         }
02625 
02626       /* We may have finished stepping over a breakpoint.  If so,
02627          we've stopped and suspended all LWPs momentarily except the
02628          stepping one.  This is where we resume them all again.  We're
02629          going to keep waiting, so use proceed, which handles stepping
02630          over the next breakpoint.  */
02631       if (debug_threads)
02632         fprintf (stderr, "proceeding all threads.\n");
02633 
02634       if (step_over_finished)
02635         unsuspend_all_lwps (event_child);
02636 
02637       proceed_all_lwps ();
02638       goto retry;
02639     }
02640 
02641   if (debug_threads)
02642     {
02643       if (current_inferior->last_resume_kind == resume_step)
02644         {
02645           if (event_child->step_range_start == event_child->step_range_end)
02646             fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
02647           else if (!lwp_in_step_range (event_child))
02648             fprintf (stderr, "Out of step range, reporting event.\n");
02649         }
02650       if (event_child->stopped_by_watchpoint)
02651         fprintf (stderr, "Stopped by watchpoint.\n");
02652       if (gdb_breakpoint_here (event_child->stop_pc))
02653         fprintf (stderr, "Stopped by GDB breakpoint.\n");
02654       if (debug_threads)
02655         fprintf (stderr, "Hit a non-gdbserver trap event.\n");
02656     }
02657 
02658   /* Alright, we're going to report a stop.  */
02659 
02660   if (!non_stop && !stabilizing_threads)
02661     {
02662       /* In all-stop, stop all threads.  */
02663       stop_all_lwps (0, NULL);
02664 
02665       /* If we're not waiting for a specific LWP, choose an event LWP
02666          from among those that have had events.  Giving equal priority
02667          to all LWPs that have had events helps prevent
02668          starvation.  */
02669       if (ptid_equal (ptid, minus_one_ptid))
02670         {
02671           event_child->status_pending_p = 1;
02672           event_child->status_pending = w;
02673 
02674           select_event_lwp (&event_child);
02675 
02676           event_child->status_pending_p = 0;
02677           w = event_child->status_pending;
02678         }
02679 
02680       /* Now that we've selected our final event LWP, cancel any
02681          breakpoints in other LWPs that have hit a GDB breakpoint.
02682          See the comment in cancel_breakpoints_callback to find out
02683          why.  */
02684       find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
02685 
02686       /* If we were going a step-over, all other threads but the stepping one
02687          had been paused in start_step_over, with their suspend counts
02688          incremented.  We don't want to do a full unstop/unpause, because we're
02689          in all-stop mode (so we want threads stopped), but we still need to
02690          unsuspend the other threads, to decrement their `suspended' count
02691          back.  */
02692       if (step_over_finished)
02693         unsuspend_all_lwps (event_child);
02694 
02695       /* Stabilize threads (move out of jump pads).  */
02696       stabilize_threads ();
02697     }
02698   else
02699     {
02700       /* If we just finished a step-over, then all threads had been
02701          momentarily paused.  In all-stop, that's fine, we want
02702          threads stopped by now anyway.  In non-stop, we need to
02703          re-resume threads that GDB wanted to be running.  */
02704       if (step_over_finished)
02705         unstop_all_lwps (1, event_child);
02706     }
02707 
02708   ourstatus->kind = TARGET_WAITKIND_STOPPED;
02709 
02710   if (current_inferior->last_resume_kind == resume_stop
02711       && WSTOPSIG (w) == SIGSTOP)
02712     {
02713       /* A thread that has been requested to stop by GDB with vCont;t,
02714          and it stopped cleanly, so report as SIG0.  The use of
02715          SIGSTOP is an implementation detail.  */
02716       ourstatus->value.sig = GDB_SIGNAL_0;
02717     }
02718   else if (current_inferior->last_resume_kind == resume_stop
02719            && WSTOPSIG (w) != SIGSTOP)
02720     {
02721       /* A thread that has been requested to stop by GDB with vCont;t,
02722          but, it stopped for other reasons.  */
02723       ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
02724     }
02725   else
02726     {
02727       ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
02728     }
02729 
02730   gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
02731 
02732   if (debug_threads)
02733     fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
02734              target_pid_to_str (ptid_of (event_child)),
02735              ourstatus->kind,
02736              ourstatus->value.sig);
02737 
02738   return ptid_of (event_child);
02739 }
02740 
02741 /* Get rid of any pending event in the pipe.  */
02742 static void
02743 async_file_flush (void)
02744 {
02745   int ret;
02746   char buf;
02747 
02748   do
02749     ret = read (linux_event_pipe[0], &buf, 1);
02750   while (ret >= 0 || (ret == -1 && errno == EINTR));
02751 }
02752 
02753 /* Put something in the pipe, so the event loop wakes up.  */
02754 static void
02755 async_file_mark (void)
02756 {
02757   int ret;
02758 
02759   async_file_flush ();
02760 
02761   do
02762     ret = write (linux_event_pipe[1], "+", 1);
02763   while (ret == 0 || (ret == -1 && errno == EINTR));
02764 
02765   /* Ignore EAGAIN.  If the pipe is full, the event loop will already
02766      be awakened anyway.  */
02767 }
02768 
02769 static ptid_t
02770 linux_wait (ptid_t ptid,
02771             struct target_waitstatus *ourstatus, int target_options)
02772 {
02773   ptid_t event_ptid;
02774 
02775   if (debug_threads)
02776     fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
02777 
02778   /* Flush the async file first.  */
02779   if (target_is_async_p ())
02780     async_file_flush ();
02781 
02782   event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
02783 
02784   /* If at least one stop was reported, there may be more.  A single
02785      SIGCHLD can signal more than one child stop.  */
02786   if (target_is_async_p ()
02787       && (target_options & TARGET_WNOHANG) != 0
02788       && !ptid_equal (event_ptid, null_ptid))
02789     async_file_mark ();
02790 
02791   return event_ptid;
02792 }
02793 
02794 /* Send a signal to an LWP.  */
02795 
02796 static int
02797 kill_lwp (unsigned long lwpid, int signo)
02798 {
02799   /* Use tkill, if possible, in case we are using nptl threads.  If tkill
02800      fails, then we are not using nptl threads and we should be using kill.  */
02801 
02802 #ifdef __NR_tkill
02803   {
02804     static int tkill_failed;
02805 
02806     if (!tkill_failed)
02807       {
02808         int ret;
02809 
02810         errno = 0;
02811         ret = syscall (__NR_tkill, lwpid, signo);
02812         if (errno != ENOSYS)
02813           return ret;
02814         tkill_failed = 1;
02815       }
02816   }
02817 #endif
02818 
02819   return kill (lwpid, signo);
02820 }
02821 
02822 void
02823 linux_stop_lwp (struct lwp_info *lwp)
02824 {
02825   send_sigstop (lwp);
02826 }
02827 
02828 static void
02829 send_sigstop (struct lwp_info *lwp)
02830 {
02831   int pid;
02832 
02833   pid = lwpid_of (lwp);
02834 
02835   /* If we already have a pending stop signal for this process, don't
02836      send another.  */
02837   if (lwp->stop_expected)
02838     {
02839       if (debug_threads)
02840         fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
02841 
02842       return;
02843     }
02844 
02845   if (debug_threads)
02846     fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
02847 
02848   lwp->stop_expected = 1;
02849   kill_lwp (pid, SIGSTOP);
02850 }
02851 
02852 static int
02853 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02854 {
02855   struct lwp_info *lwp = (struct lwp_info *) entry;
02856 
02857   /* Ignore EXCEPT.  */
02858   if (lwp == except)
02859     return 0;
02860 
02861   if (lwp->stopped)
02862     return 0;
02863 
02864   send_sigstop (lwp);
02865   return 0;
02866 }
02867 
02868 /* Increment the suspend count of an LWP, and stop it, if not stopped
02869    yet.  */
02870 static int
02871 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
02872                                    void *except)
02873 {
02874   struct lwp_info *lwp = (struct lwp_info *) entry;
02875 
02876   /* Ignore EXCEPT.  */
02877   if (lwp == except)
02878     return 0;
02879 
02880   lwp->suspended++;
02881 
02882   return send_sigstop_callback (entry, except);
02883 }
02884 
02885 static void
02886 mark_lwp_dead (struct lwp_info *lwp, int wstat)
02887 {
02888   /* It's dead, really.  */
02889   lwp->dead = 1;
02890 
02891   /* Store the exit status for later.  */
02892   lwp->status_pending_p = 1;
02893   lwp->status_pending = wstat;
02894 
02895   /* Prevent trying to stop it.  */
02896   lwp->stopped = 1;
02897 
02898   /* No further stops are expected from a dead lwp.  */
02899   lwp->stop_expected = 0;
02900 }
02901 
02902 static void
02903 wait_for_sigstop (struct inferior_list_entry *entry)
02904 {
02905   struct lwp_info *lwp = (struct lwp_info *) entry;
02906   struct thread_info *saved_inferior;
02907   int wstat;
02908   ptid_t saved_tid;
02909   ptid_t ptid;
02910   int pid;
02911 
02912   if (lwp->stopped)
02913     {
02914       if (debug_threads)
02915         fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
02916                  lwpid_of (lwp));
02917       return;
02918     }
02919 
02920   saved_inferior = current_inferior;
02921   if (saved_inferior != NULL)
02922     saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
02923   else
02924     saved_tid = null_ptid; /* avoid bogus unused warning */
02925 
02926   ptid = lwp->head.id;
02927 
02928   if (debug_threads)
02929     fprintf (stderr, "wait_for_sigstop: pulling one event\n");
02930 
02931   pid = linux_wait_for_event (ptid, &wstat, __WALL);
02932 
02933   /* If we stopped with a non-SIGSTOP signal, save it for later
02934      and record the pending SIGSTOP.  If the process exited, just
02935      return.  */
02936   if (WIFSTOPPED (wstat))
02937     {
02938       if (debug_threads)
02939         fprintf (stderr, "LWP %ld stopped with signal %d\n",
02940                  lwpid_of (lwp), WSTOPSIG (wstat));
02941 
02942       if (WSTOPSIG (wstat) != SIGSTOP)
02943         {
02944           if (debug_threads)
02945             fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
02946                      lwpid_of (lwp), wstat);
02947 
02948           lwp->status_pending_p = 1;
02949           lwp->status_pending = wstat;
02950         }
02951     }
02952   else
02953     {
02954       if (debug_threads)
02955         fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
02956 
02957       lwp = find_lwp_pid (pid_to_ptid (pid));
02958       if (lwp)
02959         {
02960           /* Leave this status pending for the next time we're able to
02961              report it.  In the mean time, we'll report this lwp as
02962              dead to GDB, so GDB doesn't try to read registers and
02963              memory from it.  This can only happen if this was the
02964              last thread of the process; otherwise, PID is removed
02965              from the thread tables before linux_wait_for_event
02966              returns.  */
02967           mark_lwp_dead (lwp, wstat);
02968         }
02969     }
02970 
02971   if (saved_inferior == NULL || linux_thread_alive (saved_tid))
02972     current_inferior = saved_inferior;
02973   else
02974     {
02975       if (debug_threads)
02976         fprintf (stderr, "Previously current thread died.\n");
02977 
02978       if (non_stop)
02979         {
02980           /* We can't change the current inferior behind GDB's back,
02981              otherwise, a subsequent command may apply to the wrong
02982              process.  */
02983           current_inferior = NULL;
02984         }
02985       else
02986         {
02987           /* Set a valid thread as current.  */
02988           set_desired_inferior (0);
02989         }
02990     }
02991 }
02992 
02993 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
02994    move it out, because we need to report the stop event to GDB.  For
02995    example, if the user puts a breakpoint in the jump pad, it's
02996    because she wants to debug it.  */
02997 
02998 static int
02999 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
03000 {
03001   struct lwp_info *lwp = (struct lwp_info *) entry;
03002   struct thread_info *thread = get_lwp_thread (lwp);
03003 
03004   gdb_assert (lwp->suspended == 0);
03005   gdb_assert (lwp->stopped);
03006 
03007   /* Allow debugging the jump pad, gdb_collect, etc..  */
03008   return (supports_fast_tracepoints ()
03009           && agent_loaded_p ()
03010           && (gdb_breakpoint_here (lwp->stop_pc)
03011               || lwp->stopped_by_watchpoint
03012               || thread->last_resume_kind == resume_step)
03013           && linux_fast_tracepoint_collecting (lwp, NULL));
03014 }
03015 
03016 static void
03017 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
03018 {
03019   struct lwp_info *lwp = (struct lwp_info *) entry;
03020   struct thread_info *thread = get_lwp_thread (lwp);
03021   int *wstat;
03022 
03023   gdb_assert (lwp->suspended == 0);
03024   gdb_assert (lwp->stopped);
03025 
03026   wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
03027 
03028   /* Allow debugging the jump pad, gdb_collect, etc.  */
03029   if (!gdb_breakpoint_here (lwp->stop_pc)
03030       && !lwp->stopped_by_watchpoint
03031       && thread->last_resume_kind != resume_step
03032       && maybe_move_out_of_jump_pad (lwp, wstat))
03033     {
03034       if (debug_threads)
03035         fprintf (stderr,
03036                  "LWP %ld needs stabilizing (in jump pad)\n",
03037                  lwpid_of (lwp));
03038 
03039       if (wstat)
03040         {
03041           lwp->status_pending_p = 0;
03042           enqueue_one_deferred_signal (lwp, wstat);
03043 
03044           if (debug_threads)
03045             fprintf (stderr,
03046                      "Signal %d for LWP %ld deferred "
03047                      "(in jump pad)\n",
03048                      WSTOPSIG (*wstat), lwpid_of (lwp));
03049         }
03050 
03051       linux_resume_one_lwp (lwp, 0, 0, NULL);
03052     }
03053   else
03054     lwp->suspended++;
03055 }
03056 
03057 static int
03058 lwp_running (struct inferior_list_entry *entry, void *data)
03059 {
03060   struct lwp_info *lwp = (struct lwp_info *) entry;
03061 
03062   if (lwp->dead)
03063     return 0;
03064   if (lwp->stopped)
03065     return 0;
03066   return 1;
03067 }
03068 
03069 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
03070    If SUSPEND, then also increase the suspend count of every LWP,
03071    except EXCEPT.  */
03072 
03073 static void
03074 stop_all_lwps (int suspend, struct lwp_info *except)
03075 {
03076   /* Should not be called recursively.  */
03077   gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
03078 
03079   stopping_threads = (suspend
03080                       ? STOPPING_AND_SUSPENDING_THREADS
03081                       : STOPPING_THREADS);
03082 
03083   if (suspend)
03084     find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
03085   else
03086     find_inferior (&all_lwps, send_sigstop_callback, except);
03087   for_each_inferior (&all_lwps, wait_for_sigstop);
03088   stopping_threads = NOT_STOPPING_THREADS;
03089 }
03090 
03091 /* Resume execution of the inferior process.
03092    If STEP is nonzero, single-step it.
03093    If SIGNAL is nonzero, give it that signal.  */
03094 
03095 static void
03096 linux_resume_one_lwp (struct lwp_info *lwp,
03097                       int step, int signal, siginfo_t *info)
03098 {
03099   struct thread_info *saved_inferior;
03100   int fast_tp_collecting;
03101 
03102   if (lwp->stopped == 0)
03103     return;
03104 
03105   fast_tp_collecting = lwp->collecting_fast_tracepoint;
03106 
03107   gdb_assert (!stabilizing_threads || fast_tp_collecting);
03108 
03109   /* Cancel actions that rely on GDB not changing the PC (e.g., the
03110      user used the "jump" command, or "set $pc = foo").  */
03111   if (lwp->stop_pc != get_pc (lwp))
03112     {
03113       /* Collecting 'while-stepping' actions doesn't make sense
03114          anymore.  */
03115       release_while_stepping_state_list (get_lwp_thread (lwp));
03116     }
03117 
03118   /* If we have pending signals or status, and a new signal, enqueue the
03119      signal.  Also enqueue the signal if we are waiting to reinsert a
03120      breakpoint; it will be picked up again below.  */
03121   if (signal != 0
03122       && (lwp->status_pending_p
03123           || lwp->pending_signals != NULL
03124           || lwp->bp_reinsert != 0
03125           || fast_tp_collecting))
03126     {
03127       struct pending_signals *p_sig;
03128       p_sig = xmalloc (sizeof (*p_sig));
03129       p_sig->prev = lwp->pending_signals;
03130       p_sig->signal = signal;
03131       if (info == NULL)
03132         memset (&p_sig->info, 0, sizeof (siginfo_t));
03133       else
03134         memcpy (&p_sig->info, info, sizeof (siginfo_t));
03135       lwp->pending_signals = p_sig;
03136     }
03137 
03138   if (lwp->status_pending_p)
03139     {
03140       if (debug_threads)
03141         fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
03142                  " has pending status\n",
03143                  lwpid_of (lwp), step ? "step" : "continue", signal,
03144                  lwp->stop_expected ? "expected" : "not expected");
03145       return;
03146     }
03147 
03148   saved_inferior = current_inferior;
03149   current_inferior = get_lwp_thread (lwp);
03150 
03151   if (debug_threads)
03152     fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
03153              lwpid_of (lwp), step ? "step" : "continue", signal,
03154              lwp->stop_expected ? "expected" : "not expected");
03155 
03156   /* This bit needs some thinking about.  If we get a signal that
03157      we must report while a single-step reinsert is still pending,
03158      we often end up resuming the thread.  It might be better to
03159      (ew) allow a stack of pending events; then we could be sure that
03160      the reinsert happened right away and not lose any signals.
03161 
03162      Making this stack would also shrink the window in which breakpoints are
03163      uninserted (see comment in linux_wait_for_lwp) but not enough for
03164      complete correctness, so it won't solve that problem.  It may be
03165      worthwhile just to solve this one, however.  */
03166   if (lwp->bp_reinsert != 0)
03167     {
03168       if (debug_threads)
03169         fprintf (stderr, "  pending reinsert at 0x%s\n",
03170                  paddress (lwp->bp_reinsert));
03171 
03172       if (can_hardware_single_step ())
03173         {
03174           if (fast_tp_collecting == 0)
03175             {
03176               if (step == 0)
03177                 fprintf (stderr, "BAD - reinserting but not stepping.\n");
03178               if (lwp->suspended)
03179                 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
03180                          lwp->suspended);
03181             }
03182 
03183           step = 1;
03184         }
03185 
03186       /* Postpone any pending signal.  It was enqueued above.  */
03187       signal = 0;
03188     }
03189 
03190   if (fast_tp_collecting == 1)
03191     {
03192       if (debug_threads)
03193         fprintf (stderr, "\
03194 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
03195                  lwpid_of (lwp));
03196 
03197       /* Postpone any pending signal.  It was enqueued above.  */
03198       signal = 0;
03199     }
03200   else if (fast_tp_collecting == 2)
03201     {
03202       if (debug_threads)
03203         fprintf (stderr, "\
03204 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
03205                  lwpid_of (lwp));
03206 
03207       if (can_hardware_single_step ())
03208         step = 1;
03209       else
03210         fatal ("moving out of jump pad single-stepping"
03211                " not implemented on this target");
03212 
03213       /* Postpone any pending signal.  It was enqueued above.  */
03214       signal = 0;
03215     }
03216 
03217   /* If we have while-stepping actions in this thread set it stepping.
03218      If we have a signal to deliver, it may or may not be set to
03219      SIG_IGN, we don't know.  Assume so, and allow collecting
03220      while-stepping into a signal handler.  A possible smart thing to
03221      do would be to set an internal breakpoint at the signal return
03222      address, continue, and carry on catching this while-stepping
03223      action only when that breakpoint is hit.  A future
03224      enhancement.  */
03225   if (get_lwp_thread (lwp)->while_stepping != NULL
03226       && can_hardware_single_step ())
03227     {
03228       if (debug_threads)
03229         fprintf (stderr,
03230                  "lwp %ld has a while-stepping action -> forcing step.\n",
03231                  lwpid_of (lwp));
03232       step = 1;
03233     }
03234 
03235   if (debug_threads && the_low_target.get_pc != NULL)
03236     {
03237       struct regcache *regcache = get_thread_regcache (current_inferior, 1);
03238       CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
03239       fprintf (stderr, "  resuming from pc 0x%lx\n", (long) pc);
03240     }
03241 
03242   /* If we have pending signals, consume one unless we are trying to
03243      reinsert a breakpoint or we're trying to finish a fast tracepoint
03244      collect.  */
03245   if (lwp->pending_signals != NULL
03246       && lwp->bp_reinsert == 0
03247       && fast_tp_collecting == 0)
03248     {
03249       struct pending_signals **p_sig;
03250 
03251       p_sig = &lwp->pending_signals;
03252       while ((*p_sig)->prev != NULL)
03253         p_sig = &(*p_sig)->prev;
03254 
03255       signal = (*p_sig)->signal;
03256       if ((*p_sig)->info.si_signo != 0)
03257         ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
03258                 &(*p_sig)->info);
03259 
03260       free (*p_sig);
03261       *p_sig = NULL;
03262     }
03263 
03264   if (the_low_target.prepare_to_resume != NULL)
03265     the_low_target.prepare_to_resume (lwp);
03266 
03267   regcache_invalidate_thread (get_lwp_thread (lwp));
03268   errno = 0;
03269   lwp->stopped = 0;
03270   lwp->stopped_by_watchpoint = 0;
03271   lwp->stepping = step;
03272   ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp),
03273           (PTRACE_TYPE_ARG3) 0,
03274           /* Coerce to a uintptr_t first to avoid potential gcc warning
03275              of coercing an 8 byte integer to a 4 byte pointer.  */
03276           (PTRACE_TYPE_ARG4) (uintptr_t) signal);
03277 
03278   current_inferior = saved_inferior;
03279   if (errno)
03280     {
03281       /* ESRCH from ptrace either means that the thread was already
03282          running (an error) or that it is gone (a race condition).  If
03283          it's gone, we will get a notification the next time we wait,
03284          so we can ignore the error.  We could differentiate these
03285          two, but it's tricky without waiting; the thread still exists
03286          as a zombie, so sending it signal 0 would succeed.  So just
03287          ignore ESRCH.  */
03288       if (errno == ESRCH)
03289         return;
03290 
03291       perror_with_name ("ptrace");
03292     }
03293 }
03294 
03295 struct thread_resume_array
03296 {
03297   struct thread_resume *resume;
03298   size_t n;
03299 };
03300 
03301 /* This function is called once per thread.  We look up the thread
03302    in RESUME_PTR, and mark the thread with a pointer to the appropriate
03303    resume request.
03304 
03305    This algorithm is O(threads * resume elements), but resume elements
03306    is small (and will remain small at least until GDB supports thread
03307    suspension).  */
03308 static int
03309 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
03310 {
03311   struct lwp_info *lwp;
03312   struct thread_info *thread;
03313   int ndx;
03314   struct thread_resume_array *r;
03315 
03316   thread = (struct thread_info *) entry;
03317   lwp = get_thread_lwp (thread);
03318   r = arg;
03319 
03320   for (ndx = 0; ndx < r->n; ndx++)
03321     {
03322       ptid_t ptid = r->resume[ndx].thread;
03323       if (ptid_equal (ptid, minus_one_ptid)
03324           || ptid_equal (ptid, entry->id)
03325           /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
03326              of PID'.  */
03327           || (ptid_get_pid (ptid) == pid_of (lwp)
03328               && (ptid_is_pid (ptid)
03329                   || ptid_get_lwp (ptid) == -1)))
03330         {
03331           if (r->resume[ndx].kind == resume_stop
03332               && thread->last_resume_kind == resume_stop)
03333             {
03334               if (debug_threads)
03335                 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
03336                          thread->last_status.kind == TARGET_WAITKIND_STOPPED
03337                          ? "stopped"
03338                          : "stopping",
03339                          lwpid_of (lwp));
03340 
03341               continue;
03342             }
03343 
03344           lwp->resume = &r->resume[ndx];
03345           thread->last_resume_kind = lwp->resume->kind;
03346 
03347           lwp->step_range_start = lwp->resume->step_range_start;
03348           lwp->step_range_end = lwp->resume->step_range_end;
03349 
03350           /* If we had a deferred signal to report, dequeue one now.
03351              This can happen if LWP gets more than one signal while
03352              trying to get out of a jump pad.  */
03353           if (lwp->stopped
03354               && !lwp->status_pending_p
03355               && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
03356             {
03357               lwp->status_pending_p = 1;
03358 
03359               if (debug_threads)
03360                 fprintf (stderr,
03361                          "Dequeueing deferred signal %d for LWP %ld, "
03362                          "leaving status pending.\n",
03363                          WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
03364             }
03365 
03366           return 0;
03367         }
03368     }
03369 
03370   /* No resume action for this thread.  */
03371   lwp->resume = NULL;
03372 
03373   return 0;
03374 }
03375 
03376 
03377 /* Set *FLAG_P if this lwp has an interesting status pending.  */
03378 static int
03379 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
03380 {
03381   struct lwp_info *lwp = (struct lwp_info *) entry;
03382 
03383   /* LWPs which will not be resumed are not interesting, because
03384      we might not wait for them next time through linux_wait.  */
03385   if (lwp->resume == NULL)
03386     return 0;
03387 
03388   if (lwp->status_pending_p)
03389     * (int *) flag_p = 1;
03390 
03391   return 0;
03392 }
03393 
03394 /* Return 1 if this lwp that GDB wants running is stopped at an
03395    internal breakpoint that we need to step over.  It assumes that any
03396    required STOP_PC adjustment has already been propagated to the
03397    inferior's regcache.  */
03398 
03399 static int
03400 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
03401 {
03402   struct lwp_info *lwp = (struct lwp_info *) entry;
03403   struct thread_info *thread;
03404   struct thread_info *saved_inferior;
03405   CORE_ADDR pc;
03406 
03407   /* LWPs which will not be resumed are not interesting, because we
03408      might not wait for them next time through linux_wait.  */
03409 
03410   if (!lwp->stopped)
03411     {
03412       if (debug_threads)
03413         fprintf (stderr,
03414                  "Need step over [LWP %ld]? Ignoring, not stopped\n",
03415                  lwpid_of (lwp));
03416       return 0;
03417     }
03418 
03419   thread = get_lwp_thread (lwp);
03420 
03421   if (thread->last_resume_kind == resume_stop)
03422     {
03423       if (debug_threads)
03424         fprintf (stderr,
03425                  "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
03426                  lwpid_of (lwp));
03427       return 0;
03428     }
03429 
03430   gdb_assert (lwp->suspended >= 0);
03431 
03432   if (lwp->suspended)
03433     {
03434       if (debug_threads)
03435         fprintf (stderr,
03436                  "Need step over [LWP %ld]? Ignoring, suspended\n",
03437                  lwpid_of (lwp));
03438       return 0;
03439     }
03440 
03441   if (!lwp->need_step_over)
03442     {
03443       if (debug_threads)
03444         fprintf (stderr,
03445                  "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
03446     }
03447 
03448   if (lwp->status_pending_p)
03449     {
03450       if (debug_threads)
03451         fprintf (stderr,
03452                  "Need step over [LWP %ld]? Ignoring, has pending status.\n",
03453                  lwpid_of (lwp));
03454       return 0;
03455     }
03456 
03457   /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
03458      or we have.  */
03459   pc = get_pc (lwp);
03460 
03461   /* If the PC has changed since we stopped, then don't do anything,
03462      and let the breakpoint/tracepoint be hit.  This happens if, for
03463      instance, GDB handled the decr_pc_after_break subtraction itself,
03464      GDB is OOL stepping this thread, or the user has issued a "jump"
03465      command, or poked thread's registers herself.  */
03466   if (pc != lwp->stop_pc)
03467     {
03468       if (debug_threads)
03469         fprintf (stderr,
03470                  "Need step over [LWP %ld]? Cancelling, PC was changed.  "
03471                  "Old stop_pc was 0x%s, PC is now 0x%s\n",
03472                  lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
03473 
03474       lwp->need_step_over = 0;
03475       return 0;
03476     }
03477 
03478   saved_inferior = current_inferior;
03479   current_inferior = thread;
03480 
03481   /* We can only step over breakpoints we know about.  */
03482   if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
03483     {
03484       /* Don't step over a breakpoint that GDB expects to hit
03485          though.  If the condition is being evaluated on the target's side
03486          and it evaluate to false, step over this breakpoint as well.  */
03487       if (gdb_breakpoint_here (pc)
03488           && gdb_condition_true_at_breakpoint (pc)
03489           && gdb_no_commands_at_breakpoint (pc))
03490         {
03491           if (debug_threads)
03492             fprintf (stderr,
03493                      "Need step over [LWP %ld]? yes, but found"
03494                      " GDB breakpoint at 0x%s; skipping step over\n",
03495                      lwpid_of (lwp), paddress (pc));
03496 
03497           current_inferior = saved_inferior;
03498           return 0;
03499         }
03500       else
03501         {
03502           if (debug_threads)
03503             fprintf (stderr,
03504                      "Need step over [LWP %ld]? yes, "
03505                      "found breakpoint at 0x%s\n",
03506                      lwpid_of (lwp), paddress (pc));
03507 
03508           /* We've found an lwp that needs stepping over --- return 1 so
03509              that find_inferior stops looking.  */
03510           current_inferior = saved_inferior;
03511 
03512           /* If the step over is cancelled, this is set again.  */
03513           lwp->need_step_over = 0;
03514           return 1;
03515         }
03516     }
03517 
03518   current_inferior = saved_inferior;
03519 
03520   if (debug_threads)
03521     fprintf (stderr,
03522              "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
03523              lwpid_of (lwp), paddress (pc));
03524 
03525   return 0;
03526 }
03527 
03528 /* Start a step-over operation on LWP.  When LWP stopped at a
03529    breakpoint, to make progress, we need to remove the breakpoint out
03530    of the way.  If we let other threads run while we do that, they may
03531    pass by the breakpoint location and miss hitting it.  To avoid
03532    that, a step-over momentarily stops all threads while LWP is
03533    single-stepped while the breakpoint is temporarily uninserted from
03534    the inferior.  When the single-step finishes, we reinsert the
03535    breakpoint, and let all threads that are supposed to be running,
03536    run again.
03537 
03538    On targets that don't support hardware single-step, we don't
03539    currently support full software single-stepping.  Instead, we only
03540    support stepping over the thread event breakpoint, by asking the
03541    low target where to place a reinsert breakpoint.  Since this
03542    routine assumes the breakpoint being stepped over is a thread event
03543    breakpoint, it usually assumes the return address of the current
03544    function is a good enough place to set the reinsert breakpoint.  */
03545 
03546 static int
03547 start_step_over (struct lwp_info *lwp)
03548 {
03549   struct thread_info *saved_inferior;
03550   CORE_ADDR pc;
03551   int step;
03552 
03553   if (debug_threads)
03554     fprintf (stderr,
03555              "Starting step-over on LWP %ld.  Stopping all threads\n",
03556              lwpid_of (lwp));
03557 
03558   stop_all_lwps (1, lwp);
03559   gdb_assert (lwp->suspended == 0);
03560 
03561   if (debug_threads)
03562     fprintf (stderr, "Done stopping all threads for step-over.\n");
03563 
03564   /* Note, we should always reach here with an already adjusted PC,
03565      either by GDB (if we're resuming due to GDB's request), or by our
03566      caller, if we just finished handling an internal breakpoint GDB
03567      shouldn't care about.  */
03568   pc = get_pc (lwp);
03569 
03570   saved_inferior = current_inferior;
03571   current_inferior = get_lwp_thread (lwp);
03572 
03573   lwp->bp_reinsert = pc;
03574   uninsert_breakpoints_at (pc);
03575   uninsert_fast_tracepoint_jumps_at (pc);
03576 
03577   if (can_hardware_single_step ())
03578     {
03579       step = 1;
03580     }
03581   else
03582     {
03583       CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
03584       set_reinsert_breakpoint (raddr);
03585       step = 0;
03586     }
03587 
03588   current_inferior = saved_inferior;
03589 
03590   linux_resume_one_lwp (lwp, step, 0, NULL);
03591 
03592   /* Require next event from this LWP.  */
03593   step_over_bkpt = lwp->head.id;
03594   return 1;
03595 }
03596 
03597 /* Finish a step-over.  Reinsert the breakpoint we had uninserted in
03598    start_step_over, if still there, and delete any reinsert
03599    breakpoints we've set, on non hardware single-step targets.  */
03600 
03601 static int
03602 finish_step_over (struct lwp_info *lwp)
03603 {
03604   if (lwp->bp_reinsert != 0)
03605     {
03606       if (debug_threads)
03607         fprintf (stderr, "Finished step over.\n");
03608 
03609       /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
03610          may be no breakpoint to reinsert there by now.  */
03611       reinsert_breakpoints_at (lwp->bp_reinsert);
03612       reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
03613 
03614       lwp->bp_reinsert = 0;
03615 
03616       /* Delete any software-single-step reinsert breakpoints.  No
03617          longer needed.  We don't have to worry about other threads
03618          hitting this trap, and later not being able to explain it,
03619          because we were stepping over a breakpoint, and we hold all
03620          threads but LWP stopped while doing that.  */
03621       if (!can_hardware_single_step ())
03622         delete_reinsert_breakpoints ();
03623 
03624       step_over_bkpt = null_ptid;
03625       return 1;
03626     }
03627   else
03628     return 0;
03629 }
03630 
03631 /* This function is called once per thread.  We check the thread's resume
03632    request, which will tell us whether to resume, step, or leave the thread
03633    stopped; and what signal, if any, it should be sent.
03634 
03635    For threads which we aren't explicitly told otherwise, we preserve
03636    the stepping flag; this is used for stepping over gdbserver-placed
03637    breakpoints.
03638 
03639    If pending_flags was set in any thread, we queue any needed
03640    signals, since we won't actually resume.  We already have a pending
03641    event to report, so we don't need to preserve any step requests;
03642    they should be re-issued if necessary.  */
03643 
03644 static int
03645 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
03646 {
03647   struct lwp_info *lwp;
03648   struct thread_info *thread;
03649   int step;
03650   int leave_all_stopped = * (int *) arg;
03651   int leave_pending;
03652 
03653   thread = (struct thread_info *) entry;
03654   lwp = get_thread_lwp (thread);
03655 
03656   if (lwp->resume == NULL)
03657     return 0;
03658 
03659   if (lwp->resume->kind == resume_stop)
03660     {
03661       if (debug_threads)
03662         fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
03663 
03664       if (!lwp->stopped)
03665         {
03666           if (debug_threads)
03667             fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
03668 
03669           /* Stop the thread, and wait for the event asynchronously,
03670              through the event loop.  */
03671           send_sigstop (lwp);
03672         }
03673       else
03674         {
03675           if (debug_threads)
03676             fprintf (stderr, "already stopped LWP %ld\n",
03677                      lwpid_of (lwp));
03678 
03679           /* The LWP may have been stopped in an internal event that
03680              was not meant to be notified back to GDB (e.g., gdbserver
03681              breakpoint), so we should be reporting a stop event in
03682              this case too.  */
03683 
03684           /* If the thread already has a pending SIGSTOP, this is a
03685              no-op.  Otherwise, something later will presumably resume
03686              the thread and this will cause it to cancel any pending
03687              operation, due to last_resume_kind == resume_stop.  If
03688              the thread already has a pending status to report, we
03689              will still report it the next time we wait - see
03690              status_pending_p_callback.  */
03691 
03692           /* If we already have a pending signal to report, then
03693              there's no need to queue a SIGSTOP, as this means we're
03694              midway through moving the LWP out of the jumppad, and we
03695              will report the pending signal as soon as that is
03696              finished.  */
03697           if (lwp->pending_signals_to_report == NULL)
03698             send_sigstop (lwp);
03699         }
03700 
03701       /* For stop requests, we're done.  */
03702       lwp->resume = NULL;
03703       thread->last_status.kind = TARGET_WAITKIND_IGNORE;
03704       return 0;
03705     }
03706 
03707   /* If this thread which is about to be resumed has a pending status,
03708      then don't resume any threads - we can just report the pending
03709      status.  Make sure to queue any signals that would otherwise be
03710      sent.  In all-stop mode, we do this decision based on if *any*
03711      thread has a pending status.  If there's a thread that needs the
03712      step-over-breakpoint dance, then don't resume any other thread
03713      but that particular one.  */
03714   leave_pending = (lwp->status_pending_p || leave_all_stopped);
03715 
03716   if (!leave_pending)
03717     {
03718       if (debug_threads)
03719         fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
03720 
03721       step = (lwp->resume->kind == resume_step);
03722       linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
03723     }
03724   else
03725     {
03726       if (debug_threads)
03727         fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
03728 
03729       /* If we have a new signal, enqueue the signal.  */
03730       if (lwp->resume->sig != 0)
03731         {
03732           struct pending_signals *p_sig;
03733           p_sig = xmalloc (sizeof (*p_sig));
03734           p_sig->prev = lwp->pending_signals;
03735           p_sig->signal = lwp->resume->sig;
03736           memset (&p_sig->info, 0, sizeof (siginfo_t));
03737 
03738           /* If this is the same signal we were previously stopped by,
03739              make sure to queue its siginfo.  We can ignore the return
03740              value of ptrace; if it fails, we'll skip
03741              PTRACE_SETSIGINFO.  */
03742           if (WIFSTOPPED (lwp->last_status)
03743               && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
03744             ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
03745                     &p_sig->info);
03746 
03747           lwp->pending_signals = p_sig;
03748         }
03749     }
03750 
03751   thread->last_status.kind = TARGET_WAITKIND_IGNORE;
03752   lwp->resume = NULL;
03753   return 0;
03754 }
03755 
03756 static void
03757 linux_resume (struct thread_resume *resume_info, size_t n)
03758 {
03759   struct thread_resume_array array = { resume_info, n };
03760   struct lwp_info *need_step_over = NULL;
03761   int any_pending;
03762   int leave_all_stopped;
03763 
03764   find_inferior (&all_threads, linux_set_resume_request, &array);
03765 
03766   /* If there is a thread which would otherwise be resumed, which has
03767      a pending status, then don't resume any threads - we can just
03768      report the pending status.  Make sure to queue any signals that
03769      would otherwise be sent.  In non-stop mode, we'll apply this
03770      logic to each thread individually.  We consume all pending events
03771      before considering to start a step-over (in all-stop).  */
03772   any_pending = 0;
03773   if (!non_stop)
03774     find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
03775 
03776   /* If there is a thread which would otherwise be resumed, which is
03777      stopped at a breakpoint that needs stepping over, then don't
03778      resume any threads - have it step over the breakpoint with all
03779      other threads stopped, then resume all threads again.  Make sure
03780      to queue any signals that would otherwise be delivered or
03781      queued.  */
03782   if (!any_pending && supports_breakpoints ())
03783     need_step_over
03784       = (struct lwp_info *) find_inferior (&all_lwps,
03785                                            need_step_over_p, NULL);
03786 
03787   leave_all_stopped = (need_step_over != NULL || any_pending);
03788 
03789   if (debug_threads)
03790     {
03791       if (need_step_over != NULL)
03792         fprintf (stderr, "Not resuming all, need step over\n");
03793       else if (any_pending)
03794         fprintf (stderr,
03795                  "Not resuming, all-stop and found "
03796                  "an LWP with pending status\n");
03797       else
03798         fprintf (stderr, "Resuming, no pending status or step over needed\n");
03799     }
03800 
03801   /* Even if we're leaving threads stopped, queue all signals we'd
03802      otherwise deliver.  */
03803   find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
03804 
03805   if (need_step_over)
03806     start_step_over (need_step_over);
03807 }
03808 
03809 /* This function is called once per thread.  We check the thread's
03810    last resume request, which will tell us whether to resume, step, or
03811    leave the thread stopped.  Any signal the client requested to be
03812    delivered has already been enqueued at this point.
03813 
03814    If any thread that GDB wants running is stopped at an internal
03815    breakpoint that needs stepping over, we start a step-over operation
03816    on that particular thread, and leave all others stopped.  */
03817 
03818 static int
03819 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
03820 {
03821   struct lwp_info *lwp = (struct lwp_info *) entry;
03822   struct thread_info *thread;
03823   int step;
03824 
03825   if (lwp == except)
03826     return 0;
03827 
03828   if (debug_threads)
03829     fprintf (stderr,
03830              "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
03831 
03832   if (!lwp->stopped)
03833     {
03834       if (debug_threads)
03835         fprintf (stderr, "   LWP %ld already running\n", lwpid_of (lwp));
03836       return 0;
03837     }
03838 
03839   thread = get_lwp_thread (lwp);
03840 
03841   if (thread->last_resume_kind == resume_stop
03842       && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
03843     {
03844       if (debug_threads)
03845         fprintf (stderr, "   client wants LWP to remain %ld stopped\n",
03846                  lwpid_of (lwp));
03847       return 0;
03848     }
03849 
03850   if (lwp->status_pending_p)
03851     {
03852       if (debug_threads)
03853         fprintf (stderr, "   LWP %ld has pending status, leaving stopped\n",
03854                  lwpid_of (lwp));
03855       return 0;
03856     }
03857 
03858   gdb_assert (lwp->suspended >= 0);
03859 
03860   if (lwp->suspended)
03861     {
03862       if (debug_threads)
03863         fprintf (stderr, "   LWP %ld is suspended\n", lwpid_of (lwp));
03864       return 0;
03865     }
03866 
03867   if (thread->last_resume_kind == resume_stop
03868       && lwp->pending_signals_to_report == NULL
03869       && lwp->collecting_fast_tracepoint == 0)
03870     {
03871       /* We haven't reported this LWP as stopped yet (otherwise, the
03872          last_status.kind check above would catch it, and we wouldn't
03873          reach here.  This LWP may have been momentarily paused by a
03874          stop_all_lwps call while handling for example, another LWP's
03875          step-over.  In that case, the pending expected SIGSTOP signal
03876          that was queued at vCont;t handling time will have already
03877          been consumed by wait_for_sigstop, and so we need to requeue
03878          another one here.  Note that if the LWP already has a SIGSTOP
03879          pending, this is a no-op.  */
03880 
03881       if (debug_threads)
03882         fprintf (stderr,
03883                  "Client wants LWP %ld to stop. "
03884                  "Making sure it has a SIGSTOP pending\n",
03885                  lwpid_of (lwp));
03886 
03887       send_sigstop (lwp);
03888     }
03889 
03890   step = thread->last_resume_kind == resume_step;
03891   linux_resume_one_lwp (lwp, step, 0, NULL);
03892   return 0;
03893 }
03894 
03895 static int
03896 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
03897 {
03898   struct lwp_info *lwp = (struct lwp_info *) entry;
03899 
03900   if (lwp == except)
03901     return 0;
03902 
03903   lwp->suspended--;
03904   gdb_assert (lwp->suspended >= 0);
03905 
03906   return proceed_one_lwp (entry, except);
03907 }
03908 
03909 /* When we finish a step-over, set threads running again.  If there's
03910    another thread that may need a step-over, now's the time to start
03911    it.  Eventually, we'll move all threads past their breakpoints.  */
03912 
03913 static void
03914 proceed_all_lwps (void)
03915 {
03916   struct lwp_info *need_step_over;
03917 
03918   /* If there is a thread which would otherwise be resumed, which is
03919      stopped at a breakpoint that needs stepping over, then don't
03920      resume any threads - have it step over the breakpoint with all
03921      other threads stopped, then resume all threads again.  */
03922 
03923   if (supports_breakpoints ())
03924     {
03925       need_step_over
03926         = (struct lwp_info *) find_inferior (&all_lwps,
03927                                              need_step_over_p, NULL);
03928 
03929       if (need_step_over != NULL)
03930         {
03931           if (debug_threads)
03932             fprintf (stderr, "proceed_all_lwps: found "
03933                      "thread %ld needing a step-over\n",
03934                      lwpid_of (need_step_over));
03935 
03936           start_step_over (need_step_over);
03937           return;
03938         }
03939     }
03940 
03941   if (debug_threads)
03942     fprintf (stderr, "Proceeding, no step-over needed\n");
03943 
03944   find_inferior (&all_lwps, proceed_one_lwp, NULL);
03945 }
03946 
03947 /* Stopped LWPs that the client wanted to be running, that don't have
03948    pending statuses, are set to run again, except for EXCEPT, if not
03949    NULL.  This undoes a stop_all_lwps call.  */
03950 
03951 static void
03952 unstop_all_lwps (int unsuspend, struct lwp_info *except)
03953 {
03954   if (debug_threads)
03955     {
03956       if (except)
03957         fprintf (stderr,
03958                  "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
03959       else
03960         fprintf (stderr,
03961                  "unstopping all lwps\n");
03962     }
03963 
03964   if (unsuspend)
03965     find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
03966   else
03967     find_inferior (&all_lwps, proceed_one_lwp, except);
03968 }
03969 
03970 
03971 #ifdef HAVE_LINUX_REGSETS
03972 
03973 #define use_linux_regsets 1
03974 
03975 /* Returns true if REGSET has been disabled.  */
03976 
03977 static int
03978 regset_disabled (struct regsets_info *info, struct regset_info *regset)
03979 {
03980   return (info->disabled_regsets != NULL
03981           && info->disabled_regsets[regset - info->regsets]);
03982 }
03983 
03984 /* Disable REGSET.  */
03985 
03986 static void
03987 disable_regset (struct regsets_info *info, struct regset_info *regset)
03988 {
03989   int dr_offset;
03990 
03991   dr_offset = regset - info->regsets;
03992   if (info->disabled_regsets == NULL)
03993     info->disabled_regsets = xcalloc (1, info->num_regsets);
03994   info->disabled_regsets[dr_offset] = 1;
03995 }
03996 
03997 static int
03998 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
03999                                   struct regcache *regcache)
04000 {
04001   struct regset_info *regset;
04002   int saw_general_regs = 0;
04003   int pid;
04004   struct iovec iov;
04005 
04006   regset = regsets_info->regsets;
04007 
04008   pid = lwpid_of (get_thread_lwp (current_inferior));
04009   while (regset->size >= 0)
04010     {
04011       void *buf, *data;
04012       int nt_type, res;
04013 
04014       if (regset->size == 0 || regset_disabled (regsets_info, regset))
04015         {
04016           regset ++;
04017           continue;
04018         }
04019 
04020       buf = xmalloc (regset->size);
04021 
04022       nt_type = regset->nt_type;
04023       if (nt_type)
04024         {
04025           iov.iov_base = buf;
04026           iov.iov_len = regset->size;
04027           data = (void *) &iov;
04028         }
04029       else
04030         data = buf;
04031 
04032 #ifndef __sparc__
04033       res = ptrace (regset->get_request, pid,
04034                     (PTRACE_TYPE_ARG3) (long) nt_type, data);
04035 #else
04036       res = ptrace (regset->get_request, pid, data, nt_type);
04037 #endif
04038       if (res < 0)
04039         {
04040           if (errno == EIO)
04041             {
04042               /* If we get EIO on a regset, do not try it again for
04043                  this process mode.  */
04044               disable_regset (regsets_info, regset);
04045               free (buf);
04046               continue;
04047             }
04048           else
04049             {
04050               char s[256];
04051               sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
04052                        pid);
04053               perror (s);
04054             }
04055         }
04056       else if (regset->type == GENERAL_REGS)
04057         saw_general_regs = 1;
04058       regset->store_function (regcache, buf);
04059       regset ++;
04060       free (buf);
04061     }
04062   if (saw_general_regs)
04063     return 0;
04064   else
04065     return 1;
04066 }
04067 
04068 static int
04069 regsets_store_inferior_registers (struct regsets_info *regsets_info,
04070                                   struct regcache *regcache)
04071 {
04072   struct regset_info *regset;
04073   int saw_general_regs = 0;
04074   int pid;
04075   struct iovec iov;
04076 
04077   regset = regsets_info->regsets;
04078 
04079   pid = lwpid_of (get_thread_lwp (current_inferior));
04080   while (regset->size >= 0)
04081     {
04082       void *buf, *data;
04083       int nt_type, res;
04084 
04085       if (regset->size == 0 || regset_disabled (regsets_info, regset))
04086         {
04087           regset ++;
04088           continue;
04089         }
04090 
04091       buf = xmalloc (regset->size);
04092 
04093       /* First fill the buffer with the current register set contents,
04094          in case there are any items in the kernel's regset that are
04095          not in gdbserver's regcache.  */
04096 
04097       nt_type = regset->nt_type;
04098       if (nt_type)
04099         {
04100           iov.iov_base = buf;
04101           iov.iov_len = regset->size;
04102           data = (void *) &iov;
04103         }
04104       else
04105         data = buf;
04106 
04107 #ifndef __sparc__
04108       res = ptrace (regset->get_request, pid,
04109                     (PTRACE_TYPE_ARG3) (long) nt_type, data);
04110 #else
04111       res = ptrace (regset->get_request, pid, data, nt_type);
04112 #endif
04113 
04114       if (res == 0)
04115         {
04116           /* Then overlay our cached registers on that.  */
04117           regset->fill_function (regcache, buf);
04118 
04119           /* Only now do we write the register set.  */
04120 #ifndef __sparc__
04121           res = ptrace (regset->set_request, pid,
04122                         (PTRACE_TYPE_ARG3) (long) nt_type, data);
04123 #else
04124           res = ptrace (regset->set_request, pid, data, nt_type);
04125 #endif
04126         }
04127 
04128       if (res < 0)
04129         {
04130           if (errno == EIO)
04131             {
04132               /* If we get EIO on a regset, do not try it again for
04133                  this process mode.  */
04134               disable_regset (regsets_info, regset);
04135               free (buf);
04136               continue;
04137             }
04138           else if (errno == ESRCH)
04139             {
04140               /* At this point, ESRCH should mean the process is
04141                  already gone, in which case we simply ignore attempts
04142                  to change its registers.  See also the related
04143                  comment in linux_resume_one_lwp.  */
04144               free (buf);
04145               return 0;
04146             }
04147           else
04148             {
04149               perror ("Warning: ptrace(regsets_store_inferior_registers)");
04150             }
04151         }
04152       else if (regset->type == GENERAL_REGS)
04153         saw_general_regs = 1;
04154       regset ++;
04155       free (buf);
04156     }
04157   if (saw_general_regs)
04158     return 0;
04159   else
04160     return 1;
04161 }
04162 
04163 #else /* !HAVE_LINUX_REGSETS */
04164 
04165 #define use_linux_regsets 0
04166 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
04167 #define regsets_store_inferior_registers(regsets_info, regcache) 1
04168 
04169 #endif
04170 
04171 /* Return 1 if register REGNO is supported by one of the regset ptrace
04172    calls or 0 if it has to be transferred individually.  */
04173 
04174 static int
04175 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
04176 {
04177   unsigned char mask = 1 << (regno % 8);
04178   size_t index = regno / 8;
04179 
04180   return (use_linux_regsets
04181           && (regs_info->regset_bitmap == NULL
04182               || (regs_info->regset_bitmap[index] & mask) != 0));
04183 }
04184 
04185 #ifdef HAVE_LINUX_USRREGS
04186 
04187 int
04188 register_addr (const struct usrregs_info *usrregs, int regnum)
04189 {
04190   int addr;
04191 
04192   if (regnum < 0 || regnum >= usrregs->num_regs)
04193     error ("Invalid register number %d.", regnum);
04194 
04195   addr = usrregs->regmap[regnum];
04196 
04197   return addr;
04198 }
04199 
04200 /* Fetch one register.  */
04201 static void
04202 fetch_register (const struct usrregs_info *usrregs,
04203                 struct regcache *regcache, int regno)
04204 {
04205   CORE_ADDR regaddr;
04206   int i, size;
04207   char *buf;
04208   int pid;
04209 
04210   if (regno >= usrregs->num_regs)
04211     return;
04212   if ((*the_low_target.cannot_fetch_register) (regno))
04213     return;
04214 
04215   regaddr = register_addr (usrregs, regno);
04216   if (regaddr == -1)
04217     return;
04218 
04219   size = ((register_size (regcache->tdesc, regno)
04220            + sizeof (PTRACE_XFER_TYPE) - 1)
04221           & -sizeof (PTRACE_XFER_TYPE));
04222   buf = alloca (size);
04223 
04224   pid = lwpid_of (get_thread_lwp (current_inferior));
04225   for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
04226     {
04227       errno = 0;
04228       *(PTRACE_XFER_TYPE *) (buf + i) =
04229         ptrace (PTRACE_PEEKUSER, pid,
04230                 /* Coerce to a uintptr_t first to avoid potential gcc warning
04231                    of coercing an 8 byte integer to a 4 byte pointer.  */
04232                 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
04233       regaddr += sizeof (PTRACE_XFER_TYPE);
04234       if (errno != 0)
04235         error ("reading register %d: %s", regno, strerror (errno));
04236     }
04237 
04238   if (the_low_target.supply_ptrace_register)
04239     the_low_target.supply_ptrace_register (regcache, regno, buf);
04240   else
04241     supply_register (regcache, regno, buf);
04242 }
04243 
04244 /* Store one register.  */
04245 static void
04246 store_register (const struct usrregs_info *usrregs,
04247                 struct regcache *regcache, int regno)
04248 {
04249   CORE_ADDR regaddr;
04250   int i, size;
04251   char *buf;
04252   int pid;
04253 
04254   if (regno >= usrregs->num_regs)
04255     return;
04256   if ((*the_low_target.cannot_store_register) (regno))
04257     return;
04258 
04259   regaddr = register_addr (usrregs, regno);
04260   if (regaddr == -1)
04261     return;
04262 
04263   size = ((register_size (regcache->tdesc, regno)
04264            + sizeof (PTRACE_XFER_TYPE) - 1)
04265           & -sizeof (PTRACE_XFER_TYPE));
04266   buf = alloca (size);
04267   memset (buf, 0, size);
04268 
04269   if (the_low_target.collect_ptrace_register)
04270     the_low_target.collect_ptrace_register (regcache, regno, buf);
04271   else
04272     collect_register (regcache, regno, buf);
04273 
04274   pid = lwpid_of (get_thread_lwp (current_inferior));
04275   for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
04276     {
04277       errno = 0;
04278       ptrace (PTRACE_POKEUSER, pid,
04279             /* Coerce to a uintptr_t first to avoid potential gcc warning
04280                about coercing an 8 byte integer to a 4 byte pointer.  */
04281               (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
04282               (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
04283       if (errno != 0)
04284         {
04285           /* At this point, ESRCH should mean the process is
04286              already gone, in which case we simply ignore attempts
04287              to change its registers.  See also the related
04288              comment in linux_resume_one_lwp.  */
04289           if (errno == ESRCH)
04290             return;
04291 
04292           if ((*the_low_target.cannot_store_register) (regno) == 0)
04293             error ("writing register %d: %s", regno, strerror (errno));
04294         }
04295       regaddr += sizeof (PTRACE_XFER_TYPE);
04296     }
04297 }
04298 
04299 /* Fetch all registers, or just one, from the child process.
04300    If REGNO is -1, do this for all registers, skipping any that are
04301    assumed to have been retrieved by regsets_fetch_inferior_registers,
04302    unless ALL is non-zero.
04303    Otherwise, REGNO specifies which register (so we can save time).  */
04304 static void
04305 usr_fetch_inferior_registers (const struct regs_info *regs_info,
04306                               struct regcache *regcache, int regno, int all)
04307 {
04308   struct usrregs_info *usr = regs_info->usrregs;
04309 
04310   if (regno == -1)
04311     {
04312       for (regno = 0; regno < usr->num_regs; regno++)
04313         if (all || !linux_register_in_regsets (regs_info, regno))
04314           fetch_register (usr, regcache, regno);
04315     }
04316   else
04317     fetch_register (usr, regcache, regno);
04318 }
04319 
04320 /* Store our register values back into the inferior.
04321    If REGNO is -1, do this for all registers, skipping any that are
04322    assumed to have been saved by regsets_store_inferior_registers,
04323    unless ALL is non-zero.
04324    Otherwise, REGNO specifies which register (so we can save time).  */
04325 static void
04326 usr_store_inferior_registers (const struct regs_info *regs_info,
04327                               struct regcache *regcache, int regno, int all)
04328 {
04329   struct usrregs_info *usr = regs_info->usrregs;
04330 
04331   if (regno == -1)
04332     {
04333       for (regno = 0; regno < usr->num_regs; regno++)
04334         if (all || !linux_register_in_regsets (regs_info, regno))
04335           store_register (usr, regcache, regno);
04336     }
04337   else
04338     store_register (usr, regcache, regno);
04339 }
04340 
04341 #else /* !HAVE_LINUX_USRREGS */
04342 
04343 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
04344 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
04345 
04346 #endif
04347 
04348 
04349 void
04350 linux_fetch_registers (struct regcache *regcache, int regno)
04351 {
04352   int use_regsets;
04353   int all = 0;
04354   const struct regs_info *regs_info = (*the_low_target.regs_info) ();
04355 
04356   if (regno == -1)
04357     {
04358       if (the_low_target.fetch_register != NULL
04359           && regs_info->usrregs != NULL)
04360         for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
04361           (*the_low_target.fetch_register) (regcache, regno);
04362 
04363       all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
04364       if (regs_info->usrregs != NULL)
04365         usr_fetch_inferior_registers (regs_info, regcache, -1, all);
04366     }
04367   else
04368     {
04369       if (the_low_target.fetch_register != NULL
04370           && (*the_low_target.fetch_register) (regcache, regno))
04371         return;
04372 
04373       use_regsets = linux_register_in_regsets (regs_info, regno);
04374       if (use_regsets)
04375         all = regsets_fetch_inferior_registers (regs_info->regsets_info,
04376                                                 regcache);
04377       if ((!use_regsets || all) && regs_info->usrregs != NULL)
04378         usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
04379     }
04380 }
04381 
04382 void
04383 linux_store_registers (struct regcache *regcache, int regno)
04384 {
04385   int use_regsets;
04386   int all = 0;
04387   const struct regs_info *regs_info = (*the_low_target.regs_info) ();
04388 
04389   if (regno == -1)
04390     {
04391       all = regsets_store_inferior_registers (regs_info->regsets_info,
04392                                               regcache);
04393       if (regs_info->usrregs != NULL)
04394         usr_store_inferior_registers (regs_info, regcache, regno, all);
04395     }
04396   else
04397     {
04398       use_regsets = linux_register_in_regsets (regs_info, regno);
04399       if (use_regsets)
04400         all = regsets_store_inferior_registers (regs_info->regsets_info,
04401                                                 regcache);
04402       if ((!use_regsets || all) && regs_info->usrregs != NULL)
04403         usr_store_inferior_registers (regs_info, regcache, regno, 1);
04404     }
04405 }
04406 
04407 
04408 /* Copy LEN bytes from inferior's memory starting at MEMADDR
04409    to debugger memory starting at MYADDR.  */
04410 
04411 static int
04412 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
04413 {
04414   int pid = lwpid_of (get_thread_lwp (current_inferior));
04415   register PTRACE_XFER_TYPE *buffer;
04416   register CORE_ADDR addr;
04417   register int count;
04418   char filename[64];
04419   register int i;
04420   int ret;
04421   int fd;
04422 
04423   /* Try using /proc.  Don't bother for one word.  */
04424   if (len >= 3 * sizeof (long))
04425     {
04426       int bytes;
04427 
04428       /* We could keep this file open and cache it - possibly one per
04429          thread.  That requires some juggling, but is even faster.  */
04430       sprintf (filename, "/proc/%d/mem", pid);
04431       fd = open (filename, O_RDONLY | O_LARGEFILE);
04432       if (fd == -1)
04433         goto no_proc;
04434 
04435       /* If pread64 is available, use it.  It's faster if the kernel
04436          supports it (only one syscall), and it's 64-bit safe even on
04437          32-bit platforms (for instance, SPARC debugging a SPARC64
04438          application).  */
04439 #ifdef HAVE_PREAD64
04440       bytes = pread64 (fd, myaddr, len, memaddr);
04441 #else
04442       bytes = -1;
04443       if (lseek (fd, memaddr, SEEK_SET) != -1)
04444         bytes = read (fd, myaddr, len);
04445 #endif
04446 
04447       close (fd);
04448       if (bytes == len)
04449         return 0;
04450 
04451       /* Some data was read, we'll try to get the rest with ptrace.  */
04452       if (bytes > 0)
04453         {
04454           memaddr += bytes;
04455           myaddr += bytes;
04456           len -= bytes;
04457         }
04458     }
04459 
04460  no_proc:
04461   /* Round starting address down to longword boundary.  */
04462   addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
04463   /* Round ending address up; get number of longwords that makes.  */
04464   count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
04465            / sizeof (PTRACE_XFER_TYPE));
04466   /* Allocate buffer of that many longwords.  */
04467   buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
04468 
04469   /* Read all the longwords */
04470   errno = 0;
04471   for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
04472     {
04473       /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
04474          about coercing an 8 byte integer to a 4 byte pointer.  */
04475       buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
04476                           (PTRACE_TYPE_ARG3) (uintptr_t) addr,
04477                           (PTRACE_TYPE_ARG4) 0);
04478       if (errno)
04479         break;
04480     }
04481   ret = errno;
04482 
04483   /* Copy appropriate bytes out of the buffer.  */
04484   if (i > 0)
04485     {
04486       i *= sizeof (PTRACE_XFER_TYPE);
04487       i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
04488       memcpy (myaddr,
04489               (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
04490               i < len ? i : len);
04491     }
04492 
04493   return ret;
04494 }
04495 
04496 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
04497    memory at MEMADDR.  On failure (cannot write to the inferior)
04498    returns the value of errno.  Always succeeds if LEN is zero.  */
04499 
04500 static int
04501 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
04502 {
04503   register int i;
04504   /* Round starting address down to longword boundary.  */
04505   register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
04506   /* Round ending address up; get number of longwords that makes.  */
04507   register int count
04508     = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
04509     / sizeof (PTRACE_XFER_TYPE);
04510 
04511   /* Allocate buffer of that many longwords.  */
04512   register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
04513     alloca (count * sizeof (PTRACE_XFER_TYPE));
04514 
04515   int pid = lwpid_of (get_thread_lwp (current_inferior));
04516 
04517   if (len == 0)
04518     {
04519       /* Zero length write always succeeds.  */
04520       return 0;
04521     }
04522 
04523   if (debug_threads)
04524     {
04525       /* Dump up to four bytes.  */
04526       unsigned int val = * (unsigned int *) myaddr;
04527       if (len == 1)
04528         val = val & 0xff;
04529       else if (len == 2)
04530         val = val & 0xffff;
04531       else if (len == 3)
04532         val = val & 0xffffff;
04533       fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
04534                val, (long)memaddr);
04535     }
04536 
04537   /* Fill start and end extra bytes of buffer with existing memory data.  */
04538 
04539   errno = 0;
04540   /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
04541      about coercing an 8 byte integer to a 4 byte pointer.  */
04542   buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
04543                       (PTRACE_TYPE_ARG3) (uintptr_t) addr,
04544                       (PTRACE_TYPE_ARG4) 0);
04545   if (errno)
04546     return errno;
04547 
04548   if (count > 1)
04549     {
04550       errno = 0;
04551       buffer[count - 1]
04552         = ptrace (PTRACE_PEEKTEXT, pid,
04553                   /* Coerce to a uintptr_t first to avoid potential gcc warning
04554                      about coercing an 8 byte integer to a 4 byte pointer.  */
04555                   (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
04556                                                   * sizeof (PTRACE_XFER_TYPE)),
04557                   (PTRACE_TYPE_ARG4) 0);
04558       if (errno)
04559         return errno;
04560     }
04561 
04562   /* Copy data to be written over corresponding part of buffer.  */
04563 
04564   memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
04565           myaddr, len);
04566 
04567   /* Write the entire buffer.  */
04568 
04569   for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
04570     {
04571       errno = 0;
04572       ptrace (PTRACE_POKETEXT, pid,
04573               /* Coerce to a uintptr_t first to avoid potential gcc warning
04574                  about coercing an 8 byte integer to a 4 byte pointer.  */
04575               (PTRACE_TYPE_ARG3) (uintptr_t) addr,
04576               (PTRACE_TYPE_ARG4) buffer[i]);
04577       if (errno)
04578         return errno;
04579     }
04580 
04581   return 0;
04582 }
04583 
04584 static void
04585 linux_look_up_symbols (void)
04586 {
04587 #ifdef USE_THREAD_DB
04588   struct process_info *proc = current_process ();
04589 
04590   if (proc->private->thread_db != NULL)
04591     return;
04592 
04593   /* If the kernel supports tracing clones, then we don't need to
04594      use the magic thread event breakpoint to learn about
04595      threads.  */
04596   thread_db_init (!linux_supports_traceclone ());
04597 #endif
04598 }
04599 
04600 static void
04601 linux_request_interrupt (void)
04602 {
04603   extern unsigned long signal_pid;
04604 
04605   if (!ptid_equal (cont_thread, null_ptid)
04606       && !ptid_equal (cont_thread, minus_one_ptid))
04607     {
04608       struct lwp_info *lwp;
04609       int lwpid;
04610 
04611       lwp = get_thread_lwp (current_inferior);
04612       lwpid = lwpid_of (lwp);
04613       kill_lwp (lwpid, SIGINT);
04614     }
04615   else
04616     kill_lwp (signal_pid, SIGINT);
04617 }
04618 
04619 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
04620    to debugger memory starting at MYADDR.  */
04621 
04622 static int
04623 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
04624 {
04625   char filename[PATH_MAX];
04626   int fd, n;
04627   int pid = lwpid_of (get_thread_lwp (current_inferior));
04628 
04629   xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
04630 
04631   fd = open (filename, O_RDONLY);
04632   if (fd < 0)
04633     return -1;
04634 
04635   if (offset != (CORE_ADDR) 0
04636       && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
04637     n = -1;
04638   else
04639     n = read (fd, myaddr, len);
04640 
04641   close (fd);
04642 
04643   return n;
04644 }
04645 
04646 /* These breakpoint and watchpoint related wrapper functions simply
04647    pass on the function call if the target has registered a
04648    corresponding function.  */
04649 
04650 static int
04651 linux_insert_point (char type, CORE_ADDR addr, int len)
04652 {
04653   if (the_low_target.insert_point != NULL)
04654     return the_low_target.insert_point (type, addr, len);
04655   else
04656     /* Unsupported (see target.h).  */
04657     return 1;
04658 }
04659 
04660 static int
04661 linux_remove_point (char type, CORE_ADDR addr, int len)
04662 {
04663   if (the_low_target.remove_point != NULL)
04664     return the_low_target.remove_point (type, addr, len);
04665   else
04666     /* Unsupported (see target.h).  */
04667     return 1;
04668 }
04669 
04670 static int
04671 linux_stopped_by_watchpoint (void)
04672 {
04673   struct lwp_info *lwp = get_thread_lwp (current_inferior);
04674 
04675   return lwp->stopped_by_watchpoint;
04676 }
04677 
04678 static CORE_ADDR
04679 linux_stopped_data_address (void)
04680 {
04681   struct lwp_info *lwp = get_thread_lwp (current_inferior);
04682 
04683   return lwp->stopped_data_address;
04684 }
04685 
04686 #if defined(__UCLIBC__) && defined(HAS_NOMMU)         \
04687     && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
04688     && defined(PT_TEXT_END_ADDR)
04689 
04690 /* This is only used for targets that define PT_TEXT_ADDR,
04691    PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
04692    the target has different ways of acquiring this information, like
04693    loadmaps.  */
04694 
04695 /* Under uClinux, programs are loaded at non-zero offsets, which we need
04696    to tell gdb about.  */
04697 
04698 static int
04699 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
04700 {
04701   unsigned long text, text_end, data;
04702   int pid = lwpid_of (get_thread_lwp (current_inferior));
04703 
04704   errno = 0;
04705 
04706   text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
04707                  (PTRACE_TYPE_ARG4) 0);
04708   text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
04709                      (PTRACE_TYPE_ARG4) 0);
04710   data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
04711                  (PTRACE_TYPE_ARG4) 0);
04712 
04713   if (errno == 0)
04714     {
04715       /* Both text and data offsets produced at compile-time (and so
04716          used by gdb) are relative to the beginning of the program,
04717          with the data segment immediately following the text segment.
04718          However, the actual runtime layout in memory may put the data
04719          somewhere else, so when we send gdb a data base-address, we
04720          use the real data base address and subtract the compile-time
04721          data base-address from it (which is just the length of the
04722          text segment).  BSS immediately follows data in both
04723          cases.  */
04724       *text_p = text;
04725       *data_p = data - (text_end - text);
04726 
04727       return 1;
04728     }
04729  return 0;
04730 }
04731 #endif
04732 
04733 static int
04734 linux_qxfer_osdata (const char *annex,
04735                     unsigned char *readbuf, unsigned const char *writebuf,
04736                     CORE_ADDR offset, int len)
04737 {
04738   return linux_common_xfer_osdata (annex, readbuf, offset, len);
04739 }
04740 
04741 /* Convert a native/host siginfo object, into/from the siginfo in the
04742    layout of the inferiors' architecture.  */
04743 
04744 static void
04745 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
04746 {
04747   int done = 0;
04748 
04749   if (the_low_target.siginfo_fixup != NULL)
04750     done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
04751 
04752   /* If there was no callback, or the callback didn't do anything,
04753      then just do a straight memcpy.  */
04754   if (!done)
04755     {
04756       if (direction == 1)
04757         memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
04758       else
04759         memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
04760     }
04761 }
04762 
04763 static int
04764 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
04765                     unsigned const char *writebuf, CORE_ADDR offset, int len)
04766 {
04767   int pid;
04768   siginfo_t siginfo;
04769   char inf_siginfo[sizeof (siginfo_t)];
04770 
04771   if (current_inferior == NULL)
04772     return -1;
04773 
04774   pid = lwpid_of (get_thread_lwp (current_inferior));
04775 
04776   if (debug_threads)
04777     fprintf (stderr, "%s siginfo for lwp %d.\n",
04778              readbuf != NULL ? "Reading" : "Writing",
04779              pid);
04780 
04781   if (offset >= sizeof (siginfo))
04782     return -1;
04783 
04784   if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
04785     return -1;
04786 
04787   /* When GDBSERVER is built as a 64-bit application, ptrace writes into
04788      SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
04789      inferior with a 64-bit GDBSERVER should look the same as debugging it
04790      with a 32-bit GDBSERVER, we need to convert it.  */
04791   siginfo_fixup (&siginfo, inf_siginfo, 0);
04792 
04793   if (offset + len > sizeof (siginfo))
04794     len = sizeof (siginfo) - offset;
04795 
04796   if (readbuf != NULL)
04797     memcpy (readbuf, inf_siginfo + offset, len);
04798   else
04799     {
04800       memcpy (inf_siginfo + offset, writebuf, len);
04801 
04802       /* Convert back to ptrace layout before flushing it out.  */
04803       siginfo_fixup (&siginfo, inf_siginfo, 1);
04804 
04805       if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
04806         return -1;
04807     }
04808 
04809   return len;
04810 }
04811 
04812 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
04813    so we notice when children change state; as the handler for the
04814    sigsuspend in my_waitpid.  */
04815 
04816 static void
04817 sigchld_handler (int signo)
04818 {
04819   int old_errno = errno;
04820 
04821   if (debug_threads)
04822     {
04823       do
04824         {
04825           /* fprintf is not async-signal-safe, so call write
04826              directly.  */
04827           if (write (2, "sigchld_handler\n",
04828                      sizeof ("sigchld_handler\n") - 1) < 0)
04829             break; /* just ignore */
04830         } while (0);
04831     }
04832 
04833   if (target_is_async_p ())
04834     async_file_mark (); /* trigger a linux_wait */
04835 
04836   errno = old_errno;
04837 }
04838 
04839 static int
04840 linux_supports_non_stop (void)
04841 {
04842   return 1;
04843 }
04844 
04845 static int
04846 linux_async (int enable)
04847 {
04848   int previous = (linux_event_pipe[0] != -1);
04849 
04850   if (debug_threads)
04851     fprintf (stderr, "linux_async (%d), previous=%d\n",
04852              enable, previous);
04853 
04854   if (previous != enable)
04855     {
04856       sigset_t mask;
04857       sigemptyset (&mask);
04858       sigaddset (&mask, SIGCHLD);
04859 
04860       sigprocmask (SIG_BLOCK, &mask, NULL);
04861 
04862       if (enable)
04863         {
04864           if (pipe (linux_event_pipe) == -1)
04865             fatal ("creating event pipe failed.");
04866 
04867           fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
04868           fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
04869 
04870           /* Register the event loop handler.  */
04871           add_file_handler (linux_event_pipe[0],
04872                             handle_target_event, NULL);
04873 
04874           /* Always trigger a linux_wait.  */
04875           async_file_mark ();
04876         }
04877       else
04878         {
04879           delete_file_handler (linux_event_pipe[0]);
04880 
04881           close (linux_event_pipe[0]);
04882           close (linux_event_pipe[1]);
04883           linux_event_pipe[0] = -1;
04884           linux_event_pipe[1] = -1;
04885         }
04886 
04887       sigprocmask (SIG_UNBLOCK, &mask, NULL);
04888     }
04889 
04890   return previous;
04891 }
04892 
04893 static int
04894 linux_start_non_stop (int nonstop)
04895 {
04896   /* Register or unregister from event-loop accordingly.  */
04897   linux_async (nonstop);
04898   return 0;
04899 }
04900 
04901 static int
04902 linux_supports_multi_process (void)
04903 {
04904   return 1;
04905 }
04906 
04907 static int
04908 linux_supports_disable_randomization (void)
04909 {
04910 #ifdef HAVE_PERSONALITY
04911   return 1;
04912 #else
04913   return 0;
04914 #endif
04915 }
04916 
04917 static int
04918 linux_supports_agent (void)
04919 {
04920   return 1;
04921 }
04922 
04923 static int
04924 linux_supports_range_stepping (void)
04925 {
04926   if (*the_low_target.supports_range_stepping == NULL)
04927     return 0;
04928 
04929   return (*the_low_target.supports_range_stepping) ();
04930 }
04931 
04932 /* Enumerate spufs IDs for process PID.  */
04933 static int
04934 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
04935 {
04936   int pos = 0;
04937   int written = 0;
04938   char path[128];
04939   DIR *dir;
04940   struct dirent *entry;
04941 
04942   sprintf (path, "/proc/%ld/fd", pid);
04943   dir = opendir (path);
04944   if (!dir)
04945     return -1;
04946 
04947   rewinddir (dir);
04948   while ((entry = readdir (dir)) != NULL)
04949     {
04950       struct stat st;
04951       struct statfs stfs;
04952       int fd;
04953 
04954       fd = atoi (entry->d_name);
04955       if (!fd)
04956         continue;
04957 
04958       sprintf (path, "/proc/%ld/fd/%d", pid, fd);
04959       if (stat (path, &st) != 0)
04960         continue;
04961       if (!S_ISDIR (st.st_mode))
04962         continue;
04963 
04964       if (statfs (path, &stfs) != 0)
04965         continue;
04966       if (stfs.f_type != SPUFS_MAGIC)
04967         continue;
04968 
04969       if (pos >= offset && pos + 4 <= offset + len)
04970         {
04971           *(unsigned int *)(buf + pos - offset) = fd;
04972           written += 4;
04973         }
04974       pos += 4;
04975     }
04976 
04977   closedir (dir);
04978   return written;
04979 }
04980 
04981 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
04982    object type, using the /proc file system.  */
04983 static int
04984 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
04985                  unsigned const char *writebuf,
04986                  CORE_ADDR offset, int len)
04987 {
04988   long pid = lwpid_of (get_thread_lwp (current_inferior));
04989   char buf[128];
04990   int fd = 0;
04991   int ret = 0;
04992 
04993   if (!writebuf && !readbuf)
04994     return -1;
04995 
04996   if (!*annex)
04997     {
04998       if (!readbuf)
04999         return -1;
05000       else
05001         return spu_enumerate_spu_ids (pid, readbuf, offset, len);
05002     }
05003 
05004   sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
05005   fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
05006   if (fd <= 0)
05007     return -1;
05008 
05009   if (offset != 0
05010       && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
05011     {
05012       close (fd);
05013       return 0;
05014     }
05015 
05016   if (writebuf)
05017     ret = write (fd, writebuf, (size_t) len);
05018   else
05019     ret = read (fd, readbuf, (size_t) len);
05020 
05021   close (fd);
05022   return ret;
05023 }
05024 
05025 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
05026 struct target_loadseg
05027 {
05028   /* Core address to which the segment is mapped.  */
05029   Elf32_Addr addr;
05030   /* VMA recorded in the program header.  */
05031   Elf32_Addr p_vaddr;
05032   /* Size of this segment in memory.  */
05033   Elf32_Word p_memsz;
05034 };
05035 
05036 # if defined PT_GETDSBT
05037 struct target_loadmap
05038 {
05039   /* Protocol version number, must be zero.  */
05040   Elf32_Word version;
05041   /* Pointer to the DSBT table, its size, and the DSBT index.  */
05042   unsigned *dsbt_table;
05043   unsigned dsbt_size, dsbt_index;
05044   /* Number of segments in this map.  */
05045   Elf32_Word nsegs;
05046   /* The actual memory map.  */
05047   struct target_loadseg segs[/*nsegs*/];
05048 };
05049 #  define LINUX_LOADMAP         PT_GETDSBT
05050 #  define LINUX_LOADMAP_EXEC    PTRACE_GETDSBT_EXEC
05051 #  define LINUX_LOADMAP_INTERP  PTRACE_GETDSBT_INTERP
05052 # else
05053 struct target_loadmap
05054 {
05055   /* Protocol version number, must be zero.  */
05056   Elf32_Half version;
05057   /* Number of segments in this map.  */
05058   Elf32_Half nsegs;
05059   /* The actual memory map.  */
05060   struct target_loadseg segs[/*nsegs*/];
05061 };
05062 #  define LINUX_LOADMAP         PTRACE_GETFDPIC
05063 #  define LINUX_LOADMAP_EXEC    PTRACE_GETFDPIC_EXEC
05064 #  define LINUX_LOADMAP_INTERP  PTRACE_GETFDPIC_INTERP
05065 # endif
05066 
05067 static int
05068 linux_read_loadmap (const char *annex, CORE_ADDR offset,
05069                     unsigned char *myaddr, unsigned int len)
05070 {
05071   int pid = lwpid_of (get_thread_lwp (current_inferior));
05072   int addr = -1;
05073   struct target_loadmap *data = NULL;
05074   unsigned int actual_length, copy_length;
05075 
05076   if (strcmp (annex, "exec") == 0)
05077     addr = (int) LINUX_LOADMAP_EXEC;
05078   else if (strcmp (annex, "interp") == 0)
05079     addr = (int) LINUX_LOADMAP_INTERP;
05080   else
05081     return -1;
05082 
05083   if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
05084     return -1;
05085 
05086   if (data == NULL)
05087     return -1;
05088 
05089   actual_length = sizeof (struct target_loadmap)
05090     + sizeof (struct target_loadseg) * data->nsegs;
05091 
05092   if (offset < 0 || offset > actual_length)
05093     return -1;
05094 
05095   copy_length = actual_length - offset < len ? actual_length - offset : len;
05096   memcpy (myaddr, (char *) data + offset, copy_length);
05097   return copy_length;
05098 }
05099 #else
05100 # define linux_read_loadmap NULL
05101 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
05102 
05103 static void
05104 linux_process_qsupported (const char *query)
05105 {
05106   if (the_low_target.process_qsupported != NULL)
05107     the_low_target.process_qsupported (query);
05108 }
05109 
05110 static int
05111 linux_supports_tracepoints (void)
05112 {
05113   if (*the_low_target.supports_tracepoints == NULL)
05114     return 0;
05115 
05116   return (*the_low_target.supports_tracepoints) ();
05117 }
05118 
05119 static CORE_ADDR
05120 linux_read_pc (struct regcache *regcache)
05121 {
05122   if (the_low_target.get_pc == NULL)
05123     return 0;
05124 
05125   return (*the_low_target.get_pc) (regcache);
05126 }
05127 
05128 static void
05129 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
05130 {
05131   gdb_assert (the_low_target.set_pc != NULL);
05132 
05133   (*the_low_target.set_pc) (regcache, pc);
05134 }
05135 
05136 static int
05137 linux_thread_stopped (struct thread_info *thread)
05138 {
05139   return get_thread_lwp (thread)->stopped;
05140 }
05141 
05142 /* This exposes stop-all-threads functionality to other modules.  */
05143 
05144 static void
05145 linux_pause_all (int freeze)
05146 {
05147   stop_all_lwps (freeze, NULL);
05148 }
05149 
05150 /* This exposes unstop-all-threads functionality to other gdbserver
05151    modules.  */
05152 
05153 static void
05154 linux_unpause_all (int unfreeze)
05155 {
05156   unstop_all_lwps (unfreeze, NULL);
05157 }
05158 
05159 static int
05160 linux_prepare_to_access_memory (void)
05161 {
05162   /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
05163      running LWP.  */
05164   if (non_stop)
05165     linux_pause_all (1);
05166   return 0;
05167 }
05168 
05169 static void
05170 linux_done_accessing_memory (void)
05171 {
05172   /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
05173      running LWP.  */
05174   if (non_stop)
05175     linux_unpause_all (1);
05176 }
05177 
05178 static int
05179 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
05180                                         CORE_ADDR collector,
05181                                         CORE_ADDR lockaddr,
05182                                         ULONGEST orig_size,
05183                                         CORE_ADDR *jump_entry,
05184                                         CORE_ADDR *trampoline,
05185                                         ULONGEST *trampoline_size,
05186                                         unsigned char *jjump_pad_insn,
05187                                         ULONGEST *jjump_pad_insn_size,
05188                                         CORE_ADDR *adjusted_insn_addr,
05189                                         CORE_ADDR *adjusted_insn_addr_end,
05190                                         char *err)
05191 {
05192   return (*the_low_target.install_fast_tracepoint_jump_pad)
05193     (tpoint, tpaddr, collector, lockaddr, orig_size,
05194      jump_entry, trampoline, trampoline_size,
05195      jjump_pad_insn, jjump_pad_insn_size,
05196      adjusted_insn_addr, adjusted_insn_addr_end,
05197      err);
05198 }
05199 
05200 static struct emit_ops *
05201 linux_emit_ops (void)
05202 {
05203   if (the_low_target.emit_ops != NULL)
05204     return (*the_low_target.emit_ops) ();
05205   else
05206     return NULL;
05207 }
05208 
05209 static int
05210 linux_get_min_fast_tracepoint_insn_len (void)
05211 {
05212   return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
05213 }
05214 
05215 /* Extract &phdr and num_phdr in the inferior.  Return 0 on success.  */
05216 
05217 static int
05218 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
05219                                CORE_ADDR *phdr_memaddr, int *num_phdr)
05220 {
05221   char filename[PATH_MAX];
05222   int fd;
05223   const int auxv_size = is_elf64
05224     ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
05225   char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */
05226 
05227   xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
05228 
05229   fd = open (filename, O_RDONLY);
05230   if (fd < 0)
05231     return 1;
05232 
05233   *phdr_memaddr = 0;
05234   *num_phdr = 0;
05235   while (read (fd, buf, auxv_size) == auxv_size
05236          && (*phdr_memaddr == 0 || *num_phdr == 0))
05237     {
05238       if (is_elf64)
05239         {
05240           Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
05241 
05242           switch (aux->a_type)
05243             {
05244             case AT_PHDR:
05245               *phdr_memaddr = aux->a_un.a_val;
05246               break;
05247             case AT_PHNUM:
05248               *num_phdr = aux->a_un.a_val;
05249               break;
05250             }
05251         }
05252       else
05253         {
05254           Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
05255 
05256           switch (aux->a_type)
05257             {
05258             case AT_PHDR:
05259               *phdr_memaddr = aux->a_un.a_val;
05260               break;
05261             case AT_PHNUM:
05262               *num_phdr = aux->a_un.a_val;
05263               break;
05264             }
05265         }
05266     }
05267 
05268   close (fd);
05269 
05270   if (*phdr_memaddr == 0 || *num_phdr == 0)
05271     {
05272       warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
05273                "phdr_memaddr = %ld, phdr_num = %d",
05274                (long) *phdr_memaddr, *num_phdr);
05275       return 2;
05276     }
05277 
05278   return 0;
05279 }
05280 
05281 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */
05282 
05283 static CORE_ADDR
05284 get_dynamic (const int pid, const int is_elf64)
05285 {
05286   CORE_ADDR phdr_memaddr, relocation;
05287   int num_phdr, i;
05288   unsigned char *phdr_buf;
05289   const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
05290 
05291   if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
05292     return 0;
05293 
05294   gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
05295   phdr_buf = alloca (num_phdr * phdr_size);
05296 
05297   if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
05298     return 0;
05299 
05300   /* Compute relocation: it is expected to be 0 for "regular" executables,
05301      non-zero for PIE ones.  */
05302   relocation = -1;
05303   for (i = 0; relocation == -1 && i < num_phdr; i++)
05304     if (is_elf64)
05305       {
05306         Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
05307 
05308         if (p->p_type == PT_PHDR)
05309           relocation = phdr_memaddr - p->p_vaddr;
05310       }
05311     else
05312       {
05313         Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
05314 
05315         if (p->p_type == PT_PHDR)
05316           relocation = phdr_memaddr - p->p_vaddr;
05317       }
05318 
05319   if (relocation == -1)
05320     {
05321       /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
05322          any real world executables, including PIE executables, have always
05323          PT_PHDR present.  PT_PHDR is not present in some shared libraries or
05324          in fpc (Free Pascal 2.4) binaries but neither of those have a need for
05325          or present DT_DEBUG anyway (fpc binaries are statically linked).
05326 
05327          Therefore if there exists DT_DEBUG there is always also PT_PHDR.
05328 
05329          GDB could find RELOCATION also from AT_ENTRY - e_entry.  */
05330 
05331       return 0;
05332     }
05333 
05334   for (i = 0; i < num_phdr; i++)
05335     {
05336       if (is_elf64)
05337         {
05338           Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
05339 
05340           if (p->p_type == PT_DYNAMIC)
05341             return p->p_vaddr + relocation;
05342         }
05343       else
05344         {
05345           Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
05346 
05347           if (p->p_type == PT_DYNAMIC)
05348             return p->p_vaddr + relocation;
05349         }
05350     }
05351 
05352   return 0;
05353 }
05354 
05355 /* Return &_r_debug in the inferior, or -1 if not present.  Return value
05356    can be 0 if the inferior does not yet have the library list initialized.
05357    We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
05358    DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */
05359 
05360 static CORE_ADDR
05361 get_r_debug (const int pid, const int is_elf64)
05362 {
05363   CORE_ADDR dynamic_memaddr;
05364   const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
05365   unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
05366   CORE_ADDR map = -1;
05367 
05368   dynamic_memaddr = get_dynamic (pid, is_elf64);
05369   if (dynamic_memaddr == 0)
05370     return map;
05371 
05372   while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
05373     {
05374       if (is_elf64)
05375         {
05376           Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
05377 #ifdef DT_MIPS_RLD_MAP
05378           union
05379             {
05380               Elf64_Xword map;
05381               unsigned char buf[sizeof (Elf64_Xword)];
05382             }
05383           rld_map;
05384 
05385           if (dyn->d_tag == DT_MIPS_RLD_MAP)
05386             {
05387               if (linux_read_memory (dyn->d_un.d_val,
05388                                      rld_map.buf, sizeof (rld_map.buf)) == 0)
05389                 return rld_map.map;
05390               else
05391                 break;
05392             }
05393 #endif  /* DT_MIPS_RLD_MAP */
05394 
05395           if (dyn->d_tag == DT_DEBUG && map == -1)
05396             map = dyn->d_un.d_val;
05397 
05398           if (dyn->d_tag == DT_NULL)
05399             break;
05400         }
05401       else
05402         {
05403           Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
05404 #ifdef DT_MIPS_RLD_MAP
05405           union
05406             {
05407               Elf32_Word map;
05408               unsigned char buf[sizeof (Elf32_Word)];
05409             }
05410           rld_map;
05411 
05412           if (dyn->d_tag == DT_MIPS_RLD_MAP)
05413             {
05414               if (linux_read_memory (dyn->d_un.d_val,
05415                                      rld_map.buf, sizeof (rld_map.buf)) == 0)
05416                 return rld_map.map;
05417               else
05418                 break;
05419             }
05420 #endif  /* DT_MIPS_RLD_MAP */
05421 
05422           if (dyn->d_tag == DT_DEBUG && map == -1)
05423             map = dyn->d_un.d_val;
05424 
05425           if (dyn->d_tag == DT_NULL)
05426             break;
05427         }
05428 
05429       dynamic_memaddr += dyn_size;
05430     }
05431 
05432   return map;
05433 }
05434 
05435 /* Read one pointer from MEMADDR in the inferior.  */
05436 
05437 static int
05438 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
05439 {
05440   int ret;
05441 
05442   /* Go through a union so this works on either big or little endian
05443      hosts, when the inferior's pointer size is smaller than the size
05444      of CORE_ADDR.  It is assumed the inferior's endianness is the
05445      same of the superior's.  */
05446   union
05447   {
05448     CORE_ADDR core_addr;
05449     unsigned int ui;
05450     unsigned char uc;
05451   } addr;
05452 
05453   ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
05454   if (ret == 0)
05455     {
05456       if (ptr_size == sizeof (CORE_ADDR))
05457         *ptr = addr.core_addr;
05458       else if (ptr_size == sizeof (unsigned int))
05459         *ptr = addr.ui;
05460       else
05461         gdb_assert_not_reached ("unhandled pointer size");
05462     }
05463   return ret;
05464 }
05465 
05466 struct link_map_offsets
05467   {
05468     /* Offset and size of r_debug.r_version.  */
05469     int r_version_offset;
05470 
05471     /* Offset and size of r_debug.r_map.  */
05472     int r_map_offset;
05473 
05474     /* Offset to l_addr field in struct link_map.  */
05475     int l_addr_offset;
05476 
05477     /* Offset to l_name field in struct link_map.  */
05478     int l_name_offset;
05479 
05480     /* Offset to l_ld field in struct link_map.  */
05481     int l_ld_offset;
05482 
05483     /* Offset to l_next field in struct link_map.  */
05484     int l_next_offset;
05485 
05486     /* Offset to l_prev field in struct link_map.  */
05487     int l_prev_offset;
05488   };
05489 
05490 /* Construct qXfer:libraries-svr4:read reply.  */
05491 
05492 static int
05493 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
05494                             unsigned const char *writebuf,
05495                             CORE_ADDR offset, int len)
05496 {
05497   char *document;
05498   unsigned document_len;
05499   struct process_info_private *const priv = current_process ()->private;
05500   char filename[PATH_MAX];
05501   int pid, is_elf64;
05502 
05503   static const struct link_map_offsets lmo_32bit_offsets =
05504     {
05505       0,     /* r_version offset. */
05506       4,     /* r_debug.r_map offset.  */
05507       0,     /* l_addr offset in link_map.  */
05508       4,     /* l_name offset in link_map.  */
05509       8,     /* l_ld offset in link_map.  */
05510       12,    /* l_next offset in link_map.  */
05511       16     /* l_prev offset in link_map.  */
05512     };
05513 
05514   static const struct link_map_offsets lmo_64bit_offsets =
05515     {
05516       0,     /* r_version offset. */
05517       8,     /* r_debug.r_map offset.  */
05518       0,     /* l_addr offset in link_map.  */
05519       8,     /* l_name offset in link_map.  */
05520       16,    /* l_ld offset in link_map.  */
05521       24,    /* l_next offset in link_map.  */
05522       32     /* l_prev offset in link_map.  */
05523     };
05524   const struct link_map_offsets *lmo;
05525   unsigned int machine;
05526   int ptr_size;
05527   CORE_ADDR lm_addr = 0, lm_prev = 0;
05528   int allocated = 1024;
05529   char *p;
05530   CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
05531   int header_done = 0;
05532 
05533   if (writebuf != NULL)
05534     return -2;
05535   if (readbuf == NULL)
05536     return -1;
05537 
05538   pid = lwpid_of (get_thread_lwp (current_inferior));
05539   xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
05540   is_elf64 = elf_64_file_p (filename, &machine);
05541   lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
05542   ptr_size = is_elf64 ? 8 : 4;
05543 
05544   while (annex[0] != '\0')
05545     {
05546       const char *sep;
05547       CORE_ADDR *addrp;
05548       int len;
05549 
05550       sep = strchr (annex, '=');
05551       if (sep == NULL)
05552         break;
05553 
05554       len = sep - annex;
05555       if (len == 5 && strncmp (annex, "start", 5) == 0)
05556         addrp = &lm_addr;
05557       else if (len == 4 && strncmp (annex, "prev", 4) == 0)
05558         addrp = &lm_prev;
05559       else
05560         {
05561           annex = strchr (sep, ';');
05562           if (annex == NULL)
05563             break;
05564           annex++;
05565           continue;
05566         }
05567 
05568       annex = decode_address_to_semicolon (addrp, sep + 1);
05569     }
05570 
05571   if (lm_addr == 0)
05572     {
05573       int r_version = 0;
05574 
05575       if (priv->r_debug == 0)
05576         priv->r_debug = get_r_debug (pid, is_elf64);
05577 
05578       /* We failed to find DT_DEBUG.  Such situation will not change
05579          for this inferior - do not retry it.  Report it to GDB as
05580          E01, see for the reasons at the GDB solib-svr4.c side.  */
05581       if (priv->r_debug == (CORE_ADDR) -1)
05582         return -1;
05583 
05584       if (priv->r_debug != 0)
05585         {
05586           if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
05587                                  (unsigned char *) &r_version,
05588                                  sizeof (r_version)) != 0
05589               || r_version != 1)
05590             {
05591               warning ("unexpected r_debug version %d", r_version);
05592             }
05593           else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
05594                                  &lm_addr, ptr_size) != 0)
05595             {
05596               warning ("unable to read r_map from 0x%lx",
05597                        (long) priv->r_debug + lmo->r_map_offset);
05598             }
05599         }
05600     }
05601 
05602   document = xmalloc (allocated);
05603   strcpy (document, "<library-list-svr4 version=\"1.0\"");
05604   p = document + strlen (document);
05605 
05606   while (lm_addr
05607          && read_one_ptr (lm_addr + lmo->l_name_offset,
05608                           &l_name, ptr_size) == 0
05609          && read_one_ptr (lm_addr + lmo->l_addr_offset,
05610                           &l_addr, ptr_size) == 0
05611          && read_one_ptr (lm_addr + lmo->l_ld_offset,
05612                           &l_ld, ptr_size) == 0
05613          && read_one_ptr (lm_addr + lmo->l_prev_offset,
05614                           &l_prev, ptr_size) == 0
05615          && read_one_ptr (lm_addr + lmo->l_next_offset,
05616                           &l_next, ptr_size) == 0)
05617     {
05618       unsigned char libname[PATH_MAX];
05619 
05620       if (lm_prev != l_prev)
05621         {
05622           warning ("Corrupted shared library list: 0x%lx != 0x%lx",
05623                    (long) lm_prev, (long) l_prev);
05624           break;
05625         }
05626 
05627       /* Ignore the first entry even if it has valid name as the first entry
05628          corresponds to the main executable.  The first entry should not be
05629          skipped if the dynamic loader was loaded late by a static executable
05630          (see solib-svr4.c parameter ignore_first).  But in such case the main
05631          executable does not have PT_DYNAMIC present and this function already
05632          exited above due to failed get_r_debug.  */
05633       if (lm_prev == 0)
05634         {
05635           sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
05636           p = p + strlen (p);
05637         }
05638       else
05639         {
05640           /* Not checking for error because reading may stop before
05641              we've got PATH_MAX worth of characters.  */
05642           libname[0] = '\0';
05643           linux_read_memory (l_name, libname, sizeof (libname) - 1);
05644           libname[sizeof (libname) - 1] = '\0';
05645           if (libname[0] != '\0')
05646             {
05647               /* 6x the size for xml_escape_text below.  */
05648               size_t len = 6 * strlen ((char *) libname);
05649               char *name;
05650 
05651               if (!header_done)
05652                 {
05653                   /* Terminate `<library-list-svr4'.  */
05654                   *p++ = '>';
05655                   header_done = 1;
05656                 }
05657 
05658               while (allocated < p - document + len + 200)
05659                 {
05660                   /* Expand to guarantee sufficient storage.  */
05661                   uintptr_t document_len = p - document;
05662 
05663                   document = xrealloc (document, 2 * allocated);
05664                   allocated *= 2;
05665                   p = document + document_len;
05666                 }
05667 
05668               name = xml_escape_text ((char *) libname);
05669               p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
05670                             "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
05671                             name, (unsigned long) lm_addr,
05672                             (unsigned long) l_addr, (unsigned long) l_ld);
05673               free (name);
05674             }
05675         }
05676 
05677       lm_prev = lm_addr;
05678       lm_addr = l_next;
05679     }
05680 
05681   if (!header_done)
05682     {
05683       /* Empty list; terminate `<library-list-svr4'.  */
05684       strcpy (p, "/>");
05685     }
05686   else
05687     strcpy (p, "</library-list-svr4>");
05688 
05689   document_len = strlen (document);
05690   if (offset < document_len)
05691     document_len -= offset;
05692   else
05693     document_len = 0;
05694   if (len > document_len)
05695     len = document_len;
05696 
05697   memcpy (readbuf, document + offset, len);
05698   xfree (document);
05699 
05700   return len;
05701 }
05702 
05703 #ifdef HAVE_LINUX_BTRACE
05704 
05705 /* Enable branch tracing.  */
05706 
05707 static struct btrace_target_info *
05708 linux_low_enable_btrace (ptid_t ptid)
05709 {
05710   struct btrace_target_info *tinfo;
05711 
05712   tinfo = linux_enable_btrace (ptid);
05713 
05714   if (tinfo != NULL)
05715     {
05716       struct thread_info *thread = find_thread_ptid (ptid);
05717       struct regcache *regcache = get_thread_regcache (thread, 0);
05718 
05719       tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
05720     }
05721 
05722   return tinfo;
05723 }
05724 
05725 /* Read branch trace data as btrace xml document.  */
05726 
05727 static void
05728 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
05729                        int type)
05730 {
05731   VEC (btrace_block_s) *btrace;
05732   struct btrace_block *block;
05733   int i;
05734 
05735   btrace = linux_read_btrace (tinfo, type);
05736 
05737   buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
05738   buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
05739 
05740   for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
05741     buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
05742                        paddress (block->begin), paddress (block->end));
05743 
05744   buffer_grow_str (buffer, "</btrace>\n");
05745 
05746   VEC_free (btrace_block_s, btrace);
05747 }
05748 #endif /* HAVE_LINUX_BTRACE */
05749 
05750 static struct target_ops linux_target_ops = {
05751   linux_create_inferior,
05752   linux_attach,
05753   linux_kill,
05754   linux_detach,
05755   linux_mourn,
05756   linux_join,
05757   linux_thread_alive,
05758   linux_resume,
05759   linux_wait,
05760   linux_fetch_registers,
05761   linux_store_registers,
05762   linux_prepare_to_access_memory,
05763   linux_done_accessing_memory,
05764   linux_read_memory,
05765   linux_write_memory,
05766   linux_look_up_symbols,
05767   linux_request_interrupt,
05768   linux_read_auxv,
05769   linux_insert_point,
05770   linux_remove_point,
05771   linux_stopped_by_watchpoint,
05772   linux_stopped_data_address,
05773 #if defined(__UCLIBC__) && defined(HAS_NOMMU)         \
05774     && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
05775     && defined(PT_TEXT_END_ADDR)
05776   linux_read_offsets,
05777 #else
05778   NULL,
05779 #endif
05780 #ifdef USE_THREAD_DB
05781   thread_db_get_tls_address,
05782 #else
05783   NULL,
05784 #endif
05785   linux_qxfer_spu,
05786   hostio_last_error_from_errno,
05787   linux_qxfer_osdata,
05788   linux_xfer_siginfo,
05789   linux_supports_non_stop,
05790   linux_async,
05791   linux_start_non_stop,
05792   linux_supports_multi_process,
05793 #ifdef USE_THREAD_DB
05794   thread_db_handle_monitor_command,
05795 #else
05796   NULL,
05797 #endif
05798   linux_common_core_of_thread,
05799   linux_read_loadmap,
05800   linux_process_qsupported,
05801   linux_supports_tracepoints,
05802   linux_read_pc,
05803   linux_write_pc,
05804   linux_thread_stopped,
05805   NULL,
05806   linux_pause_all,
05807   linux_unpause_all,
05808   linux_cancel_breakpoints,
05809   linux_stabilize_threads,
05810   linux_install_fast_tracepoint_jump_pad,
05811   linux_emit_ops,
05812   linux_supports_disable_randomization,
05813   linux_get_min_fast_tracepoint_insn_len,
05814   linux_qxfer_libraries_svr4,
05815   linux_supports_agent,
05816 #ifdef HAVE_LINUX_BTRACE
05817   linux_supports_btrace,
05818   linux_low_enable_btrace,
05819   linux_disable_btrace,
05820   linux_low_read_btrace,
05821 #else
05822   NULL,
05823   NULL,
05824   NULL,
05825   NULL,
05826 #endif
05827   linux_supports_range_stepping,
05828 };
05829 
05830 static void
05831 linux_init_signals ()
05832 {
05833   /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
05834      to find what the cancel signal actually is.  */
05835 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
05836   signal (__SIGRTMIN+1, SIG_IGN);
05837 #endif
05838 }
05839 
05840 #ifdef HAVE_LINUX_REGSETS
05841 void
05842 initialize_regsets_info (struct regsets_info *info)
05843 {
05844   for (info->num_regsets = 0;
05845        info->regsets[info->num_regsets].size >= 0;
05846        info->num_regsets++)
05847     ;
05848 }
05849 #endif
05850 
05851 void
05852 initialize_low (void)
05853 {
05854   struct sigaction sigchld_action;
05855   memset (&sigchld_action, 0, sizeof (sigchld_action));
05856   set_target_ops (&linux_target_ops);
05857   set_breakpoint_data (the_low_target.breakpoint,
05858                        the_low_target.breakpoint_len);
05859   linux_init_signals ();
05860   linux_ptrace_init_warnings ();
05861 
05862   sigchld_action.sa_handler = sigchld_handler;
05863   sigemptyset (&sigchld_action.sa_mask);
05864   sigchld_action.sa_flags = SA_RESTART;
05865   sigaction (SIGCHLD, &sigchld_action, NULL);
05866 
05867   initialize_low_arch ();
05868 }
 All Classes Files Functions Variables Typedefs Enumerations Enumerator Defines