aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--lib/libc_r/uthread/pthread_private.h416
-rw-r--r--lib/libc_r/uthread/uthread_attr_setschedparam.c4
-rw-r--r--lib/libc_r/uthread/uthread_cond.c66
-rw-r--r--lib/libc_r/uthread/uthread_create.c99
-rw-r--r--lib/libc_r/uthread/uthread_detach.c5
-rw-r--r--lib/libc_r/uthread/uthread_execve.c3
-rw-r--r--lib/libc_r/uthread/uthread_exit.c80
-rw-r--r--lib/libc_r/uthread/uthread_fd.c70
-rw-r--r--lib/libc_r/uthread/uthread_file.c40
-rw-r--r--lib/libc_r/uthread/uthread_fork.c3
-rw-r--r--lib/libc_r/uthread/uthread_gc.c4
-rw-r--r--lib/libc_r/uthread/uthread_getschedparam.c3
-rw-r--r--lib/libc_r/uthread/uthread_info.c2
-rw-r--r--lib/libc_r/uthread/uthread_init.c62
-rw-r--r--lib/libc_r/uthread/uthread_jmp.c153
-rw-r--r--lib/libc_r/uthread/uthread_join.c31
-rw-r--r--lib/libc_r/uthread/uthread_kern.c588
-rw-r--r--lib/libc_r/uthread/uthread_mutex.c64
-rw-r--r--lib/libc_r/uthread/uthread_priority_queue.c20
-rw-r--r--lib/libc_r/uthread/uthread_sem.c1
-rw-r--r--lib/libc_r/uthread/uthread_sendfile.c4
-rw-r--r--lib/libc_r/uthread/uthread_setschedparam.c5
-rw-r--r--lib/libc_r/uthread/uthread_sig.c1242
-rw-r--r--lib/libc_r/uthread/uthread_sigaction.c13
-rw-r--r--lib/libc_r/uthread/uthread_sigmask.c17
-rw-r--r--lib/libc_r/uthread/uthread_signal.c2
-rw-r--r--lib/libc_r/uthread/uthread_sigprocmask.c48
-rw-r--r--lib/libc_r/uthread/uthread_sigwait.c10
-rw-r--r--lib/libc_r/uthread/uthread_write.c2
-rw-r--r--lib/libc_r/uthread/uthread_yield.c3
-rw-r--r--lib/libkse/thread/thr_attr_setschedparam.c4
-rw-r--r--lib/libkse/thread/thr_cond.c66
-rw-r--r--lib/libkse/thread/thr_create.c99
-rw-r--r--lib/libkse/thread/thr_detach.c5
-rw-r--r--lib/libkse/thread/thr_exit.c80
-rw-r--r--lib/libkse/thread/thr_fork.c3
-rw-r--r--lib/libkse/thread/thr_getschedparam.c3
-rw-r--r--lib/libkse/thread/thr_info.c2
-rw-r--r--lib/libkse/thread/thr_init.c62
-rw-r--r--lib/libkse/thread/thr_join.c31
-rw-r--r--lib/libkse/thread/thr_kern.c588
-rw-r--r--lib/libkse/thread/thr_mutex.c64
-rw-r--r--lib/libkse/thread/thr_priority_queue.c20
-rw-r--r--lib/libkse/thread/thr_private.h416
-rw-r--r--lib/libkse/thread/thr_sem.c1
-rw-r--r--lib/libkse/thread/thr_setschedparam.c5
-rw-r--r--lib/libkse/thread/thr_sig.c1242
-rw-r--r--lib/libkse/thread/thr_sigaction.c13
-rw-r--r--lib/libkse/thread/thr_sigmask.c17
-rw-r--r--lib/libkse/thread/thr_sigprocmask.c48
-rw-r--r--lib/libkse/thread/thr_sigwait.c10
-rw-r--r--lib/libkse/thread/thr_write.c2
-rw-r--r--lib/libkse/thread/thr_yield.c3
-rw-r--r--lib/libpthread/thread/thr_attr_setschedparam.c4
-rw-r--r--lib/libpthread/thread/thr_cond.c66
-rw-r--r--lib/libpthread/thread/thr_create.c99
-rw-r--r--lib/libpthread/thread/thr_detach.c5
-rw-r--r--lib/libpthread/thread/thr_exit.c80
-rw-r--r--lib/libpthread/thread/thr_fork.c3
-rw-r--r--lib/libpthread/thread/thr_gc.c4
-rw-r--r--lib/libpthread/thread/thr_getschedparam.c3
-rw-r--r--lib/libpthread/thread/thr_info.c2
-rw-r--r--lib/libpthread/thread/thr_init.c62
-rw-r--r--lib/libpthread/thread/thr_join.c31
-rw-r--r--lib/libpthread/thread/thr_kern.c588
-rw-r--r--lib/libpthread/thread/thr_mutex.c64
-rw-r--r--lib/libpthread/thread/thr_priority_queue.c20
-rw-r--r--lib/libpthread/thread/thr_private.h416
-rw-r--r--lib/libpthread/thread/thr_sem.c1
-rw-r--r--lib/libpthread/thread/thr_setschedparam.c5
-rw-r--r--lib/libpthread/thread/thr_sig.c1242
-rw-r--r--lib/libpthread/thread/thr_sigaction.c13
-rw-r--r--lib/libpthread/thread/thr_sigmask.c17
-rw-r--r--lib/libpthread/thread/thr_sigprocmask.c48
-rw-r--r--lib/libpthread/thread/thr_sigwait.c10
-rw-r--r--lib/libpthread/thread/thr_write.c2
-rw-r--r--lib/libpthread/thread/thr_yield.c3
77 files changed, 5243 insertions, 3389 deletions
diff --git a/lib/libc_r/uthread/pthread_private.h b/lib/libc_r/uthread/pthread_private.h
index 9d76747ff763..50e33bc181cb 100644
--- a/lib/libc_r/uthread/pthread_private.h
+++ b/lib/libc_r/uthread/pthread_private.h
@@ -51,6 +51,7 @@
*/
#include <setjmp.h>
#include <signal.h>
+#include <stdio.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/time.h>
@@ -60,13 +61,67 @@
#include <pthread_np.h>
/*
+ * Define machine dependent macros to get and set the stack pointer
+ * from the supported contexts. Also define a macro to set the return
+ * address in a jmp_buf context.
+ *
+ * XXX - These need to be moved into architecture dependent support files.
+ */
+#if defined(__i386__)
+#define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2]))
+#define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2]))
+#define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp))
+#define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk)
+#define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk)
+#define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk)
+#define FP_SAVE_UC(ucp) do { \
+ char *fdata; \
+ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \
+ __asm__("fnsave %0": :"m"(*fdata)); \
+} while (0)
+#define FP_RESTORE_UC(ucp) do { \
+ char *fdata; \
+ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \
+ __asm__("frstor %0": :"m"(*fdata)); \
+} while (0)
+#define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (int)(ra)
+#elif defined(__alpha__)
+#include <machine/reg.h>
+#define GET_STACK_JB(jb, stk) ((unsigned long)((jb)[0]._jb[R_SP + 4]))
+#define GET_STACK_SJB(sjb, stk) ((unsigned long)((sjb)[0]._sjb[R_SP + 4]))
+#define GET_STACK_UC(ucp, stk) ((ucp)->uc_mcontext.mc_regs[R_SP])
+#define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk)
+#define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk)
+#define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk)
+#define FP_SAVE_UC(ucp)
+#define FP_RESTORE_UC(ucp)
+#define SET_RETURN_ADDR_JB(jb, ra) do { \
+ (jb)[0]._jb[2] = (long)(ra); \
+ (jb)[0]._jb[R_RA + 4] = 0; \
+ (jb)[0]._jb[R_T12 + 4] = (long)(ra); \
+} while (0)
+#else
+#error "Don't recognize this architecture!"
+#endif
+
+/*
* Kernel fatal error handler macro.
*/
#define PANIC(string) _thread_exit(__FILE__,__LINE__,string)
+
/* Output debug messages like this: */
-#define stdout_debug(_x) _thread_sys_write(1,_x,strlen(_x));
-#define stderr_debug(_x) _thread_sys_write(2,_x,strlen(_x));
+#define stdout_debug(args...) do { \
+ char buf[128]; \
+ snprintf(buf, sizeof(buf), ##args); \
+ _thread_sys_write(1, buf, strlen(buf)); \
+} while (0)
+#define stderr_debug(args...) do { \
+ char buf[128]; \
+ snprintf(buf, sizeof(buf), ##args); \
+ _thread_sys_write(2, buf, strlen(buf)); \
+} while (0)
+
/*
@@ -80,34 +135,13 @@
/*
* Waiting queue manipulation macros (using pqe link):
*/
-#if defined(_PTHREADS_INVARIANTS)
#define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd)
#define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd)
+
+#if defined(_PTHREADS_INVARIANTS)
#define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive()
#define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive()
#else
-#define PTHREAD_WAITQ_REMOVE(thrd) do { \
- TAILQ_REMOVE(&_waitingq,thrd,pqe); \
- (thrd)->flags &= ~PTHREAD_FLAGS_IN_WAITQ; \
-} while (0)
-
-#define PTHREAD_WAITQ_INSERT(thrd) do { \
- if ((thrd)->wakeup_time.tv_sec == -1) \
- TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe); \
- else { \
- pthread_t tid = TAILQ_FIRST(&_waitingq); \
- while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) && \
- ((tid->wakeup_time.tv_sec < (thrd)->wakeup_time.tv_sec) || \
- ((tid->wakeup_time.tv_sec == (thrd)->wakeup_time.tv_sec) && \
- (tid->wakeup_time.tv_nsec <= (thrd)->wakeup_time.tv_nsec)))) \
- tid = TAILQ_NEXT(tid, pqe); \
- if (tid == NULL) \
- TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe); \
- else \
- TAILQ_INSERT_BEFORE(tid,thrd,pqe); \
- } \
- (thrd)->flags |= PTHREAD_FLAGS_IN_WAITQ; \
-} while (0)
#define PTHREAD_WAITQ_CLEARACTIVE()
#define PTHREAD_WAITQ_SETACTIVE()
#endif
@@ -139,6 +173,14 @@
* called with preemption deferred (see thread_kern_sched_[un]defer).
*/
#if defined(_PTHREADS_INVARIANTS)
+#include <assert.h>
+#define PTHREAD_ASSERT(cond, msg) do { \
+ if (!(cond)) \
+ PANIC(msg); \
+} while (0)
+#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \
+ PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \
+ "Illegal call from signal handler");
#define PTHREAD_NEW_STATE(thrd, newstate) do { \
if (_thread_kern_new_state != 0) \
PANIC("Recursive PTHREAD_NEW_STATE"); \
@@ -156,6 +198,8 @@
PTHREAD_SET_STATE(thrd, newstate); \
} while (0)
#else
+#define PTHREAD_ASSERT(cond, msg)
+#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd)
#define PTHREAD_NEW_STATE(thrd, newstate) do { \
if ((thrd)->state != newstate) { \
if ((thrd)->state == PS_RUNNING) { \
@@ -379,21 +423,52 @@ enum pthread_susp {
* almost entirely on this stack.
*/
#define PTHREAD_STACK_INITIAL 0x100000
-/* Address immediately beyond the beginning of the initial thread stack. */
-#define PTHREAD_DEFAULT_PRIORITY 64
-#define PTHREAD_MAX_PRIORITY 126
+
+/*
+ * Define the different priority ranges. All applications have thread
+ * priorities constrained within 0-31. The threads library raises the
+ * priority when delivering signals in order to ensure that signal
+ * delivery happens (from the POSIX spec) "as soon as possible".
+ * In the future, the threads library will also be able to map specific
+ * threads into real-time (cooperating) processes or kernel threads.
+ * The RT and SIGNAL priorities will be used internally and added to
+ * thread base priorities so that the scheduling queue can handle both
+ * normal and RT priority threads with and without signal handling.
+ *
+ * The approach taken is that, within each class, signal delivery
+ * always has priority over thread execution.
+ */
+#define PTHREAD_DEFAULT_PRIORITY 15
#define PTHREAD_MIN_PRIORITY 0
-#define _POSIX_THREAD_ATTR_STACKSIZE
+#define PTHREAD_MAX_PRIORITY 31 /* 0x1F */
+#define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */
+#define PTHREAD_RT_PRIORITY 64 /* 0x40 */
+#define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY
+#define PTHREAD_LAST_PRIORITY \
+ (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY)
+#define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY)
/*
- * Clock resolution in nanoseconds.
+ * Clock resolution in microseconds.
*/
-#define CLOCK_RES_NSEC 10000000
+#define CLOCK_RES_USEC 10000
/*
* Time slice period in microseconds.
*/
-#define TIMESLICE_USEC 100000
+#define TIMESLICE_USEC 20000
+
+/*
+ * Define a thread-safe macro to get the current time of day
+ * which is updated at regular intervals by the scheduling signal
+ * handler.
+ */
+#define GET_CURRENT_TOD(tv) \
+ do { \
+ tv.tv_sec = _sched_tod.tv_sec; \
+ tv.tv_usec = _sched_tod.tv_usec; \
+ } while (tv.tv_sec != _sched_tod.tv_sec)
+
struct pthread_key {
spinlock_t lock;
@@ -487,8 +562,10 @@ union pthread_wait_data {
short branch; /* Line number, for debugging. */
char *fname; /* Source file name for debugging.*/
} fd;
- struct pthread_poll_data * poll_data;
+ FILE *fp;
+ struct pthread_poll_data *poll_data;
spinlock_t *spinlock;
+ struct pthread *thread;
};
/*
@@ -497,6 +574,83 @@ union pthread_wait_data {
*/
typedef void (*thread_continuation_t) (void *);
+struct pthread_state_data {
+ int psd_interrupted;
+ sigset_t psd_sigmask;
+ enum pthread_state psd_state;
+ int psd_flags;
+ struct timespec psd_wakeup_time;
+ union pthread_wait_data psd_wait_data;
+ /* XXX - What about thread->timeout and/or thread->error? */
+};
+
+
+/*
+ * Normally thread contexts are stored as jmp_bufs via _setjmp()/_longjmp(),
+ * but they may also be sigjmp_buf and ucontext_t. When a thread is
+ * interrupted by a signal, it's context is saved as a ucontext_t. An
+ * application is also free to use [_]longjmp()/[_]siglongjmp() to jump
+ * between contexts within the same thread. Future support will also
+ * include setcontext()/getcontext().
+ *
+ * Define an enumerated type that can identify the 4 different context
+ * types.
+ */
+typedef enum {
+ CTX_JB_NOSIG, /* context is jmp_buf without saved sigset */
+ CTX_JB, /* context is jmp_buf (with saved sigset) */
+ CTX_SJB, /* context is sigjmp_buf (with saved sigset) */
+ CTX_UC /* context is ucontext_t (with saved sigset) */
+} thread_context_t;
+
+/*
+ * There are 2 basic contexts that a frame may contain at any
+ * one time:
+ *
+ * o ctx - The context that the thread should return to after normal
+ * completion of the signal handler.
+ * o sig_jb - The context just before the signal handler is invoked.
+ * Attempts at abnormal returns from user supplied signal handlers
+ * will return back to the signal context to perform any necessary
+ * cleanup.
+ */
+struct pthread_signal_frame {
+ /*
+ * This stores the threads state before the signal.
+ */
+ struct pthread_state_data saved_state;
+
+ /* Beginning (bottom) of threads stack frame for this signal. */
+ unsigned long stackp;
+
+ /*
+ * Threads return context; ctxtype identifies the type of context.
+ * For signal frame 0, these point to the context storage area
+ * within the pthread structure. When handling signals (frame > 0),
+ * these point to a context storage area that is allocated off the
+ * threads stack.
+ */
+ union {
+ jmp_buf jb;
+ sigjmp_buf sigjb;
+ ucontext_t uc;
+ } ctx;
+ thread_context_t ctxtype;
+ int longjmp_val;
+
+ /* Threads "jump out of signal handler" destination frame. */
+ int dst_frame;
+
+ /*
+ * Used to return back to the signal handling frame in case
+ * the application tries to change contexts from the handler.
+ */
+ jmp_buf *sig_jb;
+
+ int signo; /* signal, arg 1 to sighandler */
+ int sig_has_args; /* use signal args if true */
+};
+
/*
* Thread structure.
*/
@@ -530,54 +684,19 @@ struct pthread {
void *stack;
struct pthread_attr attr;
-#if (defined(__FreeBSD__) || defined(__NetBSD__)) && defined(__i386__)
- /*
- * Saved floating point registers on systems where they are not
- * saved in the signal context.
- */
- char saved_fp[108];
-#endif
-
- /*
- * Saved signal context used in call to sigreturn by
- * _thread_kern_sched if sig_saved is TRUE.
- */
- ucontext_t saved_sigcontext;
-
- /*
- * Saved jump buffer used in call to longjmp by _thread_kern_sched
- * if sig_saved is FALSE.
- */
- jmp_buf saved_jmp_buf;
- jmp_buf *sighandler_jmp_buf;
-
- /*
- * Saved jump buffers for use when doing nested [sig|_]longjmp()s, as
- * when doing signal delivery.
- */
- union {
- jmp_buf jmp;
- sigjmp_buf sigjmp;
- } nested_jmp;
- int longjmp_val;
-
-#define JMPFLAGS_NONE 0x00
-#define JMPFLAGS_LONGJMP 0x01
-#define JMPFLAGS__LONGJMP 0x02
-#define JMPFLAGS_SIGLONGJMP 0x04
-#define JMPFLAGS_DEFERRED 0x08
- int jmpflags;
-
- /*
- * TRUE if the last state saved was a signal context. FALSE if the
- * last state saved was a jump buffer.
- */
- int sig_saved;
-
/*
* Used for tracking delivery of nested signal handlers.
+ * Signal frame 0 is used for normal context (when no
+ * signal handlers are active for the thread). Frame
+ * 1 is used as the context for the first signal, and
+ * frames 2 .. NSIG-1 are used when additional signals
+ * arrive interrupting already active signal handlers.
*/
- int signal_nest_level;
+ struct pthread_signal_frame *sigframes[NSIG];
+ struct pthread_signal_frame sigframe0;
+ struct pthread_signal_frame *curframe;
+ int sigframe_count;
+ int sigframe_done;
/*
* Cancelability flags - the lower 2 bits are used by cancel
@@ -588,7 +707,7 @@ struct pthread {
#define PTHREAD_CANCEL_NEEDED 0x0010
int cancelflags;
- enum pthread_susp suspended;
+ enum pthread_susp suspended;
thread_continuation_t continuation;
@@ -597,16 +716,16 @@ struct pthread {
*/
sigset_t sigmask;
sigset_t sigpend;
+ int check_pending;
/* Thread state: */
enum pthread_state state;
- enum pthread_state oldstate;
- /* Time that this thread was last made active. */
- struct timeval last_active;
+ /* Scheduling clock when this thread was last made active. */
+ long last_active;
- /* Time that this thread was last made inactive. */
- struct timeval last_inactive;
+ /* Scheduling clock when this thread was last made inactive. */
+ long last_inactive;
/*
* Number of microseconds accumulated by this thread when
@@ -615,12 +734,6 @@ struct pthread {
long slice_usec;
/*
- * Incremental priority accumulated by thread while it is ready to
- * run but is denied being run.
- */
- int inc_prio;
-
- /*
* Time to wake up thread. This is used for sleeping threads and
* for any operation which may time out (such as select).
*/
@@ -640,8 +753,7 @@ struct pthread {
/*
* The current thread can belong to only one scheduling queue at
- * a time (ready or waiting queue). It can also belong to (only)
- * one of:
+ * a time (ready or waiting queue). It can also belong to:
*
* o A queue of threads waiting for a mutex
* o A queue of threads waiting for a condition variable
@@ -651,15 +763,21 @@ struct pthread {
* o A queue of threads needing work done by the kernel thread
* (waiting for a spinlock or file I/O)
*
+ * It is possible for a thread to belong to more than one of the
+ * above queues if it is handling a signal. A thread may only
+ * enter a mutex, condition variable, or join queue when it is
+ * not being called from a signal handler. If a thread is a
+ * member of one of these queues when a signal handler is invoked,
+ * it must remain in the queue. For this reason, the links for
+ * these queues must not be (re)used for other queues.
+ *
* Use pqe for the scheduling queue link (both ready and waiting),
- * and qe for other links.
+ * sqe for synchronization (mutex, condition variable, and join)
+ * queue links, and qe for all other links.
*/
-
- /* Priority queue entry for this thread: */
- TAILQ_ENTRY(pthread) pqe;
-
- /* Queue entry for this thread: */
- TAILQ_ENTRY(pthread) qe;
+ TAILQ_ENTRY(pthread) pqe; /* priority queue link */
+ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */
+ TAILQ_ENTRY(pthread) qe; /* all other queues link */
/* Wait data. */
union pthread_wait_data data;
@@ -694,14 +812,17 @@ struct pthread {
int flags;
#define PTHREAD_FLAGS_PRIVATE 0x0001
#define PTHREAD_EXITING 0x0002
-#define PTHREAD_FLAGS_IN_CONDQ 0x0004 /* in condition queue using qe link*/
-#define PTHREAD_FLAGS_IN_WORKQ 0x0008 /* in work queue using qe link */
-#define PTHREAD_FLAGS_IN_WAITQ 0x0010 /* in waiting queue using pqe link */
-#define PTHREAD_FLAGS_IN_PRIOQ 0x0020 /* in priority queue using pqe link */
-#define PTHREAD_FLAGS_IN_MUTEXQ 0x0040 /* in mutex queue using qe link */
-#define PTHREAD_FLAGS_IN_FILEQ 0x0080 /* in file lock queue using qe link */
-#define PTHREAD_FLAGS_IN_FDQ 0x0100 /* in fd lock queue using qe link */
-#define PTHREAD_FLAGS_TRACE 0x0200 /* for debugging purposes */
+#define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */
+#define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */
+#define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */
+#define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */
+#define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */
+#define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/
+#define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */
+#define PTHREAD_FLAGS_IN_JOINQ 0x0200 /* in join queue using sqe link */
+#define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */
+#define PTHREAD_FLAGS_IN_SYNCQ \
+ (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ | PTHREAD_FLAGS_IN_JOINQ)
/*
* Base priority is the user setable and retrievable priority
@@ -820,14 +941,31 @@ SCLASS int _thread_kern_in_sched
;
#endif
-/* Last time that an incremental priority update was performed: */
-SCLASS struct timeval kern_inc_prio_time
+SCLASS int _sig_in_handler
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0;
+#else
+;
+#endif
+
+/* Time of day at last scheduling timer signal: */
+SCLASS struct timeval volatile _sched_tod
#ifdef GLOBAL_PTHREAD_PRIVATE
= { 0, 0 };
#else
;
#endif
+/*
+ * Current scheduling timer ticks; used as resource usage.
+ */
+SCLASS unsigned int volatile _sched_ticks
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0;
+#else
+;
+#endif
+
/* Dead threads: */
SCLASS TAILQ_HEAD(, pthread) _dead_list
#ifdef GLOBAL_PTHREAD_PRIVATE
@@ -905,9 +1043,9 @@ SCLASS int _thread_dtablesize /* Descriptor table size. */
;
#endif
-SCLASS int _clock_res_nsec /* Clock resolution in nsec. */
+SCLASS int _clock_res_usec /* Clock resolution in usec. */
#ifdef GLOBAL_PTHREAD_PRIVATE
-= CLOCK_RES_NSEC;
+= CLOCK_RES_USEC;
#else
;
#endif
@@ -937,9 +1075,10 @@ SCLASS struct sigaction _thread_sigact[NSIG];
SCLASS int _thread_dfl_count[NSIG];
/*
- * Pending signals for this process.
+ * Pending signals and mask for this process:
*/
SCLASS sigset_t _process_sigpending;
+SCLASS sigset_t _process_sigmask;
/*
* Scheduling queues:
@@ -959,6 +1098,21 @@ SCLASS volatile int _spinblock_count
#endif
;
+/* Used to maintain pending and active signals: */
+struct sigstatus {
+ int pending; /* Is this a pending signal? */
+ int blocked; /*
+ * A handler is currently active for
+ * this signal; ignore subsequent
+ * signals until the handler is done.
+ */
+ int signo; /* arg 1 to signal handler */
+ siginfo_t siginfo; /* arg 2 to signal handler */
+ ucontext_t uc; /* arg 3 to signal handler */
+};
+
+SCLASS struct sigstatus _thread_sigq[NSIG];
+
/* Indicates that the signal queue needs to be checked. */
SCLASS volatile int _sigq_check_reqd
#ifdef GLOBAL_PTHREAD_PRIVATE
@@ -998,6 +1152,18 @@ SCLASS void * _next_stack
#endif
;
+/*
+ * Declare the kernel scheduler jump buffer and stack:
+ */
+SCLASS jmp_buf _thread_kern_sched_jb;
+
+SCLASS void * _thread_kern_sched_stack
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= NULL
+#endif
+;
+
+
/* Used for _PTHREADS_INVARIANTS checking. */
SCLASS int _thread_kern_new_state
#ifdef GLOBAL_PTHREAD_PRIVATE
@@ -1025,15 +1191,19 @@ __BEGIN_DECLS
char *__ttyname_basic(int);
char *__ttyname_r_basic(int, char *, size_t);
char *ttyname_r(int, char *, size_t);
+void _cond_wait_backout(pthread_t);
+void _fd_lock_backout(pthread_t);
int _find_dead_thread(pthread_t);
int _find_thread(pthread_t);
+void _flockfile_backout(pthread_t);
void _funlock_owned(pthread_t);
+void _join_backout(pthread_t);
int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t);
int _thread_fd_lock(int, int, struct timespec *);
int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno);
-void _dispatch_signals(void);
int _mutex_cv_lock(pthread_mutex_t *);
int _mutex_cv_unlock(pthread_mutex_t *);
+void _mutex_lock_backout(pthread_t);
void _mutex_notify_priochange(pthread_t);
int _mutex_reinit(pthread_mutex_t *);
void _mutex_unlock_private(pthread_t);
@@ -1044,14 +1214,15 @@ void _pq_remove(struct pq_queue *pq, struct pthread *);
void _pq_insert_head(struct pq_queue *pq, struct pthread *);
void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
struct pthread *_pq_first(struct pq_queue *pq);
-#if defined(_PTHREADS_INVARIANTS)
void _waitq_insert(pthread_t pthread);
void _waitq_remove(pthread_t pthread);
+#if defined(_PTHREADS_INVARIANTS)
void _waitq_setactive(void);
void _waitq_clearactive(void);
#endif
void _thread_exit(char *, int, char *);
void _thread_exit_cleanup(void);
+void _thread_exit_finish(void);
void _thread_fd_unlock(int, int);
void _thread_fd_unlock_debug(int, int, char *, int);
void _thread_fd_unlock_owned(pthread_t);
@@ -1060,20 +1231,23 @@ void _thread_cleanupspecific(void);
void _thread_dump_info(void);
void _thread_init(void);
void _thread_kern_sched(ucontext_t *);
-void _thread_kern_sched_state(enum pthread_state,char *fname,int lineno);
+void _thread_kern_scheduler(void);
+void _thread_kern_sched_frame(int frame);
+void _thread_kern_sched_sig(void);
+void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno);
void _thread_kern_sched_state_unlock(enum pthread_state state,
spinlock_t *lock, char *fname, int lineno);
void _thread_kern_set_timeout(const struct timespec *);
void _thread_kern_sig_defer(void);
void _thread_kern_sig_undefer(void);
-void _thread_sig_handler(int, int, ucontext_t *);
-pthread_t _thread_sig_handle(int, ucontext_t *);
-void _thread_sig_init(void);
+void _thread_sig_handler(int, siginfo_t *, ucontext_t *);
+void _thread_sig_check_pending(pthread_t pthread);
+void _thread_sig_handle_pending(void);
void _thread_sig_send(pthread_t pthread, int sig);
-void _thread_sig_deliver(pthread_t pthread, int sig);
+void _thread_sig_wrapper(void);
+int _thread_sigframe_find(pthread_t pthread, void *stackp);
void _thread_start(void);
-void _thread_start_sig_handler(void);
-void _thread_seterrno(pthread_t,int);
+void _thread_seterrno(pthread_t, int);
int _thread_fd_table_init(int fd);
pthread_addr_t _thread_gc(pthread_addr_t);
void _thread_enter_cancellation_point(void);
diff --git a/lib/libc_r/uthread/uthread_attr_setschedparam.c b/lib/libc_r/uthread/uthread_attr_setschedparam.c
index 6c4166b1784a..755bb13b7acc 100644
--- a/lib/libc_r/uthread/uthread_attr_setschedparam.c
+++ b/lib/libc_r/uthread/uthread_attr_setschedparam.c
@@ -45,6 +45,10 @@ pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param
ret = EINVAL;
else if (param == NULL) {
ret = ENOTSUP;
+ } else if ((param->sched_priority < PTHREAD_MIN_PRIORITY) ||
+ (param->sched_priority > PTHREAD_MAX_PRIORITY)) {
+ /* Return an unsupported value error. */
+ ret = ENOTSUP;
} else
(*attr)->prio = param->sched_priority;
diff --git a/lib/libc_r/uthread/uthread_cond.c b/lib/libc_r/uthread/uthread_cond.c
index 0a5298bcfcc0..50cf92765954 100644
--- a/lib/libc_r/uthread/uthread_cond.c
+++ b/lib/libc_r/uthread/uthread_cond.c
@@ -170,10 +170,7 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
* perform the dynamic initialization:
*/
else if (*cond != NULL ||
- (rval = pthread_cond_init(cond,NULL)) == 0) {
-
- _thread_enter_cancellation_point();
-
+ (rval = pthread_cond_init(cond, NULL)) == 0) {
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -286,8 +283,6 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
if (_thread_run->continuation != NULL)
_thread_run->continuation((void *) _thread_run);
}
-
- _thread_leave_cancellation_point();
}
_thread_leave_cancellation_point();
@@ -313,8 +308,6 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
* initialization.
*/
else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
- _thread_enter_cancellation_point();
-
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -446,8 +439,6 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
if (_thread_run->continuation != NULL)
_thread_run->continuation((void *) _thread_run);
}
-
- _thread_leave_cancellation_point();
}
_thread_leave_cancellation_point();
@@ -589,6 +580,48 @@ pthread_cond_broadcast(pthread_cond_t * cond)
return (rval);
}
+void
+_cond_wait_backout(pthread_t pthread)
+{
+ pthread_cond_t cond;
+
+ cond = pthread->data.cond;
+ if (cond != NULL) {
+ /*
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /* Lock the condition variable structure: */
+ _SPINLOCK(&cond->lock);
+
+ /* Process according to condition variable type: */
+ switch (cond->c_type) {
+ /* Fast condition variable: */
+ case COND_TYPE_FAST:
+ cond_queue_remove(cond, pthread);
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&cond->c_queue) == NULL)
+ cond->c_mutex = NULL;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Unlock the condition variable structure: */
+ _SPINUNLOCK(&cond->lock);
+
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
+ */
+ _thread_kern_sig_undefer();
+ }
+}
+
/*
* Dequeue a waiting thread from the head of a condition queue in
* descending priority order.
@@ -599,7 +632,7 @@ cond_queue_deq(pthread_cond_t cond)
pthread_t pthread;
while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
- TAILQ_REMOVE(&cond->c_queue, pthread, qe);
+ TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
if ((pthread->timeout == 0) && (pthread->interrupted == 0))
/*
@@ -628,7 +661,7 @@ cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
* it isn't in the queue.
*/
if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) {
- TAILQ_REMOVE(&cond->c_queue, pthread, qe);
+ TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
}
}
@@ -642,19 +675,22 @@ cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
{
pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head);
+ PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
+
/*
* For the common case of all threads having equal priority,
* we perform a quick check against the priority of the thread
* at the tail of the queue.
*/
if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
- TAILQ_INSERT_TAIL(&cond->c_queue, pthread, qe);
+ TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe);
else {
tid = TAILQ_FIRST(&cond->c_queue);
while (pthread->active_priority <= tid->active_priority)
- tid = TAILQ_NEXT(tid, qe);
- TAILQ_INSERT_BEFORE(tid, pthread, qe);
+ tid = TAILQ_NEXT(tid, sqe);
+ TAILQ_INSERT_BEFORE(tid, pthread, sqe);
}
pthread->flags |= PTHREAD_FLAGS_IN_CONDQ;
+ pthread->data.cond = cond;
}
#endif
diff --git a/lib/libc_r/uthread/uthread_create.c b/lib/libc_r/uthread/uthread_create.c
index 3a80611dc145..0390f1be7205 100644
--- a/lib/libc_r/uthread/uthread_create.c
+++ b/lib/libc_r/uthread/uthread_create.c
@@ -49,17 +49,24 @@
static u_int64_t next_uniqueid = 1;
#define OFF(f) offsetof(struct pthread, f)
+#define SIGFRAME_OFF(f) offsetof(struct pthread_signal_frame, f)
int _thread_next_offset = OFF(tle.tqe_next);
int _thread_uniqueid_offset = OFF(uniqueid);
int _thread_state_offset = OFF(state);
int _thread_name_offset = OFF(name);
-int _thread_sig_saved_offset = OFF(sig_saved);
-int _thread_saved_sigcontext_offset = OFF(saved_sigcontext);
-int _thread_saved_jmp_buf_offset = OFF(saved_jmp_buf);
+int _thread_curframe_offset = OFF(curframe);
+int _thread_sigframe_ctx_offset = SIGFRAME_OFF(ctx);
+int _thread_sigframe_ctxtype_offset = SIGFRAME_OFF(ctxtype);
#undef OFF
+#undef SIGFRAME_OFF
int _thread_PS_RUNNING_value = PS_RUNNING;
int _thread_PS_DEAD_value = PS_DEAD;
+int _thread_CTX_JB_NOSIG_value = CTX_JB_NOSIG;
+int _thread_CTX_JB_value = CTX_JB;
+int _thread_CTX_SJB_value = CTX_SJB;
+int _thread_CTX_UC_value = CTX_UC;
+int _thread_sigframe_size_value = sizeof(struct pthread_signal_frame);
int
pthread_create(pthread_t * thread, const pthread_attr_t * attr,
@@ -162,7 +169,6 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Initialise the thread structure: */
memset(new_thread, 0, sizeof(struct pthread));
new_thread->slice_usec = -1;
- new_thread->sig_saved = 0;
new_thread->stack = stack;
new_thread->start_routine = start_routine;
new_thread->arg = arg;
@@ -179,62 +185,32 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Initialise the thread for signals: */
new_thread->sigmask = _thread_run->sigmask;
+ /* Initialize the first signal frame: */
+ new_thread->sigframes[0] = &new_thread->sigframe0;
+ new_thread->curframe = &new_thread->sigframe0;
+
/* Initialise the jump buffer: */
- setjmp(new_thread->saved_jmp_buf);
+ _setjmp(new_thread->curframe->ctx.jb);
/*
* Set up new stack frame so that it looks like it
* returned from a longjmp() to the beginning of
* _thread_start().
*/
-#if defined(__FreeBSD__)
-#if defined(__alpha__)
- new_thread->saved_jmp_buf[0]._jb[2] =
- (long)_thread_start;
- new_thread->saved_jmp_buf[0]._jb[4 + R_RA] =
- 0;
- new_thread->saved_jmp_buf[0]._jb[4 + R_T12] =
- (long)_thread_start;
-#else
- new_thread->saved_jmp_buf[0]._jb[0] =
- (long)_thread_start;
-#endif
-#elif defined(__NetBSD__)
-#if defined(__alpha__)
- new_thread->saved_jmp_buf[2] = (long)_thread_start;
- new_thread->saved_jmp_buf[4 + R_RA] = 0;
- new_thread->saved_jmp_buf[4 + R_T12] =
- (long)_thread_start;
-#else
- new_thread->saved_jmp_buf[0] = (long)_thread_start;
-#endif
-#else
-#error "Don't recognize this operating system!"
-#endif
+ SET_RETURN_ADDR_JB(new_thread->curframe->ctx.jb,
+ _thread_start);
/* The stack starts high and builds down: */
-#if defined(__FreeBSD__)
-#if defined(__alpha__)
- new_thread->saved_jmp_buf[0]._jb[4 + R_SP] =
- (long)new_thread->stack + pattr->stacksize_attr
- - sizeof(double);
-#else
- new_thread->saved_jmp_buf[0]._jb[2] =
- (int)(new_thread->stack + pattr->stacksize_attr -
- sizeof(double));
-#endif
-#elif defined(__NetBSD__)
-#if defined(__alpha__)
- new_thread->saved_jmp_buf[4 + R_SP] =
- (long)new_thread->stack + pattr->stacksize_attr -
- sizeof(double);
-#else
- new_thread->saved_jmp_buf[2] = (long)new_thread->stack
- + pattr->stacksize_attr - sizeof(double);
-#endif
-#else
-#error "Don't recognize this operating system!"
-#endif
+ SET_STACK_JB(new_thread->curframe->ctx.jb,
+ (long)new_thread->stack + pattr->stacksize_attr
+ - sizeof(double));
+
+ /* Initialize the rest of the frame: */
+ new_thread->curframe->ctxtype = CTX_JB_NOSIG;
+ /* Set the base of the stack: */
+ new_thread->curframe->stackp =
+ GET_STACK_JB(new_thread->curframe->ctx.jb);
+ new_thread->sigframe_count = 0;
/* Copy the thread attributes: */
memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));
@@ -245,20 +221,22 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
/* Copy the scheduling attributes: */
- new_thread->base_priority
- = _thread_run->base_priority;
- new_thread->attr.prio
- = _thread_run->base_priority;
- new_thread->attr.sched_policy
- = _thread_run->attr.sched_policy;
+ new_thread->base_priority =
+ _thread_run->base_priority &
+ ~PTHREAD_SIGNAL_PRIORITY;
+ new_thread->attr.prio =
+ _thread_run->base_priority &
+ ~PTHREAD_SIGNAL_PRIORITY;
+ new_thread->attr.sched_policy =
+ _thread_run->attr.sched_policy;
} else {
/*
* Use just the thread priority, leaving the
* other scheduling attributes as their
* default values:
*/
- new_thread->base_priority
- = new_thread->attr.prio;
+ new_thread->base_priority =
+ new_thread->attr.prio;
}
new_thread->active_priority = new_thread->base_priority;
new_thread->inherited_priority = 0;
@@ -275,7 +253,6 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->flags = 0;
new_thread->poll_data.nfds = 0;
new_thread->poll_data.fds = NULL;
- new_thread->jmpflags = 0;
new_thread->continuation = NULL;
/*
@@ -317,7 +294,6 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Schedule the new user thread: */
_thread_kern_sched(NULL);
-
/*
* Start a garbage collector thread
* if necessary.
@@ -325,6 +301,7 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
if (f_gc && pthread_create(&gc_thread,NULL,
_thread_gc,NULL) != 0)
PANIC("Can't create gc thread");
+
}
}
diff --git a/lib/libc_r/uthread/uthread_detach.c b/lib/libc_r/uthread/uthread_detach.c
index 89cc6671179c..3bade9d5b244 100644
--- a/lib/libc_r/uthread/uthread_detach.c
+++ b/lib/libc_r/uthread/uthread_detach.c
@@ -61,9 +61,10 @@ pthread_detach(pthread_t pthread)
/* Enter a loop to bring all threads off the join queue: */
while ((next_thread = TAILQ_FIRST(&pthread->join_queue)) != NULL) {
/* Remove the thread from the queue: */
- TAILQ_REMOVE(&pthread->join_queue, next_thread, qe);
+ TAILQ_REMOVE(&pthread->join_queue, next_thread, sqe);
+ pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ;
- /* Make the thread run: */
+ /* Make the thread runnable: */
PTHREAD_NEW_STATE(next_thread,PS_RUNNING);
}
diff --git a/lib/libc_r/uthread/uthread_execve.c b/lib/libc_r/uthread/uthread_execve.c
index 37d6e2cfe640..ed1906c54bd2 100644
--- a/lib/libc_r/uthread/uthread_execve.c
+++ b/lib/libc_r/uthread/uthread_execve.c
@@ -93,9 +93,6 @@ _execve(const char *name, char *const * argv, char *const * envp)
act.sa_mask = _thread_sigact[i - 1].sa_mask;
act.sa_flags = _thread_sigact[i - 1].sa_flags;
- /* Ensure the scheduling signal is masked: */
- sigaddset(&act.sa_mask, _SCHED_SIGNAL);
-
/* Change the signal action for the process: */
_thread_sys_sigaction(i, &act, &oact);
}
diff --git a/lib/libc_r/uthread/uthread_exit.c b/lib/libc_r/uthread/uthread_exit.c
index 0d22638fd7fc..7fbeb656192f 100644
--- a/lib/libc_r/uthread/uthread_exit.c
+++ b/lib/libc_r/uthread/uthread_exit.c
@@ -41,6 +41,9 @@
#include <pthread.h>
#include "pthread_private.h"
+#define FLAGS_IN_SCHEDQ \
+ (PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ)
+
void __exit(int status)
{
int flags;
@@ -138,7 +141,7 @@ _thread_exit_cleanup(void)
void
pthread_exit(void *status)
{
- pthread_t pthread;
+ int frame;
/* Check if this thread is already in the process of exiting: */
if ((_thread_run->flags & PTHREAD_EXITING) != 0) {
@@ -172,25 +175,24 @@ pthread_exit(void *status)
_thread_run->poll_data.fds = NULL;
}
- /*
- * Defer signals to protect the scheduling queues from access
- * by the signal handler:
- */
- _thread_kern_sig_defer();
-
- /* Check if there are any threads joined to this one: */
- while ((pthread = TAILQ_FIRST(&(_thread_run->join_queue))) != NULL) {
- /* Remove the thread from the queue: */
- TAILQ_REMOVE(&_thread_run->join_queue, pthread, qe);
-
- /* Wake the joined thread and let it detach this thread: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ if ((frame = _thread_run->sigframe_count) == 0)
+ _thread_exit_finish();
+ else {
+ /*
+ * Jump back and unwind the signal frames to gracefully
+ * cleanup.
+ */
+ ___longjmp(*_thread_run->sigframes[frame]->sig_jb, 1);
}
- /*
- * Undefer and handle pending signals, yielding if necessary:
- */
- _thread_kern_sig_undefer();
+ /* This point should not be reached. */
+ PANIC("Dead thread has resumed");
+}
+
+void
+_thread_exit_finish(void)
+{
+ pthread_t pthread;
/*
* Lock the garbage collector mutex to ensure that the garbage
@@ -203,20 +205,6 @@ pthread_exit(void *status)
TAILQ_INSERT_HEAD(&_dead_list, _thread_run, dle);
/*
- * Defer signals to protect the scheduling queues from access
- * by the signal handler:
- */
- _thread_kern_sig_defer();
-
- /* Remove this thread from the thread list: */
- TAILQ_REMOVE(&_thread_list, _thread_run, tle);
-
- /*
- * Undefer and handle pending signals, yielding if necessary:
- */
- _thread_kern_sig_undefer();
-
- /*
* Signal the garbage collector thread that there is something
* to clean up.
*/
@@ -224,19 +212,33 @@ pthread_exit(void *status)
PANIC("Cannot signal gc cond");
/*
- * Mark the thread as dead so it will not return if it
- * gets context switched out when the mutex is unlocked.
+ * Avoid a race condition where a scheduling signal can occur
+ * causing the garbage collector thread to run. If this happens,
+ * the current thread can be cleaned out from under us.
*/
- PTHREAD_SET_STATE(_thread_run, PS_DEAD);
+ _thread_kern_sig_defer();
/* Unlock the garbage collector mutex: */
if (pthread_mutex_unlock(&_gc_mutex) != 0)
PANIC("Cannot lock gc mutex");
- /* This this thread will never be re-scheduled. */
- _thread_kern_sched(NULL);
+ /* Check if there are any threads joined to this one: */
+ while ((pthread = TAILQ_FIRST(&(_thread_run->join_queue))) != NULL) {
+ /* Remove the thread from the queue: */
+ TAILQ_REMOVE(&_thread_run->join_queue, pthread, sqe);
+ pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ;
+
+ /*
+ * Wake the joined thread and let it
+ * detach this thread:
+ */
+ PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ }
- /* This point should not be reached. */
- PANIC("Dead thread has resumed");
+ /* Remove this thread from the thread list: */
+ TAILQ_REMOVE(&_thread_list, _thread_run, tle);
+
+ /* This thread will never be re-scheduled. */
+ _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__);
}
#endif
diff --git a/lib/libc_r/uthread/uthread_fd.c b/lib/libc_r/uthread/uthread_fd.c
index 77ccad1e4e57..f609ae0942b7 100644
--- a/lib/libc_r/uthread/uthread_fd.c
+++ b/lib/libc_r/uthread/uthread_fd.c
@@ -295,9 +295,6 @@ _thread_fd_unlock(int fd, int lock_type)
*/
_thread_kern_sig_undefer();
}
-
- /* Nothing to return. */
- return;
}
int
@@ -326,7 +323,8 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
* Wait for the file descriptor to be locked
* for read for the current thread:
*/
- if (_thread_fd_table[fd]->r_owner != _thread_run) {
+ while ((_thread_fd_table[fd]->r_owner != _thread_run) &&
+ (_thread_run->interrupted == 0)) {
/*
* Check if the file descriptor is locked by
* another thread:
@@ -404,7 +402,8 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
* Wait for the file descriptor to be locked
* for write for the current thread:
*/
- if (_thread_fd_table[fd]->w_owner != _thread_run) {
+ while ((_thread_fd_table[fd]->w_owner != _thread_run) &&
+ (_thread_run->interrupted == 0)) {
/*
* Check if the file descriptor is locked by
* another thread:
@@ -608,9 +607,6 @@ _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
*/
_thread_kern_sig_undefer();
}
-
- /* Nothing to return. */
- return;
}
int
@@ -640,7 +636,8 @@ _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
* Wait for the file descriptor to be locked
* for read for the current thread:
*/
- if (_thread_fd_table[fd]->r_owner != _thread_run) {
+ while ((_thread_fd_table[fd]->r_owner != _thread_run) &&
+ (_thread_run->interrupted == 0)) {
/*
* Check if the file descriptor is locked by
* another thread:
@@ -727,7 +724,8 @@ _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
* Wait for the file descriptor to be locked
* for write for the current thread:
*/
- if (_thread_fd_table[fd]->w_owner != _thread_run) {
+ while ((_thread_fd_table[fd]->w_owner != _thread_run) &&
+ (_thread_run->interrupted == 0)) {
/*
* Check if the file descriptor is locked by
* another thread:
@@ -902,6 +900,58 @@ _thread_fd_unlock_owned(pthread_t pthread)
}
}
+void
+_fd_lock_backout(pthread_t pthread)
+{
+ int fd;
+
+ /*
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ switch (pthread->state) {
+
+ case PS_FDLR_WAIT:
+ fd = pthread->data.fd.fd;
+
+ /*
+ * Lock the file descriptor table entry to prevent
+		 * other threads from clashing with the current
+ * thread's accesses:
+ */
+ _SPINLOCK(&_thread_fd_table[fd]->lock);
+
+ /* Remove the thread from the waiting queue: */
+ FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread);
+ break;
+
+ case PS_FDLW_WAIT:
+ fd = pthread->data.fd.fd;
+
+ /*
+ * Lock the file descriptor table entry to prevent
+ * other threads from clashing with the current
+ * thread's accesses:
+ */
+ _SPINLOCK(&_thread_fd_table[fd]->lock);
+
+ /* Remove the thread from the waiting queue: */
+ FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread);
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary.
+ */
+ _thread_kern_sig_undefer();
+}
+
static inline pthread_t
fd_next_reader(int fd)
{
diff --git a/lib/libc_r/uthread/uthread_file.c b/lib/libc_r/uthread/uthread_file.c
index 88b6a8fd083f..8f1afcda250b 100644
--- a/lib/libc_r/uthread/uthread_file.c
+++ b/lib/libc_r/uthread/uthread_file.c
@@ -245,6 +245,8 @@ _flockfile_debug(FILE * fp, char *fname, int lineno)
/* Unlock the hash table: */
_SPINUNLOCK(&hash_lock);
+ _thread_run->data.fp = fp;
+
/* Wait on the FILE lock: */
_thread_kern_sched_state(PS_FILE_WAIT, fname, lineno);
@@ -260,14 +262,12 @@ _flockfile_debug(FILE * fp, char *fname, int lineno)
_thread_run->continuation((void *)_thread_run);
}
}
- return;
}
void
_flockfile(FILE * fp)
{
_flockfile_debug(fp, __FILE__, __LINE__);
- return;
}
int
@@ -398,7 +398,6 @@ _funlockfile(FILE * fp)
*/
_thread_kern_sig_undefer();
}
- return;
}
void
@@ -469,4 +468,39 @@ _funlock_owned(pthread_t pthread)
_thread_kern_sig_undefer();
}
+void
+_flockfile_backout(pthread_t pthread)
+{
+ int idx = file_idx(pthread->data.fp);
+ struct file_lock *p;
+
+ /*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /*
+ * Get a pointer to the lock for the file and check that
+ * the running thread is the one with the lock:
+ */
+ if (((pthread->flags & PTHREAD_FLAGS_IN_FILEQ) != 0) &&
+ ((p = find_lock(idx, pthread->data.fp)) != NULL)) {
+ /* Lock the hash table: */
+ _SPINLOCK(&hash_lock);
+
+ /* Remove the thread from the queue: */
+ TAILQ_REMOVE(&p->l_head, pthread, qe);
+ pthread->flags &= ~PTHREAD_FLAGS_IN_FILEQ;
+
+ /* Unlock the hash table: */
+ _SPINUNLOCK(&hash_lock);
+ }
+
+ /*
+ * Undefer and handle pending signals, yielding if necessary:
+ */
+ _thread_kern_sig_undefer();
+}
+
#endif
diff --git a/lib/libc_r/uthread/uthread_fork.c b/lib/libc_r/uthread/uthread_fork.c
index a8e85d86e18f..97039b3ef804 100644
--- a/lib/libc_r/uthread/uthread_fork.c
+++ b/lib/libc_r/uthread/uthread_fork.c
@@ -183,9 +183,6 @@ _fork(void)
/* Don't queue signals yet: */
_queue_signals = 0;
- /* Initialize signal handling: */
- _thread_sig_init();
-
/* Initialize the scheduling switch hook routine: */
_sched_switch_hook = NULL;
diff --git a/lib/libc_r/uthread/uthread_gc.c b/lib/libc_r/uthread/uthread_gc.c
index cd1f8383a33d..a81ea9161912 100644
--- a/lib/libc_r/uthread/uthread_gc.c
+++ b/lib/libc_r/uthread/uthread_gc.c
@@ -57,8 +57,8 @@ _thread_gc(pthread_addr_t arg)
void *p_stack;
/* Block all signals */
- sigfillset (&mask);
- sigprocmask (SIG_BLOCK, &mask, NULL);
+ sigfillset(&mask);
+ pthread_sigmask(SIG_BLOCK, &mask, NULL);
/* Mark this thread as a library thread (not a user thread). */
_thread_run->flags |= PTHREAD_FLAGS_PRIVATE;
diff --git a/lib/libc_r/uthread/uthread_getschedparam.c b/lib/libc_r/uthread/uthread_getschedparam.c
index 09d8c1bc3283..5dbd1f0e83fb 100644
--- a/lib/libc_r/uthread/uthread_getschedparam.c
+++ b/lib/libc_r/uthread/uthread_getschedparam.c
@@ -49,7 +49,8 @@ pthread_getschedparam(pthread_t pthread, int *policy,
/* Find the thread in the list of active threads: */
else if ((ret = _find_thread(pthread)) == 0) {
/* Return the threads base priority and scheduling policy: */
- param->sched_priority = pthread->base_priority;
+ param->sched_priority =
+ PTHREAD_BASE_PRIORITY(pthread->base_priority);
*policy = pthread->attr.sched_policy;
}
diff --git a/lib/libc_r/uthread/uthread_info.c b/lib/libc_r/uthread/uthread_info.c
index d091ec12113c..ca91512edb5f 100644
--- a/lib/libc_r/uthread/uthread_info.c
+++ b/lib/libc_r/uthread/uthread_info.c
@@ -32,6 +32,7 @@
* $FreeBSD$
*/
#include <stdio.h>
+#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
@@ -296,7 +297,6 @@ _thread_dump_info(void)
/* Close the dump file: */
_thread_sys_close(fd);
}
- return;
}
/* Set the thread name for debug: */
diff --git a/lib/libc_r/uthread/uthread_init.c b/lib/libc_r/uthread/uthread_init.c
index 8e13f90dc993..3cbd453d3e27 100644
--- a/lib/libc_r/uthread/uthread_init.c
+++ b/lib/libc_r/uthread/uthread_init.c
@@ -90,9 +90,9 @@ _thread_init(void)
int i;
size_t len;
int mib[2];
- struct timeval tv;
struct clockinfo clockinfo;
struct sigaction act;
+ struct itimerval itimer;
/* Check if this function has already been called: */
if (_thread_initial)
@@ -160,7 +160,7 @@ _thread_init(void)
PANIC("Cannot get kernel write pipe flags");
}
/* Allocate and initialize the ready queue: */
- else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_MAX_PRIORITY) != 0) {
+ else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) != 0) {
/* Abort this application: */
PANIC("Cannot allocate priority ready queue.");
}
@@ -171,7 +171,11 @@ _thread_init(void)
* abort:
*/
PANIC("Cannot allocate memory for initial thread");
- } else {
+ }
+ /* Allocate memory for the scheduler stack: */
+ else if ((_thread_kern_sched_stack = malloc(PAGE_SIZE * 10)) == NULL)
+ PANIC("Failed to allocate stack for scheduler");
+ else {
/* Zero the global kernel thread structure: */
memset(&_thread_kern_thread, 0, sizeof(struct pthread));
_thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE;
@@ -211,6 +215,12 @@ _thread_init(void)
_thread_initial->attr.stackaddr_attr = _thread_initial->stack;
_thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL;
+ /* Setup the context for the scheduler: */
+ _setjmp(_thread_kern_sched_jb);
+ SET_STACK_JB(_thread_kern_sched_jb,
+ _thread_kern_sched_stack + PAGE_SIZE*10 - sizeof(double));
+ SET_RETURN_ADDR_JB(_thread_kern_sched_jb, _thread_kern_scheduler);
+
/*
* Write a magic value to the thread structure
* to help identify valid ones:
@@ -236,10 +246,19 @@ _thread_init(void)
TAILQ_INIT(&(_thread_initial->mutexq));
_thread_initial->priority_mutex_count = 0;
- /* Initialize last active time to now: */
- gettimeofday(&tv, NULL);
- _thread_initial->last_active.tv_sec = tv.tv_sec;
- _thread_initial->last_active.tv_usec = tv.tv_usec;
+ /* Initialize the global scheduling time: */
+ _sched_ticks = 0;
+ gettimeofday((struct timeval *) &_sched_tod, NULL);
+
+ /* Initialize last active: */
+ _thread_initial->last_active = (long) _sched_ticks;
+
+ /* Initialize the initial signal frame: */
+ _thread_initial->sigframes[0] = &_thread_initial->sigframe0;
+ _thread_initial->curframe = &_thread_initial->sigframe0;
+ _thread_initial->curframe->ctxtype = CTX_JB_NOSIG;
+ /* Set the base of the stack: */
+ _thread_initial->curframe->stackp = (unsigned long) USRSTACK;
/* Initialise the rest of the fields: */
_thread_initial->poll_data.nfds = 0;
@@ -257,10 +276,13 @@ _thread_init(void)
/* Initialise the global signal action structure: */
sigfillset(&act.sa_mask);
act.sa_handler = (void (*) ()) _thread_sig_handler;
- act.sa_flags = 0;
+ act.sa_flags = SA_SIGINFO;
+
+ /* Clear pending signals for the process: */
+ sigemptyset(&_process_sigpending);
- /* Initialize signal handling: */
- _thread_sig_init();
+ /* Clear the signal queue: */
+ memset(_thread_sigq, 0, sizeof(_thread_sigq));
/* Enter a loop to get the existing signal status: */
for (i = 1; i < NSIG; i++) {
@@ -295,13 +317,19 @@ _thread_init(void)
*/
PANIC("Cannot initialise signal handler");
}
+ _thread_sigact[_SCHED_SIGNAL - 1].sa_flags = SA_SIGINFO;
+ _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO;
+ _thread_sigact[SIGCHLD - 1].sa_flags = SA_SIGINFO;
+
+ /* Get the process signal mask: */
+ _thread_sys_sigprocmask(SIG_SETMASK, NULL, &_process_sigmask);
/* Get the kernel clockrate: */
mib[0] = CTL_KERN;
mib[1] = KERN_CLOCKRATE;
len = sizeof (struct clockinfo);
if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0)
- _clock_res_nsec = clockinfo.tick * 1000;
+ _clock_res_usec = clockinfo.tick;
/* Get the table size: */
if ((_thread_dtablesize = getdtablesize()) < 0) {
@@ -346,6 +374,14 @@ _thread_init(void)
PANIC("Cannot initialize stdio file "
"descriptor table entry");
}
+
+ /* Install the scheduling timer: */
+ itimer.it_interval.tv_sec = 0;
+ itimer.it_interval.tv_usec = _clock_res_usec;
+ itimer.it_value = itimer.it_interval;
+ if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0)
+ PANIC("Cannot set interval timer");
+
}
}
@@ -362,10 +398,6 @@ _thread_init(void)
if (pthread_mutex_init(&_gc_mutex,NULL) != 0 ||
pthread_cond_init(&_gc_cond,NULL) != 0)
PANIC("Failed to initialise garbage collector mutex or condvar");
-
- gettimeofday(&kern_inc_prio_time, NULL);
-
- return;
}
/*
diff --git a/lib/libc_r/uthread/uthread_jmp.c b/lib/libc_r/uthread/uthread_jmp.c
index 8ab68892b96b..f2c38c2aac1e 100644
--- a/lib/libc_r/uthread/uthread_jmp.c
+++ b/lib/libc_r/uthread/uthread_jmp.c
@@ -40,39 +40,24 @@
#include <pthread.h>
#include "pthread_private.h"
-/*
- * Offset into the jmp_buf. This is highly machine-dependent, but is a
- * necessary evil in order to compare stack pointers and make decisions based on
- * where a *longjmp() is jumping to.
- */
-#if defined(__i386__)
-#define JMP_BUF_SP_OFFSET 2
-#elif defined(__alpha)
-#define JMP_BUF_SP_OFFSET (4 + R_SP)
-#else
-#error "Don't recognize this architecture!"
-#endif
-
void
siglongjmp(sigjmp_buf env, int savemask)
{
void *jmp_stackp;
void *stack_begin, *stack_end;
+ int frame, dst_frame;
- if (_thread_run->signal_nest_level == 0)
+ if ((frame = _thread_run->sigframe_count) == 0)
__siglongjmp(env, savemask);
/* Get the stack pointer from the jump buffer. */
- jmp_stackp = (void *)env->_sjb[JMP_BUF_SP_OFFSET];
+ jmp_stackp = (void *) GET_STACK_SJB(env);
/* Get the bounds of the current threads stack. */
- if (_thread_run->stack != NULL) {
- stack_begin = _thread_run->stack;
- stack_end = stack_begin + _thread_run->attr.stacksize_attr;
- } else {
- stack_end = (void *)USRSTACK;
- stack_begin = stack_end - PTHREAD_STACK_INITIAL;
- }
+ PTHREAD_ASSERT(_thread_run->stack != NULL,
+ "Thread stack pointer is null");
+ stack_begin = _thread_run->stack;
+ stack_end = stack_begin + _thread_run->attr.stacksize_attr;
/*
* Make sure we aren't jumping to a different stack. Make sure
@@ -84,19 +69,28 @@ siglongjmp(sigjmp_buf env, int savemask)
PANIC("siglongjmp()ing between thread contexts is undefined by "
"POSIX 1003.1");
- memcpy(_thread_run->nested_jmp.sigjmp, env,
- sizeof(_thread_run->nested_jmp.sigjmp));
-
+ if ((dst_frame = _thread_sigframe_find(_thread_run, jmp_stackp)) < 0)
+ /*
+ * The stack pointer was verified above, so this
+ * shouldn't happen. Let's be anal anyways.
+ */
+ PANIC("Error locating signal frame");
+ else if (dst_frame == frame) {
+ /*
+ * The stack pointer is somewhere within the current
+ * frame. Jump to the users context.
+ */
+ __siglongjmp(env, savemask);
+ }
/*
- * Only save oldstate once so that dispatching multiple signals will not
- * lose the thread's original state.
+ * Copy the users context to the return context of the
+ * destination frame.
*/
- if (_thread_run->jmpflags == JMPFLAGS_NONE)
- _thread_run->oldstate = _thread_run->state;
- PTHREAD_SET_STATE(_thread_run, PS_RUNNING);
- _thread_run->jmpflags = JMPFLAGS_SIGLONGJMP;
- _thread_run->longjmp_val = savemask;
- ___longjmp(*_thread_run->sighandler_jmp_buf, 1);
+ memcpy(&_thread_run->sigframes[dst_frame]->ctx.sigjb, env, sizeof(*env));
+ _thread_run->sigframes[dst_frame]->ctxtype = CTX_SJB;
+ _thread_run->sigframes[dst_frame]->longjmp_val = savemask;
+ _thread_run->curframe->dst_frame = dst_frame;
+ ___longjmp(*_thread_run->curframe->sig_jb, 1);
}
void
@@ -104,21 +98,19 @@ longjmp(jmp_buf env, int val)
{
void *jmp_stackp;
void *stack_begin, *stack_end;
+ int frame, dst_frame;
- if (_thread_run->signal_nest_level == 0)
+ if ((frame = _thread_run->sigframe_count) == 0)
__longjmp(env, val);
/* Get the stack pointer from the jump buffer. */
- jmp_stackp = (void *)env->_jb[JMP_BUF_SP_OFFSET];
+ jmp_stackp = (void *) GET_STACK_JB(env);
/* Get the bounds of the current threads stack. */
- if (_thread_run->stack != NULL) {
- stack_begin = _thread_run->stack;
- stack_end = stack_begin + _thread_run->attr.stacksize_attr;
- } else {
- stack_end = (void *)USRSTACK;
- stack_begin = stack_end - PTHREAD_STACK_INITIAL;
- }
+ PTHREAD_ASSERT(_thread_run->stack != NULL,
+ "Thread stack pointer is null");
+ stack_begin = _thread_run->stack;
+ stack_end = stack_begin + _thread_run->attr.stacksize_attr;
/*
* Make sure we aren't jumping to a different stack. Make sure
@@ -130,19 +122,29 @@ longjmp(jmp_buf env, int val)
PANIC("longjmp()ing between thread contexts is undefined by "
"POSIX 1003.1");
- memcpy(_thread_run->nested_jmp.jmp, env,
- sizeof(_thread_run->nested_jmp.jmp));
+ if ((dst_frame = _thread_sigframe_find(_thread_run, jmp_stackp)) < 0)
+ /*
+ * The stack pointer was verified above, so this
+ * shouldn't happen. Let's be anal anyways.
+ */
+ PANIC("Error locating signal frame");
+ else if (dst_frame == frame) {
+ /*
+ * The stack pointer is somewhere within the current
+ * frame. Jump to the users context.
+ */
+ __longjmp(env, val);
+ }
/*
- * Only save oldstate once so that dispatching multiple signals will not
- * lose the thread's original state.
+ * Copy the users context to the return context of the
+ * destination frame.
*/
- if (_thread_run->jmpflags == JMPFLAGS_NONE)
- _thread_run->oldstate = _thread_run->state;
- PTHREAD_SET_STATE(_thread_run, PS_RUNNING);
- _thread_run->jmpflags = JMPFLAGS_LONGJMP;
- _thread_run->longjmp_val = val;
- ___longjmp(*_thread_run->sighandler_jmp_buf, 1);
+ memcpy(&_thread_run->sigframes[dst_frame]->ctx.jb, env, sizeof(*env));
+ _thread_run->sigframes[dst_frame]->ctxtype = CTX_JB;
+ _thread_run->sigframes[dst_frame]->longjmp_val = val;
+ _thread_run->curframe->dst_frame = dst_frame;
+ ___longjmp(*_thread_run->curframe->sig_jb, 1);
}
void
@@ -150,21 +152,19 @@ _longjmp(jmp_buf env, int val)
{
void *jmp_stackp;
void *stack_begin, *stack_end;
+ int frame, dst_frame;
- if (_thread_run->signal_nest_level == 0)
+ if ((frame = _thread_run->sigframe_count) == 0)
___longjmp(env, val);
/* Get the stack pointer from the jump buffer. */
- jmp_stackp = (void *)env->_jb[JMP_BUF_SP_OFFSET];
+ jmp_stackp = (void *) GET_STACK_JB(env);
/* Get the bounds of the current threads stack. */
- if (_thread_run->stack != NULL) {
- stack_begin = _thread_run->stack;
- stack_end = stack_begin + _thread_run->attr.stacksize_attr;
- } else {
- stack_end = (void *)USRSTACK;
- stack_begin = stack_end - PTHREAD_STACK_INITIAL;
- }
+ PTHREAD_ASSERT(_thread_run->stack != NULL,
+ "Thread stack pointer is null");
+ stack_begin = _thread_run->stack;
+ stack_end = stack_begin + _thread_run->attr.stacksize_attr;
/*
* Make sure we aren't jumping to a different stack. Make sure
@@ -176,18 +176,27 @@ _longjmp(jmp_buf env, int val)
PANIC("_longjmp()ing between thread contexts is undefined by "
"POSIX 1003.1");
- memcpy(_thread_run->nested_jmp.jmp, env,
- sizeof(_thread_run->nested_jmp.jmp));
-
+ if ((dst_frame = _thread_sigframe_find(_thread_run, jmp_stackp)) < 0)
+ /*
+ * The stack pointer was verified above, so this
+ * shouldn't happen. Let's be anal anyways.
+ */
+ PANIC("Error locating signal frame");
+ else if (dst_frame == frame) {
+ /*
+ * The stack pointer is somewhere within the current
+ * frame. Jump to the users context.
+ */
+ ___longjmp(env, val);
+ }
/*
- * Only save oldstate once so that dispatching multiple signals will not
- * lose the thread's original state.
+ * Copy the users context to the return context of the
+ * destination frame.
*/
- if (_thread_run->jmpflags == JMPFLAGS_NONE)
- _thread_run->oldstate = _thread_run->state;
- PTHREAD_SET_STATE(_thread_run, PS_RUNNING);
- _thread_run->jmpflags = JMPFLAGS__LONGJMP;
- _thread_run->longjmp_val = val;
- ___longjmp(*_thread_run->sighandler_jmp_buf, 1);
+ memcpy(&_thread_run->sigframes[dst_frame]->ctx.jb, env, sizeof(*env));
+ _thread_run->sigframes[dst_frame]->ctxtype = CTX_JB_NOSIG;
+ _thread_run->sigframes[dst_frame]->longjmp_val = val;
+ _thread_run->curframe->dst_frame = dst_frame;
+ ___longjmp(*_thread_run->curframe->sig_jb, 1);
}
#endif
diff --git a/lib/libc_r/uthread/uthread_join.c b/lib/libc_r/uthread/uthread_join.c
index 1cffc96eaea3..cda31bda36bd 100644
--- a/lib/libc_r/uthread/uthread_join.c
+++ b/lib/libc_r/uthread/uthread_join.c
@@ -40,7 +40,6 @@ int
pthread_join(pthread_t pthread, void **thread_return)
{
int ret = 0;
- pthread_t pthread1 = NULL;
_thread_enter_cancellation_point();
@@ -62,11 +61,7 @@ pthread_join(pthread_t pthread, void **thread_return)
* Find the thread in the list of active threads or in the
* list of dead threads:
*/
- if (_find_thread(pthread) == 0 ||
- _find_dead_thread(pthread) == 0)
- pthread1 = pthread;
-
- if (pthread1 == NULL)
+ if ((_find_thread(pthread) != 0) && (_find_dead_thread(pthread) != 0))
/* Return an error: */
ret = ESRCH;
@@ -77,6 +72,8 @@ pthread_join(pthread_t pthread, void **thread_return)
/* Check if the thread is not dead: */
else if (pthread->state != PS_DEAD) {
+ PTHREAD_ASSERT_NOT_IN_SYNCQ(_thread_run);
+
/* Clear the interrupted flag: */
_thread_run->interrupted = 0;
@@ -87,13 +84,18 @@ pthread_join(pthread_t pthread, void **thread_return)
_thread_kern_sig_defer();
/* Add the running thread to the join queue: */
- TAILQ_INSERT_TAIL(&(pthread->join_queue), _thread_run, qe);
+ TAILQ_INSERT_TAIL(&(pthread->join_queue), _thread_run, sqe);
+ _thread_run->flags |= PTHREAD_FLAGS_IN_JOINQ;
+ _thread_run->data.thread = pthread;
/* Schedule the next thread: */
_thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__);
- if (_thread_run->interrupted != 0)
- TAILQ_REMOVE(&(pthread->join_queue), _thread_run, qe);
+ if (_thread_run->interrupted != 0) {
+ TAILQ_REMOVE(&(pthread->join_queue), _thread_run, sqe);
+ _thread_run->flags &= ~PTHREAD_FLAGS_IN_JOINQ;
+ }
+ _thread_run->data.thread = NULL;
_thread_kern_sig_undefer();
@@ -122,4 +124,15 @@ pthread_join(pthread_t pthread, void **thread_return)
/* Return the completion status: */
return (ret);
}
+
+void
+_join_backout(pthread_t pthread)
+{
+ _thread_kern_sig_defer();
+ if (pthread->state == PS_JOIN) {
+ TAILQ_REMOVE(&pthread->data.thread->join_queue, pthread, sqe);
+		pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ;
+ }
+ _thread_kern_sig_undefer();
+}
#endif
diff --git a/lib/libc_r/uthread/uthread_kern.c b/lib/libc_r/uthread/uthread_kern.c
index 96a11da92d3e..23f16bc8f953 100644
--- a/lib/libc_r/uthread/uthread_kern.c
+++ b/lib/libc_r/uthread/uthread_kern.c
@@ -52,9 +52,16 @@
#include <pthread.h>
#include "pthread_private.h"
+/* #define DEBUG_THREAD_KERN */
+#ifdef DEBUG_THREAD_KERN
+#define DBG_MSG stdout_debug
+#else
+#define DBG_MSG(x...)
+#endif
+
/* Static function prototype definitions: */
static void
-_thread_kern_poll(int wait_reqd);
+thread_kern_poll(int wait_reqd);
static void
dequeue_signals(void);
@@ -62,18 +69,39 @@ dequeue_signals(void);
static inline void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);
+/* Static variables: */
+static int last_tick = 0;
+
+/*
+ * This is called when a signal handler finishes and wants to
+ * return to a previous frame.
+ */
void
-_thread_kern_sched(ucontext_t * scp)
+_thread_kern_sched_frame(int frame)
{
-#ifndef __alpha__
- char *fdata;
-#endif
- pthread_t pthread, pthread_h = NULL;
- struct itimerval itimer;
- struct timespec ts, ts1;
- struct timeval tv, tv1;
- int set_timer = 0;
+ /*
+ * Flag the pthread kernel as executing scheduler code
+ * to avoid a signal from interrupting this execution and
+ * corrupting the (soon-to-be) current frame.
+ */
+ _thread_kern_in_sched = 1;
+
+ /* Return to the specified frame: */
+ _thread_run->curframe = _thread_run->sigframes[frame];
+ _thread_run->sigframe_count = frame;
+
+ if (_thread_run->sigframe_count == 0)
+ /* Restore the threads priority: */
+ _thread_run->active_priority &= ~PTHREAD_SIGNAL_PRIORITY;
+ /* Switch to the thread scheduler: */
+ ___longjmp(_thread_kern_sched_jb, 1);
+}
+
+
+void
+_thread_kern_sched(ucontext_t *scp)
+{
/*
* Flag the pthread kernel as executing scheduler code
* to avoid a scheduler signal from interrupting this
@@ -84,67 +112,94 @@ _thread_kern_sched(ucontext_t * scp)
/* Check if this function was called from the signal handler: */
if (scp != NULL) {
/*
- * Copy the signal context to the current thread's jump
- * buffer:
+ * The signal handler should have saved the state of
+ * the current thread. Restore the process signal
+ * mask.
*/
- memcpy(&_thread_run->saved_sigcontext, scp, sizeof(_thread_run->saved_sigcontext));
-
-#ifndef __alpha__
- /* Point to the floating point data in the running thread: */
- fdata = _thread_run->saved_fp;
-
- /* Save the floating point data: */
-__asm__("fnsave %0": :"m"(*fdata));
-#endif
-
- /* Flag the signal context as the last state saved: */
- _thread_run->sig_saved = 1;
- }
- /* Save the state of the current thread: */
- else if (setjmp(_thread_run->saved_jmp_buf) != 0) {
+ if (_thread_sys_sigprocmask(SIG_SETMASK,
+ &_process_sigmask, NULL) != 0)
+ PANIC("Unable to restore process mask after signal");
/*
- * This point is reached when a longjmp() is called to
- * restore the state of a thread.
- *
- * This is the normal way out of the scheduler.
+ * We're running on the signal stack; just call the
+ * kernel scheduler directly.
*/
- _thread_kern_in_sched = 0;
-
- if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) &&
- ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) {
- /*
- * Cancellations override signals.
- *
- * Stick a cancellation point at the start of
- * each async-cancellable thread's resumption.
+ DBG_MSG("Entering scheduler due to signal\n");
+ _thread_kern_scheduler();
+ } else {
+ /* Save the state of the current thread: */
+ if (_setjmp(_thread_run->curframe->ctx.jb) == 0) {
+ /* Flag the jump buffer was the last state saved: */
+ _thread_run->curframe->ctxtype = CTX_JB_NOSIG;
+ _thread_run->curframe->longjmp_val = 1;
+ } else {
+ DBG_MSG("Returned from ___longjmp, thread %p\n",
+ _thread_run);
+ /*
+ * This point is reached when a longjmp() is called
+ * to restore the state of a thread.
*
- * We allow threads woken at cancel points to do their
- * own checks.
+ * This is the normal way out of the scheduler.
*/
- pthread_testcancel();
+ _thread_kern_in_sched = 0;
+
+ if (_thread_run->sig_defer_count == 0) {
+ if (((_thread_run->cancelflags &
+ PTHREAD_AT_CANCEL_POINT) == 0) &&
+ ((_thread_run->cancelflags &
+ PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
+ /*
+ * Cancellations override signals.
+ *
+ * Stick a cancellation point at the
+ * start of each async-cancellable
+ * thread's resumption.
+ *
+ * We allow threads woken at cancel
+ * points to do their own checks.
+ */
+ pthread_testcancel();
+ }
+
+ if (_sched_switch_hook != NULL) {
+ /* Run the installed switch hook: */
+ thread_run_switch_hook(_last_user_thread,
+ _thread_run);
+ }
+ return;
}
+ /* Switch to the thread scheduler: */
+ ___longjmp(_thread_kern_sched_jb, 1);
+ }
+}
- /*
- * Check for undispatched signals due to calls to
- * pthread_kill().
- */
- if (SIGNOTEMPTY(_thread_run->sigpend))
- _dispatch_signals();
+void
+_thread_kern_sched_sig(void)
+{
+ _thread_run->check_pending = 1;
+ _thread_kern_sched(NULL);
+}
- if (_sched_switch_hook != NULL) {
- /* Run the installed switch hook: */
- thread_run_switch_hook(_last_user_thread, _thread_run);
- }
- return;
- } else
- /* Flag the jump buffer was the last state saved: */
- _thread_run->sig_saved = 0;
+void
+_thread_kern_scheduler(void)
+{
+ struct pthread_signal_frame *psf;
+ struct timespec ts;
+ struct timeval tv;
+ pthread_t pthread, pthread_h;
+ unsigned int current_tick;
+ int add_to_prioq;
/* If the currently running thread is a user thread, save it: */
if ((_thread_run->flags & PTHREAD_FLAGS_PRIVATE) == 0)
_last_user_thread = _thread_run;
+ /* Are there pending signals for this thread? */
+ if (_thread_run->check_pending != 0) {
+ _thread_run->check_pending = 0;
+ _thread_sig_check_pending(_thread_run);
+ }
+
/*
* Enter a scheduling loop that finds the next thread that is
* ready to run. This loop completes when there are no more threads
@@ -154,29 +209,37 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
while (!(TAILQ_EMPTY(&_thread_list))) {
/* Get the current time of day: */
- gettimeofday(&tv, NULL);
+ GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
+ current_tick = _sched_ticks;
/*
* Protect the scheduling queues from access by the signal
* handler.
*/
_queue_signals = 1;
+ add_to_prioq = 0;
if (_thread_run != &_thread_kern_thread) {
-
/*
* This thread no longer needs to yield the CPU.
*/
_thread_run->yield_on_sig_undefer = 0;
- /*
- * Save the current time as the time that the thread
- * became inactive:
- */
- _thread_run->last_inactive.tv_sec = tv.tv_sec;
- _thread_run->last_inactive.tv_usec = tv.tv_usec;
-
+ if (_thread_run->state != PS_RUNNING) {
+ /*
+ * Save the current time as the time that the
+ * thread became inactive:
+ */
+ _thread_run->last_inactive = (long)current_tick;
+ if (_thread_run->last_inactive <
+ _thread_run->last_active) {
+ /* Account for a rollover: */
+ _thread_run->last_inactive =+
+ UINT_MAX + 1;
+ }
+ }
+
/*
* Place the currently running thread into the
* appropriate queue(s).
@@ -198,22 +261,7 @@ __asm__("fnsave %0": :"m"(*fdata));
* are polled (to preserve round-robin
* scheduling).
*/
- if ((_thread_run->slice_usec != -1) &&
- (_thread_run->attr.sched_policy != SCHED_FIFO)) {
- /*
- * Accumulate the number of microseconds that
- * this thread has run for:
- */
- _thread_run->slice_usec +=
- (_thread_run->last_inactive.tv_sec -
- _thread_run->last_active.tv_sec) * 1000000 +
- _thread_run->last_inactive.tv_usec -
- _thread_run->last_active.tv_usec;
-
- /* Check for time quantum exceeded: */
- if (_thread_run->slice_usec > TIMESLICE_USEC)
- _thread_run->slice_usec = -1;
- }
+ add_to_prioq = 1;
break;
/*
@@ -260,7 +308,7 @@ __asm__("fnsave %0": :"m"(*fdata));
/* Increment spinblock count: */
_spinblock_count++;
- /* fall through */
+ /* FALLTHROUGH */
case PS_FDR_WAIT:
case PS_FDW_WAIT:
case PS_POLL_WAIT:
@@ -277,17 +325,26 @@ __asm__("fnsave %0": :"m"(*fdata));
}
}
- /* Unprotect the scheduling queues: */
- _queue_signals = 0;
-
/*
- * Poll file descriptors to update the state of threads
- * waiting on file I/O where data may be available:
+ * Poll file descriptors only if a new scheduling signal
+ * has occurred or if we have no more runnable threads.
*/
- _thread_kern_poll(0);
+ if (((current_tick = _sched_ticks) != last_tick) ||
+ ((_thread_run->state != PS_RUNNING) &&
+ (PTHREAD_PRIOQ_FIRST() == NULL))) {
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
- /* Protect the scheduling queues: */
- _queue_signals = 1;
+ /*
+ * Poll file descriptors to update the state of threads
+ * waiting on file I/O where data may be available:
+ */
+ thread_kern_poll(0);
+
+ /* Protect the scheduling queues: */
+ _queue_signals = 1;
+ }
+ last_tick = current_tick;
/*
* Wake up threads that have timedout. This has to be
@@ -329,12 +386,37 @@ __asm__("fnsave %0": :"m"(*fdata));
PTHREAD_WAITQ_CLEARACTIVE();
/*
- * Check if there is a current runnable thread that isn't
- * already in the ready queue:
+ * Check to see if the current thread needs to be added
+ * to the priority queue:
*/
- if ((_thread_run != &_thread_kern_thread) &&
- (_thread_run->state == PS_RUNNING) &&
- ((_thread_run->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0)) {
+ if (add_to_prioq != 0) {
+ /*
+ * Save the current time as the time that the
+ * thread became inactive:
+ */
+ current_tick = _sched_ticks;
+ _thread_run->last_inactive = (long)current_tick;
+ if (_thread_run->last_inactive <
+ _thread_run->last_active) {
+ /* Account for a rollover: */
+ _thread_run->last_inactive =+ UINT_MAX + 1;
+ }
+
+ if ((_thread_run->slice_usec != -1) &&
+ (_thread_run->attr.sched_policy != SCHED_FIFO)) {
+ /*
+ * Accumulate the number of microseconds for
+ * which the current thread has run:
+ */
+ _thread_run->slice_usec +=
+ (_thread_run->last_inactive -
+ _thread_run->last_active) *
+ (long)_clock_res_usec;
+ /* Check for time quantum exceeded: */
+ if (_thread_run->slice_usec > TIMESLICE_USEC)
+ _thread_run->slice_usec = -1;
+ }
+
if (_thread_run->slice_usec == -1) {
/*
* The thread exceeded its time
@@ -366,6 +448,8 @@ __asm__("fnsave %0": :"m"(*fdata));
* thread structure:
*/
_thread_run = &_thread_kern_thread;
+ DBG_MSG("No runnable threads, using kernel thread %p\n",
+ _thread_run);
/* Unprotect the scheduling queues: */
_queue_signals = 0;
@@ -374,20 +458,27 @@ __asm__("fnsave %0": :"m"(*fdata));
* There are no threads ready to run, so wait until
* something happens that changes this condition:
*/
- _thread_kern_poll(1);
- } else {
- /* Remove the thread from the ready queue: */
- PTHREAD_PRIOQ_REMOVE(pthread_h);
+ thread_kern_poll(1);
- /* Get first thread on the waiting list: */
- pthread = TAILQ_FIRST(&_waitingq);
+ /*
+ * This process' usage will likely be very small
+ * while waiting in a poll. Since the scheduling
+ * clock is based on the profiling timer, it is
+ * unlikely that the profiling timer will fire
+ * and update the time of day. To account for this,
+ * get the time of day after polling with a timeout.
+ */
+ gettimeofday((struct timeval *) &_sched_tod, NULL);
+
+ /* Check once more for a runnable thread: */
+ _queue_signals = 1;
+ pthread_h = PTHREAD_PRIOQ_FIRST();
+ _queue_signals = 0;
+ }
- /* Check to see if there is more than one thread: */
- if (pthread_h != TAILQ_FIRST(&_thread_list) ||
- TAILQ_NEXT(pthread_h, tle) != NULL)
- set_timer = 1;
- else
- set_timer = 0;
+ if (pthread_h != NULL) {
+ /* Remove the thread from the ready queue: */
+ PTHREAD_PRIOQ_REMOVE(pthread_h);
/* Unprotect the scheduling queues: */
_queue_signals = 0;
@@ -411,32 +502,19 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
(pthread->active_priority > pthread_h->active_priority)) {
+ /* Remove the thread from the ready queue: */
+ PTHREAD_PRIOQ_REMOVE(pthread);
+
/*
* Insert the lower priority thread
* at the head of its priority list:
*/
PTHREAD_PRIOQ_INSERT_HEAD(pthread_h);
- /* Remove the thread from the ready queue: */
- PTHREAD_PRIOQ_REMOVE(pthread);
-
/* There's a new thread in town: */
pthread_h = pthread;
}
- /* Get first thread on the waiting list: */
- pthread = TAILQ_FIRST(&_waitingq);
-
- /*
- * Check to see if there is more than one
- * thread:
- */
- if (pthread_h != TAILQ_FIRST(&_thread_list) ||
- TAILQ_NEXT(pthread_h, tle) != NULL)
- set_timer = 1;
- else
- set_timer = 0;
-
/* Unprotect the scheduling queues: */
_queue_signals = 0;
}
@@ -448,78 +526,8 @@ __asm__("fnsave %0": :"m"(*fdata));
* Save the current time as the time that the thread
* became active:
*/
- _thread_run->last_active.tv_sec = tv.tv_sec;
- _thread_run->last_active.tv_usec = tv.tv_usec;
-
- /*
- * Define the maximum time before a scheduling signal
- * is required:
- */
- itimer.it_value.tv_sec = 0;
- itimer.it_value.tv_usec = TIMESLICE_USEC;
-
- /*
- * The interval timer is not reloaded when it
- * times out. The interval time needs to be
- * calculated every time.
- */
- itimer.it_interval.tv_sec = 0;
- itimer.it_interval.tv_usec = 0;
-
- /* Get first thread on the waiting list: */
- if ((pthread != NULL) &&
- (pthread->wakeup_time.tv_sec != -1)) {
- /*
- * Calculate the time until this thread
- * is ready, allowing for the clock
- * resolution:
- */
- ts1.tv_sec = pthread->wakeup_time.tv_sec
- - ts.tv_sec;
- ts1.tv_nsec = pthread->wakeup_time.tv_nsec
- - ts.tv_nsec + _clock_res_nsec;
-
- /*
- * Check for underflow of the nanosecond field:
- */
- while (ts1.tv_nsec < 0) {
- /*
- * Allow for the underflow of the
- * nanosecond field:
- */
- ts1.tv_sec--;
- ts1.tv_nsec += 1000000000;
- }
- /*
- * Check for overflow of the nanosecond field:
- */
- while (ts1.tv_nsec >= 1000000000) {
- /*
- * Allow for the overflow of the
- * nanosecond field:
- */
- ts1.tv_sec++;
- ts1.tv_nsec -= 1000000000;
- }
- /*
- * Convert the timespec structure to a
- * timeval structure:
- */
- TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
-
- /*
- * Check if the thread will be ready
- * sooner than the earliest ones found
- * so far:
- */
- if (timercmp(&tv1, &itimer.it_value, <)) {
- /*
- * Update the time value:
- */
- itimer.it_value.tv_sec = tv1.tv_sec;
- itimer.it_value.tv_usec = tv1.tv_usec;
- }
- }
+ current_tick = _sched_ticks;
+ _thread_run->last_active = (long) current_tick;
/*
* Check if this thread is running for the first time
@@ -531,88 +539,51 @@ __asm__("fnsave %0": :"m"(*fdata));
_thread_run->slice_usec = 0;
}
- /* Check if there is more than one thread: */
- if (set_timer != 0) {
- /*
- * Start the interval timer for the
- * calculated time interval:
- */
- if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0) {
- /*
- * Cannot initialise the timer, so
- * abort this process:
- */
- PANIC("Cannot set scheduling timer");
- }
- }
-
- /*
- * Check if this thread is being continued from a
- * longjmp() out of a signal handler:
- */
- if ((_thread_run->jmpflags & JMPFLAGS_LONGJMP) != 0) {
- _thread_run->jmpflags = 0;
- __longjmp(_thread_run->nested_jmp.jmp,
- _thread_run->longjmp_val);
- }
/*
- * Check if this thread is being continued from a
- * _longjmp() out of a signal handler:
+ * If we had a context switch, run any
+ * installed switch hooks.
*/
- else if ((_thread_run->jmpflags & JMPFLAGS__LONGJMP) !=
- 0) {
- _thread_run->jmpflags = 0;
- ___longjmp(_thread_run->nested_jmp.jmp,
- _thread_run->longjmp_val);
+ if ((_sched_switch_hook != NULL) &&
+ (_last_user_thread != _thread_run)) {
+ thread_run_switch_hook(_last_user_thread,
+ _thread_run);
}
/*
- * Check if this thread is being continued from a
- * siglongjmp() out of a signal handler:
+ * Continue the thread at its current frame:
*/
- else if ((_thread_run->jmpflags & JMPFLAGS_SIGLONGJMP)
- != 0) {
- _thread_run->jmpflags = 0;
- __siglongjmp(
- _thread_run->nested_jmp.sigjmp,
- _thread_run->longjmp_val);
- }
- /* Check if a signal context was saved: */
- else if (_thread_run->sig_saved == 1) {
-#ifndef __alpha__
- /*
- * Point to the floating point data in the
- * running thread:
- */
- fdata = _thread_run->saved_fp;
+ psf = _thread_run->curframe;
+ switch(psf->ctxtype) {
+ case CTX_JB_NOSIG:
+ ___longjmp(psf->ctx.jb, psf->longjmp_val);
+ break;
+ case CTX_JB:
+ __longjmp(psf->ctx.jb, psf->longjmp_val);
+ break;
+ case CTX_SJB:
+ __siglongjmp(psf->ctx.sigjb, psf->longjmp_val);
+ break;
+ case CTX_UC:
+ /* XXX - Restore FP regsisters? */
+ FP_RESTORE_UC(&psf->ctx.uc);
- /* Restore the floating point state: */
- __asm__("frstor %0": :"m"(*fdata));
-#endif
/*
* Do a sigreturn to restart the thread that
* was interrupted by a signal:
*/
_thread_kern_in_sched = 0;
+#if NOT_YET
+ _setcontext(&psf->ctx.uc);
+#else
/*
- * If we had a context switch, run any
- * installed switch hooks.
- */
- if ((_sched_switch_hook != NULL) &&
- (_last_user_thread != _thread_run)) {
- thread_run_switch_hook(_last_user_thread,
- _thread_run);
- }
- _thread_sys_sigreturn(&_thread_run->saved_sigcontext);
- } else {
- /*
- * Do a longjmp to restart the thread that
- * was context switched out (by a longjmp to
- * a different thread):
+ * Ensure the process signal mask is set
+ * correctly:
*/
- __longjmp(_thread_run->saved_jmp_buf, 1);
+ psf->ctx.uc.uc_sigmask = _process_sigmask;
+ _thread_sys_sigreturn(&psf->ctx.uc);
+#endif
+ break;
}
-
/* This point should not be reached. */
PANIC("Thread has returned from sigreturn or longjmp");
}
@@ -645,7 +616,6 @@ _thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
/* Schedule the next thread that is ready: */
_thread_kern_sched(NULL);
- return;
}
void
@@ -675,11 +645,10 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
/* Schedule the next thread that is ready: */
_thread_kern_sched(NULL);
- return;
}
static void
-_thread_kern_poll(int wait_reqd)
+thread_kern_poll(int wait_reqd)
{
int count = 0;
int i, found;
@@ -696,7 +665,7 @@ _thread_kern_poll(int wait_reqd)
}
else {
/* Get the current time of day: */
- gettimeofday(&tv, NULL);
+ GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
_queue_signals = 1;
@@ -713,11 +682,11 @@ _thread_kern_poll(int wait_reqd)
else {
/*
* Calculate the time left for the next thread to
- * timeout allowing for the clock resolution:
+ * timeout:
*/
timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
- 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec +
- _clock_res_nsec) / 1000000);
+ 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec) /
+ 1000000);
/*
* Don't allow negative timeouts:
*/
@@ -1002,9 +971,6 @@ _thread_kern_poll(int wait_reqd)
/* Unprotect the scheduling queues: */
_queue_signals = 0;
}
-
- /* Nothing to return. */
- return;
}
void
@@ -1032,7 +998,7 @@ _thread_kern_set_timeout(const struct timespec * timeout)
_thread_run->wakeup_time.tv_nsec = 0;
} else {
/* Get the current time: */
- gettimeofday(&tv, NULL);
+ GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &current_time);
/* Calculate the time for the current thread to wake up: */
@@ -1046,7 +1012,6 @@ _thread_kern_set_timeout(const struct timespec * timeout)
_thread_run->wakeup_time.tv_nsec -= 1000000000;
}
}
- return;
}
void
@@ -1059,9 +1024,6 @@ _thread_kern_sig_defer(void)
void
_thread_kern_sig_undefer(void)
{
- pthread_t pthread;
- int need_resched = 0;
-
/*
* Perform checks to yield only if we are about to undefer
* signals.
@@ -1077,33 +1039,25 @@ _thread_kern_sig_undefer(void)
/*
* Check if there are queued signals:
*/
- while (_sigq_check_reqd != 0) {
- /* Defer scheduling while we process queued signals: */
- _thread_run->sig_defer_count = 1;
-
- /* Clear the flag before checking the signal queue: */
- _sigq_check_reqd = 0;
-
- /* Dequeue and handle signals: */
- dequeue_signals();
-
- /*
- * Avoiding an unnecessary check to reschedule, check
- * to see if signal handling caused a higher priority
- * thread to become ready.
- */
- if ((need_resched == 0) &&
- (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
- (pthread->active_priority > _thread_run->active_priority))) {
- need_resched = 1;
- }
+ if (_sigq_check_reqd != 0)
+ _thread_kern_sched(NULL);
- /* Reenable signals: */
- _thread_run->sig_defer_count = 0;
- }
+ /*
+ * Check for asynchronous cancellation before delivering any
+ * pending signals:
+ */
+ if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) &&
+ ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
+ pthread_testcancel();
- /* Yield the CPU if necessary: */
- if (need_resched || _thread_run->yield_on_sig_undefer != 0) {
+ /*
+ * If there are pending signals or this thread has
+ * to yield the CPU, call the kernel scheduler:
+ *
+ * XXX - Come back and revisit the pending signal problem
+ */
+ if ((_thread_run->yield_on_sig_undefer != 0) ||
+ SIGNOTEMPTY(_thread_run->sigpend)) {
_thread_run->yield_on_sig_undefer = 0;
_thread_kern_sched(NULL);
}
@@ -1114,35 +1068,13 @@ static void
dequeue_signals(void)
{
char bufr[128];
- int i, num;
- pthread_t pthread;
+ int num;
/*
- * Enter a loop to read and handle queued signals from the
- * pthread kernel pipe:
+ * Enter a loop to clear the pthread kernel pipe:
*/
while (((num = _thread_sys_read(_thread_kern_pipe[0], bufr,
sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) {
- /*
- * The buffer read contains one byte per signal and
- * each byte is the signal number.
- */
- for (i = 0; i < num; i++) {
- if ((int) bufr[i] == _SCHED_SIGNAL) {
- /*
- * Scheduling signals shouldn't ever be
- * queued; just ignore it for now.
- */
- }
- else {
- /* Handle this signal: */
- pthread = _thread_sig_handle((int) bufr[i],
- NULL);
- if (pthread != NULL)
- _thread_sig_deliver(pthread,
- (int) bufr[i]);
- }
- }
}
if ((num < 0) && (errno != EAGAIN)) {
/*
@@ -1151,6 +1083,8 @@ dequeue_signals(void)
*/
PANIC("Unable to read from thread kernel pipe");
}
+ /* Handle any pending signals: */
+ _thread_sig_handle_pending();
}
static inline void
diff --git a/lib/libc_r/uthread/uthread_mutex.c b/lib/libc_r/uthread/uthread_mutex.c
index b8877f8453c9..f7662c71c951 100644
--- a/lib/libc_r/uthread/uthread_mutex.c
+++ b/lib/libc_r/uthread/uthread_mutex.c
@@ -79,7 +79,7 @@ static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
int
_mutex_reinit(pthread_mutex_t * mutex)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -113,7 +113,7 @@ pthread_mutex_init(pthread_mutex_t * mutex,
int protocol;
int ceiling;
pthread_mutex_t pmutex;
- int ret = 0;
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -203,7 +203,7 @@ pthread_mutex_init(pthread_mutex_t * mutex,
int
pthread_mutex_destroy(pthread_mutex_t * mutex)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL || *mutex == NULL)
ret = EINVAL;
@@ -245,7 +245,7 @@ pthread_mutex_destroy(pthread_mutex_t * mutex)
static int
init_static(pthread_mutex_t *mutex)
{
- int ret;
+ int ret;
_SPINLOCK(&static_init_lock);
@@ -262,7 +262,7 @@ init_static(pthread_mutex_t *mutex)
int
pthread_mutex_trylock(pthread_mutex_t * mutex)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -400,7 +400,7 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
int
pthread_mutex_lock(pthread_mutex_t * mutex)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -610,9 +610,8 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
* Check to see if this thread was interrupted and
* is still in the mutex queue of waiting threads:
*/
- if (_thread_run->interrupted != 0) {
+ if (_thread_run->interrupted != 0)
mutex_queue_remove(*mutex, _thread_run);
- }
/* Unlock the mutex structure: */
_SPINUNLOCK(&(*mutex)->lock);
@@ -647,7 +646,7 @@ _mutex_cv_unlock(pthread_mutex_t * mutex)
int
_mutex_cv_lock(pthread_mutex_t * mutex)
{
- int ret;
+ int ret;
if ((ret = pthread_mutex_lock(mutex)) == 0)
(*mutex)->m_refcount--;
return (ret);
@@ -656,7 +655,7 @@ _mutex_cv_lock(pthread_mutex_t * mutex)
static inline int
mutex_self_trylock(pthread_mutex_t mutex)
{
- int ret = 0;
+ int ret = 0;
switch (mutex->m_type) {
@@ -723,7 +722,7 @@ mutex_self_lock(pthread_mutex_t mutex)
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL || *mutex == NULL) {
ret = EINVAL;
@@ -1369,6 +1368,38 @@ _mutex_unlock_private(pthread_t pthread)
}
}
+void
+_mutex_lock_backout(pthread_t pthread)
+{
+ struct pthread_mutex *mutex;
+
+ /*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+ if (pthread->state == PS_MUTEX_WAIT) {
+ mutex = pthread->data.mutex;
+
+ /* Lock the mutex structure: */
+ _SPINLOCK(&mutex->lock);
+
+ mutex_queue_remove(mutex, pthread);
+
+ /* This thread is no longer waiting for the mutex: */
+ mutex->m_owner->data.mutex = NULL;
+
+ /* Unlock the mutex structure: */
+ _SPINUNLOCK(&mutex->lock);
+
+ }
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
+ */
+ _thread_kern_sig_undefer();
+}
+
/*
* Dequeue a waiting thread from the head of a mutex queue in descending
* priority order.
@@ -1379,7 +1410,7 @@ mutex_queue_deq(pthread_mutex_t mutex)
pthread_t pthread;
while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
- TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+ TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
/*
@@ -1400,7 +1431,7 @@ static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
- TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+ TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
}
}
@@ -1413,18 +1444,19 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
+ PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
/*
* For the common case of all threads having equal priority,
* we perform a quick check against the priority of the thread
* at the tail of the queue.
*/
if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
- TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, qe);
+ TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
else {
tid = TAILQ_FIRST(&mutex->m_queue);
while (pthread->active_priority <= tid->active_priority)
- tid = TAILQ_NEXT(tid, qe);
- TAILQ_INSERT_BEFORE(tid, pthread, qe);
+ tid = TAILQ_NEXT(tid, sqe);
+ TAILQ_INSERT_BEFORE(tid, pthread, sqe);
}
pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
}
diff --git a/lib/libc_r/uthread/uthread_priority_queue.c b/lib/libc_r/uthread/uthread_priority_queue.c
index 1b9fcba09573..84c30657885a 100644
--- a/lib/libc_r/uthread/uthread_priority_queue.c
+++ b/lib/libc_r/uthread/uthread_priority_queue.c
@@ -66,9 +66,13 @@ static int _pq_active = 0;
PANIC(msg); \
} while (0)
#define _PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \
- if ((thrd)->flags & _PQ_IN_SCHEDQ) \
+ if (((thrd)->flags & _PQ_IN_SCHEDQ) != 0) \
PANIC(msg); \
} while (0)
+#define _PQ_ASSERT_PROTECTED(msg) \
+ PTHREAD_ASSERT((_thread_kern_in_sched != 0) || \
+ (_thread_run->sig_defer_count > 0) || \
+ (_sig_in_handler != 0), msg);
#else
@@ -79,11 +83,10 @@ static int _pq_active = 0;
#define _PQ_ASSERT_IN_WAITQ(thrd, msg)
#define _PQ_ASSERT_IN_PRIOQ(thrd, msg)
#define _PQ_ASSERT_NOT_QUEUED(thrd, msg)
-#define _PQ_CHECK_PRIO()
+#define _PQ_ASSERT_PROTECTED(msg)
#endif
-
int
_pq_alloc(pq_queue_t *pq, int minprio, int maxprio)
{
@@ -101,9 +104,7 @@ _pq_alloc(pq_queue_t *pq, int minprio, int maxprio)
else {
/* Remember the queue size: */
pq->pq_size = prioslots;
-
ret = _pq_init(pq);
-
}
return (ret);
}
@@ -142,6 +143,7 @@ _pq_remove(pq_queue_t *pq, pthread_t pthread)
_PQ_ASSERT_INACTIVE("_pq_remove: pq_active");
_PQ_SET_ACTIVE();
_PQ_ASSERT_IN_PRIOQ(pthread, "_pq_remove: Not in priority queue");
+ _PQ_ASSERT_PROTECTED("_pq_remove: prioq not protected!");
/*
* Remove this thread from priority list. Note that if
@@ -172,6 +174,7 @@ _pq_insert_head(pq_queue_t *pq, pthread_t pthread)
_PQ_SET_ACTIVE();
_PQ_ASSERT_NOT_QUEUED(pthread,
"_pq_insert_head: Already in priority queue");
+ _PQ_ASSERT_PROTECTED("_pq_insert_head: prioq not protected!");
TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
if (pq->pq_lists[prio].pl_queued == 0)
@@ -197,6 +200,7 @@ _pq_insert_tail(pq_queue_t *pq, pthread_t pthread)
_PQ_SET_ACTIVE();
_PQ_ASSERT_NOT_QUEUED(pthread,
"_pq_insert_tail: Already in priority queue");
+ _PQ_ASSERT_PROTECTED("_pq_insert_tail: prioq not protected!");
TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
if (pq->pq_lists[prio].pl_queued == 0)
@@ -221,6 +225,7 @@ _pq_first(pq_queue_t *pq)
*/
_PQ_ASSERT_INACTIVE("_pq_first: pq_active");
_PQ_SET_ACTIVE();
+ _PQ_ASSERT_PROTECTED("_pq_first: prioq not protected!");
while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) &&
(pthread == NULL)) {
@@ -250,6 +255,7 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
* Make some assertions when debugging is enabled:
*/
_PQ_ASSERT_ACTIVE("pq_insert_prio_list: pq_active");
+ _PQ_ASSERT_PROTECTED("_pq_insert_prio_list: prioq not protected!");
/*
* The priority queue is in descending priority order. Start at
@@ -270,11 +276,10 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
pq->pq_lists[prio].pl_queued = 1;
}
-#if defined(_PTHREADS_INVARIANTS)
void
_waitq_insert(pthread_t pthread)
{
- pthread_t tid;
+ pthread_t tid;
/*
* Make some assertions when debugging is enabled:
@@ -332,4 +337,3 @@ _waitq_clearactive(void)
_PQ_CLEAR_ACTIVE();
}
#endif
-#endif
diff --git a/lib/libc_r/uthread/uthread_sem.c b/lib/libc_r/uthread/uthread_sem.c
index 2dcf72223fc3..eb4291906f09 100644
--- a/lib/libc_r/uthread/uthread_sem.c
+++ b/lib/libc_r/uthread/uthread_sem.c
@@ -29,6 +29,7 @@
* $FreeBSD$
*/
+#include <stdlib.h>
#include <errno.h>
#ifdef _THREAD_SAFE
#include <semaphore.h>
diff --git a/lib/libc_r/uthread/uthread_sendfile.c b/lib/libc_r/uthread/uthread_sendfile.c
index a3ef68d4e63a..51f424743550 100644
--- a/lib/libc_r/uthread/uthread_sendfile.c
+++ b/lib/libc_r/uthread/uthread_sendfile.c
@@ -48,7 +48,7 @@ sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr,
/* Write the headers if any. */
if ((hdtr != NULL) && (hdtr->headers != NULL)) {
- if (wvret = writev(s, hdtr->headers, hdtr->hdr_cnt) == -1) {
+ if ((wvret = writev(s, hdtr->headers, hdtr->hdr_cnt)) == -1) {
ret = -1;
goto ERROR;
} else
@@ -135,7 +135,7 @@ sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr,
if (ret == 0) {
/* Write the trailers, if any. */
if ((hdtr != NULL) && (hdtr->trailers != NULL)) {
- if (wvret = writev(s, hdtr->trailers, hdtr->trl_cnt)
+ if ((wvret = writev(s, hdtr->trailers, hdtr->trl_cnt))
== -1)
ret = -1;
else
diff --git a/lib/libc_r/uthread/uthread_setschedparam.c b/lib/libc_r/uthread/uthread_setschedparam.c
index bce965fe6603..9a44ad7a1426 100644
--- a/lib/libc_r/uthread/uthread_setschedparam.c
+++ b/lib/libc_r/uthread/uthread_setschedparam.c
@@ -59,7 +59,8 @@ pthread_setschedparam(pthread_t pthread, int policy,
*/
_thread_kern_sig_defer();
- if (param->sched_priority != pthread->base_priority) {
+ if (param->sched_priority !=
+ PTHREAD_BASE_PRIORITY(pthread->base_priority)) {
/*
* Remove the thread from its current priority
* queue before any adjustments are made to its
@@ -72,6 +73,8 @@ pthread_setschedparam(pthread_t pthread, int policy,
}
/* Set the thread base priority: */
+ pthread->base_priority &=
+ (PTHREAD_SIGNAL_PRIORITY | PTHREAD_RT_PRIORITY);
pthread->base_priority = param->sched_priority;
/* Recalculate the active priority: */
diff --git a/lib/libc_r/uthread/uthread_sig.c b/lib/libc_r/uthread/uthread_sig.c
index 86ded7f12cbb..8a9aadf177f6 100644
--- a/lib/libc_r/uthread/uthread_sig.c
+++ b/lib/libc_r/uthread/uthread_sig.c
@@ -44,46 +44,47 @@
#include "pthread_private.h"
/* Prototypes: */
-static void thread_sig_check_state(pthread_t pthread, int sig);
-static void thread_sig_finish_longjmp(void *arg);
-static void handle_state_change(pthread_t pthread);
-
+static void thread_sig_add(pthread_t pthread, int sig, int has_args);
+static pthread_t thread_sig_find(int sig);
+static void thread_sig_handle_special(int sig);
+static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp);
+static void thread_sigframe_add(pthread_t thread, int sig);
+static void thread_sigframe_leave(pthread_t thread, int frame);
+static void thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf);
+static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf);
+
+/* #define DEBUG_SIGNAL */
+#ifdef DEBUG_SIGNAL
+#define DBG_MSG stdout_debug
+#else
+#define DBG_MSG(x...)
+#endif
-/* Static variables: */
-static spinlock_t signal_lock = _SPINLOCK_INITIALIZER;
-static unsigned int pending_sigs[NSIG];
-static unsigned int handled_sigs[NSIG];
-static int volatile check_pending = 0;
-static int volatile check_waiting = 0;
+#if defined(_PTHREADS_INVARIANTS)
+#define SIG_SET_ACTIVE() _sig_in_handler = 1
+#define SIG_SET_INACTIVE() _sig_in_handler = 0
+#else
+#define SIG_SET_ACTIVE()
+#define SIG_SET_INACTIVE()
+#endif
-/* Initialize signal handling facility: */
void
-_thread_sig_init(void)
+_thread_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
{
- int i;
+ pthread_t pthread;
+ int current_frame;
+ char c;
- /* Clear pending and handled signal counts: */
- for (i = 1; i < NSIG; i++) {
- pending_sigs[i - 1] = 0;
- handled_sigs[i - 1] = 0;
- }
-
- /* Clear the lock: */
- signal_lock.access_lock = 0;
-
- /* Clear the process pending signals: */
- sigemptyset(&_process_sigpending);
-}
-
-void
-_thread_sig_handler(int sig, int code, ucontext_t * scp)
-{
- pthread_t pthread, pthread_next;
- int i;
- char c;
+ if (ucp == NULL)
+ PANIC("Thread signal handler received null context");
+ DBG_MSG("Got signal %d, current thread %p\n", sig, _thread_run);
/* Check if an interval timer signal: */
if (sig == _SCHED_SIGNAL) {
+ /* Update the scheduling clock: */
+ gettimeofday((struct timeval *)&_sched_tod, NULL);
+ _sched_ticks++;
+
if (_thread_kern_in_sched != 0) {
/*
* The scheduler is already running; ignore this
@@ -97,14 +98,18 @@ _thread_sig_handler(int sig, int code, ucontext_t * scp)
*/
else if (_thread_run->sig_defer_count > 0)
_thread_run->yield_on_sig_undefer = 1;
-
else {
/*
+ * Save the context of the currently running thread:
+ */
+ thread_sig_savecontext(_thread_run, ucp);
+
+ /*
* Schedule the next thread. This function is not
* expected to return because it will do a longjmp
* instead.
*/
- _thread_kern_sched(scp);
+ _thread_kern_sched(ucp);
/*
* This point should not be reached, so abort the
@@ -118,8 +123,8 @@ _thread_sig_handler(int sig, int code, ucontext_t * scp)
* is accessing the scheduling queues or if there is a currently
* running thread that has deferred signals.
*/
- else if ((_queue_signals != 0) || ((_thread_kern_in_sched == 0) &&
- (_thread_run->sig_defer_count > 0))) {
+ else if ((_thread_kern_in_sched != 0) ||
+ (_thread_run->sig_defer_count > 0)) {
/* Cast the signal number to a character variable: */
c = sig;
@@ -127,117 +132,150 @@ _thread_sig_handler(int sig, int code, ucontext_t * scp)
* Write the signal number to the kernel pipe so that it will
* be ready to read when this signal handler returns.
*/
- _thread_sys_write(_thread_kern_pipe[1], &c, 1);
+ if (_queue_signals != 0) {
+ _thread_sys_write(_thread_kern_pipe[1], &c, 1);
+ DBG_MSG("Got signal %d, queueing to kernel pipe\n", sig);
+ }
+ if (_thread_sigq[sig - 1].blocked == 0) {
+ DBG_MSG("Got signal %d, adding to _thread_sigq\n", sig);
+ /*
+ * Do not block this signal; it will be blocked
+ * when the pending signals are run down.
+ */
+ /* _thread_sigq[sig - 1].blocked = 1; */
- /* Indicate that there are queued signals in the pipe. */
- _sigq_check_reqd = 1;
- } else {
- if (_atomic_lock(&signal_lock.access_lock)) {
- /* There is another signal handler running: */
- pending_sigs[sig - 1]++;
- check_pending = 1;
+ /*
+ * Queue the signal, saving siginfo and sigcontext
+ * (ucontext).
+ *
+ * XXX - Do we need to copy siginfo and ucp?
+ */
+ _thread_sigq[sig - 1].signo = sig;
+ if (info != NULL)
+ memcpy(&_thread_sigq[sig - 1].siginfo, info,
+ sizeof(*info));
+ memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp));
+
+ /* Indicate that there are queued signals: */
+ _thread_sigq[sig - 1].pending = 1;
+ _sigq_check_reqd = 1;
}
- else {
- /* It's safe to handle the signal now. */
- pthread = _thread_sig_handle(sig, scp);
+ /* These signals need special handling: */
+ else if (sig == SIGCHLD || sig == SIGTSTP ||
+ sig == SIGTTIN || sig == SIGTTOU) {
+ _thread_sigq[sig - 1].pending = 1;
+ _thread_sigq[sig - 1].signo = sig;
+ _sigq_check_reqd = 1;
+ }
+ else
+ DBG_MSG("Got signal %d, ignored.\n", sig);
+ }
+ /*
+ * The signal handlers should have been installed so that they
+ * cannot be interrupted by other signals.
+ */
+ else if (_thread_sigq[sig - 1].blocked == 0) {
+ /* The signal is not blocked; handle the signal: */
+ current_frame = _thread_run->sigframe_count;
- /* Reset the pending and handled count back to 0: */
- pending_sigs[sig - 1] = 0;
- handled_sigs[sig - 1] = 0;
+ /*
+ * Ignore subsequent occurrences of this signal
+ * until the current signal is handled:
+ */
+ _thread_sigq[sig - 1].blocked = 1;
- if (pthread == NULL)
- signal_lock.access_lock = 0;
- else {
- sigaddset(&pthread->sigmask, sig);
+ /* This signal will be handled; clear the pending flag: */
+ _thread_sigq[sig - 1].pending = 0;
- /*
- * Make sure not to deliver the same signal to
- * the thread twice. sigpend is potentially
- * modified by the call chain
- * _thread_sig_handle() -->
- * thread_sig_check_state(), which can happen
- * just above.
- */
- if (sigismember(&pthread->sigpend, sig))
- sigdelset(&pthread->sigpend, sig);
+ /*
+ * Save siginfo and sigcontext (ucontext).
+ *
+ * XXX - Do we need to copy siginfo and ucp?
+ */
+ _thread_sigq[sig - 1].signo = sig;
- signal_lock.access_lock = 0;
- _thread_sig_deliver(pthread, sig);
- sigdelset(&pthread->sigmask, sig);
- }
- }
+ if (info != NULL)
+ memcpy(&_thread_sigq[sig - 1].siginfo, info,
+ sizeof(*info));
+ memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp));
+ SIG_SET_ACTIVE();
- /* Enter a loop to process pending signals: */
- while ((check_pending != 0) &&
- (_atomic_lock(&signal_lock.access_lock) == 0)) {
- check_pending = 0;
- for (i = 1; i < NSIG; i++) {
- if (pending_sigs[i - 1] > handled_sigs[i - 1]) {
- pending_sigs[i - 1] = handled_sigs[i - 1];
- pthread = _thread_sig_handle(i, scp);
- if (pthread != NULL) {
- sigaddset(&pthread->sigmask, i);
- /* Save the old state: */
- pthread->oldstate = pthread->state;
- signal_lock.access_lock = 0;
- _thread_sig_deliver(pthread, i);
- sigdelset(&pthread->sigmask, i);
- if (_atomic_lock(&signal_lock.access_lock)) {
- check_pending = 1;
- /*
- * Have the lock holder take care
- * of any state changes:
- */
- if (pthread->state != pthread->oldstate)
- check_waiting = 1;
- return;
- }
- if (pthread->state != pthread->oldstate)
- handle_state_change(pthread);
- }
- }
- }
- while (check_waiting != 0) {
- check_waiting = 0;
- /*
- * Enter a loop to wake up all threads waiting
- * for a process to complete:
- */
- for (pthread = TAILQ_FIRST(&_waitingq);
- pthread != NULL; pthread = pthread_next) {
- pthread_next = TAILQ_NEXT(pthread, pqe);
- if (pthread->state == PS_RUNNING)
- handle_state_change(pthread);
- }
- }
- /* Release the lock: */
- signal_lock.access_lock = 0;
+ /* Handle special signals: */
+ thread_sig_handle_special(sig);
+
+ if ((pthread = thread_sig_find(sig)) != NULL) {
+ DBG_MSG("Got signal %d, adding frame to thread %p\n",
+ sig, pthread);
+ /*
+ * A thread was found that can handle the signal.
+ * Save the context of the currently running thread
+ * so that we can switch to another thread without
+ * losing track of where the current thread left off.
+ * This also applies if the current thread is the
+ * thread to be signaled.
+ */
+ thread_sig_savecontext(_thread_run, ucp);
+
+ /* Setup the target thread to receive the signal: */
+ thread_sig_add(pthread, sig, /*has_args*/ 1);
+
+ /* Take a peek at the next ready to run thread: */
+ pthread = PTHREAD_PRIOQ_FIRST();
+ DBG_MSG("Finished adding frame, head of prio list %p\n",
+ pthread);
}
+ else
+ DBG_MSG("No thread to handle signal %d\n", sig);
+ SIG_SET_INACTIVE();
/*
- * Check to see if the current thread performed a
- * [sig|_]longjmp() out of a signal handler.
+ * Switch to a different context if the currently running
+ * thread takes a signal, or if another thread takes a
+ * signal and the currently running thread is not in a
+ * signal handler.
*/
- if ((_thread_run->jmpflags & (JMPFLAGS_LONGJMP |
- JMPFLAGS__LONGJMP)) != 0) {
- _thread_run->jmpflags = JMPFLAGS_NONE;
- __longjmp(_thread_run->nested_jmp.jmp,
- _thread_run->longjmp_val);
- } else if ((_thread_run->jmpflags & JMPFLAGS_SIGLONGJMP) != 0) {
- _thread_run->jmpflags = JMPFLAGS_NONE;
- __siglongjmp(_thread_run->nested_jmp.sigjmp,
- _thread_run->longjmp_val);
+ if ((_thread_run->sigframe_count > current_frame) ||
+ ((pthread != NULL) &&
+ (pthread->active_priority > _thread_run->active_priority))) {
+ /* Enter the kernel scheduler: */
+ DBG_MSG("Entering scheduler from signal handler\n");
+ _thread_kern_sched(ucp);
}
}
+ else {
+ SIG_SET_ACTIVE();
+ thread_sig_handle_special(sig);
+ SIG_SET_INACTIVE();
+ }
}
+static void
+thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp)
+{
+ struct pthread_signal_frame *psf;
+
+ psf = _thread_run->curframe;
+
+ memcpy(&psf->ctx.uc, ucp, sizeof(*ucp));
+
+ /* XXX - Save FP registers too? */
+ FP_SAVE_UC(&psf->ctx.uc);
+
+ /* Mark the context saved as a ucontext: */
+ psf->ctxtype = CTX_UC;
+}
+
+/*
+ * Find a thread that can handle the signal.
+ */
pthread_t
-_thread_sig_handle(int sig, ucontext_t * scp)
+thread_sig_find(int sig)
{
- int i, handler_installed;
+ int handler_installed;
pthread_t pthread, pthread_next;
pthread_t suspended_thread, signaled_thread;
+ DBG_MSG("Looking for thread to handle signal %d\n", sig);
/* Check if the signal requires a dump of thread information: */
if (sig == SIGINFO)
/* Dump thread information to file: */
@@ -249,77 +287,22 @@ _thread_sig_handle(int sig, ucontext_t * scp)
* This shouldn't ever occur (should this panic?).
*/
} else {
- /* Check if a child has terminated: */
- if (sig == SIGCHLD) {
- /*
- * Go through the file list and set all files
- * to non-blocking again in case the child
- * set some of them to block. Sigh.
- */
- for (i = 0; i < _thread_dtablesize; i++) {
- /* Check if this file is used: */
- if (_thread_fd_table[i] != NULL) {
- /*
- * Set the file descriptor to
- * non-blocking:
- */
- _thread_sys_fcntl(i, F_SETFL,
- _thread_fd_table[i]->flags |
- O_NONBLOCK);
- }
- }
- /*
- * Enter a loop to wake up all threads waiting
- * for a process to complete:
- */
- for (pthread = TAILQ_FIRST(&_waitingq);
- pthread != NULL; pthread = pthread_next) {
- /*
- * Grab the next thread before possibly
- * destroying the link entry:
- */
- pthread_next = TAILQ_NEXT(pthread, pqe);
-
- /*
- * If this thread is waiting for a child
- * process to complete, wake it up:
- */
- if (pthread->state == PS_WAIT_WAIT) {
- /* Make the thread runnable: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
-
- /* Return the signal number: */
- pthread->signo = sig;
- }
- }
- }
-
- /*
- * POSIX says that pending SIGCONT signals are
- * discarded when one of these signals occurs.
- */
- if (sig == SIGTSTP || sig == SIGTTIN || sig == SIGTTOU) {
- /*
- * Enter a loop to discard pending SIGCONT
- * signals:
- */
- TAILQ_FOREACH(pthread, &_thread_list, tle) {
- sigdelset(&pthread->sigpend,SIGCONT);
- }
- }
-
/*
* Enter a loop to look for threads that have the signal
* unmasked. POSIX specifies that a thread in a sigwait
* will get the signal over any other threads. Second
- * preference will be threads in in a sigsuspend. If
- * none of the above, then the signal is delivered to the
- * first thread we find. Note that if a custom handler
- * is not installed, the signal only affects threads in
- * sigwait.
+	 * preference will be threads in a sigsuspend.  Third
+ * preference will be the current thread. If none of the
+ * above, then the signal is delivered to the first thread
+ * that is found. Note that if a custom handler is not
+ * installed, the signal only affects threads in sigwait.
*/
suspended_thread = NULL;
- signaled_thread = NULL;
+ if ((_thread_run != &_thread_kern_thread) &&
+ !sigismember(&_thread_run->sigmask, sig))
+ signaled_thread = _thread_run;
+ else
+ signaled_thread = NULL;
if ((_thread_sigact[sig - 1].sa_handler == SIG_IGN) ||
(_thread_sigact[sig - 1].sa_handler == SIG_DFL))
handler_installed = 0;
@@ -338,6 +321,13 @@ _thread_sig_handle(int sig, ucontext_t * scp)
sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ /*
+ * A signal handler is not invoked for threads
+ * in sigwait. Clear the blocked and pending
+ * flags.
+ */
+ _thread_sigq[sig - 1].blocked = 0;
+ _thread_sigq[sig - 1].pending = 0;
/* Return the signal number: */
pthread->signo = sig;
@@ -349,7 +339,8 @@ _thread_sig_handle(int sig, ucontext_t * scp)
* we find.
*
* Do not attempt to deliver this signal
- * to other threads.
+ * to other threads and do not add the signal
+ * to the process pending set.
*/
return (NULL);
}
@@ -367,7 +358,14 @@ _thread_sig_handle(int sig, ucontext_t * scp)
* Only perform wakeups and signal delivery if there is a
* custom handler installed:
*/
- if (handler_installed != 0) {
+ if (handler_installed == 0) {
+ /*
+ * There is no handler installed. Unblock the
+ * signal so that if a handler _is_ installed, any
+ * subsequent signals can be handled.
+ */
+ _thread_sigq[sig - 1].blocked = 0;
+ } else {
/*
* If we didn't find a thread in the waiting queue,
* check the all threads queue:
@@ -403,12 +401,6 @@ _thread_sig_handle(int sig, ucontext_t * scp)
pthread = suspended_thread;
else
pthread = signaled_thread;
-
- /*
- * Perform any state changes due to signal
- * arrival:
- */
- thread_sig_check_state(pthread, sig);
return (pthread);
}
}
@@ -418,100 +410,166 @@ _thread_sig_handle(int sig, ucontext_t * scp)
return (NULL);
}
-static void
-thread_sig_finish_longjmp(void *arg)
+void
+_thread_sig_check_pending(pthread_t pthread)
{
+ sigset_t sigset;
+ int i;
+
/*
- * Check to see if the current thread performed a [_]longjmp() out of a
- * signal handler.
+ * Check if there are pending signals for the running
+ * thread or process that aren't blocked:
*/
- if ((_thread_run->jmpflags & (JMPFLAGS_LONGJMP | JMPFLAGS__LONGJMP))
- != 0) {
- _thread_run->jmpflags = JMPFLAGS_NONE;
- _thread_run->continuation = NULL;
- __longjmp(_thread_run->nested_jmp.jmp,
- _thread_run->longjmp_val);
+ sigset = pthread->sigpend;
+ SIGSETOR(sigset, _process_sigpending);
+ SIGSETNAND(sigset, pthread->sigmask);
+ if (SIGNOTEMPTY(sigset)) {
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&sigset, i) != 0) {
+ if (sigismember(&pthread->sigpend, i) != 0)
+ thread_sig_add(pthread, i,
+ /*has_args*/ 0);
+ else {
+ thread_sig_add(pthread, i,
+ /*has_args*/ 1);
+ sigdelset(&_process_sigpending, i);
+ }
+ }
+ }
}
+}
+
+/*
+ * This can only be called from the kernel scheduler. It assumes that
+ * all thread contexts are saved and that a signal frame can safely be
+ * added to any user thread.
+ */
+void
+_thread_sig_handle_pending(void)
+{
+ pthread_t pthread;
+ int i, sig;
+
+ PTHREAD_ASSERT(_thread_kern_in_sched != 0,
+ "_thread_sig_handle_pending called from outside kernel schedule");
/*
- * Check to see if the current thread performed a siglongjmp
- * out of a signal handler:
+ * Check the array of pending signals:
*/
- else if ((_thread_run->jmpflags & JMPFLAGS_SIGLONGJMP) != 0) {
- _thread_run->jmpflags = JMPFLAGS_NONE;
- _thread_run->continuation = NULL;
- __siglongjmp(_thread_run->nested_jmp.sigjmp,
- _thread_run->longjmp_val);
+ for (i = 0; i < NSIG; i++) {
+ if (_thread_sigq[i].pending != 0) {
+ /* This signal is no longer pending. */
+ _thread_sigq[i].pending = 0;
+
+ sig = _thread_sigq[i].signo;
+
+ /* Some signals need special handling: */
+ thread_sig_handle_special(sig);
+
+ if (_thread_sigq[i].blocked == 0) {
+ /*
+ * Block future signals until this one
+ * is handled:
+ */
+ _thread_sigq[i].blocked = 1;
+
+ if ((pthread = thread_sig_find(sig)) != NULL) {
+ /*
+ * Setup the target thread to receive
+ * the signal:
+ */
+ thread_sig_add(pthread, sig,
+ /*has_args*/ 1);
+ }
+ }
+ }
}
}
static void
-handle_state_change(pthread_t pthread)
+thread_sig_handle_special(int sig)
{
- /*
- * We should only need to handle threads whose state was
- * changed to running:
- */
- if (pthread->state == PS_RUNNING) {
- switch (pthread->oldstate) {
+ pthread_t pthread, pthread_next;
+ int i;
+
+ switch (sig) {
+ case SIGCHLD:
/*
- * States which do not change when a signal is trapped:
+ * Go through the file list and set all files
+ * to non-blocking again in case the child
+ * set some of them to block. Sigh.
*/
- case PS_DEAD:
- case PS_DEADLOCK:
- case PS_RUNNING:
- case PS_SIGTHREAD:
- case PS_STATE_MAX:
- case PS_SUSPENDED:
- break;
-
+ for (i = 0; i < _thread_dtablesize; i++) {
+ /* Check if this file is used: */
+ if (_thread_fd_table[i] != NULL) {
+ /*
+ * Set the file descriptor to non-blocking:
+ */
+ _thread_sys_fcntl(i, F_SETFL,
+ _thread_fd_table[i]->flags | O_NONBLOCK);
+ }
+ }
/*
- * States which need to return to critical sections
- * before they can switch contexts:
+ * Enter a loop to wake up all threads waiting
+ * for a process to complete:
*/
- case PS_COND_WAIT:
- case PS_FDLR_WAIT:
- case PS_FDLW_WAIT:
- case PS_FILE_WAIT:
- case PS_JOIN:
- case PS_MUTEX_WAIT:
- /* Indicate that the thread was interrupted: */
- pthread->interrupted = 1;
+ for (pthread = TAILQ_FIRST(&_waitingq);
+ pthread != NULL; pthread = pthread_next) {
+ /*
+ * Grab the next thread before possibly
+ * destroying the link entry:
+ */
+ pthread_next = TAILQ_NEXT(pthread, pqe);
+
/*
- * Defer the [sig|_]longjmp until leaving the critical
- * region:
+ * If this thread is waiting for a child
+ * process to complete, wake it up:
*/
- pthread->jmpflags |= JMPFLAGS_DEFERRED;
-
- /* Set the continuation routine: */
- pthread->continuation = thread_sig_finish_longjmp;
- /* FALLTHROUGH */
- case PS_FDR_WAIT:
- case PS_FDW_WAIT:
- case PS_POLL_WAIT:
- case PS_SELECT_WAIT:
- case PS_SIGSUSPEND:
- case PS_SIGWAIT:
- case PS_SLEEP_WAIT:
- case PS_SPINBLOCK:
- case PS_WAIT_WAIT:
- if ((pthread->flags & PTHREAD_FLAGS_IN_WAITQ) != 0) {
- PTHREAD_WAITQ_REMOVE(pthread);
- if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
- PTHREAD_WORKQ_REMOVE(pthread);
+ if (pthread->state == PS_WAIT_WAIT) {
+ /* Make the thread runnable: */
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+
+ /* Return the signal number: */
+ pthread->signo = sig;
}
- break;
}
+ break;
- if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0)
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ /*
+ * POSIX says that pending SIGCONT signals are
+ * discarded when one of these signals occurs.
+ */
+ case SIGTSTP:
+ case SIGTTIN:
+ case SIGTTOU:
+ /*
+ * Enter a loop to discard pending SIGCONT
+ * signals:
+ */
+ TAILQ_FOREACH(pthread, &_thread_list, tle) {
+ sigdelset(&pthread->sigpend, SIGCONT);
+ }
+ break;
+
+ default:
+ break;
}
}
-
-/* Perform thread specific actions in response to a signal: */
+/*
+ * Perform thread specific actions in response to a signal.
+ * This function is only called if there is a handler installed
+ * for the signal, and if the target thread has the signal
+ * unmasked.
+ */
static void
-thread_sig_check_state(pthread_t pthread, int sig)
+thread_sig_add(pthread_t pthread, int sig, int has_args)
{
+ int restart, frame;
+ int block_signals = 0;
+ int suppress_handler = 0;
+
+ restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;
+
/*
* Process according to thread state:
*/
@@ -519,32 +577,54 @@ thread_sig_check_state(pthread_t pthread, int sig)
/*
* States which do not change when a signal is trapped:
*/
- case PS_COND_WAIT:
case PS_DEAD:
case PS_DEADLOCK:
- case PS_FILE_WAIT:
- case PS_JOIN:
- case PS_MUTEX_WAIT:
- case PS_RUNNING:
case PS_STATE_MAX:
case PS_SIGTHREAD:
- case PS_SPINBLOCK:
+ /*
+ * You can't call a signal handler for threads in these
+ * states.
+ */
+ suppress_handler = 1;
+ break;
+
+ /*
+ * States which do not need any cleanup handling when signals
+ * occur:
+ */
+ case PS_RUNNING:
+ /*
+ * Remove the thread from the queue before changing its
+ * priority:
+ */
+ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0)
+ PTHREAD_PRIOQ_REMOVE(pthread);
+ break;
+
case PS_SUSPENDED:
- /* Increment the pending signal count. */
- sigaddset(&pthread->sigpend,sig);
+ break;
+
+ case PS_SPINBLOCK:
+ /* Remove the thread from the workq and waitq: */
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_WAITQ_REMOVE(pthread);
+ /* Make the thread runnable: */
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
break;
case PS_SIGWAIT:
+ /* The signal handler is not called for threads in SIGWAIT. */
+ suppress_handler = 1;
/* Wake up the thread if the signal is blocked. */
if (sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread, PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
} else
/* Increment the pending signal count. */
- sigaddset(&pthread->sigpend,sig);
+ sigaddset(&pthread->sigpend, sig);
break;
/*
@@ -552,64 +632,142 @@ thread_sig_check_state(pthread_t pthread, int sig)
* SIGCHLD signals.
*/
case PS_WAIT_WAIT:
- /*
- * Check for signals other than the death of a child
- * process:
- */
- if (sig != SIGCHLD)
- /* Flag the operation as interrupted: */
- pthread->interrupted = 1;
+ if (sig == SIGCHLD) {
+ /* Change the state of the thread to run: */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ /* Return the signal number: */
+ pthread->signo = sig;
+ }
+ else {
+ /*
+ * Mark the thread as interrupted only if the
+ * restart flag is not set on the signal action:
+ */
+ if (restart == 0)
+ pthread->interrupted = 1;
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ }
+ break;
- /* Return the signal number: */
- pthread->signo = sig;
+ /*
+ * States which cannot be interrupted but still require the
+ * signal handler to run:
+ */
+ case PS_COND_WAIT:
+ case PS_JOIN:
+ case PS_MUTEX_WAIT:
+ /*
+ * Remove the thread from the wait queue. It will
+ * be added back to the wait queue once all signal
+ * handlers have been invoked.
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
break;
/*
- * States that are interrupted by the occurrence of a signal
- * other than the scheduling alarm:
+ * States which are interruptible but may need to be removed
+ * from queues before any signal handler is called.
+ *
+ * XXX - We may not need to handle this condition, but will
+ * mark it as a potential problem.
*/
case PS_FDLR_WAIT:
case PS_FDLW_WAIT:
+ case PS_FILE_WAIT:
+ if (restart == 0)
+ pthread->interrupted = 1;
+ /*
+ * Remove the thread from the wait queue. Our
+ * signal handler hook will remove this thread
+ * from the fd or file queue before invoking
+ * the actual handler.
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ /*
+ * To ensure the thread is removed from the fd and file
+ * queues before any other signal interrupts it, set the
+ * signal mask to block all signals. As soon as the thread
+ * is removed from the queue the signal mask will be
+ * restored.
+ */
+ block_signals = 1;
+ break;
+
+ /*
+ * States which are interruptible:
+ */
case PS_FDR_WAIT:
case PS_FDW_WAIT:
- case PS_POLL_WAIT:
- case PS_SLEEP_WAIT:
- case PS_SELECT_WAIT:
- if ((_thread_sigact[sig - 1].sa_flags & SA_RESTART) == 0) {
- /* Flag the operation as interrupted: */
+ if (restart == 0) {
+ /*
+ * Flag the operation as interrupted and
+ * set the state to running:
+ */
pthread->interrupted = 1;
-
- if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
- PTHREAD_WORKQ_REMOVE(pthread);
-
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
-
- /* Return the signal number: */
- pthread->signo = sig;
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
}
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_WAITQ_REMOVE(pthread);
break;
- case PS_SIGSUSPEND:
+ case PS_POLL_WAIT:
+ case PS_SELECT_WAIT:
+ case PS_SLEEP_WAIT:
/*
- * Only wake up the thread if there is a handler installed
- * for the signal.
+ * Unmasked signals always cause poll, select, and sleep
+ * to terminate early, regardless of SA_RESTART:
*/
- if (_thread_sigact[sig - 1].sa_handler != SIG_DFL) {
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ pthread->interrupted = 1;
+ /* Remove threads in poll and select from the workq: */
+ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0)
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ break;
- /* Return the signal number: */
- pthread->signo = sig;
- }
+ case PS_SIGSUSPEND:
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
break;
}
+
+ if (suppress_handler == 0) {
+ /*
+ * Save the current state of the thread and add a
+ * new signal frame.
+ */
+ frame = pthread->sigframe_count;
+ thread_sigframe_save(pthread, pthread->curframe);
+ thread_sigframe_add(pthread, sig);
+ pthread->sigframes[frame + 1]->sig_has_args = has_args;
+ SIGSETOR(pthread->sigmask, _thread_sigact[sig - 1].sa_mask);
+ if (block_signals != 0) {
+ /* Save the signal mask and block all signals: */
+ pthread->sigframes[frame + 1]->saved_state.psd_sigmask =
+ pthread->sigmask;
+ sigfillset(&pthread->sigmask);
+ }
+
+ /* Make sure the thread is runnable: */
+ if (pthread->state != PS_RUNNING)
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ /*
+ * The thread should be removed from all scheduling
+ * queues at this point. Raise the priority and place
+ * the thread in the run queue.
+ */
+ pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY;
+ if (pthread != _thread_run)
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ }
}
-/* Send a signal to a specific thread (ala pthread_kill): */
+/*
+ * Send a signal to a specific thread (ala pthread_kill):
+ */
void
_thread_sig_send(pthread_t pthread, int sig)
{
@@ -620,142 +778,400 @@ _thread_sig_send(pthread_t pthread, int sig)
if (pthread->state == PS_SIGWAIT &&
sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread, PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
- } else if (pthread->state != PS_SIGWAIT &&
- !sigismember(&pthread->sigmask, sig)) {
- /* Perform any state changes due to signal arrival: */
- thread_sig_check_state(pthread, sig);
- /* Increment the pending signal count. */
- sigaddset(&pthread->sigpend,sig);
+ } else if (pthread == _thread_run) {
+ /* Add the signal to the pending set: */
+ sigaddset(&pthread->sigpend, sig);
+ /*
+ * Deliver the signal to the process if a
+ * handler is not installed:
+ */
+ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL)
+ kill(getpid(), sig);
+ if (!sigismember(&pthread->sigmask, sig)) {
+ /*
+ * Call the kernel scheduler which will safely
+ * install a signal frame for this thread:
+ */
+ _thread_kern_sched_sig();
+ }
} else {
- /* Increment the pending signal count. */
- sigaddset(&pthread->sigpend,sig);
+ if (pthread->state != PS_SIGWAIT &&
+ !sigismember(&pthread->sigmask, sig)) {
+ /* Protect the scheduling queues: */
+ _thread_kern_sig_defer();
+ /*
+ * Perform any state changes due to signal
+ * arrival:
+ */
+ thread_sig_add(pthread, sig, /* has args */ 0);
+ /* Unprotect the scheduling queues: */
+ _thread_kern_sig_undefer();
+ }
+ else
+ /* Increment the pending signal count. */
+ sigaddset(&pthread->sigpend,sig);
+
+ /*
+ * Deliver the signal to the process if a
+ * handler is not installed:
+ */
+ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL)
+ kill(getpid(), sig);
}
}
}
-/* Dispatch pending signals to the running thread: */
+/*
+ * User thread signal handler wrapper.
+ *
+ * thread - current running thread
+ */
void
-_dispatch_signals()
+_thread_sig_wrapper(void)
{
- sigset_t sigset;
- int i;
+ void (*sigfunc)(int, siginfo_t *, void *);
+ struct pthread_signal_frame *psf;
+ pthread_t thread;
+ int dead = 0;
+ int i, sig, has_args;
+ int frame, dst_frame;
+
+ thread = _thread_run;
+
+ /* Get the current frame and state: */
+ frame = thread->sigframe_count;
+ PTHREAD_ASSERT(frame > 0, "Invalid signal frame in signal handler");
+ psf = thread->curframe;
+
+	/* Check the thread's previous state: */
+ if (psf->saved_state.psd_state != PS_RUNNING) {
+ /*
+ * Do a little cleanup handling for those threads in
+ * queues before calling the signal handler. Signals
+ * for these threads are temporarily blocked until
+ * after cleanup handling.
+ */
+ switch (psf->saved_state.psd_state) {
+ case PS_FDLR_WAIT:
+ case PS_FDLW_WAIT:
+ _fd_lock_backout(thread);
+ psf->saved_state.psd_state = PS_RUNNING;
+ /* Reenable signals: */
+ thread->sigmask = psf->saved_state.psd_sigmask;
+ break;
+
+ case PS_FILE_WAIT:
+ _flockfile_backout(thread);
+ psf->saved_state.psd_state = PS_RUNNING;
+ /* Reenable signals: */
+ thread->sigmask = psf->saved_state.psd_sigmask;
+ break;
+
+ default:
+ break;
+ }
+ }
/*
- * Check if there are pending signals for the running
- * thread or process that aren't blocked:
+ * Unless the thread exits or longjmps out of the signal handler,
+ * return to the previous frame:
*/
- sigset = _thread_run->sigpend;
- SIGSETOR(sigset, _process_sigpending);
- SIGSETNAND(sigset, _thread_run->sigmask);
- if (SIGNOTEMPTY(sigset)) {
+ dst_frame = frame - 1;
+
+ /*
+ * Check that a custom handler is installed and if the signal
+ * is not blocked:
+ */
+ sigfunc = _thread_sigact[psf->signo - 1].sa_sigaction;
+ if (((__sighandler_t *)sigfunc != SIG_DFL) &&
+ ((__sighandler_t *)sigfunc != SIG_IGN)) {
/*
- * Enter a loop to calculate deliverable pending signals
- * before actually delivering them. The pending signals
- * must be removed from the pending signal sets before
- * calling the signal handler because the handler may
- * call library routines that again check for and deliver
- * pending signals.
+ * The signal jump buffer is allocated off the stack.
+ * If the signal handler tries to [_][sig]longjmp() or
+ * setcontext(), our wrapped versions of these routines
+ * will copy the user supplied jump buffer or context
+ * to the destination signal frame, set the destination
+ * signal frame in psf->dst_frame, and _longjmp() back
+ * to here.
*/
- for (i = 1; i < NSIG; i++) {
+ jmp_buf jb;
+
+ /*
+ * Set up the context for abnormal returns out of signal
+ * handlers.
+ */
+ psf->sig_jb = &jb;
+ if (_setjmp(jb) == 0) {
+ DBG_MSG("_thread_sig_wrapper: Entering frame %d, "
+ "stack 0x%lx\n", frame, GET_STACK_JB(jb));
/*
- * Check that a custom handler is installed
- * and if the signal is not blocked:
+ * Invalidate the destination frame before calling
+ * the signal handler.
*/
- if (_thread_sigact[i - 1].sa_handler != SIG_DFL &&
- _thread_sigact[i - 1].sa_handler != SIG_IGN &&
- sigismember(&sigset, i)) {
- if (sigismember(&_thread_run->sigpend,i))
- /* Clear the thread pending signal: */
- sigdelset(&_thread_run->sigpend,i);
- else
- /* Clear the process pending signal: */
- sigdelset(&_process_sigpending,i);
- }
+ psf->dst_frame = -1;
+
+ /*
+ * Dispatch the signal via the custom signal
+ * handler:
+ */
+ if (psf->sig_has_args == 0)
+ (*(sigfunc))(psf->signo, NULL, NULL);
+ else if ((_thread_sigact[psf->signo - 1].sa_flags &
+ SA_SIGINFO) != 0)
+ (*(sigfunc))(psf->signo,
+ &_thread_sigq[psf->signo - 1].siginfo,
+ &_thread_sigq[psf->signo - 1].uc);
else
- /* Remove the signal if it can't be handled: */
- sigdelset(&sigset, i);
+ (*(sigfunc))(psf->signo,
+ (siginfo_t *)_thread_sigq[psf->signo - 1].siginfo.si_code,
+ &_thread_sigq[psf->signo - 1].uc);
}
+ else {
+ /*
+ * The return from _setjmp() should only be non-zero
+ * when the signal handler wants to xxxlongjmp() or
+ * setcontext() to a different context, or if the
+ * thread has exited (via pthread_exit).
+ */
+ /*
+ * Grab a copy of the destination frame before it
+ * gets clobbered after unwinding.
+ */
+ dst_frame = psf->dst_frame;
+ DBG_MSG("Abnormal exit from handler for signal %d, "
+ "frame %d\n", psf->signo, frame);
+
+ /* Has the thread exited? */
+ if ((dead = thread->flags & PTHREAD_EXITING) != 0)
+ /* When exiting, unwind to frame 0. */
+ dst_frame = 0;
+ else if ((dst_frame < 0) || (dst_frame > frame))
+ PANIC("Attempt to unwind to invalid "
+ "signal frame");
+
+ /* Unwind to the target frame: */
+ for (i = frame; i > dst_frame; i--) {
+ DBG_MSG("Leaving frame %d, signal %d\n", i,
+ thread->sigframes[i]->signo);
+ /* Leave the current signal frame: */
+ thread_sigframe_leave(thread, i);
- /* Now deliver the signals: */
- for (i = 1; i < NSIG; i++) {
- if (sigismember(&sigset, i))
- /* Deliver the signal to the running thread: */
- _thread_sig_deliver(_thread_run, i);
+ /*
+ * Save whatever is needed out of the state
+ * data; as soon as the frame count is
+ * is decremented, another signal can arrive
+ * and corrupt this view of the state data.
+ */
+ sig = thread->sigframes[i]->signo;
+ has_args = thread->sigframes[i]->sig_has_args;
+
+ /*
+ * We're done with this signal frame:
+ */
+ thread->curframe = thread->sigframes[i - 1];
+ thread->sigframe_count = i - 1;
+
+ /*
+ * Only unblock the signal if it was a
+ * process signal as opposed to a signal
+ * generated by pthread_kill().
+ */
+ if (has_args != 0)
+ _thread_sigq[sig - 1].blocked = 0;
+ }
}
}
+
+ /*
+ * Call the kernel scheduler to schedule the next
+ * thread.
+ */
+ if (dead == 0) {
+ /* Restore the threads state: */
+ thread_sigframe_restore(thread, thread->sigframes[dst_frame]);
+ _thread_kern_sched_frame(dst_frame);
+ }
+ else {
+ PTHREAD_ASSERT(dst_frame == 0,
+ "Invalid signal frame for dead thread");
+
+ /* Perform any necessary cleanup before exiting. */
+ thread_sigframe_leave(thread, 0);
+
+ /* This should never return: */
+ _thread_exit_finish();
+ PANIC("Return from _thread_exit_finish in signal wrapper");
+ }
}
-/* Deliver a signal to a thread: */
-void
-_thread_sig_deliver(pthread_t pthread, int sig)
+static void
+thread_sigframe_add(pthread_t thread, int sig)
{
- sigset_t mask;
- pthread_t pthread_saved;
- jmp_buf jb, *saved_sighandler_jmp_buf;
+ unsigned long stackp = 0;
+
+ /* Get the top of the threads stack: */
+ switch (thread->curframe->ctxtype) {
+ case CTX_JB:
+ case CTX_JB_NOSIG:
+ stackp = GET_STACK_JB(thread->curframe->ctx.jb);
+ break;
+ case CTX_SJB:
+ stackp = GET_STACK_SJB(thread->curframe->ctx.sigjb);
+ break;
+ case CTX_UC:
+ stackp = GET_STACK_UC(&thread->curframe->ctx.uc);
+ break;
+ default:
+ PANIC("Invalid thread context type");
+ break;
+ }
/*
- * Check that a custom handler is installed
- * and if the signal is not blocked:
+ * Leave a little space on the stack and round down to the
+ * nearest aligned word:
*/
- if (_thread_sigact[sig - 1].sa_handler != SIG_DFL &&
- _thread_sigact[sig - 1].sa_handler != SIG_IGN) {
- /* Save the current thread: */
- pthread_saved = _thread_run;
+ stackp -= sizeof(double);
+ stackp &= ~0x3UL;
+
+ /* Allocate room on top of the stack for a new signal frame: */
+ stackp -= sizeof(struct pthread_signal_frame);
+
+ /* Set up the new frame: */
+ thread->sigframe_count++;
+ thread->sigframes[thread->sigframe_count] =
+ (struct pthread_signal_frame *) stackp;
+ thread->curframe = thread->sigframes[thread->sigframe_count];
+ thread->curframe->stackp = stackp;
+ thread->curframe->ctxtype = CTX_JB;
+ thread->curframe->longjmp_val = 1;
+ thread->curframe->signo = sig;
- /* Save the threads signal mask: */
- mask = pthread->sigmask;
-
- /*
- * Add the current signal and signal handler
- * mask to the thread's current signal mask:
- */
- SIGSETOR(pthread->sigmask, _thread_sigact[sig - 1].sa_mask);
- sigaddset(&pthread->sigmask, sig);
+ /*
+ * Set up the context:
+ */
+ _setjmp(thread->curframe->ctx.jb);
+ SET_STACK_JB(thread->curframe->ctx.jb, stackp);
+ SET_RETURN_ADDR_JB(thread->curframe->ctx.jb, _thread_sig_wrapper);
+}
- /* Current thread inside critical region? */
- if (_thread_run->sig_defer_count > 0)
- pthread->sig_defer_count++;
+/*
+ * Locate the signal frame from the specified stack pointer.
+ */
+int
+_thread_sigframe_find(pthread_t pthread, void *stackp)
+{
+ int frame;
- /* Increment the number of nested signals being handled. */
- pthread->signal_nest_level++;
+ /*
+ * Find the destination of the target frame based on the
+ * given stack pointer.
+ */
+ for (frame = pthread->sigframe_count; frame >= 0; frame--) {
+ if (stackp < (void *)pthread->sigframes[frame]->stackp)
+ break;
+ }
+ return (frame);
+}
+
+void
+thread_sigframe_leave(pthread_t thread, int frame)
+{
+ struct pthread_state_data *psd;
- /*
- * The jump buffer is allocated off the stack and the current
- * jump buffer is saved. If the signal handler tries to
- * [sig|_]longjmp(), our version of [sig|_]longjmp() will copy
- * the user supplied jump buffer into
- * _thread_run->nested_jmp.[sig]jmp and _longjmp() back to here.
- */
- saved_sighandler_jmp_buf = pthread->sighandler_jmp_buf;
- pthread->sighandler_jmp_buf = &jb;
+ psd = &thread->sigframes[frame]->saved_state;
- _thread_run = pthread;
+ /*
+ * Perform any necessary cleanup for this signal frame:
+ */
+ switch (psd->psd_state) {
+ case PS_DEAD:
+ case PS_DEADLOCK:
+ case PS_RUNNING:
+ case PS_SIGTHREAD:
+ case PS_STATE_MAX:
+ case PS_SUSPENDED:
+ break;
- if (_setjmp(jb) == 0) {
- /*
- * Dispatch the signal via the custom signal
- * handler:
- */
- (*(_thread_sigact[sig - 1].sa_handler))(sig);
- }
+ /*
+ * Threads in the following states need to be removed
+ * from queues.
+ */
+ case PS_COND_WAIT:
+ _cond_wait_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- _thread_run = pthread_saved;
+ case PS_FDLR_WAIT:
+ case PS_FDLW_WAIT:
+ _fd_lock_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- pthread->sighandler_jmp_buf = saved_sighandler_jmp_buf;
+ case PS_FILE_WAIT:
+ _flockfile_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- /* Decrement the signal nest level. */
- pthread->signal_nest_level--;
+ case PS_JOIN:
+ _join_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- /* Current thread inside critical region? */
- if (_thread_run->sig_defer_count > 0)
- pthread->sig_defer_count--;
+ case PS_MUTEX_WAIT:
+ _mutex_lock_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- /* Restore the threads signal mask: */
- pthread->sigmask = mask;
+ case PS_FDR_WAIT:
+ case PS_FDW_WAIT:
+ case PS_POLL_WAIT:
+ case PS_SELECT_WAIT:
+ case PS_SIGSUSPEND:
+ case PS_SIGWAIT:
+ case PS_SLEEP_WAIT:
+ case PS_SPINBLOCK:
+ case PS_WAIT_WAIT:
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) {
+ PTHREAD_WAITQ_REMOVE(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WORKQ) != 0)
+ PTHREAD_WORKQ_REMOVE(thread);
+ }
+ break;
}
}
+
+static void
+thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf)
+{
+ thread->interrupted = psf->saved_state.psd_interrupted;
+ thread->sigmask = psf->saved_state.psd_sigmask;
+ thread->state = psf->saved_state.psd_state;
+ thread->flags = psf->saved_state.psd_flags;
+ thread->wakeup_time = psf->saved_state.psd_wakeup_time;
+ thread->data = psf->saved_state.psd_wait_data;
+}
+
+static void
+thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf)
+{
+ psf->saved_state.psd_interrupted = thread->interrupted;
+ psf->saved_state.psd_sigmask = thread->sigmask;
+ psf->saved_state.psd_state = thread->state;
+ psf->saved_state.psd_flags = thread->flags;
+ thread->flags &= PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE |
+ PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ |
+ PTHREAD_FLAGS_IN_JOINQ;
+ psf->saved_state.psd_wakeup_time = thread->wakeup_time;
+ psf->saved_state.psd_wait_data = thread->data;
+}
+
#endif
diff --git a/lib/libc_r/uthread/uthread_sigaction.c b/lib/libc_r/uthread/uthread_sigaction.c
index 319999bf3047..e78f3290154c 100644
--- a/lib/libc_r/uthread/uthread_sigaction.c
+++ b/lib/libc_r/uthread/uthread_sigaction.c
@@ -74,12 +74,13 @@ _sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
*/
if (act != NULL && sig != _SCHED_SIGNAL && sig != SIGCHLD &&
sig != SIGINFO) {
- /* Initialise the global signal action structure: */
- gact.sa_mask = act->sa_mask;
- gact.sa_flags = 0;
-
- /* Ensure the scheduling signal is masked: */
- sigaddset(&gact.sa_mask, _SCHED_SIGNAL);
+ /*
+ * Ensure the signal handler cannot be interrupted
+ * by other signals. Always request the POSIX signal
+ * handler arguments.
+ */
+ sigfillset(&gact.sa_mask);
+ gact.sa_flags = SA_SIGINFO;
/*
* Check if the signal handler is being set to
diff --git a/lib/libc_r/uthread/uthread_sigmask.c b/lib/libc_r/uthread/uthread_sigmask.c
index b880d9c748c8..bdb0b438ec48 100644
--- a/lib/libc_r/uthread/uthread_sigmask.c
+++ b/lib/libc_r/uthread/uthread_sigmask.c
@@ -43,7 +43,8 @@
int
pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
- int ret = 0;
+ sigset_t sigset;
+ int ret = 0;
/* Check if the existing signal process mask is to be returned: */
if (oset != NULL) {
@@ -81,10 +82,18 @@ pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
}
/*
- * Dispatch signals to the running thread that are pending
- * and now unblocked:
+ * Check if there are pending signals for the running
+ * thread or process that aren't blocked:
*/
- _dispatch_signals();
+ sigset = _thread_run->sigpend;
+ SIGSETOR(sigset, _process_sigpending);
+ SIGSETNAND(sigset, _thread_run->sigmask);
+ if (SIGNOTEMPTY(sigset))
+ /*
+ * Call the kernel scheduler which will safely
+ * install a signal frame for the running thread:
+ */
+ _thread_kern_sched_sig();
}
/* Return the completion status: */
diff --git a/lib/libc_r/uthread/uthread_signal.c b/lib/libc_r/uthread/uthread_signal.c
index 2b8f46a747d7..ff4ce8885cb0 100644
--- a/lib/libc_r/uthread/uthread_signal.c
+++ b/lib/libc_r/uthread/uthread_signal.c
@@ -45,7 +45,7 @@ _thread_sys_signal(int s, sig_t a)
/* Initialise the signal action structure: */
sigemptyset(&sa.sa_mask);
sa.sa_handler = a;
- sa.sa_flags = 0;
+ sa.sa_flags = SA_SIGINFO;
/* Perform the sigaction syscall: */
if (_thread_sys_sigaction(s, &sa, &osa) < 0) {
diff --git a/lib/libc_r/uthread/uthread_sigprocmask.c b/lib/libc_r/uthread/uthread_sigprocmask.c
index 6addb4a948e9..f8ca0311c42c 100644
--- a/lib/libc_r/uthread/uthread_sigprocmask.c
+++ b/lib/libc_r/uthread/uthread_sigprocmask.c
@@ -41,53 +41,9 @@
#include "pthread_private.h"
int
-_sigprocmask(int how, const sigset_t * set, sigset_t * oset)
+_sigprocmask(int how, const sigset_t *set, sigset_t *oset)
{
- int ret = 0;
-
- /* Check if the existing signal process mask is to be returned: */
- if (oset != NULL) {
- /* Return the current mask: */
- *oset = _thread_run->sigmask;
- }
- /* Check if a new signal set was provided by the caller: */
- if (set != NULL) {
- /* Process according to what to do: */
- switch (how) {
- /* Block signals: */
- case SIG_BLOCK:
- /* Add signals to the existing mask: */
- SIGSETOR(_thread_run->sigmask, *set);
- break;
-
- /* Unblock signals: */
- case SIG_UNBLOCK:
- /* Clear signals from the existing mask: */
- SIGSETNAND(_thread_run->sigmask, *set);
- break;
-
- /* Set the signal process mask: */
- case SIG_SETMASK:
- /* Set the new mask: */
- _thread_run->sigmask = *set;
- break;
-
- /* Trap invalid actions: */
- default:
- /* Return an invalid argument: */
- errno = EINVAL;
- ret = -1;
- break;
- }
-
- /*
- * Dispatch signals to the running thread that are pending
- * and now unblocked:
- */
- _dispatch_signals();
- }
- /* Return the completion status: */
- return (ret);
+ return (pthread_sigmask(how, set, oset));
}
__strong_reference(_sigprocmask, sigprocmask);
diff --git a/lib/libc_r/uthread/uthread_sigwait.c b/lib/libc_r/uthread/uthread_sigwait.c
index b12c028740c9..6ba685559263 100644
--- a/lib/libc_r/uthread/uthread_sigwait.c
+++ b/lib/libc_r/uthread/uthread_sigwait.c
@@ -40,7 +40,7 @@
#include "pthread_private.h"
int
-sigwait(const sigset_t * set, int *sig)
+sigwait(const sigset_t *set, int *sig)
{
int ret = 0;
int i;
@@ -52,11 +52,9 @@ sigwait(const sigset_t * set, int *sig)
* Specify the thread kernel signal handler.
*/
act.sa_handler = (void (*) ()) _thread_sig_handler;
- act.sa_flags = SA_RESTART;
- act.sa_mask = *set;
-
- /* Ensure the scheduling signal is masked: */
- sigaddset(&act.sa_mask, _SCHED_SIGNAL);
+ act.sa_flags = SA_RESTART | SA_SIGINFO;
+ /* Ensure the signal handler cannot be interrupted by other signals: */
+ sigfillset(&act.sa_mask);
/*
* Initialize the set of signals that will be waited on:
diff --git a/lib/libc_r/uthread/uthread_write.c b/lib/libc_r/uthread/uthread_write.c
index 6408a64830c2..5d9ef35f3fbc 100644
--- a/lib/libc_r/uthread/uthread_write.c
+++ b/lib/libc_r/uthread/uthread_write.c
@@ -127,7 +127,7 @@ _write(int fd, const void *buf, size_t nbytes)
/* Return the number of bytes written: */
ret = num;
}
- _FD_UNLOCK(fd, FD_RDWR);
+ _FD_UNLOCK(fd, FD_WRITE);
}
return (ret);
}
diff --git a/lib/libc_r/uthread/uthread_yield.c b/lib/libc_r/uthread/uthread_yield.c
index 064dd826d34c..7d64283a9910 100644
--- a/lib/libc_r/uthread/uthread_yield.c
+++ b/lib/libc_r/uthread/uthread_yield.c
@@ -57,8 +57,5 @@ pthread_yield(void)
/* Schedule the next thread: */
_thread_kern_sched(NULL);
-
- /* Nothing to return. */
- return;
}
#endif
diff --git a/lib/libkse/thread/thr_attr_setschedparam.c b/lib/libkse/thread/thr_attr_setschedparam.c
index 6c4166b1784a..755bb13b7acc 100644
--- a/lib/libkse/thread/thr_attr_setschedparam.c
+++ b/lib/libkse/thread/thr_attr_setschedparam.c
@@ -45,6 +45,10 @@ pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param
ret = EINVAL;
else if (param == NULL) {
ret = ENOTSUP;
+ } else if ((param->sched_priority < PTHREAD_MIN_PRIORITY) ||
+ (param->sched_priority > PTHREAD_MAX_PRIORITY)) {
+ /* Return an unsupported value error. */
+ ret = ENOTSUP;
} else
(*attr)->prio = param->sched_priority;
diff --git a/lib/libkse/thread/thr_cond.c b/lib/libkse/thread/thr_cond.c
index 0a5298bcfcc0..50cf92765954 100644
--- a/lib/libkse/thread/thr_cond.c
+++ b/lib/libkse/thread/thr_cond.c
@@ -170,10 +170,7 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
* perform the dynamic initialization:
*/
else if (*cond != NULL ||
- (rval = pthread_cond_init(cond,NULL)) == 0) {
-
- _thread_enter_cancellation_point();
-
+ (rval = pthread_cond_init(cond, NULL)) == 0) {
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -286,8 +283,6 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
if (_thread_run->continuation != NULL)
_thread_run->continuation((void *) _thread_run);
}
-
- _thread_leave_cancellation_point();
}
_thread_leave_cancellation_point();
@@ -313,8 +308,6 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
* initialization.
*/
else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
- _thread_enter_cancellation_point();
-
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -446,8 +439,6 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
if (_thread_run->continuation != NULL)
_thread_run->continuation((void *) _thread_run);
}
-
- _thread_leave_cancellation_point();
}
_thread_leave_cancellation_point();
@@ -589,6 +580,48 @@ pthread_cond_broadcast(pthread_cond_t * cond)
return (rval);
}
+void
+_cond_wait_backout(pthread_t pthread)
+{
+ pthread_cond_t cond;
+
+ cond = pthread->data.cond;
+ if (cond != NULL) {
+ /*
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /* Lock the condition variable structure: */
+ _SPINLOCK(&cond->lock);
+
+ /* Process according to condition variable type: */
+ switch (cond->c_type) {
+ /* Fast condition variable: */
+ case COND_TYPE_FAST:
+ cond_queue_remove(cond, pthread);
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&cond->c_queue) == NULL)
+ cond->c_mutex = NULL;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Unlock the condition variable structure: */
+ _SPINUNLOCK(&cond->lock);
+
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
+ */
+ _thread_kern_sig_undefer();
+ }
+}
+
/*
* Dequeue a waiting thread from the head of a condition queue in
* descending priority order.
@@ -599,7 +632,7 @@ cond_queue_deq(pthread_cond_t cond)
pthread_t pthread;
while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
- TAILQ_REMOVE(&cond->c_queue, pthread, qe);
+ TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
if ((pthread->timeout == 0) && (pthread->interrupted == 0))
/*
@@ -628,7 +661,7 @@ cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
* it isn't in the queue.
*/
if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) {
- TAILQ_REMOVE(&cond->c_queue, pthread, qe);
+ TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
}
}
@@ -642,19 +675,22 @@ cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
{
pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head);
+ PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
+
/*
* For the common case of all threads having equal priority,
* we perform a quick check against the priority of the thread
* at the tail of the queue.
*/
if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
- TAILQ_INSERT_TAIL(&cond->c_queue, pthread, qe);
+ TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe);
else {
tid = TAILQ_FIRST(&cond->c_queue);
while (pthread->active_priority <= tid->active_priority)
- tid = TAILQ_NEXT(tid, qe);
- TAILQ_INSERT_BEFORE(tid, pthread, qe);
+ tid = TAILQ_NEXT(tid, sqe);
+ TAILQ_INSERT_BEFORE(tid, pthread, sqe);
}
pthread->flags |= PTHREAD_FLAGS_IN_CONDQ;
+ pthread->data.cond = cond;
}
#endif
diff --git a/lib/libkse/thread/thr_create.c b/lib/libkse/thread/thr_create.c
index 3a80611dc145..0390f1be7205 100644
--- a/lib/libkse/thread/thr_create.c
+++ b/lib/libkse/thread/thr_create.c
@@ -49,17 +49,24 @@
static u_int64_t next_uniqueid = 1;
#define OFF(f) offsetof(struct pthread, f)
+#define SIGFRAME_OFF(f) offsetof(struct pthread_signal_frame, f)
int _thread_next_offset = OFF(tle.tqe_next);
int _thread_uniqueid_offset = OFF(uniqueid);
int _thread_state_offset = OFF(state);
int _thread_name_offset = OFF(name);
-int _thread_sig_saved_offset = OFF(sig_saved);
-int _thread_saved_sigcontext_offset = OFF(saved_sigcontext);
-int _thread_saved_jmp_buf_offset = OFF(saved_jmp_buf);
+int _thread_curframe_offset = OFF(curframe);
+int _thread_sigframe_ctx_offset = SIGFRAME_OFF(ctx);
+int _thread_sigframe_ctxtype_offset = SIGFRAME_OFF(ctxtype);
#undef OFF
+#undef SIGFRAME_OFF
int _thread_PS_RUNNING_value = PS_RUNNING;
int _thread_PS_DEAD_value = PS_DEAD;
+int _thread_CTX_JB_NOSIG_value = CTX_JB_NOSIG;
+int _thread_CTX_JB_value = CTX_JB;
+int _thread_CTX_SJB_value = CTX_SJB;
+int _thread_CTX_UC_value = CTX_UC;
+int _thread_sigframe_size_value = sizeof(struct pthread_signal_frame);
int
pthread_create(pthread_t * thread, const pthread_attr_t * attr,
@@ -162,7 +169,6 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Initialise the thread structure: */
memset(new_thread, 0, sizeof(struct pthread));
new_thread->slice_usec = -1;
- new_thread->sig_saved = 0;
new_thread->stack = stack;
new_thread->start_routine = start_routine;
new_thread->arg = arg;
@@ -179,62 +185,32 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Initialise the thread for signals: */
new_thread->sigmask = _thread_run->sigmask;
+ /* Initialize the first signal frame: */
+ new_thread->sigframes[0] = &new_thread->sigframe0;
+ new_thread->curframe = &new_thread->sigframe0;
+
/* Initialise the jump buffer: */
- setjmp(new_thread->saved_jmp_buf);
+ _setjmp(new_thread->curframe->ctx.jb);
/*
* Set up new stack frame so that it looks like it
* returned from a longjmp() to the beginning of
* _thread_start().
*/
-#if defined(__FreeBSD__)
-#if defined(__alpha__)
- new_thread->saved_jmp_buf[0]._jb[2] =
- (long)_thread_start;
- new_thread->saved_jmp_buf[0]._jb[4 + R_RA] =
- 0;
- new_thread->saved_jmp_buf[0]._jb[4 + R_T12] =
- (long)_thread_start;
-#else
- new_thread->saved_jmp_buf[0]._jb[0] =
- (long)_thread_start;
-#endif
-#elif defined(__NetBSD__)
-#if defined(__alpha__)
- new_thread->saved_jmp_buf[2] = (long)_thread_start;
- new_thread->saved_jmp_buf[4 + R_RA] = 0;
- new_thread->saved_jmp_buf[4 + R_T12] =
- (long)_thread_start;
-#else
- new_thread->saved_jmp_buf[0] = (long)_thread_start;
-#endif
-#else
-#error "Don't recognize this operating system!"
-#endif
+ SET_RETURN_ADDR_JB(new_thread->curframe->ctx.jb,
+ _thread_start);
/* The stack starts high and builds down: */
-#if defined(__FreeBSD__)
-#if defined(__alpha__)
- new_thread->saved_jmp_buf[0]._jb[4 + R_SP] =
- (long)new_thread->stack + pattr->stacksize_attr
- - sizeof(double);
-#else
- new_thread->saved_jmp_buf[0]._jb[2] =
- (int)(new_thread->stack + pattr->stacksize_attr -
- sizeof(double));
-#endif
-#elif defined(__NetBSD__)
-#if defined(__alpha__)
- new_thread->saved_jmp_buf[4 + R_SP] =
- (long)new_thread->stack + pattr->stacksize_attr -
- sizeof(double);
-#else
- new_thread->saved_jmp_buf[2] = (long)new_thread->stack
- + pattr->stacksize_attr - sizeof(double);
-#endif
-#else
-#error "Don't recognize this operating system!"
-#endif
+ SET_STACK_JB(new_thread->curframe->ctx.jb,
+ (long)new_thread->stack + pattr->stacksize_attr
+ - sizeof(double));
+
+ /* Initialize the rest of the frame: */
+ new_thread->curframe->ctxtype = CTX_JB_NOSIG;
+ /* Set the base of the stack: */
+ new_thread->curframe->stackp =
+ GET_STACK_JB(new_thread->curframe->ctx.jb);
+ new_thread->sigframe_count = 0;
/* Copy the thread attributes: */
memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));
@@ -245,20 +221,22 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
/* Copy the scheduling attributes: */
- new_thread->base_priority
- = _thread_run->base_priority;
- new_thread->attr.prio
- = _thread_run->base_priority;
- new_thread->attr.sched_policy
- = _thread_run->attr.sched_policy;
+ new_thread->base_priority =
+ _thread_run->base_priority &
+ ~PTHREAD_SIGNAL_PRIORITY;
+ new_thread->attr.prio =
+ _thread_run->base_priority &
+ ~PTHREAD_SIGNAL_PRIORITY;
+ new_thread->attr.sched_policy =
+ _thread_run->attr.sched_policy;
} else {
/*
* Use just the thread priority, leaving the
* other scheduling attributes as their
* default values:
*/
- new_thread->base_priority
- = new_thread->attr.prio;
+ new_thread->base_priority =
+ new_thread->attr.prio;
}
new_thread->active_priority = new_thread->base_priority;
new_thread->inherited_priority = 0;
@@ -275,7 +253,6 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->flags = 0;
new_thread->poll_data.nfds = 0;
new_thread->poll_data.fds = NULL;
- new_thread->jmpflags = 0;
new_thread->continuation = NULL;
/*
@@ -317,7 +294,6 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Schedule the new user thread: */
_thread_kern_sched(NULL);
-
/*
* Start a garbage collector thread
* if necessary.
@@ -325,6 +301,7 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
if (f_gc && pthread_create(&gc_thread,NULL,
_thread_gc,NULL) != 0)
PANIC("Can't create gc thread");
+
}
}
diff --git a/lib/libkse/thread/thr_detach.c b/lib/libkse/thread/thr_detach.c
index 89cc6671179c..3bade9d5b244 100644
--- a/lib/libkse/thread/thr_detach.c
+++ b/lib/libkse/thread/thr_detach.c
@@ -61,9 +61,10 @@ pthread_detach(pthread_t pthread)
/* Enter a loop to bring all threads off the join queue: */
while ((next_thread = TAILQ_FIRST(&pthread->join_queue)) != NULL) {
/* Remove the thread from the queue: */
- TAILQ_REMOVE(&pthread->join_queue, next_thread, qe);
+ TAILQ_REMOVE(&pthread->join_queue, next_thread, sqe);
+ pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ;
- /* Make the thread run: */
+ /* Make the thread runnable: */
PTHREAD_NEW_STATE(next_thread,PS_RUNNING);
}
diff --git a/lib/libkse/thread/thr_exit.c b/lib/libkse/thread/thr_exit.c
index 0d22638fd7fc..7fbeb656192f 100644
--- a/lib/libkse/thread/thr_exit.c
+++ b/lib/libkse/thread/thr_exit.c
@@ -41,6 +41,9 @@
#include <pthread.h>
#include "pthread_private.h"
+#define FLAGS_IN_SCHEDQ \
+ (PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ)
+
void __exit(int status)
{
int flags;
@@ -138,7 +141,7 @@ _thread_exit_cleanup(void)
void
pthread_exit(void *status)
{
- pthread_t pthread;
+ int frame;
/* Check if this thread is already in the process of exiting: */
if ((_thread_run->flags & PTHREAD_EXITING) != 0) {
@@ -172,25 +175,24 @@ pthread_exit(void *status)
_thread_run->poll_data.fds = NULL;
}
- /*
- * Defer signals to protect the scheduling queues from access
- * by the signal handler:
- */
- _thread_kern_sig_defer();
-
- /* Check if there are any threads joined to this one: */
- while ((pthread = TAILQ_FIRST(&(_thread_run->join_queue))) != NULL) {
- /* Remove the thread from the queue: */
- TAILQ_REMOVE(&_thread_run->join_queue, pthread, qe);
-
- /* Wake the joined thread and let it detach this thread: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ if ((frame = _thread_run->sigframe_count) == 0)
+ _thread_exit_finish();
+ else {
+ /*
+ * Jump back and unwind the signal frames to gracefully
+ * cleanup.
+ */
+ ___longjmp(*_thread_run->sigframes[frame]->sig_jb, 1);
}
- /*
- * Undefer and handle pending signals, yielding if necessary:
- */
- _thread_kern_sig_undefer();
+ /* This point should not be reached. */
+ PANIC("Dead thread has resumed");
+}
+
+void
+_thread_exit_finish(void)
+{
+ pthread_t pthread;
/*
* Lock the garbage collector mutex to ensure that the garbage
@@ -203,20 +205,6 @@ pthread_exit(void *status)
TAILQ_INSERT_HEAD(&_dead_list, _thread_run, dle);
/*
- * Defer signals to protect the scheduling queues from access
- * by the signal handler:
- */
- _thread_kern_sig_defer();
-
- /* Remove this thread from the thread list: */
- TAILQ_REMOVE(&_thread_list, _thread_run, tle);
-
- /*
- * Undefer and handle pending signals, yielding if necessary:
- */
- _thread_kern_sig_undefer();
-
- /*
* Signal the garbage collector thread that there is something
* to clean up.
*/
@@ -224,19 +212,33 @@ pthread_exit(void *status)
PANIC("Cannot signal gc cond");
/*
- * Mark the thread as dead so it will not return if it
- * gets context switched out when the mutex is unlocked.
+ * Avoid a race condition where a scheduling signal can occur
+ * causing the garbage collector thread to run. If this happens,
+ * the current thread can be cleaned out from under us.
*/
- PTHREAD_SET_STATE(_thread_run, PS_DEAD);
+ _thread_kern_sig_defer();
/* Unlock the garbage collector mutex: */
if (pthread_mutex_unlock(&_gc_mutex) != 0)
PANIC("Cannot lock gc mutex");
- /* This this thread will never be re-scheduled. */
- _thread_kern_sched(NULL);
+ /* Check if there are any threads joined to this one: */
+ while ((pthread = TAILQ_FIRST(&(_thread_run->join_queue))) != NULL) {
+ /* Remove the thread from the queue: */
+ TAILQ_REMOVE(&_thread_run->join_queue, pthread, sqe);
+ pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ;
+
+ /*
+ * Wake the joined thread and let it
+ * detach this thread:
+ */
+ PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ }
- /* This point should not be reached. */
- PANIC("Dead thread has resumed");
+ /* Remove this thread from the thread list: */
+ TAILQ_REMOVE(&_thread_list, _thread_run, tle);
+
+ /* This thread will never be re-scheduled. */
+ _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__);
}
#endif
diff --git a/lib/libkse/thread/thr_fork.c b/lib/libkse/thread/thr_fork.c
index a8e85d86e18f..97039b3ef804 100644
--- a/lib/libkse/thread/thr_fork.c
+++ b/lib/libkse/thread/thr_fork.c
@@ -183,9 +183,6 @@ _fork(void)
/* Don't queue signals yet: */
_queue_signals = 0;
- /* Initialize signal handling: */
- _thread_sig_init();
-
/* Initialize the scheduling switch hook routine: */
_sched_switch_hook = NULL;
diff --git a/lib/libkse/thread/thr_getschedparam.c b/lib/libkse/thread/thr_getschedparam.c
index 09d8c1bc3283..5dbd1f0e83fb 100644
--- a/lib/libkse/thread/thr_getschedparam.c
+++ b/lib/libkse/thread/thr_getschedparam.c
@@ -49,7 +49,8 @@ pthread_getschedparam(pthread_t pthread, int *policy,
/* Find the thread in the list of active threads: */
else if ((ret = _find_thread(pthread)) == 0) {
/* Return the threads base priority and scheduling policy: */
- param->sched_priority = pthread->base_priority;
+ param->sched_priority =
+ PTHREAD_BASE_PRIORITY(pthread->base_priority);
*policy = pthread->attr.sched_policy;
}
diff --git a/lib/libkse/thread/thr_info.c b/lib/libkse/thread/thr_info.c
index d091ec12113c..ca91512edb5f 100644
--- a/lib/libkse/thread/thr_info.c
+++ b/lib/libkse/thread/thr_info.c
@@ -32,6 +32,7 @@
* $FreeBSD$
*/
#include <stdio.h>
+#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
@@ -296,7 +297,6 @@ _thread_dump_info(void)
/* Close the dump file: */
_thread_sys_close(fd);
}
- return;
}
/* Set the thread name for debug: */
diff --git a/lib/libkse/thread/thr_init.c b/lib/libkse/thread/thr_init.c
index 8e13f90dc993..3cbd453d3e27 100644
--- a/lib/libkse/thread/thr_init.c
+++ b/lib/libkse/thread/thr_init.c
@@ -90,9 +90,9 @@ _thread_init(void)
int i;
size_t len;
int mib[2];
- struct timeval tv;
struct clockinfo clockinfo;
struct sigaction act;
+ struct itimerval itimer;
/* Check if this function has already been called: */
if (_thread_initial)
@@ -160,7 +160,7 @@ _thread_init(void)
PANIC("Cannot get kernel write pipe flags");
}
/* Allocate and initialize the ready queue: */
- else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_MAX_PRIORITY) != 0) {
+ else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) != 0) {
/* Abort this application: */
PANIC("Cannot allocate priority ready queue.");
}
@@ -171,7 +171,11 @@ _thread_init(void)
* abort:
*/
PANIC("Cannot allocate memory for initial thread");
- } else {
+ }
+ /* Allocate memory for the scheduler stack: */
+ else if ((_thread_kern_sched_stack = malloc(PAGE_SIZE * 10)) == NULL)
+ PANIC("Failed to allocate stack for scheduler");
+ else {
/* Zero the global kernel thread structure: */
memset(&_thread_kern_thread, 0, sizeof(struct pthread));
_thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE;
@@ -211,6 +215,12 @@ _thread_init(void)
_thread_initial->attr.stackaddr_attr = _thread_initial->stack;
_thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL;
+ /* Setup the context for the scheduler: */
+ _setjmp(_thread_kern_sched_jb);
+ SET_STACK_JB(_thread_kern_sched_jb,
+ _thread_kern_sched_stack + PAGE_SIZE*10 - sizeof(double));
+ SET_RETURN_ADDR_JB(_thread_kern_sched_jb, _thread_kern_scheduler);
+
/*
* Write a magic value to the thread structure
* to help identify valid ones:
@@ -236,10 +246,19 @@ _thread_init(void)
TAILQ_INIT(&(_thread_initial->mutexq));
_thread_initial->priority_mutex_count = 0;
- /* Initialize last active time to now: */
- gettimeofday(&tv, NULL);
- _thread_initial->last_active.tv_sec = tv.tv_sec;
- _thread_initial->last_active.tv_usec = tv.tv_usec;
+ /* Initialize the global scheduling time: */
+ _sched_ticks = 0;
+ gettimeofday((struct timeval *) &_sched_tod, NULL);
+
+ /* Initialize last active: */
+ _thread_initial->last_active = (long) _sched_ticks;
+
+ /* Initialize the initial signal frame: */
+ _thread_initial->sigframes[0] = &_thread_initial->sigframe0;
+ _thread_initial->curframe = &_thread_initial->sigframe0;
+ _thread_initial->curframe->ctxtype = CTX_JB_NOSIG;
+ /* Set the base of the stack: */
+ _thread_initial->curframe->stackp = (unsigned long) USRSTACK;
/* Initialise the rest of the fields: */
_thread_initial->poll_data.nfds = 0;
@@ -257,10 +276,13 @@ _thread_init(void)
/* Initialise the global signal action structure: */
sigfillset(&act.sa_mask);
act.sa_handler = (void (*) ()) _thread_sig_handler;
- act.sa_flags = 0;
+ act.sa_flags = SA_SIGINFO;
+
+ /* Clear pending signals for the process: */
+ sigemptyset(&_process_sigpending);
- /* Initialize signal handling: */
- _thread_sig_init();
+ /* Clear the signal queue: */
+ memset(_thread_sigq, 0, sizeof(_thread_sigq));
/* Enter a loop to get the existing signal status: */
for (i = 1; i < NSIG; i++) {
@@ -295,13 +317,19 @@ _thread_init(void)
*/
PANIC("Cannot initialise signal handler");
}
+ _thread_sigact[_SCHED_SIGNAL - 1].sa_flags = SA_SIGINFO;
+ _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO;
+ _thread_sigact[SIGCHLD - 1].sa_flags = SA_SIGINFO;
+
+ /* Get the process signal mask: */
+ _thread_sys_sigprocmask(SIG_SETMASK, NULL, &_process_sigmask);
/* Get the kernel clockrate: */
mib[0] = CTL_KERN;
mib[1] = KERN_CLOCKRATE;
len = sizeof (struct clockinfo);
if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0)
- _clock_res_nsec = clockinfo.tick * 1000;
+ _clock_res_usec = clockinfo.tick;
/* Get the table size: */
if ((_thread_dtablesize = getdtablesize()) < 0) {
@@ -346,6 +374,14 @@ _thread_init(void)
PANIC("Cannot initialize stdio file "
"descriptor table entry");
}
+
+ /* Install the scheduling timer: */
+ itimer.it_interval.tv_sec = 0;
+ itimer.it_interval.tv_usec = _clock_res_usec;
+ itimer.it_value = itimer.it_interval;
+ if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0)
+ PANIC("Cannot set interval timer");
+
}
}
@@ -362,10 +398,6 @@ _thread_init(void)
if (pthread_mutex_init(&_gc_mutex,NULL) != 0 ||
pthread_cond_init(&_gc_cond,NULL) != 0)
PANIC("Failed to initialise garbage collector mutex or condvar");
-
- gettimeofday(&kern_inc_prio_time, NULL);
-
- return;
}
/*
diff --git a/lib/libkse/thread/thr_join.c b/lib/libkse/thread/thr_join.c
index 1cffc96eaea3..cda31bda36bd 100644
--- a/lib/libkse/thread/thr_join.c
+++ b/lib/libkse/thread/thr_join.c
@@ -40,7 +40,6 @@ int
pthread_join(pthread_t pthread, void **thread_return)
{
int ret = 0;
- pthread_t pthread1 = NULL;
_thread_enter_cancellation_point();
@@ -62,11 +61,7 @@ pthread_join(pthread_t pthread, void **thread_return)
* Find the thread in the list of active threads or in the
* list of dead threads:
*/
- if (_find_thread(pthread) == 0 ||
- _find_dead_thread(pthread) == 0)
- pthread1 = pthread;
-
- if (pthread1 == NULL)
+ if ((_find_thread(pthread) != 0) && (_find_dead_thread(pthread) != 0))
/* Return an error: */
ret = ESRCH;
@@ -77,6 +72,8 @@ pthread_join(pthread_t pthread, void **thread_return)
/* Check if the thread is not dead: */
else if (pthread->state != PS_DEAD) {
+ PTHREAD_ASSERT_NOT_IN_SYNCQ(_thread_run);
+
/* Clear the interrupted flag: */
_thread_run->interrupted = 0;
@@ -87,13 +84,18 @@ pthread_join(pthread_t pthread, void **thread_return)
_thread_kern_sig_defer();
/* Add the running thread to the join queue: */
- TAILQ_INSERT_TAIL(&(pthread->join_queue), _thread_run, qe);
+ TAILQ_INSERT_TAIL(&(pthread->join_queue), _thread_run, sqe);
+ _thread_run->flags |= PTHREAD_FLAGS_IN_JOINQ;
+ _thread_run->data.thread = pthread;
/* Schedule the next thread: */
_thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__);
- if (_thread_run->interrupted != 0)
- TAILQ_REMOVE(&(pthread->join_queue), _thread_run, qe);
+ if (_thread_run->interrupted != 0) {
+ TAILQ_REMOVE(&(pthread->join_queue), _thread_run, sqe);
+ _thread_run->flags &= ~PTHREAD_FLAGS_IN_JOINQ;
+ }
+ _thread_run->data.thread = NULL;
_thread_kern_sig_undefer();
@@ -122,4 +124,15 @@ pthread_join(pthread_t pthread, void **thread_return)
/* Return the completion status: */
return (ret);
}
+
+void
+_join_backout(pthread_t pthread)
+{
+ _thread_kern_sig_defer();
+ if (pthread->state == PS_JOIN) {
+ TAILQ_REMOVE(&pthread->data.thread->join_queue, pthread, sqe);
+ pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ;
+ }
+ _thread_kern_sig_undefer();
+}
#endif
diff --git a/lib/libkse/thread/thr_kern.c b/lib/libkse/thread/thr_kern.c
index 96a11da92d3e..23f16bc8f953 100644
--- a/lib/libkse/thread/thr_kern.c
+++ b/lib/libkse/thread/thr_kern.c
@@ -52,9 +52,16 @@
#include <pthread.h>
#include "pthread_private.h"
+/* #define DEBUG_THREAD_KERN */
+#ifdef DEBUG_THREAD_KERN
+#define DBG_MSG stdout_debug
+#else
+#define DBG_MSG(x...)
+#endif
+
/* Static function prototype definitions: */
static void
-_thread_kern_poll(int wait_reqd);
+thread_kern_poll(int wait_reqd);
static void
dequeue_signals(void);
@@ -62,18 +69,39 @@ dequeue_signals(void);
static inline void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);
+/* Static variables: */
+static int last_tick = 0;
+
+/*
+ * This is called when a signal handler finishes and wants to
+ * return to a previous frame.
+ */
void
-_thread_kern_sched(ucontext_t * scp)
+_thread_kern_sched_frame(int frame)
{
-#ifndef __alpha__
- char *fdata;
-#endif
- pthread_t pthread, pthread_h = NULL;
- struct itimerval itimer;
- struct timespec ts, ts1;
- struct timeval tv, tv1;
- int set_timer = 0;
+ /*
+ * Flag the pthread kernel as executing scheduler code
+ * to avoid a signal from interrupting this execution and
+ * corrupting the (soon-to-be) current frame.
+ */
+ _thread_kern_in_sched = 1;
+
+ /* Return to the specified frame: */
+ _thread_run->curframe = _thread_run->sigframes[frame];
+ _thread_run->sigframe_count = frame;
+
+ if (_thread_run->sigframe_count == 0)
+ /* Restore the thread's priority: */
+ _thread_run->active_priority &= ~PTHREAD_SIGNAL_PRIORITY;
+ /* Switch to the thread scheduler: */
+ ___longjmp(_thread_kern_sched_jb, 1);
+}
+
+
+void
+_thread_kern_sched(ucontext_t *scp)
+{
/*
* Flag the pthread kernel as executing scheduler code
* to avoid a scheduler signal from interrupting this
@@ -84,67 +112,94 @@ _thread_kern_sched(ucontext_t * scp)
/* Check if this function was called from the signal handler: */
if (scp != NULL) {
/*
- * Copy the signal context to the current thread's jump
- * buffer:
+ * The signal handler should have saved the state of
+ * the current thread. Restore the process signal
+ * mask.
*/
- memcpy(&_thread_run->saved_sigcontext, scp, sizeof(_thread_run->saved_sigcontext));
-
-#ifndef __alpha__
- /* Point to the floating point data in the running thread: */
- fdata = _thread_run->saved_fp;
-
- /* Save the floating point data: */
-__asm__("fnsave %0": :"m"(*fdata));
-#endif
-
- /* Flag the signal context as the last state saved: */
- _thread_run->sig_saved = 1;
- }
- /* Save the state of the current thread: */
- else if (setjmp(_thread_run->saved_jmp_buf) != 0) {
+ if (_thread_sys_sigprocmask(SIG_SETMASK,
+ &_process_sigmask, NULL) != 0)
+ PANIC("Unable to restore process mask after signal");
/*
- * This point is reached when a longjmp() is called to
- * restore the state of a thread.
- *
- * This is the normal way out of the scheduler.
+ * We're running on the signal stack; just call the
+ * kernel scheduler directly.
*/
- _thread_kern_in_sched = 0;
-
- if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) &&
- ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) {
- /*
- * Cancellations override signals.
- *
- * Stick a cancellation point at the start of
- * each async-cancellable thread's resumption.
+ DBG_MSG("Entering scheduler due to signal\n");
+ _thread_kern_scheduler();
+ } else {
+ /* Save the state of the current thread: */
+ if (_setjmp(_thread_run->curframe->ctx.jb) == 0) {
+ /* Flag the jump buffer was the last state saved: */
+ _thread_run->curframe->ctxtype = CTX_JB_NOSIG;
+ _thread_run->curframe->longjmp_val = 1;
+ } else {
+ DBG_MSG("Returned from ___longjmp, thread %p\n",
+ _thread_run);
+ /*
+ * This point is reached when a longjmp() is called
+ * to restore the state of a thread.
*
- * We allow threads woken at cancel points to do their
- * own checks.
+ * This is the normal way out of the scheduler.
*/
- pthread_testcancel();
+ _thread_kern_in_sched = 0;
+
+ if (_thread_run->sig_defer_count == 0) {
+ if (((_thread_run->cancelflags &
+ PTHREAD_AT_CANCEL_POINT) == 0) &&
+ ((_thread_run->cancelflags &
+ PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
+ /*
+ * Cancellations override signals.
+ *
+ * Stick a cancellation point at the
+ * start of each async-cancellable
+ * thread's resumption.
+ *
+ * We allow threads woken at cancel
+ * points to do their own checks.
+ */
+ pthread_testcancel();
+ }
+
+ if (_sched_switch_hook != NULL) {
+ /* Run the installed switch hook: */
+ thread_run_switch_hook(_last_user_thread,
+ _thread_run);
+ }
+ return;
}
+ /* Switch to the thread scheduler: */
+ ___longjmp(_thread_kern_sched_jb, 1);
+ }
+}
- /*
- * Check for undispatched signals due to calls to
- * pthread_kill().
- */
- if (SIGNOTEMPTY(_thread_run->sigpend))
- _dispatch_signals();
+void
+_thread_kern_sched_sig(void)
+{
+ _thread_run->check_pending = 1;
+ _thread_kern_sched(NULL);
+}
- if (_sched_switch_hook != NULL) {
- /* Run the installed switch hook: */
- thread_run_switch_hook(_last_user_thread, _thread_run);
- }
- return;
- } else
- /* Flag the jump buffer was the last state saved: */
- _thread_run->sig_saved = 0;
+void
+_thread_kern_scheduler(void)
+{
+ struct pthread_signal_frame *psf;
+ struct timespec ts;
+ struct timeval tv;
+ pthread_t pthread, pthread_h;
+ unsigned int current_tick;
+ int add_to_prioq;
/* If the currently running thread is a user thread, save it: */
if ((_thread_run->flags & PTHREAD_FLAGS_PRIVATE) == 0)
_last_user_thread = _thread_run;
+ /* Are there pending signals for this thread? */
+ if (_thread_run->check_pending != 0) {
+ _thread_run->check_pending = 0;
+ _thread_sig_check_pending(_thread_run);
+ }
+
/*
* Enter a scheduling loop that finds the next thread that is
* ready to run. This loop completes when there are no more threads
@@ -154,29 +209,37 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
while (!(TAILQ_EMPTY(&_thread_list))) {
/* Get the current time of day: */
- gettimeofday(&tv, NULL);
+ GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
+ current_tick = _sched_ticks;
/*
* Protect the scheduling queues from access by the signal
* handler.
*/
_queue_signals = 1;
+ add_to_prioq = 0;
if (_thread_run != &_thread_kern_thread) {
-
/*
* This thread no longer needs to yield the CPU.
*/
_thread_run->yield_on_sig_undefer = 0;
- /*
- * Save the current time as the time that the thread
- * became inactive:
- */
- _thread_run->last_inactive.tv_sec = tv.tv_sec;
- _thread_run->last_inactive.tv_usec = tv.tv_usec;
-
+ if (_thread_run->state != PS_RUNNING) {
+ /*
+ * Save the current time as the time that the
+ * thread became inactive:
+ */
+ _thread_run->last_inactive = (long)current_tick;
+ if (_thread_run->last_inactive <
+ _thread_run->last_active) {
+ /* Account for a rollover: */
+ _thread_run->last_inactive +=
+ UINT_MAX + 1;
+ }
+ }
+
/*
* Place the currently running thread into the
* appropriate queue(s).
@@ -198,22 +261,7 @@ __asm__("fnsave %0": :"m"(*fdata));
* are polled (to preserve round-robin
* scheduling).
*/
- if ((_thread_run->slice_usec != -1) &&
- (_thread_run->attr.sched_policy != SCHED_FIFO)) {
- /*
- * Accumulate the number of microseconds that
- * this thread has run for:
- */
- _thread_run->slice_usec +=
- (_thread_run->last_inactive.tv_sec -
- _thread_run->last_active.tv_sec) * 1000000 +
- _thread_run->last_inactive.tv_usec -
- _thread_run->last_active.tv_usec;
-
- /* Check for time quantum exceeded: */
- if (_thread_run->slice_usec > TIMESLICE_USEC)
- _thread_run->slice_usec = -1;
- }
+ add_to_prioq = 1;
break;
/*
@@ -260,7 +308,7 @@ __asm__("fnsave %0": :"m"(*fdata));
/* Increment spinblock count: */
_spinblock_count++;
- /* fall through */
+ /* FALLTHROUGH */
case PS_FDR_WAIT:
case PS_FDW_WAIT:
case PS_POLL_WAIT:
@@ -277,17 +325,26 @@ __asm__("fnsave %0": :"m"(*fdata));
}
}
- /* Unprotect the scheduling queues: */
- _queue_signals = 0;
-
/*
- * Poll file descriptors to update the state of threads
- * waiting on file I/O where data may be available:
+ * Poll file descriptors only if a new scheduling signal
+ * has occurred or if we have no more runnable threads.
*/
- _thread_kern_poll(0);
+ if (((current_tick = _sched_ticks) != last_tick) ||
+ ((_thread_run->state != PS_RUNNING) &&
+ (PTHREAD_PRIOQ_FIRST() == NULL))) {
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
- /* Protect the scheduling queues: */
- _queue_signals = 1;
+ /*
+ * Poll file descriptors to update the state of threads
+ * waiting on file I/O where data may be available:
+ */
+ thread_kern_poll(0);
+
+ /* Protect the scheduling queues: */
+ _queue_signals = 1;
+ }
+ last_tick = current_tick;
/*
* Wake up threads that have timedout. This has to be
@@ -329,12 +386,37 @@ __asm__("fnsave %0": :"m"(*fdata));
PTHREAD_WAITQ_CLEARACTIVE();
/*
- * Check if there is a current runnable thread that isn't
- * already in the ready queue:
+ * Check to see if the current thread needs to be added
+ * to the priority queue:
*/
- if ((_thread_run != &_thread_kern_thread) &&
- (_thread_run->state == PS_RUNNING) &&
- ((_thread_run->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0)) {
+ if (add_to_prioq != 0) {
+ /*
+ * Save the current time as the time that the
+ * thread became inactive:
+ */
+ current_tick = _sched_ticks;
+ _thread_run->last_inactive = (long)current_tick;
+ if (_thread_run->last_inactive <
+ _thread_run->last_active) {
+ /* Account for a rollover: */
+ _thread_run->last_inactive += UINT_MAX + 1;
+ }
+
+ if ((_thread_run->slice_usec != -1) &&
+ (_thread_run->attr.sched_policy != SCHED_FIFO)) {
+ /*
+ * Accumulate the number of microseconds for
+ * which the current thread has run:
+ */
+ _thread_run->slice_usec +=
+ (_thread_run->last_inactive -
+ _thread_run->last_active) *
+ (long)_clock_res_usec;
+ /* Check for time quantum exceeded: */
+ if (_thread_run->slice_usec > TIMESLICE_USEC)
+ _thread_run->slice_usec = -1;
+ }
+
if (_thread_run->slice_usec == -1) {
/*
* The thread exceeded its time
@@ -366,6 +448,8 @@ __asm__("fnsave %0": :"m"(*fdata));
* thread structure:
*/
_thread_run = &_thread_kern_thread;
+ DBG_MSG("No runnable threads, using kernel thread %p\n",
+ _thread_run);
/* Unprotect the scheduling queues: */
_queue_signals = 0;
@@ -374,20 +458,27 @@ __asm__("fnsave %0": :"m"(*fdata));
* There are no threads ready to run, so wait until
* something happens that changes this condition:
*/
- _thread_kern_poll(1);
- } else {
- /* Remove the thread from the ready queue: */
- PTHREAD_PRIOQ_REMOVE(pthread_h);
+ thread_kern_poll(1);
- /* Get first thread on the waiting list: */
- pthread = TAILQ_FIRST(&_waitingq);
+ /*
+ * This process' usage will likely be very small
+ * while waiting in a poll. Since the scheduling
+ * clock is based on the profiling timer, it is
+ * unlikely that the profiling timer will fire
+ * and update the time of day. To account for this,
+ * get the time of day after polling with a timeout.
+ */
+ gettimeofday((struct timeval *) &_sched_tod, NULL);
+
+ /* Check once more for a runnable thread: */
+ _queue_signals = 1;
+ pthread_h = PTHREAD_PRIOQ_FIRST();
+ _queue_signals = 0;
+ }
- /* Check to see if there is more than one thread: */
- if (pthread_h != TAILQ_FIRST(&_thread_list) ||
- TAILQ_NEXT(pthread_h, tle) != NULL)
- set_timer = 1;
- else
- set_timer = 0;
+ if (pthread_h != NULL) {
+ /* Remove the thread from the ready queue: */
+ PTHREAD_PRIOQ_REMOVE(pthread_h);
/* Unprotect the scheduling queues: */
_queue_signals = 0;
@@ -411,32 +502,19 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
(pthread->active_priority > pthread_h->active_priority)) {
+ /* Remove the thread from the ready queue: */
+ PTHREAD_PRIOQ_REMOVE(pthread);
+
/*
* Insert the lower priority thread
* at the head of its priority list:
*/
PTHREAD_PRIOQ_INSERT_HEAD(pthread_h);
- /* Remove the thread from the ready queue: */
- PTHREAD_PRIOQ_REMOVE(pthread);
-
/* There's a new thread in town: */
pthread_h = pthread;
}
- /* Get first thread on the waiting list: */
- pthread = TAILQ_FIRST(&_waitingq);
-
- /*
- * Check to see if there is more than one
- * thread:
- */
- if (pthread_h != TAILQ_FIRST(&_thread_list) ||
- TAILQ_NEXT(pthread_h, tle) != NULL)
- set_timer = 1;
- else
- set_timer = 0;
-
/* Unprotect the scheduling queues: */
_queue_signals = 0;
}
@@ -448,78 +526,8 @@ __asm__("fnsave %0": :"m"(*fdata));
* Save the current time as the time that the thread
* became active:
*/
- _thread_run->last_active.tv_sec = tv.tv_sec;
- _thread_run->last_active.tv_usec = tv.tv_usec;
-
- /*
- * Define the maximum time before a scheduling signal
- * is required:
- */
- itimer.it_value.tv_sec = 0;
- itimer.it_value.tv_usec = TIMESLICE_USEC;
-
- /*
- * The interval timer is not reloaded when it
- * times out. The interval time needs to be
- * calculated every time.
- */
- itimer.it_interval.tv_sec = 0;
- itimer.it_interval.tv_usec = 0;
-
- /* Get first thread on the waiting list: */
- if ((pthread != NULL) &&
- (pthread->wakeup_time.tv_sec != -1)) {
- /*
- * Calculate the time until this thread
- * is ready, allowing for the clock
- * resolution:
- */
- ts1.tv_sec = pthread->wakeup_time.tv_sec
- - ts.tv_sec;
- ts1.tv_nsec = pthread->wakeup_time.tv_nsec
- - ts.tv_nsec + _clock_res_nsec;
-
- /*
- * Check for underflow of the nanosecond field:
- */
- while (ts1.tv_nsec < 0) {
- /*
- * Allow for the underflow of the
- * nanosecond field:
- */
- ts1.tv_sec--;
- ts1.tv_nsec += 1000000000;
- }
- /*
- * Check for overflow of the nanosecond field:
- */
- while (ts1.tv_nsec >= 1000000000) {
- /*
- * Allow for the overflow of the
- * nanosecond field:
- */
- ts1.tv_sec++;
- ts1.tv_nsec -= 1000000000;
- }
- /*
- * Convert the timespec structure to a
- * timeval structure:
- */
- TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
-
- /*
- * Check if the thread will be ready
- * sooner than the earliest ones found
- * so far:
- */
- if (timercmp(&tv1, &itimer.it_value, <)) {
- /*
- * Update the time value:
- */
- itimer.it_value.tv_sec = tv1.tv_sec;
- itimer.it_value.tv_usec = tv1.tv_usec;
- }
- }
+ current_tick = _sched_ticks;
+ _thread_run->last_active = (long) current_tick;
/*
* Check if this thread is running for the first time
@@ -531,88 +539,51 @@ __asm__("fnsave %0": :"m"(*fdata));
_thread_run->slice_usec = 0;
}
- /* Check if there is more than one thread: */
- if (set_timer != 0) {
- /*
- * Start the interval timer for the
- * calculated time interval:
- */
- if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0) {
- /*
- * Cannot initialise the timer, so
- * abort this process:
- */
- PANIC("Cannot set scheduling timer");
- }
- }
-
- /*
- * Check if this thread is being continued from a
- * longjmp() out of a signal handler:
- */
- if ((_thread_run->jmpflags & JMPFLAGS_LONGJMP) != 0) {
- _thread_run->jmpflags = 0;
- __longjmp(_thread_run->nested_jmp.jmp,
- _thread_run->longjmp_val);
- }
/*
- * Check if this thread is being continued from a
- * _longjmp() out of a signal handler:
+ * If we had a context switch, run any
+ * installed switch hooks.
*/
- else if ((_thread_run->jmpflags & JMPFLAGS__LONGJMP) !=
- 0) {
- _thread_run->jmpflags = 0;
- ___longjmp(_thread_run->nested_jmp.jmp,
- _thread_run->longjmp_val);
+ if ((_sched_switch_hook != NULL) &&
+ (_last_user_thread != _thread_run)) {
+ thread_run_switch_hook(_last_user_thread,
+ _thread_run);
}
/*
- * Check if this thread is being continued from a
- * siglongjmp() out of a signal handler:
+ * Continue the thread at its current frame:
*/
- else if ((_thread_run->jmpflags & JMPFLAGS_SIGLONGJMP)
- != 0) {
- _thread_run->jmpflags = 0;
- __siglongjmp(
- _thread_run->nested_jmp.sigjmp,
- _thread_run->longjmp_val);
- }
- /* Check if a signal context was saved: */
- else if (_thread_run->sig_saved == 1) {
-#ifndef __alpha__
- /*
- * Point to the floating point data in the
- * running thread:
- */
- fdata = _thread_run->saved_fp;
+ psf = _thread_run->curframe;
+ switch(psf->ctxtype) {
+ case CTX_JB_NOSIG:
+ ___longjmp(psf->ctx.jb, psf->longjmp_val);
+ break;
+ case CTX_JB:
+ __longjmp(psf->ctx.jb, psf->longjmp_val);
+ break;
+ case CTX_SJB:
+ __siglongjmp(psf->ctx.sigjb, psf->longjmp_val);
+ break;
+ case CTX_UC:
+ /* XXX - Restore FP registers? */
+ FP_RESTORE_UC(&psf->ctx.uc);
- /* Restore the floating point state: */
- __asm__("frstor %0": :"m"(*fdata));
-#endif
/*
* Do a sigreturn to restart the thread that
* was interrupted by a signal:
*/
_thread_kern_in_sched = 0;
+#if NOT_YET
+ _setcontext(&psf->ctx.uc);
+#else
/*
- * If we had a context switch, run any
- * installed switch hooks.
- */
- if ((_sched_switch_hook != NULL) &&
- (_last_user_thread != _thread_run)) {
- thread_run_switch_hook(_last_user_thread,
- _thread_run);
- }
- _thread_sys_sigreturn(&_thread_run->saved_sigcontext);
- } else {
- /*
- * Do a longjmp to restart the thread that
- * was context switched out (by a longjmp to
- * a different thread):
+ * Ensure the process signal mask is set
+ * correctly:
*/
- __longjmp(_thread_run->saved_jmp_buf, 1);
+ psf->ctx.uc.uc_sigmask = _process_sigmask;
+ _thread_sys_sigreturn(&psf->ctx.uc);
+#endif
+ break;
}
-
/* This point should not be reached. */
PANIC("Thread has returned from sigreturn or longjmp");
}
@@ -645,7 +616,6 @@ _thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
/* Schedule the next thread that is ready: */
_thread_kern_sched(NULL);
- return;
}
void
@@ -675,11 +645,10 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
/* Schedule the next thread that is ready: */
_thread_kern_sched(NULL);
- return;
}
static void
-_thread_kern_poll(int wait_reqd)
+thread_kern_poll(int wait_reqd)
{
int count = 0;
int i, found;
@@ -696,7 +665,7 @@ _thread_kern_poll(int wait_reqd)
}
else {
/* Get the current time of day: */
- gettimeofday(&tv, NULL);
+ GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
_queue_signals = 1;
@@ -713,11 +682,11 @@ _thread_kern_poll(int wait_reqd)
else {
/*
* Calculate the time left for the next thread to
- * timeout allowing for the clock resolution:
+ * timeout:
*/
timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
- 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec +
- _clock_res_nsec) / 1000000);
+ 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec) /
+ 1000000);
/*
* Don't allow negative timeouts:
*/
@@ -1002,9 +971,6 @@ _thread_kern_poll(int wait_reqd)
/* Unprotect the scheduling queues: */
_queue_signals = 0;
}
-
- /* Nothing to return. */
- return;
}
void
@@ -1032,7 +998,7 @@ _thread_kern_set_timeout(const struct timespec * timeout)
_thread_run->wakeup_time.tv_nsec = 0;
} else {
/* Get the current time: */
- gettimeofday(&tv, NULL);
+ GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &current_time);
/* Calculate the time for the current thread to wake up: */
@@ -1046,7 +1012,6 @@ _thread_kern_set_timeout(const struct timespec * timeout)
_thread_run->wakeup_time.tv_nsec -= 1000000000;
}
}
- return;
}
void
@@ -1059,9 +1024,6 @@ _thread_kern_sig_defer(void)
void
_thread_kern_sig_undefer(void)
{
- pthread_t pthread;
- int need_resched = 0;
-
/*
* Perform checks to yield only if we are about to undefer
* signals.
@@ -1077,33 +1039,25 @@ _thread_kern_sig_undefer(void)
/*
* Check if there are queued signals:
*/
- while (_sigq_check_reqd != 0) {
- /* Defer scheduling while we process queued signals: */
- _thread_run->sig_defer_count = 1;
-
- /* Clear the flag before checking the signal queue: */
- _sigq_check_reqd = 0;
-
- /* Dequeue and handle signals: */
- dequeue_signals();
-
- /*
- * Avoiding an unnecessary check to reschedule, check
- * to see if signal handling caused a higher priority
- * thread to become ready.
- */
- if ((need_resched == 0) &&
- (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
- (pthread->active_priority > _thread_run->active_priority))) {
- need_resched = 1;
- }
+ if (_sigq_check_reqd != 0)
+ _thread_kern_sched(NULL);
- /* Reenable signals: */
- _thread_run->sig_defer_count = 0;
- }
+ /*
+ * Check for asynchronous cancellation before delivering any
+ * pending signals:
+ */
+ if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) &&
+ ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
+ pthread_testcancel();
- /* Yield the CPU if necessary: */
- if (need_resched || _thread_run->yield_on_sig_undefer != 0) {
+ /*
+ * If there are pending signals or this thread has
+ * to yield the CPU, call the kernel scheduler:
+ *
+ * XXX - Come back and revisit the pending signal problem
+ */
+ if ((_thread_run->yield_on_sig_undefer != 0) ||
+ SIGNOTEMPTY(_thread_run->sigpend)) {
_thread_run->yield_on_sig_undefer = 0;
_thread_kern_sched(NULL);
}
@@ -1114,35 +1068,13 @@ static void
dequeue_signals(void)
{
char bufr[128];
- int i, num;
- pthread_t pthread;
+ int num;
/*
- * Enter a loop to read and handle queued signals from the
- * pthread kernel pipe:
+ * Enter a loop to clear the pthread kernel pipe:
*/
while (((num = _thread_sys_read(_thread_kern_pipe[0], bufr,
sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) {
- /*
- * The buffer read contains one byte per signal and
- * each byte is the signal number.
- */
- for (i = 0; i < num; i++) {
- if ((int) bufr[i] == _SCHED_SIGNAL) {
- /*
- * Scheduling signals shouldn't ever be
- * queued; just ignore it for now.
- */
- }
- else {
- /* Handle this signal: */
- pthread = _thread_sig_handle((int) bufr[i],
- NULL);
- if (pthread != NULL)
- _thread_sig_deliver(pthread,
- (int) bufr[i]);
- }
- }
}
if ((num < 0) && (errno != EAGAIN)) {
/*
@@ -1151,6 +1083,8 @@ dequeue_signals(void)
*/
PANIC("Unable to read from thread kernel pipe");
}
+ /* Handle any pending signals: */
+ _thread_sig_handle_pending();
}
static inline void
diff --git a/lib/libkse/thread/thr_mutex.c b/lib/libkse/thread/thr_mutex.c
index b8877f8453c9..f7662c71c951 100644
--- a/lib/libkse/thread/thr_mutex.c
+++ b/lib/libkse/thread/thr_mutex.c
@@ -79,7 +79,7 @@ static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
int
_mutex_reinit(pthread_mutex_t * mutex)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -113,7 +113,7 @@ pthread_mutex_init(pthread_mutex_t * mutex,
int protocol;
int ceiling;
pthread_mutex_t pmutex;
- int ret = 0;
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -203,7 +203,7 @@ pthread_mutex_init(pthread_mutex_t * mutex,
int
pthread_mutex_destroy(pthread_mutex_t * mutex)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL || *mutex == NULL)
ret = EINVAL;
@@ -245,7 +245,7 @@ pthread_mutex_destroy(pthread_mutex_t * mutex)
static int
init_static(pthread_mutex_t *mutex)
{
- int ret;
+ int ret;
_SPINLOCK(&static_init_lock);
@@ -262,7 +262,7 @@ init_static(pthread_mutex_t *mutex)
int
pthread_mutex_trylock(pthread_mutex_t * mutex)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -400,7 +400,7 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
int
pthread_mutex_lock(pthread_mutex_t * mutex)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -610,9 +610,8 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
* Check to see if this thread was interrupted and
* is still in the mutex queue of waiting threads:
*/
- if (_thread_run->interrupted != 0) {
+ if (_thread_run->interrupted != 0)
mutex_queue_remove(*mutex, _thread_run);
- }
/* Unlock the mutex structure: */
_SPINUNLOCK(&(*mutex)->lock);
@@ -647,7 +646,7 @@ _mutex_cv_unlock(pthread_mutex_t * mutex)
int
_mutex_cv_lock(pthread_mutex_t * mutex)
{
- int ret;
+ int ret;
if ((ret = pthread_mutex_lock(mutex)) == 0)
(*mutex)->m_refcount--;
return (ret);
@@ -656,7 +655,7 @@ _mutex_cv_lock(pthread_mutex_t * mutex)
static inline int
mutex_self_trylock(pthread_mutex_t mutex)
{
- int ret = 0;
+ int ret = 0;
switch (mutex->m_type) {
@@ -723,7 +722,7 @@ mutex_self_lock(pthread_mutex_t mutex)
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL || *mutex == NULL) {
ret = EINVAL;
@@ -1369,6 +1368,38 @@ _mutex_unlock_private(pthread_t pthread)
}
}
+void
+_mutex_lock_backout(pthread_t pthread)
+{
+ struct pthread_mutex *mutex;
+
+ /*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+ if (pthread->state == PS_MUTEX_WAIT) {
+ mutex = pthread->data.mutex;
+
+ /* Lock the mutex structure: */
+ _SPINLOCK(&mutex->lock);
+
+ mutex_queue_remove(mutex, pthread);
+
+ /* This thread is no longer waiting for the mutex: */
+ pthread->data.mutex = NULL;
+
+ /* Unlock the mutex structure: */
+ _SPINUNLOCK(&mutex->lock);
+
+ }
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
+ */
+ _thread_kern_sig_undefer();
+}
+
/*
* Dequeue a waiting thread from the head of a mutex queue in descending
* priority order.
@@ -1379,7 +1410,7 @@ mutex_queue_deq(pthread_mutex_t mutex)
pthread_t pthread;
while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
- TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+ TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
/*
@@ -1400,7 +1431,7 @@ static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
- TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+ TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
}
}
@@ -1413,18 +1444,19 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
+ PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
/*
* For the common case of all threads having equal priority,
* we perform a quick check against the priority of the thread
* at the tail of the queue.
*/
if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
- TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, qe);
+ TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
else {
tid = TAILQ_FIRST(&mutex->m_queue);
while (pthread->active_priority <= tid->active_priority)
- tid = TAILQ_NEXT(tid, qe);
- TAILQ_INSERT_BEFORE(tid, pthread, qe);
+ tid = TAILQ_NEXT(tid, sqe);
+ TAILQ_INSERT_BEFORE(tid, pthread, sqe);
}
pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
}
diff --git a/lib/libkse/thread/thr_priority_queue.c b/lib/libkse/thread/thr_priority_queue.c
index 1b9fcba09573..84c30657885a 100644
--- a/lib/libkse/thread/thr_priority_queue.c
+++ b/lib/libkse/thread/thr_priority_queue.c
@@ -66,9 +66,13 @@ static int _pq_active = 0;
PANIC(msg); \
} while (0)
#define _PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \
- if ((thrd)->flags & _PQ_IN_SCHEDQ) \
+ if (((thrd)->flags & _PQ_IN_SCHEDQ) != 0) \
PANIC(msg); \
} while (0)
+#define _PQ_ASSERT_PROTECTED(msg) \
+ PTHREAD_ASSERT((_thread_kern_in_sched != 0) || \
+ (_thread_run->sig_defer_count > 0) || \
+ (_sig_in_handler != 0), msg);
#else
@@ -79,11 +83,10 @@ static int _pq_active = 0;
#define _PQ_ASSERT_IN_WAITQ(thrd, msg)
#define _PQ_ASSERT_IN_PRIOQ(thrd, msg)
#define _PQ_ASSERT_NOT_QUEUED(thrd, msg)
-#define _PQ_CHECK_PRIO()
+#define _PQ_ASSERT_PROTECTED(msg)
#endif
-
int
_pq_alloc(pq_queue_t *pq, int minprio, int maxprio)
{
@@ -101,9 +104,7 @@ _pq_alloc(pq_queue_t *pq, int minprio, int maxprio)
else {
/* Remember the queue size: */
pq->pq_size = prioslots;
-
ret = _pq_init(pq);
-
}
return (ret);
}
@@ -142,6 +143,7 @@ _pq_remove(pq_queue_t *pq, pthread_t pthread)
_PQ_ASSERT_INACTIVE("_pq_remove: pq_active");
_PQ_SET_ACTIVE();
_PQ_ASSERT_IN_PRIOQ(pthread, "_pq_remove: Not in priority queue");
+ _PQ_ASSERT_PROTECTED("_pq_remove: prioq not protected!");
/*
* Remove this thread from priority list. Note that if
@@ -172,6 +174,7 @@ _pq_insert_head(pq_queue_t *pq, pthread_t pthread)
_PQ_SET_ACTIVE();
_PQ_ASSERT_NOT_QUEUED(pthread,
"_pq_insert_head: Already in priority queue");
+ _PQ_ASSERT_PROTECTED("_pq_insert_head: prioq not protected!");
TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
if (pq->pq_lists[prio].pl_queued == 0)
@@ -197,6 +200,7 @@ _pq_insert_tail(pq_queue_t *pq, pthread_t pthread)
_PQ_SET_ACTIVE();
_PQ_ASSERT_NOT_QUEUED(pthread,
"_pq_insert_tail: Already in priority queue");
+ _PQ_ASSERT_PROTECTED("_pq_insert_tail: prioq not protected!");
TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
if (pq->pq_lists[prio].pl_queued == 0)
@@ -221,6 +225,7 @@ _pq_first(pq_queue_t *pq)
*/
_PQ_ASSERT_INACTIVE("_pq_first: pq_active");
_PQ_SET_ACTIVE();
+ _PQ_ASSERT_PROTECTED("_pq_first: prioq not protected!");
while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) &&
(pthread == NULL)) {
@@ -250,6 +255,7 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
* Make some assertions when debugging is enabled:
*/
_PQ_ASSERT_ACTIVE("pq_insert_prio_list: pq_active");
+ _PQ_ASSERT_PROTECTED("_pq_insert_prio_list: prioq not protected!");
/*
* The priority queue is in descending priority order. Start at
@@ -270,11 +276,10 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
pq->pq_lists[prio].pl_queued = 1;
}
-#if defined(_PTHREADS_INVARIANTS)
void
_waitq_insert(pthread_t pthread)
{
- pthread_t tid;
+ pthread_t tid;
/*
* Make some assertions when debugging is enabled:
@@ -332,4 +337,3 @@ _waitq_clearactive(void)
_PQ_CLEAR_ACTIVE();
}
#endif
-#endif
diff --git a/lib/libkse/thread/thr_private.h b/lib/libkse/thread/thr_private.h
index 9d76747ff763..50e33bc181cb 100644
--- a/lib/libkse/thread/thr_private.h
+++ b/lib/libkse/thread/thr_private.h
@@ -51,6 +51,7 @@
*/
#include <setjmp.h>
#include <signal.h>
+#include <stdio.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/time.h>
@@ -60,13 +61,67 @@
#include <pthread_np.h>
/*
+ * Define machine dependent macros to get and set the stack pointer
+ * from the supported contexts. Also define a macro to set the return
+ * address in a jmp_buf context.
+ *
+ * XXX - These need to be moved into architecture dependent support files.
+ */
+#if defined(__i386__)
+#define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2]))
+#define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2]))
+#define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp))
+#define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk)
+#define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk)
+#define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk)
+#define FP_SAVE_UC(ucp) do { \
+ char *fdata; \
+ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \
+ __asm__("fnsave %0": :"m"(*fdata)); \
+} while (0)
+#define FP_RESTORE_UC(ucp) do { \
+ char *fdata; \
+ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \
+ __asm__("frstor %0": :"m"(*fdata)); \
+} while (0)
+#define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (int)(ra)
+#elif defined(__alpha__)
+#include <machine/reg.h>
+#define GET_STACK_JB(jb, stk) ((unsigned long)((jb)[0]._jb[R_SP + 4]))
+#define GET_STACK_SJB(sjb, stk) ((unsigned long)((sjb)[0]._sjb[R_SP + 4]))
+#define GET_STACK_UC(ucp, stk) ((ucp)->uc_mcontext.mc_regs[R_SP])
+#define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk)
+#define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk)
+#define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk)
+#define FP_SAVE_UC(ucp)
+#define FP_RESTORE_UC(ucp)
+#define SET_RETURN_ADDR_JB(jb, ra) do { \
+ (jb)[0]._jb[2] = (long)(ra); \
+ (jb)[0]._jb[R_RA + 4] = 0; \
+ (jb)[0]._jb[R_T12 + 4] = (long)(ra); \
+} while (0)
+#else
+#error "Don't recognize this architecture!"
+#endif
+
+/*
* Kernel fatal error handler macro.
*/
#define PANIC(string) _thread_exit(__FILE__,__LINE__,string)
+
/* Output debug messages like this: */
-#define stdout_debug(_x) _thread_sys_write(1,_x,strlen(_x));
-#define stderr_debug(_x) _thread_sys_write(2,_x,strlen(_x));
+#define stdout_debug(args...) do { \
+ char buf[128]; \
+ snprintf(buf, sizeof(buf), ##args); \
+ _thread_sys_write(1, buf, strlen(buf)); \
+} while (0)
+#define stderr_debug(args...) do { \
+ char buf[128]; \
+ snprintf(buf, sizeof(buf), ##args); \
+ _thread_sys_write(2, buf, strlen(buf)); \
+} while (0)
+
/*
@@ -80,34 +135,13 @@
/*
* Waiting queue manipulation macros (using pqe link):
*/
-#if defined(_PTHREADS_INVARIANTS)
#define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd)
#define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd)
+
+#if defined(_PTHREADS_INVARIANTS)
#define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive()
#define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive()
#else
-#define PTHREAD_WAITQ_REMOVE(thrd) do { \
- TAILQ_REMOVE(&_waitingq,thrd,pqe); \
- (thrd)->flags &= ~PTHREAD_FLAGS_IN_WAITQ; \
-} while (0)
-
-#define PTHREAD_WAITQ_INSERT(thrd) do { \
- if ((thrd)->wakeup_time.tv_sec == -1) \
- TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe); \
- else { \
- pthread_t tid = TAILQ_FIRST(&_waitingq); \
- while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) && \
- ((tid->wakeup_time.tv_sec < (thrd)->wakeup_time.tv_sec) || \
- ((tid->wakeup_time.tv_sec == (thrd)->wakeup_time.tv_sec) && \
- (tid->wakeup_time.tv_nsec <= (thrd)->wakeup_time.tv_nsec)))) \
- tid = TAILQ_NEXT(tid, pqe); \
- if (tid == NULL) \
- TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe); \
- else \
- TAILQ_INSERT_BEFORE(tid,thrd,pqe); \
- } \
- (thrd)->flags |= PTHREAD_FLAGS_IN_WAITQ; \
-} while (0)
#define PTHREAD_WAITQ_CLEARACTIVE()
#define PTHREAD_WAITQ_SETACTIVE()
#endif
@@ -139,6 +173,14 @@
* called with preemption deferred (see thread_kern_sched_[un]defer).
*/
#if defined(_PTHREADS_INVARIANTS)
+#include <assert.h>
+#define PTHREAD_ASSERT(cond, msg) do { \
+ if (!(cond)) \
+ PANIC(msg); \
+} while (0)
+#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \
+ PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \
+ "Illegal call from signal handler");
#define PTHREAD_NEW_STATE(thrd, newstate) do { \
if (_thread_kern_new_state != 0) \
PANIC("Recursive PTHREAD_NEW_STATE"); \
@@ -156,6 +198,8 @@
PTHREAD_SET_STATE(thrd, newstate); \
} while (0)
#else
+#define PTHREAD_ASSERT(cond, msg)
+#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd)
#define PTHREAD_NEW_STATE(thrd, newstate) do { \
if ((thrd)->state != newstate) { \
if ((thrd)->state == PS_RUNNING) { \
@@ -379,21 +423,52 @@ enum pthread_susp {
* almost entirely on this stack.
*/
#define PTHREAD_STACK_INITIAL 0x100000
-/* Address immediately beyond the beginning of the initial thread stack. */
-#define PTHREAD_DEFAULT_PRIORITY 64
-#define PTHREAD_MAX_PRIORITY 126
+
+/*
+ * Define the different priority ranges. All applications have thread
+ * priorities constrained within 0-31. The threads library raises the
+ * priority when delivering signals in order to ensure that signal
+ * delivery happens (from the POSIX spec) "as soon as possible".
+ * In the future, the threads library will also be able to map specific
+ * threads into real-time (cooperating) processes or kernel threads.
+ * The RT and SIGNAL priorities will be used internally and added to
+ * thread base priorities so that the scheduling queue can handle both
+ * normal and RT priority threads with and without signal handling.
+ *
+ * The approach taken is that, within each class, signal delivery
+ * always has priority over thread execution.
+ */
+#define PTHREAD_DEFAULT_PRIORITY 15
#define PTHREAD_MIN_PRIORITY 0
-#define _POSIX_THREAD_ATTR_STACKSIZE
+#define PTHREAD_MAX_PRIORITY 31 /* 0x1F */
+#define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */
+#define PTHREAD_RT_PRIORITY 64 /* 0x40 */
+#define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY
+#define PTHREAD_LAST_PRIORITY \
+ (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY)
+#define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY)
/*
- * Clock resolution in nanoseconds.
+ * Clock resolution in microseconds.
*/
-#define CLOCK_RES_NSEC 10000000
+#define CLOCK_RES_USEC 10000
/*
* Time slice period in microseconds.
*/
-#define TIMESLICE_USEC 100000
+#define TIMESLICE_USEC 20000
+
+/*
+ * Define a thread-safe macro to get the current time of day
+ * which is updated at regular intervals by the scheduling signal
+ * handler.
+ */
+#define GET_CURRENT_TOD(tv) \
+ do { \
+ tv.tv_sec = _sched_tod.tv_sec; \
+ tv.tv_usec = _sched_tod.tv_usec; \
+ } while (tv.tv_sec != _sched_tod.tv_sec)
+
struct pthread_key {
spinlock_t lock;
@@ -487,8 +562,10 @@ union pthread_wait_data {
short branch; /* Line number, for debugging. */
char *fname; /* Source file name for debugging.*/
} fd;
- struct pthread_poll_data * poll_data;
+ FILE *fp;
+ struct pthread_poll_data *poll_data;
spinlock_t *spinlock;
+ struct pthread *thread;
};
/*
@@ -497,6 +574,83 @@ union pthread_wait_data {
*/
typedef void (*thread_continuation_t) (void *);
+struct pthread_state_data {
+ int psd_interrupted;
+ sigset_t psd_sigmask;
+ enum pthread_state psd_state;
+ int psd_flags;
+ struct timespec psd_wakeup_time;
+ union pthread_wait_data psd_wait_data;
+ /* XXX - What about thread->timeout and/or thread->error? */
+};
+
+
+/*
+ * Normally thread contexts are stored as jmp_bufs via _setjmp()/_longjmp(),
+ * but they may also be sigjmp_buf and ucontext_t. When a thread is
+ * interrupted by a signal, its context is saved as a ucontext_t. An
+ * application is also free to use [_]longjmp()/[_]siglongjmp() to jump
+ * between contexts within the same thread. Future support will also
+ * include setcontext()/getcontext().
+ *
+ * Define an enumerated type that can identify the 4 different context
+ * types.
+ */
+typedef enum {
+ CTX_JB_NOSIG, /* context is jmp_buf without saved sigset */
+ CTX_JB, /* context is jmp_buf (with saved sigset) */
+ CTX_SJB, /* context is sigjmp_buf (with saved sigset) */
+ CTX_UC /* context is ucontext_t (with saved sigset) */
+} thread_context_t;
+
+/*
+ * There are 2 basic contexts that a frame may contain at any
+ * one time:
+ *
+ * o ctx - The context that the thread should return to after normal
+ * completion of the signal handler.
+ * o sig_jb - The context just before the signal handler is invoked.
+ * Attempts at abnormal returns from user supplied signal handlers
+ * will return back to the signal context to perform any necessary
+ * cleanup.
+ */
+struct pthread_signal_frame {
+ /*
+ * This stores the thread's state before the signal.
+ */
+ struct pthread_state_data saved_state;
+
+ /* Beginning (bottom) of threads stack frame for this signal. */
+ unsigned long stackp;
+
+ /*
+ * Threads return context; ctxtype identifies the type of context.
+ * For signal frame 0, these point to the context storage area
+ * within the pthread structure. When handling signals (frame > 0),
+ * these point to a context storage area that is allocated off the
+ * threads stack.
+ */
+ union {
+ jmp_buf jb;
+ sigjmp_buf sigjb;
+ ucontext_t uc;
+ } ctx;
+ thread_context_t ctxtype;
+ int longjmp_val;
+
+ /* Threads "jump out of signal handler" destination frame. */
+ int dst_frame;
+
+ /*
+ * Used to return back to the signal handling frame in case
+ * the application tries to change contexts from the handler.
+ */
+ jmp_buf *sig_jb;
+
+ int signo; /* signal, arg 1 to sighandler */
+ int sig_has_args; /* use signal args if true */
+};
+
/*
* Thread structure.
*/
@@ -530,54 +684,19 @@ struct pthread {
void *stack;
struct pthread_attr attr;
-#if (defined(__FreeBSD__) || defined(__NetBSD__)) && defined(__i386__)
- /*
- * Saved floating point registers on systems where they are not
- * saved in the signal context.
- */
- char saved_fp[108];
-#endif
-
- /*
- * Saved signal context used in call to sigreturn by
- * _thread_kern_sched if sig_saved is TRUE.
- */
- ucontext_t saved_sigcontext;
-
- /*
- * Saved jump buffer used in call to longjmp by _thread_kern_sched
- * if sig_saved is FALSE.
- */
- jmp_buf saved_jmp_buf;
- jmp_buf *sighandler_jmp_buf;
-
- /*
- * Saved jump buffers for use when doing nested [sig|_]longjmp()s, as
- * when doing signal delivery.
- */
- union {
- jmp_buf jmp;
- sigjmp_buf sigjmp;
- } nested_jmp;
- int longjmp_val;
-
-#define JMPFLAGS_NONE 0x00
-#define JMPFLAGS_LONGJMP 0x01
-#define JMPFLAGS__LONGJMP 0x02
-#define JMPFLAGS_SIGLONGJMP 0x04
-#define JMPFLAGS_DEFERRED 0x08
- int jmpflags;
-
- /*
- * TRUE if the last state saved was a signal context. FALSE if the
- * last state saved was a jump buffer.
- */
- int sig_saved;
-
/*
* Used for tracking delivery of nested signal handlers.
+ * Signal frame 0 is used for normal context (when no
+ * signal handlers are active for the thread). Frame
+ * 1 is used as the context for the first signal, and
+ * frames 2 .. NSIG-1 are used when additional signals
+ * arrive interrupting already active signal handlers.
*/
- int signal_nest_level;
+ struct pthread_signal_frame *sigframes[NSIG];
+ struct pthread_signal_frame sigframe0;
+ struct pthread_signal_frame *curframe;
+ int sigframe_count;
+ int sigframe_done;
/*
* Cancelability flags - the lower 2 bits are used by cancel
@@ -588,7 +707,7 @@ struct pthread {
#define PTHREAD_CANCEL_NEEDED 0x0010
int cancelflags;
- enum pthread_susp suspended;
+ enum pthread_susp suspended;
thread_continuation_t continuation;
@@ -597,16 +716,16 @@ struct pthread {
*/
sigset_t sigmask;
sigset_t sigpend;
+ int check_pending;
/* Thread state: */
enum pthread_state state;
- enum pthread_state oldstate;
- /* Time that this thread was last made active. */
- struct timeval last_active;
+ /* Scheduling clock when this thread was last made active. */
+ long last_active;
- /* Time that this thread was last made inactive. */
- struct timeval last_inactive;
+ /* Scheduling clock when this thread was last made inactive. */
+ long last_inactive;
/*
* Number of microseconds accumulated by this thread when
@@ -615,12 +734,6 @@ struct pthread {
long slice_usec;
/*
- * Incremental priority accumulated by thread while it is ready to
- * run but is denied being run.
- */
- int inc_prio;
-
- /*
* Time to wake up thread. This is used for sleeping threads and
* for any operation which may time out (such as select).
*/
@@ -640,8 +753,7 @@ struct pthread {
/*
* The current thread can belong to only one scheduling queue at
- * a time (ready or waiting queue). It can also belong to (only)
- * one of:
+ * a time (ready or waiting queue). It can also belong to:
*
* o A queue of threads waiting for a mutex
* o A queue of threads waiting for a condition variable
@@ -651,15 +763,21 @@ struct pthread {
* o A queue of threads needing work done by the kernel thread
* (waiting for a spinlock or file I/O)
*
+ * It is possible for a thread to belong to more than one of the
+ * above queues if it is handling a signal. A thread may only
+ * enter a mutex, condition variable, or join queue when it is
+ * not being called from a signal handler. If a thread is a
+ * member of one of these queues when a signal handler is invoked,
+ * it must remain in the queue. For this reason, the links for
+ * these queues must not be (re)used for other queues.
+ *
* Use pqe for the scheduling queue link (both ready and waiting),
- * and qe for other links.
+ * sqe for synchronization (mutex, condition variable, and join)
+ * queue links, and qe for all other links.
*/
-
- /* Priority queue entry for this thread: */
- TAILQ_ENTRY(pthread) pqe;
-
- /* Queue entry for this thread: */
- TAILQ_ENTRY(pthread) qe;
+ TAILQ_ENTRY(pthread) pqe; /* priority queue link */
+ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */
+ TAILQ_ENTRY(pthread) qe; /* all other queues link */
/* Wait data. */
union pthread_wait_data data;
@@ -694,14 +812,17 @@ struct pthread {
int flags;
#define PTHREAD_FLAGS_PRIVATE 0x0001
#define PTHREAD_EXITING 0x0002
-#define PTHREAD_FLAGS_IN_CONDQ 0x0004 /* in condition queue using qe link*/
-#define PTHREAD_FLAGS_IN_WORKQ 0x0008 /* in work queue using qe link */
-#define PTHREAD_FLAGS_IN_WAITQ 0x0010 /* in waiting queue using pqe link */
-#define PTHREAD_FLAGS_IN_PRIOQ 0x0020 /* in priority queue using pqe link */
-#define PTHREAD_FLAGS_IN_MUTEXQ 0x0040 /* in mutex queue using qe link */
-#define PTHREAD_FLAGS_IN_FILEQ 0x0080 /* in file lock queue using qe link */
-#define PTHREAD_FLAGS_IN_FDQ 0x0100 /* in fd lock queue using qe link */
-#define PTHREAD_FLAGS_TRACE 0x0200 /* for debugging purposes */
+#define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */
+#define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */
+#define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */
+#define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */
+#define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */
+#define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/
+#define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */
+#define PTHREAD_FLAGS_IN_JOINQ 0x0200 /* in join queue using sqe link */
+#define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */
+#define PTHREAD_FLAGS_IN_SYNCQ \
+ (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ | PTHREAD_FLAGS_IN_JOINQ)
/*
* Base priority is the user setable and retrievable priority
@@ -820,14 +941,31 @@ SCLASS int _thread_kern_in_sched
;
#endif
-/* Last time that an incremental priority update was performed: */
-SCLASS struct timeval kern_inc_prio_time
+SCLASS int _sig_in_handler
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0;
+#else
+;
+#endif
+
+/* Time of day at last scheduling timer signal: */
+SCLASS struct timeval volatile _sched_tod
#ifdef GLOBAL_PTHREAD_PRIVATE
= { 0, 0 };
#else
;
#endif
+/*
+ * Current scheduling timer ticks; used as resource usage.
+ */
+SCLASS unsigned int volatile _sched_ticks
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0;
+#else
+;
+#endif
+
/* Dead threads: */
SCLASS TAILQ_HEAD(, pthread) _dead_list
#ifdef GLOBAL_PTHREAD_PRIVATE
@@ -905,9 +1043,9 @@ SCLASS int _thread_dtablesize /* Descriptor table size. */
;
#endif
-SCLASS int _clock_res_nsec /* Clock resolution in nsec. */
+SCLASS int _clock_res_usec /* Clock resolution in usec. */
#ifdef GLOBAL_PTHREAD_PRIVATE
-= CLOCK_RES_NSEC;
+= CLOCK_RES_USEC;
#else
;
#endif
@@ -937,9 +1075,10 @@ SCLASS struct sigaction _thread_sigact[NSIG];
SCLASS int _thread_dfl_count[NSIG];
/*
- * Pending signals for this process.
+ * Pending signals and mask for this process:
*/
SCLASS sigset_t _process_sigpending;
+SCLASS sigset_t _process_sigmask;
/*
* Scheduling queues:
@@ -959,6 +1098,21 @@ SCLASS volatile int _spinblock_count
#endif
;
+/* Used to maintain pending and active signals: */
+struct sigstatus {
+ int pending; /* Is this a pending signal? */
+ int blocked; /*
+ * A handler is currently active for
+ * this signal; ignore subsequent
+ * signals until the handler is done.
+ */
+ int signo; /* arg 1 to signal handler */
+ siginfo_t siginfo; /* arg 2 to signal handler */
+ ucontext_t uc; /* arg 3 to signal handler */
+};
+
+SCLASS struct sigstatus _thread_sigq[NSIG];
+
/* Indicates that the signal queue needs to be checked. */
SCLASS volatile int _sigq_check_reqd
#ifdef GLOBAL_PTHREAD_PRIVATE
@@ -998,6 +1152,18 @@ SCLASS void * _next_stack
#endif
;
+/*
+ * Declare the kernel scheduler jump buffer and stack:
+ */
+SCLASS jmp_buf _thread_kern_sched_jb;
+
+SCLASS void * _thread_kern_sched_stack
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= NULL
+#endif
+;
+
+
/* Used for _PTHREADS_INVARIANTS checking. */
SCLASS int _thread_kern_new_state
#ifdef GLOBAL_PTHREAD_PRIVATE
@@ -1025,15 +1191,19 @@ __BEGIN_DECLS
char *__ttyname_basic(int);
char *__ttyname_r_basic(int, char *, size_t);
char *ttyname_r(int, char *, size_t);
+void _cond_wait_backout(pthread_t);
+void _fd_lock_backout(pthread_t);
int _find_dead_thread(pthread_t);
int _find_thread(pthread_t);
+void _flockfile_backout(pthread_t);
void _funlock_owned(pthread_t);
+void _join_backout(pthread_t);
int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t);
int _thread_fd_lock(int, int, struct timespec *);
int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno);
-void _dispatch_signals(void);
int _mutex_cv_lock(pthread_mutex_t *);
int _mutex_cv_unlock(pthread_mutex_t *);
+void _mutex_lock_backout(pthread_t);
void _mutex_notify_priochange(pthread_t);
int _mutex_reinit(pthread_mutex_t *);
void _mutex_unlock_private(pthread_t);
@@ -1044,14 +1214,15 @@ void _pq_remove(struct pq_queue *pq, struct pthread *);
void _pq_insert_head(struct pq_queue *pq, struct pthread *);
void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
struct pthread *_pq_first(struct pq_queue *pq);
-#if defined(_PTHREADS_INVARIANTS)
void _waitq_insert(pthread_t pthread);
void _waitq_remove(pthread_t pthread);
+#if defined(_PTHREADS_INVARIANTS)
void _waitq_setactive(void);
void _waitq_clearactive(void);
#endif
void _thread_exit(char *, int, char *);
void _thread_exit_cleanup(void);
+void _thread_exit_finish(void);
void _thread_fd_unlock(int, int);
void _thread_fd_unlock_debug(int, int, char *, int);
void _thread_fd_unlock_owned(pthread_t);
@@ -1060,20 +1231,23 @@ void _thread_cleanupspecific(void);
void _thread_dump_info(void);
void _thread_init(void);
void _thread_kern_sched(ucontext_t *);
-void _thread_kern_sched_state(enum pthread_state,char *fname,int lineno);
+void _thread_kern_scheduler(void);
+void _thread_kern_sched_frame(int frame);
+void _thread_kern_sched_sig(void);
+void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno);
void _thread_kern_sched_state_unlock(enum pthread_state state,
spinlock_t *lock, char *fname, int lineno);
void _thread_kern_set_timeout(const struct timespec *);
void _thread_kern_sig_defer(void);
void _thread_kern_sig_undefer(void);
-void _thread_sig_handler(int, int, ucontext_t *);
-pthread_t _thread_sig_handle(int, ucontext_t *);
-void _thread_sig_init(void);
+void _thread_sig_handler(int, siginfo_t *, ucontext_t *);
+void _thread_sig_check_pending(pthread_t pthread);
+void _thread_sig_handle_pending(void);
void _thread_sig_send(pthread_t pthread, int sig);
-void _thread_sig_deliver(pthread_t pthread, int sig);
+void _thread_sig_wrapper(void);
+int _thread_sigframe_find(pthread_t pthread, void *stackp);
void _thread_start(void);
-void _thread_start_sig_handler(void);
-void _thread_seterrno(pthread_t,int);
+void _thread_seterrno(pthread_t, int);
int _thread_fd_table_init(int fd);
pthread_addr_t _thread_gc(pthread_addr_t);
void _thread_enter_cancellation_point(void);
diff --git a/lib/libkse/thread/thr_sem.c b/lib/libkse/thread/thr_sem.c
index 2dcf72223fc3..eb4291906f09 100644
--- a/lib/libkse/thread/thr_sem.c
+++ b/lib/libkse/thread/thr_sem.c
@@ -29,6 +29,7 @@
* $FreeBSD$
*/
+#include <stdlib.h>
#include <errno.h>
#ifdef _THREAD_SAFE
#include <semaphore.h>
diff --git a/lib/libkse/thread/thr_setschedparam.c b/lib/libkse/thread/thr_setschedparam.c
index bce965fe6603..9a44ad7a1426 100644
--- a/lib/libkse/thread/thr_setschedparam.c
+++ b/lib/libkse/thread/thr_setschedparam.c
@@ -59,7 +59,8 @@ pthread_setschedparam(pthread_t pthread, int policy,
*/
_thread_kern_sig_defer();
- if (param->sched_priority != pthread->base_priority) {
+ if (param->sched_priority !=
+ PTHREAD_BASE_PRIORITY(pthread->base_priority)) {
/*
* Remove the thread from its current priority
* queue before any adjustments are made to its
@@ -72,6 +73,8 @@ pthread_setschedparam(pthread_t pthread, int policy,
}
/* Set the thread base priority: */
+ pthread->base_priority &=
+ (PTHREAD_SIGNAL_PRIORITY | PTHREAD_RT_PRIORITY);
pthread->base_priority = param->sched_priority;
/* Recalculate the active priority: */
diff --git a/lib/libkse/thread/thr_sig.c b/lib/libkse/thread/thr_sig.c
index 86ded7f12cbb..8a9aadf177f6 100644
--- a/lib/libkse/thread/thr_sig.c
+++ b/lib/libkse/thread/thr_sig.c
@@ -44,46 +44,47 @@
#include "pthread_private.h"
/* Prototypes: */
-static void thread_sig_check_state(pthread_t pthread, int sig);
-static void thread_sig_finish_longjmp(void *arg);
-static void handle_state_change(pthread_t pthread);
-
+static void thread_sig_add(pthread_t pthread, int sig, int has_args);
+static pthread_t thread_sig_find(int sig);
+static void thread_sig_handle_special(int sig);
+static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp);
+static void thread_sigframe_add(pthread_t thread, int sig);
+static void thread_sigframe_leave(pthread_t thread, int frame);
+static void thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf);
+static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf);
+
+/* #define DEBUG_SIGNAL */
+#ifdef DEBUG_SIGNAL
+#define DBG_MSG stdout_debug
+#else
+#define DBG_MSG(x...)
+#endif
-/* Static variables: */
-static spinlock_t signal_lock = _SPINLOCK_INITIALIZER;
-static unsigned int pending_sigs[NSIG];
-static unsigned int handled_sigs[NSIG];
-static int volatile check_pending = 0;
-static int volatile check_waiting = 0;
+#if defined(_PTHREADS_INVARIANTS)
+#define SIG_SET_ACTIVE() _sig_in_handler = 1
+#define SIG_SET_INACTIVE() _sig_in_handler = 0
+#else
+#define SIG_SET_ACTIVE()
+#define SIG_SET_INACTIVE()
+#endif
-/* Initialize signal handling facility: */
void
-_thread_sig_init(void)
+_thread_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
{
- int i;
+ pthread_t pthread;
+ int current_frame;
+ char c;
- /* Clear pending and handled signal counts: */
- for (i = 1; i < NSIG; i++) {
- pending_sigs[i - 1] = 0;
- handled_sigs[i - 1] = 0;
- }
-
- /* Clear the lock: */
- signal_lock.access_lock = 0;
-
- /* Clear the process pending signals: */
- sigemptyset(&_process_sigpending);
-}
-
-void
-_thread_sig_handler(int sig, int code, ucontext_t * scp)
-{
- pthread_t pthread, pthread_next;
- int i;
- char c;
+ if (ucp == NULL)
+ PANIC("Thread signal handler received null context");
+ DBG_MSG("Got signal %d, current thread %p\n", sig, _thread_run);
/* Check if an interval timer signal: */
if (sig == _SCHED_SIGNAL) {
+ /* Update the scheduling clock: */
+ gettimeofday((struct timeval *)&_sched_tod, NULL);
+ _sched_ticks++;
+
if (_thread_kern_in_sched != 0) {
/*
* The scheduler is already running; ignore this
@@ -97,14 +98,18 @@ _thread_sig_handler(int sig, int code, ucontext_t * scp)
*/
else if (_thread_run->sig_defer_count > 0)
_thread_run->yield_on_sig_undefer = 1;
-
else {
/*
+ * Save the context of the currently running thread:
+ */
+ thread_sig_savecontext(_thread_run, ucp);
+
+ /*
* Schedule the next thread. This function is not
* expected to return because it will do a longjmp
* instead.
*/
- _thread_kern_sched(scp);
+ _thread_kern_sched(ucp);
/*
* This point should not be reached, so abort the
@@ -118,8 +123,8 @@ _thread_sig_handler(int sig, int code, ucontext_t * scp)
* is accessing the scheduling queues or if there is a currently
* running thread that has deferred signals.
*/
- else if ((_queue_signals != 0) || ((_thread_kern_in_sched == 0) &&
- (_thread_run->sig_defer_count > 0))) {
+ else if ((_thread_kern_in_sched != 0) ||
+ (_thread_run->sig_defer_count > 0)) {
/* Cast the signal number to a character variable: */
c = sig;
@@ -127,117 +132,150 @@ _thread_sig_handler(int sig, int code, ucontext_t * scp)
* Write the signal number to the kernel pipe so that it will
* be ready to read when this signal handler returns.
*/
- _thread_sys_write(_thread_kern_pipe[1], &c, 1);
+ if (_queue_signals != 0) {
+ _thread_sys_write(_thread_kern_pipe[1], &c, 1);
+ DBG_MSG("Got signal %d, queueing to kernel pipe\n", sig);
+ }
+ if (_thread_sigq[sig - 1].blocked == 0) {
+ DBG_MSG("Got signal %d, adding to _thread_sigq\n", sig);
+ /*
+ * Do not block this signal; it will be blocked
+ * when the pending signals are run down.
+ */
+ /* _thread_sigq[sig - 1].blocked = 1; */
- /* Indicate that there are queued signals in the pipe. */
- _sigq_check_reqd = 1;
- } else {
- if (_atomic_lock(&signal_lock.access_lock)) {
- /* There is another signal handler running: */
- pending_sigs[sig - 1]++;
- check_pending = 1;
+ /*
+ * Queue the signal, saving siginfo and sigcontext
+ * (ucontext).
+ *
+ * XXX - Do we need to copy siginfo and ucp?
+ */
+ _thread_sigq[sig - 1].signo = sig;
+ if (info != NULL)
+ memcpy(&_thread_sigq[sig - 1].siginfo, info,
+ sizeof(*info));
+ memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp));
+
+ /* Indicate that there are queued signals: */
+ _thread_sigq[sig - 1].pending = 1;
+ _sigq_check_reqd = 1;
}
- else {
- /* It's safe to handle the signal now. */
- pthread = _thread_sig_handle(sig, scp);
+ /* These signals need special handling: */
+ else if (sig == SIGCHLD || sig == SIGTSTP ||
+ sig == SIGTTIN || sig == SIGTTOU) {
+ _thread_sigq[sig - 1].pending = 1;
+ _thread_sigq[sig - 1].signo = sig;
+ _sigq_check_reqd = 1;
+ }
+ else
+ DBG_MSG("Got signal %d, ignored.\n", sig);
+ }
+ /*
+ * The signal handlers should have been installed so that they
+ * cannot be interrupted by other signals.
+ */
+ else if (_thread_sigq[sig - 1].blocked == 0) {
+ /* The signal is not blocked; handle the signal: */
+ current_frame = _thread_run->sigframe_count;
- /* Reset the pending and handled count back to 0: */
- pending_sigs[sig - 1] = 0;
- handled_sigs[sig - 1] = 0;
+ /*
+ * Ignore subsequent occurrences of this signal
+ * until the current signal is handled:
+ */
+ _thread_sigq[sig - 1].blocked = 1;
- if (pthread == NULL)
- signal_lock.access_lock = 0;
- else {
- sigaddset(&pthread->sigmask, sig);
+ /* This signal will be handled; clear the pending flag: */
+ _thread_sigq[sig - 1].pending = 0;
- /*
- * Make sure not to deliver the same signal to
- * the thread twice. sigpend is potentially
- * modified by the call chain
- * _thread_sig_handle() -->
- * thread_sig_check_state(), which can happen
- * just above.
- */
- if (sigismember(&pthread->sigpend, sig))
- sigdelset(&pthread->sigpend, sig);
+ /*
+ * Save siginfo and sigcontext (ucontext).
+ *
+ * XXX - Do we need to copy siginfo and ucp?
+ */
+ _thread_sigq[sig - 1].signo = sig;
- signal_lock.access_lock = 0;
- _thread_sig_deliver(pthread, sig);
- sigdelset(&pthread->sigmask, sig);
- }
- }
+ if (info != NULL)
+ memcpy(&_thread_sigq[sig - 1].siginfo, info,
+ sizeof(*info));
+ memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp));
+ SIG_SET_ACTIVE();
- /* Enter a loop to process pending signals: */
- while ((check_pending != 0) &&
- (_atomic_lock(&signal_lock.access_lock) == 0)) {
- check_pending = 0;
- for (i = 1; i < NSIG; i++) {
- if (pending_sigs[i - 1] > handled_sigs[i - 1]) {
- pending_sigs[i - 1] = handled_sigs[i - 1];
- pthread = _thread_sig_handle(i, scp);
- if (pthread != NULL) {
- sigaddset(&pthread->sigmask, i);
- /* Save the old state: */
- pthread->oldstate = pthread->state;
- signal_lock.access_lock = 0;
- _thread_sig_deliver(pthread, i);
- sigdelset(&pthread->sigmask, i);
- if (_atomic_lock(&signal_lock.access_lock)) {
- check_pending = 1;
- /*
- * Have the lock holder take care
- * of any state changes:
- */
- if (pthread->state != pthread->oldstate)
- check_waiting = 1;
- return;
- }
- if (pthread->state != pthread->oldstate)
- handle_state_change(pthread);
- }
- }
- }
- while (check_waiting != 0) {
- check_waiting = 0;
- /*
- * Enter a loop to wake up all threads waiting
- * for a process to complete:
- */
- for (pthread = TAILQ_FIRST(&_waitingq);
- pthread != NULL; pthread = pthread_next) {
- pthread_next = TAILQ_NEXT(pthread, pqe);
- if (pthread->state == PS_RUNNING)
- handle_state_change(pthread);
- }
- }
- /* Release the lock: */
- signal_lock.access_lock = 0;
+ /* Handle special signals: */
+ thread_sig_handle_special(sig);
+
+ if ((pthread = thread_sig_find(sig)) != NULL) {
+ DBG_MSG("Got signal %d, adding frame to thread %p\n",
+ sig, pthread);
+ /*
+ * A thread was found that can handle the signal.
+ * Save the context of the currently running thread
+ * so that we can switch to another thread without
+ * losing track of where the current thread left off.
+ * This also applies if the current thread is the
+ * thread to be signaled.
+ */
+ thread_sig_savecontext(_thread_run, ucp);
+
+ /* Setup the target thread to receive the signal: */
+ thread_sig_add(pthread, sig, /*has_args*/ 1);
+
+ /* Take a peek at the next ready to run thread: */
+ pthread = PTHREAD_PRIOQ_FIRST();
+ DBG_MSG("Finished adding frame, head of prio list %p\n",
+ pthread);
}
+ else
+ DBG_MSG("No thread to handle signal %d\n", sig);
+ SIG_SET_INACTIVE();
/*
- * Check to see if the current thread performed a
- * [sig|_]longjmp() out of a signal handler.
+ * Switch to a different context if the currently running
+ * thread takes a signal, or if another thread takes a
+ * signal and the currently running thread is not in a
+ * signal handler.
*/
- if ((_thread_run->jmpflags & (JMPFLAGS_LONGJMP |
- JMPFLAGS__LONGJMP)) != 0) {
- _thread_run->jmpflags = JMPFLAGS_NONE;
- __longjmp(_thread_run->nested_jmp.jmp,
- _thread_run->longjmp_val);
- } else if ((_thread_run->jmpflags & JMPFLAGS_SIGLONGJMP) != 0) {
- _thread_run->jmpflags = JMPFLAGS_NONE;
- __siglongjmp(_thread_run->nested_jmp.sigjmp,
- _thread_run->longjmp_val);
+ if ((_thread_run->sigframe_count > current_frame) ||
+ ((pthread != NULL) &&
+ (pthread->active_priority > _thread_run->active_priority))) {
+ /* Enter the kernel scheduler: */
+ DBG_MSG("Entering scheduler from signal handler\n");
+ _thread_kern_sched(ucp);
}
}
+ else {
+ SIG_SET_ACTIVE();
+ thread_sig_handle_special(sig);
+ SIG_SET_INACTIVE();
+ }
}
+static void
+thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp)
+{
+ struct pthread_signal_frame *psf;
+
+ psf = _thread_run->curframe;
+
+ memcpy(&psf->ctx.uc, ucp, sizeof(*ucp));
+
+ /* XXX - Save FP registers too? */
+ FP_SAVE_UC(&psf->ctx.uc);
+
+ /* Mark the context saved as a ucontext: */
+ psf->ctxtype = CTX_UC;
+}
+
+/*
+ * Find a thread that can handle the signal.
+ */
pthread_t
-_thread_sig_handle(int sig, ucontext_t * scp)
+thread_sig_find(int sig)
{
- int i, handler_installed;
+ int handler_installed;
pthread_t pthread, pthread_next;
pthread_t suspended_thread, signaled_thread;
+ DBG_MSG("Looking for thread to handle signal %d\n", sig);
/* Check if the signal requires a dump of thread information: */
if (sig == SIGINFO)
/* Dump thread information to file: */
@@ -249,77 +287,22 @@ _thread_sig_handle(int sig, ucontext_t * scp)
* This shouldn't ever occur (should this panic?).
*/
} else {
- /* Check if a child has terminated: */
- if (sig == SIGCHLD) {
- /*
- * Go through the file list and set all files
- * to non-blocking again in case the child
- * set some of them to block. Sigh.
- */
- for (i = 0; i < _thread_dtablesize; i++) {
- /* Check if this file is used: */
- if (_thread_fd_table[i] != NULL) {
- /*
- * Set the file descriptor to
- * non-blocking:
- */
- _thread_sys_fcntl(i, F_SETFL,
- _thread_fd_table[i]->flags |
- O_NONBLOCK);
- }
- }
- /*
- * Enter a loop to wake up all threads waiting
- * for a process to complete:
- */
- for (pthread = TAILQ_FIRST(&_waitingq);
- pthread != NULL; pthread = pthread_next) {
- /*
- * Grab the next thread before possibly
- * destroying the link entry:
- */
- pthread_next = TAILQ_NEXT(pthread, pqe);
-
- /*
- * If this thread is waiting for a child
- * process to complete, wake it up:
- */
- if (pthread->state == PS_WAIT_WAIT) {
- /* Make the thread runnable: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
-
- /* Return the signal number: */
- pthread->signo = sig;
- }
- }
- }
-
- /*
- * POSIX says that pending SIGCONT signals are
- * discarded when one of these signals occurs.
- */
- if (sig == SIGTSTP || sig == SIGTTIN || sig == SIGTTOU) {
- /*
- * Enter a loop to discard pending SIGCONT
- * signals:
- */
- TAILQ_FOREACH(pthread, &_thread_list, tle) {
- sigdelset(&pthread->sigpend,SIGCONT);
- }
- }
-
/*
* Enter a loop to look for threads that have the signal
* unmasked. POSIX specifies that a thread in a sigwait
* will get the signal over any other threads. Second
- * preference will be threads in in a sigsuspend. If
- * none of the above, then the signal is delivered to the
- * first thread we find. Note that if a custom handler
- * is not installed, the signal only affects threads in
- * sigwait.
+ * preference will be threads in a sigsuspend. Third
+ * preference will be the current thread. If none of the
+ * above, then the signal is delivered to the first thread
+ * that is found. Note that if a custom handler is not
+ * installed, the signal only affects threads in sigwait.
*/
suspended_thread = NULL;
- signaled_thread = NULL;
+ if ((_thread_run != &_thread_kern_thread) &&
+ !sigismember(&_thread_run->sigmask, sig))
+ signaled_thread = _thread_run;
+ else
+ signaled_thread = NULL;
if ((_thread_sigact[sig - 1].sa_handler == SIG_IGN) ||
(_thread_sigact[sig - 1].sa_handler == SIG_DFL))
handler_installed = 0;
@@ -338,6 +321,13 @@ _thread_sig_handle(int sig, ucontext_t * scp)
sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ /*
+ * A signal handler is not invoked for threads
+ * in sigwait. Clear the blocked and pending
+ * flags.
+ */
+ _thread_sigq[sig - 1].blocked = 0;
+ _thread_sigq[sig - 1].pending = 0;
/* Return the signal number: */
pthread->signo = sig;
@@ -349,7 +339,8 @@ _thread_sig_handle(int sig, ucontext_t * scp)
* we find.
*
* Do not attempt to deliver this signal
- * to other threads.
+ * to other threads and do not add the signal
+ * to the process pending set.
*/
return (NULL);
}
@@ -367,7 +358,14 @@ _thread_sig_handle(int sig, ucontext_t * scp)
* Only perform wakeups and signal delivery if there is a
* custom handler installed:
*/
- if (handler_installed != 0) {
+ if (handler_installed == 0) {
+ /*
+ * There is no handler installed. Unblock the
+ * signal so that if a handler _is_ installed, any
+ * subsequent signals can be handled.
+ */
+ _thread_sigq[sig - 1].blocked = 0;
+ } else {
/*
* If we didn't find a thread in the waiting queue,
* check the all threads queue:
@@ -403,12 +401,6 @@ _thread_sig_handle(int sig, ucontext_t * scp)
pthread = suspended_thread;
else
pthread = signaled_thread;
-
- /*
- * Perform any state changes due to signal
- * arrival:
- */
- thread_sig_check_state(pthread, sig);
return (pthread);
}
}
@@ -418,100 +410,166 @@ _thread_sig_handle(int sig, ucontext_t * scp)
return (NULL);
}
-static void
-thread_sig_finish_longjmp(void *arg)
+void
+_thread_sig_check_pending(pthread_t pthread)
{
+ sigset_t sigset;
+ int i;
+
/*
- * Check to see if the current thread performed a [_]longjmp() out of a
- * signal handler.
+ * Check if there are pending signals for the running
+ * thread or process that aren't blocked:
*/
- if ((_thread_run->jmpflags & (JMPFLAGS_LONGJMP | JMPFLAGS__LONGJMP))
- != 0) {
- _thread_run->jmpflags = JMPFLAGS_NONE;
- _thread_run->continuation = NULL;
- __longjmp(_thread_run->nested_jmp.jmp,
- _thread_run->longjmp_val);
+ sigset = pthread->sigpend;
+ SIGSETOR(sigset, _process_sigpending);
+ SIGSETNAND(sigset, pthread->sigmask);
+ if (SIGNOTEMPTY(sigset)) {
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&sigset, i) != 0) {
+ if (sigismember(&pthread->sigpend, i) != 0)
+ thread_sig_add(pthread, i,
+ /*has_args*/ 0);
+ else {
+ thread_sig_add(pthread, i,
+ /*has_args*/ 1);
+ sigdelset(&_process_sigpending, i);
+ }
+ }
+ }
}
+}
+
+/*
+ * This can only be called from the kernel scheduler. It assumes that
+ * all thread contexts are saved and that a signal frame can safely be
+ * added to any user thread.
+ */
+void
+_thread_sig_handle_pending(void)
+{
+ pthread_t pthread;
+ int i, sig;
+
+ PTHREAD_ASSERT(_thread_kern_in_sched != 0,
+ "_thread_sig_handle_pending called from outside kernel schedule");
/*
- * Check to see if the current thread performed a siglongjmp
- * out of a signal handler:
+ * Check the array of pending signals:
*/
- else if ((_thread_run->jmpflags & JMPFLAGS_SIGLONGJMP) != 0) {
- _thread_run->jmpflags = JMPFLAGS_NONE;
- _thread_run->continuation = NULL;
- __siglongjmp(_thread_run->nested_jmp.sigjmp,
- _thread_run->longjmp_val);
+ for (i = 0; i < NSIG; i++) {
+ if (_thread_sigq[i].pending != 0) {
+ /* This signal is no longer pending. */
+ _thread_sigq[i].pending = 0;
+
+ sig = _thread_sigq[i].signo;
+
+ /* Some signals need special handling: */
+ thread_sig_handle_special(sig);
+
+ if (_thread_sigq[i].blocked == 0) {
+ /*
+ * Block future signals until this one
+ * is handled:
+ */
+ _thread_sigq[i].blocked = 1;
+
+ if ((pthread = thread_sig_find(sig)) != NULL) {
+ /*
+ * Setup the target thread to receive
+ * the signal:
+ */
+ thread_sig_add(pthread, sig,
+ /*has_args*/ 1);
+ }
+ }
+ }
}
}
static void
-handle_state_change(pthread_t pthread)
+thread_sig_handle_special(int sig)
{
- /*
- * We should only need to handle threads whose state was
- * changed to running:
- */
- if (pthread->state == PS_RUNNING) {
- switch (pthread->oldstate) {
+ pthread_t pthread, pthread_next;
+ int i;
+
+ switch (sig) {
+ case SIGCHLD:
/*
- * States which do not change when a signal is trapped:
+ * Go through the file list and set all files
+ * to non-blocking again in case the child
+ * set some of them to block. Sigh.
*/
- case PS_DEAD:
- case PS_DEADLOCK:
- case PS_RUNNING:
- case PS_SIGTHREAD:
- case PS_STATE_MAX:
- case PS_SUSPENDED:
- break;
-
+ for (i = 0; i < _thread_dtablesize; i++) {
+ /* Check if this file is used: */
+ if (_thread_fd_table[i] != NULL) {
+ /*
+ * Set the file descriptor to non-blocking:
+ */
+ _thread_sys_fcntl(i, F_SETFL,
+ _thread_fd_table[i]->flags | O_NONBLOCK);
+ }
+ }
/*
- * States which need to return to critical sections
- * before they can switch contexts:
+ * Enter a loop to wake up all threads waiting
+ * for a process to complete:
*/
- case PS_COND_WAIT:
- case PS_FDLR_WAIT:
- case PS_FDLW_WAIT:
- case PS_FILE_WAIT:
- case PS_JOIN:
- case PS_MUTEX_WAIT:
- /* Indicate that the thread was interrupted: */
- pthread->interrupted = 1;
+ for (pthread = TAILQ_FIRST(&_waitingq);
+ pthread != NULL; pthread = pthread_next) {
+ /*
+ * Grab the next thread before possibly
+ * destroying the link entry:
+ */
+ pthread_next = TAILQ_NEXT(pthread, pqe);
+
/*
- * Defer the [sig|_]longjmp until leaving the critical
- * region:
+ * If this thread is waiting for a child
+ * process to complete, wake it up:
*/
- pthread->jmpflags |= JMPFLAGS_DEFERRED;
-
- /* Set the continuation routine: */
- pthread->continuation = thread_sig_finish_longjmp;
- /* FALLTHROUGH */
- case PS_FDR_WAIT:
- case PS_FDW_WAIT:
- case PS_POLL_WAIT:
- case PS_SELECT_WAIT:
- case PS_SIGSUSPEND:
- case PS_SIGWAIT:
- case PS_SLEEP_WAIT:
- case PS_SPINBLOCK:
- case PS_WAIT_WAIT:
- if ((pthread->flags & PTHREAD_FLAGS_IN_WAITQ) != 0) {
- PTHREAD_WAITQ_REMOVE(pthread);
- if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
- PTHREAD_WORKQ_REMOVE(pthread);
+ if (pthread->state == PS_WAIT_WAIT) {
+ /* Make the thread runnable: */
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+
+ /* Return the signal number: */
+ pthread->signo = sig;
}
- break;
}
+ break;
- if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0)
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ /*
+ * POSIX says that pending SIGCONT signals are
+ * discarded when one of these signals occurs.
+ */
+ case SIGTSTP:
+ case SIGTTIN:
+ case SIGTTOU:
+ /*
+ * Enter a loop to discard pending SIGCONT
+ * signals:
+ */
+ TAILQ_FOREACH(pthread, &_thread_list, tle) {
+ sigdelset(&pthread->sigpend, SIGCONT);
+ }
+ break;
+
+ default:
+ break;
}
}
-
-/* Perform thread specific actions in response to a signal: */
+/*
+ * Perform thread specific actions in response to a signal.
+ * This function is only called if there is a handler installed
+ * for the signal, and if the target thread has the signal
+ * unmasked.
+ */
static void
-thread_sig_check_state(pthread_t pthread, int sig)
+thread_sig_add(pthread_t pthread, int sig, int has_args)
{
+ int restart, frame;
+ int block_signals = 0;
+ int suppress_handler = 0;
+
+ restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;
+
/*
* Process according to thread state:
*/
@@ -519,32 +577,54 @@ thread_sig_check_state(pthread_t pthread, int sig)
/*
* States which do not change when a signal is trapped:
*/
- case PS_COND_WAIT:
case PS_DEAD:
case PS_DEADLOCK:
- case PS_FILE_WAIT:
- case PS_JOIN:
- case PS_MUTEX_WAIT:
- case PS_RUNNING:
case PS_STATE_MAX:
case PS_SIGTHREAD:
- case PS_SPINBLOCK:
+ /*
+ * You can't call a signal handler for threads in these
+ * states.
+ */
+ suppress_handler = 1;
+ break;
+
+ /*
+ * States which do not need any cleanup handling when signals
+ * occur:
+ */
+ case PS_RUNNING:
+ /*
+ * Remove the thread from the queue before changing its
+ * priority:
+ */
+ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0)
+ PTHREAD_PRIOQ_REMOVE(pthread);
+ break;
+
case PS_SUSPENDED:
- /* Increment the pending signal count. */
- sigaddset(&pthread->sigpend,sig);
+ break;
+
+ case PS_SPINBLOCK:
+ /* Remove the thread from the workq and waitq: */
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_WAITQ_REMOVE(pthread);
+ /* Make the thread runnable: */
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
break;
case PS_SIGWAIT:
+ /* The signal handler is not called for threads in SIGWAIT. */
+ suppress_handler = 1;
/* Wake up the thread if the signal is blocked. */
if (sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread, PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
} else
/* Increment the pending signal count. */
- sigaddset(&pthread->sigpend,sig);
+ sigaddset(&pthread->sigpend, sig);
break;
/*
@@ -552,64 +632,142 @@ thread_sig_check_state(pthread_t pthread, int sig)
* SIGCHLD signals.
*/
case PS_WAIT_WAIT:
- /*
- * Check for signals other than the death of a child
- * process:
- */
- if (sig != SIGCHLD)
- /* Flag the operation as interrupted: */
- pthread->interrupted = 1;
+ if (sig == SIGCHLD) {
+ /* Change the state of the thread to run: */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ /* Return the signal number: */
+ pthread->signo = sig;
+ }
+ else {
+ /*
+ * Mark the thread as interrupted only if the
+ * restart flag is not set on the signal action:
+ */
+ if (restart == 0)
+ pthread->interrupted = 1;
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ }
+ break;
- /* Return the signal number: */
- pthread->signo = sig;
+ /*
+ * States which cannot be interrupted but still require the
+ * signal handler to run:
+ */
+ case PS_COND_WAIT:
+ case PS_JOIN:
+ case PS_MUTEX_WAIT:
+ /*
+ * Remove the thread from the wait queue. It will
+ * be added back to the wait queue once all signal
+ * handlers have been invoked.
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
break;
/*
- * States that are interrupted by the occurrence of a signal
- * other than the scheduling alarm:
+ * States which are interruptible but may need to be removed
+ * from queues before any signal handler is called.
+ *
+ * XXX - We may not need to handle this condition, but will
+ * mark it as a potential problem.
*/
case PS_FDLR_WAIT:
case PS_FDLW_WAIT:
+ case PS_FILE_WAIT:
+ if (restart == 0)
+ pthread->interrupted = 1;
+ /*
+ * Remove the thread from the wait queue. Our
+ * signal handler hook will remove this thread
+ * from the fd or file queue before invoking
+ * the actual handler.
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ /*
+ * To ensure the thread is removed from the fd and file
+ * queues before any other signal interrupts it, set the
+ * signal mask to block all signals. As soon as the thread
+ * is removed from the queue the signal mask will be
+ * restored.
+ */
+ block_signals = 1;
+ break;
+
+ /*
+ * States which are interruptible:
+ */
case PS_FDR_WAIT:
case PS_FDW_WAIT:
- case PS_POLL_WAIT:
- case PS_SLEEP_WAIT:
- case PS_SELECT_WAIT:
- if ((_thread_sigact[sig - 1].sa_flags & SA_RESTART) == 0) {
- /* Flag the operation as interrupted: */
+ if (restart == 0) {
+ /*
+ * Flag the operation as interrupted and
+ * set the state to running:
+ */
pthread->interrupted = 1;
-
- if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
- PTHREAD_WORKQ_REMOVE(pthread);
-
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
-
- /* Return the signal number: */
- pthread->signo = sig;
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
}
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_WAITQ_REMOVE(pthread);
break;
- case PS_SIGSUSPEND:
+ case PS_POLL_WAIT:
+ case PS_SELECT_WAIT:
+ case PS_SLEEP_WAIT:
/*
- * Only wake up the thread if there is a handler installed
- * for the signal.
+ * Unmasked signals always cause poll, select, and sleep
+ * to terminate early, regardless of SA_RESTART:
*/
- if (_thread_sigact[sig - 1].sa_handler != SIG_DFL) {
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ pthread->interrupted = 1;
+ /* Remove threads in poll and select from the workq: */
+ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0)
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ break;
- /* Return the signal number: */
- pthread->signo = sig;
- }
+ case PS_SIGSUSPEND:
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
break;
}
+
+ if (suppress_handler == 0) {
+ /*
+ * Save the current state of the thread and add a
+ * new signal frame.
+ */
+ frame = pthread->sigframe_count;
+ thread_sigframe_save(pthread, pthread->curframe);
+ thread_sigframe_add(pthread, sig);
+ pthread->sigframes[frame + 1]->sig_has_args = has_args;
+ SIGSETOR(pthread->sigmask, _thread_sigact[sig - 1].sa_mask);
+ if (block_signals != 0) {
+ /* Save the signal mask and block all signals: */
+ pthread->sigframes[frame + 1]->saved_state.psd_sigmask =
+ pthread->sigmask;
+ sigfillset(&pthread->sigmask);
+ }
+
+ /* Make sure the thread is runnable: */
+ if (pthread->state != PS_RUNNING)
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ /*
+ * The thread should be removed from all scheduling
+ * queues at this point. Raise the priority and place
+ * the thread in the run queue.
+ */
+ pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY;
+ if (pthread != _thread_run)
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ }
}
-/* Send a signal to a specific thread (ala pthread_kill): */
+/*
+ * Send a signal to a specific thread (ala pthread_kill):
+ */
void
_thread_sig_send(pthread_t pthread, int sig)
{
@@ -620,142 +778,400 @@ _thread_sig_send(pthread_t pthread, int sig)
if (pthread->state == PS_SIGWAIT &&
sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread, PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
- } else if (pthread->state != PS_SIGWAIT &&
- !sigismember(&pthread->sigmask, sig)) {
- /* Perform any state changes due to signal arrival: */
- thread_sig_check_state(pthread, sig);
- /* Increment the pending signal count. */
- sigaddset(&pthread->sigpend,sig);
+ } else if (pthread == _thread_run) {
+ /* Add the signal to the pending set: */
+ sigaddset(&pthread->sigpend, sig);
+ /*
+ * Deliver the signal to the process if a
+ * handler is not installed:
+ */
+ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL)
+ kill(getpid(), sig);
+ if (!sigismember(&pthread->sigmask, sig)) {
+ /*
+ * Call the kernel scheduler which will safely
+ * install a signal frame for this thread:
+ */
+ _thread_kern_sched_sig();
+ }
} else {
- /* Increment the pending signal count. */
- sigaddset(&pthread->sigpend,sig);
+ if (pthread->state != PS_SIGWAIT &&
+ !sigismember(&pthread->sigmask, sig)) {
+ /* Protect the scheduling queues: */
+ _thread_kern_sig_defer();
+ /*
+ * Perform any state changes due to signal
+ * arrival:
+ */
+ thread_sig_add(pthread, sig, /* has args */ 0);
+ /* Unprotect the scheduling queues: */
+ _thread_kern_sig_undefer();
+ }
+ else
+ /* Increment the pending signal count. */
+ sigaddset(&pthread->sigpend,sig);
+
+ /*
+ * Deliver the signal to the process if a
+ * handler is not installed:
+ */
+ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL)
+ kill(getpid(), sig);
}
}
}
-/* Dispatch pending signals to the running thread: */
+/*
+ * User thread signal handler wrapper.
+ *
+ * thread - current running thread
+ */
void
-_dispatch_signals()
+_thread_sig_wrapper(void)
{
- sigset_t sigset;
- int i;
+ void (*sigfunc)(int, siginfo_t *, void *);
+ struct pthread_signal_frame *psf;
+ pthread_t thread;
+ int dead = 0;
+ int i, sig, has_args;
+ int frame, dst_frame;
+
+ thread = _thread_run;
+
+ /* Get the current frame and state: */
+ frame = thread->sigframe_count;
+ PTHREAD_ASSERT(frame > 0, "Invalid signal frame in signal handler");
+ psf = thread->curframe;
+
+ /* Check the threads previous state: */
+ if (psf->saved_state.psd_state != PS_RUNNING) {
+ /*
+ * Do a little cleanup handling for those threads in
+ * queues before calling the signal handler. Signals
+ * for these threads are temporarily blocked until
+ * after cleanup handling.
+ */
+ switch (psf->saved_state.psd_state) {
+ case PS_FDLR_WAIT:
+ case PS_FDLW_WAIT:
+ _fd_lock_backout(thread);
+ psf->saved_state.psd_state = PS_RUNNING;
+ /* Reenable signals: */
+ thread->sigmask = psf->saved_state.psd_sigmask;
+ break;
+
+ case PS_FILE_WAIT:
+ _flockfile_backout(thread);
+ psf->saved_state.psd_state = PS_RUNNING;
+ /* Reenable signals: */
+ thread->sigmask = psf->saved_state.psd_sigmask;
+ break;
+
+ default:
+ break;
+ }
+ }
/*
- * Check if there are pending signals for the running
- * thread or process that aren't blocked:
+ * Unless the thread exits or longjmps out of the signal handler,
+ * return to the previous frame:
*/
- sigset = _thread_run->sigpend;
- SIGSETOR(sigset, _process_sigpending);
- SIGSETNAND(sigset, _thread_run->sigmask);
- if (SIGNOTEMPTY(sigset)) {
+ dst_frame = frame - 1;
+
+ /*
+ * Check that a custom handler is installed and if the signal
+ * is not blocked:
+ */
+ sigfunc = _thread_sigact[psf->signo - 1].sa_sigaction;
+ if (((__sighandler_t *)sigfunc != SIG_DFL) &&
+ ((__sighandler_t *)sigfunc != SIG_IGN)) {
/*
- * Enter a loop to calculate deliverable pending signals
- * before actually delivering them. The pending signals
- * must be removed from the pending signal sets before
- * calling the signal handler because the handler may
- * call library routines that again check for and deliver
- * pending signals.
+ * The signal jump buffer is allocated off the stack.
+ * If the signal handler tries to [_][sig]longjmp() or
+ * setcontext(), our wrapped versions of these routines
+ * will copy the user supplied jump buffer or context
+ * to the destination signal frame, set the destination
+ * signal frame in psf->dst_frame, and _longjmp() back
+ * to here.
*/
- for (i = 1; i < NSIG; i++) {
+ jmp_buf jb;
+
+ /*
+ * Set up the context for abnormal returns out of signal
+ * handlers.
+ */
+ psf->sig_jb = &jb;
+ if (_setjmp(jb) == 0) {
+ DBG_MSG("_thread_sig_wrapper: Entering frame %d, "
+ "stack 0x%lx\n", frame, GET_STACK_JB(jb));
/*
- * Check that a custom handler is installed
- * and if the signal is not blocked:
+ * Invalidate the destination frame before calling
+ * the signal handler.
*/
- if (_thread_sigact[i - 1].sa_handler != SIG_DFL &&
- _thread_sigact[i - 1].sa_handler != SIG_IGN &&
- sigismember(&sigset, i)) {
- if (sigismember(&_thread_run->sigpend,i))
- /* Clear the thread pending signal: */
- sigdelset(&_thread_run->sigpend,i);
- else
- /* Clear the process pending signal: */
- sigdelset(&_process_sigpending,i);
- }
+ psf->dst_frame = -1;
+
+ /*
+ * Dispatch the signal via the custom signal
+ * handler:
+ */
+ if (psf->sig_has_args == 0)
+ (*(sigfunc))(psf->signo, NULL, NULL);
+ else if ((_thread_sigact[psf->signo - 1].sa_flags &
+ SA_SIGINFO) != 0)
+ (*(sigfunc))(psf->signo,
+ &_thread_sigq[psf->signo - 1].siginfo,
+ &_thread_sigq[psf->signo - 1].uc);
else
- /* Remove the signal if it can't be handled: */
- sigdelset(&sigset, i);
+ (*(sigfunc))(psf->signo,
+ (siginfo_t *)_thread_sigq[psf->signo - 1].siginfo.si_code,
+ &_thread_sigq[psf->signo - 1].uc);
}
+ else {
+ /*
+ * The return from _setjmp() should only be non-zero
+ * when the signal handler wants to xxxlongjmp() or
+ * setcontext() to a different context, or if the
+ * thread has exited (via pthread_exit).
+ */
+ /*
+ * Grab a copy of the destination frame before it
+ * gets clobbered after unwinding.
+ */
+ dst_frame = psf->dst_frame;
+ DBG_MSG("Abnormal exit from handler for signal %d, "
+ "frame %d\n", psf->signo, frame);
+
+ /* Has the thread exited? */
+ if ((dead = thread->flags & PTHREAD_EXITING) != 0)
+ /* When exiting, unwind to frame 0. */
+ dst_frame = 0;
+ else if ((dst_frame < 0) || (dst_frame > frame))
+ PANIC("Attempt to unwind to invalid "
+ "signal frame");
+
+ /* Unwind to the target frame: */
+ for (i = frame; i > dst_frame; i--) {
+ DBG_MSG("Leaving frame %d, signal %d\n", i,
+ thread->sigframes[i]->signo);
+ /* Leave the current signal frame: */
+ thread_sigframe_leave(thread, i);
- /* Now deliver the signals: */
- for (i = 1; i < NSIG; i++) {
- if (sigismember(&sigset, i))
- /* Deliver the signal to the running thread: */
- _thread_sig_deliver(_thread_run, i);
+ /*
+ * Save whatever is needed out of the state
+				 * data; as soon as the frame count is
+				 * decremented, another signal can arrive
+ * and corrupt this view of the state data.
+ */
+ sig = thread->sigframes[i]->signo;
+ has_args = thread->sigframes[i]->sig_has_args;
+
+ /*
+ * We're done with this signal frame:
+ */
+ thread->curframe = thread->sigframes[i - 1];
+ thread->sigframe_count = i - 1;
+
+ /*
+ * Only unblock the signal if it was a
+ * process signal as opposed to a signal
+ * generated by pthread_kill().
+ */
+ if (has_args != 0)
+ _thread_sigq[sig - 1].blocked = 0;
+ }
}
}
+
+ /*
+ * Call the kernel scheduler to schedule the next
+ * thread.
+ */
+ if (dead == 0) {
+ /* Restore the threads state: */
+ thread_sigframe_restore(thread, thread->sigframes[dst_frame]);
+ _thread_kern_sched_frame(dst_frame);
+ }
+ else {
+ PTHREAD_ASSERT(dst_frame == 0,
+ "Invalid signal frame for dead thread");
+
+ /* Perform any necessary cleanup before exiting. */
+ thread_sigframe_leave(thread, 0);
+
+ /* This should never return: */
+ _thread_exit_finish();
+ PANIC("Return from _thread_exit_finish in signal wrapper");
+ }
}
-/* Deliver a signal to a thread: */
-void
-_thread_sig_deliver(pthread_t pthread, int sig)
+static void
+thread_sigframe_add(pthread_t thread, int sig)
{
- sigset_t mask;
- pthread_t pthread_saved;
- jmp_buf jb, *saved_sighandler_jmp_buf;
+ unsigned long stackp = 0;
+
+ /* Get the top of the threads stack: */
+ switch (thread->curframe->ctxtype) {
+ case CTX_JB:
+ case CTX_JB_NOSIG:
+ stackp = GET_STACK_JB(thread->curframe->ctx.jb);
+ break;
+ case CTX_SJB:
+ stackp = GET_STACK_SJB(thread->curframe->ctx.sigjb);
+ break;
+ case CTX_UC:
+ stackp = GET_STACK_UC(&thread->curframe->ctx.uc);
+ break;
+ default:
+ PANIC("Invalid thread context type");
+ break;
+ }
/*
- * Check that a custom handler is installed
- * and if the signal is not blocked:
+ * Leave a little space on the stack and round down to the
+ * nearest aligned word:
*/
- if (_thread_sigact[sig - 1].sa_handler != SIG_DFL &&
- _thread_sigact[sig - 1].sa_handler != SIG_IGN) {
- /* Save the current thread: */
- pthread_saved = _thread_run;
+ stackp -= sizeof(double);
+ stackp &= ~0x3UL;
+
+ /* Allocate room on top of the stack for a new signal frame: */
+ stackp -= sizeof(struct pthread_signal_frame);
+
+ /* Set up the new frame: */
+ thread->sigframe_count++;
+ thread->sigframes[thread->sigframe_count] =
+ (struct pthread_signal_frame *) stackp;
+ thread->curframe = thread->sigframes[thread->sigframe_count];
+ thread->curframe->stackp = stackp;
+ thread->curframe->ctxtype = CTX_JB;
+ thread->curframe->longjmp_val = 1;
+ thread->curframe->signo = sig;
- /* Save the threads signal mask: */
- mask = pthread->sigmask;
-
- /*
- * Add the current signal and signal handler
- * mask to the thread's current signal mask:
- */
- SIGSETOR(pthread->sigmask, _thread_sigact[sig - 1].sa_mask);
- sigaddset(&pthread->sigmask, sig);
+ /*
+ * Set up the context:
+ */
+ _setjmp(thread->curframe->ctx.jb);
+ SET_STACK_JB(thread->curframe->ctx.jb, stackp);
+ SET_RETURN_ADDR_JB(thread->curframe->ctx.jb, _thread_sig_wrapper);
+}
- /* Current thread inside critical region? */
- if (_thread_run->sig_defer_count > 0)
- pthread->sig_defer_count++;
+/*
+ * Locate the signal frame from the specified stack pointer.
+ */
+int
+_thread_sigframe_find(pthread_t pthread, void *stackp)
+{
+ int frame;
- /* Increment the number of nested signals being handled. */
- pthread->signal_nest_level++;
+ /*
+ * Find the destination of the target frame based on the
+ * given stack pointer.
+ */
+ for (frame = pthread->sigframe_count; frame >= 0; frame--) {
+ if (stackp < (void *)pthread->sigframes[frame]->stackp)
+ break;
+ }
+ return (frame);
+}
+
+void
+thread_sigframe_leave(pthread_t thread, int frame)
+{
+ struct pthread_state_data *psd;
- /*
- * The jump buffer is allocated off the stack and the current
- * jump buffer is saved. If the signal handler tries to
- * [sig|_]longjmp(), our version of [sig|_]longjmp() will copy
- * the user supplied jump buffer into
- * _thread_run->nested_jmp.[sig]jmp and _longjmp() back to here.
- */
- saved_sighandler_jmp_buf = pthread->sighandler_jmp_buf;
- pthread->sighandler_jmp_buf = &jb;
+ psd = &thread->sigframes[frame]->saved_state;
- _thread_run = pthread;
+ /*
+ * Perform any necessary cleanup for this signal frame:
+ */
+ switch (psd->psd_state) {
+ case PS_DEAD:
+ case PS_DEADLOCK:
+ case PS_RUNNING:
+ case PS_SIGTHREAD:
+ case PS_STATE_MAX:
+ case PS_SUSPENDED:
+ break;
- if (_setjmp(jb) == 0) {
- /*
- * Dispatch the signal via the custom signal
- * handler:
- */
- (*(_thread_sigact[sig - 1].sa_handler))(sig);
- }
+ /*
+ * Threads in the following states need to be removed
+ * from queues.
+ */
+ case PS_COND_WAIT:
+ _cond_wait_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- _thread_run = pthread_saved;
+ case PS_FDLR_WAIT:
+ case PS_FDLW_WAIT:
+ _fd_lock_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- pthread->sighandler_jmp_buf = saved_sighandler_jmp_buf;
+ case PS_FILE_WAIT:
+ _flockfile_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- /* Decrement the signal nest level. */
- pthread->signal_nest_level--;
+ case PS_JOIN:
+ _join_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- /* Current thread inside critical region? */
- if (_thread_run->sig_defer_count > 0)
- pthread->sig_defer_count--;
+ case PS_MUTEX_WAIT:
+ _mutex_lock_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- /* Restore the threads signal mask: */
- pthread->sigmask = mask;
+ case PS_FDR_WAIT:
+ case PS_FDW_WAIT:
+ case PS_POLL_WAIT:
+ case PS_SELECT_WAIT:
+ case PS_SIGSUSPEND:
+ case PS_SIGWAIT:
+ case PS_SLEEP_WAIT:
+ case PS_SPINBLOCK:
+ case PS_WAIT_WAIT:
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) {
+ PTHREAD_WAITQ_REMOVE(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WORKQ) != 0)
+ PTHREAD_WORKQ_REMOVE(thread);
+ }
+ break;
}
}
+
+static void
+thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf)
+{
+ thread->interrupted = psf->saved_state.psd_interrupted;
+ thread->sigmask = psf->saved_state.psd_sigmask;
+ thread->state = psf->saved_state.psd_state;
+ thread->flags = psf->saved_state.psd_flags;
+ thread->wakeup_time = psf->saved_state.psd_wakeup_time;
+ thread->data = psf->saved_state.psd_wait_data;
+}
+
+static void
+thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf)
+{
+ psf->saved_state.psd_interrupted = thread->interrupted;
+ psf->saved_state.psd_sigmask = thread->sigmask;
+ psf->saved_state.psd_state = thread->state;
+ psf->saved_state.psd_flags = thread->flags;
+ thread->flags &= PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE |
+ PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ |
+ PTHREAD_FLAGS_IN_JOINQ;
+ psf->saved_state.psd_wakeup_time = thread->wakeup_time;
+ psf->saved_state.psd_wait_data = thread->data;
+}
+
#endif
diff --git a/lib/libkse/thread/thr_sigaction.c b/lib/libkse/thread/thr_sigaction.c
index 319999bf3047..e78f3290154c 100644
--- a/lib/libkse/thread/thr_sigaction.c
+++ b/lib/libkse/thread/thr_sigaction.c
@@ -74,12 +74,13 @@ _sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
*/
if (act != NULL && sig != _SCHED_SIGNAL && sig != SIGCHLD &&
sig != SIGINFO) {
- /* Initialise the global signal action structure: */
- gact.sa_mask = act->sa_mask;
- gact.sa_flags = 0;
-
- /* Ensure the scheduling signal is masked: */
- sigaddset(&gact.sa_mask, _SCHED_SIGNAL);
+ /*
+ * Ensure the signal handler cannot be interrupted
+ * by other signals. Always request the POSIX signal
+ * handler arguments.
+ */
+ sigfillset(&gact.sa_mask);
+ gact.sa_flags = SA_SIGINFO;
/*
* Check if the signal handler is being set to
diff --git a/lib/libkse/thread/thr_sigmask.c b/lib/libkse/thread/thr_sigmask.c
index b880d9c748c8..bdb0b438ec48 100644
--- a/lib/libkse/thread/thr_sigmask.c
+++ b/lib/libkse/thread/thr_sigmask.c
@@ -43,7 +43,8 @@
int
pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
- int ret = 0;
+ sigset_t sigset;
+ int ret = 0;
/* Check if the existing signal process mask is to be returned: */
if (oset != NULL) {
@@ -81,10 +82,18 @@ pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
}
/*
- * Dispatch signals to the running thread that are pending
- * and now unblocked:
+ * Check if there are pending signals for the running
+ * thread or process that aren't blocked:
*/
- _dispatch_signals();
+ sigset = _thread_run->sigpend;
+ SIGSETOR(sigset, _process_sigpending);
+ SIGSETNAND(sigset, _thread_run->sigmask);
+ if (SIGNOTEMPTY(sigset))
+ /*
+ * Call the kernel scheduler which will safely
+ * install a signal frame for the running thread:
+ */
+ _thread_kern_sched_sig();
}
/* Return the completion status: */
diff --git a/lib/libkse/thread/thr_sigprocmask.c b/lib/libkse/thread/thr_sigprocmask.c
index 6addb4a948e9..f8ca0311c42c 100644
--- a/lib/libkse/thread/thr_sigprocmask.c
+++ b/lib/libkse/thread/thr_sigprocmask.c
@@ -41,53 +41,9 @@
#include "pthread_private.h"
int
-_sigprocmask(int how, const sigset_t * set, sigset_t * oset)
+_sigprocmask(int how, const sigset_t *set, sigset_t *oset)
{
- int ret = 0;
-
- /* Check if the existing signal process mask is to be returned: */
- if (oset != NULL) {
- /* Return the current mask: */
- *oset = _thread_run->sigmask;
- }
- /* Check if a new signal set was provided by the caller: */
- if (set != NULL) {
- /* Process according to what to do: */
- switch (how) {
- /* Block signals: */
- case SIG_BLOCK:
- /* Add signals to the existing mask: */
- SIGSETOR(_thread_run->sigmask, *set);
- break;
-
- /* Unblock signals: */
- case SIG_UNBLOCK:
- /* Clear signals from the existing mask: */
- SIGSETNAND(_thread_run->sigmask, *set);
- break;
-
- /* Set the signal process mask: */
- case SIG_SETMASK:
- /* Set the new mask: */
- _thread_run->sigmask = *set;
- break;
-
- /* Trap invalid actions: */
- default:
- /* Return an invalid argument: */
- errno = EINVAL;
- ret = -1;
- break;
- }
-
- /*
- * Dispatch signals to the running thread that are pending
- * and now unblocked:
- */
- _dispatch_signals();
- }
- /* Return the completion status: */
- return (ret);
+ return (pthread_sigmask(how, set, oset));
}
__strong_reference(_sigprocmask, sigprocmask);
diff --git a/lib/libkse/thread/thr_sigwait.c b/lib/libkse/thread/thr_sigwait.c
index b12c028740c9..6ba685559263 100644
--- a/lib/libkse/thread/thr_sigwait.c
+++ b/lib/libkse/thread/thr_sigwait.c
@@ -40,7 +40,7 @@
#include "pthread_private.h"
int
-sigwait(const sigset_t * set, int *sig)
+sigwait(const sigset_t *set, int *sig)
{
int ret = 0;
int i;
@@ -52,11 +52,9 @@ sigwait(const sigset_t * set, int *sig)
* Specify the thread kernel signal handler.
*/
act.sa_handler = (void (*) ()) _thread_sig_handler;
- act.sa_flags = SA_RESTART;
- act.sa_mask = *set;
-
- /* Ensure the scheduling signal is masked: */
- sigaddset(&act.sa_mask, _SCHED_SIGNAL);
+ act.sa_flags = SA_RESTART | SA_SIGINFO;
+ /* Ensure the signal handler cannot be interrupted by other signals: */
+ sigfillset(&act.sa_mask);
/*
* Initialize the set of signals that will be waited on:
diff --git a/lib/libkse/thread/thr_write.c b/lib/libkse/thread/thr_write.c
index 6408a64830c2..5d9ef35f3fbc 100644
--- a/lib/libkse/thread/thr_write.c
+++ b/lib/libkse/thread/thr_write.c
@@ -127,7 +127,7 @@ _write(int fd, const void *buf, size_t nbytes)
/* Return the number of bytes written: */
ret = num;
}
- _FD_UNLOCK(fd, FD_RDWR);
+ _FD_UNLOCK(fd, FD_WRITE);
}
return (ret);
}
diff --git a/lib/libkse/thread/thr_yield.c b/lib/libkse/thread/thr_yield.c
index 064dd826d34c..7d64283a9910 100644
--- a/lib/libkse/thread/thr_yield.c
+++ b/lib/libkse/thread/thr_yield.c
@@ -57,8 +57,5 @@ pthread_yield(void)
/* Schedule the next thread: */
_thread_kern_sched(NULL);
-
- /* Nothing to return. */
- return;
}
#endif
diff --git a/lib/libpthread/thread/thr_attr_setschedparam.c b/lib/libpthread/thread/thr_attr_setschedparam.c
index 6c4166b1784a..755bb13b7acc 100644
--- a/lib/libpthread/thread/thr_attr_setschedparam.c
+++ b/lib/libpthread/thread/thr_attr_setschedparam.c
@@ -45,6 +45,10 @@ pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param
ret = EINVAL;
else if (param == NULL) {
ret = ENOTSUP;
+ } else if ((param->sched_priority < PTHREAD_MIN_PRIORITY) ||
+ (param->sched_priority > PTHREAD_MAX_PRIORITY)) {
+ /* Return an unsupported value error. */
+ ret = ENOTSUP;
} else
(*attr)->prio = param->sched_priority;
diff --git a/lib/libpthread/thread/thr_cond.c b/lib/libpthread/thread/thr_cond.c
index 0a5298bcfcc0..50cf92765954 100644
--- a/lib/libpthread/thread/thr_cond.c
+++ b/lib/libpthread/thread/thr_cond.c
@@ -170,10 +170,7 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
* perform the dynamic initialization:
*/
else if (*cond != NULL ||
- (rval = pthread_cond_init(cond,NULL)) == 0) {
-
- _thread_enter_cancellation_point();
-
+ (rval = pthread_cond_init(cond, NULL)) == 0) {
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -286,8 +283,6 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
if (_thread_run->continuation != NULL)
_thread_run->continuation((void *) _thread_run);
}
-
- _thread_leave_cancellation_point();
}
_thread_leave_cancellation_point();
@@ -313,8 +308,6 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
* initialization.
*/
else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
- _thread_enter_cancellation_point();
-
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -446,8 +439,6 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
if (_thread_run->continuation != NULL)
_thread_run->continuation((void *) _thread_run);
}
-
- _thread_leave_cancellation_point();
}
_thread_leave_cancellation_point();
@@ -589,6 +580,48 @@ pthread_cond_broadcast(pthread_cond_t * cond)
return (rval);
}
+void
+_cond_wait_backout(pthread_t pthread)
+{
+ pthread_cond_t cond;
+
+ cond = pthread->data.cond;
+ if (cond != NULL) {
+ /*
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /* Lock the condition variable structure: */
+ _SPINLOCK(&cond->lock);
+
+ /* Process according to condition variable type: */
+ switch (cond->c_type) {
+ /* Fast condition variable: */
+ case COND_TYPE_FAST:
+ cond_queue_remove(cond, pthread);
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&cond->c_queue) == NULL)
+ cond->c_mutex = NULL;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Unlock the condition variable structure: */
+ _SPINUNLOCK(&cond->lock);
+
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
+ */
+ _thread_kern_sig_undefer();
+ }
+}
+
/*
* Dequeue a waiting thread from the head of a condition queue in
* descending priority order.
@@ -599,7 +632,7 @@ cond_queue_deq(pthread_cond_t cond)
pthread_t pthread;
while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
- TAILQ_REMOVE(&cond->c_queue, pthread, qe);
+ TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
if ((pthread->timeout == 0) && (pthread->interrupted == 0))
/*
@@ -628,7 +661,7 @@ cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
* it isn't in the queue.
*/
if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) {
- TAILQ_REMOVE(&cond->c_queue, pthread, qe);
+ TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
}
}
@@ -642,19 +675,22 @@ cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
{
pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head);
+ PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
+
/*
* For the common case of all threads having equal priority,
* we perform a quick check against the priority of the thread
* at the tail of the queue.
*/
if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
- TAILQ_INSERT_TAIL(&cond->c_queue, pthread, qe);
+ TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe);
else {
tid = TAILQ_FIRST(&cond->c_queue);
while (pthread->active_priority <= tid->active_priority)
- tid = TAILQ_NEXT(tid, qe);
- TAILQ_INSERT_BEFORE(tid, pthread, qe);
+ tid = TAILQ_NEXT(tid, sqe);
+ TAILQ_INSERT_BEFORE(tid, pthread, sqe);
}
pthread->flags |= PTHREAD_FLAGS_IN_CONDQ;
+ pthread->data.cond = cond;
}
#endif
diff --git a/lib/libpthread/thread/thr_create.c b/lib/libpthread/thread/thr_create.c
index 3a80611dc145..0390f1be7205 100644
--- a/lib/libpthread/thread/thr_create.c
+++ b/lib/libpthread/thread/thr_create.c
@@ -49,17 +49,24 @@
static u_int64_t next_uniqueid = 1;
#define OFF(f) offsetof(struct pthread, f)
+#define SIGFRAME_OFF(f) offsetof(struct pthread_signal_frame, f)
int _thread_next_offset = OFF(tle.tqe_next);
int _thread_uniqueid_offset = OFF(uniqueid);
int _thread_state_offset = OFF(state);
int _thread_name_offset = OFF(name);
-int _thread_sig_saved_offset = OFF(sig_saved);
-int _thread_saved_sigcontext_offset = OFF(saved_sigcontext);
-int _thread_saved_jmp_buf_offset = OFF(saved_jmp_buf);
+int _thread_curframe_offset = OFF(curframe);
+int _thread_sigframe_ctx_offset = SIGFRAME_OFF(ctx);
+int _thread_sigframe_ctxtype_offset = SIGFRAME_OFF(ctxtype);
#undef OFF
+#undef SIGFRAME_OFF
int _thread_PS_RUNNING_value = PS_RUNNING;
int _thread_PS_DEAD_value = PS_DEAD;
+int _thread_CTX_JB_NOSIG_value = CTX_JB_NOSIG;
+int _thread_CTX_JB_value = CTX_JB;
+int _thread_CTX_SJB_value = CTX_SJB;
+int _thread_CTX_UC_value = CTX_UC;
+int _thread_sigframe_size_value = sizeof(struct pthread_signal_frame);
int
pthread_create(pthread_t * thread, const pthread_attr_t * attr,
@@ -162,7 +169,6 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Initialise the thread structure: */
memset(new_thread, 0, sizeof(struct pthread));
new_thread->slice_usec = -1;
- new_thread->sig_saved = 0;
new_thread->stack = stack;
new_thread->start_routine = start_routine;
new_thread->arg = arg;
@@ -179,62 +185,32 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Initialise the thread for signals: */
new_thread->sigmask = _thread_run->sigmask;
+ /* Initialize the first signal frame: */
+ new_thread->sigframes[0] = &new_thread->sigframe0;
+ new_thread->curframe = &new_thread->sigframe0;
+
/* Initialise the jump buffer: */
- setjmp(new_thread->saved_jmp_buf);
+ _setjmp(new_thread->curframe->ctx.jb);
/*
* Set up new stack frame so that it looks like it
* returned from a longjmp() to the beginning of
* _thread_start().
*/
-#if defined(__FreeBSD__)
-#if defined(__alpha__)
- new_thread->saved_jmp_buf[0]._jb[2] =
- (long)_thread_start;
- new_thread->saved_jmp_buf[0]._jb[4 + R_RA] =
- 0;
- new_thread->saved_jmp_buf[0]._jb[4 + R_T12] =
- (long)_thread_start;
-#else
- new_thread->saved_jmp_buf[0]._jb[0] =
- (long)_thread_start;
-#endif
-#elif defined(__NetBSD__)
-#if defined(__alpha__)
- new_thread->saved_jmp_buf[2] = (long)_thread_start;
- new_thread->saved_jmp_buf[4 + R_RA] = 0;
- new_thread->saved_jmp_buf[4 + R_T12] =
- (long)_thread_start;
-#else
- new_thread->saved_jmp_buf[0] = (long)_thread_start;
-#endif
-#else
-#error "Don't recognize this operating system!"
-#endif
+ SET_RETURN_ADDR_JB(new_thread->curframe->ctx.jb,
+ _thread_start);
/* The stack starts high and builds down: */
-#if defined(__FreeBSD__)
-#if defined(__alpha__)
- new_thread->saved_jmp_buf[0]._jb[4 + R_SP] =
- (long)new_thread->stack + pattr->stacksize_attr
- - sizeof(double);
-#else
- new_thread->saved_jmp_buf[0]._jb[2] =
- (int)(new_thread->stack + pattr->stacksize_attr -
- sizeof(double));
-#endif
-#elif defined(__NetBSD__)
-#if defined(__alpha__)
- new_thread->saved_jmp_buf[4 + R_SP] =
- (long)new_thread->stack + pattr->stacksize_attr -
- sizeof(double);
-#else
- new_thread->saved_jmp_buf[2] = (long)new_thread->stack
- + pattr->stacksize_attr - sizeof(double);
-#endif
-#else
-#error "Don't recognize this operating system!"
-#endif
+ SET_STACK_JB(new_thread->curframe->ctx.jb,
+ (long)new_thread->stack + pattr->stacksize_attr
+ - sizeof(double));
+
+ /* Initialize the rest of the frame: */
+ new_thread->curframe->ctxtype = CTX_JB_NOSIG;
+ /* Set the base of the stack: */
+ new_thread->curframe->stackp =
+ GET_STACK_JB(new_thread->curframe->ctx.jb);
+ new_thread->sigframe_count = 0;
/* Copy the thread attributes: */
memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));
@@ -245,20 +221,22 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
/* Copy the scheduling attributes: */
- new_thread->base_priority
- = _thread_run->base_priority;
- new_thread->attr.prio
- = _thread_run->base_priority;
- new_thread->attr.sched_policy
- = _thread_run->attr.sched_policy;
+ new_thread->base_priority =
+ _thread_run->base_priority &
+ ~PTHREAD_SIGNAL_PRIORITY;
+ new_thread->attr.prio =
+ _thread_run->base_priority &
+ ~PTHREAD_SIGNAL_PRIORITY;
+ new_thread->attr.sched_policy =
+ _thread_run->attr.sched_policy;
} else {
/*
* Use just the thread priority, leaving the
* other scheduling attributes as their
* default values:
*/
- new_thread->base_priority
- = new_thread->attr.prio;
+ new_thread->base_priority =
+ new_thread->attr.prio;
}
new_thread->active_priority = new_thread->base_priority;
new_thread->inherited_priority = 0;
@@ -275,7 +253,6 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->flags = 0;
new_thread->poll_data.nfds = 0;
new_thread->poll_data.fds = NULL;
- new_thread->jmpflags = 0;
new_thread->continuation = NULL;
/*
@@ -317,7 +294,6 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Schedule the new user thread: */
_thread_kern_sched(NULL);
-
/*
* Start a garbage collector thread
* if necessary.
@@ -325,6 +301,7 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
if (f_gc && pthread_create(&gc_thread,NULL,
_thread_gc,NULL) != 0)
PANIC("Can't create gc thread");
+
}
}
diff --git a/lib/libpthread/thread/thr_detach.c b/lib/libpthread/thread/thr_detach.c
index 89cc6671179c..3bade9d5b244 100644
--- a/lib/libpthread/thread/thr_detach.c
+++ b/lib/libpthread/thread/thr_detach.c
@@ -61,9 +61,10 @@ pthread_detach(pthread_t pthread)
/* Enter a loop to bring all threads off the join queue: */
while ((next_thread = TAILQ_FIRST(&pthread->join_queue)) != NULL) {
/* Remove the thread from the queue: */
- TAILQ_REMOVE(&pthread->join_queue, next_thread, qe);
+ TAILQ_REMOVE(&pthread->join_queue, next_thread, sqe);
+ next_thread->flags &= ~PTHREAD_FLAGS_IN_JOINQ;
- /* Make the thread run: */
+ /* Make the thread runnable: */
PTHREAD_NEW_STATE(next_thread,PS_RUNNING);
}
diff --git a/lib/libpthread/thread/thr_exit.c b/lib/libpthread/thread/thr_exit.c
index 0d22638fd7fc..7fbeb656192f 100644
--- a/lib/libpthread/thread/thr_exit.c
+++ b/lib/libpthread/thread/thr_exit.c
@@ -41,6 +41,9 @@
#include <pthread.h>
#include "pthread_private.h"
+#define FLAGS_IN_SCHEDQ \
+ (PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ)
+
void __exit(int status)
{
int flags;
@@ -138,7 +141,7 @@ _thread_exit_cleanup(void)
void
pthread_exit(void *status)
{
- pthread_t pthread;
+ int frame;
/* Check if this thread is already in the process of exiting: */
if ((_thread_run->flags & PTHREAD_EXITING) != 0) {
@@ -172,25 +175,24 @@ pthread_exit(void *status)
_thread_run->poll_data.fds = NULL;
}
- /*
- * Defer signals to protect the scheduling queues from access
- * by the signal handler:
- */
- _thread_kern_sig_defer();
-
- /* Check if there are any threads joined to this one: */
- while ((pthread = TAILQ_FIRST(&(_thread_run->join_queue))) != NULL) {
- /* Remove the thread from the queue: */
- TAILQ_REMOVE(&_thread_run->join_queue, pthread, qe);
-
- /* Wake the joined thread and let it detach this thread: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ if ((frame = _thread_run->sigframe_count) == 0)
+ _thread_exit_finish();
+ else {
+ /*
+ * Jump back and unwind the signal frames to gracefully
+ * cleanup.
+ */
+ ___longjmp(*_thread_run->sigframes[frame]->sig_jb, 1);
}
- /*
- * Undefer and handle pending signals, yielding if necessary:
- */
- _thread_kern_sig_undefer();
+ /* This point should not be reached. */
+ PANIC("Dead thread has resumed");
+}
+
+void
+_thread_exit_finish(void)
+{
+ pthread_t pthread;
/*
* Lock the garbage collector mutex to ensure that the garbage
@@ -203,20 +205,6 @@ pthread_exit(void *status)
TAILQ_INSERT_HEAD(&_dead_list, _thread_run, dle);
/*
- * Defer signals to protect the scheduling queues from access
- * by the signal handler:
- */
- _thread_kern_sig_defer();
-
- /* Remove this thread from the thread list: */
- TAILQ_REMOVE(&_thread_list, _thread_run, tle);
-
- /*
- * Undefer and handle pending signals, yielding if necessary:
- */
- _thread_kern_sig_undefer();
-
- /*
* Signal the garbage collector thread that there is something
* to clean up.
*/
@@ -224,19 +212,33 @@ pthread_exit(void *status)
PANIC("Cannot signal gc cond");
/*
- * Mark the thread as dead so it will not return if it
- * gets context switched out when the mutex is unlocked.
+ * Avoid a race condition where a scheduling signal can occur
+ * causing the garbage collector thread to run. If this happens,
+ * the current thread can be cleaned out from under us.
*/
- PTHREAD_SET_STATE(_thread_run, PS_DEAD);
+ _thread_kern_sig_defer();
/* Unlock the garbage collector mutex: */
if (pthread_mutex_unlock(&_gc_mutex) != 0)
PANIC("Cannot lock gc mutex");
- /* This this thread will never be re-scheduled. */
- _thread_kern_sched(NULL);
+ /* Check if there are any threads joined to this one: */
+ while ((pthread = TAILQ_FIRST(&(_thread_run->join_queue))) != NULL) {
+ /* Remove the thread from the queue: */
+ TAILQ_REMOVE(&_thread_run->join_queue, pthread, sqe);
+ pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ;
+
+ /*
+ * Wake the joined thread and let it
+ * detach this thread:
+ */
+ PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ }
- /* This point should not be reached. */
- PANIC("Dead thread has resumed");
+ /* Remove this thread from the thread list: */
+ TAILQ_REMOVE(&_thread_list, _thread_run, tle);
+
+ /* This thread will never be re-scheduled. */
+ _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__);
}
#endif
diff --git a/lib/libpthread/thread/thr_fork.c b/lib/libpthread/thread/thr_fork.c
index a8e85d86e18f..97039b3ef804 100644
--- a/lib/libpthread/thread/thr_fork.c
+++ b/lib/libpthread/thread/thr_fork.c
@@ -183,9 +183,6 @@ _fork(void)
/* Don't queue signals yet: */
_queue_signals = 0;
- /* Initialize signal handling: */
- _thread_sig_init();
-
/* Initialize the scheduling switch hook routine: */
_sched_switch_hook = NULL;
diff --git a/lib/libpthread/thread/thr_gc.c b/lib/libpthread/thread/thr_gc.c
index cd1f8383a33d..a81ea9161912 100644
--- a/lib/libpthread/thread/thr_gc.c
+++ b/lib/libpthread/thread/thr_gc.c
@@ -57,8 +57,8 @@ _thread_gc(pthread_addr_t arg)
void *p_stack;
/* Block all signals */
- sigfillset (&mask);
- sigprocmask (SIG_BLOCK, &mask, NULL);
+ sigfillset(&mask);
+ pthread_sigmask(SIG_BLOCK, &mask, NULL);
/* Mark this thread as a library thread (not a user thread). */
_thread_run->flags |= PTHREAD_FLAGS_PRIVATE;
diff --git a/lib/libpthread/thread/thr_getschedparam.c b/lib/libpthread/thread/thr_getschedparam.c
index 09d8c1bc3283..5dbd1f0e83fb 100644
--- a/lib/libpthread/thread/thr_getschedparam.c
+++ b/lib/libpthread/thread/thr_getschedparam.c
@@ -49,7 +49,8 @@ pthread_getschedparam(pthread_t pthread, int *policy,
/* Find the thread in the list of active threads: */
else if ((ret = _find_thread(pthread)) == 0) {
/* Return the threads base priority and scheduling policy: */
- param->sched_priority = pthread->base_priority;
+ param->sched_priority =
+ PTHREAD_BASE_PRIORITY(pthread->base_priority);
*policy = pthread->attr.sched_policy;
}
diff --git a/lib/libpthread/thread/thr_info.c b/lib/libpthread/thread/thr_info.c
index d091ec12113c..ca91512edb5f 100644
--- a/lib/libpthread/thread/thr_info.c
+++ b/lib/libpthread/thread/thr_info.c
@@ -32,6 +32,7 @@
* $FreeBSD$
*/
#include <stdio.h>
+#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
@@ -296,7 +297,6 @@ _thread_dump_info(void)
/* Close the dump file: */
_thread_sys_close(fd);
}
- return;
}
/* Set the thread name for debug: */
diff --git a/lib/libpthread/thread/thr_init.c b/lib/libpthread/thread/thr_init.c
index 8e13f90dc993..3cbd453d3e27 100644
--- a/lib/libpthread/thread/thr_init.c
+++ b/lib/libpthread/thread/thr_init.c
@@ -90,9 +90,9 @@ _thread_init(void)
int i;
size_t len;
int mib[2];
- struct timeval tv;
struct clockinfo clockinfo;
struct sigaction act;
+ struct itimerval itimer;
/* Check if this function has already been called: */
if (_thread_initial)
@@ -160,7 +160,7 @@ _thread_init(void)
PANIC("Cannot get kernel write pipe flags");
}
/* Allocate and initialize the ready queue: */
- else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_MAX_PRIORITY) != 0) {
+ else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) != 0) {
/* Abort this application: */
PANIC("Cannot allocate priority ready queue.");
}
@@ -171,7 +171,11 @@ _thread_init(void)
* abort:
*/
PANIC("Cannot allocate memory for initial thread");
- } else {
+ }
+ /* Allocate memory for the scheduler stack: */
+ else if ((_thread_kern_sched_stack = malloc(PAGE_SIZE * 10)) == NULL)
+ PANIC("Failed to allocate stack for scheduler");
+ else {
/* Zero the global kernel thread structure: */
memset(&_thread_kern_thread, 0, sizeof(struct pthread));
_thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE;
@@ -211,6 +215,12 @@ _thread_init(void)
_thread_initial->attr.stackaddr_attr = _thread_initial->stack;
_thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL;
+ /* Setup the context for the scheduler: */
+ _setjmp(_thread_kern_sched_jb);
+ SET_STACK_JB(_thread_kern_sched_jb,
+ _thread_kern_sched_stack + PAGE_SIZE*10 - sizeof(double));
+ SET_RETURN_ADDR_JB(_thread_kern_sched_jb, _thread_kern_scheduler);
+
/*
* Write a magic value to the thread structure
* to help identify valid ones:
@@ -236,10 +246,19 @@ _thread_init(void)
TAILQ_INIT(&(_thread_initial->mutexq));
_thread_initial->priority_mutex_count = 0;
- /* Initialize last active time to now: */
- gettimeofday(&tv, NULL);
- _thread_initial->last_active.tv_sec = tv.tv_sec;
- _thread_initial->last_active.tv_usec = tv.tv_usec;
+ /* Initialize the global scheduling time: */
+ _sched_ticks = 0;
+ gettimeofday((struct timeval *) &_sched_tod, NULL);
+
+ /* Initialize last active: */
+ _thread_initial->last_active = (long) _sched_ticks;
+
+ /* Initialize the initial signal frame: */
+ _thread_initial->sigframes[0] = &_thread_initial->sigframe0;
+ _thread_initial->curframe = &_thread_initial->sigframe0;
+ _thread_initial->curframe->ctxtype = CTX_JB_NOSIG;
+ /* Set the base of the stack: */
+ _thread_initial->curframe->stackp = (unsigned long) USRSTACK;
/* Initialise the rest of the fields: */
_thread_initial->poll_data.nfds = 0;
@@ -257,10 +276,13 @@ _thread_init(void)
/* Initialise the global signal action structure: */
sigfillset(&act.sa_mask);
act.sa_handler = (void (*) ()) _thread_sig_handler;
- act.sa_flags = 0;
+ act.sa_flags = SA_SIGINFO;
+
+ /* Clear pending signals for the process: */
+ sigemptyset(&_process_sigpending);
- /* Initialize signal handling: */
- _thread_sig_init();
+ /* Clear the signal queue: */
+ memset(_thread_sigq, 0, sizeof(_thread_sigq));
/* Enter a loop to get the existing signal status: */
for (i = 1; i < NSIG; i++) {
@@ -295,13 +317,19 @@ _thread_init(void)
*/
PANIC("Cannot initialise signal handler");
}
+ _thread_sigact[_SCHED_SIGNAL - 1].sa_flags = SA_SIGINFO;
+ _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO;
+ _thread_sigact[SIGCHLD - 1].sa_flags = SA_SIGINFO;
+
+ /* Get the process signal mask: */
+ _thread_sys_sigprocmask(SIG_SETMASK, NULL, &_process_sigmask);
/* Get the kernel clockrate: */
mib[0] = CTL_KERN;
mib[1] = KERN_CLOCKRATE;
len = sizeof (struct clockinfo);
if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0)
- _clock_res_nsec = clockinfo.tick * 1000;
+ _clock_res_usec = clockinfo.tick;
/* Get the table size: */
if ((_thread_dtablesize = getdtablesize()) < 0) {
@@ -346,6 +374,14 @@ _thread_init(void)
PANIC("Cannot initialize stdio file "
"descriptor table entry");
}
+
+ /* Install the scheduling timer: */
+ itimer.it_interval.tv_sec = 0;
+ itimer.it_interval.tv_usec = _clock_res_usec;
+ itimer.it_value = itimer.it_interval;
+ if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0)
+ PANIC("Cannot set interval timer");
+
}
}
@@ -362,10 +398,6 @@ _thread_init(void)
if (pthread_mutex_init(&_gc_mutex,NULL) != 0 ||
pthread_cond_init(&_gc_cond,NULL) != 0)
PANIC("Failed to initialise garbage collector mutex or condvar");
-
- gettimeofday(&kern_inc_prio_time, NULL);
-
- return;
}
/*
diff --git a/lib/libpthread/thread/thr_join.c b/lib/libpthread/thread/thr_join.c
index 1cffc96eaea3..cda31bda36bd 100644
--- a/lib/libpthread/thread/thr_join.c
+++ b/lib/libpthread/thread/thr_join.c
@@ -40,7 +40,6 @@ int
pthread_join(pthread_t pthread, void **thread_return)
{
int ret = 0;
- pthread_t pthread1 = NULL;
_thread_enter_cancellation_point();
@@ -62,11 +61,7 @@ pthread_join(pthread_t pthread, void **thread_return)
* Find the thread in the list of active threads or in the
* list of dead threads:
*/
- if (_find_thread(pthread) == 0 ||
- _find_dead_thread(pthread) == 0)
- pthread1 = pthread;
-
- if (pthread1 == NULL)
+ if ((_find_thread(pthread) != 0) && (_find_dead_thread(pthread) != 0))
/* Return an error: */
ret = ESRCH;
@@ -77,6 +72,8 @@ pthread_join(pthread_t pthread, void **thread_return)
/* Check if the thread is not dead: */
else if (pthread->state != PS_DEAD) {
+ PTHREAD_ASSERT_NOT_IN_SYNCQ(_thread_run);
+
/* Clear the interrupted flag: */
_thread_run->interrupted = 0;
@@ -87,13 +84,18 @@ pthread_join(pthread_t pthread, void **thread_return)
_thread_kern_sig_defer();
/* Add the running thread to the join queue: */
- TAILQ_INSERT_TAIL(&(pthread->join_queue), _thread_run, qe);
+ TAILQ_INSERT_TAIL(&(pthread->join_queue), _thread_run, sqe);
+ _thread_run->flags |= PTHREAD_FLAGS_IN_JOINQ;
+ _thread_run->data.thread = pthread;
/* Schedule the next thread: */
_thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__);
- if (_thread_run->interrupted != 0)
- TAILQ_REMOVE(&(pthread->join_queue), _thread_run, qe);
+ if (_thread_run->interrupted != 0) {
+ TAILQ_REMOVE(&(pthread->join_queue), _thread_run, sqe);
+ _thread_run->flags &= ~PTHREAD_FLAGS_IN_JOINQ;
+ }
+ _thread_run->data.thread = NULL;
_thread_kern_sig_undefer();
@@ -122,4 +124,15 @@ pthread_join(pthread_t pthread, void **thread_return)
/* Return the completion status: */
return (ret);
}
+
+void
+_join_backout(pthread_t pthread)
+{
+ _thread_kern_sig_defer();
+ if (pthread->state == PS_JOIN) {
+ TAILQ_REMOVE(&pthread->data.thread->join_queue, pthread, sqe);
+ pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ;
+ }
+ _thread_kern_sig_undefer();
+}
#endif
diff --git a/lib/libpthread/thread/thr_kern.c b/lib/libpthread/thread/thr_kern.c
index 96a11da92d3e..23f16bc8f953 100644
--- a/lib/libpthread/thread/thr_kern.c
+++ b/lib/libpthread/thread/thr_kern.c
@@ -52,9 +52,16 @@
#include <pthread.h>
#include "pthread_private.h"
+/* #define DEBUG_THREAD_KERN */
+#ifdef DEBUG_THREAD_KERN
+#define DBG_MSG stdout_debug
+#else
+#define DBG_MSG(x...)
+#endif
+
/* Static function prototype definitions: */
static void
-_thread_kern_poll(int wait_reqd);
+thread_kern_poll(int wait_reqd);
static void
dequeue_signals(void);
@@ -62,18 +69,39 @@ dequeue_signals(void);
static inline void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);
+/* Static variables: */
+static int last_tick = 0;
+
+/*
+ * This is called when a signal handler finishes and wants to
+ * return to a previous frame.
+ */
void
-_thread_kern_sched(ucontext_t * scp)
+_thread_kern_sched_frame(int frame)
{
-#ifndef __alpha__
- char *fdata;
-#endif
- pthread_t pthread, pthread_h = NULL;
- struct itimerval itimer;
- struct timespec ts, ts1;
- struct timeval tv, tv1;
- int set_timer = 0;
+ /*
+ * Flag the pthread kernel as executing scheduler code
+ * to avoid a signal from interrupting this execution and
+ * corrupting the (soon-to-be) current frame.
+ */
+ _thread_kern_in_sched = 1;
+
+ /* Return to the specified frame: */
+ _thread_run->curframe = _thread_run->sigframes[frame];
+ _thread_run->sigframe_count = frame;
+
+ if (_thread_run->sigframe_count == 0)
+ /* Restore the threads priority: */
+ _thread_run->active_priority &= ~PTHREAD_SIGNAL_PRIORITY;
+ /* Switch to the thread scheduler: */
+ ___longjmp(_thread_kern_sched_jb, 1);
+}
+
+
+void
+_thread_kern_sched(ucontext_t *scp)
+{
/*
* Flag the pthread kernel as executing scheduler code
* to avoid a scheduler signal from interrupting this
@@ -84,67 +112,94 @@ _thread_kern_sched(ucontext_t * scp)
/* Check if this function was called from the signal handler: */
if (scp != NULL) {
/*
- * Copy the signal context to the current thread's jump
- * buffer:
+ * The signal handler should have saved the state of
+ * the current thread. Restore the process signal
+ * mask.
*/
- memcpy(&_thread_run->saved_sigcontext, scp, sizeof(_thread_run->saved_sigcontext));
-
-#ifndef __alpha__
- /* Point to the floating point data in the running thread: */
- fdata = _thread_run->saved_fp;
-
- /* Save the floating point data: */
-__asm__("fnsave %0": :"m"(*fdata));
-#endif
-
- /* Flag the signal context as the last state saved: */
- _thread_run->sig_saved = 1;
- }
- /* Save the state of the current thread: */
- else if (setjmp(_thread_run->saved_jmp_buf) != 0) {
+ if (_thread_sys_sigprocmask(SIG_SETMASK,
+ &_process_sigmask, NULL) != 0)
+ PANIC("Unable to restore process mask after signal");
/*
- * This point is reached when a longjmp() is called to
- * restore the state of a thread.
- *
- * This is the normal way out of the scheduler.
+ * We're running on the signal stack; just call the
+ * kernel scheduler directly.
*/
- _thread_kern_in_sched = 0;
-
- if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) &&
- ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) {
- /*
- * Cancellations override signals.
- *
- * Stick a cancellation point at the start of
- * each async-cancellable thread's resumption.
+ DBG_MSG("Entering scheduler due to signal\n");
+ _thread_kern_scheduler();
+ } else {
+ /* Save the state of the current thread: */
+ if (_setjmp(_thread_run->curframe->ctx.jb) == 0) {
+ /* Flag the jump buffer was the last state saved: */
+ _thread_run->curframe->ctxtype = CTX_JB_NOSIG;
+ _thread_run->curframe->longjmp_val = 1;
+ } else {
+ DBG_MSG("Returned from ___longjmp, thread %p\n",
+ _thread_run);
+ /*
+ * This point is reached when a longjmp() is called
+ * to restore the state of a thread.
*
- * We allow threads woken at cancel points to do their
- * own checks.
+ * This is the normal way out of the scheduler.
*/
- pthread_testcancel();
+ _thread_kern_in_sched = 0;
+
+ if (_thread_run->sig_defer_count == 0) {
+ if (((_thread_run->cancelflags &
+ PTHREAD_AT_CANCEL_POINT) == 0) &&
+ ((_thread_run->cancelflags &
+ PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
+ /*
+ * Cancellations override signals.
+ *
+ * Stick a cancellation point at the
+ * start of each async-cancellable
+ * thread's resumption.
+ *
+ * We allow threads woken at cancel
+ * points to do their own checks.
+ */
+ pthread_testcancel();
+ }
+
+ if (_sched_switch_hook != NULL) {
+ /* Run the installed switch hook: */
+ thread_run_switch_hook(_last_user_thread,
+ _thread_run);
+ }
+ return;
}
+ /* Switch to the thread scheduler: */
+ ___longjmp(_thread_kern_sched_jb, 1);
+ }
+}
- /*
- * Check for undispatched signals due to calls to
- * pthread_kill().
- */
- if (SIGNOTEMPTY(_thread_run->sigpend))
- _dispatch_signals();
+void
+_thread_kern_sched_sig(void)
+{
+ _thread_run->check_pending = 1;
+ _thread_kern_sched(NULL);
+}
- if (_sched_switch_hook != NULL) {
- /* Run the installed switch hook: */
- thread_run_switch_hook(_last_user_thread, _thread_run);
- }
- return;
- } else
- /* Flag the jump buffer was the last state saved: */
- _thread_run->sig_saved = 0;
+void
+_thread_kern_scheduler(void)
+{
+ struct pthread_signal_frame *psf;
+ struct timespec ts;
+ struct timeval tv;
+ pthread_t pthread, pthread_h;
+ unsigned int current_tick;
+ int add_to_prioq;
/* If the currently running thread is a user thread, save it: */
if ((_thread_run->flags & PTHREAD_FLAGS_PRIVATE) == 0)
_last_user_thread = _thread_run;
+ /* Are there pending signals for this thread? */
+ if (_thread_run->check_pending != 0) {
+ _thread_run->check_pending = 0;
+ _thread_sig_check_pending(_thread_run);
+ }
+
/*
* Enter a scheduling loop that finds the next thread that is
* ready to run. This loop completes when there are no more threads
@@ -154,29 +209,37 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
while (!(TAILQ_EMPTY(&_thread_list))) {
/* Get the current time of day: */
- gettimeofday(&tv, NULL);
+ GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
+ current_tick = _sched_ticks;
/*
* Protect the scheduling queues from access by the signal
* handler.
*/
_queue_signals = 1;
+ add_to_prioq = 0;
if (_thread_run != &_thread_kern_thread) {
-
/*
* This thread no longer needs to yield the CPU.
*/
_thread_run->yield_on_sig_undefer = 0;
- /*
- * Save the current time as the time that the thread
- * became inactive:
- */
- _thread_run->last_inactive.tv_sec = tv.tv_sec;
- _thread_run->last_inactive.tv_usec = tv.tv_usec;
-
+ if (_thread_run->state != PS_RUNNING) {
+ /*
+ * Save the current time as the time that the
+ * thread became inactive:
+ */
+ _thread_run->last_inactive = (long)current_tick;
+ if (_thread_run->last_inactive <
+ _thread_run->last_active) {
+ /* Account for a rollover: */
+ _thread_run->last_inactive +=
+ UINT_MAX + 1;
+ }
+ }
+
/*
* Place the currently running thread into the
* appropriate queue(s).
@@ -198,22 +261,7 @@ __asm__("fnsave %0": :"m"(*fdata));
* are polled (to preserve round-robin
* scheduling).
*/
- if ((_thread_run->slice_usec != -1) &&
- (_thread_run->attr.sched_policy != SCHED_FIFO)) {
- /*
- * Accumulate the number of microseconds that
- * this thread has run for:
- */
- _thread_run->slice_usec +=
- (_thread_run->last_inactive.tv_sec -
- _thread_run->last_active.tv_sec) * 1000000 +
- _thread_run->last_inactive.tv_usec -
- _thread_run->last_active.tv_usec;
-
- /* Check for time quantum exceeded: */
- if (_thread_run->slice_usec > TIMESLICE_USEC)
- _thread_run->slice_usec = -1;
- }
+ add_to_prioq = 1;
break;
/*
@@ -260,7 +308,7 @@ __asm__("fnsave %0": :"m"(*fdata));
/* Increment spinblock count: */
_spinblock_count++;
- /* fall through */
+ /* FALLTHROUGH */
case PS_FDR_WAIT:
case PS_FDW_WAIT:
case PS_POLL_WAIT:
@@ -277,17 +325,26 @@ __asm__("fnsave %0": :"m"(*fdata));
}
}
- /* Unprotect the scheduling queues: */
- _queue_signals = 0;
-
/*
- * Poll file descriptors to update the state of threads
- * waiting on file I/O where data may be available:
+ * Poll file descriptors only if a new scheduling signal
+ * has occurred or if we have no more runnable threads.
*/
- _thread_kern_poll(0);
+ if (((current_tick = _sched_ticks) != last_tick) ||
+ ((_thread_run->state != PS_RUNNING) &&
+ (PTHREAD_PRIOQ_FIRST() == NULL))) {
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
- /* Protect the scheduling queues: */
- _queue_signals = 1;
+ /*
+ * Poll file descriptors to update the state of threads
+ * waiting on file I/O where data may be available:
+ */
+ thread_kern_poll(0);
+
+ /* Protect the scheduling queues: */
+ _queue_signals = 1;
+ }
+ last_tick = current_tick;
/*
* Wake up threads that have timedout. This has to be
@@ -329,12 +386,37 @@ __asm__("fnsave %0": :"m"(*fdata));
PTHREAD_WAITQ_CLEARACTIVE();
/*
- * Check if there is a current runnable thread that isn't
- * already in the ready queue:
+ * Check to see if the current thread needs to be added
+ * to the priority queue:
*/
- if ((_thread_run != &_thread_kern_thread) &&
- (_thread_run->state == PS_RUNNING) &&
- ((_thread_run->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0)) {
+ if (add_to_prioq != 0) {
+ /*
+ * Save the current time as the time that the
+ * thread became inactive:
+ */
+ current_tick = _sched_ticks;
+ _thread_run->last_inactive = (long)current_tick;
+ if (_thread_run->last_inactive <
+ _thread_run->last_active) {
+ /* Account for a rollover: */
+ _thread_run->last_inactive += UINT_MAX + 1;
+ }
+
+ if ((_thread_run->slice_usec != -1) &&
+ (_thread_run->attr.sched_policy != SCHED_FIFO)) {
+ /*
+ * Accumulate the number of microseconds for
+ * which the current thread has run:
+ */
+ _thread_run->slice_usec +=
+ (_thread_run->last_inactive -
+ _thread_run->last_active) *
+ (long)_clock_res_usec;
+ /* Check for time quantum exceeded: */
+ if (_thread_run->slice_usec > TIMESLICE_USEC)
+ _thread_run->slice_usec = -1;
+ }
+
if (_thread_run->slice_usec == -1) {
/*
* The thread exceeded its time
@@ -366,6 +448,8 @@ __asm__("fnsave %0": :"m"(*fdata));
* thread structure:
*/
_thread_run = &_thread_kern_thread;
+ DBG_MSG("No runnable threads, using kernel thread %p\n",
+ _thread_run);
/* Unprotect the scheduling queues: */
_queue_signals = 0;
@@ -374,20 +458,27 @@ __asm__("fnsave %0": :"m"(*fdata));
* There are no threads ready to run, so wait until
* something happens that changes this condition:
*/
- _thread_kern_poll(1);
- } else {
- /* Remove the thread from the ready queue: */
- PTHREAD_PRIOQ_REMOVE(pthread_h);
+ thread_kern_poll(1);
- /* Get first thread on the waiting list: */
- pthread = TAILQ_FIRST(&_waitingq);
+ /*
+ * This process' usage will likely be very small
+ * while waiting in a poll. Since the scheduling
+ * clock is based on the profiling timer, it is
+ * unlikely that the profiling timer will fire
+ * and update the time of day. To account for this,
+ * get the time of day after polling with a timeout.
+ */
+ gettimeofday((struct timeval *) &_sched_tod, NULL);
+
+ /* Check once more for a runnable thread: */
+ _queue_signals = 1;
+ pthread_h = PTHREAD_PRIOQ_FIRST();
+ _queue_signals = 0;
+ }
- /* Check to see if there is more than one thread: */
- if (pthread_h != TAILQ_FIRST(&_thread_list) ||
- TAILQ_NEXT(pthread_h, tle) != NULL)
- set_timer = 1;
- else
- set_timer = 0;
+ if (pthread_h != NULL) {
+ /* Remove the thread from the ready queue: */
+ PTHREAD_PRIOQ_REMOVE(pthread_h);
/* Unprotect the scheduling queues: */
_queue_signals = 0;
@@ -411,32 +502,19 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
(pthread->active_priority > pthread_h->active_priority)) {
+ /* Remove the thread from the ready queue: */
+ PTHREAD_PRIOQ_REMOVE(pthread);
+
/*
* Insert the lower priority thread
* at the head of its priority list:
*/
PTHREAD_PRIOQ_INSERT_HEAD(pthread_h);
- /* Remove the thread from the ready queue: */
- PTHREAD_PRIOQ_REMOVE(pthread);
-
/* There's a new thread in town: */
pthread_h = pthread;
}
- /* Get first thread on the waiting list: */
- pthread = TAILQ_FIRST(&_waitingq);
-
- /*
- * Check to see if there is more than one
- * thread:
- */
- if (pthread_h != TAILQ_FIRST(&_thread_list) ||
- TAILQ_NEXT(pthread_h, tle) != NULL)
- set_timer = 1;
- else
- set_timer = 0;
-
/* Unprotect the scheduling queues: */
_queue_signals = 0;
}
@@ -448,78 +526,8 @@ __asm__("fnsave %0": :"m"(*fdata));
* Save the current time as the time that the thread
* became active:
*/
- _thread_run->last_active.tv_sec = tv.tv_sec;
- _thread_run->last_active.tv_usec = tv.tv_usec;
-
- /*
- * Define the maximum time before a scheduling signal
- * is required:
- */
- itimer.it_value.tv_sec = 0;
- itimer.it_value.tv_usec = TIMESLICE_USEC;
-
- /*
- * The interval timer is not reloaded when it
- * times out. The interval time needs to be
- * calculated every time.
- */
- itimer.it_interval.tv_sec = 0;
- itimer.it_interval.tv_usec = 0;
-
- /* Get first thread on the waiting list: */
- if ((pthread != NULL) &&
- (pthread->wakeup_time.tv_sec != -1)) {
- /*
- * Calculate the time until this thread
- * is ready, allowing for the clock
- * resolution:
- */
- ts1.tv_sec = pthread->wakeup_time.tv_sec
- - ts.tv_sec;
- ts1.tv_nsec = pthread->wakeup_time.tv_nsec
- - ts.tv_nsec + _clock_res_nsec;
-
- /*
- * Check for underflow of the nanosecond field:
- */
- while (ts1.tv_nsec < 0) {
- /*
- * Allow for the underflow of the
- * nanosecond field:
- */
- ts1.tv_sec--;
- ts1.tv_nsec += 1000000000;
- }
- /*
- * Check for overflow of the nanosecond field:
- */
- while (ts1.tv_nsec >= 1000000000) {
- /*
- * Allow for the overflow of the
- * nanosecond field:
- */
- ts1.tv_sec++;
- ts1.tv_nsec -= 1000000000;
- }
- /*
- * Convert the timespec structure to a
- * timeval structure:
- */
- TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
-
- /*
- * Check if the thread will be ready
- * sooner than the earliest ones found
- * so far:
- */
- if (timercmp(&tv1, &itimer.it_value, <)) {
- /*
- * Update the time value:
- */
- itimer.it_value.tv_sec = tv1.tv_sec;
- itimer.it_value.tv_usec = tv1.tv_usec;
- }
- }
+ current_tick = _sched_ticks;
+ _thread_run->last_active = (long) current_tick;
/*
* Check if this thread is running for the first time
@@ -531,88 +539,51 @@ __asm__("fnsave %0": :"m"(*fdata));
_thread_run->slice_usec = 0;
}
- /* Check if there is more than one thread: */
- if (set_timer != 0) {
- /*
- * Start the interval timer for the
- * calculated time interval:
- */
- if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0) {
- /*
- * Cannot initialise the timer, so
- * abort this process:
- */
- PANIC("Cannot set scheduling timer");
- }
- }
-
- /*
- * Check if this thread is being continued from a
- * longjmp() out of a signal handler:
- */
- if ((_thread_run->jmpflags & JMPFLAGS_LONGJMP) != 0) {
- _thread_run->jmpflags = 0;
- __longjmp(_thread_run->nested_jmp.jmp,
- _thread_run->longjmp_val);
- }
/*
- * Check if this thread is being continued from a
- * _longjmp() out of a signal handler:
+ * If we had a context switch, run any
+ * installed switch hooks.
*/
- else if ((_thread_run->jmpflags & JMPFLAGS__LONGJMP) !=
- 0) {
- _thread_run->jmpflags = 0;
- ___longjmp(_thread_run->nested_jmp.jmp,
- _thread_run->longjmp_val);
+ if ((_sched_switch_hook != NULL) &&
+ (_last_user_thread != _thread_run)) {
+ thread_run_switch_hook(_last_user_thread,
+ _thread_run);
}
/*
- * Check if this thread is being continued from a
- * siglongjmp() out of a signal handler:
+ * Continue the thread at its current frame:
*/
- else if ((_thread_run->jmpflags & JMPFLAGS_SIGLONGJMP)
- != 0) {
- _thread_run->jmpflags = 0;
- __siglongjmp(
- _thread_run->nested_jmp.sigjmp,
- _thread_run->longjmp_val);
- }
- /* Check if a signal context was saved: */
- else if (_thread_run->sig_saved == 1) {
-#ifndef __alpha__
- /*
- * Point to the floating point data in the
- * running thread:
- */
- fdata = _thread_run->saved_fp;
+ psf = _thread_run->curframe;
+ switch(psf->ctxtype) {
+ case CTX_JB_NOSIG:
+ ___longjmp(psf->ctx.jb, psf->longjmp_val);
+ break;
+ case CTX_JB:
+ __longjmp(psf->ctx.jb, psf->longjmp_val);
+ break;
+ case CTX_SJB:
+ __siglongjmp(psf->ctx.sigjb, psf->longjmp_val);
+ break;
+ case CTX_UC:
+ /* XXX - Restore FP registers? */
+ FP_RESTORE_UC(&psf->ctx.uc);
- /* Restore the floating point state: */
- __asm__("frstor %0": :"m"(*fdata));
-#endif
/*
* Do a sigreturn to restart the thread that
* was interrupted by a signal:
*/
_thread_kern_in_sched = 0;
+#if NOT_YET
+ _setcontext(&psf->ctx.uc);
+#else
/*
- * If we had a context switch, run any
- * installed switch hooks.
- */
- if ((_sched_switch_hook != NULL) &&
- (_last_user_thread != _thread_run)) {
- thread_run_switch_hook(_last_user_thread,
- _thread_run);
- }
- _thread_sys_sigreturn(&_thread_run->saved_sigcontext);
- } else {
- /*
- * Do a longjmp to restart the thread that
- * was context switched out (by a longjmp to
- * a different thread):
+ * Ensure the process signal mask is set
+ * correctly:
*/
- __longjmp(_thread_run->saved_jmp_buf, 1);
+ psf->ctx.uc.uc_sigmask = _process_sigmask;
+ _thread_sys_sigreturn(&psf->ctx.uc);
+#endif
+ break;
}
-
/* This point should not be reached. */
PANIC("Thread has returned from sigreturn or longjmp");
}
@@ -645,7 +616,6 @@ _thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
/* Schedule the next thread that is ready: */
_thread_kern_sched(NULL);
- return;
}
void
@@ -675,11 +645,10 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
/* Schedule the next thread that is ready: */
_thread_kern_sched(NULL);
- return;
}
static void
-_thread_kern_poll(int wait_reqd)
+thread_kern_poll(int wait_reqd)
{
int count = 0;
int i, found;
@@ -696,7 +665,7 @@ _thread_kern_poll(int wait_reqd)
}
else {
/* Get the current time of day: */
- gettimeofday(&tv, NULL);
+ GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
_queue_signals = 1;
@@ -713,11 +682,11 @@ _thread_kern_poll(int wait_reqd)
else {
/*
* Calculate the time left for the next thread to
- * timeout allowing for the clock resolution:
+ * timeout:
*/
timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
- 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec +
- _clock_res_nsec) / 1000000);
+ 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec) /
+ 1000000);
/*
* Don't allow negative timeouts:
*/
@@ -1002,9 +971,6 @@ _thread_kern_poll(int wait_reqd)
/* Unprotect the scheduling queues: */
_queue_signals = 0;
}
-
- /* Nothing to return. */
- return;
}
void
@@ -1032,7 +998,7 @@ _thread_kern_set_timeout(const struct timespec * timeout)
_thread_run->wakeup_time.tv_nsec = 0;
} else {
/* Get the current time: */
- gettimeofday(&tv, NULL);
+ GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &current_time);
/* Calculate the time for the current thread to wake up: */
@@ -1046,7 +1012,6 @@ _thread_kern_set_timeout(const struct timespec * timeout)
_thread_run->wakeup_time.tv_nsec -= 1000000000;
}
}
- return;
}
void
@@ -1059,9 +1024,6 @@ _thread_kern_sig_defer(void)
void
_thread_kern_sig_undefer(void)
{
- pthread_t pthread;
- int need_resched = 0;
-
/*
* Perform checks to yield only if we are about to undefer
* signals.
@@ -1077,33 +1039,25 @@ _thread_kern_sig_undefer(void)
/*
* Check if there are queued signals:
*/
- while (_sigq_check_reqd != 0) {
- /* Defer scheduling while we process queued signals: */
- _thread_run->sig_defer_count = 1;
-
- /* Clear the flag before checking the signal queue: */
- _sigq_check_reqd = 0;
-
- /* Dequeue and handle signals: */
- dequeue_signals();
-
- /*
- * Avoiding an unnecessary check to reschedule, check
- * to see if signal handling caused a higher priority
- * thread to become ready.
- */
- if ((need_resched == 0) &&
- (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
- (pthread->active_priority > _thread_run->active_priority))) {
- need_resched = 1;
- }
+ if (_sigq_check_reqd != 0)
+ _thread_kern_sched(NULL);
- /* Reenable signals: */
- _thread_run->sig_defer_count = 0;
- }
+ /*
+ * Check for asynchronous cancellation before delivering any
+ * pending signals:
+ */
+ if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) &&
+ ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
+ pthread_testcancel();
- /* Yield the CPU if necessary: */
- if (need_resched || _thread_run->yield_on_sig_undefer != 0) {
+ /*
+ * If there are pending signals or this thread has
+ * to yield the CPU, call the kernel scheduler:
+ *
+ * XXX - Come back and revisit the pending signal problem
+ */
+ if ((_thread_run->yield_on_sig_undefer != 0) ||
+ SIGNOTEMPTY(_thread_run->sigpend)) {
_thread_run->yield_on_sig_undefer = 0;
_thread_kern_sched(NULL);
}
@@ -1114,35 +1068,13 @@ static void
dequeue_signals(void)
{
char bufr[128];
- int i, num;
- pthread_t pthread;
+ int num;
/*
- * Enter a loop to read and handle queued signals from the
- * pthread kernel pipe:
+ * Enter a loop to clear the pthread kernel pipe:
*/
while (((num = _thread_sys_read(_thread_kern_pipe[0], bufr,
sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) {
- /*
- * The buffer read contains one byte per signal and
- * each byte is the signal number.
- */
- for (i = 0; i < num; i++) {
- if ((int) bufr[i] == _SCHED_SIGNAL) {
- /*
- * Scheduling signals shouldn't ever be
- * queued; just ignore it for now.
- */
- }
- else {
- /* Handle this signal: */
- pthread = _thread_sig_handle((int) bufr[i],
- NULL);
- if (pthread != NULL)
- _thread_sig_deliver(pthread,
- (int) bufr[i]);
- }
- }
}
if ((num < 0) && (errno != EAGAIN)) {
/*
@@ -1151,6 +1083,8 @@ dequeue_signals(void)
*/
PANIC("Unable to read from thread kernel pipe");
}
+ /* Handle any pending signals: */
+ _thread_sig_handle_pending();
}
static inline void
diff --git a/lib/libpthread/thread/thr_mutex.c b/lib/libpthread/thread/thr_mutex.c
index b8877f8453c9..f7662c71c951 100644
--- a/lib/libpthread/thread/thr_mutex.c
+++ b/lib/libpthread/thread/thr_mutex.c
@@ -79,7 +79,7 @@ static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
int
_mutex_reinit(pthread_mutex_t * mutex)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -113,7 +113,7 @@ pthread_mutex_init(pthread_mutex_t * mutex,
int protocol;
int ceiling;
pthread_mutex_t pmutex;
- int ret = 0;
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -203,7 +203,7 @@ pthread_mutex_init(pthread_mutex_t * mutex,
int
pthread_mutex_destroy(pthread_mutex_t * mutex)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL || *mutex == NULL)
ret = EINVAL;
@@ -245,7 +245,7 @@ pthread_mutex_destroy(pthread_mutex_t * mutex)
static int
init_static(pthread_mutex_t *mutex)
{
- int ret;
+ int ret;
_SPINLOCK(&static_init_lock);
@@ -262,7 +262,7 @@ init_static(pthread_mutex_t *mutex)
int
pthread_mutex_trylock(pthread_mutex_t * mutex)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -400,7 +400,7 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
int
pthread_mutex_lock(pthread_mutex_t * mutex)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -610,9 +610,8 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
* Check to see if this thread was interrupted and
* is still in the mutex queue of waiting threads:
*/
- if (_thread_run->interrupted != 0) {
+ if (_thread_run->interrupted != 0)
mutex_queue_remove(*mutex, _thread_run);
- }
/* Unlock the mutex structure: */
_SPINUNLOCK(&(*mutex)->lock);
@@ -647,7 +646,7 @@ _mutex_cv_unlock(pthread_mutex_t * mutex)
int
_mutex_cv_lock(pthread_mutex_t * mutex)
{
- int ret;
+ int ret;
if ((ret = pthread_mutex_lock(mutex)) == 0)
(*mutex)->m_refcount--;
return (ret);
@@ -656,7 +655,7 @@ _mutex_cv_lock(pthread_mutex_t * mutex)
static inline int
mutex_self_trylock(pthread_mutex_t mutex)
{
- int ret = 0;
+ int ret = 0;
switch (mutex->m_type) {
@@ -723,7 +722,7 @@ mutex_self_lock(pthread_mutex_t mutex)
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
- int ret = 0;
+ int ret = 0;
if (mutex == NULL || *mutex == NULL) {
ret = EINVAL;
@@ -1369,6 +1368,38 @@ _mutex_unlock_private(pthread_t pthread)
}
}
+void
+_mutex_lock_backout(pthread_t pthread)
+{
+ struct pthread_mutex *mutex;
+
+ /*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+ if (pthread->state == PS_MUTEX_WAIT) {
+ mutex = pthread->data.mutex;
+
+ /* Lock the mutex structure: */
+ _SPINLOCK(&mutex->lock);
+
+ mutex_queue_remove(mutex, pthread);
+
+ /* This thread is no longer waiting for the mutex: */
+ pthread->data.mutex = NULL;
+
+ /* Unlock the mutex structure: */
+ _SPINUNLOCK(&mutex->lock);
+
+ }
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
+ */
+ _thread_kern_sig_undefer();
+}
+
/*
* Dequeue a waiting thread from the head of a mutex queue in descending
* priority order.
@@ -1379,7 +1410,7 @@ mutex_queue_deq(pthread_mutex_t mutex)
pthread_t pthread;
while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
- TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+ TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
/*
@@ -1400,7 +1431,7 @@ static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
- TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+ TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
}
}
@@ -1413,18 +1444,19 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
+ PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
/*
* For the common case of all threads having equal priority,
* we perform a quick check against the priority of the thread
* at the tail of the queue.
*/
if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
- TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, qe);
+ TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
else {
tid = TAILQ_FIRST(&mutex->m_queue);
while (pthread->active_priority <= tid->active_priority)
- tid = TAILQ_NEXT(tid, qe);
- TAILQ_INSERT_BEFORE(tid, pthread, qe);
+ tid = TAILQ_NEXT(tid, sqe);
+ TAILQ_INSERT_BEFORE(tid, pthread, sqe);
}
pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
}
diff --git a/lib/libpthread/thread/thr_priority_queue.c b/lib/libpthread/thread/thr_priority_queue.c
index 1b9fcba09573..84c30657885a 100644
--- a/lib/libpthread/thread/thr_priority_queue.c
+++ b/lib/libpthread/thread/thr_priority_queue.c
@@ -66,9 +66,13 @@ static int _pq_active = 0;
PANIC(msg); \
} while (0)
#define _PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \
- if ((thrd)->flags & _PQ_IN_SCHEDQ) \
+ if (((thrd)->flags & _PQ_IN_SCHEDQ) != 0) \
PANIC(msg); \
} while (0)
+#define _PQ_ASSERT_PROTECTED(msg) \
+ PTHREAD_ASSERT((_thread_kern_in_sched != 0) || \
+ (_thread_run->sig_defer_count > 0) || \
+ (_sig_in_handler != 0), msg);
#else
@@ -79,11 +83,10 @@ static int _pq_active = 0;
#define _PQ_ASSERT_IN_WAITQ(thrd, msg)
#define _PQ_ASSERT_IN_PRIOQ(thrd, msg)
#define _PQ_ASSERT_NOT_QUEUED(thrd, msg)
-#define _PQ_CHECK_PRIO()
+#define _PQ_ASSERT_PROTECTED(msg)
#endif
-
int
_pq_alloc(pq_queue_t *pq, int minprio, int maxprio)
{
@@ -101,9 +104,7 @@ _pq_alloc(pq_queue_t *pq, int minprio, int maxprio)
else {
/* Remember the queue size: */
pq->pq_size = prioslots;
-
ret = _pq_init(pq);
-
}
return (ret);
}
@@ -142,6 +143,7 @@ _pq_remove(pq_queue_t *pq, pthread_t pthread)
_PQ_ASSERT_INACTIVE("_pq_remove: pq_active");
_PQ_SET_ACTIVE();
_PQ_ASSERT_IN_PRIOQ(pthread, "_pq_remove: Not in priority queue");
+ _PQ_ASSERT_PROTECTED("_pq_remove: prioq not protected!");
/*
* Remove this thread from priority list. Note that if
@@ -172,6 +174,7 @@ _pq_insert_head(pq_queue_t *pq, pthread_t pthread)
_PQ_SET_ACTIVE();
_PQ_ASSERT_NOT_QUEUED(pthread,
"_pq_insert_head: Already in priority queue");
+ _PQ_ASSERT_PROTECTED("_pq_insert_head: prioq not protected!");
TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
if (pq->pq_lists[prio].pl_queued == 0)
@@ -197,6 +200,7 @@ _pq_insert_tail(pq_queue_t *pq, pthread_t pthread)
_PQ_SET_ACTIVE();
_PQ_ASSERT_NOT_QUEUED(pthread,
"_pq_insert_tail: Already in priority queue");
+ _PQ_ASSERT_PROTECTED("_pq_insert_tail: prioq not protected!");
TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
if (pq->pq_lists[prio].pl_queued == 0)
@@ -221,6 +225,7 @@ _pq_first(pq_queue_t *pq)
*/
_PQ_ASSERT_INACTIVE("_pq_first: pq_active");
_PQ_SET_ACTIVE();
+ _PQ_ASSERT_PROTECTED("_pq_first: prioq not protected!");
while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) &&
(pthread == NULL)) {
@@ -250,6 +255,7 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
* Make some assertions when debugging is enabled:
*/
_PQ_ASSERT_ACTIVE("pq_insert_prio_list: pq_active");
+ _PQ_ASSERT_PROTECTED("_pq_insert_prio_list: prioq not protected!");
/*
* The priority queue is in descending priority order. Start at
@@ -270,11 +276,10 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
pq->pq_lists[prio].pl_queued = 1;
}
-#if defined(_PTHREADS_INVARIANTS)
void
_waitq_insert(pthread_t pthread)
{
- pthread_t tid;
+ pthread_t tid;
/*
* Make some assertions when debugging is enabled:
@@ -332,4 +337,3 @@ _waitq_clearactive(void)
_PQ_CLEAR_ACTIVE();
}
#endif
-#endif
diff --git a/lib/libpthread/thread/thr_private.h b/lib/libpthread/thread/thr_private.h
index 9d76747ff763..50e33bc181cb 100644
--- a/lib/libpthread/thread/thr_private.h
+++ b/lib/libpthread/thread/thr_private.h
@@ -51,6 +51,7 @@
*/
#include <setjmp.h>
#include <signal.h>
+#include <stdio.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/time.h>
@@ -60,13 +61,67 @@
#include <pthread_np.h>
/*
+ * Define machine dependent macros to get and set the stack pointer
+ * from the supported contexts. Also define a macro to set the return
+ * address in a jmp_buf context.
+ *
+ * XXX - These need to be moved into architecture dependent support files.
+ */
+#if defined(__i386__)
+#define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2]))
+#define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2]))
+#define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp))
+#define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk)
+#define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk)
+#define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk)
+#define FP_SAVE_UC(ucp) do { \
+ char *fdata; \
+ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \
+ __asm__("fnsave %0": :"m"(*fdata)); \
+} while (0)
+#define FP_RESTORE_UC(ucp) do { \
+ char *fdata; \
+ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \
+ __asm__("frstor %0": :"m"(*fdata)); \
+} while (0)
+#define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (int)(ra)
+#elif defined(__alpha__)
+#include <machine/reg.h>
+#define GET_STACK_JB(jb, stk) ((unsigned long)((jb)[0]._jb[R_SP + 4]))
+#define GET_STACK_SJB(sjb, stk) ((unsigned long)((sjb)[0]._sjb[R_SP + 4]))
+#define GET_STACK_UC(ucp, stk) ((ucp)->uc_mcontext.mc_regs[R_SP])
+#define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk)
+#define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk)
+#define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk)
+#define FP_SAVE_UC(ucp)
+#define FP_RESTORE_UC(ucp)
+#define SET_RETURN_ADDR_JB(jb, ra) do { \
+ (jb)[0]._jb[2] = (long)(ra); \
+ (jb)[0]._jb[R_RA + 4] = 0; \
+ (jb)[0]._jb[R_T12 + 4] = (long)(ra); \
+} while (0)
+#else
+#error "Don't recognize this architecture!"
+#endif
+
+/*
* Kernel fatal error handler macro.
*/
#define PANIC(string) _thread_exit(__FILE__,__LINE__,string)
+
/* Output debug messages like this: */
-#define stdout_debug(_x) _thread_sys_write(1,_x,strlen(_x));
-#define stderr_debug(_x) _thread_sys_write(2,_x,strlen(_x));
+#define stdout_debug(args...) do { \
+ char buf[128]; \
+ snprintf(buf, sizeof(buf), ##args); \
+ _thread_sys_write(1, buf, strlen(buf)); \
+} while (0)
+#define stderr_debug(args...) do { \
+ char buf[128]; \
+ snprintf(buf, sizeof(buf), ##args); \
+ _thread_sys_write(2, buf, strlen(buf)); \
+} while (0)
+
/*
@@ -80,34 +135,13 @@
/*
* Waiting queue manipulation macros (using pqe link):
*/
-#if defined(_PTHREADS_INVARIANTS)
#define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd)
#define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd)
+
+#if defined(_PTHREADS_INVARIANTS)
#define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive()
#define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive()
#else
-#define PTHREAD_WAITQ_REMOVE(thrd) do { \
- TAILQ_REMOVE(&_waitingq,thrd,pqe); \
- (thrd)->flags &= ~PTHREAD_FLAGS_IN_WAITQ; \
-} while (0)
-
-#define PTHREAD_WAITQ_INSERT(thrd) do { \
- if ((thrd)->wakeup_time.tv_sec == -1) \
- TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe); \
- else { \
- pthread_t tid = TAILQ_FIRST(&_waitingq); \
- while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) && \
- ((tid->wakeup_time.tv_sec < (thrd)->wakeup_time.tv_sec) || \
- ((tid->wakeup_time.tv_sec == (thrd)->wakeup_time.tv_sec) && \
- (tid->wakeup_time.tv_nsec <= (thrd)->wakeup_time.tv_nsec)))) \
- tid = TAILQ_NEXT(tid, pqe); \
- if (tid == NULL) \
- TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe); \
- else \
- TAILQ_INSERT_BEFORE(tid,thrd,pqe); \
- } \
- (thrd)->flags |= PTHREAD_FLAGS_IN_WAITQ; \
-} while (0)
#define PTHREAD_WAITQ_CLEARACTIVE()
#define PTHREAD_WAITQ_SETACTIVE()
#endif
@@ -139,6 +173,14 @@
* called with preemption deferred (see thread_kern_sched_[un]defer).
*/
#if defined(_PTHREADS_INVARIANTS)
+#include <assert.h>
+#define PTHREAD_ASSERT(cond, msg) do { \
+ if (!(cond)) \
+ PANIC(msg); \
+} while (0)
+#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \
+ PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \
+ "Illegal call from signal handler");
#define PTHREAD_NEW_STATE(thrd, newstate) do { \
if (_thread_kern_new_state != 0) \
PANIC("Recursive PTHREAD_NEW_STATE"); \
@@ -156,6 +198,8 @@
PTHREAD_SET_STATE(thrd, newstate); \
} while (0)
#else
+#define PTHREAD_ASSERT(cond, msg)
+#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd)
#define PTHREAD_NEW_STATE(thrd, newstate) do { \
if ((thrd)->state != newstate) { \
if ((thrd)->state == PS_RUNNING) { \
@@ -379,21 +423,52 @@ enum pthread_susp {
* almost entirely on this stack.
*/
#define PTHREAD_STACK_INITIAL 0x100000
-/* Address immediately beyond the beginning of the initial thread stack. */
-#define PTHREAD_DEFAULT_PRIORITY 64
-#define PTHREAD_MAX_PRIORITY 126
+
+/*
+ * Define the different priority ranges. All applications have thread
+ * priorities constrained within 0-31. The threads library raises the
+ * priority when delivering signals in order to ensure that signal
+ * delivery happens (from the POSIX spec) "as soon as possible".
+ * In the future, the threads library will also be able to map specific
+ * threads into real-time (cooperating) processes or kernel threads.
+ * The RT and SIGNAL priorities will be used internally and added to
+ * thread base priorities so that the scheduling queue can handle both
+ * normal and RT priority threads with and without signal handling.
+ *
+ * The approach taken is that, within each class, signal delivery
+ * always has priority over thread execution.
+ */
+#define PTHREAD_DEFAULT_PRIORITY 15
#define PTHREAD_MIN_PRIORITY 0
-#define _POSIX_THREAD_ATTR_STACKSIZE
+#define PTHREAD_MAX_PRIORITY 31 /* 0x1F */
+#define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */
+#define PTHREAD_RT_PRIORITY 64 /* 0x40 */
+#define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY
+#define PTHREAD_LAST_PRIORITY \
+ (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY)
+#define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY)
/*
- * Clock resolution in nanoseconds.
+ * Clock resolution in microseconds.
*/
-#define CLOCK_RES_NSEC 10000000
+#define CLOCK_RES_USEC 10000
/*
* Time slice period in microseconds.
*/
-#define TIMESLICE_USEC 100000
+#define TIMESLICE_USEC 20000
+
+/*
+ * Define a thread-safe macro to get the current time of day
+ * which is updated at regular intervals by the scheduling signal
+ * handler.
+ */
+#define GET_CURRENT_TOD(tv) \
+ do { \
+ tv.tv_sec = _sched_tod.tv_sec; \
+ tv.tv_usec = _sched_tod.tv_usec; \
+ } while (tv.tv_sec != _sched_tod.tv_sec)
+
struct pthread_key {
spinlock_t lock;
@@ -487,8 +562,10 @@ union pthread_wait_data {
short branch; /* Line number, for debugging. */
char *fname; /* Source file name for debugging.*/
} fd;
- struct pthread_poll_data * poll_data;
+ FILE *fp;
+ struct pthread_poll_data *poll_data;
spinlock_t *spinlock;
+ struct pthread *thread;
};
/*
@@ -497,6 +574,83 @@ union pthread_wait_data {
*/
typedef void (*thread_continuation_t) (void *);
+struct pthread_state_data {
+ int psd_interrupted;
+ sigset_t psd_sigmask;
+ enum pthread_state psd_state;
+ int psd_flags;
+ struct timespec psd_wakeup_time;
+ union pthread_wait_data psd_wait_data;
+ /* XXX - What about thread->timeout and/or thread->error? */
+};
+
+
+/*
+ * Normally thread contexts are stored as jmp_bufs via _setjmp()/_longjmp(),
+ * but they may also be sigjmp_buf and ucontext_t. When a thread is
+ * interrupted by a signal, it's context is saved as a ucontext_t. An
+ * application is also free to use [_]longjmp()/[_]siglongjmp() to jump
+ * between contexts within the same thread. Future support will also
+ * include setcontext()/getcontext().
+ *
+ * Define an enumerated type that can identify the 4 different context
+ * types.
+ */
+typedef enum {
+ CTX_JB_NOSIG, /* context is jmp_buf without saved sigset */
+ CTX_JB, /* context is jmp_buf (with saved sigset) */
+ CTX_SJB, /* context is sigjmp_buf (with saved sigset) */
+ CTX_UC /* context is ucontext_t (with saved sigset) */
+} thread_context_t;
+
+/*
+ * There are 2 basic contexts that a frame may contain at any
+ * one time:
+ *
+ * o ctx - The context that the thread should return to after normal
+ * completion of the signal handler.
+ * o sig_jb - The context just before the signal handler is invoked.
+ * Attempts at abnormal returns from user supplied signal handlers
+ * will return back to the signal context to perform any necessary
+ * cleanup.
+ */
+struct pthread_signal_frame {
+ /*
+ * This stores the threads state before the signal.
+ */
+ struct pthread_state_data saved_state;
+
+ /* Beginning (bottom) of threads stack frame for this signal. */
+ unsigned long stackp;
+
+ /*
+ * Threads return context; ctxtype identifies the type of context.
+ * For signal frame 0, these point to the context storage area
+ * within the pthread structure. When handling signals (frame > 0),
+ * these point to a context storage area that is allocated off the
+ * threads stack.
+ */
+ union {
+ jmp_buf jb;
+ sigjmp_buf sigjb;
+ ucontext_t uc;
+ } ctx;
+ thread_context_t ctxtype;
+ int longjmp_val;
+
+ /* Threads "jump out of signal handler" destination frame. */
+ int dst_frame;
+
+ /*
+ * Used to return back to the signal handling frame in case
+ * the application tries to change contexts from the handler.
+ */
+ jmp_buf *sig_jb;
+
+ int signo; /* signal, arg 1 to sighandler */
+ int sig_has_args; /* use signal args if true */
+};
+
/*
* Thread structure.
*/
@@ -530,54 +684,19 @@ struct pthread {
void *stack;
struct pthread_attr attr;
-#if (defined(__FreeBSD__) || defined(__NetBSD__)) && defined(__i386__)
- /*
- * Saved floating point registers on systems where they are not
- * saved in the signal context.
- */
- char saved_fp[108];
-#endif
-
- /*
- * Saved signal context used in call to sigreturn by
- * _thread_kern_sched if sig_saved is TRUE.
- */
- ucontext_t saved_sigcontext;
-
- /*
- * Saved jump buffer used in call to longjmp by _thread_kern_sched
- * if sig_saved is FALSE.
- */
- jmp_buf saved_jmp_buf;
- jmp_buf *sighandler_jmp_buf;
-
- /*
- * Saved jump buffers for use when doing nested [sig|_]longjmp()s, as
- * when doing signal delivery.
- */
- union {
- jmp_buf jmp;
- sigjmp_buf sigjmp;
- } nested_jmp;
- int longjmp_val;
-
-#define JMPFLAGS_NONE 0x00
-#define JMPFLAGS_LONGJMP 0x01
-#define JMPFLAGS__LONGJMP 0x02
-#define JMPFLAGS_SIGLONGJMP 0x04
-#define JMPFLAGS_DEFERRED 0x08
- int jmpflags;
-
- /*
- * TRUE if the last state saved was a signal context. FALSE if the
- * last state saved was a jump buffer.
- */
- int sig_saved;
-
/*
* Used for tracking delivery of nested signal handlers.
+ * Signal frame 0 is used for normal context (when no
+ * signal handlers are active for the thread). Frame
+ * 1 is used as the context for the first signal, and
+ * frames 2 .. NSIG-1 are used when additional signals
+ * arrive interrupting already active signal handlers.
*/
- int signal_nest_level;
+ struct pthread_signal_frame *sigframes[NSIG];
+ struct pthread_signal_frame sigframe0;
+ struct pthread_signal_frame *curframe;
+ int sigframe_count;
+ int sigframe_done;
/*
* Cancelability flags - the lower 2 bits are used by cancel
@@ -588,7 +707,7 @@ struct pthread {
#define PTHREAD_CANCEL_NEEDED 0x0010
int cancelflags;
- enum pthread_susp suspended;
+ enum pthread_susp suspended;
thread_continuation_t continuation;
@@ -597,16 +716,16 @@ struct pthread {
*/
sigset_t sigmask;
sigset_t sigpend;
+ int check_pending;
/* Thread state: */
enum pthread_state state;
- enum pthread_state oldstate;
- /* Time that this thread was last made active. */
- struct timeval last_active;
+ /* Scheduling clock when this thread was last made active. */
+ long last_active;
- /* Time that this thread was last made inactive. */
- struct timeval last_inactive;
+ /* Scheduling clock when this thread was last made inactive. */
+ long last_inactive;
/*
* Number of microseconds accumulated by this thread when
@@ -615,12 +734,6 @@ struct pthread {
long slice_usec;
/*
- * Incremental priority accumulated by thread while it is ready to
- * run but is denied being run.
- */
- int inc_prio;
-
- /*
* Time to wake up thread. This is used for sleeping threads and
* for any operation which may time out (such as select).
*/
@@ -640,8 +753,7 @@ struct pthread {
/*
* The current thread can belong to only one scheduling queue at
- * a time (ready or waiting queue). It can also belong to (only)
- * one of:
+ * a time (ready or waiting queue). It can also belong to:
*
* o A queue of threads waiting for a mutex
* o A queue of threads waiting for a condition variable
@@ -651,15 +763,21 @@ struct pthread {
* o A queue of threads needing work done by the kernel thread
* (waiting for a spinlock or file I/O)
*
+ * It is possible for a thread to belong to more than one of the
+ * above queues if it is handling a signal. A thread may only
+ * enter a mutex, condition variable, or join queue when it is
+ * not being called from a signal handler. If a thread is a
+ * member of one of these queues when a signal handler is invoked,
+ * it must remain in the queue. For this reason, the links for
+ * these queues must not be (re)used for other queues.
+ *
* Use pqe for the scheduling queue link (both ready and waiting),
- * and qe for other links.
+ * sqe for synchronization (mutex, condition variable, and join)
+ * queue links, and qe for all other links.
*/
-
- /* Priority queue entry for this thread: */
- TAILQ_ENTRY(pthread) pqe;
-
- /* Queue entry for this thread: */
- TAILQ_ENTRY(pthread) qe;
+ TAILQ_ENTRY(pthread) pqe; /* priority queue link */
+ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */
+ TAILQ_ENTRY(pthread) qe; /* all other queues link */
/* Wait data. */
union pthread_wait_data data;
@@ -694,14 +812,17 @@ struct pthread {
int flags;
#define PTHREAD_FLAGS_PRIVATE 0x0001
#define PTHREAD_EXITING 0x0002
-#define PTHREAD_FLAGS_IN_CONDQ 0x0004 /* in condition queue using qe link*/
-#define PTHREAD_FLAGS_IN_WORKQ 0x0008 /* in work queue using qe link */
-#define PTHREAD_FLAGS_IN_WAITQ 0x0010 /* in waiting queue using pqe link */
-#define PTHREAD_FLAGS_IN_PRIOQ 0x0020 /* in priority queue using pqe link */
-#define PTHREAD_FLAGS_IN_MUTEXQ 0x0040 /* in mutex queue using qe link */
-#define PTHREAD_FLAGS_IN_FILEQ 0x0080 /* in file lock queue using qe link */
-#define PTHREAD_FLAGS_IN_FDQ 0x0100 /* in fd lock queue using qe link */
-#define PTHREAD_FLAGS_TRACE 0x0200 /* for debugging purposes */
+#define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */
+#define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */
+#define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */
+#define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */
+#define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */
+#define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/
+#define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */
+#define PTHREAD_FLAGS_IN_JOINQ 0x0200 /* in join queue using sqe link */
+#define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */
+#define PTHREAD_FLAGS_IN_SYNCQ \
+ (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ | PTHREAD_FLAGS_IN_JOINQ)
/*
* Base priority is the user setable and retrievable priority
@@ -820,14 +941,31 @@ SCLASS int _thread_kern_in_sched
;
#endif
-/* Last time that an incremental priority update was performed: */
-SCLASS struct timeval kern_inc_prio_time
+SCLASS int _sig_in_handler
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0;
+#else
+;
+#endif
+
+/* Time of day at last scheduling timer signal: */
+SCLASS struct timeval volatile _sched_tod
#ifdef GLOBAL_PTHREAD_PRIVATE
= { 0, 0 };
#else
;
#endif
+/*
+ * Current scheduling timer ticks; used as resource usage.
+ */
+SCLASS unsigned int volatile _sched_ticks
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0;
+#else
+;
+#endif
+
/* Dead threads: */
SCLASS TAILQ_HEAD(, pthread) _dead_list
#ifdef GLOBAL_PTHREAD_PRIVATE
@@ -905,9 +1043,9 @@ SCLASS int _thread_dtablesize /* Descriptor table size. */
;
#endif
-SCLASS int _clock_res_nsec /* Clock resolution in nsec. */
+SCLASS int _clock_res_usec /* Clock resolution in usec. */
#ifdef GLOBAL_PTHREAD_PRIVATE
-= CLOCK_RES_NSEC;
+= CLOCK_RES_USEC;
#else
;
#endif
@@ -937,9 +1075,10 @@ SCLASS struct sigaction _thread_sigact[NSIG];
SCLASS int _thread_dfl_count[NSIG];
/*
- * Pending signals for this process.
+ * Pending signals and mask for this process:
*/
SCLASS sigset_t _process_sigpending;
+SCLASS sigset_t _process_sigmask;
/*
* Scheduling queues:
@@ -959,6 +1098,21 @@ SCLASS volatile int _spinblock_count
#endif
;
+/* Used to maintain pending and active signals: */
+struct sigstatus {
+ int pending; /* Is this a pending signal? */
+ int blocked; /*
+ * A handler is currently active for
+ * this signal; ignore subsequent
+ * signals until the handler is done.
+ */
+ int signo; /* arg 1 to signal handler */
+ siginfo_t siginfo; /* arg 2 to signal handler */
+ ucontext_t uc; /* arg 3 to signal handler */
+};
+
+SCLASS struct sigstatus _thread_sigq[NSIG];
+
/* Indicates that the signal queue needs to be checked. */
SCLASS volatile int _sigq_check_reqd
#ifdef GLOBAL_PTHREAD_PRIVATE
@@ -998,6 +1152,18 @@ SCLASS void * _next_stack
#endif
;
+/*
+ * Declare the kernel scheduler jump buffer and stack:
+ */
+SCLASS jmp_buf _thread_kern_sched_jb;
+
+SCLASS void * _thread_kern_sched_stack
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= NULL
+#endif
+;
+
+
/* Used for _PTHREADS_INVARIANTS checking. */
SCLASS int _thread_kern_new_state
#ifdef GLOBAL_PTHREAD_PRIVATE
@@ -1025,15 +1191,19 @@ __BEGIN_DECLS
char *__ttyname_basic(int);
char *__ttyname_r_basic(int, char *, size_t);
char *ttyname_r(int, char *, size_t);
+void _cond_wait_backout(pthread_t);
+void _fd_lock_backout(pthread_t);
int _find_dead_thread(pthread_t);
int _find_thread(pthread_t);
+void _flockfile_backout(pthread_t);
void _funlock_owned(pthread_t);
+void _join_backout(pthread_t);
int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t);
int _thread_fd_lock(int, int, struct timespec *);
int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno);
-void _dispatch_signals(void);
int _mutex_cv_lock(pthread_mutex_t *);
int _mutex_cv_unlock(pthread_mutex_t *);
+void _mutex_lock_backout(pthread_t);
void _mutex_notify_priochange(pthread_t);
int _mutex_reinit(pthread_mutex_t *);
void _mutex_unlock_private(pthread_t);
@@ -1044,14 +1214,15 @@ void _pq_remove(struct pq_queue *pq, struct pthread *);
void _pq_insert_head(struct pq_queue *pq, struct pthread *);
void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
struct pthread *_pq_first(struct pq_queue *pq);
-#if defined(_PTHREADS_INVARIANTS)
void _waitq_insert(pthread_t pthread);
void _waitq_remove(pthread_t pthread);
+#if defined(_PTHREADS_INVARIANTS)
void _waitq_setactive(void);
void _waitq_clearactive(void);
#endif
void _thread_exit(char *, int, char *);
void _thread_exit_cleanup(void);
+void _thread_exit_finish(void);
void _thread_fd_unlock(int, int);
void _thread_fd_unlock_debug(int, int, char *, int);
void _thread_fd_unlock_owned(pthread_t);
@@ -1060,20 +1231,23 @@ void _thread_cleanupspecific(void);
void _thread_dump_info(void);
void _thread_init(void);
void _thread_kern_sched(ucontext_t *);
-void _thread_kern_sched_state(enum pthread_state,char *fname,int lineno);
+void _thread_kern_scheduler(void);
+void _thread_kern_sched_frame(int frame);
+void _thread_kern_sched_sig(void);
+void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno);
void _thread_kern_sched_state_unlock(enum pthread_state state,
spinlock_t *lock, char *fname, int lineno);
void _thread_kern_set_timeout(const struct timespec *);
void _thread_kern_sig_defer(void);
void _thread_kern_sig_undefer(void);
-void _thread_sig_handler(int, int, ucontext_t *);
-pthread_t _thread_sig_handle(int, ucontext_t *);
-void _thread_sig_init(void);
+void _thread_sig_handler(int, siginfo_t *, ucontext_t *);
+void _thread_sig_check_pending(pthread_t pthread);
+void _thread_sig_handle_pending(void);
void _thread_sig_send(pthread_t pthread, int sig);
-void _thread_sig_deliver(pthread_t pthread, int sig);
+void _thread_sig_wrapper(void);
+int _thread_sigframe_find(pthread_t pthread, void *stackp);
void _thread_start(void);
-void _thread_start_sig_handler(void);
-void _thread_seterrno(pthread_t,int);
+void _thread_seterrno(pthread_t, int);
int _thread_fd_table_init(int fd);
pthread_addr_t _thread_gc(pthread_addr_t);
void _thread_enter_cancellation_point(void);
diff --git a/lib/libpthread/thread/thr_sem.c b/lib/libpthread/thread/thr_sem.c
index 2dcf72223fc3..eb4291906f09 100644
--- a/lib/libpthread/thread/thr_sem.c
+++ b/lib/libpthread/thread/thr_sem.c
@@ -29,6 +29,7 @@
* $FreeBSD$
*/
+#include <stdlib.h>
#include <errno.h>
#ifdef _THREAD_SAFE
#include <semaphore.h>
diff --git a/lib/libpthread/thread/thr_setschedparam.c b/lib/libpthread/thread/thr_setschedparam.c
index bce965fe6603..9a44ad7a1426 100644
--- a/lib/libpthread/thread/thr_setschedparam.c
+++ b/lib/libpthread/thread/thr_setschedparam.c
@@ -59,7 +59,8 @@ pthread_setschedparam(pthread_t pthread, int policy,
*/
_thread_kern_sig_defer();
- if (param->sched_priority != pthread->base_priority) {
+ if (param->sched_priority !=
+ PTHREAD_BASE_PRIORITY(pthread->base_priority)) {
/*
* Remove the thread from its current priority
* queue before any adjustments are made to its
@@ -72,6 +73,8 @@ pthread_setschedparam(pthread_t pthread, int policy,
}
/* Set the thread base priority: */
+ pthread->base_priority &=
+ (PTHREAD_SIGNAL_PRIORITY | PTHREAD_RT_PRIORITY);
pthread->base_priority = param->sched_priority;
/* Recalculate the active priority: */
diff --git a/lib/libpthread/thread/thr_sig.c b/lib/libpthread/thread/thr_sig.c
index 86ded7f12cbb..8a9aadf177f6 100644
--- a/lib/libpthread/thread/thr_sig.c
+++ b/lib/libpthread/thread/thr_sig.c
@@ -44,46 +44,47 @@
#include "pthread_private.h"
/* Prototypes: */
-static void thread_sig_check_state(pthread_t pthread, int sig);
-static void thread_sig_finish_longjmp(void *arg);
-static void handle_state_change(pthread_t pthread);
-
+static void thread_sig_add(pthread_t pthread, int sig, int has_args);
+static pthread_t thread_sig_find(int sig);
+static void thread_sig_handle_special(int sig);
+static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp);
+static void thread_sigframe_add(pthread_t thread, int sig);
+static void thread_sigframe_leave(pthread_t thread, int frame);
+static void thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf);
+static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf);
+
+/* #define DEBUG_SIGNAL */
+#ifdef DEBUG_SIGNAL
+#define DBG_MSG stdout_debug
+#else
+#define DBG_MSG(x...)
+#endif
-/* Static variables: */
-static spinlock_t signal_lock = _SPINLOCK_INITIALIZER;
-static unsigned int pending_sigs[NSIG];
-static unsigned int handled_sigs[NSIG];
-static int volatile check_pending = 0;
-static int volatile check_waiting = 0;
+#if defined(_PTHREADS_INVARIANTS)
+#define SIG_SET_ACTIVE() _sig_in_handler = 1
+#define SIG_SET_INACTIVE() _sig_in_handler = 0
+#else
+#define SIG_SET_ACTIVE()
+#define SIG_SET_INACTIVE()
+#endif
-/* Initialize signal handling facility: */
void
-_thread_sig_init(void)
+_thread_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
{
- int i;
+ pthread_t pthread;
+ int current_frame;
+ char c;
- /* Clear pending and handled signal counts: */
- for (i = 1; i < NSIG; i++) {
- pending_sigs[i - 1] = 0;
- handled_sigs[i - 1] = 0;
- }
-
- /* Clear the lock: */
- signal_lock.access_lock = 0;
-
- /* Clear the process pending signals: */
- sigemptyset(&_process_sigpending);
-}
-
-void
-_thread_sig_handler(int sig, int code, ucontext_t * scp)
-{
- pthread_t pthread, pthread_next;
- int i;
- char c;
+ if (ucp == NULL)
+ PANIC("Thread signal handler received null context");
+ DBG_MSG("Got signal %d, current thread %p\n", sig, _thread_run);
/* Check if an interval timer signal: */
if (sig == _SCHED_SIGNAL) {
+ /* Update the scheduling clock: */
+ gettimeofday((struct timeval *)&_sched_tod, NULL);
+ _sched_ticks++;
+
if (_thread_kern_in_sched != 0) {
/*
* The scheduler is already running; ignore this
@@ -97,14 +98,18 @@ _thread_sig_handler(int sig, int code, ucontext_t * scp)
*/
else if (_thread_run->sig_defer_count > 0)
_thread_run->yield_on_sig_undefer = 1;
-
else {
/*
+ * Save the context of the currently running thread:
+ */
+ thread_sig_savecontext(_thread_run, ucp);
+
+ /*
* Schedule the next thread. This function is not
* expected to return because it will do a longjmp
* instead.
*/
- _thread_kern_sched(scp);
+ _thread_kern_sched(ucp);
/*
* This point should not be reached, so abort the
@@ -118,8 +123,8 @@ _thread_sig_handler(int sig, int code, ucontext_t * scp)
* is accessing the scheduling queues or if there is a currently
* running thread that has deferred signals.
*/
- else if ((_queue_signals != 0) || ((_thread_kern_in_sched == 0) &&
- (_thread_run->sig_defer_count > 0))) {
+ else if ((_thread_kern_in_sched != 0) ||
+ (_thread_run->sig_defer_count > 0)) {
/* Cast the signal number to a character variable: */
c = sig;
@@ -127,117 +132,150 @@ _thread_sig_handler(int sig, int code, ucontext_t * scp)
* Write the signal number to the kernel pipe so that it will
* be ready to read when this signal handler returns.
*/
- _thread_sys_write(_thread_kern_pipe[1], &c, 1);
+ if (_queue_signals != 0) {
+ _thread_sys_write(_thread_kern_pipe[1], &c, 1);
+ DBG_MSG("Got signal %d, queueing to kernel pipe\n", sig);
+ }
+ if (_thread_sigq[sig - 1].blocked == 0) {
+ DBG_MSG("Got signal %d, adding to _thread_sigq\n", sig);
+ /*
+ * Do not block this signal; it will be blocked
+ * when the pending signals are run down.
+ */
+ /* _thread_sigq[sig - 1].blocked = 1; */
- /* Indicate that there are queued signals in the pipe. */
- _sigq_check_reqd = 1;
- } else {
- if (_atomic_lock(&signal_lock.access_lock)) {
- /* There is another signal handler running: */
- pending_sigs[sig - 1]++;
- check_pending = 1;
+ /*
+ * Queue the signal, saving siginfo and sigcontext
+ * (ucontext).
+ *
+ * XXX - Do we need to copy siginfo and ucp?
+ */
+ _thread_sigq[sig - 1].signo = sig;
+ if (info != NULL)
+ memcpy(&_thread_sigq[sig - 1].siginfo, info,
+ sizeof(*info));
+ memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp));
+
+ /* Indicate that there are queued signals: */
+ _thread_sigq[sig - 1].pending = 1;
+ _sigq_check_reqd = 1;
}
- else {
- /* It's safe to handle the signal now. */
- pthread = _thread_sig_handle(sig, scp);
+ /* These signals need special handling: */
+ else if (sig == SIGCHLD || sig == SIGTSTP ||
+ sig == SIGTTIN || sig == SIGTTOU) {
+ _thread_sigq[sig - 1].pending = 1;
+ _thread_sigq[sig - 1].signo = sig;
+ _sigq_check_reqd = 1;
+ }
+ else
+ DBG_MSG("Got signal %d, ignored.\n", sig);
+ }
+ /*
+ * The signal handlers should have been installed so that they
+ * cannot be interrupted by other signals.
+ */
+ else if (_thread_sigq[sig - 1].blocked == 0) {
+ /* The signal is not blocked; handle the signal: */
+ current_frame = _thread_run->sigframe_count;
- /* Reset the pending and handled count back to 0: */
- pending_sigs[sig - 1] = 0;
- handled_sigs[sig - 1] = 0;
+ /*
+ * Ignore subsequent occurrences of this signal
+ * until the current signal is handled:
+ */
+ _thread_sigq[sig - 1].blocked = 1;
- if (pthread == NULL)
- signal_lock.access_lock = 0;
- else {
- sigaddset(&pthread->sigmask, sig);
+ /* This signal will be handled; clear the pending flag: */
+ _thread_sigq[sig - 1].pending = 0;
- /*
- * Make sure not to deliver the same signal to
- * the thread twice. sigpend is potentially
- * modified by the call chain
- * _thread_sig_handle() -->
- * thread_sig_check_state(), which can happen
- * just above.
- */
- if (sigismember(&pthread->sigpend, sig))
- sigdelset(&pthread->sigpend, sig);
+ /*
+ * Save siginfo and sigcontext (ucontext).
+ *
+ * XXX - Do we need to copy siginfo and ucp?
+ */
+ _thread_sigq[sig - 1].signo = sig;
- signal_lock.access_lock = 0;
- _thread_sig_deliver(pthread, sig);
- sigdelset(&pthread->sigmask, sig);
- }
- }
+ if (info != NULL)
+ memcpy(&_thread_sigq[sig - 1].siginfo, info,
+ sizeof(*info));
+ memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp));
+ SIG_SET_ACTIVE();
- /* Enter a loop to process pending signals: */
- while ((check_pending != 0) &&
- (_atomic_lock(&signal_lock.access_lock) == 0)) {
- check_pending = 0;
- for (i = 1; i < NSIG; i++) {
- if (pending_sigs[i - 1] > handled_sigs[i - 1]) {
- pending_sigs[i - 1] = handled_sigs[i - 1];
- pthread = _thread_sig_handle(i, scp);
- if (pthread != NULL) {
- sigaddset(&pthread->sigmask, i);
- /* Save the old state: */
- pthread->oldstate = pthread->state;
- signal_lock.access_lock = 0;
- _thread_sig_deliver(pthread, i);
- sigdelset(&pthread->sigmask, i);
- if (_atomic_lock(&signal_lock.access_lock)) {
- check_pending = 1;
- /*
- * Have the lock holder take care
- * of any state changes:
- */
- if (pthread->state != pthread->oldstate)
- check_waiting = 1;
- return;
- }
- if (pthread->state != pthread->oldstate)
- handle_state_change(pthread);
- }
- }
- }
- while (check_waiting != 0) {
- check_waiting = 0;
- /*
- * Enter a loop to wake up all threads waiting
- * for a process to complete:
- */
- for (pthread = TAILQ_FIRST(&_waitingq);
- pthread != NULL; pthread = pthread_next) {
- pthread_next = TAILQ_NEXT(pthread, pqe);
- if (pthread->state == PS_RUNNING)
- handle_state_change(pthread);
- }
- }
- /* Release the lock: */
- signal_lock.access_lock = 0;
+ /* Handle special signals: */
+ thread_sig_handle_special(sig);
+
+ if ((pthread = thread_sig_find(sig)) != NULL) {
+ DBG_MSG("Got signal %d, adding frame to thread %p\n",
+ sig, pthread);
+ /*
+ * A thread was found that can handle the signal.
+ * Save the context of the currently running thread
+ * so that we can switch to another thread without
+ * losing track of where the current thread left off.
+ * This also applies if the current thread is the
+ * thread to be signaled.
+ */
+ thread_sig_savecontext(_thread_run, ucp);
+
+ /* Setup the target thread to receive the signal: */
+ thread_sig_add(pthread, sig, /*has_args*/ 1);
+
+ /* Take a peek at the next ready to run thread: */
+ pthread = PTHREAD_PRIOQ_FIRST();
+ DBG_MSG("Finished adding frame, head of prio list %p\n",
+ pthread);
}
+ else
+ DBG_MSG("No thread to handle signal %d\n", sig);
+ SIG_SET_INACTIVE();
/*
- * Check to see if the current thread performed a
- * [sig|_]longjmp() out of a signal handler.
+ * Switch to a different context if the currently running
+ * thread takes a signal, or if another thread takes a
+ * signal and the currently running thread is not in a
+ * signal handler.
*/
- if ((_thread_run->jmpflags & (JMPFLAGS_LONGJMP |
- JMPFLAGS__LONGJMP)) != 0) {
- _thread_run->jmpflags = JMPFLAGS_NONE;
- __longjmp(_thread_run->nested_jmp.jmp,
- _thread_run->longjmp_val);
- } else if ((_thread_run->jmpflags & JMPFLAGS_SIGLONGJMP) != 0) {
- _thread_run->jmpflags = JMPFLAGS_NONE;
- __siglongjmp(_thread_run->nested_jmp.sigjmp,
- _thread_run->longjmp_val);
+ if ((_thread_run->sigframe_count > current_frame) ||
+ ((pthread != NULL) &&
+ (pthread->active_priority > _thread_run->active_priority))) {
+ /* Enter the kernel scheduler: */
+ DBG_MSG("Entering scheduler from signal handler\n");
+ _thread_kern_sched(ucp);
}
}
+ else {
+ SIG_SET_ACTIVE();
+ thread_sig_handle_special(sig);
+ SIG_SET_INACTIVE();
+ }
}
+static void
+thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp)
+{
+ struct pthread_signal_frame *psf;
+
+ psf = _thread_run->curframe;
+
+ memcpy(&psf->ctx.uc, ucp, sizeof(*ucp));
+
+ /* XXX - Save FP registers too? */
+ FP_SAVE_UC(&psf->ctx.uc);
+
+ /* Mark the context saved as a ucontext: */
+ psf->ctxtype = CTX_UC;
+}
+
+/*
+ * Find a thread that can handle the signal.
+ */
pthread_t
-_thread_sig_handle(int sig, ucontext_t * scp)
+thread_sig_find(int sig)
{
- int i, handler_installed;
+ int handler_installed;
pthread_t pthread, pthread_next;
pthread_t suspended_thread, signaled_thread;
+ DBG_MSG("Looking for thread to handle signal %d\n", sig);
/* Check if the signal requires a dump of thread information: */
if (sig == SIGINFO)
/* Dump thread information to file: */
@@ -249,77 +287,22 @@ _thread_sig_handle(int sig, ucontext_t * scp)
* This shouldn't ever occur (should this panic?).
*/
} else {
- /* Check if a child has terminated: */
- if (sig == SIGCHLD) {
- /*
- * Go through the file list and set all files
- * to non-blocking again in case the child
- * set some of them to block. Sigh.
- */
- for (i = 0; i < _thread_dtablesize; i++) {
- /* Check if this file is used: */
- if (_thread_fd_table[i] != NULL) {
- /*
- * Set the file descriptor to
- * non-blocking:
- */
- _thread_sys_fcntl(i, F_SETFL,
- _thread_fd_table[i]->flags |
- O_NONBLOCK);
- }
- }
- /*
- * Enter a loop to wake up all threads waiting
- * for a process to complete:
- */
- for (pthread = TAILQ_FIRST(&_waitingq);
- pthread != NULL; pthread = pthread_next) {
- /*
- * Grab the next thread before possibly
- * destroying the link entry:
- */
- pthread_next = TAILQ_NEXT(pthread, pqe);
-
- /*
- * If this thread is waiting for a child
- * process to complete, wake it up:
- */
- if (pthread->state == PS_WAIT_WAIT) {
- /* Make the thread runnable: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
-
- /* Return the signal number: */
- pthread->signo = sig;
- }
- }
- }
-
- /*
- * POSIX says that pending SIGCONT signals are
- * discarded when one of these signals occurs.
- */
- if (sig == SIGTSTP || sig == SIGTTIN || sig == SIGTTOU) {
- /*
- * Enter a loop to discard pending SIGCONT
- * signals:
- */
- TAILQ_FOREACH(pthread, &_thread_list, tle) {
- sigdelset(&pthread->sigpend,SIGCONT);
- }
- }
-
/*
* Enter a loop to look for threads that have the signal
* unmasked. POSIX specifies that a thread in a sigwait
* will get the signal over any other threads. Second
- * preference will be threads in in a sigsuspend. If
- * none of the above, then the signal is delivered to the
- * first thread we find. Note that if a custom handler
- * is not installed, the signal only affects threads in
- * sigwait.
+ * preference will be threads in a sigsuspend. Third
+ * preference will be the current thread. If none of the
+ * above, then the signal is delivered to the first thread
+ * that is found. Note that if a custom handler is not
+ * installed, the signal only affects threads in sigwait.
*/
suspended_thread = NULL;
- signaled_thread = NULL;
+ if ((_thread_run != &_thread_kern_thread) &&
+ !sigismember(&_thread_run->sigmask, sig))
+ signaled_thread = _thread_run;
+ else
+ signaled_thread = NULL;
if ((_thread_sigact[sig - 1].sa_handler == SIG_IGN) ||
(_thread_sigact[sig - 1].sa_handler == SIG_DFL))
handler_installed = 0;
@@ -338,6 +321,13 @@ _thread_sig_handle(int sig, ucontext_t * scp)
sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ /*
+ * A signal handler is not invoked for threads
+ * in sigwait. Clear the blocked and pending
+ * flags.
+ */
+ _thread_sigq[sig - 1].blocked = 0;
+ _thread_sigq[sig - 1].pending = 0;
/* Return the signal number: */
pthread->signo = sig;
@@ -349,7 +339,8 @@ _thread_sig_handle(int sig, ucontext_t * scp)
* we find.
*
* Do not attempt to deliver this signal
- * to other threads.
+ * to other threads and do not add the signal
+ * to the process pending set.
*/
return (NULL);
}
@@ -367,7 +358,14 @@ _thread_sig_handle(int sig, ucontext_t * scp)
* Only perform wakeups and signal delivery if there is a
* custom handler installed:
*/
- if (handler_installed != 0) {
+ if (handler_installed == 0) {
+ /*
+ * There is no handler installed. Unblock the
+ * signal so that if a handler _is_ installed, any
+ * subsequent signals can be handled.
+ */
+ _thread_sigq[sig - 1].blocked = 0;
+ } else {
/*
* If we didn't find a thread in the waiting queue,
* check the all threads queue:
@@ -403,12 +401,6 @@ _thread_sig_handle(int sig, ucontext_t * scp)
pthread = suspended_thread;
else
pthread = signaled_thread;
-
- /*
- * Perform any state changes due to signal
- * arrival:
- */
- thread_sig_check_state(pthread, sig);
return (pthread);
}
}
@@ -418,100 +410,166 @@ _thread_sig_handle(int sig, ucontext_t * scp)
return (NULL);
}
-static void
-thread_sig_finish_longjmp(void *arg)
+void
+_thread_sig_check_pending(pthread_t pthread)
{
+ sigset_t sigset;
+ int i;
+
/*
- * Check to see if the current thread performed a [_]longjmp() out of a
- * signal handler.
+ * Check if there are pending signals for the running
+ * thread or process that aren't blocked:
*/
- if ((_thread_run->jmpflags & (JMPFLAGS_LONGJMP | JMPFLAGS__LONGJMP))
- != 0) {
- _thread_run->jmpflags = JMPFLAGS_NONE;
- _thread_run->continuation = NULL;
- __longjmp(_thread_run->nested_jmp.jmp,
- _thread_run->longjmp_val);
+ sigset = pthread->sigpend;
+ SIGSETOR(sigset, _process_sigpending);
+ SIGSETNAND(sigset, pthread->sigmask);
+ if (SIGNOTEMPTY(sigset)) {
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&sigset, i) != 0) {
+ if (sigismember(&pthread->sigpend, i) != 0)
+ thread_sig_add(pthread, i,
+ /*has_args*/ 0);
+ else {
+ thread_sig_add(pthread, i,
+ /*has_args*/ 1);
+ sigdelset(&_process_sigpending, i);
+ }
+ }
+ }
}
+}
+
+/*
+ * This can only be called from the kernel scheduler. It assumes that
+ * all thread contexts are saved and that a signal frame can safely be
+ * added to any user thread.
+ */
+void
+_thread_sig_handle_pending(void)
+{
+ pthread_t pthread;
+ int i, sig;
+
+ PTHREAD_ASSERT(_thread_kern_in_sched != 0,
+ "_thread_sig_handle_pending called from outside kernel schedule");
/*
- * Check to see if the current thread performed a siglongjmp
- * out of a signal handler:
+ * Check the array of pending signals:
*/
- else if ((_thread_run->jmpflags & JMPFLAGS_SIGLONGJMP) != 0) {
- _thread_run->jmpflags = JMPFLAGS_NONE;
- _thread_run->continuation = NULL;
- __siglongjmp(_thread_run->nested_jmp.sigjmp,
- _thread_run->longjmp_val);
+ for (i = 0; i < NSIG; i++) {
+ if (_thread_sigq[i].pending != 0) {
+ /* This signal is no longer pending. */
+ _thread_sigq[i].pending = 0;
+
+ sig = _thread_sigq[i].signo;
+
+ /* Some signals need special handling: */
+ thread_sig_handle_special(sig);
+
+ if (_thread_sigq[i].blocked == 0) {
+ /*
+ * Block future signals until this one
+ * is handled:
+ */
+ _thread_sigq[i].blocked = 1;
+
+ if ((pthread = thread_sig_find(sig)) != NULL) {
+ /*
+ * Setup the target thread to receive
+ * the signal:
+ */
+ thread_sig_add(pthread, sig,
+ /*has_args*/ 1);
+ }
+ }
+ }
}
}
static void
-handle_state_change(pthread_t pthread)
+thread_sig_handle_special(int sig)
{
- /*
- * We should only need to handle threads whose state was
- * changed to running:
- */
- if (pthread->state == PS_RUNNING) {
- switch (pthread->oldstate) {
+ pthread_t pthread, pthread_next;
+ int i;
+
+ switch (sig) {
+ case SIGCHLD:
/*
- * States which do not change when a signal is trapped:
+ * Go through the file list and set all files
+ * to non-blocking again in case the child
+ * set some of them to block. Sigh.
*/
- case PS_DEAD:
- case PS_DEADLOCK:
- case PS_RUNNING:
- case PS_SIGTHREAD:
- case PS_STATE_MAX:
- case PS_SUSPENDED:
- break;
-
+ for (i = 0; i < _thread_dtablesize; i++) {
+ /* Check if this file is used: */
+ if (_thread_fd_table[i] != NULL) {
+ /*
+ * Set the file descriptor to non-blocking:
+ */
+ _thread_sys_fcntl(i, F_SETFL,
+ _thread_fd_table[i]->flags | O_NONBLOCK);
+ }
+ }
/*
- * States which need to return to critical sections
- * before they can switch contexts:
+ * Enter a loop to wake up all threads waiting
+ * for a process to complete:
*/
- case PS_COND_WAIT:
- case PS_FDLR_WAIT:
- case PS_FDLW_WAIT:
- case PS_FILE_WAIT:
- case PS_JOIN:
- case PS_MUTEX_WAIT:
- /* Indicate that the thread was interrupted: */
- pthread->interrupted = 1;
+ for (pthread = TAILQ_FIRST(&_waitingq);
+ pthread != NULL; pthread = pthread_next) {
+ /*
+ * Grab the next thread before possibly
+ * destroying the link entry:
+ */
+ pthread_next = TAILQ_NEXT(pthread, pqe);
+
/*
- * Defer the [sig|_]longjmp until leaving the critical
- * region:
+ * If this thread is waiting for a child
+ * process to complete, wake it up:
*/
- pthread->jmpflags |= JMPFLAGS_DEFERRED;
-
- /* Set the continuation routine: */
- pthread->continuation = thread_sig_finish_longjmp;
- /* FALLTHROUGH */
- case PS_FDR_WAIT:
- case PS_FDW_WAIT:
- case PS_POLL_WAIT:
- case PS_SELECT_WAIT:
- case PS_SIGSUSPEND:
- case PS_SIGWAIT:
- case PS_SLEEP_WAIT:
- case PS_SPINBLOCK:
- case PS_WAIT_WAIT:
- if ((pthread->flags & PTHREAD_FLAGS_IN_WAITQ) != 0) {
- PTHREAD_WAITQ_REMOVE(pthread);
- if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
- PTHREAD_WORKQ_REMOVE(pthread);
+ if (pthread->state == PS_WAIT_WAIT) {
+ /* Make the thread runnable: */
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+
+ /* Return the signal number: */
+ pthread->signo = sig;
}
- break;
}
+ break;
- if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0)
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ /*
+ * POSIX says that pending SIGCONT signals are
+ * discarded when one of these signals occurs.
+ */
+ case SIGTSTP:
+ case SIGTTIN:
+ case SIGTTOU:
+ /*
+ * Enter a loop to discard pending SIGCONT
+ * signals:
+ */
+ TAILQ_FOREACH(pthread, &_thread_list, tle) {
+ sigdelset(&pthread->sigpend, SIGCONT);
+ }
+ break;
+
+ default:
+ break;
}
}
-
-/* Perform thread specific actions in response to a signal: */
+/*
+ * Perform thread specific actions in response to a signal.
+ * This function is only called if there is a handler installed
+ * for the signal, and if the target thread has the signal
+ * unmasked.
+ */
static void
-thread_sig_check_state(pthread_t pthread, int sig)
+thread_sig_add(pthread_t pthread, int sig, int has_args)
{
+ int restart, frame;
+ int block_signals = 0;
+ int suppress_handler = 0;
+
+ restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;
+
/*
* Process according to thread state:
*/
@@ -519,32 +577,54 @@ thread_sig_check_state(pthread_t pthread, int sig)
/*
* States which do not change when a signal is trapped:
*/
- case PS_COND_WAIT:
case PS_DEAD:
case PS_DEADLOCK:
- case PS_FILE_WAIT:
- case PS_JOIN:
- case PS_MUTEX_WAIT:
- case PS_RUNNING:
case PS_STATE_MAX:
case PS_SIGTHREAD:
- case PS_SPINBLOCK:
+ /*
+ * You can't call a signal handler for threads in these
+ * states.
+ */
+ suppress_handler = 1;
+ break;
+
+ /*
+ * States which do not need any cleanup handling when signals
+ * occur:
+ */
+ case PS_RUNNING:
+ /*
+ * Remove the thread from the queue before changing its
+ * priority:
+ */
+ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0)
+ PTHREAD_PRIOQ_REMOVE(pthread);
+ break;
+
case PS_SUSPENDED:
- /* Increment the pending signal count. */
- sigaddset(&pthread->sigpend,sig);
+ break;
+
+ case PS_SPINBLOCK:
+ /* Remove the thread from the workq and waitq: */
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_WAITQ_REMOVE(pthread);
+ /* Make the thread runnable: */
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
break;
case PS_SIGWAIT:
+ /* The signal handler is not called for threads in SIGWAIT. */
+ suppress_handler = 1;
/* Wake up the thread if the signal is blocked. */
if (sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread, PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
} else
/* Increment the pending signal count. */
- sigaddset(&pthread->sigpend,sig);
+ sigaddset(&pthread->sigpend, sig);
break;
/*
@@ -552,64 +632,142 @@ thread_sig_check_state(pthread_t pthread, int sig)
* SIGCHLD signals.
*/
case PS_WAIT_WAIT:
- /*
- * Check for signals other than the death of a child
- * process:
- */
- if (sig != SIGCHLD)
- /* Flag the operation as interrupted: */
- pthread->interrupted = 1;
+ if (sig == SIGCHLD) {
+ /* Change the state of the thread to run: */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ /* Return the signal number: */
+ pthread->signo = sig;
+ }
+ else {
+ /*
+ * Mark the thread as interrupted only if the
+ * restart flag is not set on the signal action:
+ */
+ if (restart == 0)
+ pthread->interrupted = 1;
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ }
+ break;
- /* Return the signal number: */
- pthread->signo = sig;
+ /*
+ * States which cannot be interrupted but still require the
+ * signal handler to run:
+ */
+ case PS_COND_WAIT:
+ case PS_JOIN:
+ case PS_MUTEX_WAIT:
+ /*
+ * Remove the thread from the wait queue. It will
+ * be added back to the wait queue once all signal
+ * handlers have been invoked.
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
break;
/*
- * States that are interrupted by the occurrence of a signal
- * other than the scheduling alarm:
+ * States which are interruptible but may need to be removed
+ * from queues before any signal handler is called.
+ *
+ * XXX - We may not need to handle this condition, but will
+ * mark it as a potential problem.
*/
case PS_FDLR_WAIT:
case PS_FDLW_WAIT:
+ case PS_FILE_WAIT:
+ if (restart == 0)
+ pthread->interrupted = 1;
+ /*
+ * Remove the thread from the wait queue. Our
+ * signal handler hook will remove this thread
+ * from the fd or file queue before invoking
+ * the actual handler.
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ /*
+ * To ensure the thread is removed from the fd and file
+ * queues before any other signal interrupts it, set the
+ * signal mask to block all signals. As soon as the thread
+ * is removed from the queue the signal mask will be
+ * restored.
+ */
+ block_signals = 1;
+ break;
+
+ /*
+ * States which are interruptible:
+ */
case PS_FDR_WAIT:
case PS_FDW_WAIT:
- case PS_POLL_WAIT:
- case PS_SLEEP_WAIT:
- case PS_SELECT_WAIT:
- if ((_thread_sigact[sig - 1].sa_flags & SA_RESTART) == 0) {
- /* Flag the operation as interrupted: */
+ if (restart == 0) {
+ /*
+ * Flag the operation as interrupted and
+ * set the state to running:
+ */
pthread->interrupted = 1;
-
- if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
- PTHREAD_WORKQ_REMOVE(pthread);
-
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
-
- /* Return the signal number: */
- pthread->signo = sig;
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
}
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_WAITQ_REMOVE(pthread);
break;
- case PS_SIGSUSPEND:
+ case PS_POLL_WAIT:
+ case PS_SELECT_WAIT:
+ case PS_SLEEP_WAIT:
/*
- * Only wake up the thread if there is a handler installed
- * for the signal.
+ * Unmasked signals always cause poll, select, and sleep
+ * to terminate early, regardless of SA_RESTART:
*/
- if (_thread_sigact[sig - 1].sa_handler != SIG_DFL) {
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ pthread->interrupted = 1;
+ /* Remove threads in poll and select from the workq: */
+ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0)
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ break;
- /* Return the signal number: */
- pthread->signo = sig;
- }
+ case PS_SIGSUSPEND:
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
break;
}
+
+ if (suppress_handler == 0) {
+ /*
+ * Save the current state of the thread and add a
+ * new signal frame.
+ */
+ frame = pthread->sigframe_count;
+ thread_sigframe_save(pthread, pthread->curframe);
+ thread_sigframe_add(pthread, sig);
+ pthread->sigframes[frame + 1]->sig_has_args = has_args;
+ SIGSETOR(pthread->sigmask, _thread_sigact[sig - 1].sa_mask);
+ if (block_signals != 0) {
+ /* Save the signal mask and block all signals: */
+ pthread->sigframes[frame + 1]->saved_state.psd_sigmask =
+ pthread->sigmask;
+ sigfillset(&pthread->sigmask);
+ }
+
+ /* Make sure the thread is runnable: */
+ if (pthread->state != PS_RUNNING)
+ PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ /*
+ * The thread should be removed from all scheduling
+ * queues at this point. Raise the priority and place
+ * the thread in the run queue.
+ */
+ pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY;
+ if (pthread != _thread_run)
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ }
}
-/* Send a signal to a specific thread (ala pthread_kill): */
+/*
+ * Send a signal to a specific thread (ala pthread_kill):
+ */
void
_thread_sig_send(pthread_t pthread, int sig)
{
@@ -620,142 +778,400 @@ _thread_sig_send(pthread_t pthread, int sig)
if (pthread->state == PS_SIGWAIT &&
sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread, PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
- } else if (pthread->state != PS_SIGWAIT &&
- !sigismember(&pthread->sigmask, sig)) {
- /* Perform any state changes due to signal arrival: */
- thread_sig_check_state(pthread, sig);
- /* Increment the pending signal count. */
- sigaddset(&pthread->sigpend,sig);
+ } else if (pthread == _thread_run) {
+ /* Add the signal to the pending set: */
+ sigaddset(&pthread->sigpend, sig);
+ /*
+ * Deliver the signal to the process if a
+ * handler is not installed:
+ */
+ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL)
+ kill(getpid(), sig);
+ if (!sigismember(&pthread->sigmask, sig)) {
+ /*
+ * Call the kernel scheduler which will safely
+ * install a signal frame for this thread:
+ */
+ _thread_kern_sched_sig();
+ }
} else {
- /* Increment the pending signal count. */
- sigaddset(&pthread->sigpend,sig);
+ if (pthread->state != PS_SIGWAIT &&
+ !sigismember(&pthread->sigmask, sig)) {
+ /* Protect the scheduling queues: */
+ _thread_kern_sig_defer();
+ /*
+ * Perform any state changes due to signal
+ * arrival:
+ */
+ thread_sig_add(pthread, sig, /* has args */ 0);
+ /* Unprotect the scheduling queues: */
+ _thread_kern_sig_undefer();
+ }
+ else
+ /* Increment the pending signal count. */
+ sigaddset(&pthread->sigpend,sig);
+
+ /*
+ * Deliver the signal to the process if a
+ * handler is not installed:
+ */
+ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL)
+ kill(getpid(), sig);
}
}
}
-/* Dispatch pending signals to the running thread: */
+/*
+ * User thread signal handler wrapper.
+ *
+ * thread - current running thread
+ */
void
-_dispatch_signals()
+_thread_sig_wrapper(void)
{
- sigset_t sigset;
- int i;
+ void (*sigfunc)(int, siginfo_t *, void *);
+ struct pthread_signal_frame *psf;
+ pthread_t thread;
+ int dead = 0;
+ int i, sig, has_args;
+ int frame, dst_frame;
+
+ thread = _thread_run;
+
+ /* Get the current frame and state: */
+ frame = thread->sigframe_count;
+ PTHREAD_ASSERT(frame > 0, "Invalid signal frame in signal handler");
+ psf = thread->curframe;
+
+	/* Check the thread's previous state: */
+ if (psf->saved_state.psd_state != PS_RUNNING) {
+ /*
+ * Do a little cleanup handling for those threads in
+ * queues before calling the signal handler. Signals
+ * for these threads are temporarily blocked until
+ * after cleanup handling.
+ */
+ switch (psf->saved_state.psd_state) {
+ case PS_FDLR_WAIT:
+ case PS_FDLW_WAIT:
+ _fd_lock_backout(thread);
+ psf->saved_state.psd_state = PS_RUNNING;
+ /* Reenable signals: */
+ thread->sigmask = psf->saved_state.psd_sigmask;
+ break;
+
+ case PS_FILE_WAIT:
+ _flockfile_backout(thread);
+ psf->saved_state.psd_state = PS_RUNNING;
+ /* Reenable signals: */
+ thread->sigmask = psf->saved_state.psd_sigmask;
+ break;
+
+ default:
+ break;
+ }
+ }
/*
- * Check if there are pending signals for the running
- * thread or process that aren't blocked:
+ * Unless the thread exits or longjmps out of the signal handler,
+ * return to the previous frame:
*/
- sigset = _thread_run->sigpend;
- SIGSETOR(sigset, _process_sigpending);
- SIGSETNAND(sigset, _thread_run->sigmask);
- if (SIGNOTEMPTY(sigset)) {
+ dst_frame = frame - 1;
+
+ /*
+ * Check that a custom handler is installed and if the signal
+ * is not blocked:
+ */
+ sigfunc = _thread_sigact[psf->signo - 1].sa_sigaction;
+ if (((__sighandler_t *)sigfunc != SIG_DFL) &&
+ ((__sighandler_t *)sigfunc != SIG_IGN)) {
/*
- * Enter a loop to calculate deliverable pending signals
- * before actually delivering them. The pending signals
- * must be removed from the pending signal sets before
- * calling the signal handler because the handler may
- * call library routines that again check for and deliver
- * pending signals.
+ * The signal jump buffer is allocated off the stack.
+ * If the signal handler tries to [_][sig]longjmp() or
+ * setcontext(), our wrapped versions of these routines
+ * will copy the user supplied jump buffer or context
+ * to the destination signal frame, set the destination
+ * signal frame in psf->dst_frame, and _longjmp() back
+ * to here.
*/
- for (i = 1; i < NSIG; i++) {
+ jmp_buf jb;
+
+ /*
+ * Set up the context for abnormal returns out of signal
+ * handlers.
+ */
+ psf->sig_jb = &jb;
+ if (_setjmp(jb) == 0) {
+ DBG_MSG("_thread_sig_wrapper: Entering frame %d, "
+ "stack 0x%lx\n", frame, GET_STACK_JB(jb));
/*
- * Check that a custom handler is installed
- * and if the signal is not blocked:
+ * Invalidate the destination frame before calling
+ * the signal handler.
*/
- if (_thread_sigact[i - 1].sa_handler != SIG_DFL &&
- _thread_sigact[i - 1].sa_handler != SIG_IGN &&
- sigismember(&sigset, i)) {
- if (sigismember(&_thread_run->sigpend,i))
- /* Clear the thread pending signal: */
- sigdelset(&_thread_run->sigpend,i);
- else
- /* Clear the process pending signal: */
- sigdelset(&_process_sigpending,i);
- }
+ psf->dst_frame = -1;
+
+ /*
+ * Dispatch the signal via the custom signal
+ * handler:
+ */
+ if (psf->sig_has_args == 0)
+ (*(sigfunc))(psf->signo, NULL, NULL);
+ else if ((_thread_sigact[psf->signo - 1].sa_flags &
+ SA_SIGINFO) != 0)
+ (*(sigfunc))(psf->signo,
+ &_thread_sigq[psf->signo - 1].siginfo,
+ &_thread_sigq[psf->signo - 1].uc);
else
- /* Remove the signal if it can't be handled: */
- sigdelset(&sigset, i);
+ (*(sigfunc))(psf->signo,
+ (siginfo_t *)_thread_sigq[psf->signo - 1].siginfo.si_code,
+ &_thread_sigq[psf->signo - 1].uc);
}
+ else {
+ /*
+ * The return from _setjmp() should only be non-zero
+ * when the signal handler wants to xxxlongjmp() or
+ * setcontext() to a different context, or if the
+ * thread has exited (via pthread_exit).
+ */
+ /*
+ * Grab a copy of the destination frame before it
+ * gets clobbered after unwinding.
+ */
+ dst_frame = psf->dst_frame;
+ DBG_MSG("Abnormal exit from handler for signal %d, "
+ "frame %d\n", psf->signo, frame);
+
+ /* Has the thread exited? */
+ if ((dead = thread->flags & PTHREAD_EXITING) != 0)
+ /* When exiting, unwind to frame 0. */
+ dst_frame = 0;
+ else if ((dst_frame < 0) || (dst_frame > frame))
+ PANIC("Attempt to unwind to invalid "
+ "signal frame");
+
+ /* Unwind to the target frame: */
+ for (i = frame; i > dst_frame; i--) {
+ DBG_MSG("Leaving frame %d, signal %d\n", i,
+ thread->sigframes[i]->signo);
+ /* Leave the current signal frame: */
+ thread_sigframe_leave(thread, i);
- /* Now deliver the signals: */
- for (i = 1; i < NSIG; i++) {
- if (sigismember(&sigset, i))
- /* Deliver the signal to the running thread: */
- _thread_sig_deliver(_thread_run, i);
+ /*
+			 * Save whatever is needed out of the state
+			 * data; as soon as the frame count is
+			 * decremented, another signal can arrive
+			 * and corrupt this view of the state data.
+ */
+ sig = thread->sigframes[i]->signo;
+ has_args = thread->sigframes[i]->sig_has_args;
+
+ /*
+ * We're done with this signal frame:
+ */
+ thread->curframe = thread->sigframes[i - 1];
+ thread->sigframe_count = i - 1;
+
+ /*
+ * Only unblock the signal if it was a
+ * process signal as opposed to a signal
+ * generated by pthread_kill().
+ */
+ if (has_args != 0)
+ _thread_sigq[sig - 1].blocked = 0;
+ }
}
}
+
+ /*
+ * Call the kernel scheduler to schedule the next
+ * thread.
+ */
+ if (dead == 0) {
+ /* Restore the threads state: */
+ thread_sigframe_restore(thread, thread->sigframes[dst_frame]);
+ _thread_kern_sched_frame(dst_frame);
+ }
+ else {
+ PTHREAD_ASSERT(dst_frame == 0,
+ "Invalid signal frame for dead thread");
+
+ /* Perform any necessary cleanup before exiting. */
+ thread_sigframe_leave(thread, 0);
+
+ /* This should never return: */
+ _thread_exit_finish();
+ PANIC("Return from _thread_exit_finish in signal wrapper");
+ }
}
-/* Deliver a signal to a thread: */
-void
-_thread_sig_deliver(pthread_t pthread, int sig)
+static void
+thread_sigframe_add(pthread_t thread, int sig)
{
- sigset_t mask;
- pthread_t pthread_saved;
- jmp_buf jb, *saved_sighandler_jmp_buf;
+ unsigned long stackp = 0;
+
+	/* Get the top of the thread's stack: */
+ switch (thread->curframe->ctxtype) {
+ case CTX_JB:
+ case CTX_JB_NOSIG:
+ stackp = GET_STACK_JB(thread->curframe->ctx.jb);
+ break;
+ case CTX_SJB:
+ stackp = GET_STACK_SJB(thread->curframe->ctx.sigjb);
+ break;
+ case CTX_UC:
+ stackp = GET_STACK_UC(&thread->curframe->ctx.uc);
+ break;
+ default:
+ PANIC("Invalid thread context type");
+ break;
+ }
/*
- * Check that a custom handler is installed
- * and if the signal is not blocked:
+ * Leave a little space on the stack and round down to the
+ * nearest aligned word:
*/
- if (_thread_sigact[sig - 1].sa_handler != SIG_DFL &&
- _thread_sigact[sig - 1].sa_handler != SIG_IGN) {
- /* Save the current thread: */
- pthread_saved = _thread_run;
+ stackp -= sizeof(double);
+ stackp &= ~0x3UL;
+
+ /* Allocate room on top of the stack for a new signal frame: */
+ stackp -= sizeof(struct pthread_signal_frame);
+
+ /* Set up the new frame: */
+ thread->sigframe_count++;
+ thread->sigframes[thread->sigframe_count] =
+ (struct pthread_signal_frame *) stackp;
+ thread->curframe = thread->sigframes[thread->sigframe_count];
+ thread->curframe->stackp = stackp;
+ thread->curframe->ctxtype = CTX_JB;
+ thread->curframe->longjmp_val = 1;
+ thread->curframe->signo = sig;
- /* Save the threads signal mask: */
- mask = pthread->sigmask;
-
- /*
- * Add the current signal and signal handler
- * mask to the thread's current signal mask:
- */
- SIGSETOR(pthread->sigmask, _thread_sigact[sig - 1].sa_mask);
- sigaddset(&pthread->sigmask, sig);
+ /*
+ * Set up the context:
+ */
+ _setjmp(thread->curframe->ctx.jb);
+ SET_STACK_JB(thread->curframe->ctx.jb, stackp);
+ SET_RETURN_ADDR_JB(thread->curframe->ctx.jb, _thread_sig_wrapper);
+}
- /* Current thread inside critical region? */
- if (_thread_run->sig_defer_count > 0)
- pthread->sig_defer_count++;
+/*
+ * Locate the signal frame from the specified stack pointer.
+ */
+int
+_thread_sigframe_find(pthread_t pthread, void *stackp)
+{
+ int frame;
- /* Increment the number of nested signals being handled. */
- pthread->signal_nest_level++;
+ /*
+ * Find the destination of the target frame based on the
+ * given stack pointer.
+ */
+ for (frame = pthread->sigframe_count; frame >= 0; frame--) {
+ if (stackp < (void *)pthread->sigframes[frame]->stackp)
+ break;
+ }
+ return (frame);
+}
+
+void
+thread_sigframe_leave(pthread_t thread, int frame)
+{
+ struct pthread_state_data *psd;
- /*
- * The jump buffer is allocated off the stack and the current
- * jump buffer is saved. If the signal handler tries to
- * [sig|_]longjmp(), our version of [sig|_]longjmp() will copy
- * the user supplied jump buffer into
- * _thread_run->nested_jmp.[sig]jmp and _longjmp() back to here.
- */
- saved_sighandler_jmp_buf = pthread->sighandler_jmp_buf;
- pthread->sighandler_jmp_buf = &jb;
+ psd = &thread->sigframes[frame]->saved_state;
- _thread_run = pthread;
+ /*
+ * Perform any necessary cleanup for this signal frame:
+ */
+ switch (psd->psd_state) {
+ case PS_DEAD:
+ case PS_DEADLOCK:
+ case PS_RUNNING:
+ case PS_SIGTHREAD:
+ case PS_STATE_MAX:
+ case PS_SUSPENDED:
+ break;
- if (_setjmp(jb) == 0) {
- /*
- * Dispatch the signal via the custom signal
- * handler:
- */
- (*(_thread_sigact[sig - 1].sa_handler))(sig);
- }
+ /*
+ * Threads in the following states need to be removed
+ * from queues.
+ */
+ case PS_COND_WAIT:
+ _cond_wait_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- _thread_run = pthread_saved;
+ case PS_FDLR_WAIT:
+ case PS_FDLW_WAIT:
+ _fd_lock_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- pthread->sighandler_jmp_buf = saved_sighandler_jmp_buf;
+ case PS_FILE_WAIT:
+ _flockfile_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- /* Decrement the signal nest level. */
- pthread->signal_nest_level--;
+ case PS_JOIN:
+ _join_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- /* Current thread inside critical region? */
- if (_thread_run->sig_defer_count > 0)
- pthread->sig_defer_count--;
+ case PS_MUTEX_WAIT:
+ _mutex_lock_backout(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
+ PTHREAD_WAITQ_REMOVE(thread);
+ break;
- /* Restore the threads signal mask: */
- pthread->sigmask = mask;
+ case PS_FDR_WAIT:
+ case PS_FDW_WAIT:
+ case PS_POLL_WAIT:
+ case PS_SELECT_WAIT:
+ case PS_SIGSUSPEND:
+ case PS_SIGWAIT:
+ case PS_SLEEP_WAIT:
+ case PS_SPINBLOCK:
+ case PS_WAIT_WAIT:
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) {
+ PTHREAD_WAITQ_REMOVE(thread);
+ if ((psd->psd_flags & PTHREAD_FLAGS_IN_WORKQ) != 0)
+ PTHREAD_WORKQ_REMOVE(thread);
+ }
+ break;
}
}
+
+static void
+thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf)
+{
+ thread->interrupted = psf->saved_state.psd_interrupted;
+ thread->sigmask = psf->saved_state.psd_sigmask;
+ thread->state = psf->saved_state.psd_state;
+ thread->flags = psf->saved_state.psd_flags;
+ thread->wakeup_time = psf->saved_state.psd_wakeup_time;
+ thread->data = psf->saved_state.psd_wait_data;
+}
+
+static void
+thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf)
+{
+ psf->saved_state.psd_interrupted = thread->interrupted;
+ psf->saved_state.psd_sigmask = thread->sigmask;
+ psf->saved_state.psd_state = thread->state;
+ psf->saved_state.psd_flags = thread->flags;
+ thread->flags &= PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE |
+ PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ |
+ PTHREAD_FLAGS_IN_JOINQ;
+ psf->saved_state.psd_wakeup_time = thread->wakeup_time;
+ psf->saved_state.psd_wait_data = thread->data;
+}
+
#endif
diff --git a/lib/libpthread/thread/thr_sigaction.c b/lib/libpthread/thread/thr_sigaction.c
index 319999bf3047..e78f3290154c 100644
--- a/lib/libpthread/thread/thr_sigaction.c
+++ b/lib/libpthread/thread/thr_sigaction.c
@@ -74,12 +74,13 @@ _sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
*/
if (act != NULL && sig != _SCHED_SIGNAL && sig != SIGCHLD &&
sig != SIGINFO) {
- /* Initialise the global signal action structure: */
- gact.sa_mask = act->sa_mask;
- gact.sa_flags = 0;
-
- /* Ensure the scheduling signal is masked: */
- sigaddset(&gact.sa_mask, _SCHED_SIGNAL);
+ /*
+ * Ensure the signal handler cannot be interrupted
+ * by other signals. Always request the POSIX signal
+ * handler arguments.
+ */
+ sigfillset(&gact.sa_mask);
+ gact.sa_flags = SA_SIGINFO;
/*
* Check if the signal handler is being set to
diff --git a/lib/libpthread/thread/thr_sigmask.c b/lib/libpthread/thread/thr_sigmask.c
index b880d9c748c8..bdb0b438ec48 100644
--- a/lib/libpthread/thread/thr_sigmask.c
+++ b/lib/libpthread/thread/thr_sigmask.c
@@ -43,7 +43,8 @@
int
pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
- int ret = 0;
+ sigset_t sigset;
+ int ret = 0;
/* Check if the existing signal process mask is to be returned: */
if (oset != NULL) {
@@ -81,10 +82,18 @@ pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
}
/*
- * Dispatch signals to the running thread that are pending
- * and now unblocked:
+ * Check if there are pending signals for the running
+ * thread or process that aren't blocked:
*/
- _dispatch_signals();
+ sigset = _thread_run->sigpend;
+ SIGSETOR(sigset, _process_sigpending);
+ SIGSETNAND(sigset, _thread_run->sigmask);
+ if (SIGNOTEMPTY(sigset))
+ /*
+ * Call the kernel scheduler which will safely
+ * install a signal frame for the running thread:
+ */
+ _thread_kern_sched_sig();
}
/* Return the completion status: */
diff --git a/lib/libpthread/thread/thr_sigprocmask.c b/lib/libpthread/thread/thr_sigprocmask.c
index 6addb4a948e9..f8ca0311c42c 100644
--- a/lib/libpthread/thread/thr_sigprocmask.c
+++ b/lib/libpthread/thread/thr_sigprocmask.c
@@ -41,53 +41,9 @@
#include "pthread_private.h"
int
-_sigprocmask(int how, const sigset_t * set, sigset_t * oset)
+_sigprocmask(int how, const sigset_t *set, sigset_t *oset)
{
- int ret = 0;
-
- /* Check if the existing signal process mask is to be returned: */
- if (oset != NULL) {
- /* Return the current mask: */
- *oset = _thread_run->sigmask;
- }
- /* Check if a new signal set was provided by the caller: */
- if (set != NULL) {
- /* Process according to what to do: */
- switch (how) {
- /* Block signals: */
- case SIG_BLOCK:
- /* Add signals to the existing mask: */
- SIGSETOR(_thread_run->sigmask, *set);
- break;
-
- /* Unblock signals: */
- case SIG_UNBLOCK:
- /* Clear signals from the existing mask: */
- SIGSETNAND(_thread_run->sigmask, *set);
- break;
-
- /* Set the signal process mask: */
- case SIG_SETMASK:
- /* Set the new mask: */
- _thread_run->sigmask = *set;
- break;
-
- /* Trap invalid actions: */
- default:
- /* Return an invalid argument: */
- errno = EINVAL;
- ret = -1;
- break;
- }
-
- /*
- * Dispatch signals to the running thread that are pending
- * and now unblocked:
- */
- _dispatch_signals();
- }
- /* Return the completion status: */
- return (ret);
+ return (pthread_sigmask(how, set, oset));
}
__strong_reference(_sigprocmask, sigprocmask);
diff --git a/lib/libpthread/thread/thr_sigwait.c b/lib/libpthread/thread/thr_sigwait.c
index b12c028740c9..6ba685559263 100644
--- a/lib/libpthread/thread/thr_sigwait.c
+++ b/lib/libpthread/thread/thr_sigwait.c
@@ -40,7 +40,7 @@
#include "pthread_private.h"
int
-sigwait(const sigset_t * set, int *sig)
+sigwait(const sigset_t *set, int *sig)
{
int ret = 0;
int i;
@@ -52,11 +52,9 @@ sigwait(const sigset_t * set, int *sig)
* Specify the thread kernel signal handler.
*/
act.sa_handler = (void (*) ()) _thread_sig_handler;
- act.sa_flags = SA_RESTART;
- act.sa_mask = *set;
-
- /* Ensure the scheduling signal is masked: */
- sigaddset(&act.sa_mask, _SCHED_SIGNAL);
+ act.sa_flags = SA_RESTART | SA_SIGINFO;
+ /* Ensure the signal handler cannot be interrupted by other signals: */
+ sigfillset(&act.sa_mask);
/*
* Initialize the set of signals that will be waited on:
diff --git a/lib/libpthread/thread/thr_write.c b/lib/libpthread/thread/thr_write.c
index 6408a64830c2..5d9ef35f3fbc 100644
--- a/lib/libpthread/thread/thr_write.c
+++ b/lib/libpthread/thread/thr_write.c
@@ -127,7 +127,7 @@ _write(int fd, const void *buf, size_t nbytes)
/* Return the number of bytes written: */
ret = num;
}
- _FD_UNLOCK(fd, FD_RDWR);
+ _FD_UNLOCK(fd, FD_WRITE);
}
return (ret);
}
diff --git a/lib/libpthread/thread/thr_yield.c b/lib/libpthread/thread/thr_yield.c
index 064dd826d34c..7d64283a9910 100644
--- a/lib/libpthread/thread/thr_yield.c
+++ b/lib/libpthread/thread/thr_yield.c
@@ -57,8 +57,5 @@ pthread_yield(void)
/* Schedule the next thread: */
_thread_kern_sched(NULL);
-
- /* Nothing to return. */
- return;
}
#endif