From 85c1b3c1cbb1a7eded29554a75ae8b21fc96c3cd Mon Sep 17 00:00:00 2001
From: Mateusz Guzik
Date: Fri, 11 May 2018 06:59:54 +0000
Subject: rmlock: partially depessimize lock/unlock fastpath

Previously the slow path was folded in and partially jumped over in the
common case.
---
 sys/kern/kern_rmlock.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index a77fa946e4bf..a82646095420 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -344,7 +344,7 @@ rm_sysinit(void *arg)
 	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_flags);
 }
 
-static int
+static __noinline int
 _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 {
 	struct pcpu *pc;
@@ -459,15 +459,15 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	 * Fast path to combine two common conditions into a single
 	 * conditional jump.
 	 */
-	if (0 == (td->td_owepreempt |
-	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
+	if (__predict_true(0 == (td->td_owepreempt |
+	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus))))
 		return (1);
 
 	/* We do not have a read token and need to acquire one. */
 	return _rm_rlock_hard(rm, tracker, trylock);
 }
 
-static void
+static __noinline void
 _rm_unlock_hard(struct thread *td,struct rm_priotracker *tracker)
 {
 
@@ -518,7 +518,7 @@ _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
 	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
 		THREAD_SLEEPING_OK();
 
-	if (0 == (td->td_owepreempt | tracker->rmp_flags))
+	if (__predict_true(0 == (td->td_owepreempt | tracker->rmp_flags)))
 		return;
 
 	_rm_unlock_hard(td, tracker);
--
cgit v1.2.3
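
For readers unfamiliar with the idiom, the standalone sketch below is not part
of the patch: the function names, the counter, and the macro definitions are
illustrative stand-ins (the kernel gets __predict_true and __noinline from
sys/cdefs.h). It only shows the general shape of the change, namely forcing
the rarely taken slow path out of line and telling the compiler which way the
fast-path branch usually goes, so the common case compiles down to a short,
well-predicted conditional jump.

    /* Illustrative sketch only; not the rmlock code. */
    #include <stdio.h>

    /* Stand-ins for the FreeBSD sys/cdefs.h macros, guarded in case they exist. */
    #ifndef __predict_true
    #define	__predict_true(exp)	__builtin_expect((exp) != 0, 1)
    #endif
    #ifndef __noinline
    #define	__noinline		__attribute__((__noinline__))
    #endif

    static int slow_calls;

    /*
     * Keeping the rarely taken path out of line keeps the caller small, so the
     * hot function is essentially the fast-path check plus a call to this.
     */
    static __noinline int
    slow_op(int v)
    {

    	slow_calls++;
    	return (v * 2 + 1);
    }

    static int
    fast_op(int v)
    {

    	/* Fast path: the hint marks this branch as the common case. */
    	if (__predict_true(v >= 0))
    		return (v * 2);
    	return (slow_op(v));
    }

    int
    main(void)
    {
    	int i, sum = 0;

    	for (i = -2; i < 8; i++)
    		sum += fast_op(i);
    	printf("sum=%d slow_calls=%d\n", sum, slow_calls);
    	return (0);
    }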