diff options
author | Gleb Smirnoff <glebius@FreeBSD.org> | 2016-12-09 17:58:34 +0000 |
---|---|---|
committer | Gleb Smirnoff <glebius@FreeBSD.org> | 2016-12-09 17:58:34 +0000 |
commit | 169170209c38774c8755f21b2f40a1dd86945a71 (patch) | |
tree | d7250c2acb3b4158b1f49c6accb98c0e59e1b559 /sys/kern/subr_counter.c | |
parent | ebecdad811cd77b49725d13ea6734296d7e79238 (diff) | |
download | src-169170209c38774c8755f21b2f40a1dd86945a71.tar.gz src-169170209c38774c8755f21b2f40a1dd86945a71.zip |
Provide counter_ratecheck(), a MP-friendly substitution to ppsratecheck().
When a rate-limited event occurs at a very high rate, ppsratecheck() is not
only racy, but also becomes a performance bottleneck.
Together with: rrs, jtl
Notes
Notes:
svn path=/head/; revision=309745
Diffstat (limited to 'sys/kern/subr_counter.c')
-rw-r--r-- | sys/kern/subr_counter.c | 54 |
1 files changed, 54 insertions, 0 deletions
/*
 * MP-friendly version of ppsratecheck().
 *
 * Counts one event per call via a per-CPU counter(9), so concurrent callers
 * do not contend on a shared cacheline the way ppsratecheck() does.  Once per
 * second (measured in ticks) one caller wins a try-lock on cr_lock and resets
 * the counter for the next interval.
 *
 * Returns non-negative if we are in the rate, negative otherwise.
 *  0 - rate limit not reached.
 * -1 - rate limit reached.
 * >0 - rate limit was reached before, and was just reset. The return value
 *       is number of events since last reset.
 *
 * NOTE(review): relies on the kernel globals `ticks' and `hz'; assumes
 * `now - cr->cr_ticks' behaves sanely across tick-counter wraparound —
 * confirm against the kernel's ticks conventions.
 */
int64_t
counter_ratecheck(struct counter_rate *cr, int64_t limit)
{
	int64_t val;
	int now;

	/*
	 * Snapshot cr_over before any possible reset below; if we lose the
	 * reset race we return this (possibly stale) value unmodified.
	 */
	val = cr->cr_over;
	now = ticks;

	if (now - cr->cr_ticks >= hz) {
		/*
		 * Time to clear the structure, we are in the next second.
		 * First try unlocked read, and then proceed with atomic.
		 */
		if ((cr->cr_lock == 0) &&
		    atomic_cmpset_int(&cr->cr_lock, 0, 1)) {
			/*
			 * Re-check under the lock: another thread may have
			 * gone through the reset sequence just before us.
			 */
			if (now - cr->cr_ticks >= hz) {
				/* Report the previous interval's count. */
				val = counter_u64_fetch(cr->cr_rate);
				counter_u64_zero(cr->cr_rate);
				cr->cr_over = 0;
				cr->cr_ticks = now;
			}
			/* Release the try-lock with store-release order. */
			atomic_store_rel_int(&cr->cr_lock, 0);
		} else
			/*
			 * We failed to lock, in this case other thread may
			 * be running counter_u64_zero(), so it is not safe
			 * to do an update, we skip it.
			 */
			return (val);
	}

	/* Account this event and decide whether we are over the limit. */
	counter_u64_add(cr->cr_rate, 1);
	if (cr->cr_over != 0)
		return (-1);
	if (counter_u64_fetch(cr->cr_rate) > limit)
		val = cr->cr_over = -1;

	return (val);
}