author     Mateusz Guzik <mjg@FreeBSD.org>  2020-10-17 08:48:32 +0000
committer  Mateusz Guzik <mjg@FreeBSD.org>  2020-10-17 08:48:32 +0000
commit     b31b5e9cfd8967e69bfb63560d21b9588cd14126 (patch)
tree       77994c53a9dd4d2bb0f30c24a6d5691befe17052
parent     ad89066af446052cd7fc970fb70da5db6387cf16 (diff)
cache: remove entries before trying to add new ones, not after
This should allow positive entries to replace negative ones when the cache is full.
Notes:
    svn path=/head/; revision=366784
-rw-r--r--  sys/kern/vfs_cache.c | 15
1 file changed, 13 insertions(+), 2 deletions(-)
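
The change is easiest to see in isolation. The following is a minimal userspace model of the new ordering, not the kernel code: plain longs stand in for the kernel's atomic counters, the hypothetical zap_one_negative() and enter_positive() stand in for cache_negative_zap_one() and the relevant part of cache_enter_time(), and ncsize/ncnegfactor are shrunk so the effect shows up within two calls. Starting from a cache full of negative entries, the first insertion attempt is still dropped, but it evicts a negative entry as a side effect, so the retry fits.

#include <stdbool.h>
#include <stdio.h>

/* Toy counters standing in for the kernel's; tiny limits for the demo. */
static long numcache, numneg;
static const long ncsize = 4;		/* stand-in cache capacity */
static const long ncnegfactor = 2;	/* stand-in negative-entry factor */

/* Evict one negative entry, as cache_negative_zap_one() would. */
static void
zap_one_negative(void)
{
	if (numneg > 0) {
		numneg--;
		numcache--;
	}
}

/* Model of the patched ordering: zap a negative entry before the
 * capacity check, so the eviction happens even when this particular
 * entry ends up being dropped. */
static bool
enter_positive(void)
{
	long lnumcache = ++numcache;	/* reserve a slot up front */

	if (numneg * ncnegfactor > lnumcache) {
		zap_one_negative();
		lnumcache = numcache;
	}
	if (lnumcache >= ncsize) {
		numcache--;		/* back out; entry is dropped */
		return (false);
	}
	return (true);
}

int
main(void)
{
	/* Start with the cache completely full of negative entries. */
	numcache = numneg = ncsize;

	printf("attempt 1: %s\n", enter_positive() ? "added" : "dropped");
	printf("attempt 2: %s\n", enter_positive() ? "added" : "dropped");
	return (0);
}

With the limits above this prints "attempt 1: dropped" followed by "attempt 2: added". Moving the zap after the capacity check, as in the pre-patch code below where it only ran on the successful-insert path, makes both attempts print "dropped": a cache full of negatives rejects positive entries indefinitely.
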
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index ef79d5ca4da0..5f206c5cb64a 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -1923,8 +1923,21 @@ cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
 	/*
 	 * Avoid blowout in namecache entries.
+	 *
+	 * Bugs:
+	 * 1. filesystems may end up trying to add an already existing entry
+	 * (for example this can happen after a cache miss during concurrent
+	 * lookup), in which case we will call cache_negative_zap_one despite
+	 * not adding anything.
+	 * 2. the routine may fail to free anything and no provisions are made
+	 * to make it try harder (see the inside for failure modes).
+	 * 3. it only ever looks at negative entries.
 	 */
 	lnumcache = atomic_fetchadd_long(&numcache, 1) + 1;
+	if (numneg * ncnegfactor > lnumcache) {
+		cache_negative_zap_one();
+		lnumcache = atomic_load_long(&numcache);
+	}
 	if (__predict_false(lnumcache >= ncsize)) {
 		atomic_subtract_long(&numcache, 1);
 		counter_u64_add(numdrops, 1);
@@ -2087,8 +2100,6 @@ cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
 	atomic_store_char(&ncp->nc_flag, ncp->nc_flag & ~NCF_WIP);
 	cache_enter_unlock(&cel);
-	if (numneg * ncnegfactor > lnumcache)
-		cache_negative_zap_one();
 	if (ndd != NULL)
 		cache_free(ndd);
 	return;
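
As an aside, the lnumcache = atomic_fetchadd_long(&numcache, 1) + 1 line in the first hunk uses a reserve-then-back-out idiom: the new entry is accounted for before it exists, and the accounting is undone on the drop path. Here is a standalone C11 sketch of the same idiom, with <stdatomic.h> standing in for the kernel's atomic_fetchadd_long/atomic_subtract_long; the names and the capacity are illustrative, not taken from the source.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long numcache;	/* stand-in for the kernel counter */
static const long ncsize = 2;	/* tiny capacity for the demo */

/* Reserve a slot first, then back the reservation out on failure.
 * atomic_fetch_add returns the old value, so adding 1 yields the
 * post-increment count, mirroring atomic_fetchadd_long(&numcache, 1) + 1. */
static bool
reserve_slot(void)
{
	long lnumcache = atomic_fetch_add(&numcache, 1) + 1;

	if (lnumcache >= ncsize) {
		atomic_fetch_sub(&numcache, 1);	/* back out; dropped */
		return (false);
	}
	return (true);
}

int
main(void)
{
	for (int i = 1; i <= 3; i++)
		printf("reserve %d: %s\n", i, reserve_slot() ? "ok" : "dropped");
	return (0);
}

Reserving first means concurrent callers each see a count that already includes in-flight insertions, so racing threads cannot overshoot the cap; the cost is a transient over-count, which the patched code compensates for by re-reading the counter with atomic_load_long after the zap.
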