about summary refs log tree commit diff
path: root/sys
diff options
context:
space:
mode:
authorMohan Srinivasan <mohans@FreeBSD.org>2007-01-25 01:05:23 +0000
committerMohan Srinivasan <mohans@FreeBSD.org>2007-01-25 01:05:23 +0000
commit6c125b8df6750dba0aa916ee7779fe5b20bf63b8 (patch)
tree29d5e3c67fe7103c5fe9363f82111c4d5203c0f7 /sys
parentfd7b77628d10ab8e30adb344626665cd6ff61155 (diff)
downloadsrc-6c125b8df6750dba0aa916ee7779fe5b20bf63b8.tar.gz
src-6c125b8df6750dba0aa916ee7779fe5b20bf63b8.zip
Fix for problems that occur when all mbuf clusters migrate to the mbuf packet
zone. Cluster allocations fail when this happens. Also processes that may have blocked on cluster allocations will never be woken up. Thanks to rwatson for an overview of the issue and pointers to the mbuma paper and his tool to dump out UMA zones. Reviewed by: andre@
Notes
Notes: svn path=/head/; revision=166213
Diffstat (limited to 'sys')
-rw-r--r-- sys/kern/kern_mbuf.c | 8
-rw-r--r-- sys/sys/mbuf.h       | 8
-rw-r--r-- sys/vm/uma.h         | 3
-rw-r--r-- sys/vm/uma_core.c    | 9
4 files changed, 26 insertions(+), 2 deletions(-)
diff --git a/sys/kern/kern_mbuf.c b/sys/kern/kern_mbuf.c
index 0f999247aa1e..39349654b503 100644
--- a/sys/kern/kern_mbuf.c
+++ b/sys/kern/kern_mbuf.c
@@ -372,6 +372,14 @@ mb_dtor_pack(void *mem, int size, void *arg)
#ifdef INVARIANTS
trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
+ /*
+ * If there are processes blocked on zone_clust, waiting for pages to be freed up,
+ * cause them to be woken up by draining the packet zone. We are exposed to a race here
+ * (in the check for the UMA_ZFLAG_FULL) where we might miss the flag set, but that is
+ * deliberate. We don't want to acquire the zone lock for every mbuf free.
+ */
+ if (uma_zone_exhausted_nolock(zone_clust))
+ zone_drain(zone_pack);
}
/*
diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
index 13e7e7f9ff95..bfb82be44c33 100644
--- a/sys/sys/mbuf.h
+++ b/sys/sys/mbuf.h
@@ -457,6 +457,14 @@ m_clget(struct mbuf *m, int how)
printf("%s: %p mbuf already has cluster\n", __func__, m);
m->m_ext.ext_buf = (char *)NULL;
uma_zalloc_arg(zone_clust, m, how);
+ /*
+ * On a cluster allocation failure, drain the packet zone and retry,
+ * we might be able to loosen a few clusters up on the drain.
+ */
+ if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
+ zone_drain(zone_pack);
+ uma_zalloc_arg(zone_clust, m, how);
+ }
}
/*
diff --git a/sys/vm/uma.h b/sys/vm/uma.h
index 08a55d675e88..002717723369 100644
--- a/sys/vm/uma.h
+++ b/sys/vm/uma.h
@@ -48,6 +48,8 @@ struct uma_zone;
/* Opaque type used as a handle to the zone */
typedef struct uma_zone * uma_zone_t;
+void zone_drain(uma_zone_t);
+
/*
* Item constructor
*
@@ -518,6 +520,7 @@ u_int32_t *uma_find_refcnt(uma_zone_t zone, void *item);
* Non-zero if zone is exhausted.
*/
int uma_zone_exhausted(uma_zone_t zone);
+int uma_zone_exhausted_nolock(uma_zone_t zone);
/*
* Exported statistics structures to be used by user space monitoring tools.
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 62e7aff5af6c..fa2cb05f7e3b 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -238,7 +238,6 @@ static void bucket_zone_drain(void);
static int uma_zalloc_bucket(uma_zone_t zone, int flags);
static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags);
static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab);
-static void zone_drain(uma_zone_t);
static uma_zone_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
uma_fini fini, int align, u_int32_t flags);
@@ -680,7 +679,7 @@ bucket_cache_drain(uma_zone_t zone)
* Returns:
* Nothing.
*/
-static void
+void
zone_drain(uma_zone_t zone)
{
struct slabhead freeslabs = { 0 };
@@ -2679,6 +2678,12 @@ uma_zone_exhausted(uma_zone_t zone)
return (full);
}
+int
+uma_zone_exhausted_nolock(uma_zone_t zone)
+{
+ return (zone->uz_keg->uk_flags & UMA_ZFLAG_FULL);
+}
+
void *
uma_large_malloc(int size, int wait)
{