diff options
author | Bosko Milekic <bmilekic@FreeBSD.org> | 2004-06-10 00:04:27 +0000 |
---|---|---|
committer | Bosko Milekic <bmilekic@FreeBSD.org> | 2004-06-10 00:04:27 +0000 |
commit | b5b2ea9a462adca42acbf0e8edb785af1f827b45 (patch) | |
tree | cf0c4150052fc3fb0da94db193b0300ccbc69e0b /sys/kern/uipc_mbuf.c | |
parent | c240bd8cf8b35701de48ebaaa18e74c6c907c23e (diff) | |
download | src-b5b2ea9a462adca42acbf0e8edb785af1f827b45.tar.gz src-b5b2ea9a462adca42acbf0e8edb785af1f827b45.zip |
Plug a race where upon free this scenario could occur:
(time grows downward)
thread 1 thread 2
------------|------------
dec ref_cnt |
| dec ref_cnt <-- ref_cnt now zero
cmpset |
free all |
return |
|
alloc again,|
reuse prev |
ref_cnt |
| cmpset, read
| already freed
| ref_cnt
------------|------------
This should fix that by performing only a single
atomic test-and-set that will serve to decrement
the ref_cnt, only if it hasn't changed since the
earlier read, otherwise it'll loop and re-read.
This forces ordering of decrements so that truly
the thread which did the LAST decrement is the
one that frees.
This is how atomic-instruction-based refcnting
should probably be handled.
Submitted by: Julian Elischer
Notes:
svn path=/head/; revision=130289
Diffstat (limited to 'sys/kern/uipc_mbuf.c')
-rw-r--r-- | sys/kern/uipc_mbuf.c | 45 |
1 files changed, 30 insertions, 15 deletions
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 9fb5c4aa0394..2c8af1ff5dc7 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -221,23 +221,38 @@ m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
 void
 mb_free_ext(struct mbuf *m)
 {
+	u_int cnt;
 
-	MEXT_REM_REF(m);
-	if (atomic_cmpset_int(m->m_ext.ref_cnt, 0, 1)) {
-		if (m->m_ext.ext_type == EXT_PACKET) {
-			uma_zfree(zone_pack, m);
-			return;
-		} else if (m->m_ext.ext_type == EXT_CLUSTER) {
-			uma_zfree(zone_clust, m->m_ext.ext_buf);
-			m->m_ext.ext_buf = NULL;
-		} else {
-			(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
-			    m->m_ext.ext_args);
-			if (m->m_ext.ext_type != EXT_EXTREF)
-				free(m->m_ext.ref_cnt, M_MBUF);
+	/*
+	 * This is tricky.  We need to make sure to decrement the
+	 * refcount in a safe way but to also clean up if we're the
+	 * last reference.  This method seems to do it without race.
+	 */
+	do {
+		cnt = *(m->m_ext.ref_cnt);
+		if (atomic_cmpset_int(m->m_ext.ref_cnt, cnt, cnt - 1)) {
+			if (cnt == 1) {
+				/*
+				 * Do the free, should be safe.
+				 */
+				if (m->m_ext.ext_type == EXT_PACKET) {
+					uma_zfree(zone_pack, m);
+					break;
+				} else if (m->m_ext.ext_type == EXT_CLUSTER) {
+					uma_zfree(zone_clust, m->m_ext.ext_buf);
+					m->m_ext.ext_buf = NULL;
+				} else {
+					(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
+					    m->m_ext.ext_args);
+					if (m->m_ext.ext_type != EXT_EXTREF)
+						free(m->m_ext.ref_cnt, M_MBUF);
+				}
+				uma_zfree(zone_mbuf, m);
+			}
+			/* Decrement (and potentially free) done, safely. */
+			break;
 		}
-	}
-	uma_zfree(zone_mbuf, m);
+	} while (1);
 }
 
 /*