path: root/sys/net/if_llatbl.h
author     Robert Watson <rwatson@FreeBSD.org>    2009-08-25 09:52:38 +0000
committer  Robert Watson <rwatson@FreeBSD.org>    2009-08-25 09:52:38 +0000
commit     dc56e98f0d95e809a1044d38f86dd9e734857fe0 (patch)
tree       4a34d8a5aab4678ff905e0a94bd345c9b1c8d805 /sys/net/if_llatbl.h
parent     18159f6a492148603a5338751a65286ef724dec2 (diff)
Use locks specific to the lltable code, rather than borrow the ifnet
list/index locks, to protect link layer address tables.  This avoids
lock order issues during interface teardown, but maintains the bug that
sysctl copy routines may be called while a non-sleepable lock is held.

Reviewed by:    bz, kmacy
MFC after:      3 days
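As a rough sketch of the locking pattern this commit introduces (illustration only, not code from the commit): readers traversing the global list of link-layer tables take the new shared lock, while code that links or unlinks a table takes the write lock. The lltables list head and the llt_link field below are assumed from sys/net/if_llatbl.c, where the real list head is private to that file; only the use of the new LLTABLE_* macros is the point here.

/*
 * Sketch under the assumptions above: this would live in the context of
 * if_llatbl.c.  The list head is redeclared here only so the example is
 * self-contained.
 */
static SLIST_HEAD(, lltable) lltables;

static void
lltable_link_sketch(struct lltable *llt)
{

	LLTABLE_WLOCK();		/* exclusive: the list is being modified */
	SLIST_INSERT_HEAD(&lltables, llt, llt_link);
	LLTABLE_WUNLOCK();
}

static int
lltable_count_sketch(void)
{
	struct lltable *llt;
	int n = 0;

	LLTABLE_RLOCK();		/* shared lock is enough for a read-only walk */
	SLIST_FOREACH(llt, &lltables, llt_link)
		n++;
	LLTABLE_RUNLOCK();
	return (n);
}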
Notes:
    svn path=/head/; revision=196535
Diffstat (limited to 'sys/net/if_llatbl.h')
-rw-r--r--    sys/net/if_llatbl.h    7
1 file changed, 7 insertions, 0 deletions
diff --git a/sys/net/if_llatbl.h b/sys/net/if_llatbl.h
index 4d721efc35f7..f54c78ad8a28 100644
--- a/sys/net/if_llatbl.h
+++ b/sys/net/if_llatbl.h
@@ -41,6 +41,13 @@ struct rt_addrinfo;
 struct llentry;
 LIST_HEAD(llentries, llentry);
+extern struct rwlock lltable_rwlock;
+#define LLTABLE_RLOCK() rw_rlock(&lltable_rwlock)
+#define LLTABLE_RUNLOCK() rw_runlock(&lltable_rwlock)
+#define LLTABLE_WLOCK() rw_wlock(&lltable_rwlock)
+#define LLTABLE_WUNLOCK() rw_wunlock(&lltable_rwlock)
+#define LLTABLE_LOCK_ASSERT() rw_assert(&lltable_rwlock, RA_LOCKED)
+
 /*
  * Code referencing llentry must at least hold
  * a shared lock