Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/include/atomic.h | 27
1 file changed, 27 insertions, 0 deletions
diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h
index c737a22e9b1a..be3a20d6a131 100644
--- a/sys/amd64/include/atomic.h
+++ b/sys/amd64/include/atomic.h
@@ -152,6 +152,31 @@ atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src)
return (res);
}
+#if defined(_KERNEL) && !defined(SMP)
+
+/*
+ * We assume that a = b will do atomic loads and stores. However, on a
+ * PentiumPro or higher, reads may pass writes, so for that case we have
+ * to use a serializing instruction (i.e. with LOCK) to do the load in
+ * SMP kernels. For UP kernels, however, the cache of the single processor
+ * is always consistent, so we don't need any memory barriers.
+ */
+#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
+static __inline u_##TYPE \
+atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
+{ \
+ return (*p); \
+} \
+ \
+static __inline void \
+atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ *p = v; \
+} \
+struct __hack
+
+#else /* defined(SMP) */
+
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
static __inline u_##TYPE \
atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
@@ -179,6 +204,8 @@ atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
} \
struct __hack
+#endif /* SMP */
+
#endif /* KLD_MODULE || !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */
ATOMIC_ASM(set, char, "orb %b1,%0", "iq", v);
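
For reference, a minimal sketch (not part of the committed patch) of what the new UP-only branch of ATOMIC_STORE_LOAD expands to when instantiated for the int type. The LOP and SOP arguments, presumably instruction strings used by the SMP implementation's inline assembly, are ignored in the !SMP branch, so the acquire load and release store collapse to plain dereferences:

/* Illustrative expansion of ATOMIC_STORE_LOAD(int, LOP, SOP) under _KERNEL && !SMP. */
static __inline u_int
atomic_load_acq_int(volatile u_int *p)
{
	return (*p);		/* plain load: on a UP kernel the single CPU's cache is always consistent */
}

static __inline void
atomic_store_rel_int(volatile u_int *p, u_int v)
{
	*p = v;			/* plain store: no LOCK-prefixed instruction needed without SMP */
}
struct __hack;

The trailing struct __hack declaration exists so that an invocation such as ATOMIC_STORE_LOAD(int, ..., ...); still requires, and consumes, a terminating semicolon, letting the macro be used like an ordinary statement.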