author    Andrew Turner <andrew@FreeBSD.org>  2019-11-07 17:34:44 +0000
committer Andrew Turner <andrew@FreeBSD.org>  2019-11-07 17:34:44 +0000
commit    4ffa494e4ff30e63accf01a4f333941b59aad65a (patch)
tree      86585cba0dc895e49f4275a2a8b44d5fabb6a7d5 /sys/arm64/include/atomic.h
parent    542c56ea9a11533f1631961cb14f524fc725434e (diff)
Add more 8 and 16 bit variants of the atomic(9) functions on arm64.

These are direct copies of the 32 bit functions, adjusted as needed. While
here, fix atomic_fcmpset_16 to use the valid load and store exclusive
instructions.

Sponsored by: DARPA, AFRL
Notes:
    svn path=/head/; revision=354452
Diffstat (limited to 'sys/arm64/include/atomic.h')
-rw-r--r--  sys/arm64/include/atomic.h  84
1 file changed, 82 insertions(+), 2 deletions(-)
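For context before the diff itself: the new functions follow the usual
atomic(9) naming scheme, so callers get atomic_add_8(), atomic_add_acq_16(),
and so on, alongside the existing 32 and 64 bit names. A minimal usage sketch
under that assumption; stats_bump and both counters are hypothetical, not
from the tree:

/* Hypothetical usage of the new 8/16-bit atomic(9) variants. */
#include <sys/types.h>
#include <machine/atomic.h>

static void
stats_bump(volatile uint8_t *ctr8, volatile uint16_t *ctr16)
{
	atomic_add_8(ctr8, 1);		/* no barrier */
	atomic_add_acq_16(ctr16, 1);	/* acquire variant */
}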
diff --git a/sys/arm64/include/atomic.h b/sys/arm64/include/atomic.h
index 098d7e70f7d6..27f03359da34 100644
--- a/sys/arm64/include/atomic.h
+++ b/sys/arm64/include/atomic.h
@@ -57,6 +57,40 @@
 #define ATOMIC_OP(op, asm_op, bar, a, l) \
 static __inline void \
+atomic_##op##_##bar##8(volatile uint8_t *p, uint8_t val) \
+{ \
+ uint8_t tmp; \
+ int res; \
+ \
+ __asm __volatile( \
+ "1: ld"#a"xrb %w0, [%2] \n" \
+ " "#asm_op" %w0, %w0, %w3 \n" \
+ " st"#l"xrb %w1, %w0, [%2] \n" \
+ " cbnz %w1, 1b \n" \
+ : "=&r"(tmp), "=&r"(res) \
+ : "r" (p), "r" (val) \
+ : "memory" \
+ ); \
+} \
+ \
+static __inline void \
+atomic_##op##_##bar##16(volatile uint16_t *p, uint16_t val) \
+{ \
+ uint16_t tmp; \
+ int res; \
+ \
+ __asm __volatile( \
+ "1: ld"#a"xrh %w0, [%2] \n" \
+ " "#asm_op" %w0, %w0, %w3 \n" \
+ " st"#l"xrh %w1, %w0, [%2] \n" \
+ " cbnz %w1, 1b \n" \
+ : "=&r"(tmp), "=&r"(res) \
+ : "r" (p), "r" (val) \
+ : "memory" \
+ ); \
+} \
+ \
+static __inline void \
 atomic_##op##_##bar##32(volatile uint32_t *p, uint32_t val) \
 { \
 uint32_t tmp; \
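Each function this hunk generates is the standard AArch64 load/store-exclusive
retry loop: load the old value, apply the operation, attempt the
store-exclusive, and loop if another CPU broke the reservation. As a sketch of
the mechanics (not committed source), assuming the acq_ instantiation passes
a=a and l empty as the existing 32 bit variants do, the 8-bit add expands to
roughly:

static __inline void
atomic_add_acq_8(volatile uint8_t *p, uint8_t val)
{
	uint8_t tmp;
	int res;

	__asm __volatile(
	    "1: ldaxrb %w0, [%2]    \n"	/* load-acquire exclusive byte */
	    "   add %w0, %w0, %w3   \n"
	    "   stxrb %w1, %w0, [%2]\n"	/* res != 0 if reservation lost */
	    "   cbnz %w1, 1b        \n"	/* retry until the store lands */
	    : "=&r"(tmp), "=&r"(res)
	    : "r" (p), "r" (val)
	    : "memory"
	);
}

The "=&r" early-clobber constraints matter here: tmp and res are written
before the inputs are last read, so they must not share registers with p or
val.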
@@ -135,10 +169,10 @@ atomic_fcmpset_##bar##16(volatile uint16_t *p, uint16_t *cmpval, \
 \
 __asm __volatile( \
 "1: mov %w1, #1 \n" \
- " ld"#a"xh %w0, [%2] \n" \
+ " ld"#a"xrh %w0, [%2] \n" \
 " cmp %w0, %w3 \n" \
 " b.ne 2f \n" \
- " st"#l"xh %w1, %w4, [%2] \n" \
+ " st"#l"xrh %w1, %w4, [%2] \n" \
 "2:" \
 : "=&r"(tmp), "=&r"(res) \
 : "r" (p), "r" (_cmpval), "r" (newval) \
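This hunk is the fix called out in the log message: AArch64 has no ldaxh or
stlxh instructions; the exclusive halfword forms are ld{a}xrh and st{l}xrh,
so the old strings could not assemble once the macro was instantiated. Note
that fcmpset returns failure when the store-exclusive loses its reservation
(there is no retry branch) and, per atomic(9), hands the observed value back
through cmpval. A hypothetical caller relying on that; counter_inc is
illustrative, not from the tree:

/* Hypothetical example: increment a 16-bit counter via fcmpset. */
static void
counter_inc(volatile uint16_t *ctr)
{
	uint16_t old;

	old = *ctr;
	/*
	 * On failure atomic_fcmpset_16() has already refreshed 'old'
	 * with the value it observed, so the loop retries directly.
	 */
	while (atomic_fcmpset_16(ctr, &old, old + 1) == 0)
		continue;
}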
@@ -205,6 +239,52 @@ ATOMIC_FCMPSET(rel_, ,l)
 #define ATOMIC_CMPSET(bar, a, l) \
 static __inline int \
+atomic_cmpset_##bar##8(volatile uint8_t *p, uint8_t cmpval, \
+ uint8_t newval) \
+{ \
+ uint8_t tmp; \
+ int res; \
+ \
+ __asm __volatile( \
+ "1: mov %w1, #1 \n" \
+ " ld"#a"xrb %w0, [%2] \n" \
+ " cmp %w0, %w3 \n" \
+ " b.ne 2f \n" \
+ " st"#l"xrb %w1, %w4, [%2] \n" \
+ " cbnz %w1, 1b \n" \
+ "2:" \
+ : "=&r"(tmp), "=&r"(res) \
+ : "r" (p), "r" (cmpval), "r" (newval) \
+ : "cc", "memory" \
+ ); \
+ \
+ return (!res); \
+} \
+ \
+static __inline int \
+atomic_cmpset_##bar##16(volatile uint16_t *p, uint16_t cmpval, \
+ uint16_t newval) \
+{ \
+ uint16_t tmp; \
+ int res; \
+ \
+ __asm __volatile( \
+ "1: mov %w1, #1 \n" \
+ " ld"#a"xrh %w0, [%2] \n" \
+ " cmp %w0, %w3 \n" \
+ " b.ne 2f \n" \
+ " st"#l"xrh %w1, %w4, [%2] \n" \
+ " cbnz %w1, 1b \n" \
+ "2:" \
+ : "=&r"(tmp), "=&r"(res) \
+ : "r" (p), "r" (cmpval), "r" (newval) \
+ : "cc", "memory" \
+ ); \
+ \
+ return (!res); \
+} \
+ \
+static __inline int \
 atomic_cmpset_##bar##32(volatile uint32_t *p, uint32_t cmpval, \
 uint32_t newval) \
 { \
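The new 8 and 16 bit cmpset variants mirror the 32 bit one that follows: a
lost store-exclusive reservation takes the cbnz back to 1b, so a zero return
means only a genuine compare mismatch, and unlike fcmpset the caller's
expected value is never written back. A hypothetical one-shot flag built on
the 8-bit variant; flag_claim is illustrative, not from the tree:

/* Hypothetical example: claim a one-shot 8-bit flag exactly once. */
static int
flag_claim(volatile uint8_t *flag)
{
	/*
	 * Nonzero iff *flag was 0 and this CPU stored 1; the acq_
	 * variant orders the caller's later loads after the claim.
	 */
	return (atomic_cmpset_acq_8(flag, 0, 1));
}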