#pragma once
/*
 * Most of this header is a giant workaround for MSVC to make atomics into a
 * somewhat unified interface with how GCC and Clang handle them.
 *
 * We don't use the absolutely disgusting C11 stdatomic.h header because it
 * only operates on _Atomic types, which enforce implicit sequential
 * consistency and alter the behavior of the standard C binary/unary operators.
 *
 * The strictness of the atomic helpers defined here is assumed to be at least
 * acquire for loads and release for stores. Cmpxchg uses the standard acq/rel
 * for success, acq for failure, and is assumed to be strong.
 */
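/*
 * Usage sketch (illustrative only; `counter` and `limit` are hypothetical
 * caller-side names, not part of this header, and uacpi_u32 is assumed to be
 * the usual fixed-width typedef from the public headers): a bounded atomic
 * increment built on the helpers below. On failure, cmpxchg writes the freshly
 * observed value back into `old` and returns 0, mirroring
 * __atomic_compare_exchange_n, so the loop re-checks and retries.
 *
 *     uacpi_u32 old = uacpi_atomic_load32(&counter);
 *     do {
 *         if (old >= limit)
 *             break;
 *     } while (!uacpi_atomic_cmpxchg32(&counter, &old, old + 1));
 */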
#ifdef UACPI_OVERRIDE_ATOMIC
#include "uacpi_atomic.h"
#else
#include <uacpi/platform/compiler.h>
#if defined(_MSC_VER) && !defined(__clang__)
#include <intrin.h>
// mimic __atomic_compare_exchange_n that doesn't exist on MSVC
#define UACPI_MAKE_MSVC_CMPXCHG(width, type, suffix)                        \
    static inline int uacpi_do_atomic_cmpxchg##width(                       \
        type volatile *ptr, type volatile *expected, type desired           \
    )                                                                        \
    {                                                                        \
        type current;                                                        \
                                                                             \
        current = _InterlockedCompareExchange##suffix(ptr, desired, *expected); \
        if (current != *expected) {                                          \
            *expected = current;                                             \
            return 0;                                                        \
        }                                                                    \
        return 1;                                                            \
    }
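/*
 * Note: _InterlockedCompareExchange* takes (Destination, Exchange, Comparand)
 * and returns the prior value of *Destination, so the wrapper above passes
 * `desired` as the exchange value and `*expected` as the comparand. It returns
 * 1 on a successful swap and 0 otherwise (updating *expected to the observed
 * value), matching __atomic_compare_exchange_n.
 */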
#define UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, width, type) \
    uacpi_do_atomic_cmpxchg##width(                                    \
        (type volatile*)ptr, (type volatile*)expected, desired         \
    )

#define UACPI_MSVC_ATOMIC_STORE(ptr, value, type, width) \
    _InterlockedExchange##width((type volatile*)(ptr), (type)(value))

#define UACPI_MSVC_ATOMIC_LOAD(ptr, type, width) \
    _InterlockedOr##width((type volatile*)(ptr), 0)

#define UACPI_MSVC_ATOMIC_INC(ptr, type, width) \
    _InterlockedIncrement##width((type volatile*)(ptr))

#define UACPI_MSVC_ATOMIC_DEC(ptr, type, width) \
    _InterlockedDecrement##width((type volatile*)(ptr))
UACPI_MAKE_MSVC_CMPXCHG(64, __int64, 64)
UACPI_MAKE_MSVC_CMPXCHG(32, long,)
UACPI_MAKE_MSVC_CMPXCHG(16, short, 16)
#define uacpi_atomic_cmpxchg16(ptr, expected, desired) \
    UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 16, short)
#define uacpi_atomic_cmpxchg32(ptr, expected, desired) \
    UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 32, long)
#define uacpi_atomic_cmpxchg64(ptr, expected, desired) \
    UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 64, __int64)
#define uacpi_atomic_load8(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, char, 8)
#define uacpi_atomic_load16(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, short, 16)
#define uacpi_atomic_load32(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, long,)
#define uacpi_atomic_load64(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, __int64, 64)
#define uacpi_atomic_store8(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, char, 8)
#define uacpi_atomic_store16(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, short, 16)
#define uacpi_atomic_store32(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, long,)
#define uacpi_atomic_store64(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, __int64, 64)
#define uacpi_atomic_inc16(ptr) UACPI_MSVC_ATOMIC_INC(ptr, short, 16)
#define uacpi_atomic_inc32(ptr) UACPI_MSVC_ATOMIC_INC(ptr, long,)
#define uacpi_atomic_inc64(ptr) UACPI_MSVC_ATOMIC_INC(ptr, __int64, 64)
#define uacpi_atomic_dec16(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, short, 16)
#define uacpi_atomic_dec32(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, long,)
#define uacpi_atomic_dec64(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, __int64, 64)
#else
#define UACPI_DO_CMPXCHG(ptr, expected, desired)           \
    __atomic_compare_exchange_n(ptr, expected, desired, 0, \
                                __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)

#define uacpi_atomic_cmpxchg16(ptr, expected, desired) \
    UACPI_DO_CMPXCHG(ptr, expected, desired)
#define uacpi_atomic_cmpxchg32(ptr, expected, desired) \
    UACPI_DO_CMPXCHG(ptr, expected, desired)
#define uacpi_atomic_cmpxchg64(ptr, expected, desired) \
    UACPI_DO_CMPXCHG(ptr, expected, desired)
#define uacpi_atomic_load8(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
#define uacpi_atomic_load16(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
#define uacpi_atomic_load32(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
#define uacpi_atomic_load64(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
#define uacpi_atomic_store8(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE)
#define uacpi_atomic_store16(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE)
#define uacpi_atomic_store32(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE)
#define uacpi_atomic_store64(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE)
#define uacpi_atomic_inc16(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_inc32(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_inc64(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_dec16(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_dec32(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_dec64(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#endif
#if UACPI_POINTER_SIZE == 4
#define uacpi_atomic_load_ptr(ptr_to_ptr) uacpi_atomic_load32(ptr_to_ptr)
#define uacpi_atomic_store_ptr(ptr_to_ptr, value) uacpi_atomic_store32(ptr_to_ptr, value)
#else
#define uacpi_atomic_load_ptr(ptr_to_ptr) uacpi_atomic_load64(ptr_to_ptr)
#define uacpi_atomic_store_ptr(ptr_to_ptr, value) uacpi_atomic_store64(ptr_to_ptr, value)
#endif
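/*
 * Illustrative only (the `global_ctx` object and `struct ctx` type are
 * hypothetical): these helpers take a pointer to the pointer-sized field being
 * accessed and dispatch to the 32- or 64-bit primitive selected above to match
 * UACPI_POINTER_SIZE.
 *
 *     static struct ctx *global_ctx;
 *
 *     uacpi_atomic_store_ptr(&global_ctx, new_ctx);                      // release
 *     struct ctx *c = (struct ctx*)uacpi_atomic_load_ptr(&global_ctx);   // acquire
 */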
#endif