// Based on: https://github.com/ziglang/zig/blob/79460d4a3eef8eb927b02a7eda8bc9999a766672/lib/compiler_rt/atomics.zig
// and: https://github.com/raspberrypi/pico-sdk/blob/ee68c78d0afae2b69c03ae1a72bf5cc267a2d94c/src/rp2_common/pico_atomic/atomic.c

//! Atomic operations for RP2040
//!
//! These functions should not be called directly. Instead, use the Zig atomic
//! builtins or `std.atomic.Value`.
1
9
const std = @import ("std" );
2
10
const builtin = @import ("builtin" );
3
11
const microzig = @import ("microzig" );
@@ -15,60 +23,193 @@ inline fn atomic_unlock(critical_section: CriticalSection) void {
15
23
atomic_spinlock .unlock_irq (critical_section );
16
24
}
17
25
26
/// Store `val` to `ptr` under the hardware spinlock.
/// The C11 memory-order argument is ignored: the lock/unlock pair already
/// orders the access on this core.
fn atomic_store(comptime T: type, ptr: *volatile T, val: T, _: i32) void {
    const critical_section = atomic_lock();
    defer atomic_unlock(critical_section);
    ptr.* = val;
}
/// Load and return the value at `ptr` under the hardware spinlock.
/// The C11 memory-order argument is ignored.
fn atomic_load(comptime T: type, ptr: *volatile T, _: i32) T {
    const critical_section = atomic_lock();
    defer atomic_unlock(critical_section);
    return ptr.*;
}
/// Atomic read-modify-write under the hardware spinlock.
/// Applies `op` with operand `val` to `ptr.*` and returns the value that was
/// observed *before* the modification (fetch-op semantics). The C11
/// memory-order argument is ignored; the lock provides the ordering.
///
/// NOTE: `.Add`/`.Sub` use the wrapping operators `+%`/`-%`. The C11
/// `__atomic_fetch_add`/`__atomic_fetch_sub` ABI requires two's-complement
/// wraparound on overflow; plain `+=`/`-=` would panic in safe build modes.
fn atomic_rmw(comptime T: type, ptr: *volatile T, val: T, _: i32, comptime op: std.builtin.AtomicRmwOp) T {
    const save = atomic_lock();
    defer atomic_unlock(save);
    const old = ptr.*;

    switch (op) {
        .Xchg => ptr.* = val,
        .Add => ptr.* = old +% val,
        .Sub => ptr.* = old -% val,
        .And => ptr.* = old & val,
        .Or => ptr.* = old | val,
        .Xor => ptr.* = old ^ val,
        .Nand => ptr.* = ~(old & val),
        .Max => ptr.* = @max(old, val),
        .Min => ptr.* = @min(old, val),
    }

    return old;
}
/// Strong compare-and-swap under the hardware spinlock.
/// If `ptr.*` equals `expected.*`, stores `desired` and returns true.
/// Otherwise writes the observed value back into `expected.*` and returns
/// false, matching the C11 `__atomic_compare_exchange` contract.
/// Both memory-order arguments (success/failure) are ignored.
fn atomic_compare_exchange(comptime T: type, ptr: *volatile T, expected: *T, desired: T, _: i32, _: i32) bool {
    const critical_section = atomic_lock();
    defer atomic_unlock(critical_section);

    const current = ptr.*;
    if (current != expected.*) {
        expected.* = current;
        return false;
    }
    ptr.* = desired;
    return true;
}
// C-ABI atomic store entry points (1/2/4 bytes) emitted by the compiler for
// targets without native atomics; each forwards to the locked `atomic_store`.

export fn __atomic_store_1(ptr: *u8, val: u8, model: i32) callconv(.c) void {
    atomic_store(u8, ptr, val, model);
}

export fn __atomic_store_2(ptr: *u16, val: u16, model: i32) callconv(.c) void {
    atomic_store(u16, ptr, val, model);
}

export fn __atomic_store_4(ptr: *u32, val: u32, model: i32) callconv(.c) void {
    atomic_store(u32, ptr, val, model);
}
// C-ABI atomic load entry points (1/2/4 bytes); each forwards to the locked
// `atomic_load`.

export fn __atomic_load_1(ptr: *u8, model: i32) callconv(.c) u8 {
    return atomic_load(u8, ptr, model);
}

export fn __atomic_load_2(ptr: *u16, model: i32) callconv(.c) u16 {
    return atomic_load(u16, ptr, model);
}

export fn __atomic_load_4(ptr: *u32, model: i32) callconv(.c) u32 {
    return atomic_load(u32, ptr, model);
}
// C-ABI atomic exchange entry points (1/2/4 bytes); each forwards to the
// locked `atomic_rmw` with the `.Xchg` operation.

export fn __atomic_exchange_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return atomic_rmw(u8, ptr, val, model, .Xchg);
}

export fn __atomic_exchange_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return atomic_rmw(u16, ptr, val, model, .Xchg);
}

export fn __atomic_exchange_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return atomic_rmw(u32, ptr, val, model, .Xchg);
}
// C-ABI fetch-add / fetch-sub entry points (1/2/4 bytes). `atomic_rmw`
// returns the value observed before the modification.

export fn __atomic_fetch_add_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return atomic_rmw(u8, ptr, val, model, .Add);
}

export fn __atomic_fetch_add_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return atomic_rmw(u16, ptr, val, model, .Add);
}

export fn __atomic_fetch_add_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return atomic_rmw(u32, ptr, val, model, .Add);
}

export fn __atomic_fetch_sub_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return atomic_rmw(u8, ptr, val, model, .Sub);
}

export fn __atomic_fetch_sub_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return atomic_rmw(u16, ptr, val, model, .Sub);
}

export fn __atomic_fetch_sub_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return atomic_rmw(u32, ptr, val, model, .Sub);
}
// C-ABI fetch-and / fetch-or entry points (1/2/4 bytes).

export fn __atomic_fetch_and_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return atomic_rmw(u8, ptr, val, model, .And);
}

export fn __atomic_fetch_and_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return atomic_rmw(u16, ptr, val, model, .And);
}

export fn __atomic_fetch_and_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return atomic_rmw(u32, ptr, val, model, .And);
}

export fn __atomic_fetch_or_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return atomic_rmw(u8, ptr, val, model, .Or);
}

export fn __atomic_fetch_or_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return atomic_rmw(u16, ptr, val, model, .Or);
}

export fn __atomic_fetch_or_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return atomic_rmw(u32, ptr, val, model, .Or);
}
// C-ABI fetch-xor / fetch-nand entry points (1/2/4 bytes).

export fn __atomic_fetch_xor_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return atomic_rmw(u8, ptr, val, model, .Xor);
}

export fn __atomic_fetch_xor_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return atomic_rmw(u16, ptr, val, model, .Xor);
}

export fn __atomic_fetch_xor_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return atomic_rmw(u32, ptr, val, model, .Xor);
}

export fn __atomic_fetch_nand_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return atomic_rmw(u8, ptr, val, model, .Nand);
}

export fn __atomic_fetch_nand_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return atomic_rmw(u16, ptr, val, model, .Nand);
}

export fn __atomic_fetch_nand_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return atomic_rmw(u32, ptr, val, model, .Nand);
}
// C-ABI fetch-max / fetch-min entry points (1/2/4 bytes, unsigned).

export fn __atomic_fetch_max_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return atomic_rmw(u8, ptr, val, model, .Max);
}

export fn __atomic_fetch_max_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return atomic_rmw(u16, ptr, val, model, .Max);
}

export fn __atomic_fetch_max_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return atomic_rmw(u32, ptr, val, model, .Max);
}

export fn __atomic_fetch_min_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return atomic_rmw(u8, ptr, val, model, .Min);
}

export fn __atomic_fetch_min_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return atomic_rmw(u16, ptr, val, model, .Min);
}

export fn __atomic_fetch_min_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return atomic_rmw(u32, ptr, val, model, .Min);
}
// C-ABI strong compare-exchange entry points (1/2/4 bytes); each forwards to
// the locked `atomic_compare_exchange`.

export fn __atomic_compare_exchange_1(ptr: *u8, expected: *u8, desired: u8, success: i32, failure: i32) callconv(.c) bool {
    return atomic_compare_exchange(u8, ptr, expected, desired, success, failure);
}

export fn __atomic_compare_exchange_2(ptr: *u16, expected: *u16, desired: u16, success: i32, failure: i32) callconv(.c) bool {
    return atomic_compare_exchange(u16, ptr, expected, desired, success, failure);
}

export fn __atomic_compare_exchange_4(ptr: *u32, expected: *u32, desired: u32, success: i32, failure: i32) callconv(.c) bool {
    return atomic_compare_exchange(u32, ptr, expected, desired, success, failure);
}
0 commit comments