Skip to content

Commit 1f98c56

Browse files
committed
[kernel][x86] Add support for spinlock
1 parent 50e6f7e commit 1f98c56

5 files changed

Lines changed: 437 additions & 0 deletions

File tree

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
1+
//! Copyright 2025 The Drift Authors. All rights reserved.
2+
//! Use of this source code is governed by a BSD-style license that can be
3+
//! found in the LICENSE file.
4+
5+
const std = @import("std");
6+
const assert = @import("../../kernel/assert.zig");
7+
const mp = @import("mp.zig");
8+
const arch = @import("../../lib/arch/x86/intrin.zig");
9+
const ArchSpinLock = @import("../../kernel/arch/SpinLock.zig");
10+
11+
/// Spin until `lock` is acquired, storing `val` (holder CPU number + 1).
///
/// Test-and-test-and-set loop: a weak CAS attempts the acquire; on failure
/// we spin on a plain monotonic load (with a CPU relax hint) until the lock
/// looks free before retrying, so failed atomic RMWs don't thrash the cache
/// line. `val` must be non-zero, since 0 means "unlocked".
inline fn archSpinLockCore(lock: *ArchSpinLock, val: u32) void {
    // cmpxchgWeak takes the expected value by value and returns null on
    // success, or the observed value on failure.
    while (lock.value.cmpxchgWeak(0, val, .acquire, .monotonic) != null) {
        // Read-only backoff until the lock appears free.
        while (lock.value.load(.monotonic) != 0) {
            arch.yield();
        }
    }

    //assert_lock_held
    assert.assert(@src(), lock.value.load(.monotonic) == val);
}
23+
24+
/// Non-instrumented spinlock acquisition.
///
/// The holder is encoded as `cpu_num + 1` (0 is reserved for "unlocked");
/// on success this CPU's count of held spinlocks is incremented.
pub fn archSpinLockNonInstrumented(lock: *ArchSpinLock) void {
    const percpu = mp.x86_get_percpu();
    const holder_val = percpu.cpu_num + 1;
    archSpinLockCore(lock, holder_val);
    percpu.num_spinlocks += 1;
}
31+
32+
/// Trace-instrumented spinlock acquisition.
///
/// Attempts the lock once before entering the (potentially traced) spin
/// path, so uncontended acquisitions produce no spin trace events. The
/// second parameter is the encoded lock id for tracing; it is unused until
/// spin tracing is wired up.
pub fn archSpinLockTraceInstrumented(lock: *ArchSpinLock, _: u32) void {
    const percpu = mp.x86_get_percpu();
    const val = percpu.cpu_num + 1;

    // If this lock acquisition is trace instrumented, try to obtain the lock once
    // before we decide that we need to spin and produce spin trace events.
    // cmpxchgWeak returns null when the exchange succeeded.
    if (lock.value.cmpxchgWeak(0, val, .acquire, .monotonic) == null) {
        percpu.num_spinlocks += 1;
        //assert_lock_held
        assert.assert(@src(), lock.value.load(.monotonic) == val);
        return;
    }

    // TODO: Add spin tracing
    archSpinLockCore(lock, val);
    // TODO: Finish spin tracing

    percpu.num_spinlocks += 1;
}
53+
54+
/// Try to acquire the spinlock without waiting.
///
/// Returns true (and bumps this CPU's held-spinlock count) on success,
/// false if the lock is already held. A strong CAS is used so a free lock
/// is never spuriously reported as held (cmpxchgWeak may fail even when
/// the expected value matches).
pub fn archSpinTryLock(lock: *ArchSpinLock) bool {
    const percpu = mp.x86_get_percpu();
    const val = percpu.cpu_num + 1;

    // cmpxchgStrong returns null when the exchange succeeded.
    if (lock.value.cmpxchgStrong(0, val, .acquire, .monotonic) == null) {
        percpu.num_spinlocks += 1;
        return true;
    }
    return false;
}
66+
67+
/// Release the spinlock held by this CPU.
///
/// Decrements the per-CPU held-spinlock count, then publishes the unlock
/// with a release store so critical-section writes are visible to the
/// next holder.
pub fn archSpinUnlock(lock: *ArchSpinLock) void {
    const percpu = mp.x86_get_percpu();
    percpu.num_spinlocks -= 1;
    lock.value.store(0, .release);
}
Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
//! Copyright 2025 The Drift Authors. All rights reserved.
2+
//! Use of this source code is governed by a BSD-style license that can be
3+
//! found in the LICENSE file.
4+
5+
const std = @import("std");
6+
const builtin = @import("builtin");
7+
const cpu = @import("../cpu.zig");
8+
const arch = @import("../arch/mp.zig").Impl;
9+
const spin_tracing_config = @import("../spin_tracing_config.zig");
10+
const SpinLock = @This();
11+
12+
const CpuNum = cpu.CpuNum;
13+
14+
/// Architecture-specific spinlock operations, selected at compile time.
pub const Impl = if (builtin.cpu.arch == .x86_64)
    @import("../../arch/x86/spin_lock.zig")
else
    @compileError("Unsupported architecture: " ++ @tagName(builtin.cpu.arch));

// Lock word: 0 = unlocked; a holder stores its CPU number + 1 — see
// Impl's acquisition paths, which assert this encoding.
value: std.atomic.Value(CpuNum),

// Zero-sized handle to the arch implementation namespace.
impl: Impl = .{},
22+
23+
/// Acquire the spinlock via the architecture implementation.
///
/// The Debug branch is reserved for the trace-instrumented path; until spin
/// tracing lands it must still take the lock, so both branches currently
/// fall back to the non-instrumented acquisition.
pub inline fn archSpinLock(self: *SpinLock) void {
    if (comptime builtin.mode == .Debug) {
        //const elid = spin_tracing.EncodedLockId{
        //    .lock_type = .spinlock,
        //    .lock_id = @intFromPtr(self),
        //    .class_id = fxt.InternedString.invalid_id,
        //};
        //Impl.spinLockInstrumented(self, elid);
        Impl.archSpinLockNonInstrumented(self);
    } else {
        Impl.archSpinLockNonInstrumented(self);
    }
}
35+
36+
/// CPU number of the current lock holder.
///
/// The stored value is holder CPU + 1 (0 = unlocked), so subtract one with
/// wrapping arithmetic: a free lock yields maxInt(CpuNum), which cannot
/// match a real CPU number, instead of tripping the safe-mode underflow
/// check.
pub inline fn spinLockHolderCpu(lock: *const SpinLock) CpuNum {
    return lock.value.load(.monotonic) -% 1;
}
39+
40+
/// Whether the calling CPU currently holds `lock`.
pub inline fn spinLockHeld(lock: *const SpinLock) bool {
    const holder = spinLockHolderCpu(lock);
    return holder == arch.currCpuNum();
}
Lines changed: 110 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,110 @@
1+
//! Copyright 2025 The Drift Authors. All rights reserved.
2+
//! Use of this source code is governed by a BSD-style license that can be
3+
//! found in the LICENSE file.
4+
5+
const std = @import("std");
6+
const assert = @import("assert.zig");
7+
const cpu = @import("cpu.zig");
8+
const ArchSpinLock = @import("arch/SpinLock.zig");
9+
10+
const arch = ArchSpinLock.Impl;
11+
12+
/// Options for controlling SpinLock behavior
pub const SpinLockOptions = packed struct(u3) {
    /// No special options
    /// NOTE(review): appears to be a placeholder so the default `SpinLock`
    /// alias can pass a non-empty literal — confirm it has no other effect.
    none: bool = false,

    /// Enable integration with the lockup_detector to monitor spinlock critical sections.
    ///
    /// See //zircon/kernel/lib/lockup_detector/README.md.
    monitored: bool = false,

    /// Disable tracing
    trace_disabled: bool = false,
};
25+
26+
/// Base spinlock implementation with configurable options.
///
/// `monitored` selects the lockup-detector variants (`acquireMonitored` /
/// `tryAcquireMonitored`); the plain `acquire`/`tryAcquire` assert that
/// monitoring is off, and vice versa.
pub fn SpinLockBase(comptime options: SpinLockOptions) type {
    return struct {
        const Self = @This();

        // Start unlocked: 0 means "no holder"; a holder stores cpu_num + 1.
        // (Initializing to undefined would make a fresh lock appear held.)
        spinlock: ArchSpinLock = .{ .value = std.atomic.Value(cpu.CpuNum).init(0) },

        /// Acquire the spinlock
        /// Interrupts must already be disabled
        pub fn acquire(self: *Self) void {
            assert.debug_assert(@src(), !options.monitored);
            //assert.debug_assert(@src(), arch.intsDisabled()); // TODO: intsDisabled not implemented yet
            assert.debug_assert(@src(), !self.spinlock.spinLockHeld());

            if (options.trace_disabled) {
                arch.archSpinLockNonInstrumented(&self.spinlock);
            } else {
                arch.archSpinLockTraceInstrumented(&self.spinlock, 0); // TODO: Add lock ID
            }
        }

        /// Acquire with monitoring
        pub fn acquireMonitored(self: *Self, _: []const u8) void {
            assert.debug_assert(@src(), options.monitored);
            //assert.debug_assert(@src(), arch.intsDisabled()); // TODO: intsDisabled not implemented yet
            assert.debug_assert(@src(), !self.spinlock.spinLockHeld());

            // TODO: Add lockup detector integration
            if (options.trace_disabled) {
                arch.archSpinLockNonInstrumented(&self.spinlock);
            } else {
                arch.archSpinLockTraceInstrumented(&self.spinlock, 0);
            }
        }

        /// Try to acquire the spinlock without waiting; true on success.
        pub fn tryAcquire(self: *Self) bool {
            assert.debug_assert(@src(), !options.monitored);
            return arch.archSpinTryLock(&self.spinlock);
        }

        /// Try to acquire with monitoring; true on success.
        pub fn tryAcquireMonitored(self: *Self, _: []const u8) bool {
            assert.debug_assert(@src(), options.monitored);
            const acquired = arch.archSpinTryLock(&self.spinlock);
            if (acquired) {
                // TODO: Add lockup detector integration (begin monitoring the
                // critical section). NOTE(review): the original gated this on
                // the failure branch — confirm intended side against the
                // lockup detector design.
            }
            return acquired;
        }

        /// Release the spinlock
        pub fn release(self: *Self) void {
            arch.archSpinUnlock(&self.spinlock);
            if (options.monitored) {
                // TODO: Add lockup detector integration
            }
        }

        /// Check if held by current CPU
        pub fn isHeld(self: *const Self) bool {
            //assert.debug_assert(@src(), arch.intsDisabled()); // TODO: intsDisabled not implemented yet
            return self.spinlock.spinLockHeld();
        }

        /// Assert that lock is held
        pub fn assertHeld(self: *const Self) void {
            assert.assert(@src(), self.isHeld());
        }

        /// Get CPU holding the lock
        pub fn holderCpu(self: *const Self) u32 {
            return self.spinlock.spinLockHolderCpu();
        }
    };
}
102+
103+
/// Standard spinlock without monitoring
pub const SpinLock = SpinLockBase(.{ .none = true });

/// Spinlock with lockup detection
pub const MonitoredSpinLock = SpinLockBase(.{ .monitored = true });

/// Spinlock with tracing disabled
pub const TraceDisabledSpinLock = SpinLockBase(.{ .trace_disabled = true });
Lines changed: 142 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,142 @@
1+
//! Copyright 2025 The Drift Authors. All rights reserved.
2+
//! Use of this source code is governed by a BSD-style license that can be
3+
//! found in the LICENSE file.
4+
5+
const std = @import("std");
6+
7+
pub const build_options = @import("build_options");
8+
9+
/// Whether scheduler-lock spin tracing is compiled in (from build options).
pub const scheduler_lock_spin_tracing_enabled = build_options.SCHEDULER_LOCK_SPIN_TRACING_ENABLED;
/// Whether spin trace records use the compressed encoding (from build options).
pub const scheduler_lock_spin_tracing_compressed = build_options.SCHEDULER_LOCK_SPIN_TRACING_COMPRESSED;

// Compressed records are meaningless without tracing enabled; reject the
// combination at compile time rather than at runtime.
comptime {
    if (scheduler_lock_spin_tracing_compressed and !scheduler_lock_spin_tracing_enabled) {
        @compileError("Error: Compress lock-spin trace records requested, but lock-spin tracing is not enabled.");
    }
}
17+
18+
/// Category of lock an encoded lock id refers to (2-bit field).
pub const LockType = enum(u2) {
    spinlock = 0,
    mutex = 1,
    chain_lock = 2,
    rw_lock = 3,
};

/// How a traced lock-spin episode ended (1-bit field).
pub const FinishType = enum(u1) {
    lock_acquired = 0,
    blocked = 1,
};

/// A 64-bit packed lock identifier used in spin trace records.
///
/// Bit layout, low to high:
///   bits [0, 49)  - lock id (e.g. the lock's address)
///   bits [49, 61) - interned class-name id (12 bits; u16 inputs wider than
///                   12 bits are silently masked - TODO confirm intended)
///   bits [61, 63) - LockType
///   bit  63       - FinishType (only set via finishedValue)
/// A zero value is the "invalid" id.
pub const EncodedLockId = struct {
    const Self = @This();

    value: u64 = 0,

    // Field accessors below all follow the same pattern:
    // encode = place value into the field, decode = extract it,
    // reset = replace only this field, preserving all other bits.
    const IdBits = struct {
        const bits: u32 = 49;
        const shift: u32 = 0;
        const mask: u64 = ((@as(u64, 1) << bits) - 1);

        fn encode(val: u64) u64 {
            return (val & mask) << shift;
        }

        fn decode(val: u64) u64 {
            return (val >> shift) & mask;
        }

        fn reset(old_val: u64, new_val: u64) u64 {
            return (old_val & ~(mask << shift)) | ((new_val & mask) << shift);
        }
    };

    const ClassNameBits = struct {
        const bits: u32 = 12;
        const shift: u32 = 49;
        const mask: u64 = ((@as(u64, 1) << bits) - 1);

        fn encode(val: u16) u64 {
            return (@as(u64, val) & mask) << shift;
        }

        fn decode(val: u64) u16 {
            return @intCast((val >> shift) & mask);
        }

        fn reset(old_val: u64, new_val: u16) u64 {
            return (old_val & ~(mask << shift)) | ((@as(u64, new_val) & mask) << shift);
        }
    };

    const LockTypeBits = struct {
        const bits: u32 = 2;
        const shift: u32 = 61;
        const mask: u64 = ((@as(u64, 1) << bits) - 1);

        fn encode(val: LockType) u64 {
            return (@as(u64, @intFromEnum(val)) & mask) << shift;
        }

        fn decode(val: u64) LockType {
            // The masked value always fits the u2 tag, so @enumFromInt is
            // safe here. (@intFromEnum on the raw integer does not compile.)
            return @enumFromInt((val >> shift) & mask);
        }

        fn reset(old_val: u64, new_val: LockType) u64 {
            return (old_val & ~(mask << shift)) | ((@as(u64, @intFromEnum(new_val)) & mask) << shift);
        }
    };

    const FinishTypeBits = struct {
        const bits: u32 = 1;
        const shift: u32 = 63;
        const mask: u64 = ((@as(u64, 1) << bits) - 1);

        fn encode(val: FinishType) u64 {
            return (@as(u64, @intFromEnum(val)) & mask) << shift;
        }

        fn decode(val: u64) FinishType {
            // Masked to 1 bit, so it always fits the u1 tag.
            return @enumFromInt((val >> shift) & mask);
        }

        fn reset(old_val: u64, new_val: FinishType) u64 {
            return (old_val & ~(mask << shift)) | ((@as(u64, @intFromEnum(new_val)) & mask) << shift);
        }
    };

    /// Build an id from its components; the finish-type bit starts at 0
    /// (lock_acquired).
    pub fn init(lock_type: LockType, lock_id: u64, class_name_id: u16) Self {
        return .{
            .value = LockTypeBits.encode(lock_type) |
                IdBits.encode(lock_id) |
                ClassNameBits.encode(class_name_id),
        };
    }

    /// The all-zero "no lock" id.
    pub fn invalid() Self {
        return .{};
    }

    /// Replace only the class-name field, preserving all other bits.
    pub fn setLockClassId(self: *Self, class_name_id: u16) void {
        self.value = ClassNameBits.reset(self.value, class_name_id);
    }

    /// Raw value with the finish-type bit ORed in, for emitting a trace
    /// record. Does not modify `self`.
    pub fn finishedValue(self: *const Self, finish_type: FinishType) u64 {
        return self.value | FinishTypeBits.encode(finish_type);
    }

    /// The lock-id field (low 49 bits).
    pub fn id(self: *const Self) u64 {
        return IdBits.decode(self.value);
    }

    /// The interned class-name id field.
    pub fn className(self: *const Self) u16 {
        return ClassNameBits.decode(self.value);
    }

    /// The lock-type field.
    pub fn lockType(self: *const Self) LockType {
        return LockTypeBits.decode(self.value);
    }

    /// The finish-type bit.
    pub fn finishType(self: *const Self) FinishType {
        return FinishTypeBits.decode(self.value);
    }
};

0 commit comments

Comments
 (0)