diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0aefbda3d3478..c67993c387a87 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1481,6 +1481,7 @@ config CC_HAVE_SHADOW_CALL_STACK source "kernel/Kconfig.dovetail" source "kernel/Kconfig.rros" +source "kernel/Kconfig.rros_spinlock" config PARAVIRT bool "Enable paravirtualization code" diff --git a/kernel/Kconfig.rros_spinlock b/kernel/Kconfig.rros_spinlock new file mode 100644 index 0000000000000..e3a22f7ff785c --- /dev/null +++ b/kernel/Kconfig.rros_spinlock @@ -0,0 +1,9 @@ +config RROS_SPINLOCK + bool "Enable RROS Spinlock" + depends on RROS + help + Enable spinlock functionality for RROS. This option provides a + specialized spinlock mechanism designed for the Rust Real-time Core. + + Note: This option is experimental and should only be enabled if + you understand the implications on system performance. diff --git a/kernel/rros/clock.rs b/kernel/rros/clock.rs index 9384840c8d9c0..d9ba11869488b 100644 --- a/kernel/rros/clock.rs +++ b/kernel/rros/clock.rs @@ -20,6 +20,8 @@ use crate::{ RROS_OOB_CPUS, }; +use core::cell::OnceCell; + use alloc::rc::Rc; use core::{ @@ -46,7 +48,7 @@ use kernel::{ ktime::*, percpu, prelude::*, - premmpt, spinlock_init, + premmpt, new_spinlock, str::CStr, sync::{Lock, SpinLock}, sysfs, @@ -56,8 +58,15 @@ use kernel::{ user_ptr::{UserSlicePtr, UserSlicePtrReader, UserSlicePtrWriter}, }; -static mut CLOCKLIST_LOCK: SpinLock = unsafe { SpinLock::new(1) }; +pub static mut CLOCKLIST_LOCK: OnceCell>>> = OnceCell::new(); +pub fn clocklist_lock_init() { + unsafe { + CLOCKLIST_LOCK.get_or_init(|| { + Box::pin_init(new_spinlock!(1, "CLOCKLIST_LOCK")).unwrap() + }); + } +} // Define it as a constant here first, and then read it from /dev/rros. 
const CONFIG_RROS_LATENCY_USER: KtimeT = 0; const CONFIG_RROS_LATENCY_KERNEL: KtimeT = 0; @@ -293,8 +302,8 @@ impl RrosClock { pub fn adjust_timer( clock: &RrosClock, - timer: Arc>, - tq: &mut List>>, + timer: Arc>>>, + tq: &mut List>>>>, delta: KtimeT, ) { let date = timer.lock().get_date(); @@ -350,7 +359,7 @@ pub fn rros_adjust_timers(clock: &mut RrosClock, delta: KtimeT) -> Result { let flags: u64 = unsafe { (*tmb).lock.irq_lock_noguard() }; - let mut timers_adjust: Vec>> = + let mut timers_adjust: Vec>>>> = Vec::try_with_capacity(tq.len() as usize)?; while !tq.is_empty() { @@ -538,33 +547,40 @@ pub static mut CLOCK_LIST: List<*mut RrosClock> = List::<*mut RrosClock> { }, }; -pub static mut RROS_CLOCK_FACTORY: SpinLock = unsafe { - SpinLock::new(factory::RrosFactory { - name: unsafe { CStr::from_bytes_with_nul_unchecked("clock\0".as_bytes()) }, - nrdev: CONFIG_RROS_NR_CLOCKS, - build: None, - dispose: Some(clock_factory_dispose), - attrs: None, //sysfs::attribute_group::new(), - flags: factory::RrosFactoryType::Invalid, - inside: Some(factory::RrosFactoryInside { - type_: DeviceType::new(), - class: None, - cdev: None, - device: None, - sub_rdev: None, - kuid: None, - kgid: None, - minor_map: None, - index: None, - name_hash: None, - hash_lock: None, - register: None, - }), - }) -}; +pub static mut RROS_CLOCK_FACTORY: OnceCell>>> = OnceCell::new(); + +pub fn rros_clock_factory_init() { + unsafe { + RROS_CLOCK_FACTORY.get_or_init(|| { + Box::pin_init(new_spinlock!(factory::RrosFactory { + name: CStr::from_bytes_with_nul_unchecked("clock\0".as_bytes()), + nrdev: CONFIG_RROS_NR_CLOCKS, + build: None, + dispose: Some(clock_factory_dispose), + attrs: None, // sysfs::attribute_group::new(), + flags: factory::RrosFactoryType::Invalid, + inside: Some(factory::RrosFactoryInside { + type_: DeviceType::new(), + class: None, + cdev: None, + device: None, + sub_rdev: None, + kuid: None, + kgid: None, + minor_map: None, + index: None, + name_hash: None, + hash_lock: None, + 
register: None, + }), + })) + .unwrap() + }); + } +} pub struct RrosTimerFd { - timer: Arc>, + timer: Arc>>>, readers: RrosWaitQueue, poll_head: RrosPollHead, efile: RrosFile, @@ -574,7 +590,7 @@ pub struct RrosTimerFd { impl RrosTimerFd { fn new() -> Self { Self { - timer: Arc::try_new(unsafe { SpinLock::new(RrosTimer::new(0)) }).unwrap(), + timer: Arc::try_new(unsafe { Box::pin_init(new_spinlock!(RrosTimer::new(0))).unwrap() }).unwrap(), //FIXME: readers initiation is not sure readers: RrosWaitQueue::new(core::ptr::null_mut(), 0), poll_head: RrosPollHead::new(), @@ -584,7 +600,7 @@ impl RrosTimerFd { } } -fn get_timer_value(timer: Arc>, value: &mut Itimerspec64) { +fn get_timer_value(timer: Arc>>>, value: &mut Itimerspec64) { let mut inner_timer_lock = timer.lock(); let inner_timer: &mut RrosTimer = inner_timer_lock.deref_mut(); value.it_interval = ktime_to_timespec64(inner_timer.interval); @@ -596,7 +612,7 @@ fn get_timer_value(timer: Arc>, value: &mut Itimerspec64) { } } -fn set_timer_value(timer: Arc>, value: &Itimerspec64) -> Result { +fn set_timer_value(timer: Arc>>>, value: &Itimerspec64) -> Result { let start: KtimeT; let period: KtimeT; @@ -648,7 +664,7 @@ pub fn double_timer_base_unlock(tb1: *mut RrosTimerbase, tb2: *mut RrosTimerbase // `RrosClock`, `RrosTimerbase`, `RrosRq`. Maybe we can use references to avoid so many raw pointers. 
// FYI: https://github.com/BUPT-OS/RROS/pull/41#discussion_r1680738528 pub fn rros_move_timer( - timer: Arc>, + timer: Arc>>>, clock: *mut RrosClock, mut rq: *mut rros_rq, ) { @@ -697,7 +713,7 @@ pub fn rros_move_timer( } #[cfg(CONFIG_SMP)] -fn pin_timer(timer: Arc>) { +fn pin_timer(timer: Arc>>>) { let flags = hard_local_irq_save(); let this_rq = rros_current_rq(); @@ -713,7 +729,7 @@ fn pin_timer(timer: Arc>) { } #[cfg(not(CONFIG_SMP))] -fn pin_timer(_timer: Arc>) {} +fn pin_timer(_timer: Arc>>>) {} fn set_timerfd( timerfd: &RrosTimerFd, @@ -1079,7 +1095,7 @@ pub fn do_clock_tick(clock: &mut RrosClock, tmb: *mut RrosTimerbase) { let timer_addr = timer.locked_data().get(); let inband_timer_addr = (*rq).get_inband_timer().locked_data().get(); - if (timer_addr == inband_timer_addr) { + if (timer_addr as *const _ == inband_timer_addr as *const _) { (*rq).add_local_flags(RQ_TPROXY); (*rq).change_local_flags(!RQ_TDEFER); continue; @@ -1172,7 +1188,7 @@ fn init_clock(clock: *mut RrosClock, master: *mut RrosClock) -> Result { unsafe { ret = factory::rros_init_element( (*clock).element.as_ref().unwrap().clone(), - &mut RROS_CLOCK_FACTORY, + RROS_CLOCK_FACTORY.get_mut().unwrap(), (*clock).flags & RROS_CLONE_PUBLIC, ); } @@ -1188,7 +1204,7 @@ fn init_clock(clock: *mut RrosClock, master: *mut RrosClock) -> Result { unsafe { ret = factory::rros_create_core_element_device( (*clock).element.as_ref().unwrap().clone(), - &mut RROS_CLOCK_FACTORY, + RROS_CLOCK_FACTORY.get_mut().unwrap(), (*clock).name, ); } @@ -1199,9 +1215,9 @@ fn init_clock(clock: *mut RrosClock, master: *mut RrosClock) -> Result { } unsafe { - CLOCKLIST_LOCK.lock(); + clocklist_lock_init(); + CLOCKLIST_LOCK.get().unwrap().lock(); CLOCK_LIST.add_head(clock); - CLOCKLIST_LOCK.unlock(); } Ok(0) @@ -1269,8 +1285,8 @@ fn rros_init_clock(clock: &mut RrosClock, affinity: &CpumaskT) -> Result } pub fn rros_clock_init() -> Result { - let pinned = unsafe { Pin::new_unchecked(&mut CLOCKLIST_LOCK) }; - 
spinlock_init!(pinned, "CLOCKLIST_LOCK"); + // let pinned = unsafe { Pin::new_unchecked(&mut CLOCKLIST_LOCK) }; + // spinlock_init!(pinned, "CLOCKLIST_LOCK"); unsafe { RROS_MONO_CLOCK.reset_gravity(); RROS_REALTIME_CLOCK.reset_gravity(); diff --git a/kernel/rros/clock_test.rs b/kernel/rros/clock_test.rs index 61488c45e4ea1..f0e38904f34ee 100644 --- a/kernel/rros/clock_test.rs +++ b/kernel/rros/clock_test.rs @@ -1,15 +1,15 @@ use crate::{clock::*, timer::*}; -use kernel::{ktime::*, prelude::*, spinlock_init, sync::SpinLock}; +use kernel::{ktime::*, prelude::*, new_spinlock, sync::SpinLock}; #[allow(dead_code)] pub fn test_do_clock_tick() -> Result { pr_debug!("~~~test_do_clock_tick begin~~~"); unsafe { let tmb = rros_percpu_timers(&RROS_MONO_CLOCK, 0); - let mut a = SpinLock::new(RrosTimer::new(580000000)); - let pinned = Pin::new_unchecked(&mut a); - spinlock_init!(pinned, "zbw"); + let mut a = Box::pin_init(new_spinlock!(RrosTimer::new(580000000))).unwrap(); + // let pinned = Pin::new_unchecked(&mut a); + // spinlock_init!(pinned, "zbw"); let xx = Arc::try_new(a)?; xx.lock().add_status(RROS_TIMER_DEQUEUED); @@ -33,9 +33,9 @@ pub fn test_adjust_timer() -> Result { pr_debug!("~~~test_adjust_timer begin~~~"); unsafe { let tmb = rros_percpu_timers(&RROS_MONO_CLOCK, 0); - let mut a = SpinLock::new(RrosTimer::new(580000000)); - let pinned = Pin::new_unchecked(&mut a); - spinlock_init!(pinned, "a"); + let mut a = Box::pin_init(new_spinlock!(RrosTimer::new(580000000))).unwrap(); + // let pinned = Pin::new_unchecked(&mut a); + // spinlock_init!(pinned, "a"); let xx = Arc::try_new(a)?; xx.lock().add_status(RROS_TIMER_DEQUEUED); @@ -59,13 +59,13 @@ pub fn test_rros_adjust_timers() -> Result { pr_debug!("~~~test_rros_adjust_timers begin~~~"); unsafe { let tmb = rros_percpu_timers(&RROS_MONO_CLOCK, 0); - let mut a = SpinLock::new(RrosTimer::new(580000000)); - let pinned = Pin::new_unchecked(&mut a); - spinlock_init!(pinned, "a"); + let mut a = 
Box::pin_init(new_spinlock!(RrosTimer::new(580000000))).unwrap(); + // let pinned = Pin::new_unchecked(&mut a); + // spinlock_init!(pinned, "a"); - let mut b = SpinLock::new(RrosTimer::new(580000000)); - let pinned = Pin::new_unchecked(&mut b); - spinlock_init!(pinned, "b"); + let mut b = Box::pin_init(new_spinlock!(RrosTimer::new(580000000))).unwrap(); + // let pinned = Pin::new_unchecked(&mut b); + // spinlock_init!(pinned, "b"); let xx = Arc::try_new(a)?; let yy = Arc::try_new(b)?; @@ -104,13 +104,13 @@ pub fn test_rros_stop_timers() -> Result { pr_debug!("~~~test_rros_stop_timers begin~~~"); unsafe { let tmb = rros_percpu_timers(&RROS_MONO_CLOCK, 0); - let mut a = SpinLock::new(RrosTimer::new(580000000)); - let pinned = Pin::new_unchecked(&mut a); - spinlock_init!(pinned, "a"); + let mut a = Box::pin_init(new_spinlock!(RrosTimer::new(580000000))).unwrap(); + // let pinned = Pin::new_unchecked(&mut a); + // spinlock_init!(pinned, "a"); - let mut b = SpinLock::new(RrosTimer::new(580000000)); - let pinned = Pin::new_unchecked(&mut b); - spinlock_init!(pinned, "b"); + let mut b = Box::pin_init(new_spinlock!(RrosTimer::new(580000000))).unwrap(); + // let pinned = Pin::new_unchecked(&mut b); + // spinlock_init!(pinned, "b"); let xx = Arc::try_new(a)?; let yy = Arc::try_new(b)?; diff --git a/kernel/rros/control.rs b/kernel/rros/control.rs index b5d3f94b9a4cd..20b8050ab0462 100644 --- a/kernel/rros/control.rs +++ b/kernel/rros/control.rs @@ -16,34 +16,51 @@ use kernel::{ prelude::*, str::CStr, sync::SpinLock, + new_spinlock, }; +use core::cell::OnceCell; + pub const CONFIG_RROS_NR_CONTROL: usize = 0; -pub static mut RROS_CONTROL_FACTORY: SpinLock = unsafe { - SpinLock::new(RrosFactory { - name: CStr::from_bytes_with_nul_unchecked("control\0".as_bytes()), - nrdev: CONFIG_RROS_NR_CONTROL, - build: None, - dispose: None, - attrs: None, - flags: crate::factory::RrosFactoryType::SINGLE, - inside: Some(RrosFactoryInside { - type_: DeviceType::new(), - class: None, - cdev: 
None, - device: None, - sub_rdev: None, - kuid: None, - kgid: None, - minor_map: None, - index: None, - name_hash: None, - hash_lock: None, - register: None, - }), - }) -}; +pub static mut RROS_CONTROL_FACTORY: OnceCell>>> = OnceCell::new(); + +pub fn rros_control_factory_init() +{ +unsafe{ + RROS_CONTROL_FACTORY.get_or_init(|| { + let temp_lock = Box::pin_init( + new_spinlock!( + RrosFactory { + name: CStr::from_bytes_with_nul("control\0".as_bytes()).expect("Invalid CStr"), + nrdev: CONFIG_RROS_NR_CONTROL, + build: None, + dispose: None, + attrs: None, + flags: crate::factory::RrosFactoryType::SINGLE, + inside: Some(RrosFactoryInside { + type_: DeviceType::new(), + class: None, + cdev: None, + device: None, + sub_rdev: None, + kuid: None, + kgid: None, + minor_map: None, + index: None, + name_hash: None, + hash_lock: None, + register: None, + }), + } + ) + ).unwrap(); + temp_lock + }); + }; +} + + pub struct ControlOps; diff --git a/kernel/rros/double_linked_list_test.rs b/kernel/rros/double_linked_list_test.rs index 9ec60ab1ee0bf..d0be13988e937 100644 --- a/kernel/rros/double_linked_list_test.rs +++ b/kernel/rros/double_linked_list_test.rs @@ -1,23 +1,23 @@ use crate::{clock::*, timer::*}; -use kernel::{prelude::*, spinlock_init, sync::SpinLock}; +use kernel::{prelude::*, new_spinlock, sync::SpinLock}; #[allow(dead_code)] pub fn test_enqueue_by_index() -> Result { pr_debug!("~~~test_double_linked_list begin~~~"); unsafe { let tmb = rros_percpu_timers(&RROS_MONO_CLOCK, 0); - let mut x = SpinLock::new(RrosTimer::new(1)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); - let mut y = SpinLock::new(RrosTimer::new(2)); - let pinned = Pin::new_unchecked(&mut y); - spinlock_init!(pinned, "y"); - let mut z = SpinLock::new(RrosTimer::new(3)); - let pinned = Pin::new_unchecked(&mut z); - spinlock_init!(pinned, "z"); - let mut a = SpinLock::new(RrosTimer::new(4)); - let pinned = Pin::new_unchecked(&mut a); - spinlock_init!(pinned, "a"); + let mut x 
= Box::pin_init(new_spinlock!(RrosTimer::new(1),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); + let mut y = Box::pin_init(new_spinlock!(RrosTimer::new(2),"y")).unwrap(); + // let pinned = Pin::new_unchecked(&mut y); + // spinlock_init!(pinned, "y"); + let mut z = Box::pin_init(new_spinlock!(RrosTimer::new(3),"z")).unwrap(); + // let pinned = Pin::new_unchecked(&mut z); + // spinlock_init!(pinned, "z"); + let mut a = Box::pin_init(new_spinlock!(RrosTimer::new(4),"a")).unwrap(); + // let pinned = Pin::new_unchecked(&mut a); + // spinlock_init!(pinned, "a"); let xx = Arc::try_new(x)?; let yy = Arc::try_new(y)?; diff --git a/kernel/rros/drivers/hectic.rs b/kernel/rros/drivers/hectic.rs index 15ee77b4cf345..effa9a1221340 100644 --- a/kernel/rros/drivers/hectic.rs +++ b/kernel/rros/drivers/hectic.rs @@ -30,7 +30,7 @@ use kernel::{ ktime::KtimeT, mutex_init, prelude::*, - spinlock_init, + new_spinlock, str::CStr, sync::{Lock, Mutex, Semaphore, SpinLock}, task::Task, @@ -115,14 +115,14 @@ pub struct RtswitchContext { switches_count: u32, pause_us: u64, next_task: u32, - wake_up_delay: Arc>, + wake_up_delay: Arc>>>, failed: bool, error: HecticError, utask: u32, wake_utask: IrqWork, stax: Pin>>, - o_guard: SpinLock>, - i_guard: SpinLock>, + o_guard: Pin>>>, + i_guard: Pin>>>, rfile: RrosFile, } @@ -152,7 +152,7 @@ impl RtswitchContext { switches_count: 0, pause_us: 0, next_task: 0, - wake_up_delay: Arc::try_new(unsafe { SpinLock::new(RrosTimer::new(0)) })?, + wake_up_delay: Arc::try_new(unsafe { Box::pin_init(new_spinlock!(RrosTimer::new(0),"wake_up_delay")).unwrap() })?, failed: false, error: HecticError { last_switch: HecticSwitchReq { @@ -164,8 +164,8 @@ impl RtswitchContext { utask: u32::MAX, wake_utask: IrqWork::new(), stax: unsafe { Pin::from(Box::try_new(Stax::new(()))?) 
}, - o_guard: unsafe { SpinLock::new(Vec::new()) }, - i_guard: unsafe { SpinLock::new(Vec::new()) }, + o_guard: unsafe { Box::pin_init(new_spinlock!(Vec::new(),"o_guard")).unwrap() }, + i_guard: unsafe { Box::pin_init(new_spinlock!(Vec::new(),"i_guard")).unwrap() }, rfile: RrosFile::new(), }; Ok(ctx) @@ -186,8 +186,8 @@ impl RtswitchContext { return Err(Error::EINVAL); } (*timer.locked_data().get()).pointer = this as *mut u8; - let t_pinned = Pin::new_unchecked(timer); - spinlock_init!(t_pinned, "wake_up_delay"); + // let t_pinned = Pin::new_unchecked(timer); + // spinlock_init!(t_pinned, "wake_up_delay"); rros_init_timer_on_rq( self.wake_up_delay.clone(), @@ -200,10 +200,10 @@ impl RtswitchContext { } Stax::init((&mut self.stax).as_mut())?; - let o_pinned = unsafe { Pin::new_unchecked(&mut self.o_guard) }; - spinlock_init!(o_pinned, "o_guard"); - let i_pinned = unsafe { Pin::new_unchecked(&mut self.i_guard) }; - spinlock_init!(i_pinned, "i_guard"); + // let o_pinned = unsafe { Pin::new_unchecked(&mut self.o_guard) }; + // spinlock_init!(o_pinned, "o_guard"); + // let i_pinned = unsafe { Pin::new_unchecked(&mut self.i_guard) }; + // spinlock_init!(i_pinned, "i_guard"); Ok(()) } diff --git a/kernel/rros/factory.rs b/kernel/rros/factory.rs index 0b404c2ec7cd3..7b9769406199c 100644 --- a/kernel/rros/factory.rs +++ b/kernel/rros/factory.rs @@ -1,8 +1,11 @@ use core::{ cell::RefCell, clone::Clone, convert::TryInto, default::Default, mem::size_of, ptr, result::Result::Ok, + ops::Deref, }; + +use core::cell::OnceCell; use crate::{clock, control, file::RrosFileBinding, observable, poll, proxy, thread, xbuf}; use alloc::rc::Rc; @@ -18,7 +21,7 @@ use kernel::{ io_buffer::IoBufferWriter, irq_work, kernelh, prelude::*, - rbtree, spinlock_init, + rbtree, new_spinlock, str::CStr, sync::{Lock, SpinLock}, sysfs, types, @@ -75,7 +78,7 @@ const RROS_HANDLE_INDEX_MASK: FundleT = RROS_MUTEX_FLCEIL | RROS_MUTEX_FLCLAIM; pub struct RrosIndex { // #[allow(dead_code)] - rbtree: 
SpinLock>>>, // TODO: modify the u32. + rbtree: Pin>>>>>, // TODO: modify the u32. // lock: SpinLock, // #[allow(dead_code)] generator: FundleT, @@ -92,7 +95,7 @@ pub struct RrosFactoryInside { pub minor_map: Option, pub index: Option, pub name_hash: Option<[types::HlistHead; NAME_HASH_TABLE_SIZE as usize]>, - pub hash_lock: Option>, + pub hash_lock: Option>>>, // FIXME: This const should not be limited to 256. But the rust compiler does not support it. pub register: Option>>>, } @@ -155,7 +158,7 @@ pub struct RrosFactory { pub nrdev: usize, pub build: Option< fn( - fac: &'static mut SpinLock, + fac: &'static mut Pin>>, uname: &'static CStr, u_attrs: Option<*mut u8>, clone_flags: i32, @@ -169,31 +172,38 @@ pub struct RrosFactory { // pub fops: PhantomData, } -pub static mut RROS_FACTORY: SpinLock = unsafe { - SpinLock::new(RrosFactory { - name: CStr::from_bytes_with_nul_unchecked("RROS_DEV\0".as_bytes()), - // fops: Some(&Tmpops), - nrdev: CONFIG_RROS, - build: None, - dispose: None, - attrs: None, //sysfs::attribute_group::new(), - flags: RrosFactoryType::Invalid, - inside: Some(RrosFactoryInside { - type_: DeviceType::new(), - class: None, - cdev: None, - device: None, - sub_rdev: None, - kuid: None, - kgid: None, - minor_map: None, - index: None, - name_hash: None, - hash_lock: None, - register: None, - }), - }) -}; +pub static mut RROS_FACTORY: OnceCell>>> = OnceCell::new(); + +pub fn rros_factory_init() { + unsafe { + RROS_FACTORY.get_or_init(|| { + Box::pin_init(new_spinlock!(RrosFactory { + name: CStr::from_bytes_with_nul_unchecked("RROS_DEV\0".as_bytes()), + // fops: Some(&Tmpops), + nrdev: CONFIG_RROS, + build: None, + dispose: None, + attrs: None, // sysfs::attribute_group::new(), + flags: RrosFactoryType::Invalid, + inside: Some(RrosFactoryInside { + type_: DeviceType::new(), + class: None, + cdev: None, + device: None, + sub_rdev: None, + kuid: None, + kgid: None, + minor_map: None, + index: None, + name_hash: None, + hash_lock: None, + register: None, 
+ }), + })) + .unwrap() + }); + } +} struct Tmpops; @@ -213,14 +223,14 @@ impl FileOperations for Tmpops { pub struct RrosElement { pub rcu_head: types::RcuHead, - pub factory: &'static mut SpinLock, + pub factory: &'static mut Pin>>, pub cdev: Option, pub dev: Option, pub devname: Option, pub minor: u64, pub refs: i32, pub zombie: bool, - pub ref_lock: SpinLock, + pub ref_lock: Pin>>, pub fundle: FundleT, pub clone_flags: i32, // pub struct rb_node index_node;// TODO: in rfl rb_node is not embedded in the struct. @@ -233,16 +243,17 @@ pub struct RrosElement { impl RrosElement { pub fn new() -> Result { + rros_factory_init(); Ok(Self { rcu_head: types::RcuHead::new(), - factory: unsafe { &mut RROS_FACTORY }, + factory: unsafe { RROS_FACTORY.get_mut().unwrap() }, cdev: None, dev: None, devname: None, minor: 0, refs: 0, zombie: false, - ref_lock: unsafe { kernel::sync::SpinLock::::new(0) }, + ref_lock: unsafe { Box::pin_init(new_spinlock!(0,"value")).unwrap() }, fundle: 0, clone_flags: 0, irq_work: irq_work::IrqWork::new(), @@ -319,7 +330,7 @@ impl device::Devnode for FactoryTypeDevnode { fn create_element_device( e: Rc>, - fac: &'static mut SpinLock, + fac: &'static mut Pin>>, ) -> Result { let mut fac_lock = unsafe { (*fac.locked_data().get()).inside.as_mut() }; let mut rdev: class::DevT = class::DevT::new(0); @@ -339,11 +350,7 @@ fn create_element_device( // goto fail_hash; // } - // hash_add(fac->name_hash, &e->hash, hlen); - - unsafe { - inside.hash_lock.as_ref().unwrap().unlock(); - } + // hash_add(fac->name_hash, &e->hash, hlen) 0 } @@ -395,7 +402,7 @@ fn rros_element_has_coredev(e: Rc>) -> bool { fn do_element_visibility( e: Rc>, - fac: &'static mut SpinLock, + fac: &'static mut Pin>>, _rdev: &mut class::DevT, ) -> Result { // static int do_element_visibility(struct rros_element *e, @@ -633,7 +640,7 @@ pub fn bind_file_to_element( pub fn rros_create_core_element_device( e: Rc>, - fac: &'static mut SpinLock, + fac: &'static mut Pin>>, name: &'static CStr, ) -> 
Result { let e_clone = e.clone(); @@ -682,7 +689,7 @@ pub fn rros_create_core_element_device( // TODO: The global variable should not use *mut to pass the value. pub fn rros_init_element( e: Rc>, - fac: &'static mut SpinLock, + fac: &'static mut Pin>>, clone_flags: i32, ) -> Result { let mut minor = 0; @@ -714,9 +721,7 @@ pub fn rros_init_element( } None => 1, }; - unsafe { - fac.unlock(); - } + drop(fac_lock); let e_clone = e.clone(); let mut e_mut = e_clone.borrow_mut(); @@ -731,8 +736,8 @@ pub fn rros_init_element( e_mut.fundle = RROS_NO_HANDLE; e_mut.devname = None; e_mut.clone_flags = clone_flags; - let pinned = unsafe { Pin::new_unchecked(&mut e_mut.ref_lock) }; - spinlock_init!(pinned, "value"); + // let pinned = unsafe { Pin::new_unchecked(&mut e_mut.ref_lock) }; + // spinlock_init!(pinned, "value"); Ok(0) } @@ -761,7 +766,7 @@ fn create_sys_device( } fn rros_create_factory( - fac: &mut SpinLock, + fac: &mut Pin>>, rros_class: Arc, chrdev_reg: &mut Pin>>, this_module: &'static ThisModule, @@ -910,27 +915,26 @@ fn rros_create_factory( } let mut index = RrosIndex { - rbtree: unsafe { SpinLock::new(rbtree::RBTree::new()) }, + rbtree: unsafe { Box::pin_init(new_spinlock!(rbtree::RBTree::new(),"value")).unwrap() }, generator: RROS_NO_HANDLE, }; - let pinned = unsafe { Pin::new_unchecked(&mut index.rbtree) }; - spinlock_init!(pinned, "value"); + // let pinned = unsafe { Pin::new_unchecked(&mut index.rbtree) }; + // spinlock_init!(pinned, "value"); inside.index = Some(index); let mut hashname: [types::HlistHead; NAME_HASH_TABLE_SIZE as usize] = [types::HlistHead::new(); NAME_HASH_TABLE_SIZE as usize]; types::hash_init(hashname[0].as_list_head(), NAME_HASH_TABLE_SIZE); inside.name_hash = Some(hashname); - let mut hash_lock = unsafe { SpinLock::new(0) }; - let pinned = unsafe { Pin::new_unchecked(&mut hash_lock) }; - spinlock_init!(pinned, "device_name_hash_lock"); + let mut hash_lock = unsafe { Box::pin_init(new_spinlock!(0,"device_name_hash_lock")).unwrap() }; + 
// let pinned = unsafe { Pin::new_unchecked(&mut hash_lock) }; + // spinlock_init!(pinned, "device_name_hash_lock"); inside.hash_lock = Some(hash_lock); 0 } None => 1, }; - unsafe { fac.unlock() }; match res { 1 => Err(kernel::Error::EINVAL), _ => Ok(0), @@ -955,17 +959,18 @@ pub struct CloneOps; impl FileOpener for CloneOps { fn open(shared: &u8, _fileref: &File) -> Result { + thread::rros_thread_factory_init(); let mut data = CloneData::default(); unsafe { data.ptr = shared as *const u8 as *mut u8; let a = KuidT::from_inode_ptr(shared as *const u8); let b = KgidT::from_inode_ptr(shared as *const u8); - (*thread::RROS_THREAD_FACTORY.locked_data().get()) + (*thread::RROS_THREAD_FACTORY.get_mut().unwrap().locked_data().get()) .inside .as_mut() .unwrap() .kuid = Some(a); - (*thread::RROS_THREAD_FACTORY.locked_data().get()) + (*thread::RROS_THREAD_FACTORY.get_mut().unwrap().locked_data().get()) .inside .as_mut() .unwrap() @@ -1021,7 +1026,7 @@ impl FileOperations for CloneOps { } fn create_core_factories( - factories: &mut [&mut SpinLock], + factories: &mut [&mut Pin>>], nr: usize, rros_class: Arc, chrdev_reg: &mut Pin>>, @@ -1041,16 +1046,26 @@ fn create_core_factories( pub fn rros_early_init_factories( this_module: &'static ThisModule, ) -> Result>>> { + //init + clock::rros_clock_factory_init(); + thread::rros_thread_factory_init(); + xbuf::rros_xbuf_factory_init(); + proxy::rros_proxy_factory_init(); + control::rros_control_factory_init(); + poll::rros_poll_factory_init(); + observable::rros_observable_factory_init(); + + // TODO: move the number of factories to a variable - let mut early_factories: [&mut SpinLock; 7] = unsafe { + let mut early_factories: [&mut Pin>>; 7] = unsafe { [ - &mut clock::RROS_CLOCK_FACTORY, - &mut thread::RROS_THREAD_FACTORY, - &mut xbuf::RROS_XBUF_FACTORY, - &mut proxy::RROS_PROXY_FACTORY, - &mut control::RROS_CONTROL_FACTORY, - &mut poll::RROS_POLL_FACTORY, - &mut observable::RROS_OBSERVABLE_FACTORY, + 
clock::RROS_CLOCK_FACTORY.get_mut().unwrap(), + thread::RROS_THREAD_FACTORY.get_mut().unwrap(), + xbuf::RROS_XBUF_FACTORY.get_mut().unwrap(), + proxy::RROS_PROXY_FACTORY.get_mut().unwrap(), + control::RROS_CONTROL_FACTORY.get_mut().unwrap(), + poll::RROS_POLL_FACTORY.get_mut().unwrap(), + observable::RROS_OBSERVABLE_FACTORY.get_mut().unwrap(), ] }; // static struct rros_factory *early_factories[] = { @@ -1311,10 +1326,11 @@ pub fn ioctl_clone_device(file: &File, _cmd: u32, arg: usize) -> Result { let e: Rc> = if fdname == "xbuf" { pr_debug!("ioctl_clone_device: xbuf clone"); unsafe { - (*xbuf::RROS_XBUF_FACTORY.locked_data().get()) + xbuf::rros_xbuf_factory_init(); + (*xbuf::RROS_XBUF_FACTORY.get_mut().unwrap().locked_data().get()) .build .unwrap()( - &mut xbuf::RROS_XBUF_FACTORY, + xbuf::RROS_XBUF_FACTORY.get_mut().unwrap(), cstr_u_name, Some(u_attrs), real_req.clone_flags.try_into().unwrap(), @@ -1324,10 +1340,11 @@ pub fn ioctl_clone_device(file: &File, _cmd: u32, arg: usize) -> Result { } else if fdname == "proxy" { pr_debug!("ioctl_clone_device: proxy clone"); unsafe { - (*proxy::RROS_PROXY_FACTORY.locked_data().get()) + proxy::rros_proxy_factory_init(); + (*proxy::RROS_PROXY_FACTORY.get_mut().unwrap().locked_data().get()) .build .unwrap()( - &mut proxy::RROS_PROXY_FACTORY, + proxy::RROS_PROXY_FACTORY.get_mut().unwrap(), cstr_u_name, Some(u_attrs), real_req.clone_flags.try_into().unwrap(), @@ -1337,10 +1354,11 @@ pub fn ioctl_clone_device(file: &File, _cmd: u32, arg: usize) -> Result { } else if fdname == "observable" { pr_debug!("ioctl_clone_device: observable clone"); unsafe { - (*observable::RROS_OBSERVABLE_FACTORY.locked_data().get()) + observable::rros_observable_factory_init(); + (*observable::RROS_OBSERVABLE_FACTORY.get_mut().unwrap().locked_data().get()) .build .unwrap()( - &mut observable::RROS_OBSERVABLE_FACTORY, + observable::RROS_OBSERVABLE_FACTORY.get_mut().unwrap(), cstr_u_name, Some(u_attrs), real_req.clone_flags.try_into().unwrap(), @@ -1350,10 
+1368,11 @@ pub fn ioctl_clone_device(file: &File, _cmd: u32, arg: usize) -> Result { } else { pr_debug!("maybe a thread"); unsafe { - (*thread::RROS_THREAD_FACTORY.locked_data().get()) + thread::rros_thread_factory_init(); + (*thread::RROS_THREAD_FACTORY.get_mut().unwrap().locked_data().get()) .build .unwrap()( - &mut thread::RROS_THREAD_FACTORY, + thread::RROS_THREAD_FACTORY.get_mut().unwrap(), cstr_u_name, Some(u_attrs), 0, @@ -1377,18 +1396,20 @@ pub fn ioctl_clone_device(file: &File, _cmd: u32, arg: usize) -> Result { // TODO: create the element device let _ret = if fdname == "xbuf" { pr_debug!("ioctl_clone_device: xbuf element create"); - create_element_device(e.clone(), unsafe { &mut xbuf::RROS_XBUF_FACTORY }) + create_element_device(e.clone(), unsafe { xbuf::RROS_XBUF_FACTORY.get_mut().unwrap() }) } else if fdname == "proxy" { pr_debug!("ioctl_clone_device: proxy element create"); - create_element_device(e.clone(), unsafe { &mut proxy::RROS_PROXY_FACTORY }) + proxy::rros_proxy_factory_init(); + create_element_device(e.clone(), unsafe { proxy::RROS_PROXY_FACTORY.get_mut().unwrap() }) } else if fdname == "observable" { pr_debug!("ioctl_clone_device: observable element create"); + observable::rros_observable_factory_init(); create_element_device(e.clone(), unsafe { - &mut observable::RROS_OBSERVABLE_FACTORY + observable::RROS_OBSERVABLE_FACTORY.get_mut().unwrap() }) } else { pr_debug!("maybe a thread"); - create_element_device(e.clone(), unsafe { &mut thread::RROS_THREAD_FACTORY }) + create_element_device(e.clone(), unsafe { thread::RROS_THREAD_FACTORY.get_mut().unwrap() }) }; let e_clone = e.clone(); let mut e_mut = e_clone.borrow_mut(); diff --git a/kernel/rros/fifo.rs b/kernel/rros/fifo.rs index 98f3dfebe8084..4744ed5a263a3 100644 --- a/kernel/rros/fifo.rs +++ b/kernel/rros/fifo.rs @@ -5,6 +5,7 @@ use kernel::{ prelude::*, sync::{Lock, SpinLock}, }; +use core::ops::Deref; pub static mut RROS_SCHED_FIFO: sched::RrosSchedClass = sched::RrosSchedClass { 
sched_init: Some(rros_fifo_init), @@ -93,34 +94,34 @@ fn rros_fifo_tick(rq: Option<*mut sched::rros_rq>) -> Result { } fn rros_fifo_setparam( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) -> Result { return __rros_set_fifo_schedparam(thread.clone(), p.clone()); } fn rros_fifo_getparam( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) { __rros_get_fifo_schedparam(thread.clone(), p.clone()); } fn rros_fifo_chkparam( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) -> Result { return __rros_chk_fifo_schedparam(thread.clone(), p.clone()); } fn rros_fifo_trackprio( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) { __rros_track_fifo_priority(thread.clone(), p.clone()); } -fn rros_fifo_ceilprio(thread: Arc>, prio: i32) { +fn rros_fifo_ceilprio(thread: Arc>>>, prio: i32) { __rros_ceil_fifo_priority(thread.clone(), prio); } @@ -140,8 +141,8 @@ fn rros_fifo_show( } fn __rros_set_fifo_schedparam( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) -> Result { let thread_clone = thread.clone(); let p_unwrap = p.unwrap(); @@ -158,16 +159,16 @@ fn __rros_set_fifo_schedparam( } fn __rros_get_fifo_schedparam( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) { p.unwrap().lock().fifo.prio = thread.unwrap().lock().cprio; } // The logic is complete, but haven't been tested. 
fn __rros_chk_fifo_schedparam( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) -> Result { let thread_unwrap = thread.unwrap(); let mut min = RROS_FIFO_MIN_PRIO; @@ -188,8 +189,8 @@ fn __rros_chk_fifo_schedparam( } fn __rros_track_fifo_priority( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) { let thread_unwrap = thread.unwrap(); if p.is_some() { @@ -200,11 +201,11 @@ fn __rros_track_fifo_priority( } } -fn __rros_ceil_fifo_priority(thread: Arc>, prio: i32) { +fn __rros_ceil_fifo_priority(thread: Arc>>>, prio: i32) { unsafe { (*thread.locked_data().get()).cprio = prio }; } -pub fn __rros_dequeue_fifo_thread(thread: Arc>) -> Result { +pub fn __rros_dequeue_fifo_thread(thread: Arc>>>) -> Result { let rq_next = thread.lock().rq_next.clone(); if rq_next.is_none() { return Err(kernel::Error::EINVAL); @@ -219,7 +220,7 @@ pub fn __rros_dequeue_fifo_thread(thread: Arc>) -> R } // Enter the queue according to the priority. Note that rq_next must be assigned here---this variable is used when dequeuing. -pub fn __rros_enqueue_fifo_thread(thread: Arc>) -> Result { +pub fn __rros_enqueue_fifo_thread(thread: Arc>>>) -> Result { let rq_ptr; match thread.lock().rq.clone() { Some(rq) => rq_ptr = rq, @@ -238,7 +239,7 @@ pub fn __rros_enqueue_fifo_thread(thread: Arc>) -> R loop { unsafe { let pos_cprio = p.unwrap().as_ref().value.lock().cprio; - if p.unwrap().as_ptr() == &mut q.head as *mut Node>> + if p.unwrap().as_ptr() == &mut q.head as *mut Node>>>> || new_cprio <= pos_cprio { p.unwrap() @@ -259,7 +260,7 @@ pub fn __rros_enqueue_fifo_thread(thread: Arc>) -> R Ok(0) } -pub fn __rros_requeue_fifo_thread(thread: Arc>) -> Result { +pub fn __rros_requeue_fifo_thread(thread: Arc>>>) -> Result { unsafe { let rq_ptr; match (*thread.locked_data().get()).rq.clone() { @@ -278,7 +279,7 @@ pub fn __rros_requeue_fifo_thread(thread: Arc>) -> R // Traverse in reverse order. 
loop { let pos_cprio = (*(p.unwrap().as_ref().value).locked_data().get()).cprio; - if p.unwrap().as_ptr() == &mut q.head as *mut Node>> + if p.unwrap().as_ptr() == &mut q.head as *mut Node>>>> || new_cprio < pos_cprio { p.unwrap() diff --git a/kernel/rros/fifo_test.rs b/kernel/rros/fifo_test.rs index c6f78b5929f5e..e94a2983eb7b0 100644 --- a/kernel/rros/fifo_test.rs +++ b/kernel/rros/fifo_test.rs @@ -1,14 +1,15 @@ use crate::{fifo::*, sched::*, thread::*, timer::*, RROS_OOB_CPUS}; use core::clone::Clone; -use kernel::{c_str, cpumask::CpumaskT, prelude::*, spinlock_init, sync::Lock, sync::SpinLock}; +use core::ops::Deref; +use kernel::{c_str, cpumask::CpumaskT, prelude::*, new_spinlock, sync::Lock, sync::SpinLock}; #[allow(dead_code)] -pub fn test_init_thread(thread: Arc>, prio: i32) -> Result { +pub fn test_init_thread(thread: Arc>>>, prio: i32) -> Result { let mut iattr = RrosInitThreadAttr::new(); unsafe { iattr.affinity = &RROS_OOB_CPUS as *const CpumaskT; iattr.sched_class = Some(&RROS_SCHED_FIFO); - let sched_param = Arc::try_new(SpinLock::new(RrosSchedParam::new()))?; + let sched_param = Arc::try_new(Box::pin_init(new_spinlock!(RrosSchedParam::new())).unwrap())?; (*sched_param.locked_data().get()).fifo.prio = prio; (*sched_param.locked_data().get()).idle.prio = prio; (*sched_param.locked_data().get()).weak.prio = prio; @@ -25,51 +26,51 @@ pub fn test_rros_enqueue_fifo_thread() -> Result { let mut length; // create thread1 - let mut t1 = SpinLock::new(RrosThread::new().unwrap()); - let pinned = Pin::new_unchecked(&mut t1); - spinlock_init!(pinned, "create_thread1"); + let mut t1 = Box::pin_init(new_spinlock!(RrosThread::new().unwrap(),"create_thread1")).unwrap(); + // let pinned = Pin::new_unchecked(&mut t1); + // spinlock_init!(pinned, "create_thread1"); let thread1 = Arc::try_new(t1)?; - let mut r1 = SpinLock::new(RrosTimer::new(1)); - let pinned_r1 = Pin::new_unchecked(&mut r1); - spinlock_init!(pinned_r1, "rtimer_1"); - let mut p1 = 
SpinLock::new(RrosTimer::new(1)); - let pinned_p = Pin::new_unchecked(&mut p1); - spinlock_init!(pinned_p, "ptimer_1"); + let mut r1 = Box::pin_init(new_spinlock!(RrosTimer::new(1), "rtimer_1")).unwrap(); + // let pinned_r1 = Pin::new_unchecked(&mut r1); + // spinlock_init!(pinned_r1, "rtimer_1"); + let mut p1 = Box::pin_init(new_spinlock!(RrosTimer::new(1), "ptimer_1")).unwrap(); + // let pinned_p = Pin::new_unchecked(&mut p1); + // spinlock_init!(pinned_p, "ptimer_1"); thread1.lock().rtimer = Some(Arc::try_new(r1).unwrap()); thread1.lock().ptimer = Some(Arc::try_new(p1).unwrap()); test_init_thread(thread1.clone(), 22)?; // create thread2 - let mut t2 = SpinLock::new(RrosThread::new().unwrap()); - let pinned = Pin::new_unchecked(&mut t2); - spinlock_init!(pinned, "create_thread1"); + let mut t2 = Box::pin_init(new_spinlock!(RrosThread::new().unwrap(), "create_thread1")).unwrap(); + // let pinned = Pin::new_unchecked(&mut t2); + // spinlock_init!(pinned, "create_thread1"); let thread2 = Arc::try_new(t2)?; - let mut r2 = SpinLock::new(RrosTimer::new(1)); - let pinned_r2 = Pin::new_unchecked(&mut r2); - spinlock_init!(pinned_r2, "rtimer_2"); - let mut p2 = SpinLock::new(RrosTimer::new(1)); - let pinned_p = Pin::new_unchecked(&mut p2); - spinlock_init!(pinned_p, "ptimer_2"); + let mut r2 = Box::pin_init(new_spinlock!(RrosTimer::new(1), "rtimer_2")).unwrap(); + // let pinned_r2 = Pin::new_unchecked(&mut r2); + // spinlock_init!(pinned_r2, "rtimer_2"); + let mut p2 = Box::pin_init(new_spinlock!(RrosTimer::new(1), "ptimer_2")).unwrap(); + // let pinned_p = Pin::new_unchecked(&mut p2); + // spinlock_init!(pinned_p, "ptimer_2"); thread2.lock().rtimer = Some(Arc::try_new(r2).unwrap()); thread2.lock().ptimer = Some(Arc::try_new(p2).unwrap()); test_init_thread(thread2.clone(), 33)?; // // create thread3 - let mut t3 = SpinLock::new(RrosThread::new().unwrap()); - let pinned = Pin::new_unchecked(&mut t3); - spinlock_init!(pinned, "create_thread1"); + let mut t3 = 
Box::pin_init(new_spinlock!(RrosThread::new().unwrap(), "create_thread1")).unwrap(); + // let pinned = Pin::new_unchecked(&mut t3); + // spinlock_init!(pinned, "create_thread1"); let thread3 = Arc::try_new(t3)?; - let mut r3 = SpinLock::new(RrosTimer::new(1)); - let pinned_r3 = Pin::new_unchecked(&mut r3); - spinlock_init!(pinned_r3, "rtimer_3"); - let mut p3 = SpinLock::new(RrosTimer::new(1)); - let pinned_p = Pin::new_unchecked(&mut p3); - spinlock_init!(pinned_p, "ptimer_3"); + let mut r3 = Box::pin_init(new_spinlock!(RrosTimer::new(1), "rtimer_3")).unwrap(); + // let pinned_r3 = Pin::new_unchecked(&mut r3); + // spinlock_init!(pinned_r3, "rtimer_3"); + let mut p3 = Box::pin_init(new_spinlock!(RrosTimer::new(1), "ptimer_3")).unwrap(); + // let pinned_p = Pin::new_unchecked(&mut p3); + // spinlock_init!(pinned_p, "ptimer_3"); thread3.lock().rtimer = Some(Arc::try_new(r3).unwrap()); thread3.lock().ptimer = Some(Arc::try_new(p3).unwrap()); diff --git a/kernel/rros/file.rs b/kernel/rros/file.rs index aceefc771d5ad..f2291a85fd9dc 100644 --- a/kernel/rros/file.rs +++ b/kernel/rros/file.rs @@ -6,6 +6,8 @@ use core::{ ptr::{self, NonNull}, }; +use core::cell::OnceCell; + use crate::{ crossing::{ rros_down_crossing, rros_init_crossing, rros_pass_crossing, rros_up_crossing, RrosCrossing, @@ -25,6 +27,7 @@ use kernel::{ str::CStr, sync::{Lock, SpinLock}, task::Task, + new_spinlock, }; pub struct RrosFileBinding { @@ -150,19 +153,27 @@ impl DerefMut for FdTree { } } unsafe impl Send for FdTree {} -init_static_sync! { - static FD_TREE: SpinLock = FdTree(rbtree::RBTree::new()); -} +// init_static_sync! 
{ +// static FD_TREE: SpinLock = FdTree(rbtree::RBTree::new()); +// } -// pub static mut FD_TREE: Option>>> = Some(init_rbtree().unwrap()); +static mut FD_TREE: OnceCell>>> = OnceCell::new(); +pub fn fd_tree_init() { + unsafe { + FD_TREE.get_or_init(|| { + Box::pin_init(new_spinlock!(FdTree(rbtree::RBTree::new()))).unwrap() + }); + } +} /// Insert the given rfd to static rbtree FD_TREE. pub fn index_rfd(rfd: RrosFd, _filp: *mut bindings::file) -> Result { - let flags = FD_TREE.irq_lock_noguard(); + fd_tree_init(); + let flags = unsafe { FD_TREE.get().unwrap().irq_lock_noguard() }; unsafe { - (*FD_TREE.locked_data().get()).try_insert(rfd.fd, rfd)?; + (*FD_TREE.get().unwrap().locked_data().get()).try_insert(rfd.fd, rfd)?; } - FD_TREE.irq_unlock_noguard(flags); + unsafe { FD_TREE.get().unwrap().irq_unlock_noguard(flags) }; Ok(0) // unlock FD_TREE here } @@ -171,13 +182,14 @@ pub fn index_rfd(rfd: RrosFd, _filp: *mut bindings::file) -> Result { /// /// Returns a reference to the rfd corresponding to the fd. pub fn lookup_rfd(fd: u32, _files: &mut FilesStruct) -> Option<*mut RrosFd> { - let flags = FD_TREE.irq_lock_noguard(); + fd_tree_init(); + let flags = unsafe { FD_TREE.get().unwrap().irq_lock_noguard() }; // `get_mut` has the same name as the lock's `get_mut`, so unsafe is used. - if let Some(rfd) = unsafe { (*FD_TREE.locked_data().get()).get_mut(&fd) } { - FD_TREE.irq_unlock_noguard(flags); + if let Some(rfd) = unsafe { (*FD_TREE.get().unwrap().locked_data().get()).get_mut(&fd) } { + unsafe { FD_TREE.get().unwrap().irq_unlock_noguard(flags) }; return Some(rfd as *mut RrosFd); } else { - FD_TREE.irq_unlock_noguard(flags); + unsafe { FD_TREE.get().unwrap().irq_unlock_noguard(flags) }; return None; } } @@ -186,17 +198,18 @@ pub fn lookup_rfd(fd: u32, _files: &mut FilesStruct) -> Option<*mut RrosFd> { /// /// It returns the value that was removed if rfd exists, or ['None'] otherwise. 
pub fn unindex_rfd(fd: u32, _files: &mut FilesStruct) -> Option { - let flags = FD_TREE.irq_lock_noguard(); + fd_tree_init(); + let flags = unsafe { FD_TREE.get().unwrap().irq_lock_noguard() }; pr_debug!("unindex_rfd 1"); - let ret = unsafe { (*FD_TREE.locked_data().get()).remove(&fd) }; + let ret = unsafe { (*FD_TREE.get().unwrap().locked_data().get()).remove(&fd) }; pr_debug!("unindex_rfd 2"); if ret.is_none() { pr_debug!("unindex_rfd 3"); - FD_TREE.irq_unlock_noguard(flags); + unsafe { FD_TREE.get().unwrap().irq_unlock_noguard(flags) }; return None; } else { pr_debug!("unindex_rfd 4"); - FD_TREE.irq_unlock_noguard(flags); + unsafe { FD_TREE.get().unwrap().irq_unlock_noguard(flags) }; return Some(ret.unwrap()); } } diff --git a/kernel/rros/guard.rs b/kernel/rros/guard.rs index 977993e6f03c3..fe3339c729956 100644 --- a/kernel/rros/guard.rs +++ b/kernel/rros/guard.rs @@ -28,6 +28,7 @@ impl core::ops::Deref for Guard<'_, L> { } } + impl core::ops::DerefMut for Guard<'_, L> { fn deref_mut(&mut self) -> &mut L::Inner { // SAFETY: The caller owns the lock, so it is safe to deref the protected data. @@ -35,6 +36,7 @@ impl core::ops::DerefMut for Guard<'_, L> { } } + impl Drop for Guard<'_, L> { fn drop(&mut self) { // SAFETY: The caller owns the lock, so it is safe to unlock it. @@ -42,6 +44,7 @@ impl Drop for Guard<'_, L> { } } + impl<'a, L: RrosLock + ?Sized> Guard<'a, L> { /// Constructs a new lock guard. 
/// diff --git a/kernel/rros/idle.rs b/kernel/rros/idle.rs index f9513556af5f6..2ce02a4078de6 100644 --- a/kernel/rros/idle.rs +++ b/kernel/rros/idle.rs @@ -3,13 +3,14 @@ use kernel::{ prelude::*, sync::{Lock, SpinLock}, }; +use core::ops::Deref; pub static mut RROS_SCHED_IDLE: sched::RrosSchedClass = sched::RrosSchedClass { - sched_pick: Some(rros_idle_pick), - sched_setparam: Some(rros_idle_setparam), - sched_getparam: Some(rros_idle_getparam), - sched_trackprio: Some(rros_idle_trackprio), - sched_ceilprio: Some(rros_idle_ceilprio), + sched_pick: Some(rros_idle_pick as fn(Option<*mut sched::rros_rq>) -> Result>>>>), + sched_setparam: Some(rros_idle_setparam as fn(Option>>>>, Option>>>>) -> Result), + sched_getparam: Some(rros_idle_getparam as fn(Option>>>>, Option>>>>) -> ()), + sched_trackprio: Some(rros_idle_trackprio as fn(Option>>>>, Option>>>>) -> ()), + sched_ceilprio: Some(rros_idle_ceilprio as fn(Arc>>>, i32) -> ()), weight: 0 * sched::RROS_CLASS_WEIGHT_FACTOR, policy: sched::SCHED_IDLE, name: "idle", @@ -63,7 +64,7 @@ pub const RROS_IDLE_PRIO: i32 = -1; // return RROS_SCHED_IDLE.clone(); // } -fn rros_idle_pick(rq: Option<*mut sched::rros_rq>) -> Result>> { +fn rros_idle_pick(rq: Option<*mut sched::rros_rq>) -> Result>>>> { match rq { Some(_) => (), None => return Err(kernel::Error::EINVAL), @@ -79,49 +80,49 @@ fn rros_idle_pick(rq: Option<*mut sched::rros_rq>) -> Result>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) -> Result { return __rros_set_idle_schedparam(thread.clone(), p.clone()); } fn __rros_set_idle_schedparam( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) -> Result { let thread_clone = thread.clone(); let thread_unwrap = thread_clone.unwrap(); // let mut thread_lock = thread_unwrap.lock(); let p_unwrap = p.unwrap(); thread_unwrap.lock().state &= !T_WEAK; - let prio = unsafe { (*p_unwrap.locked_data().get()).idle.prio }; + let prio = unsafe { (*p_unwrap.lock().deref()).idle.prio }; return 
sched::rros_set_effective_thread_priority(thread.clone(), prio); } fn rros_idle_getparam( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) { __rros_get_idle_schedparam(thread.clone(), p.clone()); } fn __rros_get_idle_schedparam( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) { p.unwrap().lock().idle.prio = thread.unwrap().lock().cprio; } fn rros_idle_trackprio( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) { __rros_track_idle_priority(thread.clone(), p.clone()); } fn __rros_track_idle_priority( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) { if p.is_some() { pr_warn!("Inheriting a priority-less class makes no sense."); @@ -130,10 +131,10 @@ fn __rros_track_idle_priority( } } -fn rros_idle_ceilprio(thread: Arc>, prio: i32) { +fn rros_idle_ceilprio(thread: Arc>>>, prio: i32) { __rros_ceil_idle_priority(thread.clone(), prio); } -fn __rros_ceil_idle_priority(_thread: Arc>, _prio: i32) { +fn __rros_ceil_idle_priority(_thread: Arc>>>, _prio: i32) { pr_warn!("RROS_WARN_ON_ONCE(CORE, 1)"); } diff --git a/kernel/rros/lock.rs b/kernel/rros/lock.rs index a23ffb4b9e708..6d7c7fd8558cf 100644 --- a/kernel/rros/lock.rs +++ b/kernel/rros/lock.rs @@ -1,9 +1,9 @@ -use kernel::{c_types, prelude::*, spinlock_init, sync::SpinLock}; +use kernel::{c_types, prelude::*, new_spinlock, sync::SpinLock}; -pub fn raw_spin_lock_init(lock: &mut SpinLock) { - *lock = unsafe { SpinLock::new(1) }; - let pinned = unsafe { Pin::new_unchecked(lock) }; - spinlock_init!(pinned, "timerbase"); +pub fn raw_spin_lock_init(lock: &mut Pin>>) { + *lock = unsafe { Box::pin_init(new_spinlock!(1,"timerbase")).unwrap() }; + // let pinned = unsafe { Pin::new_unchecked(lock) }; + // spinlock_init!(pinned, "timerbase"); } extern "C" { diff --git a/kernel/rros/memory.rs b/kernel/rros/memory.rs index 15a380f2e4e20..299d81557a7cd 100644 --- a/kernel/rros/memory.rs +++ b/kernel/rros/memory.rs @@ -3,7 +3,7 @@ 
use core::{mem::size_of, mem::zeroed}; use kernel::{ mm, prelude::*, - premmpt, rbtree, spinlock_init, + premmpt, rbtree, new_spinlock, sync::{self, SpinLock}, vmalloc, }; @@ -61,7 +61,8 @@ struct RrosHeap { usable_size: usize, used_size: usize, buckets: [u32; RROS_HEAP_MAX_BUCKETS as usize], - lock: sync::SpinLock, + lock: Pin>> +, next: list::ListHead, } @@ -76,7 +77,7 @@ impl RrosHeap { usable_size: 0, used_size: 0, buckets: [0; RROS_HEAP_MAX_BUCKETS as usize], - lock: unsafe { sync::SpinLock::::new(0) }, + lock: unsafe { Box::pin_init(new_spinlock!(0)).unwrap() }, next: list::ListHead::default(), }) } @@ -125,10 +126,10 @@ pub static mut RROS_SHM_SIZE: usize = 0; #[allow(dead_code)] pub fn init_memory(sysheap_size_arg: u32) -> Result { - let rros_system_heap: Arc> = - Arc::try_new(unsafe { SpinLock::new(RrosHeap::new()?) })?; - let rros_shared_heap: Arc> = - Arc::try_new(unsafe { SpinLock::new(RrosHeap::new()?) })?; + let rros_system_heap: Arc>>> = + Arc::try_new(unsafe { Box::pin_init(new_spinlock!(RrosHeap::new()?)).unwrap() })?; + let rros_shared_heap: Arc>>> = + Arc::try_new(unsafe { Box::pin_init(new_spinlock!(RrosHeap::new()?)).unwrap() })?; // let mut RrosHeapRangeManage: Arc> = // Arc::try_new(unsafe{SpinLock::new(RrosHeapPgentry::new()?)})?; @@ -139,8 +140,8 @@ pub fn init_memory(sysheap_size_arg: u32) -> Result { } #[allow(dead_code)] -fn init_system_heap(heap: Arc>, sysheap_size_arg: u32) -> Result { - //hrm: Arc> +fn init_system_heap(heap: Arc>>>, sysheap_size_arg: u32) -> Result { + //hrm: Arc>>> let mut size: usize = sysheap_size_arg as usize; if sysheap_size_arg == 0 { size = CONFIG_RROS_COREMEM_SIZE * 1024; @@ -166,7 +167,7 @@ fn init_system_heap(heap: Arc>, sysheap_size_arg: u32) -> Res } #[allow(dead_code)] -fn init_shared_heap(heap: Arc>) -> Result { +fn init_shared_heap(heap: Arc>>>) -> Result { //hrm: Arc> let mut size: usize = CONFIG_RROS_NR_THREADS * size_of::() + CONFIG_RROS_NR_MONITORS * size_of::(); @@ -194,7 +195,7 @@ fn 
init_shared_heap(heap: Arc>) -> Result { Ok(0) } -fn rros_init_heap(heap: Arc>, membase: *mut u8, size: usize) -> Result { +fn rros_init_heap(heap: Arc>>>, membase: *mut u8, size: usize) -> Result { let nrpages; premmpt::running_inband()?; @@ -209,9 +210,9 @@ fn rros_init_heap(heap: Arc>, membase: *mut u8, size: usize) *i = u32::MAX; } - op_lock.lock = unsafe { SpinLock::new(1) }; - let pinned = unsafe { Pin::new_unchecked(&mut op_lock.lock) }; - spinlock_init!(pinned, "value"); + op_lock.lock = unsafe { Box::pin_init(new_spinlock!(1)).unwrap() }; + // let pinned = unsafe { Pin::new_unchecked(&mut op_lock.lock) }; + // spinlock_init!(pinned, "value"); nrpages = size >> RROS_HEAP_PAGE_SHIFT; let a: u64 = size_of::() as u64; @@ -236,14 +237,14 @@ fn rros_init_heap(heap: Arc>, membase: *mut u8, size: usize) } #[allow(dead_code)] -fn release_page_range(_heap: Arc>, _membase: *mut u8, _size: usize) { +fn release_page_range(_heap: Arc>>>, _membase: *mut u8, _size: usize) { // let freed: *mut RrosHeapRange = membase as *mut RrosHeapRange; // insert_range_byaddr(heap.clone(), freed); // insert_range_bysize(heap.clone(), freed); } #[allow(dead_code)] -fn insert_range_byaddr(heap: Arc>, _freed: *mut RrosHeapRange) -> Result { +fn insert_range_byaddr(heap: Arc>>>, _freed: *mut RrosHeapRange) -> Result { let mut _op_lock = heap.lock(); // unsafe { // op_lock.addr_tree.try_insert(0, Arc::try_new(*freed)?); @@ -252,7 +253,7 @@ fn insert_range_byaddr(heap: Arc>, _freed: *mut RrosHeapRange } #[allow(dead_code)] -fn insert_range_bysize(heap: Arc>, _freed: *mut RrosHeapRange) -> Result { +fn insert_range_bysize(heap: Arc>>>, _freed: *mut RrosHeapRange) -> Result { let mut _op_lock = heap.lock(); // unsafe { // op_lock.size_tree.try_insert(0, Arc::try_new(*freed)?); diff --git a/kernel/rros/monitor.rs b/kernel/rros/monitor.rs index 640c9988c27da..1f58928d4be6b 100644 --- a/kernel/rros/monitor.rs +++ b/kernel/rros/monitor.rs @@ -1,6 +1,6 @@ use alloc::rc::Rc; -use 
core::{cell::RefCell, convert::TryFrom, mem::size_of, sync::atomic::AtomicUsize}; +use core::{cell::RefCell, convert::TryFrom, mem::size_of, sync::atomic::AtomicUsize,cell::OnceCell}; use crate::{ clock, factory, @@ -12,24 +12,24 @@ use crate::{ use kernel::{ c_types, device::DeviceType, file::File, file_operations::FileOperations, - io_buffer::IoBufferWriter, prelude::*, spinlock_init, str::CStr, sync::SpinLock, user_ptr, + io_buffer::IoBufferWriter, prelude::*, new_spinlock, str::CStr, sync::SpinLock, user_ptr, Error, }; #[allow(dead_code)] pub struct RrosMonitorItem1 { - pub mutex: SpinLock, + pub mutex: Pin>>, pub events: list::ListHead, - pub lock: SpinLock, + pub lock: Pin>>, } impl RrosMonitorItem1 { #[allow(dead_code)] fn new() -> Result { Ok(Self { - mutex: unsafe { SpinLock::new(0) }, + mutex: unsafe { Box::pin_init(new_spinlock!(0,"RrosMonitorItem1_lock")).unwrap() }, events: list::ListHead::default(), - lock: unsafe { SpinLock::::new(0) }, + lock: unsafe { Box::pin_init(new_spinlock!(0,"value")).unwrap() }, }) } } @@ -238,7 +238,8 @@ pub fn monitor_factory_build( }; let element = Rc::try_new(RefCell::new(RrosElement::new()?))?; - let factory: &mut SpinLock = unsafe { &mut RROS_MONITOR_FACTORY }; + rros_monitor_factory_init(); + let factory: &mut Pin>> = unsafe { RROS_MONITOR_FACTORY.get_mut().unwrap() }; let _ret = factory::rros_init_element(element.clone(), factory, clone_flags); let mut state = RrosMonitorState::new()?; @@ -271,12 +272,12 @@ pub fn monitor_factory_build( // init monitor let mon = match state.u { Some(RrosMonitorStateItem::Gate(ref _rros_monitor_state_item_gate)) => { - let mut item = RrosMonitorItem1::new()?; - let pinned = unsafe { Pin::new_unchecked(&mut item.mutex) }; - spinlock_init!(pinned, "RrosMonitorItem1_lock"); + let mut item = RrosMonitorItem1::new()?; + // let pinned = unsafe { Pin::new_unchecked(&mut item.mutex) }; + // spinlock_init!(pinned, "RrosMonitorItem1_lock"); - let pinned = unsafe { Pin::new_unchecked(&mut 
item.lock) }; - spinlock_init!(pinned, "value"); + // let pinned = unsafe { Pin::new_unchecked(&mut item.lock) }; + // spinlock_init!(pinned, "value"); RrosMonitor::new( element, Some(state), @@ -304,31 +305,38 @@ pub fn monitor_factory_build( } #[allow(dead_code)] -pub static mut RROS_MONITOR_FACTORY: SpinLock = unsafe { - SpinLock::new(factory::RrosFactory { - name: CStr::from_bytes_with_nul_unchecked("monitor\0".as_bytes()), - // fops: Some(&MonitorOps), - nrdev: CONFIG_RROS_MONITOR, - build: None, - dispose: Some(monitor_factory_dispose), - attrs: None, //sysfs::attribute_group::new(), - flags: factory::RrosFactoryType::CLONE, - inside: Some(factory::RrosFactoryInside { - type_: DeviceType::new(), - class: None, - cdev: None, - device: None, - sub_rdev: None, - kuid: None, - kgid: None, - minor_map: None, - index: None, - name_hash: None, - hash_lock: None, - register: None, - }), - }) -}; +pub static mut RROS_MONITOR_FACTORY: OnceCell>>> = OnceCell::new(); + +pub fn rros_monitor_factory_init() { + unsafe { + RROS_MONITOR_FACTORY.get_or_init(|| { + Box::pin_init(new_spinlock!(factory::RrosFactory { + name: CStr::from_bytes_with_nul_unchecked("monitor\0".as_bytes()), + // fops: Some(&MonitorOps), + nrdev: CONFIG_RROS_MONITOR, + build: None, + dispose: Some(monitor_factory_dispose), + attrs: None, // sysfs::attribute_group::new(), + flags: factory::RrosFactoryType::CLONE, + inside: Some(factory::RrosFactoryInside { + type_: DeviceType::new(), + class: None, + cdev: None, + device: None, + sub_rdev: None, + kuid: None, + kgid: None, + minor_map: None, + index: None, + name_hash: None, + hash_lock: None, + register: None, + }), + })) + .unwrap() + }); + } +} #[allow(dead_code)] pub fn monitor_factory_dispose(_ele: factory::RrosElement) {} diff --git a/kernel/rros/net/device.rs b/kernel/rros/net/device.rs index 4b837cf404136..2b3605f4aa2a9 100644 --- a/kernel/rros/net/device.rs +++ b/kernel/rros/net/device.rs @@ -1,4 +1,4 @@ -use core::{clone::Clone, ffi::c_void, 
mem::size_of, ptr::NonNull}; +use core::{clone::Clone, ffi::c_void, mem::size_of, ptr::NonNull,ops::Deref}; use kernel::{ bindings, c_str, init_static_sync, @@ -9,16 +9,21 @@ use kernel::{ spinlock_init, sync::{Lock, SpinLock}, vmalloc, Result, + new_spinlock, }; use super::{skb::RrosSkbQueue, socket::RrosNetdevActivation}; use crate::{ crossing::RrosCrossing, flags::RrosFlag, - net::{input::rros_net_do_rx, skb::rros_net_dev_build_pool}, + net::{input::rros_net_do_rx, skb::{rros_net_dev_build_pool, RrosSkbQueueInner}}, thread::KthreadRunner, wait::RrosWaitQueue, }; +use core::pin::Pin; +use alloc::boxed::Box; +use kernel::init::InPlaceInit; +use core::cell::OnceCell; const IFF_OOB_PORT: usize = 1 << 1; const IFF_OOB_CAPABLE: usize = 1 << 0; @@ -37,10 +42,18 @@ struct ListThreadSafeWrapper(pub List>); unsafe impl Sync for ListThreadSafeWrapper {} unsafe impl Send for ListThreadSafeWrapper {} -init_static_sync! { - static ACTIVE_PORT_LIST : SpinLock = ListThreadSafeWrapper(List::new()); -} +// init_static_sync! 
{ +// static ACTIVE_PORT_LIST : SpinLock = ListThreadSafeWrapper(List::new()); +// } +static mut ACTIVE_PORT_LIST: OnceCell>>> = OnceCell::new(); +pub fn active_port_list_init() { + unsafe { + ACTIVE_PORT_LIST.get_or_init(|| { + Box::pin_init(new_spinlock!(ListThreadSafeWrapper(List::new()))).unwrap() + }); + } +} pub fn start_handler_thread( func: Box, name: &'static kernel::str::CStr, @@ -253,8 +266,9 @@ impl NetDevice { } // est.qdisc = //TODO: - let pinned = unsafe { Pin::new_unchecked(&mut est.rx_queue) }; - spinlock_init!(pinned, "RrosSkbQueue"); + // let pinned = unsafe { Pin::new_unchecked(&mut est.rx_queue) }; + // spinlock_init!(pinned, "RrosSkbQueue"); + est.rx_queue = Box::pin_init(new_spinlock!(RrosSkbQueueInner::default(),"RrosSkbQueue")).unwrap(); unsafe { (*est.rx_queue.locked_data().get()).init() }; est.rx_flag.init(); @@ -284,13 +298,14 @@ impl NetDevice { est.refs += 1; self.set_oob_port(); - let flags = ACTIVE_PORT_LIST.irq_lock_noguard(); + active_port_list_init(); + let flags = unsafe { ACTIVE_PORT_LIST.get().unwrap().irq_lock_noguard() }; unsafe { - (*ACTIVE_PORT_LIST.locked_data().get()) + (*ACTIVE_PORT_LIST.get().unwrap().locked_data().get()) .0 .push_back(Box::try_new(NetDevice(self.0.clone())).unwrap()); } - ACTIVE_PORT_LIST.irq_unlock_noguard(flags); + unsafe { ACTIVE_PORT_LIST.get().unwrap().irq_unlock_noguard(flags) }; pr_crit!("enable oob port success"); return 0; @@ -359,20 +374,21 @@ impl NetDevice { pub fn net_get_dev_by_index(net: *mut Namespace, ifindex: i32) -> Option { assert!(ifindex != 0); - let flags = ACTIVE_PORT_LIST.irq_lock_noguard(); + active_port_list_init(); + let flags = unsafe { ACTIVE_PORT_LIST.get().unwrap().irq_lock_noguard() }; - let list = unsafe { &mut (*ACTIVE_PORT_LIST.locked_data().get()).0 }; + let list = unsafe { &mut (*ACTIVE_PORT_LIST.get().unwrap().locked_data().get()).0 }; let cursor = list.cursor_front(); while let Some(item) = cursor.current_mut() { if core::ptr::eq(item.get_net(), net) && 
item.ifindex() == ifindex { unsafe { item.dev_state_mut().as_mut().crossing.down() }; let ret = NetDevice(item.0.clone()); - ACTIVE_PORT_LIST.irq_unlock_noguard(flags); + unsafe { ACTIVE_PORT_LIST.get().unwrap().irq_unlock_noguard(flags) }; return Some(ret); } } - ACTIVE_PORT_LIST.irq_unlock_noguard(flags); + unsafe { ACTIVE_PORT_LIST.get().unwrap().irq_unlock_noguard(flags) }; return None; } } diff --git a/kernel/rros/net/input.rs b/kernel/rros/net/input.rs index 642caa072b5ac..91b03ad1550a2 100644 --- a/kernel/rros/net/input.rs +++ b/kernel/rros/net/input.rs @@ -9,11 +9,12 @@ use kernel::{ c_types::c_void, endian::be16, prelude::*, - spinlock_init, + new_spinlock, sync::{Lock, SpinLock}, types::HlistNode, vmalloc, }; +use core::ops::Deref; // pub struct RROSNetHandler{ // ingress : fn(skb : *mut bindings::sk_buff), @@ -23,7 +24,7 @@ pub struct RrosNetRxqueue { pub hkey: u32, pub hash: HlistNode, pub subscribers: bindings::list_head, - pub lock: SpinLock<()>, + pub lock: Pin>>, pub next: bindings::list_head, } @@ -40,8 +41,9 @@ impl RrosNetRxqueue { let ptr = unsafe { &mut *(ptr.unwrap() as *const _ as *mut RrosNetRxqueue) }; ptr.hkey = hkey; unsafe { rust_helper_INIT_LIST_HEAD(&mut ptr.subscribers) }; - let pinned = unsafe { core::pin::Pin::new_unchecked(&mut ptr.lock) }; - spinlock_init!(pinned, "RrosNetRxqueue"); + ptr.lock = Box::pin_init(new_spinlock!((),"RrosNetRxqueue")).unwrap(); + // let pinned = unsafe { core::pin::Pin::new_unchecked(&mut ptr.lock) }; + // spinlock_init!(pinned, "RrosNetRxqueue"); NonNull::new(ptr) } diff --git a/kernel/rros/net/output.rs b/kernel/rros/net/output.rs index bb202dbdc54c6..5eee26f6a61db 100644 --- a/kernel/rros/net/output.rs +++ b/kernel/rros/net/output.rs @@ -1,21 +1,39 @@ use super::skb::RrosSkBuff; +use core::pin::Pin; + use crate::{ list_entry_is_head, list_next_entry, net::{skb::RrosSkbQueueInner, socket::uncharge_socke_wmem}, }; use core::ffi::c_void; +use core::ops::Deref; +use core::cell::OnceCell; + use kernel::{ 
bindings, init_static_sync, interrupt, irq_work::IrqWork, sync::{Lock, SpinLock}, Error, Result, + new_spinlock, + prelude::Box, + init::InPlaceInit, }; // NOTE:initialize in rros_net_init_tx // TODO: The implementation here does not use DEFINE_PER_CPU because Rust does not yet support statically defined percpu variables. -init_static_sync! { - static OOB_TX_RELAY : SpinLock = RrosSkbQueueInner::default(); +// init_static_sync! { +// static OOB_TX_RELAY : SpinLock = RrosSkbQueueInner::default(); +// } +pub static mut OOB_TX_RELAY: OnceCell>>> = OnceCell::new(); + +pub fn oob_tx_relay_init() { + unsafe { + OOB_TX_RELAY.get_or_init(|| { + Box::pin_init(new_spinlock!(RrosSkbQueueInner::default())).unwrap() + }); + } } + static mut OOB_XMIT_WORK: IrqWork = unsafe { core::mem::transmute::<[u8; core::mem::size_of::()], IrqWork>( [0; core::mem::size_of::()], @@ -101,9 +119,10 @@ fn skb_inband_xmit_backlog() { } let mut list = bindings::list_head::default(); init_list_head!(&mut list); - let flags = OOB_TX_RELAY.irq_lock_noguard(); // TODO: Whether lock is required. + oob_tx_relay_init(); + let flags = unsafe { OOB_TX_RELAY.get().unwrap().irq_lock_noguard() }; // TODO: Whether lock is required. 
- if unsafe { (*OOB_TX_RELAY.locked_data().get()).move_queue(&mut list) } { + if unsafe { (*OOB_TX_RELAY.get().unwrap().locked_data().get()).move_queue(&mut list) } { list_for_each_entry_safe!( skb, n, @@ -119,7 +138,7 @@ fn skb_inband_xmit_backlog() { __bindgen_anon_1.list ); } - OOB_TX_RELAY.irq_unlock_noguard(flags); + unsafe { OOB_TX_RELAY.get().unwrap().irq_unlock_noguard(flags) }; } // fn xmit_oob(dev : *mut bindings::net_device, skb : *mut bindings::sk_buff) -> i32{ @@ -165,9 +184,10 @@ pub fn rros_net_transmit(mut skb: &mut RrosSkBuff) -> Result<()> { } } - let flags = OOB_TX_RELAY.irq_lock_noguard(); - unsafe { (*OOB_TX_RELAY.locked_data().get()).add(skb) }; - OOB_TX_RELAY.irq_unlock_noguard(flags); + oob_tx_relay_init(); + let flags = unsafe { OOB_TX_RELAY.get().unwrap().irq_lock_noguard() }; + unsafe { (*OOB_TX_RELAY.get().unwrap().locked_data().get()).add(skb) }; + unsafe { OOB_TX_RELAY.get().unwrap().irq_unlock_noguard(flags) }; unsafe { OOB_XMIT_WORK.irq_work_queue()?; } diff --git a/kernel/rros/net/packet.rs b/kernel/rros/net/packet.rs index 148a757f0761d..45e84d66005cd 100644 --- a/kernel/rros/net/packet.rs +++ b/kernel/rros/net/packet.rs @@ -12,7 +12,7 @@ use crate::{ sched::{rros_disable_preempt, rros_enable_preempt}, timeout::{RrosTmode, RROS_INFINITE, RROS_NONBLOCK}, }; -use core::{convert::TryInto, default::Default, mem::transmute, ops::DerefMut, ptr::NonNull, u16}; +use core::{convert::TryInto, default::Default, mem::transmute, ops::DerefMut, ops::Deref,ptr::NonNull, u16}; use kernel::{ bindings, c_types, endian::be16, @@ -22,14 +22,25 @@ use kernel::{ prelude::*, skbuff, socket::Sockaddr, - sync::Lock, + sync::Lock,sync::SpinLock, types::*, Error, Result, + new_spinlock, }; +use core::cell::OnceCell; // protocol hash table -init_static_sync! { - static PROTOCOL_HASHTABLE: kernel::sync::SpinLock> = Hashtable::<8>::new(); +// init_static_sync! 
{ +// static PROTOCOL_HASHTABLE: kernel::sync::SpinLock> = Hashtable::<8>::new(); +// } +pub static mut PROTOCOL_HASHTABLE: OnceCell>>>> = OnceCell::new(); + +pub fn protocol_hashtable_init() { + unsafe { + PROTOCOL_HASHTABLE.get_or_init(|| { + Box::pin_init(new_spinlock!(Hashtable::<8>::new())).unwrap() + }); + } } fn get_protol_hash(protocol: be16) -> u32 { @@ -41,7 +52,8 @@ fn get_protol_hash(protocol: be16) -> u32 { } fn find_rxqueue(hkey: u32) -> Option> { - let head = unsafe { (*PROTOCOL_HASHTABLE.locked_data().get()).head(hkey) }; + protocol_hashtable_init(); + let head = unsafe { (*PROTOCOL_HASHTABLE.get().unwrap().locked_data().get()).head(hkey) }; hash_for_each_possible!(rxq, head, RrosNetRxqueue, hash, { if unsafe { (*rxq).hkey } == hkey { return NonNull::new(rxq as *mut RrosNetRxqueue); @@ -76,7 +88,9 @@ impl RrosNetProto for EthernetRrosNetProto { } let mut rxq = rxq.unwrap(); let mut redundant_rxq = false; - let flags = PROTOCOL_HASHTABLE.irq_lock_noguard(); + + protocol_hashtable_init(); + let flags = unsafe { PROTOCOL_HASHTABLE.get().unwrap().irq_lock_noguard() }; sock.proto = Some(ÐERNET_NET_PROTO); sock.binding.proto_hash = hkey; sock.protocol = protocol; @@ -90,15 +104,14 @@ impl RrosNetProto for EthernetRrosNetProto { queue.lock.lock_noguard(); unsafe { rust_helper_list_add(&mut sock.next_sub, &mut queue.subscribers) } // rros_spin_unlock - unsafe { queue.lock.unlock() }; rros_enable_preempt(); // drop q_guard here } else { let queue = unsafe { &mut *rxq.as_ptr() }; - unsafe { (*PROTOCOL_HASHTABLE.locked_data().get()).add(&mut queue.hash.0, hkey) }; + unsafe { (*PROTOCOL_HASHTABLE.get().unwrap().locked_data().get()).add(&mut queue.hash.0, hkey) }; unsafe { rust_helper_list_add(&mut sock.next_sub, &mut queue.subscribers) } } - PROTOCOL_HASHTABLE.irq_unlock_noguard(flags); + unsafe { PROTOCOL_HASHTABLE.get().unwrap().irq_unlock_noguard(flags) }; if redundant_rxq { unsafe { rxq.as_mut().free() }; @@ -116,17 +129,18 @@ impl RrosNetProto for 
EthernetRrosNetProto { let mut tmp = bindings::list_head::default(); init_list_head!(&mut tmp); - let flags = PROTOCOL_HASHTABLE.irq_lock_noguard(); + protocol_hashtable_init(); + let flags = unsafe { PROTOCOL_HASHTABLE.get().unwrap().irq_lock_noguard() }; let rxq = unsafe { find_rxqueue(sock.binding.proto_hash).unwrap().as_mut() }; list_del_init!(&mut sock.next_sub); if unsafe { rust_helper_list_empty(&rxq.subscribers) } { - unsafe { (*PROTOCOL_HASHTABLE.locked_data().get()).del(&mut rxq.hash.0) }; + unsafe { (*PROTOCOL_HASHTABLE.get().unwrap().locked_data().get()).del(&mut rxq.hash.0) }; list_add!(&mut rxq.next, &mut tmp); } - PROTOCOL_HASHTABLE.irq_unlock_noguard(flags); + unsafe { PROTOCOL_HASHTABLE.get().unwrap().irq_unlock_noguard(flags) }; list_for_each_entry_safe!( rxq, @@ -189,7 +203,6 @@ impl RrosNetProto for EthernetRrosNetProto { self.detach(sock); let ret = self.attach(sock, be16::new(sll.get_mut().sll_protocol)); if ret != 0 { - unsafe { sock.oob_lock.unlock() }; if dev.is_some() { let mut dev = dev.unwrap(); dev.put_dev(); @@ -204,7 +217,6 @@ impl RrosNetProto for EthernetRrosNetProto { sock.binding.vlan_ifindex = new_ifindex; } sock.oob_lock.irq_unlock_noguard(flags); - unsafe { sock.oob_lock.unlock() }; if dev.is_some() { let mut dev = dev.unwrap(); dev.put_dev(); @@ -555,8 +567,6 @@ fn __packet_deliver(rxq: &mut RrosNetRxqueue, skb: &mut RrosSkBuff, protocol: be rsk = list_next_entry!(rsk, RrosSocket, next_sub); } - unsafe { rxq.lock.unlock() }; - delivered } @@ -564,13 +574,14 @@ fn packet_deliver(skb: &mut RrosSkBuff, protocol: be16) -> bool { let hkey = get_protol_hash(protocol); let mut ret = false; - let flags = PROTOCOL_HASHTABLE.irq_lock_noguard(); + protocol_hashtable_init(); + let flags = unsafe { PROTOCOL_HASHTABLE.get().unwrap().irq_lock_noguard() }; if let Some(mut rxq) = find_rxqueue(hkey) { ret = __packet_deliver(unsafe { rxq.as_mut() }, skb, protocol); } - PROTOCOL_HASHTABLE.irq_unlock_noguard(flags); + unsafe { 
PROTOCOL_HASHTABLE.get().unwrap().irq_unlock_noguard(flags) }; ret } diff --git a/kernel/rros/net/skb.rs b/kernel/rros/net/skb.rs index 4e796fc7d6947..0350c5a21b1f7 100644 --- a/kernel/rros/net/skb.rs +++ b/kernel/rros/net/skb.rs @@ -21,7 +21,12 @@ use kernel::{ pr_debug, skbuff, sync::{Lock, SpinLock}, Result, + new_spinlock, }; +use core::pin::Pin; +use alloc::boxed::Box; +use kernel::init::InPlaceInit; +use core::cell::OnceCell; struct CloneControl { pub(crate) queue: bindings::list_head, @@ -39,22 +44,54 @@ struct RecyclingWork { unsafe impl Sync for RecyclingWork {} unsafe impl Send for RecyclingWork {} -init_static_sync! { - static CLONE_QUEUE: SpinLock = CloneControl{ - queue: bindings::list_head{ - next: core::ptr::null_mut() as *mut bindings::list_head, - prev: core::ptr::null_mut() as *mut bindings::list_head, - }, - count: 0, - }; - static RECYCLER_WORK : SpinLock = RecyclingWork{ - work : RrosWork::new(), - count : 0, - queue : bindings::list_head{ - next: core::ptr::null_mut() as *mut bindings::list_head, - prev: core::ptr::null_mut() as *mut bindings::list_head, - }, - }; +// init_static_sync! 
{ +// static CLONE_QUEUE: SpinLock = CloneControl{ +// queue: bindings::list_head{ +// next: core::ptr::null_mut() as *mut bindings::list_head, +// prev: core::ptr::null_mut() as *mut bindings::list_head, +// }, +// count: 0, +// }; +// static RECYCLER_WORK : SpinLock = RecyclingWork{ +// work : RrosWork::new(), +// count : 0, +// queue : bindings::list_head{ +// next: core::ptr::null_mut() as *mut bindings::list_head, +// prev: core::ptr::null_mut() as *mut bindings::list_head, +// }, +// }; +// } +static mut CLONE_QUEUE: OnceCell>>> = OnceCell::new(); + +pub fn clone_queue_init() { + unsafe { + CLONE_QUEUE.get_or_init(|| { + Box::pin_init(new_spinlock!(CloneControl { + queue: bindings::list_head { + next: core::ptr::null_mut() as *mut bindings::list_head, + prev: core::ptr::null_mut() as *mut bindings::list_head, + }, + count: 0, + })).unwrap() + }); + } +} + +static mut RECYCLER_WORK: OnceCell>>> = OnceCell::new(); + +pub fn recycler_work_init() { + unsafe { + RECYCLER_WORK.get_or_init(|| { + Box::pin_init(new_spinlock!(RecyclingWork { + work: RrosWork::new(), + count: 0, + queue: bindings::list_head { + next: core::ptr::null_mut() as *mut bindings::list_head, + prev: core::ptr::null_mut() as *mut bindings::list_head, + }, + })).unwrap() + }); + } } const SKB_RECYCLING_THRESHOLD: usize = 64; @@ -67,9 +104,10 @@ pub struct RrosNetCb { } fn maybe_kick_recycler() { + recycler_work_init(); unsafe { - if (*RECYCLER_WORK.locked_data().get()).count > SKB_RECYCLING_THRESHOLD as i32 { - (*RECYCLER_WORK.locked_data().get()).work.call_inband(); + if (*RECYCLER_WORK.get().unwrap().locked_data().get()).count > SKB_RECYCLING_THRESHOLD as i32 { + (*RECYCLER_WORK.get().unwrap().locked_data().get()).work.call_inband(); } } } @@ -109,8 +147,9 @@ impl RrosSkBuff { skb: *mut bindings::sk_buff, ); } - let flags = CLONE_QUEUE.irq_lock_noguard(); - let clone_data_control = unsafe { &mut *CLONE_QUEUE.locked_data().get() }; + clone_queue_init(); + let flags = unsafe { 
CLONE_QUEUE.get().unwrap().irq_lock_noguard() }; + let clone_data_control = unsafe { &mut *CLONE_QUEUE.get().unwrap().locked_data().get() }; let clone: *mut bindings::sk_buff = if unsafe { !rust_helper_list_empty(&clone_data_control.queue) } { clone_data_control.count -= 1; @@ -123,7 +162,7 @@ impl RrosSkBuff { } else { panic!("No more skb to clone"); }; - CLONE_QUEUE.irq_unlock_noguard(flags); + unsafe { CLONE_QUEUE.get().unwrap().irq_unlock_noguard(flags) }; unsafe { rust_helper_skb_morph_oob_skb(clone, self.0.as_ptr()); @@ -303,15 +342,16 @@ impl RrosSkBuff { extern "C" { fn rust_helper_list_add(new: *mut bindings::list_head, head: *mut bindings::list_head); } - let flags = RECYCLER_WORK.irq_lock_noguard(); + recycler_work_init(); + let flags = unsafe { RECYCLER_WORK.get().unwrap().irq_lock_noguard() }; unsafe { rust_helper_list_add( self.list_mut(), - &mut (*RECYCLER_WORK.locked_data().get()).queue, + &mut (*RECYCLER_WORK.get().unwrap().locked_data().get()).queue, ); } - unsafe { (*RECYCLER_WORK.locked_data().get()).count += 1 }; - RECYCLER_WORK.irq_unlock_noguard(flags); + unsafe { (*RECYCLER_WORK.get().unwrap().locked_data().get()).count += 1 }; + unsafe { RECYCLER_WORK.get().unwrap().irq_unlock_noguard(flags) }; } fn free_clone(&mut self) { @@ -319,21 +359,22 @@ impl RrosSkBuff { extern "C" { fn rust_helper_list_add(new: *mut bindings::list_head, head: *mut bindings::list_head); } - let flags = CLONE_QUEUE.irq_lock_noguard(); + clone_queue_init(); + let flags = unsafe { CLONE_QUEUE.get().unwrap().irq_lock_noguard() }; unsafe { rust_helper_list_add( self.list_mut() as *mut bindings::list_head, - &mut (*CLONE_QUEUE.locked_data().get()).queue, + &mut (*CLONE_QUEUE.get().unwrap().locked_data().get()).queue, ) }; - unsafe { (*CLONE_QUEUE.locked_data().get()).count += 1 }; + unsafe { (*CLONE_QUEUE.get().unwrap().locked_data().get()).count += 1 }; unsafe { pr_debug!( "free skb count:{}", - (*CLONE_QUEUE.locked_data().get()).count + 
(*CLONE_QUEUE.get().unwrap().locked_data().get()).count ) }; - CLONE_QUEUE.irq_unlock_noguard(flags); + unsafe { CLONE_QUEUE.get().unwrap().irq_unlock_noguard(flags) }; } fn free_to_dev(&mut self) { @@ -526,7 +567,7 @@ pub struct RrosSkbQueueInner { unsafe impl Send for RrosSkbQueueInner {} unsafe impl Sync for RrosSkbQueueInner {} -pub type RrosSkbQueue = SpinLock; +pub type RrosSkbQueue = Pin>>; impl RrosSkbQueueInner { #[inline] @@ -669,9 +710,10 @@ fn skb_recycler(_work: &mut RrosWork) -> i32 { let mut list = bindings::list_head::default(); init_list_head!(&mut list); - let flags = RECYCLER_WORK.irq_lock_noguard(); + recycler_work_init(); + let flags = unsafe { RECYCLER_WORK.get().unwrap().irq_lock_noguard() }; unsafe { - rust_helper_list_splice_init(&mut (*RECYCLER_WORK.locked_data().get()).queue, &mut list); + rust_helper_list_splice_init(&mut (*RECYCLER_WORK.get().unwrap().locked_data().get()).queue, &mut list); } list_for_each_entry_safe!( skb, @@ -686,7 +728,7 @@ fn skb_recycler(_work: &mut RrosWork) -> i32 { }, __bindgen_anon_1.list ); - RECYCLER_WORK.irq_unlock_noguard(flags); + unsafe { RECYCLER_WORK.get().unwrap().irq_unlock_noguard(flags) }; 0 } @@ -695,9 +737,10 @@ pub fn rros_net_init_pools() -> Result<()> { fn rust_helper_list_add(new: *mut bindings::list_head, head: *mut bindings::list_head); fn rust_helper_INIT_LIST_HEAD(list: *mut bindings::list_head); } - unsafe { rust_helper_INIT_LIST_HEAD(&mut (*(*CLONE_QUEUE.locked_data()).get()).queue) }; - unsafe { (*(*CLONE_QUEUE.locked_data()).get()).count = NET_CLONES as i32 }; - let head = unsafe { &mut (*(*CLONE_QUEUE.locked_data()).get()).queue }; + clone_queue_init(); + unsafe { rust_helper_INIT_LIST_HEAD(&mut (*CLONE_QUEUE.get().unwrap().locked_data().get()).queue) }; + unsafe { (*CLONE_QUEUE.get().unwrap().locked_data().get()).count = NET_CLONES as i32 }; + let head = unsafe { &mut (*CLONE_QUEUE.get().unwrap().locked_data().get()).queue }; // CLONE_QUEUE. 
for _n in 0..NET_CLONES { let clone = skbuff::skb_alloc_oob_head(bindings::GFP_KERNEL); @@ -710,13 +753,14 @@ pub fn rros_net_init_pools() -> Result<()> { } } + recycler_work_init(); unsafe { - (&mut *RECYCLER_WORK.locked_data().get()) + (&mut *RECYCLER_WORK.get().unwrap().locked_data().get()) .work .init(skb_recycler) }; unsafe { - rust_helper_INIT_LIST_HEAD(&mut (*(*RECYCLER_WORK.locked_data()).get()).queue); + rust_helper_INIT_LIST_HEAD(&mut (*RECYCLER_WORK.get().unwrap().locked_data().get()).queue); } Ok(()) diff --git a/kernel/rros/net/socket.rs b/kernel/rros/net/socket.rs index ce223460dbe86..8768767703263 100644 --- a/kernel/rros/net/socket.rs +++ b/kernel/rros/net/socket.rs @@ -20,7 +20,7 @@ use kernel::{ prelude::*, sock::Sock, socket::Sockaddr, - spinlock_init, static_init_net_proto_family, + new_spinlock, static_init_net_proto_family, sync::{Mutex, SpinLock}, types::HlistNode, vmalloc::{c_kzalloc, c_kzfree}, @@ -110,7 +110,7 @@ pub struct RrosSocket { pub wmem_drain: RrosCrossing, pub protocol: be16, pub binding: Binding, - pub oob_lock: SpinLock<()>, + pub oob_lock: Pin>>, } const RROS_SOCKIOC_RECVMSG: u32 = 3226529285; @@ -397,8 +397,9 @@ no_mangle_function_declaration! 
{ rsk.wmem_wait.init(&mut RROS_MONO_CLOCK, 0); } // rros_init_poll_head(&esk->poll_head); - let pinned = unsafe { Pin::new_unchecked(&mut rsk.oob_lock) }; - spinlock_init!(pinned, "net oob spinlock"); + // let pinned = unsafe { Pin::new_unchecked(&mut rsk.oob_lock) }; + // spinlock_init!(pinned, "net oob spinlock"); + rsk.oob_lock = Box::pin_init(new_spinlock!((), "net oob spinlock")).unwrap(); rsk.rmem_max = unsafe { (*sk).sk_rcvbuf }; rsk.wmem_max = unsafe { (*sk).sk_sndbuf }; diff --git a/kernel/rros/observable.rs b/kernel/rros/observable.rs index 8d5db31163ab7..5278223314d74 100644 --- a/kernel/rros/observable.rs +++ b/kernel/rros/observable.rs @@ -12,7 +12,9 @@ use crate::{ thread::{rros_init_user_element, CONFIG_RROS_NR_THREADS}, wait::{RrosWaitQueue, RROS_WAIT_PRIO}, }; -use core::{cell::RefCell, default::Default, mem::size_of}; +use core::{cell::RefCell, default::Default, mem::size_of,ops::Deref}; +use core::cell::OnceCell; + use kernel::{ c_types, device, dovetail::{self, DovetailSubscriber}, @@ -25,7 +27,7 @@ use kernel::{ prelude::*, premmpt::running_inband, rbtree::RBTree, - spinlock_init, + new_spinlock, str::CStr, sync::{HardSpinlock, Lock, SpinLock}, task, @@ -80,18 +82,18 @@ impl RrosNotice { unsafe impl ReadableFromBytes for RrosNotice {} pub struct RrosSubscriber { - pub subscriptions: SpinLock>>, + pub subscriptions: Pin>>>>, } impl RrosSubscriber { pub fn new_and_init() -> Self { let mut s = Self { - subscriptions: unsafe { SpinLock::new(RBTree::new()) }, + subscriptions: Box::pin_init(new_spinlock!(RBTree::new(),"RrosSubscriber")).unwrap() , }; - spinlock_init!( - unsafe { Pin::new_unchecked(&mut s.subscriptions) }, - "RrosSubscriber" - ); + // spinlock_init!( + // unsafe { Pin::new_unchecked(&mut s.subscriptions) }, + // "RrosSubscriber" + // ); s } } @@ -803,12 +805,13 @@ impl FileOpener for ObservableOps { let b = KgidT::from_inode_ptr(shared as *const u8); // let a = KuidT((*(shared as *const u8 as *const bindings::inode)).i_uid); // let b 
= KgidT((*(shared as *const u8 as *const bindings::inode)).i_gid); - (*RROS_OBSERVABLE_FACTORY.locked_data().get()) + rros_observable_factory_init(); + (*RROS_OBSERVABLE_FACTORY.get_mut().unwrap().locked_data().get()) .inside .as_mut() .unwrap() .kuid = Some(a); - (*RROS_OBSERVABLE_FACTORY.locked_data().get()) + (*RROS_OBSERVABLE_FACTORY.get_mut().unwrap().locked_data().get()) .inside .as_mut() .unwrap() @@ -901,7 +904,7 @@ impl FileOperations for ObservableOps { } pub fn observable_factory_build( - fac: &'static mut SpinLock, + fac: &'static mut Pin>>, uname: &'static CStr, _u_attrs: Option<*mut u8>, clone_flags: i32, @@ -954,27 +957,36 @@ pub fn observable_factory_dispose(_ele: RrosElement) { pr_debug!("[observable] observable_factory_dispose"); } -pub static mut RROS_OBSERVABLE_FACTORY: SpinLock = unsafe { - SpinLock::new(RrosFactory { - name: CStr::from_bytes_with_nul_unchecked("observable\0".as_bytes()), - nrdev: CONFIG_RROS_NR_OBSERVABLE + CONFIG_RROS_NR_THREADS, - build: Some(observable_factory_build), - dispose: Some(observable_factory_dispose), - attrs: None, - flags: factory::RrosFactoryType::CLONE, - inside: Some(RrosFactoryInside { - type_: device::DeviceType::new(), - class: None, - cdev: None, - device: None, - sub_rdev: None, - kuid: None, - kgid: None, - minor_map: None, - index: None, - name_hash: None, - hash_lock: None, - register: None, - }), - }) -}; +pub static mut RROS_OBSERVABLE_FACTORY: OnceCell>>> = OnceCell::new(); + +pub fn rros_observable_factory_init() { + unsafe { + RROS_OBSERVABLE_FACTORY.get_or_init(|| { + Box::pin_init( + new_spinlock!(RrosFactory { + name: CStr::from_bytes_with_nul_unchecked("observable\0".as_bytes()), + nrdev: CONFIG_RROS_NR_OBSERVABLE + CONFIG_RROS_NR_THREADS, + build: Some(observable_factory_build), + dispose: Some(observable_factory_dispose), + attrs: None, + flags: factory::RrosFactoryType::CLONE, + inside: Some(RrosFactoryInside { + type_: device::DeviceType::new(), + class: None, + cdev: None, + device: 
None, + sub_rdev: None, + kuid: None, + kgid: None, + minor_map: None, + index: None, + name_hash: None, + hash_lock: None, + register: None, + }), + }) + ) + .unwrap() + }); + } +} \ No newline at end of file diff --git a/kernel/rros/poll.rs b/kernel/rros/poll.rs index d04ba2296686a..3125ae0e81e58 100644 --- a/kernel/rros/poll.rs +++ b/kernel/rros/poll.rs @@ -5,6 +5,7 @@ use core::{ ops::DerefMut, ptr::NonNull, usize, + ops::Deref }; use crate::{ @@ -43,13 +44,14 @@ use kernel::{ ktime::{self, timespec64_to_ktime, Timespec64}, linked_list::{GetLinks, Links, List}, prelude::*, - rbtree, spinlock_init, + rbtree, new_spinlock, str::CStr, sync::Lock, sync::SpinLock, user_ptr::UserSlicePtr, Error, }; +use core::cell::OnceCell; const POLLER_NEST_MAX: i32 = 4; const RROS_POLL_NR_CONNECTORS: usize = 4; @@ -67,7 +69,7 @@ pub const RROS_POLIOC_WAIT: u32 = kernel::ioctl::_IOWR::<(i64, i64, i64)>(RROS_P pub struct RrosPollGroup { pub item_index: rbtree::RBTree>, pub item_list: List>, - pub waiter_list: SpinLock>>, + pub waiter_list: Pin>>>>, pub rfile: RrosFile, // pub item_lock: mutex::RrosKMutex, pub nr_items: i32, @@ -79,7 +81,7 @@ impl RrosPollGroup { Self { item_index: rbtree::RBTree::new(), item_list: List::new(), - waiter_list: unsafe { SpinLock::new(List::new()) }, + waiter_list: Box::pin_init(new_spinlock!(List::new(),"RrosPollGroup::waiter_list")).unwrap(), rfile: RrosFile::new(), // item_lock: mutex::RrosKMutex::new(), nr_items: 0, @@ -88,8 +90,8 @@ impl RrosPollGroup { } pub fn init(&mut self) { - let pinned = unsafe { Pin::new_unchecked(&mut self.waiter_list) }; - spinlock_init!(pinned, "RrosPollGroup::waiter_list"); + // let pinned = unsafe { Pin::new_unchecked(&mut self.waiter_list) }; + // spinlock_init!(pinned, "RrosPollGroup::waiter_list"); //FIXME: init kmutex fail // rros_init_kmutex(&mut item_lock as *mut RrosKMutex); } @@ -155,22 +157,22 @@ impl RrosPollWaiter { } pub struct RrosPollHead { - pub watchpoints: SpinLock, + pub watchpoints: Pin>>, } impl 
RrosPollHead { pub fn new() -> Self { Self { - watchpoints: unsafe { SpinLock::new(list_head::default()) }, + watchpoints: Box::pin_init(new_spinlock!(list_head::default(),"RrosPollHead")).unwrap() , } } pub fn init(&mut self) { init_list_head!(self.watchpoints.locked_data().get()); - spinlock_init!( - unsafe { Pin::new_unchecked(&mut self.watchpoints) }, - "RrosPollHead" - ); + // spinlock_init!( + // unsafe { Pin::new_unchecked(&mut self.watchpoints) }, + // "RrosPollHead" + // ); } } pub struct RrosPollConnector { @@ -1123,28 +1125,36 @@ impl FileOperations for PollOps { } } -pub static mut RROS_POLL_FACTORY: SpinLock = unsafe { - SpinLock::new(factory::RrosFactory { - name: CStr::from_bytes_with_nul_unchecked("poll\0".as_bytes()), - // fops: Some(&Pollops), - nrdev: 0, - build: None, - dispose: None, - attrs: None, - flags: factory::RrosFactoryType::SINGLE, - inside: Some(factory::RrosFactoryInside { - type_: DeviceType::new(), - class: None, - cdev: None, - device: None, - sub_rdev: None, - kuid: None, - kgid: None, - minor_map: None, - index: None, - name_hash: None, - hash_lock: None, - register: None, - }), - }) -}; +pub static mut RROS_POLL_FACTORY: OnceCell>>> = OnceCell::new(); + +pub fn rros_poll_factory_init() { + unsafe { + RROS_POLL_FACTORY.get_or_init(|| { + Box::pin_init( + new_spinlock!(factory::RrosFactory { + name: CStr::from_bytes_with_nul_unchecked("poll\0".as_bytes()), + // fops: Some(&Pollops), + nrdev: 0, + build: None, + dispose: None, + attrs: None, + flags: factory::RrosFactoryType::SINGLE, + inside: Some(factory::RrosFactoryInside { + type_: DeviceType::new(), + class: None, + cdev: None, + device: None, + sub_rdev: None, + kuid: None, + kgid: None, + minor_map: None, + index: None, + name_hash: None, + hash_lock: None, + register: None, + }), + }) + ).unwrap() + }); + } +} diff --git a/kernel/rros/proxy.rs b/kernel/rros/proxy.rs index 066f8bfab1e0f..a2c8b277e603e 100644 --- a/kernel/rros/proxy.rs +++ b/kernel/rros/proxy.rs @@ -33,8 
+33,11 @@ use kernel::{ vmalloc::c_kzalloc, waitqueue, workqueue::*, + new_spinlock, }; +use core::cell::OnceCell; + const FMODE_ATOMIC_POS: u32 = 0x8000; type LoffT = i64; // this should be 64 @@ -53,9 +56,9 @@ pub struct ProxyRing { pub oob_wait: RrosFlag, pub inband_wait: waitqueue::WaitQueueHead, pub relay_work: RrosWork, - pub lock: SpinLock, + pub lock: Pin>>, pub wq: Option, - pub worker_lock: Arc>, + pub worker_lock: Arc>>>, } impl ProxyRing { @@ -72,9 +75,9 @@ impl ProxyRing { oob_wait: RrosFlag::new(), inband_wait: waitqueue::WaitQueueHead::new(), relay_work: RrosWork::new(), - lock: unsafe { SpinLock::new(0) }, + lock: unsafe { Box::pin_init(new_spinlock!(0)).unwrap() }, wq: None, - worker_lock: unsafe { Arc::try_new(SpinLock::new(0))? }, + worker_lock: unsafe { Arc::try_new(Box::pin_init(new_spinlock!(0)).unwrap())? }, }) } } @@ -776,7 +779,7 @@ pub fn init_input_ring(proxy: &mut RrosProxy, bufsz: u32, granularity: u32) -> R } fn proxy_factory_build( - fac: &'static mut SpinLock, + fac: &'static mut Pin>>, uname: &'static CStr, u_attrs: Option<*mut u8>, mut clone_flags: i32, @@ -839,31 +842,38 @@ fn proxy_factory_build( unsafe { (*proxy).element.clone() } } -pub static mut RROS_PROXY_FACTORY: SpinLock = unsafe { - SpinLock::new(RrosFactory { - name: CStr::from_bytes_with_nul_unchecked("proxy\0".as_bytes()), - // fops: Some(&RustFileProxy), - nrdev: CONFIG_RROS_NR_PROXIES, - build: Some(proxy_factory_build), - dispose: Some(proxy_factory_dispose), - attrs: None, //sysfs::attribute_group::new(), - flags: crate::factory::RrosFactoryType::CLONE, - inside: Some(RrosFactoryInside { - type_: DeviceType::new(), - class: None, - cdev: None, - device: None, - sub_rdev: None, - kuid: None, - kgid: None, - minor_map: None, - index: None, - name_hash: None, - hash_lock: None, - register: None, - }), - }) -}; +pub static mut RROS_PROXY_FACTORY: OnceCell>>> = OnceCell::new(); + +pub fn rros_proxy_factory_init() { + unsafe { + RROS_PROXY_FACTORY.get_or_init(|| { + 
Box::pin_init(new_spinlock!(RrosFactory { + name: CStr::from_bytes_with_nul_unchecked("proxy\0".as_bytes()), + // fops: Some(&RustFileProxy), + nrdev: CONFIG_RROS_NR_PROXIES, + build: Some(proxy_factory_build), + dispose: Some(proxy_factory_dispose), + attrs: None, // sysfs::attribute_group::new(), + flags: crate::factory::RrosFactoryType::CLONE, + inside: Some(RrosFactoryInside { + type_: DeviceType::new(), + class: None, + cdev: None, + device: None, + sub_rdev: None, + kuid: None, + kgid: None, + minor_map: None, + index: None, + name_hash: None, + hash_lock: None, + register: None, + }), + })) + .unwrap() + }); + } +} pub struct ProxyOps; impl FileOpener for ProxyOps { diff --git a/kernel/rros/sched.rs b/kernel/rros/sched.rs index 13531f25d4544..2f99d06367890 100644 --- a/kernel/rros/sched.rs +++ b/kernel/rros/sched.rs @@ -7,6 +7,7 @@ use core::{ mem::{align_of, size_of, transmute}, ops::DerefMut, ptr::{null, null_mut, NonNull}, + ops::Deref, }; #[warn(unused_mut)] @@ -22,7 +23,7 @@ use kernel::{ percpu::alloc_per_cpu, percpu_defs, prelude::*, - premmpt, spinlock_init, + premmpt, new_spinlock, str::{kstrdup, CStr}, sync::{HardSpinlock, Lock, SpinLock}, types::Atomic, @@ -81,14 +82,14 @@ static mut RROS_SCHED_LOWER: *mut RrosSchedClass = 0 as *mut RrosSchedClass; #[repr(C)] pub struct rros_rq { pub flags: u64, - pub curr: Option>>, + pub curr: Option>>>>, pub fifo: RrosSchedFifo, pub weak: RrosSchedWeak, pub tp: tp::RrosSchedTp, - pub root_thread: Option>>, + pub root_thread: Option>>>>, pub local_flags: u64, - pub inband_timer: Option>>, - pub rrbtimer: Option>>, + pub inband_timer: Option>>>>, + pub rrbtimer: Option>>>>, pub proxy_timer_name: *mut c_types::c_char, pub rrb_timer_name: *mut c_types::c_char, #[cfg(CONFIG_SMP)] @@ -129,15 +130,15 @@ impl rros_rq { }) } - pub fn get_inband_timer(&self) -> Arc> { + pub fn get_inband_timer(&self) -> Arc>>> { self.inband_timer.as_ref().unwrap().clone() } - pub fn get_rrbtimer(&self) -> Arc> { + pub fn 
get_rrbtimer(&self) -> Arc>>> { self.rrbtimer.as_ref().unwrap().clone() } - pub fn get_curr(&self) -> Arc> { + pub fn get_curr(&self) -> Arc>>> { self.curr.as_ref().unwrap().clone() } @@ -181,7 +182,7 @@ pub fn this_rros_rq() -> *mut rros_rq { } } -pub fn this_rros_rq_thread() -> Option>> { +pub fn this_rros_rq_thread() -> Option>>>> { let rq = this_rros_rq(); unsafe { (*rq).curr.clone() } } @@ -212,7 +213,7 @@ pub fn rros_rq_cpu(rq: *mut rros_rq) -> i32 { } #[allow(dead_code)] -pub fn rros_protect_thread_priority(thread: Arc>, prio: i32) -> Result { +pub fn rros_protect_thread_priority(thread: Arc>>>, prio: i32) -> Result { unsafe { // raw_spin_lock(&thread->rq->lock); let mut state = (*thread.locked_data().get()).state; @@ -320,7 +321,7 @@ pub fn rros_double_rq_lock(_rq1: *mut rros_rq, _rq2: *mut rros_rq) {} pub fn rros_double_rq_unlock(_rq1: *mut rros_rq, _rq2: *mut rros_rq) {} #[cfg(CONFIG_SMP)] -pub fn migrate_thread(thread: Arc>, dst_rq: *mut rros_rq) { +pub fn migrate_thread(thread: Arc>>>, dst_rq: *mut rros_rq) { let src_rq = unsafe { (*thread.locked_data().get()).rq.unwrap() }; rros_double_rq_lock(src_rq, dst_rq); @@ -353,7 +354,7 @@ pub fn migrate_thread(thread: Arc>, dst_rq: *mut rros_rq) { #[cfg(CONFIG_SMP)] #[allow(dead_code)] -pub fn rros_migrate_thread(thread: Arc>, dst_rq: *mut rros_rq) { +pub fn rros_migrate_thread(thread: Arc>>>, dst_rq: *mut rros_rq) { // TODO: assert_hard_lock(&thread.lock); let src_rq = unsafe { (*thread.locked_data().get()).rq.unwrap() }; if src_rq == dst_rq { @@ -371,7 +372,7 @@ pub fn rros_migrate_thread(thread: Arc>, dst_rq: *mut rros_ } #[cfg(not(CONFIG_SMP))] -pub fn rros_migrate_thread(thread: Arc>, dst_rq: *mut rros_rq) {} +pub fn rros_migrate_thread(thread: Arc>>>, dst_rq: *mut rros_rq) {} #[allow(dead_code)] pub fn rros_in_irq() -> bool { @@ -462,7 +463,7 @@ impl RrosSchedWeak { } pub struct RrosSchedQueue { - pub head: Option>>>, + pub head: Option>>>>>, } impl RrosSchedQueue { pub fn new() -> Result { @@ -477,41 
+478,41 @@ pub type SsizeT = bindings::__kernel_ssize_t; pub struct RrosSchedClass { pub sched_init: Option Result>, - pub sched_enqueue: Option>) -> Result>, - pub sched_dequeue: Option>)>, - pub sched_requeue: Option>)>, - pub sched_pick: Option) -> Result>>>, + pub sched_enqueue: Option>>>) -> Result>, + pub sched_dequeue: Option>>>)>, + pub sched_requeue: Option>>>)>, + pub sched_pick: Option) -> Result>>>>>, pub sched_tick: Option) -> Result>, pub sched_migrate: - Option>, rq: *mut rros_rq) -> Result>, + Option>>>, rq: *mut rros_rq) -> Result>, pub sched_setparam: Option< fn( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) -> Result, >, pub sched_getparam: Option< - fn(thread: Option>>, p: Option>>), + fn(thread: Option>>>>, p: Option>>>>), >, pub sched_chkparam: Option< fn( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) -> Result, >, pub sched_trackprio: Option< - fn(thread: Option>>, p: Option>>), + fn(thread: Option>>>>, p: Option>>>>), >, - pub sched_ceilprio: Option>, prio: i32)>, + pub sched_ceilprio: Option>>>, prio: i32)>, pub sched_declare: Option< fn( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) -> Result, >, - pub sched_forget: Option>) -> Result>, - pub sched_kick: Option>)>, + pub sched_forget: Option>>>) -> Result>, + pub sched_kick: Option>>>)>, pub sched_show: Option< fn(thread: *mut RrosThread, buf: *mut c_types::c_char, count: SsizeT) -> Result, >, @@ -867,10 +868,10 @@ impl RrosStat { } } -pub struct RrosThreadWithLock(SpinLock); +pub struct RrosThreadWithLock(Pin>>); impl RrosThreadWithLock { /// transmute back - pub unsafe fn transmute_to_original(ptr: Arc) -> Arc> { + pub unsafe fn transmute_to_original(ptr: Arc) -> Arc>>> { unsafe { let ptr = Arc::into_raw(ptr) as *mut SpinLock; Arc::from_raw(transmute(NonNull::new_unchecked(ptr).as_ptr())) @@ -886,7 +887,7 @@ impl RrosThreadWithLock { } } pub fn get_wprio(&self) -> i32 { - unsafe { 
(*(*self.0.locked_data()).get()).wprio } + unsafe { (*self.0.locked_data().get()).wprio } } } @@ -912,16 +913,16 @@ pub struct RrosThread { pub wchan: *mut RrosWaitChannel, pub wait_next: Links, pub wwake: *mut RrosWaitChannel, - pub rtimer: Option>>, - pub ptimer: Option>>, + pub rtimer: Option>>>>, + pub ptimer: Option>>>>, pub rrperiod: KtimeT, pub state: u32, pub info: u32, // pub rq_next: Option>>>, - pub next: *mut Node>>, + pub next: *mut Node>>>>, - pub rq_next: Option>>>>, + pub rq_next: Option>>>>>>, pub altsched: dovetail::DovetailAltschedContext, pub local_info: u32, @@ -945,7 +946,7 @@ pub struct RrosThread { pub observable: Option>>, pub name: &'static str, pub tps: *mut tp::RrosTpRq, - pub tp_link: Option>>>, + pub tp_link: Option>>>>>, } impl RrosThread { @@ -973,7 +974,7 @@ impl RrosThread { // next: 0 as *mut ListHead, // prev: 0 as *mut ListHead, // }, - next: 0 as *mut Node>>, // kernel corrupted bug + next: 0 as *mut Node>>>>, // kernel corrupted bug altsched: dovetail::DovetailAltschedContext::new(), local_info: 0, wait_data: null_mut(), @@ -1021,7 +1022,7 @@ impl RrosThread { self.state = 0; self.info = 0; self.rq_next = None; - self.next = 0 as *mut Node>>; // kernel; + self.next = 0 as *mut Node>>>>; // kernel; self.altsched = dovetail::DovetailAltschedContext::new(); self.local_info = 0; self.wait_data = null_mut(); @@ -1153,7 +1154,7 @@ pub struct RrosInitThreadAttr { pub observable: Option>>, pub flags: i32, pub sched_class: Option<&'static RrosSchedClass>, - pub sched_param: Option>>, + pub sched_param: Option>>>>, } impl RrosInitThreadAttr { pub fn new() -> Self { @@ -1169,9 +1170,9 @@ impl RrosInitThreadAttr { fn init_inband_timer(rq_ptr: *mut rros_rq) -> Result { unsafe { (*rq_ptr) = rros_rq::new()?; - let mut x = SpinLock::new(RrosTimer::new(1)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "inband_timer"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(1),"inband_timer")).unwrap(); + // let pinned = 
Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "inband_timer"); (*rq_ptr).inband_timer = Some(Arc::try_new(x)?); } Ok(0) @@ -1179,9 +1180,9 @@ fn init_inband_timer(rq_ptr: *mut rros_rq) -> Result { fn init_rrbtimer(rq_ptr: *mut rros_rq) -> Result { unsafe { - let mut y = SpinLock::new(RrosTimer::new(1)); - let pinned = Pin::new_unchecked(&mut y); - spinlock_init!(pinned, "rrb_timer"); + let mut y = Box::pin_init(new_spinlock!(RrosTimer::new(1),"rrb_timer")).unwrap(); + // let pinned = Pin::new_unchecked(&mut y); + // spinlock_init!(pinned, "rrb_timer"); (*rq_ptr).rrbtimer = Some(Arc::try_new(y)?); } Ok(0) @@ -1189,13 +1190,15 @@ fn init_rrbtimer(rq_ptr: *mut rros_rq) -> Result { fn init_root_thread(rq_ptr: *mut rros_rq) -> Result { unsafe { - let mut tmp = Arc::>::try_new_uninit()?; + let mut tmp = Arc::>>>::try_new_uninit()?; + let tmp_spinlock = Box::pin_init(new_spinlock!(sched::RrosThread::new().unwrap(),"rros_kthreads")).unwrap(); let mut tmp = { - core::ptr::write_bytes(Arc::get_mut_unchecked(&mut tmp), 0, 1); + Arc::get_mut_unchecked(&mut tmp).write(tmp_spinlock); + // core::ptr::write_bytes(Arc::get_mut_unchecked(&mut tmp), 0, 1); tmp.assume_init() }; - let pinned = { Pin::new_unchecked(Arc::get_mut_unchecked(&mut tmp)) }; - spinlock_init!(pinned, "rros_kthreads"); + // let pinned = { Pin::new_unchecked(Arc::get_mut_unchecked(&mut tmp)) }; + // spinlock_init!(pinned, "rros_kthreads"); // let mut thread = SpinLock::new(RrosThread::new()?); // let pinned = Pin::new_unchecked(&mut thread); @@ -1204,22 +1207,23 @@ fn init_root_thread(rq_ptr: *mut rros_rq) -> Result { (*rq_ptr).root_thread = Some(tmp); //Arc::try_new(thread)? 
(*(*rq_ptr).root_thread.as_mut().unwrap().locked_data().get()).init()?; - let pinned = Pin::new_unchecked( - &mut *(Arc::into_raw((*rq_ptr).root_thread.clone().unwrap()) - as *mut SpinLock), - ); - // &mut *Arc::into_raw( *(*rq_ptr).root_thread.clone().as_mut().unwrap()) as &mut SpinLock - spinlock_init!(pinned, "rros_threads"); + // let pinned = Pin::new_unchecked( + // &mut *(Arc::into_raw((*rq_ptr).root_thread.clone().unwrap()) + // as *mut SpinLock), + // ); + // // &mut *Arc::into_raw( *(*rq_ptr).root_thread.clone().as_mut().unwrap()) as &mut SpinLock + // spinlock_init!(pinned, "rros_threads"); // (*rq_ptr).root_thread.as_mut().unwrap().assume_init(); + } Ok(0) } fn init_rtimer(rq_ptr: *mut rros_rq) -> Result { unsafe { - let mut r = SpinLock::new(rros_timer::new(1)); - let pinned_r = Pin::new_unchecked(&mut r); - spinlock_init!(pinned_r, "rtimer"); + let mut r = Box::pin_init(new_spinlock!(rros_timer::new(1),"rtimer")).unwrap(); + // let pinned_r = Pin::new_unchecked(&mut r); + // spinlock_init!(pinned_r, "rtimer"); (*rq_ptr).root_thread.as_ref().unwrap().lock().rtimer = Some(Arc::try_new(r)?); } Ok(0) @@ -1227,9 +1231,9 @@ fn init_rtimer(rq_ptr: *mut rros_rq) -> Result { fn init_ptimer(rq_ptr: *mut rros_rq) -> Result { unsafe { - let mut p = SpinLock::new(rros_timer::new(1)); - let pinned_p = Pin::new_unchecked(&mut p); - spinlock_init!(pinned_p, "ptimer"); + let mut p = Box::pin_init(new_spinlock!(rros_timer::new(1),"ptimer")).unwrap(); + // let pinned_p = Pin::new_unchecked(&mut p); + // spinlock_init!(pinned_p, "ptimer"); (*rq_ptr).root_thread.as_ref().unwrap().lock().ptimer = Some(Arc::try_new(p)?); } Ok(0) @@ -1280,13 +1284,15 @@ fn init_rq_ptr(rq_ptr: *mut rros_rq) -> Result { fn init_rq_ptr_inband_timer(rq_ptr: *mut rros_rq) -> Result { unsafe { - let mut tmp = Arc::>::try_new_uninit()?; + let mut tmp = Arc::>>>::try_new_uninit()?; + let tmp_spinlock = Box::pin_init(new_spinlock!(RrosThread::new().unwrap(),"rros_kthreads")).unwrap(); let mut tmp = { 
- core::ptr::write_bytes(Arc::get_mut_unchecked(&mut tmp), 0, 1); + Arc::get_mut(&mut tmp).unwrap().write(tmp_spinlock); tmp.assume_init() }; - let pinned = { Pin::new_unchecked(Arc::get_mut_unchecked(&mut tmp)) }; - spinlock_init!(pinned, "rros_kthreads"); + // let pinned = { Pin::new_unchecked(Arc::get_mut_unchecked(&mut tmp)) }; + // spinlock_init!(pinned, "rros_kthreads"); + // let mut thread = SpinLock::new(RrosThread::new()?); // let pinned = Pin::new_unchecked(&mut thread); // spinlock_init!(pinned, "rros_threads"); @@ -1304,21 +1310,21 @@ fn init_rq_ptr_inband_timer(rq_ptr: *mut rros_rq) -> Result { .locked_data() .get()) .init()?; - let pinned = Pin::new_unchecked( - &mut *(Arc::into_raw( - (*rq_ptr) - .fifo - .runnable - .head - .as_mut() - .unwrap() - .head - .value - .clone(), - ) as *mut SpinLock), - ); - // &mut *Arc::into_raw( *(*rq_ptr).root_thread.clone().as_mut().unwrap()) as &mut SpinLock - spinlock_init!(pinned, "rros_threads"); + // let pinned = Pin::new_unchecked( + // &mut *(Arc::into_raw( + // (*rq_ptr) + // .fifo + // .runnable + // .head + // .as_mut() + // .unwrap() + // .head + // .value + // .clone(), + // ) as *mut SpinLock), + // ); + // // &mut *Arc::into_raw( *(*rq_ptr).root_thread.clone().as_mut().unwrap()) as &mut SpinLock + // spinlock_init!(pinned, "rros_threads"); // let mut x = SpinLock::new(RrosThread::new()?); @@ -1668,7 +1674,7 @@ fn init_rq(rq: *mut rros_rq, cpu: i32) -> Result { // sched_param_ptr = sched_param_clone.borrow_mut(); // sched_param_ptr.idle.prio = idle::RROS_IDLE_PRIO; - let sched_param = unsafe { Arc::try_new(SpinLock::new(RrosSchedParam::new()))? }; + let sched_param = unsafe { Arc::try_new(Box::pin_init(new_spinlock!(RrosSchedParam::new())).unwrap())? 
}; unsafe { (*sched_param.locked_data().get()).fifo.prio = idle::RROS_IDLE_PRIO }; iattr.sched_param = Some(sched_param); @@ -1749,7 +1755,7 @@ fn list_add_tail(new: *mut ListHead, head: *mut ListHead) { } pub fn rros_set_effective_thread_priority( - thread: Option>>, + thread: Option>>>>, prio: i32, ) -> Result { let thread_clone = thread.clone(); @@ -1779,8 +1785,8 @@ pub fn rros_set_effective_thread_priority( #[allow(dead_code)] pub fn rros_track_priority( - thread: Arc>, - p: Arc>, + thread: Arc>>>, + p: Arc>>>, ) -> Result { unsafe { let func; @@ -1804,7 +1810,7 @@ pub fn rros_track_priority( Ok(0) } -fn rros_ceil_priority(thread: Arc>, prio: i32) -> Result { +fn rros_ceil_priority(thread: Arc>>>, prio: i32) -> Result { unsafe { let func; match (*thread.locked_data().get()) @@ -1830,7 +1836,7 @@ pub fn rros_calc_weighted_prio(sched_class: &'static RrosSchedClass, prio: i32) return prio + sched_class.weight; } -pub fn rros_putback_thread(thread: Arc>) -> Result { +pub fn rros_putback_thread(thread: Arc>>>) -> Result { let state = thread.lock().state; if state & T_READY != 0 { rros_dequeue_thread(thread.clone())?; @@ -1843,7 +1849,7 @@ pub fn rros_putback_thread(thread: Arc>) -> Result { Ok(0) } -pub fn rros_dequeue_thread(thread: Arc>) -> Result { +pub fn rros_dequeue_thread(thread: Arc>>>) -> Result { let sched_class; match thread.lock().sched_class.clone() { Some(c) => sched_class = c, @@ -1862,7 +1868,7 @@ pub fn rros_dequeue_thread(thread: Arc>) -> Result { Ok(0) } -pub fn rros_enqueue_thread(thread: Arc>) -> Result { +pub fn rros_enqueue_thread(thread: Arc>>>) -> Result { let sched_class; match thread.lock().sched_class.clone() { Some(c) => sched_class = c, @@ -1881,7 +1887,7 @@ pub fn rros_enqueue_thread(thread: Arc>) -> Result { Ok(0) } -pub fn rros_requeue_thread(thread: Arc>) -> Result { +pub fn rros_requeue_thread(thread: Arc>>>) -> Result { let sched_class; unsafe { match (*thread.locked_data().get()).sched_class.clone() { @@ -2015,7 +2021,7 @@ 
unsafe extern "C" fn __rros_schedule(_arg: *mut c_types::c_void) -> i32 { let next_add = next.locked_data().get(); - if next_add == curr_add { + if next_add as *const _ == curr_add as *const _ { // if the curr and next are both root, we should call the inband thread pr_debug!("__rros_schedule: next_add == curr_add "); let next_state = (*next.locked_data().get()).state; @@ -2101,8 +2107,8 @@ unsafe extern "C" fn __rros_schedule(_arg: *mut c_types::c_void) -> i32 { #[allow(dead_code)] fn finish_rq_switch() {} -pub fn pick_next_thread(rq: Option<*mut rros_rq>) -> Option>> { - let mut next: Option>>; +pub fn pick_next_thread(rq: Option<*mut rros_rq>) -> Option>>>> { + let mut next: Option>>>>; loop { next = __pick_next_thread(rq); let next_clone = next.clone().unwrap(); @@ -2131,10 +2137,10 @@ pub fn pick_next_thread(rq: Option<*mut rros_rq>) -> Option) -> Option>> { +pub fn __pick_next_thread(rq: Option<*mut rros_rq>) -> Option>>>> { let curr = unsafe { (*rq.clone().unwrap()).curr.clone().unwrap() }; - let next: Option>>; + let next: Option>>>>; let curr_state = unsafe { (*curr.locked_data().get()).state }; if curr_state & (RROS_THREAD_BLOCK_BITS | T_ZOMBIE) == 0 { @@ -2192,7 +2198,7 @@ pub fn __pick_next_thread(rq: Option<*mut rros_rq>) -> Option) -> Option>> { +pub fn lookup_fifo_class(rq: Option<*mut rros_rq>) -> Option>>>> { let q = &mut unsafe { (*rq.clone().unwrap()).fifo.runnable.head.as_mut().unwrap() }; if q.is_empty() { return None; @@ -2211,7 +2217,7 @@ pub fn lookup_fifo_class(rq: Option<*mut rros_rq>) -> Option, next: Option>>) { +pub fn set_next_running(rq: Option<*mut rros_rq>, next: Option>>>>) { let next = next.unwrap(); unsafe { (*next.locked_data().get()).state &= !T_READY }; let state = unsafe { (*next.locked_data().get()).state }; @@ -2241,9 +2247,9 @@ fn test_bit(nr: i32, addr: *const usize) -> bool { } pub fn rros_set_thread_policy( - thread: Option>>, + thread: Option>>>>, sched_class: Option<&'static RrosSchedClass>, - p: Option>>, + p: 
Option>>>>, ) -> Result { let mut flags: c_types::c_ulong = 0; // let test = p.clone().unwrap(); @@ -2258,7 +2264,7 @@ pub fn rros_set_thread_policy( } pub fn rros_get_thread_rq( - thread: Option>>, + thread: Option>>>>, flags: &mut c_types::c_ulong, ) -> Option<*mut rros_rq> { // pr_debug!("yinyongcishu is {}", Arc::strong_count(&thread.clone().unwrap())); @@ -2269,7 +2275,7 @@ pub fn rros_get_thread_rq( } pub fn rros_put_thread_rq( - _thread: Option>>, + _thread: Option>>>>, _rq: Option<*mut rros_rq>, flags: c_types::c_ulong, ) -> Result { @@ -2283,9 +2289,9 @@ pub fn rros_put_thread_rq( } pub fn rros_set_thread_policy_locked( - thread: Option>>, + thread: Option>>>>, sched_class: Option<&'static RrosSchedClass>, - p: Option>>, + p: Option>>>>, ) -> Result { let _test = p.clone().unwrap(); let thread_unwrap = thread.clone().unwrap(); @@ -2355,9 +2361,9 @@ pub fn rros_set_thread_policy_locked( } fn rros_check_schedparams( - thread: Option>>, + thread: Option>>>>, sched_class: Option<&'static RrosSchedClass>, - p: Option>>, + p: Option>>>>, ) -> Result { let sched_class_ptr = sched_class.unwrap(); if sched_class_ptr.sched_chkparam.is_some() { @@ -2371,8 +2377,8 @@ fn rros_check_schedparams( #[allow(dead_code)] pub fn rros_get_schedparam( - thread: Arc>, - p: Arc>, + thread: Arc>>>, + p: Arc>>>, ) -> Result { let func; unsafe { @@ -2393,8 +2399,8 @@ pub fn rros_get_schedparam( } fn rros_set_schedparam( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) -> Result { let thread_clone = thread.clone(); let thread_unwrap = thread_clone.unwrap(); @@ -2414,9 +2420,9 @@ fn rros_set_schedparam( // TODO: Remain to be refactored. 
fn rros_declare_thread( - thread: Option>>, + thread: Option>>>>, sched_class: Option<&'static RrosSchedClass>, - p: Option>>, + p: Option>>>>, ) -> Result { let thread_clone = thread.clone(); let thread_unwrap = thread_clone.unwrap(); @@ -2441,7 +2447,7 @@ fn rros_declare_thread( Ok(0) } -pub fn rros_forget_thread(thread: Arc>) -> Result { +pub fn rros_forget_thread(thread: Arc>>>) -> Result { let thread_clone = thread.clone(); // let thread_lock = thread_clone.lock(); let sched_class = thread_clone.lock().base_class.clone(); @@ -2467,7 +2473,7 @@ extern "C" { } #[cfg(CONFIG_SMP)] -pub fn check_cpu_affinity(thread: Arc>, cpu: i32) { +pub fn check_cpu_affinity(thread: Arc>>>, cpu: i32) { let rq = rros_cpu_rq(cpu); unsafe { (*thread.locked_data().get()).lock.raw_spin_lock() }; @@ -2515,10 +2521,10 @@ unsafe extern "C" fn rust_resume_oob_task(ptr: *mut c_types::c_void, cpu: i32) { // struct RrosThread *thread = rros_thread_from_task(p); // pr_debug!("rros rros mutex ptr{:p}", ptr); - let thread: Arc>; + let thread: Arc>>>; unsafe { - thread = Arc::from_raw(ptr as *mut SpinLock); + thread = Arc::from_raw(ptr as *mut Pin>>); pr_debug!( "0600 uninit_thread: x ref is {}", Arc::strong_count(&thread) @@ -2648,7 +2654,7 @@ pub fn rros_disable_preempt() { } #[inline] -pub fn rros_force_thread(thread: Arc>) { +pub fn rros_force_thread(thread: Arc>>>) { // assert_thread_pinned(thread); { let guard = thread.lock(); diff --git a/kernel/rros/stax.rs b/kernel/rros/stax.rs index 4e150a0385bf6..38a13bb182bbd 100644 --- a/kernel/rros/stax.rs +++ b/kernel/rros/stax.rs @@ -369,7 +369,7 @@ impl RrosStax { } fn claim_stax_from_oob(&mut self, gateval: u32) -> Result<()> { let ptr = rros_current(); - let curr: Arc>; + let curr: Arc>>>; // Safety: rros_current() guarantees that the ptr is valid. 
unsafe { curr = Arc::from_raw(ptr); diff --git a/kernel/rros/syscall.rs b/kernel/rros/syscall.rs index 964c04cd1eb5c..5a4ae2d51c912 100644 --- a/kernel/rros/syscall.rs +++ b/kernel/rros/syscall.rs @@ -1,4 +1,5 @@ use core::mem::size_of; +use core::ops::Deref; use kernel::{ bindings, @@ -11,7 +12,7 @@ use kernel::{ ptrace::{IrqStage, PtRegs}, sync::{Lock, SpinLock}, task::Task, - uapi::time_types::{KernelOldTimespec, KernelTimespec}, + time_types::{KernelOldTimespec, KernelTimespec}, user_ptr::UserSlicePtr, }; @@ -157,8 +158,8 @@ fn invoke_syscall(nr: u32, regs: PtRegs) { } fn prepare_for_signal( - _p: *mut SpinLock, - curr: *mut SpinLock, + _p: *mut Pin>>, + curr: *mut Pin>>, regs: PtRegs, ) { let flags; @@ -346,7 +347,7 @@ fn do_oob_syscall(stage: IrqStage, regs: PtRegs) -> i32 { bindings::CAP_SYS_NICE as i32, ) != 0); pr_debug!("curr is {:p} res is {}", curr, res1); - if curr == 0 as *mut SpinLock || res1 { + if curr == 0 as *mut Pin>> || res1 { // [TODO: lack RROS_DEBUG] pr_err!("ERROR: syscall denied"); // if (RROS_DEBUG(CORE)) @@ -422,7 +423,7 @@ fn do_inband_syscall(_stage: IrqStage, regs: PtRegs) -> i32 { * assume this is an in-band syscall which we need to * propagate downstream to the common handler. 
*/ - if curr == 0 as *mut SpinLock { + if curr == 0 as *mut Pin>> { return SYSCALL_PROPAGATE; } diff --git a/kernel/rros/thread.rs b/kernel/rros/thread.rs index fac7920b29b37..e02e043cb0b24 100644 --- a/kernel/rros/thread.rs +++ b/kernel/rros/thread.rs @@ -13,6 +13,7 @@ use crate::{ use alloc::rc::Rc; use kernel::dovetail::InbandEventType; +use core::cell::OnceCell; use core::{ cell::RefCell, @@ -20,6 +21,7 @@ use core::{ ops::DerefMut, ptr::{self, null_mut}, result::Result::{Err, Ok}, + ops::Deref, }; #[warn(unused_mut)] @@ -41,7 +43,7 @@ use kernel::{ prelude::*, premmpt, sched::sched_setscheduler, - spinlock_init, + new_spinlock, str::CStr, sync::{Guard, Lock, SpinLock}, task::{self, Task}, @@ -152,36 +154,43 @@ pub const RROS_THRIOC_GET_STATE: u32 = 4; // TODO: move this to the config file pub const CONFIG_RROS_NR_THREADS: usize = 16; -pub static mut RROS_THREAD_FACTORY: SpinLock = unsafe { - SpinLock::new(factory::RrosFactory { - // TODO: move this and clock factory name to a variable - name: CStr::from_bytes_with_nul_unchecked("thread\0".as_bytes()), - // fops: Some(&ThreadOps), - nrdev: CONFIG_RROS_NR_THREADS, - // TODO: add the corresponding ops - build: Some(thread_factory_build), - // TODO: add the corresponding ops - dispose: None, - // TODO: add the corresponding attr - attrs: None, //sysfs::attribute_group::new(), - // TODO: rename this flags to the bit level variable RROS_FACTORY_CLONE and RROS_FACTORY_SINGLE - flags: factory::RrosFactoryType::CLONE, - inside: Some(factory::RrosFactoryInside { - type_: DeviceType::new(), - class: None, - cdev: None, - device: None, - sub_rdev: None, - kuid: None, - kgid: None, - minor_map: None, - index: None, - name_hash: None, - hash_lock: None, - register: None, - }), - }) -}; +pub static mut RROS_THREAD_FACTORY: OnceCell>>> = OnceCell::new(); + +pub fn rros_thread_factory_init() { + unsafe { + RROS_THREAD_FACTORY.get_or_init(|| { + Box::pin_init(new_spinlock!(factory::RrosFactory { + // TODO: move this and clock 
factory name to a variable + name: CStr::from_bytes_with_nul_unchecked("thread\0".as_bytes()), + // fops: Some(&ThreadOps), + nrdev: CONFIG_RROS_NR_THREADS, + // TODO: add the corresponding ops + build: Some(thread_factory_build), + // TODO: add the corresponding ops + dispose: None, + // TODO: add the corresponding attr + attrs: None, // sysfs::attribute_group::new(), + // TODO: rename this flags to the bit level variable RROS_FACTORY_CLONE and RROS_FACTORY_SINGLE + flags: factory::RrosFactoryType::CLONE, + inside: Some(factory::RrosFactoryInside { + type_: DeviceType::new(), + class: None, + cdev: None, + device: None, + sub_rdev: None, + kuid: None, + kgid: None, + minor_map: None, + index: None, + name_hash: None, + hash_lock: None, + register: None, + }), + })) + .unwrap() + }); + } +} #[derive(Default)] pub struct ThreadOps; @@ -204,7 +213,7 @@ use core::fmt; use core::fmt::{Debug, Formatter}; pub struct RrosKthread { - pub thread: Option>>, + pub thread: Option>>>>, pub done: Completion, pub kthread_fn: Option ()>>, status: i32, @@ -278,7 +287,7 @@ impl RrosThreadState { } pub fn rros_init_thread( - thread: &Option>>, + thread: &Option>>>>, // rq_s: Rc>, // iattr: Rc>, iattr: RrosInitThreadAttr, @@ -438,14 +447,14 @@ pub fn periodic_handler(timer: *mut timer::RrosTimer) { // rros_sched_tick(rq); } -pub fn rros_wakeup_thread(thread: Arc>, mask: u32, info: i32) { +pub fn rros_wakeup_thread(thread: Arc>>>, mask: u32, info: i32) { let mut flags: c_types::c_ulong = 0; let rq = rros_get_thread_rq(Some(thread.clone()), &mut flags); rros_wakeup_thread_locked(thread.clone(), mask, info); let _ret = rros_put_thread_rq(Some(thread.clone()), rq, flags); } -fn rros_wakeup_thread_locked(thread: Arc>, mut mask: u32, info: i32) { +fn rros_wakeup_thread_locked(thread: Arc>>>, mut mask: u32, info: i32) { let rq = thread.lock().rq; // struct rros_rq *rq = thread->rq; // unsigned long oldstate; @@ -657,7 +666,7 @@ unsafe extern "C" fn kthread_trampoline(arg: *mut 
c_types::c_void) -> c_types::c 0 as c_types::c_int } -fn rros_cancel_thread(thread: Arc>) { +fn rros_cancel_thread(thread: Arc>>>) { pr_debug!(" in rros_cancel_thread"); let mut flags: c_types::c_ulong = 0; @@ -695,10 +704,10 @@ fn rros_cancel_thread(thread: Arc>) { let curr = unsafe { (*rq).get_curr() }; let curr_addr = curr.locked_data().get(); let thread_addr = thread.locked_data().get(); - pr_debug!("curr_addr is {:?}", curr_addr); - pr_debug!("thread_addr is {:?}", thread_addr); + pr_debug!("curr_addr is {:?}", curr_addr as *const _ ); + pr_debug!("thread_addr is {:?}", thread_addr as *const _); - if curr_addr == thread_addr { + if curr_addr as *const _ == thread_addr as *const _ { pr_debug!("rros_current() == thread"); rros_test_cancel(); return; @@ -719,7 +728,7 @@ fn rros_cancel_thread(thread: Arc>) { pub fn rros_test_cancel() { let curr_ptr = rros_current(); - if curr_ptr != 0 as *mut SpinLock { + if curr_ptr != 0 as *mut Pin>> { let curr = unsafe { &mut *curr_ptr }; let info = unsafe { (*curr.locked_data().get()).info }; if info & T_CANCELD != 0x0 { @@ -729,7 +738,7 @@ pub fn rros_test_cancel() { } } -fn __rros_test_cancel(curr_ptr: *mut SpinLock) { +fn __rros_test_cancel(curr_ptr: *mut Pin>>) { let curr = unsafe { &mut *curr_ptr }; let rq_local_flags = unsafe { (&*((*curr.locked_data().get()).rq.clone().unwrap())).local_flags }; @@ -751,7 +760,7 @@ unsafe extern "C" fn wakeup_kthread_parent(irq_work: *mut IrqWork) { } } -pub fn pin_to_initial_cpu(thread: Arc>) { +pub fn pin_to_initial_cpu(thread: Arc>>>) { let current_ptr = task::Task::current_ptr(); let mut cpu: u32 = task::Task::task_cpu(current_ptr as *const _); @@ -795,7 +804,7 @@ fn map_kthread_self(kthread: &mut RrosKthread) -> Result { pr_debug!("map_kthread_self: after dovetail_init_altsched"); set_oob_threadinfo( - Arc::into_raw(thread.clone()) as *mut SpinLock as *mut c_types::c_void + Arc::into_raw(thread.clone()) as *mut Pin>> as *mut c_types::c_void ); pr_debug!( "map_kthread_self 
rros_current address is {:p}", @@ -820,8 +829,8 @@ fn map_kthread_self(kthread: &mut RrosKthread) -> Result { return ret; } -pub fn rros_current() -> *mut SpinLock { - dovetail::dovetail_current_state().thread() as *mut SpinLock +pub fn rros_current() -> *mut Pin>> { + dovetail::dovetail_current_state().thread() as *mut Pin>> } pub fn rros_switch_oob() -> Result { @@ -951,14 +960,14 @@ pub fn rros_switch_oob() -> Result { // bindings::_raw_spin_unlock_irq() // } -pub fn rros_release_thread(thread: Arc>, mask: u32, info: u32) { +pub fn rros_release_thread(thread: Arc>>>, mask: u32, info: u32) { let mut flags: c_types::c_ulong = 0; let rq = rros_get_thread_rq(Some(thread.clone()), &mut flags); rros_release_thread_locked(thread.clone(), mask, info); // For smp, this needs to be changed, but now there is no problem. let _ret = rros_put_thread_rq(Some(thread.clone()), rq, flags); } -pub fn rros_release_thread_locked(thread: Arc>, mask: u32, info: u32) { +pub fn rros_release_thread_locked(thread: Arc>>>, mask: u32, info: u32) { let rq = thread.lock().rq.unwrap(); let oldstate = thread.lock().state; // if (RROS_WARN_ON(CORE, mask & ~(T_SUSP|T_HALT|T_INBAND|T_DORMANT|T_PTSYNC))) @@ -1041,7 +1050,7 @@ pub fn set_oob_threadinfo(curr: *mut c_types::c_void) { // unsafe{Arc::decrement_strong_count(curr);} } -pub fn set_oob_mminfo(thread: Arc>) { +pub fn set_oob_mminfo(thread: Arc>>>) { // pr_debug!("set_oob_mminfo: in"); unsafe { (*thread.locked_data().get()).oob_mm = dovetail::dovetail_mm_state(); @@ -1079,7 +1088,7 @@ pub fn rros_run_kthread(kthread: &mut RrosKthread, fmt: &'static CStr) -> Result iattr.sched_class = Some(&RROS_SCHED_FIFO); // iattr.sched_class = Some(&RROS_SCHED_TP); let prio = 98; - let sched_param = Arc::try_new(SpinLock::new(RrosSchedParam::new()))?; + let sched_param = Arc::try_new(Box::pin_init(new_spinlock!(RrosSchedParam::new())).unwrap())?; (*sched_param.locked_data().get()).fifo.prio = prio; (*sched_param.locked_data().get()).idle.prio = prio; 
(*sched_param.locked_data().get()).weak.prio = prio; @@ -1114,10 +1123,10 @@ pub fn rros_run_kthread(kthread: &mut RrosKthread, fmt: &'static CStr) -> Result #[allow(dead_code)] pub fn rros_set_thread_schedparam( - thread: Arc>, + thread: Arc>>>, sched_class: Option<&'static RrosSchedClass>, - sched_param: Option>>, + sched_param: Option>>>>, ) -> Result { let mut flags: c_types::c_ulong = 0; let rq = rros_get_thread_rq(Some(thread.clone()), &mut flags); @@ -1127,9 +1136,9 @@ pub fn rros_set_thread_schedparam( } pub fn rros_set_thread_schedparam_locked( - thread: Arc>, + thread: Arc>>>, _sched_class: Option<&'static RrosSchedClass>, - _sched_param: Option>>, + _sched_param: Option>>>>, ) -> Result { let old_wprio: i32; let new_wprio: i32; @@ -1212,7 +1221,7 @@ pub fn rros_sleep_on( ) { let mut flags: c_types::c_ulong = 0; let curr = unsafe { &mut *rros_current() }; - let thread = unsafe { Arc::from_raw(curr as *const SpinLock) }; + let thread = unsafe { Arc::from_raw(curr as *const Pin>>) }; // pr_debug!("rros_sleep_on: x ref is {}", Arc::strong_count(&thread.clone())); unsafe { Arc::increment_strong_count(curr); @@ -1303,7 +1312,7 @@ pub fn rros_sleep_on_locked( } #[allow(dead_code)] -pub fn rros_propagate_schedparam_change(curr: &mut SpinLock) { +pub fn rros_propagate_schedparam_change(curr: &mut Pin>>) { // Can't write here: curr_lock = curr.lock() // cannot borrow `*curr` as mutable because it is also borrowed as immutable if curr.lock().info & T_SCHEDP != 0x0 { @@ -1311,7 +1320,7 @@ pub fn rros_propagate_schedparam_change(curr: &mut SpinLock) { } } -pub fn __rros_propagate_schedparam_change(_curr: &mut SpinLock) { +pub fn __rros_propagate_schedparam_change(_curr: &mut Pin>>) { //todo } @@ -1365,7 +1374,7 @@ unsafe extern "C" fn rust_handle_inband_event( InbandEventType::InbandTaskExit => { // pr_debug!("{}",rust_helper_dovetail_current_state().subscriber); // rros_drop_subscriptions(rros_get_subscriber()); // sbr in rros is NULL, comment here first. 
- if rros_current() != 0 as *mut SpinLock { + if rros_current() != 0 as *mut Pin>> { let _ret = put_current_thread(); } } // case INBAND_TASK_MIGRATION: @@ -1415,7 +1424,7 @@ fn put_current_thread() -> Result { fn cleanup_current_thread() -> Result { // unsafe{pr_debug!("00 uninit_thread: x ref is {}", Arc::strong_count(&uthread.clone().unwrap()));} let curr = rros_current(); - let thread = unsafe { Arc::from_raw(curr as *const SpinLock) }; + let thread = unsafe { Arc::from_raw(curr as *const Pin>>) }; unsafe { Arc::increment_strong_count(curr); } @@ -1432,8 +1441,8 @@ fn cleanup_current_thread() -> Result { Ok(0) } -fn do_cleanup_current(curr: Arc>) -> Result { - // fn do_cleanup_current(curr: &mut SpinLock) -> Result { +fn do_cleanup_current(curr: Arc>>>) -> Result { + // fn do_cleanup_current(curr: &mut Pin>>) -> Result { // struct cred *newcap; // unsafe{pr_debug!("03 uninit_thread: x ref is {}", Arc::strong_count(&UTHREAD.clone().unwrap()));} let mut flags: c_types::c_ulong = 0; @@ -1466,7 +1475,7 @@ fn do_cleanup_current(curr: Arc>) -> Result { pr_debug!("before dequeue_old_thread"); // dequeue_old_thread(curr); pr_debug!("after dequeue_old_thread,before rros_get_thread_rq"); - // let x = unsafe { Arc::from_raw(curr as *const SpinLock) }; + // let x = unsafe { Arc::from_raw(curr as *const Pin>>) }; // unsafe{pr_debug!("b 6 uninit_thread: x ref is {}", Arc::strong_count(&UTHREAD.clone().unwrap()));} let x = curr.clone(); // unsafe{pr_debug!("c 6 uninit_thread: x ref is {}", Arc::strong_count(&UTHREAD.clone().unwrap()));} @@ -1492,8 +1501,8 @@ fn do_cleanup_current(curr: Arc>) -> Result { } #[allow(dead_code)] -fn dequeue_old_thread(_thread: Arc>) -> Result { - // fn dequeue_old_thread(thread: &mut SpinLock) -> Result { +fn dequeue_old_thread(_thread: Arc>>>) -> Result { + // fn dequeue_old_thread(thread: &mut Pin>>) -> Result { let flags = lock::hard_local_irq_save(); // kernel corrupted bug is here: next is 0 at initialization, but uses * to get its value // 
let next = unsafe{&mut *thread.lock().next}; @@ -1504,7 +1513,7 @@ fn dequeue_old_thread(_thread: Arc>) -> Result { Ok(0) } -fn uninit_thread(thread: Arc>) { +fn uninit_thread(thread: Arc>>>) { pr_debug!("the thread address is {:p}", thread); // pr_debug!("the UTHREAD address is {:p}", UTHREAD.clone().unwrap()); // unsafe{pr_debug!("d 6 uninit_thread: x ref is {}", Arc::strong_count(&UTHREAD.clone().unwrap()));} @@ -1536,11 +1545,11 @@ fn uninit_thread(thread: Arc>) { // bindings::kfree(name); } -pub static mut UTHREAD: Option>> = None; +pub static mut UTHREAD: Option>>>> = None; // TODO: update the __user attribute and modify the `*mut` type fn thread_factory_build( - fac: &'static mut SpinLock, + fac: &'static mut Pin>>, uname: &'static CStr, _u_attrs: Option<*mut u8>, clone_flags: i32, @@ -1559,7 +1568,7 @@ fn thread_factory_build( // struct RrosThread *curr; // int ret; - if rros_current() != 0 as *mut SpinLock { + if rros_current() != 0 as *mut Pin>> { pr_warn!("this condition should not be met!!!!!!!!!"); // return Err(Error::EBUSY); } @@ -1589,21 +1598,21 @@ fn thread_factory_build( unsafe { // KTHREAD_RUNNER_1 = Some(KthreadRunner::new(kfn_1)); - let mut thread = SpinLock::new(RrosThread::new().unwrap()); + let mut thread = Box::pin_init(new_spinlock!(RrosThread::new().unwrap(),"test_threads1")).unwrap(); pr_debug!("at the thread build thread address is {:p} ", &thread); - let pinned = Pin::new_unchecked(&mut thread); - // pr_debug!("at the thread build thread address is {:p} ", &thread); - spinlock_init!(pinned, "test_threads1"); + // let pinned = Pin::new_unchecked(&mut thread); + // // pr_debug!("at the thread build thread address is {:p} ", &thread); + // spinlock_init!(pinned, "test_threads1"); curr = Arc::try_new(thread).unwrap(); // pr_debug!("at the thread build thread address is {:p} ", &thread); - let mut r = SpinLock::new(timer::RrosTimer::new(1)); - let pinned_r = Pin::new_unchecked(&mut r); - spinlock_init!(pinned_r, "rtimer_1"); + let mut r = 
Box::pin_init(new_spinlock!(timer::RrosTimer::new(1),"rtimer_1")).unwrap(); + // let pinned_r = Pin::new_unchecked(&mut r); + // spinlock_init!(pinned_r, "rtimer_1"); - let mut p = SpinLock::new(timer::RrosTimer::new(1)); - let pinned_p = Pin::new_unchecked(&mut p); - spinlock_init!(pinned_p, "ptimer_1"); + let mut p = Box::pin_init(new_spinlock!(timer::RrosTimer::new(1),"ptimer_1")).unwrap(); + // let pinned_p = Pin::new_unchecked(&mut p); + // spinlock_init!(pinned_p, "ptimer_1"); curr.lock().rtimer = Some(Arc::try_new(r).unwrap()); curr.lock().ptimer = Some(Arc::try_new(p).unwrap()); @@ -1685,7 +1694,7 @@ fn thread_factory_build( // iattr.sched_class = &rros_sched_weak; // FIXME: alter fifo with weak, but for now we just use fifo unsafe { - let sched_param = Arc::try_new(SpinLock::new(RrosSchedParam::new())).unwrap(); + let sched_param = Arc::try_new(Box::pin_init(new_spinlock!(RrosSchedParam::new())).unwrap()).unwrap(); (*sched_param.locked_data().get()).fifo.prio = 10; iattr.sched_param = Some(sched_param); } @@ -1773,7 +1782,7 @@ fn thread_factory_build( pub fn rros_init_user_element( e: Rc>, - fac: &'static mut SpinLock, + fac: &'static mut Pin>>, uname: &'static CStr, clone_flags: i32, ) -> Result { @@ -1812,7 +1821,7 @@ pub fn rros_init_user_element( // } } -fn map_uthread_self(thread: Arc>) -> Result { +fn map_uthread_self(thread: Arc>>>) -> Result { // static int map_uthread_self(struct RrosThread *thread) // { // mkdir /dev/rros @@ -1887,7 +1896,7 @@ fn map_uthread_self(thread: Arc>) -> Result { ); } set_oob_threadinfo( - Arc::into_raw(thread.clone()) as *mut SpinLock as *mut c_types::c_void + Arc::into_raw(thread.clone()) as *mut Pin>> as *mut c_types::c_void ); unsafe { @@ -1935,7 +1944,7 @@ fn map_uthread_self(thread: Arc>) -> Result { } // TODO: update the cred wrappers -fn add_u_cap(thread: Arc>, new_cap: *mut cred::Credential, cap: u32) { +fn add_u_cap(thread: Arc>>>, new_cap: *mut cred::Credential, cap: u32) { // TODO: add the cap_raise&&capable 
wrappers if !capability::capable(cap as i32) { unsafe { @@ -2019,8 +2028,8 @@ pub fn rros_get_inband_pid(thread: *const RrosThread) -> i32 { #[allow(dead_code)] pub fn rros_track_thread_policy( - thread: Arc>, - target: Arc>, + thread: Arc>>>, + target: Arc>>>, ) -> Result { unsafe { let param = RrosSchedParam::new(); @@ -2032,23 +2041,23 @@ pub fn rros_track_thread_policy( } let thread_ptr = - Arc::into_raw(thread.clone()) as *mut SpinLock as *mut RrosThread; + Arc::into_raw(thread.clone()) as *mut Pin>> as *mut RrosThread; let target_ptr = - Arc::into_raw(target.clone()) as *mut SpinLock as *mut RrosThread; + Arc::into_raw(target.clone()) as *mut Pin>> as *mut RrosThread; if target_ptr == thread_ptr { (*thread.locked_data().get()).sched_class = (*thread.locked_data().get()).base_class; rros_track_priority( thread.clone(), - Arc::try_new(SpinLock::new(RrosSchedParam::new()))?, + Arc::try_new(Box::pin_init(new_spinlock!(RrosSchedParam::new())).unwrap())?, )?; state = (*thread.locked_data().get()).state; if state & T_READY != 0 { rros_requeue_thread(thread.clone())?; } } else { - rros_get_schedparam(target.clone(), Arc::try_new(SpinLock::new(param))?)?; + rros_get_schedparam(target.clone(), Arc::try_new(Box::pin_init(new_spinlock!(param)).unwrap())?)?; (*thread.locked_data().get()).sched_class = (*target.locked_data().get()).sched_class; - rros_track_priority(thread.clone(), Arc::try_new(SpinLock::new(param))?)?; + rros_track_priority(thread.clone(), Arc::try_new(Box::pin_init(new_spinlock!(param)).unwrap())?)?; state = (*thread.locked_data().get()).state; if state & T_READY != 0 { rros_enqueue_thread(thread.clone())?; @@ -2168,8 +2177,8 @@ pub fn set_sched_attrs(thread: *mut RrosThread, attrs: RrosSchedAttrs) -> Result let mut flags: c_types::c_ulong = 0; let mut ret: Result; let tslice = unsafe { (*thread).rrperiod }; - let thread: Option>> = - unsafe { Some(Arc::from_raw(thread as *mut SpinLock)) }; + let thread: Option>>>> = + unsafe { Some(Arc::from_raw(thread 
as *mut Pin>>)) }; let rq = rros_get_thread_rq(thread.clone(), &mut flags); let sched_class = rros_find_sched_class(&mut param, attrs, tslice); @@ -2189,7 +2198,7 @@ pub fn set_sched_attrs(thread: *mut RrosThread, attrs: RrosSchedAttrs) -> Result ret = rros_set_thread_schedparam_locked( thread.clone().unwrap(), Some(sched_class), - Some(Arc::try_new(SpinLock::new(param))?), + Some(Arc::try_new(Box::pin_init(new_spinlock!(param)).unwrap())?), ) }; @@ -2274,7 +2283,7 @@ pub fn rros_find_sched_class( #[allow(dead_code)] pub fn set_time_slice( - thread: Option>>, + thread: Option>>>>, quantum: ktime::KtimeT, ) -> Result { let thread = thread.clone().unwrap(); @@ -2301,7 +2310,7 @@ pub fn set_time_slice( unsafe { (*thread.locked_data().get()).state |= T_RRB }; unsafe { - if curr_ptr == thread_ptr { + if curr_ptr as *const _ == thread_ptr as *const _ { timer::rros_start_timer( (*rq).rrbtimer.clone().unwrap(), timer::rros_abs_timeout((*rq).rrbtimer.clone().unwrap(), quantum), @@ -2312,7 +2321,7 @@ pub fn set_time_slice( } else { unsafe { (*thread.locked_data().get()).state &= !T_RRB; - if curr_ptr == thread_ptr { + if curr_ptr as *const _ == thread_ptr as *const _ { timer::rros_stop_timer((*rq).rrbtimer.clone().unwrap()); } } @@ -2325,8 +2334,8 @@ pub fn set_time_slice( pub fn get_sched_attrs(thread: *mut RrosThread, attrs: &mut RrosSchedAttrs) -> Result { let mut flags: c_types::c_ulong = 0; let thread_ptr = thread; - let thread: Option>> = - unsafe { Some(Arc::from_raw(thread as *mut SpinLock)) }; + let thread: Option>>>> = + unsafe { Some(Arc::from_raw(thread as *mut Pin>>)) }; let rq = rros_get_thread_rq(thread.clone(), &mut flags); /* Get the base scheduling attributes. */ attrs.sched_priority = unsafe { (*thread.clone().unwrap().locked_data().get()).bprio }; @@ -2341,12 +2350,12 @@ pub fn __get_sched_attrs( thread: *mut RrosThread, attrs: &mut RrosSchedAttrs, ) -> Result { - let param = unsafe { Some(Arc::try_new(SpinLock::new(RrosSchedParam::new()))?) 
}; + let param = unsafe { Some(Arc::try_new(Box::pin_init(new_spinlock!(RrosSchedParam::new())).unwrap())?) }; attrs.sched_policy = sched_class.unwrap().policy; match sched_class.unwrap().sched_getparam { Some(f) => { - let thread_option = unsafe { Some(Arc::from_raw(thread as *mut SpinLock)) }; + let thread_option = unsafe { Some(Arc::from_raw(thread as *mut Pin>>)) }; f(thread_option.clone(), param.clone()); } None => { @@ -2387,7 +2396,7 @@ pub fn rros_get_thread_state( statebuf: &mut RrosThreadState, ) -> Result { let mut flags: c_types::c_ulong = 0; - let thread_option = unsafe { Some(Arc::from_raw(thread as *mut SpinLock)) }; + let thread_option = unsafe { Some(Arc::from_raw(thread as *mut Pin>>)) }; let rq = rros_get_thread_rq(thread_option.clone(), &mut flags); statebuf.eattrs.sched_priority = unsafe { (*thread).cprio }; unsafe { __get_sched_attrs((*thread).sched_class.clone(), thread, &mut statebuf.eattrs)? }; @@ -2444,36 +2453,37 @@ impl KthreadRunner { pub fn init(&mut self, kfn: Box) { let mut kthread = RrosKthread::new(Some(kfn)); // let mut thread = unsafe{SpinLock::new(RrosThread::new().unwrap())}; - // let pinned: Pin<&mut SpinLock> = unsafe{Pin::new_unchecked(&mut thread)}; + // let pinned: Pin<&mut Pin>>> = unsafe{Pin::new_unchecked(&mut thread)}; // spinlock_init!(pinned, "test_threads2"); // kthread.thread = Some(Arc::try_new(thread).unwrap()); - let mut tmp = Arc::>::try_new_uninit().unwrap(); + let mut tmp = Arc::>>>::try_new_uninit().unwrap(); + let mut tmp_spinlock = Box::pin_init(new_spinlock!(RrosThread::new().unwrap(),"rros_kthreads")).unwrap(); let mut tmp = unsafe { - ptr::write_bytes(Arc::get_mut_unchecked(&mut tmp), 0, 1); + Arc::get_mut(&mut tmp).unwrap().write(tmp_spinlock); tmp.assume_init() }; - let pinned = unsafe { Pin::new_unchecked(Arc::get_mut_unchecked(&mut tmp)) }; - spinlock_init!(pinned, "rros_kthreads"); + // let pinned = unsafe { Pin::new_unchecked(Arc::get_mut_unchecked(&mut tmp)) }; + // spinlock_init!(pinned, 
"rros_kthreads"); unsafe { let _ret = (*Arc::get_mut_unchecked(&mut tmp).locked_data().get()).init(); } kthread.thread = Some(tmp); //Arc::try_new(thread)? // unsafe{(*(*kthread.thread.as_mut().unwrap().locked_dataed_data().get()).get()).init().unwrap()}; - let pinned = unsafe { - Pin::new_unchecked( - &mut *(Arc::into_raw(kthread.thread.clone().unwrap()) as *mut SpinLock), - ) - }; - // &mut *Arc::into_raw( *(*rq_ptr).root_thread.clone().as_mut().unwrap()) as &mut SpinLock - spinlock_init!(pinned, "rros_threads"); - - let mut r = unsafe { SpinLock::new(timer::RrosTimer::new(1)) }; - let pinned_r = unsafe { Pin::new_unchecked(&mut r) }; - spinlock_init!(pinned_r, "rtimer_3"); - let mut p = unsafe { SpinLock::new(timer::RrosTimer::new(1)) }; - let pinned_p = unsafe { Pin::new_unchecked(&mut p) }; - spinlock_init!(pinned_p, "ptimer_3"); + // let pinned = unsafe { + // Pin::new_unchecked( + // &mut *(Arc::into_raw(kthread.thread.clone().unwrap()) as *mut Pin>>), + // ) + // }; + // // &mut *Arc::into_raw( *(*rq_ptr).root_thread.clone().as_mut().unwrap()) as &mut Pin>> + // spinlock_init!(pinned, "rros_threads"); + + let mut r = unsafe { Box::pin_init(new_spinlock!(timer::RrosTimer::new(1),"rtimer_3")).unwrap() }; + // let pinned_r = unsafe { Pin::new_unchecked(&mut r) }; + // spinlock_init!(pinned_r, "rtimer_3"); + let mut p = unsafe { Box::pin_init(new_spinlock!(timer::RrosTimer::new(1),"ptimer_3")).unwrap() }; + // let pinned_p = unsafe { Pin::new_unchecked(&mut p) }; + // spinlock_init!(pinned_p, "ptimer_3"); kthread.thread.as_mut().map(|thread| unsafe { let mut t = &mut (*(*thread).locked_data().get()); @@ -2514,7 +2524,7 @@ pub fn rros_set_period( let curr = rros_current(); let thread; unsafe { - thread = Arc::from_raw(curr as *mut SpinLock); + thread = Arc::from_raw(curr as *mut Pin>>); Arc::increment_strong_count(curr); } let timer = unsafe { @@ -2575,7 +2585,7 @@ pub fn rros_set_period( pub fn rros_wait_period() -> Result { let curr = rros_current(); - let 
thread = unsafe { Arc::from_raw(curr as *mut SpinLock) }; + let thread = unsafe { Arc::from_raw(curr as *mut Pin>>) }; unsafe { Arc::increment_strong_count(curr); } @@ -2655,7 +2665,7 @@ pub fn rros_wait_period() -> Result { // EXPORT_SYMBOL_GPL(rros_wait_period); } -fn rros_get_timer_overruns(timer: Arc>) -> Result { +fn rros_get_timer_overruns(timer: Arc>>>) -> Result { // unsigned long rros_get_timer_overruns(struct rros_timer *timer) // { let mut flags = 0; @@ -2794,12 +2804,12 @@ fn rros_get_timer_overruns(timer: Arc>) -> Result>, mut info: u32) { +pub fn rros_kick_thread(thread: Arc>>>, mut info: u32) { let mut flags = 0u64; let rq = rros_get_thread_rq(Some(thread.clone()), &mut flags); loop { { - let guard: Guard<'_, SpinLock> = thread.lock(); + let guard = thread.lock(); if guard.state & T_INBAND != 0 { break; } @@ -2858,10 +2868,10 @@ pub fn rros_kick_thread(thread: Arc>, mut info: u32) { } #[inline] -pub fn rros_thread_from_task(p: *mut bindings::task_struct) -> Result>> { - let thread: Arc>; - let thread_ptr = dovetail::dovetail_task_state(p).thread() as *mut SpinLock; - if thread_ptr == 0 as *mut SpinLock { +pub fn rros_thread_from_task(p: *mut bindings::task_struct) -> Result>>>> { + let thread: Arc>>>; + let thread_ptr = dovetail::dovetail_task_state(p).thread() as *mut Pin>>; + if thread_ptr == 0 as *mut Pin>> { return Err(kernel::Error::EINVAL); } @@ -2882,43 +2892,43 @@ pub fn rros_run_kthread_on_cpu( fmt: &'static CStr, ) -> Result { let mut iattr: RrosInitThreadAttr; - let sched_param: Arc>; - let mut thread: Arc>; + let sched_param: Arc>>>; + let mut thread: Arc>>>; /* this unsafe block is used to initialize RrosInitThreadAttr. 
*/ unsafe { iattr = RrosInitThreadAttr::new(); iattr.affinity = CpumaskT::cpumask_of(cpu); iattr.sched_class = Some(&RROS_SCHED_FIFO); - sched_param = Arc::try_new(SpinLock::new(RrosSchedParam::new()))?; + sched_param = Arc::try_new(Box::pin_init(new_spinlock!(RrosSchedParam::new())).unwrap())?; (*sched_param.locked_data().get()).fifo.prio = 1; iattr.sched_param = Some(sched_param); } /* this unsafe block is used to initialize RrosThread. */ unsafe { - let r = Arc::try_new(SpinLock::new(timer::RrosTimer::new(1)))?; - let p = Arc::try_new(SpinLock::new(timer::RrosTimer::new(1)))?; + let r = Arc::try_new(Box::pin_init(new_spinlock!(timer::RrosTimer::new(1),"rtimer")).unwrap())?; + let p = Arc::try_new(Box::pin_init(new_spinlock!(timer::RrosTimer::new(1),"ptimer")).unwrap())?; - thread = Arc::try_new(SpinLock::new(RrosThread::new()?))?; - let thread_ref = Arc::get_mut(&mut thread).unwrap(); - let pinned = Pin::new_unchecked(thread_ref); - spinlock_init!(pinned, "kthread"); + thread = Arc::try_new(Box::pin_init(new_spinlock!(RrosThread::new()?,"kthread")).unwrap())?; + // let thread_ref = Arc::get_mut(&mut thread).unwrap(); + // let pinned = Pin::new_unchecked(thread_ref); + // spinlock_init!(pinned, "kthread"); thread.lock().init()?; thread.lock().rtimer = Some(r); thread.lock().ptimer = Some(p); - let mut thread_guard = thread.lock(); - let mut r_opt = thread_guard.rtimer.as_mut().unwrap(); - let r_ref = Arc::get_mut(&mut r_opt).unwrap(); - let pinned_r = Pin::new_unchecked(r_ref); - spinlock_init!(pinned_r, "rtimer"); + // let mut thread_guard = thread.lock(); + // let mut r_opt = thread_guard.rtimer.as_mut().unwrap(); + // let r_ref = Arc::get_mut(&mut r_opt).unwrap(); + // let pinned_r = Pin::new_unchecked(r_ref); + // spinlock_init!(pinned_r, "rtimer"); - let mut p_opt = thread_guard.ptimer.as_mut().unwrap(); - let p_ref = Arc::get_mut(&mut p_opt).unwrap(); - let pinned_p = Pin::new_unchecked(p_ref); - spinlock_init!(pinned_p, "ptimer"); + // let mut p_opt = 
thread_guard.ptimer.as_mut().unwrap(); + // let p_ref = Arc::get_mut(&mut p_opt).unwrap(); + // let pinned_p = Pin::new_unchecked(p_ref); + // spinlock_init!(pinned_p, "ptimer"); } kthread.thread = Some(thread); diff --git a/kernel/rros/tick.rs b/kernel/rros/tick.rs index 4efc3c6a135d8..41ecaa6957842 100644 --- a/kernel/rros/tick.rs +++ b/kernel/rros/tick.rs @@ -20,6 +20,7 @@ use core::{ cmp, mem::{align_of, size_of}, ptr::null_mut, + ops::Deref, }; extern "C" { @@ -249,7 +250,7 @@ pub fn rros_program_proxy_tick(clock: &RrosClock) { let mut timer = unsafe { (*tmb).q.get_head().unwrap().value.clone() }; let inband_timer_addr = unsafe { (*this_rq).get_inband_timer().locked_data().get() }; let timer_addr = timer.locked_data().get(); - if timer_addr == inband_timer_addr { + if timer_addr as *const _ == inband_timer_addr as *const _{ unsafe { let state = (*(*this_rq).get_curr().locked_data().get()).state; if rros_need_resched(this_rq) || state & T_ROOT == 0x0 { diff --git a/kernel/rros/timer.rs b/kernel/rros/timer.rs index 7ea6d6717effb..401652154d233 100644 --- a/kernel/rros/timer.rs +++ b/kernel/rros/timer.rs @@ -2,6 +2,7 @@ use crate::{clock::*, lock::*, sched::*, stat::*, tick::*, timeout::*}; use core::ops::DerefMut; +use core::ops::Deref; use kernel::{ bindings, c_str, double_linked_list::*, ktime::*, percpu_defs, prelude::*, spinlock_init, str::CStr, sync::Lock, sync::SpinLock, @@ -20,8 +21,8 @@ pub const RROS_TIMER_IGRAVITY: i32 = 0; /* most conservative */ pub const RROS_TIMER_GRAVITY_MASK: i32 = (RROS_TIMER_KGRAVITY | RROS_TIMER_UGRAVITY); pub const RROS_TIMER_INIT_MASK: i32 = RROS_TIMER_GRAVITY_MASK; pub struct RrosTimerbase { - pub lock: SpinLock, - pub q: List>>, + pub lock: Pin>>, + pub q: List>>>>, } pub fn rros_this_cpu_timers(clock: &RrosClock) -> *mut RrosTimerbase { @@ -50,7 +51,7 @@ pub struct RrosTimer { fired: RrosCounter, #[cfg(CONFIG_SMP)] rq: *mut RrosRq, - pub thread: Option>>, + pub thread: Option>>>>, // FIXME: In RROS, the timer is wrapped 
by various pointers, so container_of cannot be used. A raw pointer pointing to the structure referencing it has to be added. pub pointer: *mut u8, } @@ -202,17 +203,17 @@ impl RrosTimer { (self.get_status() & RROS_TIMER_PERIODIC) != 0 } - pub fn thread(&self) -> Option>> { + pub fn thread(&self) -> Option>>>> { self.thread.clone() } } #[cfg(CONFIG_RROS_TIMER_SCALABLE)] -pub fn rros_insert_tnode(tq: &mut List>>, timer: Arc>) { +pub fn rros_insert_tnode(tq: &mut List>>>>, timer: Arc>>>) { todo!(); } #[cfg(not(CONFIG_RROS_TIMER_SCALABLE))] -pub fn rros_insert_tnode(tq: &mut List>>, timer: Arc>) { +pub fn rros_insert_tnode(tq: &mut List>>>>, timer: Arc>>>) { let mut l = tq.len(); while l >= 1 { let x = tq.get_by_index(l).unwrap().value.clone(); @@ -227,7 +228,7 @@ pub fn rros_insert_tnode(tq: &mut List>>, timer: Arc>) -> KtimeT { +pub fn rros_get_timer_gravity(timer: Arc>>>) -> KtimeT { let status = unsafe { (*timer.locked_data().get()).get_status() }; if status & RROS_TIMER_KGRAVITY != 0 { return unsafe { (*(*timer.locked_data().get()).get_clock()).get_gravity_kernel() }; @@ -240,7 +241,7 @@ pub fn rros_get_timer_gravity(timer: Arc>) -> KtimeT { return unsafe { (*(*timer.locked_data().get()).get_clock()).get_gravity_irq() }; } -pub fn rros_update_timer_date(timer: Arc>) { +pub fn rros_update_timer_date(timer: Arc>>>) { unsafe { let start_date = (*timer.locked_data().get()).get_start_date(); let periodic_ticks = (*timer.locked_data().get()).get_periodic_ticks(); @@ -253,7 +254,7 @@ pub fn rros_update_timer_date(timer: Arc>) { } } -pub fn rros_get_timer_next_date(timer: Arc>) -> KtimeT { +pub fn rros_get_timer_next_date(timer: Arc>>>) -> KtimeT { let start_date = unsafe { (*timer.locked_data().get()).get_start_date() }; let periodic_ticks = unsafe { (*timer.locked_data().get()).get_periodic_ticks() }; let interval = ktime_to_ns(unsafe { (*timer.locked_data().get()).get_interval() }); @@ -261,32 +262,32 @@ pub fn rros_get_timer_next_date(timer: Arc>) -> KtimeT { } 
#[cfg(CONFIG_RROS_RUNSTATS)] -pub fn rros_reset_timer_stats(timer: Arc>) { +pub fn rros_reset_timer_stats(timer: Arc>>>) { // Insufficient conditions, so no modification timer.lock().get_scheduled().set_counter(0); timer.lock().get_fired().set_counter(0); } #[cfg(CONFIG_RROS_RUNSTATS)] -pub fn rros_account_timer_scheduled(timer: Arc>) { +pub fn rros_account_timer_scheduled(timer: Arc>>>) { timer.lock().get_scheduled().inc_counter(); } #[cfg(CONFIG_RROS_RUNSTATS)] -pub fn rros_account_timer_fired(timer: Arc>) { +pub fn rros_account_timer_fired(timer: Arc>>>) { timer.lock().get_fired().inc_counter(); } #[cfg(not(CONFIG_RROS_RUNSTATS))] -pub fn rros_reset_timer_stats(timer: Arc>) {} // Insufficient conditions, so no modification +pub fn rros_reset_timer_stats(timer: Arc>>>) {} // Insufficient conditions, so no modification #[cfg(not(CONFIG_RROS_RUNSTATS))] -pub fn rros_account_timer_scheduled(timer: Arc>) {} +pub fn rros_account_timer_scheduled(timer: Arc>>>) {} #[cfg(not(CONFIG_RROS_RUNSTATS))] -pub fn rros_account_timer_fired(timer: Arc>) {} +pub fn rros_account_timer_fired(timer: Arc>>>) {} -pub fn rros_timer_deactivate(timer: Arc>) -> bool { +pub fn rros_timer_deactivate(timer: Arc>>>) -> bool { let mut heading = true; let tmb = unsafe { (*timer.locked_data().get()).get_base() }; let status = unsafe { (*timer.locked_data().get()).get_status() }; @@ -301,16 +302,16 @@ pub fn rros_timer_deactivate(timer: Arc>) -> bool { } #[cfg(CONFIG_SMP)] -pub fn rros_timer_on_rq(timer: Arc>, rq: *mut RrosRq) -> bool { +pub fn rros_timer_on_rq(timer: Arc>>>, rq: *mut RrosRq) -> bool { unsafe { (*timer.locked_data().get()).get_rq() == rq } } #[cfg(not(CONFIG_SMP))] -pub fn rros_timer_on_rq(timer: Arc>, rq: *mut RrosRq) -> bool { +pub fn rros_timer_on_rq(timer: Arc>>>, rq: *mut RrosRq) -> bool { return true; } -pub fn stop_timer_locked(timer: Arc>) { +pub fn stop_timer_locked(timer: Arc>>>) { // let timer_lock = timer.lock(); let is_running = unsafe { 
(*timer.locked_data().get()).is_running() }; if is_running { @@ -322,14 +323,14 @@ pub fn stop_timer_locked(timer: Arc>) { } } -pub fn __rros_stop_timer(timer: Arc>) { +pub fn __rros_stop_timer(timer: Arc>>>) { let mut flags: u64 = 0; let base: *mut RrosTimerbase = lock_timer_base(timer.clone(), &mut flags); stop_timer_locked(timer); unlock_timer_base(base, flags); } -pub fn rros_stop_timer(timer: Arc>) { +pub fn rros_stop_timer(timer: Arc>>>) { unsafe { let is_running = (*timer.locked_data().get()).is_running(); if is_running { @@ -339,7 +340,7 @@ pub fn rros_stop_timer(timer: Arc>) { } #[cfg(CONFIG_SMP)] -pub fn lock_timer_base(timer: Arc>, flags: &mut u64) -> *mut RrosTimerbase { +pub fn lock_timer_base(timer: Arc>>>, flags: &mut u64) -> *mut RrosTimerbase { unsafe { let mut base = (*timer.locked_data().get()).get_base(); while true { @@ -358,7 +359,7 @@ pub fn lock_timer_base(timer: Arc>, flags: &mut u64) -> *mut } #[cfg(not(CONFIG_SMP))] -pub fn lock_timer_base(timer: Arc>, flags: &mut u64) -> *mut RrosTimerbase { +pub fn lock_timer_base(timer: Arc>>>, flags: &mut u64) -> *mut RrosTimerbase { pr_err!("!!!!!!!!!!!! this is wrong. 
lock_timer_base"); unsafe { (*timer.locked_data().get()).get_base() } } @@ -376,8 +377,8 @@ pub fn unlock_timer_base(_base: *mut RrosTimerbase, _flags: u64) { } pub fn rros_dequeue_timer( - timer: Arc>, - tq: &mut List>>, + timer: Arc>>>, + tq: &mut List>>>>, ) { // pr_debug!("len tq is {}", tq.len()); let timer_addr = unsafe { timer.clone().locked_data().get() }; @@ -386,7 +387,7 @@ pub fn rros_dequeue_timer( for i in 1..=tq.len() { let mut _x = tq.get_by_index(i).unwrap().value.clone(); let x = _x.locked_data().get(); - if x == timer_addr { + if x as *const _ == timer_addr as *const _{ tq.dequeue(i); break; } @@ -397,13 +398,13 @@ pub fn rros_dequeue_timer( } } -pub fn rros_get_timer_expiry(timer: Arc>) -> KtimeT { +pub fn rros_get_timer_expiry(timer: Arc>>>) -> KtimeT { let date = unsafe { (*timer.locked_data().get()).get_date() }; let gravity = rros_get_timer_gravity(timer.clone()); return ktime_add(date, gravity); } -pub fn __rros_get_timer_delta(timer: Arc>) -> KtimeT { +pub fn __rros_get_timer_delta(timer: Arc>>>) -> KtimeT { let expiry = rros_get_timer_expiry(timer.clone()); let now = unsafe { (*timer.lock().get_clock()).read() }; if expiry <= now { @@ -413,7 +414,7 @@ pub fn __rros_get_timer_delta(timer: Arc>) -> KtimeT { return ktime_sub(expiry, now); } -pub fn rros_get_timer_delta(timer: Arc>) -> KtimeT { +pub fn rros_get_timer_delta(timer: Arc>>>) -> KtimeT { let timer_clone = timer.clone(); let is_running = unsafe { (*timer_clone.locked_data().get()).is_running() }; if is_running == false { @@ -451,7 +452,7 @@ use core::borrow::BorrowMut; use core::cell::RefCell; pub fn rros_init_timer_on_rq( - timer: Arc>, + timer: Arc>>>, clock: &mut RrosClock, handler: Option, rq: *mut RrosRq, @@ -483,7 +484,7 @@ pub fn rros_init_timer_on_rq( rros_reset_timer_stats(timer.clone()); } -pub fn program_timer(timer: Arc>, tq: &mut List>>) { +pub fn program_timer(timer: Arc>>>, tq: &mut List>>>>) { rros_enqueue_timer(timer.clone(), tq); let rq = unsafe { 
(*timer.locked_data().get()).get_rq() }; let local_flags = unsafe { (*rq).local_flags }; @@ -498,7 +499,7 @@ pub fn program_timer(timer: Arc>, tq: &mut List>, value: KtimeT, interval: KtimeT) { +pub fn rros_start_timer(timer: Arc>>>, value: KtimeT, interval: KtimeT) { // pr_debug!("yinyongcishu is {}",Arc::strong_count(&timer)); // pr_debug!("rros_start_timer: 1"); // pr_debug!("the start timer{:?} {:?}", value, interval); @@ -532,7 +533,7 @@ pub fn rros_start_timer(timer: Arc>, value: KtimeT, interval } } -pub fn timer_at_front(timer: Arc>) -> bool { +pub fn timer_at_front(timer: Arc>>>) -> bool { unsafe { let tmb = (*timer.locked_data().get()).get_base(); unsafe { @@ -543,7 +544,7 @@ pub fn timer_at_front(timer: Arc>) -> bool { let mut _head = unsafe { (*tmb).q.get_head().unwrap().value.clone() }; let head = _head.locked_data().get(); let timer_addr = timer.clone().locked_data().get(); - if head == timer_addr { + if head as *const _ == timer_addr as *const _{ return true; } unsafe { @@ -555,7 +556,7 @@ pub fn timer_at_front(timer: Arc>) -> bool { if (local_flags & RQ_TDEFER) != 0x0 { let _next = unsafe { (*tmb).q.get_by_index(2).unwrap().value.clone() }; let next = _next.locked_data().get(); - if next == timer_addr { + if next as *const _ == timer_addr as *const _ { return true; } } @@ -563,7 +564,7 @@ pub fn timer_at_front(timer: Arc>) -> bool { } } -pub fn rros_get_timer_date(timer: Arc>) -> KtimeT { +pub fn rros_get_timer_date(timer: Arc>>>) -> KtimeT { let mut expiry = 0; let is_running = unsafe { (*timer.locked_data().get()).is_running() }; if is_running == false { @@ -574,11 +575,11 @@ pub fn rros_get_timer_date(timer: Arc>) -> KtimeT { return expiry; } -pub fn __rros_get_stopped_timer_delta(timer: Arc>) -> KtimeT { +pub fn __rros_get_stopped_timer_delta(timer: Arc>>>) -> KtimeT { return __rros_get_timer_delta(timer.clone()); } -pub fn rros_get_stopped_timer_delta(timer: Arc>) -> KtimeT { +pub fn rros_get_stopped_timer_delta(timer: Arc>>>) -> KtimeT { let t 
= __rros_get_stopped_timer_delta(timer.clone()); if ktime_to_ns(t) <= 1 { @@ -589,8 +590,8 @@ pub fn rros_get_stopped_timer_delta(timer: Arc>) -> KtimeT { } pub fn rros_enqueue_timer( - timer: Arc>, - tq: &mut List>>, + timer: Arc>>>, + tq: &mut List>>>>, ) { rros_insert_tnode(tq, timer.clone()); unsafe { @@ -599,7 +600,7 @@ pub fn rros_enqueue_timer( rros_account_timer_scheduled(timer.clone()); } -pub fn rros_destroy_timer(timer: Arc>) { +pub fn rros_destroy_timer(timer: Arc>>>) { rros_stop_timer(timer.clone()); timer.lock().add_status(RROS_TIMER_KILLED); #[cfg(CONFIG_SMP)] @@ -608,13 +609,13 @@ pub fn rros_destroy_timer(timer: Arc>) { timer.lock().set_base(0 as *mut RrosTimerbase); } -pub fn rros_abs_timeout(timer: Arc>, delta: KtimeT) -> KtimeT { +pub fn rros_abs_timeout(timer: Arc>>>, delta: KtimeT) -> KtimeT { unsafe { ktime_add((*(*timer.locked_data().get()).get_clock()).read(), delta) } } #[cfg(CONFIG_SMP)] pub fn rros_prepare_timed_wait( - timer: Arc>, + timer: Arc>>>, clock: &mut RrosClock, rq: *mut rros_rq, ) { @@ -627,7 +628,7 @@ pub fn rros_prepare_timed_wait( #[cfg(not(CONFIG_SMP))] pub fn rros_prepare_timed_wait( - timer: Arc>, + timer: Arc>>>, clock: &mut RrosClock, rq: *mut rros_rq, ) { diff --git a/kernel/rros/timer_test.rs b/kernel/rros/timer_test.rs index cc34213319411..3a6fbcd72743f 100644 --- a/kernel/rros/timer_test.rs +++ b/kernel/rros/timer_test.rs @@ -1,23 +1,23 @@ use crate::{clock::*, sched::*, timer::*}; -use kernel::{prelude::*, spinlock_init, sync::SpinLock}; +use kernel::{prelude::*, new_spinlock, sync::SpinLock}; #[allow(dead_code)] pub fn test_rros_insert_tnode() -> Result { pr_debug!("~~~test_rros_insert_tnode begin~~~"); unsafe { let tmb = rros_percpu_timers(&RROS_MONO_CLOCK, 0); - let mut x = SpinLock::new(RrosTimer::new(12)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); - let mut y = SpinLock::new(RrosTimer::new(2)); - let pinned = Pin::new_unchecked(&mut y); - spinlock_init!(pinned, "y"); - let mut 
z = SpinLock::new(RrosTimer::new(31)); - let pinned = Pin::new_unchecked(&mut z); - spinlock_init!(pinned, "z"); - let mut a = SpinLock::new(RrosTimer::new(14)); - let pinned = Pin::new_unchecked(&mut a); - spinlock_init!(pinned, "a"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(12),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); + let mut y = Box::pin_init(new_spinlock!(RrosTimer::new(2),"y")).unwrap(); + // let pinned = Pin::new_unchecked(&mut y); + // spinlock_init!(pinned, "y"); + let mut z = Box::pin_init(new_spinlock!(RrosTimer::new(31),"z")).unwrap(); + // let pinned = Pin::new_unchecked(&mut z); + // spinlock_init!(pinned, "z"); + let mut a = Box::pin_init(new_spinlock!(RrosTimer::new(14),"a")).unwrap(); + // let pinned = Pin::new_unchecked(&mut a); + // spinlock_init!(pinned, "a"); let xx = Arc::try_new(x)?; let yy = Arc::try_new(y)?; @@ -46,18 +46,18 @@ pub fn test_rros_enqueue_timer() -> Result { pr_debug!("~~~test_rros_insert_tnode begin~~~"); unsafe { let tmb = rros_percpu_timers(&RROS_MONO_CLOCK, 0); - let mut x = SpinLock::new(RrosTimer::new(12)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); - let mut y = SpinLock::new(RrosTimer::new(2)); - let pinned = Pin::new_unchecked(&mut y); - spinlock_init!(pinned, "y"); - let mut z = SpinLock::new(RrosTimer::new(31)); - let pinned = Pin::new_unchecked(&mut z); - spinlock_init!(pinned, "z"); - let mut a = SpinLock::new(RrosTimer::new(14)); - let pinned = Pin::new_unchecked(&mut a); - spinlock_init!(pinned, "a"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(12),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); + let mut y = Box::pin_init(new_spinlock!(RrosTimer::new(2),"y")).unwrap(); + // let pinned = Pin::new_unchecked(&mut y); + // spinlock_init!(pinned, "y"); + let mut z = Box::pin_init(new_spinlock!(RrosTimer::new(31),"z")).unwrap(); + // let pinned = 
Pin::new_unchecked(&mut z); + // spinlock_init!(pinned, "z"); + let mut a = Box::pin_init(new_spinlock!(RrosTimer::new(14),"a")).unwrap(); + // let pinned = Pin::new_unchecked(&mut a); + // spinlock_init!(pinned, "a"); let xx = Arc::try_new(x)?; let yy = Arc::try_new(y)?; @@ -86,9 +86,9 @@ pub fn test_rros_enqueue_timer() -> Result { pub fn test_rros_get_timer_gravity() -> Result { pr_debug!("~~~test_rros_get_timer_gravity begin~~~"); unsafe { - let mut x = SpinLock::new(RrosTimer::new(1)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(1),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); let xx = Arc::try_new(x)?; xx.lock().set_clock(&mut RROS_MONO_CLOCK as *mut RrosClock); @@ -109,9 +109,9 @@ pub fn test_rros_get_timer_gravity() -> Result { pub fn test_rros_update_timer_date() -> Result { pr_debug!("~~~test_rros_update_timer_date begin~~~"); unsafe { - let mut x = SpinLock::new(RrosTimer::new(1)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(1),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); let xx = Arc::try_new(x)?; xx.lock().set_clock(&mut RROS_MONO_CLOCK as *mut RrosClock); @@ -131,9 +131,9 @@ pub fn test_rros_update_timer_date() -> Result { pub fn test_rros_get_timer_next_date() -> Result { pr_debug!("~~~test_rros_get_timer_next_date begin~~~"); unsafe { - let mut x = SpinLock::new(RrosTimer::new(1)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(1),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); let xx = Arc::try_new(x)?; xx.lock().set_start_date(2); @@ -150,15 +150,15 @@ pub fn test_rros_get_timer_next_date() -> Result { pub fn test_timer_at_front() -> Result { 
unsafe { let tmb = rros_percpu_timers(&RROS_MONO_CLOCK, 0); - let mut x = SpinLock::new(RrosTimer::new(1)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); - let mut y = SpinLock::new(RrosTimer::new(2)); - let pinned = Pin::new_unchecked(&mut y); - spinlock_init!(pinned, "y"); - let mut z = SpinLock::new(RrosTimer::new(3)); - let pinned = Pin::new_unchecked(&mut z); - spinlock_init!(pinned, "z"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(1),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); + let mut y = Box::pin_init(new_spinlock!(RrosTimer::new(2),"y")).unwrap(); + // let pinned = Pin::new_unchecked(&mut y); + // spinlock_init!(pinned, "y"); + let mut z = Box::pin_init(new_spinlock!(RrosTimer::new(3),"z")).unwrap(); + // let pinned = Pin::new_unchecked(&mut z); + // spinlock_init!(pinned, "z"); let xx = Arc::try_new(x)?; let yy = Arc::try_new(y)?; @@ -198,15 +198,15 @@ pub fn test_timer_at_front() -> Result { pub fn test_rros_timer_deactivate() -> Result { unsafe { let tmb = rros_percpu_timers(&RROS_MONO_CLOCK, 0); - let mut x = SpinLock::new(RrosTimer::new(1)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); - let mut y = SpinLock::new(RrosTimer::new(2)); - let pinned = Pin::new_unchecked(&mut y); - spinlock_init!(pinned, "y"); - let mut z = SpinLock::new(RrosTimer::new(3)); - let pinned = Pin::new_unchecked(&mut z); - spinlock_init!(pinned, "z"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(1),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); + let mut y = Box::pin_init(new_spinlock!(RrosTimer::new(2),"y")).unwrap(); + // let pinned = Pin::new_unchecked(&mut y); + // spinlock_init!(pinned, "y"); + let mut z = Box::pin_init(new_spinlock!(RrosTimer::new(3),"z")).unwrap(); + // let pinned = Pin::new_unchecked(&mut z); + // spinlock_init!(pinned, "z"); let xx = Arc::try_new(x)?; let yy = 
Arc::try_new(y)?; @@ -246,9 +246,9 @@ pub fn test_rros_timer_deactivate() -> Result { pub fn test_rros_get_timer_expiry() -> Result { pr_debug!("~~~test_rros_get_timer_expiry begin~~~"); unsafe { - let mut x = SpinLock::new(RrosTimer::new(1)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(1),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); let xx = Arc::try_new(x)?; xx.lock().set_clock(&mut RROS_MONO_CLOCK as *mut RrosClock); @@ -265,9 +265,9 @@ pub fn test_rros_get_timer_expiry() -> Result { pub fn test_rros_get_timer_delta() -> Result { pr_debug!("~~~test_rros_get_timer_delta begin~~~"); unsafe { - let mut x = SpinLock::new(RrosTimer::new(1)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(1),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); let xx = Arc::try_new(x)?; xx.lock().set_clock(&mut RROS_MONO_CLOCK as *mut RrosClock); @@ -289,9 +289,9 @@ pub fn test_rros_get_timer_delta() -> Result { pub fn test_rros_get_timer_date() -> Result { pr_debug!("~~~test_rros_get_timer_date begin~~~"); unsafe { - let mut x = SpinLock::new(RrosTimer::new(1)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(1),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); let xx = Arc::try_new(x)?; xx.lock().set_clock(&mut RROS_MONO_CLOCK as *mut RrosClock); @@ -312,9 +312,9 @@ pub fn test_program_timer() -> Result { pr_debug!("~~~test_program_timer begin~~~"); unsafe { let tmb = rros_percpu_timers(&RROS_MONO_CLOCK, 0); - let mut x = SpinLock::new(RrosTimer::new(1)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); + let mut x = 
Box::pin_init(new_spinlock!(RrosTimer::new(1),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); let xx = Arc::try_new(x)?; @@ -339,9 +339,9 @@ pub fn test_rros_start_timer() -> Result { pr_debug!("~~~test_rros_start_timer begin~~~"); unsafe { let tmb = rros_percpu_timers(&RROS_MONO_CLOCK, 0); - let mut x = SpinLock::new(RrosTimer::new(17)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(17),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); let xx = Arc::try_new(x)?; @@ -370,9 +370,9 @@ pub fn test_stop_timer_locked() -> Result { pr_debug!("~~~test_stop_timer_locked begin~~~"); unsafe { let tmb = rros_percpu_timers(&RROS_MONO_CLOCK, 0); - let mut x = SpinLock::new(RrosTimer::new(17)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(17),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); let xx = Arc::try_new(x)?; @@ -396,9 +396,9 @@ pub fn test_rros_destroy_timer() -> Result { pr_debug!("~~~test_rros_destroy_timer begin~~~"); unsafe { let tmb = rros_percpu_timers(&RROS_MONO_CLOCK, 0); - let mut x = SpinLock::new(RrosTimer::new(17)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(17),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut x); + // spinlock_init!(pinned, "x"); let xx = Arc::try_new(x)?; @@ -434,15 +434,15 @@ pub fn handler(_timer: &RrosTimer) { pub fn test_get_handler() -> Result { pr_debug!("~~~test_get_handler begin~~~"); unsafe { - let mut x = SpinLock::new(RrosTimer::new(17)); - let pinned = Pin::new_unchecked(&mut x); - spinlock_init!(pinned, "x"); + let mut x = Box::pin_init(new_spinlock!(RrosTimer::new(17),"x")).unwrap(); + // let pinned = Pin::new_unchecked(&mut 
x); + // spinlock_init!(pinned, "x"); let mut _xx = Arc::try_new(x)?; //xx.lock().set_handler(Some(handler)); //let handler = xx.lock().get_handler(); - //handler(xx.lock().deref()); + //handler(xx.locked_data().get()); } pr_debug!("~~~test_get_handler end~~~"); Ok(0) diff --git a/kernel/rros/tp.rs b/kernel/rros/tp.rs index 9bdd53b27ca94..9e66731a04b39 100644 --- a/kernel/rros/tp.rs +++ b/kernel/rros/tp.rs @@ -7,10 +7,11 @@ use kernel::{ ktime::{ktime_to_timespec64, timespec64_to_ktime, Timespec64}, memory_rros::*, prelude::*, - spinlock_init, + new_spinlock, sync::{Lock, SpinLock}, types::Atomic, }; +use core::ops::Deref; pub static mut RROS_SCHED_TP: RrosSchedClass = RrosSchedClass { sched_init: Some(tp_init), @@ -73,11 +74,11 @@ pub struct RrosSchedTp { pub partitions: Option<[RrosTpRq; CONFIG_RROS_SCHED_TP_NR_PART as usize]>, pub idle: RrosTpRq, pub tps: *mut RrosTpRq, - pub tf_timer: Option>>, + pub tf_timer: Option>>>>, pub gps: *mut RrosTpSchedule, pub wnext: i32, pub tf_start: KtimeT, - pub threads: Option>>>, + pub threads: Option>>>>>, } impl RrosSchedTp { pub fn new() -> Result { @@ -160,13 +161,14 @@ pub fn tp_init(rq: *mut rros_rq) -> Result { let mut temp: [RrosTpRq; CONFIG_RROS_SCHED_TP_NR_PART as usize] = [r1, r2, r3, r4, r5]; for n in 0..CONFIG_RROS_SCHED_TP_NR_PART { // temp[n as usize].runnable.head = Some(List::new(Arc::try_new(SpinLock::new(RrosThread::new()?))?)); - let mut tmp = Arc::>::try_new_uninit()?; + let mut tmp = Arc::>>>::try_new_uninit()?; + let mut tmp_spinlock = Box::pin_init(new_spinlock!(RrosThread::new().unwrap(),"tp kthread")).unwrap(); let mut tmp = { - core::ptr::write_bytes(Arc::get_mut_unchecked(&mut tmp), 0, 1); + Arc::get_mut(&mut tmp).unwrap().write(tmp_spinlock); tmp.assume_init() }; - let pinned = Pin::new_unchecked(Arc::get_mut_unchecked(&mut tmp)); - spinlock_init!(pinned, "tp kthread"); + // let pinned = Pin::new_unchecked(Arc::get_mut_unchecked(&mut tmp)); + // spinlock_init!(pinned, "tp kthread"); // let mut 
thread = SpinLock::new(RrosThread::new()?); // let pinned = Pin::new_unchecked(&mut thread); @@ -190,11 +192,11 @@ pub fn tp_init(rq: *mut rros_rq) -> Result { tp.partitions = Some(temp); tp.tps = 0 as *mut RrosTpRq; tp.gps = 0 as *mut RrosTpSchedule; - tp.tf_timer = Some(Arc::try_new(SpinLock::new(RrosTimer::new(0)))?); + // tp.tf_timer = Some(Arc::try_new(SpinLock::new(RrosTimer::new(0)))?); - let mut tf_timer = SpinLock::new(RrosTimer::new(2)); - let pinned_p = Pin::new_unchecked(&mut tf_timer); - spinlock_init!(pinned_p, "ptimer"); + let mut tf_timer = Box::pin_init(new_spinlock!(RrosTimer::new(2),"ptimer")).unwrap(); + // let pinned_p = Pin::new_unchecked(&mut tf_timer); + // spinlock_init!(pinned_p, "ptimer"); tp.tf_timer = Some(Arc::try_new(tf_timer)?); rros_init_timer_on_rq( @@ -212,8 +214,8 @@ pub fn tp_init(rq: *mut rros_rq) -> Result { } pub fn tp_setparam( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) -> Result { unsafe { let thread_clone = thread.clone(); @@ -230,8 +232,8 @@ pub fn tp_setparam( } pub fn tp_getparam( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) { let thread = thread.unwrap(); let p = p.unwrap(); @@ -248,8 +250,8 @@ pub fn tp_getparam( } pub fn tp_trackprio( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) { let thread = thread.unwrap(); unsafe { @@ -266,7 +268,7 @@ pub fn tp_trackprio( } } -pub fn tp_ceilprio(thread: Arc>, mut prio: i32) { +pub fn tp_ceilprio(thread: Arc>>>, mut prio: i32) { if prio > RROS_TP_MAX_PRIO { prio = RROS_TP_MAX_PRIO; } @@ -275,8 +277,8 @@ pub fn tp_ceilprio(thread: Arc>, mut prio: i32) { } pub fn tp_chkparam( - thread: Option>>, - p: Option>>, + thread: Option>>>>, + p: Option>>>>, ) -> Result { unsafe { let thread = thread.unwrap(); @@ -308,17 +310,17 @@ pub fn tp_chkparam( } pub fn tp_declare( - thread: Option>>, - _p: Option>>, + thread: Option>>>>, + _p: Option>>>>, ) -> Result { let thread = thread.unwrap(); // let p 
= p.unwrap(); unsafe { let rq = (*thread.locked_data().get()).rq.unwrap(); (*thread.locked_data().get()).tp_link = - Some(Node::new(Arc::try_new(SpinLock::new(RrosThread::new()?))?)); + Some(Node::new(Arc::try_new(Box::pin_init(new_spinlock!(RrosThread::new()?)).unwrap())?)); let tp_link = (*thread.locked_data().get()).tp_link.clone(); - (*rq).tp.threads = Some(List::new(Arc::try_new(SpinLock::new(RrosThread::new()?))?)); + (*rq).tp.threads = Some(List::new(Arc::try_new(Box::pin_init(new_spinlock!(RrosThread::new()?)).unwrap())?)); if (*rq).tp.threads.clone().as_mut().unwrap().is_empty() { pr_debug!("tp.threads is empty!"); } @@ -334,7 +336,7 @@ pub fn tp_declare( Ok(0) } -pub fn tp_forget(thread: Arc>) -> Result { +pub fn tp_forget(thread: Arc>>>) -> Result { unsafe { (*thread.locked_data().get()) .tp_link @@ -347,7 +349,7 @@ pub fn tp_forget(thread: Arc>) -> Result { Ok(0) } -pub fn tp_enqueue(thread: Arc>) -> Result { +pub fn tp_enqueue(thread: Arc>>>) -> Result { unsafe { let head = (*((*thread.locked_data().get()).tps)) .runnable @@ -355,7 +357,7 @@ pub fn tp_enqueue(thread: Arc>) -> Result { .as_mut() .unwrap(); if head.is_empty() { - let node = Node::new(Arc::try_new(SpinLock::new(RrosThread::new()?))?); + let node = Node::new(Arc::try_new(Box::pin_init(new_spinlock!(RrosThread::new()?)).unwrap())?); let box_node = Box::try_new(node).unwrap(); let ptr = Box::into_raw(box_node); (*thread.locked_data().get()).rq_next = Some(NonNull::new(ptr).unwrap()); @@ -370,8 +372,8 @@ pub fn tp_enqueue(thread: Arc>) -> Result { .unwrap() .value .clone() - .locked_data() - .get()) + .lock() + .deref()) .cprio; if thread_cprio <= cprio_in_list { flag = 0; @@ -392,7 +394,7 @@ pub fn tp_enqueue(thread: Arc>) -> Result { } } -pub fn tp_dequeue(thread: Arc>) { +pub fn tp_dequeue(thread: Arc>>>) { unsafe { (*thread.locked_data().get()) .rq_next @@ -403,7 +405,7 @@ pub fn tp_dequeue(thread: Arc>) { } } -pub fn tp_requeue(thread: Arc>) { +pub fn tp_requeue(thread: Arc>>>) { unsafe 
{ let head = (*((*thread.locked_data().get()).tps)) .runnable @@ -422,8 +424,8 @@ pub fn tp_requeue(thread: Arc>) { .unwrap() .value .clone() - .locked_data() - .get()) + .lock() + .deref()) .cprio; if thread_cprio < cprio_in_list { flag = 0; @@ -443,11 +445,11 @@ pub fn tp_requeue(thread: Arc>) { } } -pub fn tp_pick(rq: Option<*mut rros_rq>) -> Result>> { +pub fn tp_pick(rq: Option<*mut rros_rq>) -> Result>>>> { let rq = rq.unwrap(); unsafe { let timer = Arc::into_raw((*rq).tp.tf_timer.as_mut().unwrap().clone()) - as *mut SpinLock as *mut RrosTimer; + as *mut Pin>> as *mut RrosTimer; if rros_timer_is_running(timer) == false { return Err(kernel::Error::EINVAL); } @@ -467,14 +469,14 @@ pub fn tp_pick(rq: Option<*mut rros_rq>) -> Result>> { } } -pub fn tp_migrate(thread: Arc>, _rq: *mut rros_rq) -> Result { +pub fn tp_migrate(thread: Arc>>>, _rq: *mut rros_rq) -> Result { let mut param = RrosSchedParam::new(); unsafe { param.fifo.prio = (*thread.locked_data().get()).cprio; rros_set_thread_schedparam_locked( thread.clone(), Some(&RROS_SCHED_FIFO), - Some(Arc::try_new(SpinLock::new(param))?), + Some(Arc::try_new(Box::pin_init(new_spinlock!(param)).unwrap())?), )?; } Ok(0) @@ -551,7 +553,7 @@ pub fn set_tp_schedule(rq: *mut rros_rq, gps: *mut RrosTpSchedule) -> Result<*mu rros_set_thread_schedparam_locked( thread.clone(), Some(&RROS_SCHED_FIFO), - Some(Arc::try_new(SpinLock::new(param))?), + Some(Arc::try_new(Box::pin_init(new_spinlock!(param)).unwrap())?), )?; } old_gps = tp.gps; diff --git a/kernel/rros/wait.rs b/kernel/rros/wait.rs index 95ab469505e00..e26de08dafe75 100644 --- a/kernel/rros/wait.rs +++ b/kernel/rros/wait.rs @@ -11,7 +11,7 @@ use crate::{ use alloc::sync::Arc; -use core::{clone::Clone, ops::FnMut, ptr::NonNull, sync::atomic::AtomicBool}; +use core::{clone::Clone, ops::FnMut, ptr::NonNull, sync::atomic::AtomicBool,ops::Deref}; use kernel::{ bindings, @@ -28,12 +28,12 @@ pub struct RrosWaitChannel { // pub name: &'static CStr, pub wait_list: List>, pub 
reorder_wait: Option< - fn(waiter: Arc>, originator: Arc>) -> Result, + fn(waiter: Arc>>>, originator: Arc>>>) -> Result, >, pub follow_depend: Option< fn( - wchan: Arc>, - originator: Arc>, + wchan: Arc>>>, + originator: Arc>>>, ) -> Result, >, } @@ -100,7 +100,7 @@ impl RrosWaitQueue { &mut self, waiter: *mut RrosThread, reason: i32, - ) -> Option>> { + ) -> Option>>>> { // trace_rros_wake_up(wq); // assert!(self.lock) //TODO: if self.wchan.wait_list.is_empty() { @@ -181,7 +181,7 @@ impl RrosWaitQueue { pub fn wait_schedule(&mut self) -> i32 { // rros_wait_schedule - let _curr: *mut SpinLock = rros_current(); + let _curr: *mut Pin>> = rros_current(); unsafe { rros_schedule() }; @@ -261,7 +261,7 @@ impl RrosWaitQueue { !self.wchan.wait_list.is_empty() } - pub fn wake_up_head(&mut self) -> Option>> { + pub fn wake_up_head(&mut self) -> Option>>>> { self.wake_up(core::ptr::null_mut(), 0) } diff --git a/kernel/rros/xbuf.rs b/kernel/rros/xbuf.rs index 2b6f022211d6a..d6b456b1f055c 100644 --- a/kernel/rros/xbuf.rs +++ b/kernel/rros/xbuf.rs @@ -31,8 +31,11 @@ use kernel::{ user_ptr::{UserSlicePtrReader, UserSlicePtrWriter}, vmalloc::c_kzalloc, waitqueue, + new_spinlock, }; +use core::cell::OnceCell; + #[derive(Default)] pub struct XbufOps; @@ -204,7 +207,7 @@ pub struct XbufInbound { pub o_event: RrosFlag, pub irq_work: IrqWork, pub ring: XbufRing, - pub lock: SpinLock, + pub lock: Pin>>, } impl XbufInbound { @@ -214,7 +217,7 @@ impl XbufInbound { o_event: RrosFlag::new(), irq_work: IrqWork::new(), ring: XbufRing::new()?, - lock: unsafe { SpinLock::new(0) }, + lock: unsafe { Box::pin_init(new_spinlock!(0)).unwrap() }, }) } } @@ -926,7 +929,7 @@ pub fn rros_write_xbuf(xbuf: &mut RrosXbuf, buf: *const i8, count: usize, f_flag } fn xbuf_factory_build( - fac: &'static mut SpinLock, + fac: &'static mut Pin>>, uname: &'static CStr, u_attrs: Option<*mut u8>, clone_flags: i32, @@ -1016,30 +1019,37 @@ fn xbuf_factory_build( } } -pub static mut RROS_XBUF_FACTORY: SpinLock = unsafe 
{ - SpinLock::new(RrosFactory { - name: CStr::from_bytes_with_nul_unchecked("xbuf\0".as_bytes()), - // fops: Some(RustFileXbuf), - nrdev: CONFIG_RROS_NR_XBUFS, - build: Some(xbuf_factory_build), - dispose: Some(xbuf_factory_dispose), - attrs: None, //sysfs::attribute_group::new(), - flags: RrosFactoryType::CLONE, - inside: Some(RrosFactoryInside { - type_: DeviceType::new(), - class: None, - cdev: None, - device: None, - sub_rdev: None, - kuid: None, - kgid: None, - minor_map: None, - index: None, - name_hash: None, - hash_lock: None, - register: None, - }), - }) -}; +pub static mut RROS_XBUF_FACTORY: OnceCell>>> = OnceCell::new(); + +pub fn rros_xbuf_factory_init() { + unsafe { + RROS_XBUF_FACTORY.get_or_init(|| { + Box::pin_init(new_spinlock!(RrosFactory { + name: CStr::from_bytes_with_nul_unchecked("xbuf\0".as_bytes()), + // fops: Some(RustFileXbuf), + nrdev: CONFIG_RROS_NR_XBUFS, + build: Some(xbuf_factory_build), + dispose: Some(xbuf_factory_dispose), + attrs: None, // sysfs::attribute_group::new(), + flags: RrosFactoryType::CLONE, + inside: Some(RrosFactoryInside { + type_: DeviceType::new(), + class: None, + cdev: None, + device: None, + sub_rdev: None, + kuid: None, + kgid: None, + minor_map: None, + index: None, + name_hash: None, + hash_lock: None, + register: None, + }), + })) + .unwrap() + }); + } +} pub fn xbuf_factory_dispose(_ele: RrosElement) {} diff --git a/rust/helpers.c b/rust/helpers.c index 25a77c7476a82..f9dd5e6836bdf 100644 --- a/rust/helpers.c +++ b/rust/helpers.c @@ -551,13 +551,6 @@ refcount_t rust_helper_REFCOUNT_INIT(int n) } EXPORT_SYMBOL_GPL(rust_helper_REFCOUNT_INIT); -struct class *rust_helper_class_create(const char *name) -{ - struct class *res = class_create(name); - return res; -} -EXPORT_SYMBOL_GPL(rust_helper_class_create); - const char *rust_helper_dev_name(const struct device *dev) { return dev_name(dev); diff --git a/rust/kernel/class.rs b/rust/kernel/class.rs index 0f4b4fd3f0683..f0ab90200b6f4 100644 --- 
a/rust/kernel/class.rs +++ b/rust/kernel/class.rs @@ -9,11 +9,6 @@ use core::u32; use crate::{bindings, c_types, device, error::Error, Result}; extern "C" { - #[allow(improper_ctypes)] - fn rust_helper_class_create( - this_module: &'static crate::ThisModule, - buf: *const c_types::c_char, - ) -> *mut bindings::class; #[allow(dead_code)] #[allow(improper_ctypes)] fn rust_helper_dev_name(dev: *const bindings::device) -> *const c_types::c_char; @@ -43,7 +38,7 @@ impl Class { this_module: &'static crate::ThisModule, name: *const c_types::c_char, ) -> Result { - let ptr = class_create(this_module, name); + let ptr = unsafe { bindings::class_create(name) }; if ptr.is_null() { return Err(Error::EBADF); } @@ -63,11 +58,3 @@ impl Class { self.0 } } - -/// The `class_create` function is a helper function that creates a new device class. It takes a reference to the current module and a name, and returns a raw pointer to the created class./// The `DevT` struct is a wrapper around the `bindings::dev_t` struct from the kernel bindings. It represents a device type. -fn class_create( - this_module: &'static crate::ThisModule, - name: *const c_types::c_char, -) -> *mut bindings::class { - unsafe { rust_helper_class_create(this_module, name) } -} diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index a9d33424667ea..eea6c459366a0 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -142,6 +142,8 @@ pub mod sysfs; #[cfg(CONFIG_RROS)] pub mod tick; #[cfg(CONFIG_RROS)] +pub mod time_types; +#[cfg(CONFIG_RROS)] pub mod timekeeping; #[cfg(CONFIG_RROS)] pub mod uidgid; diff --git a/rust/kernel/memory_rros.rs b/rust/kernel/memory_rros.rs index 5df054c0170b3..4a42efb29b869 100644 --- a/rust/kernel/memory_rros.rs +++ b/rust/kernel/memory_rros.rs @@ -3,7 +3,7 @@ //! Rros Memory. 
use crate::timekeeping::*; use crate::{bindings, Result}; -use crate::{c_types, mm, prelude::*, premmpt, spinlock_init, sync::SpinLock, vmalloc}; +use crate::{c_types, mm, prelude::*, premmpt, new_spinlock, sync::SpinLock, vmalloc}; use core::{mem::size_of, ptr::addr_of_mut}; const PAGE_SIZE: u32 = 4096 as u32; @@ -175,7 +175,7 @@ pub struct RrosHeap { pub buckets: [u32; RROS_HEAP_MAX_BUCKETS as usize], /// `lock` is an optional SpinLock used for ensuring thread-safety in the `RrosHeap`. /// It is initialized in the `init` method of the `RrosHeap` struct. - pub lock: Option>, + pub lock: Option>>>, } /// Implementation of the `RrosHeap` struct. @@ -192,9 +192,7 @@ impl RrosHeap { return Err(crate::Error::EINVAL); } - let mut spinlock = unsafe { SpinLock::new(1) }; - let pinned = unsafe { Pin::new_unchecked(&mut spinlock) }; - spinlock_init!(pinned, "spinlock"); + let mut spinlock = Box::pin_init(new_spinlock!(1 as i32,"spinlock",)).unwrap(); self.lock = Some(spinlock); for i in self.buckets.iter_mut() { diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs index dde4e5a7a5135..05f21e27e4dcd 100644 --- a/rust/kernel/sync/lock.rs +++ b/rust/kernel/sync/lock.rs @@ -73,11 +73,33 @@ pub unsafe trait Backend { } } +#[cfg(CONFIG_RROS)] +extern "C" { + #[allow(improper_ctypes)] + fn rust_helper_spin_lock_init( + lock: *mut bindings::spinlock_t, + name: *const c_types::c_char, + key: *mut bindings::lock_class_key, + ); + #[allow(dead_code)] + fn rust_helper_spin_lock(lock: *mut bindings::spinlock); + #[allow(dead_code)] + fn rust_helper_spin_unlock(lock: *mut bindings::spinlock); + fn rust_helper_hard_spin_lock(lock: *mut bindings::raw_spinlock); + fn rust_helper_hard_spin_unlock(lock: *mut bindings::raw_spinlock); + fn rust_helper_raw_spin_lock_irqsave(lock: *mut bindings::hard_spinlock_t) -> u64; + fn rust_helper_raw_spin_unlock_irqrestore(lock: *mut bindings::hard_spinlock_t, flags: u64); + fn rust_helper_raw_spin_lock_init(lock: *mut 
bindings::raw_spinlock_t); + fn rust_helper_raw_spin_lock(lock: *mut bindings::hard_spinlock_t); + fn rust_helper_raw_spin_unlock(lock: *mut bindings::hard_spinlock_t); + fn rust_helper_raw_spin_lock_nested(lock: *mut bindings::hard_spinlock_t, depth: u32); +} + /// A mutual exclusion primitive. /// /// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock /// [`Backend`] specified as the generic parameter `B`. -#[cfg(not(CONFIG_RROS))] +#[cfg(not(CONFIG_RROS_SPINLOCK))] #[pin_data] pub struct Lock { /// The kernel lock object. @@ -95,15 +117,15 @@ pub struct Lock { } // SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can. -#[cfg(not(CONFIG_RROS))] +#[cfg(not(CONFIG_RROS_SPINLOCK))] unsafe impl Send for Lock {} // SAFETY: `Lock` serialises the interior mutability it provides, so it is `Sync` as long as the // data it protects is `Send`. -#[cfg(not(CONFIG_RROS))] +#[cfg(not(CONFIG_RROS_SPINLOCK))] unsafe impl Sync for Lock {} -#[cfg(not(CONFIG_RROS))] +#[cfg(not(CONFIG_RROS_SPINLOCK))] impl Lock { /// Constructs a new lock initialiser. #[allow(clippy::new_ret_no_self)] @@ -120,16 +142,19 @@ impl Lock { } } -#[cfg(not(CONFIG_RROS))] + + +#[cfg(not(CONFIG_RROS_SPINLOCK))] impl Lock { /// Acquires the lock and gives the caller access to the data protected by it. pub fn lock(&self) -> Guard<'_, T, B> { // SAFETY: The constructor of the type calls `init`, so the existence of the object proves // that `init` was called. - let state = unsafe { B::lock(self.state.get()) }; + let state = unsafe { B::lock(self.state.get()) }; // SAFETY: The lock was just acquired. unsafe { Guard::new(self, state) } } + } /// A lock guard. /// /// Allows mutual exclusion primitives that implement the [`Backend`] trait to automatically unlock /// when a guard goes out of scope. It also provides a safe and convenient way to access the data /// protected by the lock. 
-#[cfg(not(CONFIG_RROS))] +#[cfg(not(CONFIG_RROS_SPINLOCK))] #[must_use = "the lock unlocks immediately when the guard is unused"] pub struct Guard<'a, T: ?Sized, B: Backend> { pub(crate) lock: &'a Lock, @@ -146,10 +171,10 @@ pub struct Guard<'a, T: ?Sized, B: Backend> { } // SAFETY: `Guard` is sync when the data protected by the lock is also sync. -#[cfg(not(CONFIG_RROS))] +#[cfg(not(CONFIG_RROS_SPINLOCK))] unsafe impl Sync for Guard<'_, T, B> {} -#[cfg(not(CONFIG_RROS))] +#[cfg(not(CONFIG_RROS_SPINLOCK))] impl Guard<'_, T, B> { pub(crate) fn do_unlocked(&mut self, cb: impl FnOnce()) { // SAFETY: The caller owns the lock, so it is safe to unlock it. @@ -163,7 +188,7 @@ impl Guard<'_, T, B> { } } -#[cfg(not(CONFIG_RROS))] +#[cfg(not(CONFIG_RROS_SPINLOCK))] impl core::ops::Deref for Guard<'_, T, B> { type Target = T; @@ -173,7 +198,7 @@ impl core::ops::Deref for Guard<'_, T, B> { } } -#[cfg(not(CONFIG_RROS))] +#[cfg(not(CONFIG_RROS_SPINLOCK))] impl core::ops::DerefMut for Guard<'_, T, B> { fn deref_mut(&mut self) -> &mut Self::Target { // SAFETY: The caller owns the lock, so it is safe to deref the protected data. @@ -181,7 +206,7 @@ impl core::ops::DerefMut for Guard<'_, T, B> { } } -#[cfg(not(CONFIG_RROS))] +#[cfg(not(CONFIG_RROS_SPINLOCK))] impl Drop for Guard<'_, T, B> { fn drop(&mut self) { // SAFETY: The caller owns the lock, so it is safe to unlock it. @@ -189,7 +214,7 @@ impl Drop for Guard<'_, T, B> { } } -#[cfg(not(CONFIG_RROS))] +#[cfg(not(CONFIG_RROS_SPINLOCK))] impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> { /// Constructs a new immutable lock guard. /// @@ -247,7 +272,7 @@ pub trait NeedsLockClass { } /// Reschedules the caller's task if needed. -#[cfg(CONFIG_RROS)] +#[cfg(CONFIG_RROS_SPINLOCK)] pub fn cond_resched() -> bool { // SAFETY: No arguments, reschedules `current` if needed. 
unsafe { rust_helper_cond_resched() != 0 } diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs index bf45ad9ee1275..a58eea11c785a 100644 --- a/rust/kernel/sync/lock/spinlock.rs +++ b/rust/kernel/sync/lock/spinlock.rs @@ -23,7 +23,7 @@ use crate::str::CStr; /// /// It uses the name if one is given, otherwise it generates one based on the file name and line /// number. -#[cfg(not(CONFIG_RROS))] +#[cfg(not(CONFIG_RROS_SPINLOCK))] #[macro_export] macro_rules! new_spinlock { ($inner:expr $(, $name:literal)? $(,)?) => { @@ -97,7 +97,7 @@ macro_rules! new_spinlock { /// ``` /// /// [`spinlock_t`]: ../../../../include/linux/spinlock.h -#[cfg(not(CONFIG_RROS))] +#[cfg(not(CONFIG_RROS_SPINLOCK))] pub type SpinLock = super::Lock; /// A kernel `spinlock_t` lock backend. @@ -176,7 +176,7 @@ macro_rules! spinlock_init { /// handlers (in which case it is ok for interrupts to be enabled). /// /// [`spinlock_t`]: ../../../include/linux/spinlock.h -#[cfg(CONFIG_RROS)] +#[cfg(CONFIG_RROS_SPINLOCK)] pub struct SpinLock { spin_lock: Opaque, @@ -188,15 +188,15 @@ pub struct SpinLock { } // SAFETY: `SpinLock` can be transferred across thread boundaries iff the data it protects can. -#[cfg(CONFIG_RROS)] +#[cfg(CONFIG_RROS_SPINLOCK)] unsafe impl Send for SpinLock {} // SAFETY: `SpinLock` serialises the interior mutability it provides, so it is `Sync` as long as the // data it protects is `Send`. -#[cfg(CONFIG_RROS)] +#[cfg(CONFIG_RROS_SPINLOCK)] unsafe impl Sync for SpinLock {} -#[cfg(CONFIG_RROS)] +#[cfg(CONFIG_RROS_SPINLOCK)] impl SpinLock { /// Constructs a new spinlock. /// @@ -212,30 +212,17 @@ impl SpinLock { } } -#[cfg(CONFIG_RROS)] +#[cfg(not (CONFIG_RROS_SPINLOCK))] impl SpinLock { - /// Locks the spinlock and gives the caller access to the data protected by it. Only one thread - /// at a time is allowed to access the protected data. - pub fn lock(&self) -> Guard<'_, Self> { - self.lock_noguard(); - // SAFETY: The spinlock was just acquired. 
- unsafe { Guard::new(self) } - } - - /// The `irq_lock` method is similar to `lock`, but it also disables interrupts before acquiring the lock. This can be used to prevent race conditions between interrupt handlers and normal code. - pub fn irq_lock(&self) -> Guard<'_, Self> { - self.lock_noguard(); - - // SAFETY: The spinlock was just acquired. - unsafe { Guard::new(self) } - } - + /// The `irq_lock_noguard` method acquires the lock and disables interrupts, but does not return a `Guard`. Instead, it returns a `u64` that represents the previous interrupt state. This method is unsafe because it does not provide any guarantees about the lifetime of the lock. // FIXME: use this to enable the smp function pub fn irq_lock_noguard(&self) -> u64 { // SAFETY: The caller guarantees that self is initialised. So the pointer is valid. unsafe { - rust_helper_raw_spin_lock_irqsave(self.spin_lock.get() as *mut bindings::hard_spinlock_t) + + rust_helper_raw_spin_lock_irqsave(self.state.get() as *mut bindings::hard_spinlock_t) + } } @@ -245,7 +232,7 @@ impl SpinLock { // SAFETY: The caller guarantees that self is initialised. So the pointer is valid. unsafe { rust_helper_raw_spin_unlock_irqrestore( - self.spin_lock.get() as *mut bindings::hard_spinlock_t, + self.state.get() as *mut bindings::hard_spinlock_t, flags, ); } @@ -254,15 +241,15 @@ impl SpinLock { /// The `raw_spin_lock` method acquires the lock. pub fn raw_spin_lock(&self) { // SAFETY: The caller guarantees that self is initialised. So the pointer is valid. - unsafe { rust_helper_raw_spin_lock(self.spin_lock.get() as *mut bindings::hard_spinlock_t) } + unsafe { rust_helper_raw_spin_lock(self.state.get() as *mut bindings::hard_spinlock_t) } } - /// The `raw_spin_lock_nested` method acquires the lock nestly. + /// The `raw_spin_lock_nested` method acquires the lock nestly. pub fn raw_spin_lock_nested(&self, depth: u32) { // SAFETY: The caller guarantees that self is initialised. So the pointer is valid. 
unsafe { rust_helper_raw_spin_lock_nested( - self.spin_lock.get() as *mut bindings::hard_spinlock_t, + self.state.get() as *mut bindings::hard_spinlock_t, depth, ) } @@ -272,12 +259,12 @@ impl SpinLock { pub fn raw_spin_unlock(&self) { // SAFETY: The caller guarantees that self is initialised. So the pointer is valid. unsafe { - rust_helper_raw_spin_unlock(self.spin_lock.get() as *mut bindings::hard_spinlock_t) + rust_helper_raw_spin_unlock(self.state.get() as *mut bindings::hard_spinlock_t) } } } -#[cfg(CONFIG_RROS)] +#[cfg(CONFIG_RROS_SPINLOCK)] impl NeedsLockClass for SpinLock { unsafe fn init(self: Pin<&mut Self>, name: &'static CStr, key: *mut bindings::lock_class_key) { // SAFETY: The caller guarantees that `name` and `key` are initialised. So the pointers are valid. @@ -285,14 +272,14 @@ impl NeedsLockClass for SpinLock { } } -#[cfg(CONFIG_RROS)] +#[cfg(not (CONFIG_RROS_SPINLOCK))] impl Lock for SpinLock { type Inner = T; fn lock_noguard(&self) { // SAFETY: `spin_lock` points to valid memory. // unsafe { rust_helper_spin_lock(self.spin_lock.get()) }; - unsafe { rust_helper_hard_spin_lock(self.spin_lock.get() as *mut bindings::raw_spinlock) }; + unsafe { rust_helper_hard_spin_lock(self.state.get() as *mut bindings::raw_spinlock) }; // unsafe { rust_helper_hard_spin_lock((*self.spin_lock.get()).rlock() // as *mut bindings::raw_spinlock) }; } @@ -301,7 +288,7 @@ impl Lock for SpinLock { // SAFETY: `spin_lock` points to valid memory. 
// unsafe { rust_helper_spin_unlock(self.spin_lock.get()) }; unsafe { - rust_helper_hard_spin_unlock(self.spin_lock.get() as *mut bindings::raw_spinlock) + rust_helper_hard_spin_unlock(self.state.get() as *mut bindings::raw_spinlock) }; // unsafe { rust_helper_hard_spin_unlock((*self.spin_lock.get()).rlock() // as *mut bindings::raw_spinlock) }; diff --git a/rust/uapi/time_types.rs b/rust/kernel/time_types.rs similarity index 100% rename from rust/uapi/time_types.rs rename to rust/kernel/time_types.rs diff --git a/rust/uapi/lib.rs b/rust/uapi/lib.rs index b474cca0fc861..0caad902ba40a 100644 --- a/rust/uapi/lib.rs +++ b/rust/uapi/lib.rs @@ -23,9 +23,4 @@ unsafe_op_in_unsafe_fn )] -extern crate bindings; - include!(concat!(env!("OBJTREE"), "/rust/uapi/uapi_generated.rs")); - -#[cfg(CONFIG_RROS)] -pub mod time_types;