diff --git a/Cargo.toml b/Cargo.toml
index 16d1342..d6162ee 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,7 +12,7 @@ edition = "2018"
 [dependencies]
 bitflags = "1.2.0"
 nix = "0.18.0"
-uring-sys = "0.7.4"
+uring-sys = { package = "uring-sys2", version = "0.9.1" }
 libc = "0.2.77"
 
 [dev-dependencies]
diff --git a/src/completion_queue.rs b/src/completion_queue.rs
index 867fb1a..7061ac1 100644
--- a/src/completion_queue.rs
+++ b/src/completion_queue.rs
@@ -65,8 +65,8 @@ impl<'ring> CompletionQueue<'ring> {
                 self.ring.as_ptr(),
                 cqe.as_mut_ptr(),
                 count as _,
-                ptr::null(),
-                ptr::null(),
+                ptr::null_mut(),
+                ptr::null_mut(),
             ))?;
 
             Ok(&mut *cqe.assume_init())
diff --git a/src/cqe.rs b/src/cqe.rs
index bd97257..b1cef5e 100644
--- a/src/cqe.rs
+++ b/src/cqe.rs
@@ -159,8 +159,8 @@ impl<'a> CQEsBlocking<'a> {
                 self.ring.as_ptr(),
                 cqe.as_mut_ptr(),
                 self.wait_for as _,
-                ptr::null(),
-                ptr::null(),
+                ptr::null_mut(),
+                ptr::null_mut(),
             ))?;
 
             Ok(&mut *cqe.assume_init())
diff --git a/src/lib.rs b/src/lib.rs
index c9638d5..2dd2985 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -97,29 +97,46 @@ bitflags::bitflags! {
     /// ```
     pub struct SetupFlags: u32 {
         /// Poll the IO context instead of defaulting to interrupts.
-        const IOPOLL = 1 << 0; /* io_context is polled */
+        const IOPOLL = uring_sys::IORING_SETUP_IOPOLL; /* io_context is polled */
         /// Assign a kernel thread to poll the submission queue. Requires elevated privileges to set.
-        const SQPOLL = 1 << 1; /* SQ poll thread */
+        const SQPOLL = uring_sys::IORING_SETUP_SQPOLL; /* SQ poll thread */
         /// Force the kernel thread created with `SQPOLL` to be bound to the CPU used by the
         /// `SubmissionQueue`. Requires `SQPOLL` set.
-        const SQ_AFF = 1 << 2; /* sq_thread_cpu is valid */
-
-        const CQSIZE = 1 << 3;
-        const CLAMP = 1 << 4;
-        const ATTACH_WQ = 1 << 5;
+        const SQ_AFF = uring_sys::IORING_SETUP_SQ_AFF; /* sq_thread_cpu is valid */
+
+        const CQSIZE = uring_sys::IORING_SETUP_CQSIZE;
+        const CLAMP = uring_sys::IORING_SETUP_CLAMP;
+        const ATTACH_WQ = uring_sys::IORING_SETUP_ATTACH_WQ;
+        const R_DISABLED = uring_sys::IORING_SETUP_R_DISABLED;
+        const SUBMIT_ALL = uring_sys::IORING_SETUP_SUBMIT_ALL;
+        const COOP_TASKRUN = uring_sys::IORING_SETUP_COOP_TASKRUN;
+        const TASKRUN_FLAG = uring_sys::IORING_SETUP_TASKRUN_FLAG;
+        const SQE128 = uring_sys::IORING_SETUP_SQE128;
+        const CQE32 = uring_sys::IORING_SETUP_CQE32;
+        const SINGLE_ISSUER = uring_sys::IORING_SETUP_SINGLE_ISSUER;
+        const DEFER_TASKRUN = uring_sys::IORING_SETUP_DEFER_TASKRUN;
+        const NO_MMAP = uring_sys::IORING_SETUP_NO_MMAP;
+        const REGISTERED_FD_ONLY = uring_sys::IORING_SETUP_REGISTERED_FD_ONLY;
     }
 }
 
 bitflags::bitflags! {
     /// Advanced features that can be enabled when setting up an [`IoUring`] instance.
     pub struct SetupFeatures: u32 {
-        const SINGLE_MMAP = 1 << 0;
-        const NODROP = 1 << 1;
-        const SUBMIT_STABLE = 1 << 2;
-        const RW_CUR_POS = 1 << 3;
-        const CUR_PERSONALITY = 1 << 4;
-        const FAST_POLL = 1 << 5;
-        const POLL_32BITS = 1 << 6;
+        const SINGLE_MMAP = uring_sys::IORING_FEAT_SINGLE_MMAP;
+        const NODROP = uring_sys::IORING_FEAT_NODROP;
+        const SUBMIT_STABLE = uring_sys::IORING_FEAT_SUBMIT_STABLE;
+        const RW_CUR_POS = uring_sys::IORING_FEAT_RW_CUR_POS;
+        const CUR_PERSONALITY = uring_sys::IORING_FEAT_CUR_PERSONALITY;
+        const FAST_POLL = uring_sys::IORING_FEAT_FAST_POLL;
+        const POLL_32BITS = uring_sys::IORING_FEAT_POLL_32BITS;
+        const SQPOLL_NONFIXED = uring_sys::IORING_FEAT_SQPOLL_NONFIXED;
+        const EXT_ARG = uring_sys::IORING_FEAT_EXT_ARG;
+        const NATIVE_WORKERS = uring_sys::IORING_FEAT_NATIVE_WORKERS;
+        const RSRC_TAGS = uring_sys::IORING_FEAT_RSRC_TAGS;
+        const CQE_SKIP = uring_sys::IORING_FEAT_CQE_SKIP;
+        const LINKED_FILE = uring_sys::IORING_FEAT_LINKED_FILE;
+        const REG_REG_RING = uring_sys::IORING_FEAT_REG_REG_RING;
     }
 }
 
@@ -273,20 +290,20 @@ impl IoUring {
     /// Block until at least one [`CQE`] is completed. This will consume that CQE.
     pub fn wait_for_cqe(&mut self) -> io::Result<CQE> {
         let ring = NonNull::from(&self.ring);
-        self.inner_wait_for_cqes(1, ptr::null()).map(|cqe| CQE::new(ring, cqe))
+        self.inner_wait_for_cqes(1, ptr::null_mut()).map(|cqe| CQE::new(ring, cqe))
     }
 
     /// Block until a [`CQE`] is ready or timeout.
     pub fn wait_for_cqe_with_timeout(&mut self, duration: Duration) -> io::Result<CQE> {
-        let ts = uring_sys::__kernel_timespec {
+        let mut ts = uring_sys::__kernel_timespec {
             tv_sec: duration.as_secs() as _,
             tv_nsec: duration.subsec_nanos() as _
         };
 
         let ring = NonNull::from(&self.ring);
-        self.inner_wait_for_cqes(1, &ts).map(|cqe| CQE::new(ring, cqe))
+        self.inner_wait_for_cqes(1, &mut ts).map(|cqe| CQE::new(ring, cqe))
     }
 
     /// Returns an iterator of [`CQE`]s which are ready from the kernel.
@@ -305,10 +322,10 @@ impl IoUring {
     /// Wait until `count` [`CQE`]s are ready, without submitting any events.
     pub fn wait_for_cqes(&mut self, count: u32) -> io::Result<()> {
-        self.inner_wait_for_cqes(count as _, ptr::null()).map(|_| ())
+        self.inner_wait_for_cqes(count as _, ptr::null_mut()).map(|_| ())
     }
 
-    fn inner_wait_for_cqes(&mut self, count: u32, ts: *const uring_sys::__kernel_timespec)
+    fn inner_wait_for_cqes(&mut self, count: u32, ts: *mut uring_sys::__kernel_timespec)
         -> io::Result<&mut uring_sys::io_uring_cqe>
     {
         unsafe {
@@ -319,7 +336,7 @@ impl IoUring {
                 cqe.as_mut_ptr(),
                 count,
                 ts,
-                ptr::null(),
+                ptr::null_mut(),
             ))?;
 
             Ok(&mut *cqe.assume_init())
diff --git a/src/probe.rs b/src/probe.rs
index 319dc16..f259772 100644
--- a/src/probe.rs
+++ b/src/probe.rs
@@ -22,7 +22,7 @@ impl Probe {
         }
     }
 
-    pub fn supports(&self, op: uring_sys::IoRingOp) -> bool {
+    pub fn supports(&self, op: uring_sys::io_uring_op) -> bool {
         unsafe { uring_sys::io_uring_opcode_supported(self.probe.as_ptr(), op as _) != 0 }
     }
 }
diff --git a/src/registrar/mod.rs b/src/registrar/mod.rs
index 1c31fbb..54c036d 100644
--- a/src/registrar/mod.rs
+++ b/src/registrar/mod.rs
@@ -291,7 +291,7 @@ mod tests {
     }
 
     #[test]
-    #[should_panic(expected = "Device or resource busy")]
+    #[should_panic(expected = "Resource busy")]
     fn double_register() {
         let ring = IoUring::new(1).unwrap();
         let _ = ring.registrar().register_files(&[1]).unwrap();
diff --git a/src/sqe.rs b/src/sqe.rs
index 7b6f70a..0ef2303 100644
--- a/src/sqe.rs
+++ b/src/sqe.rs
@@ -100,7 +100,7 @@ impl<'a> SQE<'a> {
     /// Set the [`Personality`] associated with this submission.
     #[inline]
     pub fn set_personality(&mut self, personality: Personality) {
-        self.sqe.buf_index.buf_index.personality = personality.id;
+        self.sqe.personality = personality.id;
     }
 
     /// Prepare a read on a file descriptor.
@@ -244,12 +244,16 @@ impl<'a> SQE<'a> {
     /// Prepare a recvmsg event on a file descriptor.
     pub unsafe fn prep_recvmsg(&mut self, fd: impl UringFd, msg: *mut libc::msghdr, flags: MsgFlags) {
+        // `libc::msghdr` and `uring_sys::msghdr` are identical.
+        let msg: *mut uring_sys::msghdr = msg.cast();
         uring_sys::io_uring_prep_recvmsg(self.sqe, fd.as_raw_fd(), msg, flags.bits() as _);
         fd.update_sqe(self);
     }
 
     /// Prepare a sendmsg event on a file descriptor.
     pub unsafe fn prep_sendmsg(&mut self, fd: impl UringFd, msg: *mut libc::msghdr, flags: MsgFlags) {
+        // `libc::msghdr` and `uring_sys::msghdr` are identical.
+        let msg = msg.cast_const().cast();
         uring_sys::io_uring_prep_sendmsg(self.sqe, fd.as_raw_fd(), msg, flags.bits() as _);
         fd.update_sqe(self);
     }
 
@@ -274,7 +278,7 @@ impl<'a> SQE<'a> {
         path: &CStr,
         flags: StatxFlags,
         mask: StatxMode,
-        buf: &mut libc::statx,
+        buf: &mut uring_sys::statx,
     ) {
         uring_sys::io_uring_prep_statx(self.sqe, dirfd.as_raw_fd(), path.as_ptr() as _,
                                        flags.bits() as _, mask.bits() as _,
@@ -343,7 +347,13 @@ impl<'a> SQE<'a> {
 
     #[inline]
     pub unsafe fn prep_poll_add(&mut self, fd: impl UringFd, poll_flags: PollFlags) {
-        uring_sys::io_uring_prep_poll_add(self.sqe, fd.as_raw_fd(), poll_flags.bits());
+        // `PollFlags::bits` gives us an `i16`, and we want those exact bits.
+        // The ugly cast should be the most sane option in this situation. Since
+        // we don't want it to turn into a foot-gun, we should also make sure we
+        // feed the type we actually expect into the cast.
+        let poll_flags: i16 = poll_flags.bits();
+        let poll_flags = (poll_flags as u16).into();
+        uring_sys::io_uring_prep_poll_add(self.sqe, fd.as_raw_fd(), poll_flags);
         fd.update_sqe(self);
     }
 
diff --git a/src/submission_queue.rs b/src/submission_queue.rs
index 7a72a72..15631cc 100644
--- a/src/submission_queue.rs
+++ b/src/submission_queue.rs
@@ -113,7 +113,7 @@ impl<'ring> SubmissionQueue<'ring> {
             sqe.clear();
             unsafe {
                 sqe.prep_timeout(&ts, 0, crate::sqe::TimeoutFlags::empty());
-                sqe.set_user_data(uring_sys::LIBURING_UDATA_TIMEOUT);
+                sqe.set_user_data(libc::__u64::max_value());
                 return resultify(uring_sys::io_uring_submit_and_wait(self.ring.as_ptr(), wait_for as _))
             }
         }
diff --git a/tests/probe.rs b/tests/probe.rs
index 51aeb5d..6bb4451 100644
--- a/tests/probe.rs
+++ b/tests/probe.rs
@@ -1,8 +1,8 @@
 use iou::Probe;
-use uring_sys::IoRingOp;
+use uring_sys::io_uring_op;
 
 #[test]
 fn probe() {
     let probe = Probe::new().unwrap();
-    assert!(probe.supports(IoRingOp::IORING_OP_NOP));
+    assert!(probe.supports(io_uring_op::IORING_OP_NOP));
 }
diff --git a/tests/read.rs b/tests/read.rs
index 9542531..e0e7468 100644
--- a/tests/read.rs
+++ b/tests/read.rs
@@ -91,7 +91,7 @@ fn read_registered_buf() -> io::Result<()> {
         let mut sqe = sq.prepare_sqe().unwrap();
         sqe.prep_read(file.as_raw_fd(), buf.as_mut(), 0);
         sqe.set_user_data(0xDEADBEEF);
-        assert!(sqe.raw().opcode == uring_sys::IoRingOp::IORING_OP_READ_FIXED as u8);
+        assert!(sqe.raw().opcode == uring_sys::io_uring_op::IORING_OP_READ_FIXED as u8);
         sq.submit()?;
     }
 
@@ -126,7 +126,7 @@ fn read_registered_fd_and_buf() -> io::Result<()> {
         let mut sqe = sq.prepare_sqe().unwrap();
         sqe.prep_read(fd, buf.as_mut(), 0);
         sqe.set_user_data(0xDEADBEEF);
-        assert!(sqe.raw().opcode == uring_sys::IoRingOp::IORING_OP_READ_FIXED as u8);
+        assert!(sqe.raw().opcode == uring_sys::io_uring_op::IORING_OP_READ_FIXED as u8);
         assert!(sqe.flags().contains(iou::sqe::SubmissionFlags::FIXED_FILE));
         sq.submit()?;
     }
diff --git a/tests/write.rs b/tests/write.rs
index 8e093a0..ae0e484 100644
--- a/tests/write.rs
+++ b/tests/write.rs
@@ -110,7 +110,7 @@ fn write_registered_buf() -> io::Result<()> {
         let mut sq = io_uring.sq();
         let mut sqe = sq.prepare_sqe().unwrap();
         sqe.prep_write(file.as_raw_fd(), buf.slice_to(TEXT.len()), 0);
-        assert!(sqe.raw().opcode == uring_sys::IoRingOp::IORING_OP_WRITE_FIXED as u8);
+        assert!(sqe.raw().opcode == uring_sys::io_uring_op::IORING_OP_WRITE_FIXED as u8);
         sqe.set_user_data(0xDEADBEEF);
         io_uring.sq().submit()?;
     }
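---

For reviewers who want to exercise the changed timeout path by hand, below is a small hypothetical sketch (not part of the patch): it calls `wait_for_cqe_with_timeout`, which after this change builds a mutable `__kernel_timespec` and passes a `*mut` pointer down to liburing. It assumes the patched `iou` crate plus `libc` as dev-dependencies; the expectation that an empty ring returns an error (typically `ETIME`) is an assumption about kernel behaviour, not something this diff asserts.

    use std::time::Duration;

    fn main() -> std::io::Result<()> {
        // One small ring; nothing is submitted, so the wait below can only time out.
        let mut ring = iou::IoUring::new(8)?;

        // Drives IoUring::wait_for_cqe_with_timeout, which now hands
        // `&mut ts` (a `*mut __kernel_timespec`) to io_uring_wait_cqes.
        match ring.wait_for_cqe_with_timeout(Duration::from_millis(10)) {
            Ok(cqe) => println!("unexpected completion: user_data = {}", cqe.user_data()),
            Err(err) => println!("timed out as expected: {}", err), // typically ETIME
        }
        Ok(())
    }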