diff --git a/io-uring-bench/src/nop.rs b/io-uring-bench/src/nop.rs index 8eebaf31..198f1622 100644 --- a/io-uring-bench/src/nop.rs +++ b/io-uring-bench/src/nop.rs @@ -41,5 +41,66 @@ fn bench_normal(c: &mut Criterion) { }); } -criterion_group!(squeue, bench_normal); +fn bench_prepare(c: &mut Criterion) { + let mut io_uring = IoUring::new(16).unwrap(); + + c.bench_function("prepare", |b| { + b.iter(|| { + let mut queue = TaskQueue(128); + + while queue.want() { + { + let mut sq = io_uring.submission(); + while queue.want() { + unsafe { + match sq.push_command(&black_box(opcode::Nop::new()), None) { + Ok(_) => queue.pop(), + Err(_) => break, + } + } + } + } + + io_uring.submit_and_wait(16).unwrap(); + + io_uring.completion().map(black_box).for_each(drop); + } + }); + }); +} + +fn bench_prepare_sqe(c: &mut Criterion) { + let mut io_uring = IoUring::new(16).unwrap(); + + c.bench_function("prepare_sqe", |b| { + b.iter(|| { + let mut queue = TaskQueue(128); + + while queue.want() { + { + let mut sq = io_uring.submission(); + while queue.want() { + unsafe { + match sq.get_available_sqe(0) { + Ok(sqe) => { + let nop_sqe: &mut opcode::NopSqe = black_box(sqe.into()); + nop_sqe.prepare(); + sq.move_forward(1); + queue.pop(); + } + Err(_) => break, + }; + } + } + } + + io_uring.submit_and_wait(16).unwrap(); + + io_uring.completion().map(black_box).for_each(drop); + } + }); + }); +} + +criterion_group!(squeue, bench_normal, bench_prepare, bench_prepare_sqe); criterion_main!(squeue); diff --git a/io-uring-test/src/main.rs b/io-uring-test/src/main.rs index adecda05..87bea908 100644 --- a/io-uring-test/src/main.rs +++ b/io-uring-test/src/main.rs @@ -29,6 +29,10 @@ fn main() -> anyhow::Result<()> { tests::queue::test_nop(&mut ring, &test)?; tests::queue::test_queue_split(&mut ring, &test)?; tests::queue::test_debug_print(&mut ring, &test)?; + #[cfg(feature = "unstable")] + tests::queue::test_nop_prepare(&mut ring, &test)?; + #[cfg(feature = "unstable")] + tests::queue::test_nop_prepare_sqe(&mut ring, &test)?; #[cfg(feature = "unstable")] tests::queue::test_batch(&mut ring, &test)?; diff --git a/io-uring-test/src/tests/queue.rs b/io-uring-test/src/tests/queue.rs index c9d2a083..e49e1507 100644 --- a/io-uring-test/src/tests/queue.rs +++ b/io-uring-test/src/tests/queue.rs @@ -1,4 +1,5 @@ use crate::Test; +use io_uring::squeue::SqeCommonOptions; use io_uring::{opcode, IoUring}; pub fn test_nop(ring: &mut IoUring, test: &Test) -> anyhow::Result<()> { @@ -26,6 +27,63 @@ pub fn test_nop(ring: &mut IoUring, test: &Test) -> anyhow::Result<()> { Ok(()) } +#[cfg(feature = "unstable")] +pub fn test_nop_prepare(ring: &mut IoUring, test: &Test) -> anyhow::Result<()> { + require! { + test; + } + + println!("test nop_prepare"); + + let nop = opcode::Nop::new(); + let opt = SqeCommonOptions::default().user_data(0x42); + + unsafe { + let mut queue = ring.submission(); + queue.push_command(&nop, Some(&opt)).expect("queue is full"); + } + + ring.submit_and_wait(1)?; + + let cqes = ring.completion().collect::>(); + + assert_eq!(cqes.len(), 1); + assert_eq!(cqes[0].user_data(), 0x42); + assert_eq!(cqes[0].result(), 0); + + Ok(()) +} + +#[cfg(feature = "unstable")] +pub fn test_nop_prepare_sqe(ring: &mut IoUring, test: &Test) -> anyhow::Result<()> { + require! 
{ + test; + } + + println!("test nop_prepare_sqe"); + + let opt = SqeCommonOptions::default().user_data(0x42); + + unsafe { + let mut queue = ring.submission(); + let sqe = queue.get_available_sqe(0).unwrap(); + let nop_sqe: &mut opcode::NopSqe = sqe.into(); + nop_sqe.prepare(); + opt.set(nop_sqe.get_mut_sqe()); + queue.move_forward(1); + } + + ring.submit_and_wait(1)?; + + let cqes = ring.completion().collect::>(); + + assert_eq!(cqes.len(), 1); + assert_eq!(cqes[0].user_data(), 0x42); + assert_eq!(cqes[0].result(), 0); + + Ok(()) +} + #[cfg(feature = "unstable")] pub fn test_batch(ring: &mut IoUring, test: &Test) -> anyhow::Result<()> { use std::mem::MaybeUninit; diff --git a/src/opcode.rs b/src/opcode.rs index 824fd3ac..b296a456 100644 --- a/src/opcode.rs +++ b/src/opcode.rs @@ -9,14 +9,109 @@ use crate::squeue::Entry; use crate::sys; use crate::types::{self, sealed}; -macro_rules! assign_fd { - ( $sqe:ident . fd = $opfd:expr ) => { - match $opfd { - sealed::Target::Fd(fd) => $sqe.fd = fd, - sealed::Target::Fixed(i) => { - $sqe.fd = i as _; - $sqe.flags |= crate::squeue::Flags::FIXED_FILE.bits(); - } +/// Trait to prepare an SQE from an opcode object. +pub trait PrepareSQE { + /// Prepare an SQE from an opcode object. + fn prepare(&self, sqe: &mut sys::io_uring_sqe); +} + +trait AsSqeValue { + type T; + fn as_sqe_value(&self) -> Self::T; +} + +impl AsSqeValue for sealed::Target { + type T = i32; + + fn as_sqe_value(&self) -> Self::T { + match self { + sealed::Target::Fd(fd) => *fd as _, + sealed::Target::Fixed(i) => *i as _, + } + } +} + +impl AsSqeValue for types::FsyncFlags { + type T = u32; + + fn as_sqe_value(&self) -> Self::T { + self.bits() + } +} + +impl AsSqeValue for types::TimeoutFlags { + type T = u32; + + fn as_sqe_value(&self) -> Self::T { + self.bits() + } +} + +impl AsSqeValue for u64 { + type T = u64; + + fn as_sqe_value(&self) -> Self::T { + *self as Self::T + } +} + +impl AsSqeValue for i64 { + type T = u64; + + fn as_sqe_value(&self) -> Self::T { + *self as Self::T + } +} + +impl AsSqeValue for u32 { + type T = u32; + + fn as_sqe_value(&self) -> Self::T { + *self + } +} + +impl AsSqeValue for i32 { + type T = i32; + + fn as_sqe_value(&self) -> Self::T { + *self + } +} + +impl AsSqeValue for u16 { + type T = u16; + + fn as_sqe_value(&self) -> Self::T { + *self + } +} + +impl AsSqeValue for *const U { + type T = u64; + + fn as_sqe_value(&self) -> Self::T { + *self as _ + } +} + +impl AsSqeValue for *mut U { + type T = u64; + + fn as_sqe_value(&self) -> Self::T { + *self as _ + } +} + +macro_rules! set_fd_flags { + ( $sqe:ident, $opfd:expr ) => { + if matches!($opfd, sealed::Target::Fixed(_)) { + $sqe.flags |= crate::squeue::Flags::FIXED_FILE.bits(); + } + }; + ( $self:ident . $sqe:ident, $opfd:expr ) => { + if matches!($opfd, sealed::Target::Fixed(_)) { + $self.$sqe.flags |= crate::squeue::Flags::FIXED_FILE.bits(); } }; } @@ -36,7 +131,7 @@ macro_rules! opcode { pub struct $name:ident { $( #[$new_meta:meta] )* - $( $field:ident : { $( $tnt:tt )+ } ),* + $( $field:ident : { $( $tnt:tt )+ } => sqe $(. $sqe_field:ident)+ ),* $(,)? @@ -44,16 +139,21 @@ macro_rules! opcode { $( $( #[$opt_meta:meta] )* - $opt_field:ident : $opt_tname:ty = $default:expr + $opt_field:ident : $opt_tname:ty = $default:expr => sqe $(. $opt_sqe_field:ident)+ ),* $(,)? 
} + $( #[$outer_sqe:meta] )* + pub struct $name_sqe:ident { } + pub const CODE = $opcode:expr; - $( #[$build_meta:meta] )* - pub fn build($self:ident) -> Entry $build_block:block + fn set_special_fields(&mut $self:ident, $sqe1:ident : &mut sys::io_uring_sqe) { + $( op: $set_op_special_fields:block )? + $( op_sqe: $set_op_sqe_special_fields:block )? + } ) => { $( #[$outer] )* pub struct $name { @@ -62,6 +162,11 @@ macro_rules! opcode { } impl $name { + /// The opcode of the operation. This can be passed to + /// [`Probe::is_supported`](crate::Probe::is_supported) to check if this operation is + /// supported with the current kernel. + pub const CODE: u8 = $opcode as _; + $( #[$new_meta] )* #[inline] pub fn new($( $field : $( $tnt )* ),*) -> Self { @@ -71,30 +176,82 @@ macro_rules! opcode { } } + $( + $( #[$opt_meta] )* + #[inline] + pub const fn $opt_field(mut self, $opt_field: $opt_tname) -> Self { + self.$opt_field = $opt_field; + self + } + )* + + #[inline] + pub fn build(self) -> Entry { + let mut sqe = sqe_zeroed(); + self.prepare(&mut sqe); + Entry(sqe) + } + } + + impl PrepareSQE for $name { + #[inline] + fn prepare(& $self, $sqe1: &mut sys::io_uring_sqe) { + $sqe1 .opcode = $name::CODE; + $( $sqe1 $(. $sqe_field)* = $self. $field .as_sqe_value() as _; )* + $( $sqe1 $(. $opt_sqe_field)* = $self. $opt_field .as_sqe_value() as _; )* + $( $set_op_special_fields )? + } + } + + $( #[$outer_sqe] )* + #[repr(transparent)] + pub struct $name_sqe { + sqe: sys::io_uring_sqe, + } + + impl $name_sqe { /// The opcode of the operation. This can be passed to /// [`Probe::is_supported`](crate::Probe::is_supported) to check if this operation is /// supported with the current kernel. pub const CODE: u8 = $opcode as _; + #[inline] + pub fn prepare(&mut $self, $( $field : opcode!(@type $( $tnt )*), )* ) { + $self.sqe.opcode = Self::CODE; + $( $self.sqe $(. $sqe_field)* = $field .as_sqe_value() as _; )* + $( $self.sqe $(. $opt_sqe_field)* = $default .as_sqe_value() as _; )* + $( $set_op_sqe_special_fields )? + } + + #[inline] + pub fn get_mut_sqe(&mut self) -> &mut sys::io_uring_sqe { + &mut self.sqe + } + $( $( #[$opt_meta] )* #[inline] - pub const fn $opt_field(mut self, $opt_field: $opt_tname) -> Self { - self.$opt_field = $opt_field; + pub fn $opt_field(mut self, $opt_field: $opt_tname) -> Self { + self.sqe$(. $opt_sqe_field )* = $opt_field .as_sqe_value() as _; self } )* + } - $( #[$build_meta] )* + impl<'a> From<&'a mut sys::io_uring_sqe> for &'a mut $name_sqe { #[inline] - pub fn build($self) -> Entry $build_block + fn from(sqe: &'a mut sys::io_uring_sqe) -> &'a mut $name_sqe { + unsafe { + mem::transmute(sqe) + } + } } } } /// inline zeroed to improve codegen #[inline(always)] -fn sqe_zeroed() -> sys::io_uring_sqe { +pub(crate) fn sqe_zeroed() -> sys::io_uring_sqe { unsafe { std::mem::zeroed() } } @@ -105,15 +262,16 @@ opcode!( #[derive(Debug)] pub struct Nop { ;; } - pub const CODE = sys::IORING_OP_NOP; + /// Do not perform any I/O. + /// + /// This is useful for testing the performance of the io_uring implementation itself. + pub struct NopSqe { } - pub fn build(self) -> Entry { - let Nop {} = self; + pub const CODE = sys::IORING_OP_NOP; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = -1; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { sqe.fd = -1; } + op_sqe: { self.sqe.fd = -1; } } ); @@ -121,35 +279,25 @@ opcode!( /// Vectored read, equivalent to `preadv2(2)`. 
#[derive(Debug)] pub struct Readv { - fd: { impl sealed::UseFixed }, - iovec: { *const libc::iovec }, - len: { u32 }, + fd: { impl sealed::UseFixed } => sqe.fd, + iovec: { *const libc::iovec } => sqe.__bindgen_anon_2.addr, + len: { u32 } => sqe.len, ;; - ioprio: u16 = 0, - offset: libc::off_t = 0, + ioprio: u16 = 0u16 => sqe.ioprio, + offset: libc::off_t = 0i64 => sqe.__bindgen_anon_1.off, /// specified for read operations, contains a bitwise OR of per-I/O flags, /// as described in the `preadv2(2)` man page. - rw_flags: types::RwFlags = 0 + rw_flags: types::RwFlags = 0i32 => sqe.__bindgen_anon_3.rw_flags, } + /// Vectored read, equivalent to `preadv2(2)`. + pub struct ReadvSqe { } + pub const CODE = sys::IORING_OP_READV; - pub fn build(self) -> Entry { - let Readv { - fd, - iovec, len, offset, - ioprio, rw_flags - } = self; - - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.ioprio = ioprio; - sqe.__bindgen_anon_2.addr = iovec as _; - sqe.len = len; - sqe.__bindgen_anon_1.off = offset as _; - sqe.__bindgen_anon_3.rw_flags = rw_flags; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); @@ -157,35 +305,25 @@ opcode!( /// Vectored write, equivalent to `pwritev2(2)`. #[derive(Debug)] pub struct Writev { - fd: { impl sealed::UseFixed }, - iovec: { *const libc::iovec }, - len: { u32 }, + fd: { impl sealed::UseFixed } => sqe.fd, + iovec: { *const libc::iovec } => sqe.__bindgen_anon_2.addr, + len: { u32 } => sqe.len, ;; - ioprio: u16 = 0, - offset: libc::off_t = 0, + ioprio: u16 = 0u16 => sqe.ioprio, + offset: libc::off_t = 0i64 => sqe.__bindgen_anon_1.off, /// specified for write operations, contains a bitwise OR of per-I/O flags, /// as described in the `preadv2(2)` man page. - rw_flags: types::RwFlags = 0 + rw_flags: types::RwFlags = 0i32 => sqe.__bindgen_anon_3.rw_flags, } + /// Vectored write, equivalent to `pwritev2(2)`. + pub struct WritevSqe { } + pub const CODE = sys::IORING_OP_WRITEV; - pub fn build(self) -> Entry { - let Writev { - fd, - iovec, len, offset, - ioprio, rw_flags - } = self; - - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.ioprio = ioprio; - sqe.__bindgen_anon_2.addr = iovec as _; - sqe.len = len; - sqe.__bindgen_anon_1.off = offset as _; - sqe.__bindgen_anon_3.rw_flags = rw_flags; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); @@ -200,24 +338,29 @@ opcode!( /// the fsync. #[derive(Debug)] pub struct Fsync { - fd: { impl sealed::UseFixed }, + fd: { impl sealed::UseFixed } => sqe.fd, ;; /// The `flags` bit mask may contain either 0, for a normal file integrity sync, /// or [types::FsyncFlags::DATASYNC] to provide data sync only semantics. /// See the descriptions of `O_SYNC` and `O_DSYNC` in the `open(2)` manual page for more information. - flags: types::FsyncFlags = types::FsyncFlags::empty() + flags: types::FsyncFlags = types::FsyncFlags::empty() => sqe.__bindgen_anon_3.fsync_flags, } - pub const CODE = sys::IORING_OP_FSYNC; + /// File sync, equivalent to `fsync(2)`. + /// + /// Note that, while I/O is initiated in the order in which it appears in the submission queue, + /// completions are unordered. For example, an application which places a write I/O followed by + /// an fsync in the submission queue cannot expect the fsync to apply to the write. 
The two + /// operations execute in parallel, so the fsync may complete before the write is issued to the + /// storage. The same is also true for previously issued writes that have not completed prior to + /// the fsync. + pub struct FsyncSqe { } - pub fn build(self) -> Entry { - let Fsync { fd, flags } = self; + pub const CODE = sys::IORING_OP_FSYNC; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.__bindgen_anon_3.fsync_flags = flags.bits(); - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); @@ -230,38 +373,29 @@ opcode!( pub struct ReadFixed { /// The `buf_index` is an index into an array of fixed buffers, /// and is only valid if fixed buffers were registered. - fd: { impl sealed::UseFixed }, - buf: { *mut u8 }, - len: { u32 }, - buf_index: { u16 }, + fd: { impl sealed::UseFixed } => sqe.fd, + buf: { *mut u8 } => sqe.__bindgen_anon_2.addr, + len: { u32 } => sqe.len, + buf_index: { u16 } => sqe.__bindgen_anon_4.buf_index, ;; - offset: libc::off_t = 0, - ioprio: u16 = 0, + offset: libc::off_t = 0i64 => sqe.__bindgen_anon_1.off, + ioprio: u16 = 0u16 => sqe.ioprio, /// specified for read operations, contains a bitwise OR of per-I/O flags, /// as described in the `preadv2(2)` man page. - rw_flags: types::RwFlags = 0 + rw_flags: types::RwFlags = 0i32 => sqe.__bindgen_anon_3.rw_flags, } + /// Read from pre-mapped buffers that have been previously registered with + /// [`Submitter::register_buffers`](crate::Submitter::register_buffers). + /// + /// The return values match those documented in the `preadv2(2)` man pages. + pub struct ReadFixedSqe { } + pub const CODE = sys::IORING_OP_READ_FIXED; - pub fn build(self) -> Entry { - let ReadFixed { - fd, - buf, len, offset, - buf_index, - ioprio, rw_flags - } = self; - - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.ioprio = ioprio; - sqe.__bindgen_anon_2.addr = buf as _; - sqe.len = len; - sqe.__bindgen_anon_1.off = offset as _; - sqe.__bindgen_anon_3.rw_flags = rw_flags; - sqe.__bindgen_anon_4.buf_index = buf_index; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); @@ -274,38 +408,29 @@ opcode!( pub struct WriteFixed { /// The `buf_index` is an index into an array of fixed buffers, /// and is only valid if fixed buffers were registered. - fd: { impl sealed::UseFixed }, - buf: { *const u8 }, - len: { u32 }, - buf_index: { u16 }, + fd: { impl sealed::UseFixed } => sqe.fd, + buf: { *const u8 } => sqe.__bindgen_anon_2.addr, + len: { u32 } => sqe.len, + buf_index: { u16 } => sqe.__bindgen_anon_4.buf_index, ;; - ioprio: u16 = 0, - offset: libc::off_t = 0, + ioprio: u16 = 0u16 => sqe.ioprio, + offset: libc::off_t = 0i64 => sqe.__bindgen_anon_1.off, /// specified for write operations, contains a bitwise OR of per-I/O flags, /// as described in the `preadv2(2)` man page. - rw_flags: types::RwFlags = 0 + rw_flags: types::RwFlags = 0i32 => sqe.__bindgen_anon_3.rw_flags, } + /// Write to pre-mapped buffers that have been previously registered with + /// [`Submitter::register_buffers`](crate::Submitter::register_buffers). + /// + /// The return values match those documented in the `pwritev2(2)` man pages. 
+ pub struct WriteFixedSqe { } + pub const CODE = sys::IORING_OP_WRITE_FIXED; - pub fn build(self) -> Entry { - let WriteFixed { - fd, - buf, len, offset, - buf_index, - ioprio, rw_flags - } = self; - - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.ioprio = ioprio; - sqe.__bindgen_anon_2.addr = buf as _; - sqe.len = len; - sqe.__bindgen_anon_1.off = offset as _; - sqe.__bindgen_anon_3.rw_flags = rw_flags; - sqe.__bindgen_anon_4.buf_index = buf_index; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); @@ -318,32 +443,38 @@ opcode!( pub struct PollAdd { /// The bits that may be set in `flags` are defined in ``, /// and documented in `poll(2)`. - fd: { impl sealed::UseFixed }, - flags: { u32 } + fd: { impl sealed::UseFixed } => sqe.fd, + flags: { u32 } => sqe.__bindgen_anon_3.poll32_events ;; } - pub const CODE = sys::IORING_OP_POLL_ADD; - - pub fn build(self) -> Entry { - let PollAdd { fd, flags } = self; + /// Poll the specified fd. + /// + /// Unlike poll or epoll without `EPOLLONESHOT`, this interface always works in one shot mode. + /// That is, once the poll operation is completed, it will have to be resubmitted. + pub struct PollAddSqe { } - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); + pub const CODE = sys::IORING_OP_POLL_ADD; - #[cfg(target_endian = "little")] { - sqe.__bindgen_anon_3.poll32_events = flags; + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { + set_fd_flags!(sqe, self.fd); + #[cfg(target_endian = "big")] { + let x = self.flags << 16; + let y = self.flags >> 16; + let flags = x | y; + sqe.__bindgen_anon_3.poll32_events = flags; + } } - - #[cfg(target_endian = "big")] { - let x = flags << 16; - let y = flags >> 16; - let flags = x | y; - sqe.__bindgen_anon_3.poll32_events = flags; + op_sqe: { + set_fd_flags!(self.sqe, fd); + #[cfg(target_endian = "big")] { + let x = flags << 16; + let y = flags >> 16; + let flags = x | y; + self.sqe.__bindgen_anon_3.poll32_events = flags; + } } - - Entry(sqe) } ); @@ -354,20 +485,21 @@ opcode!( /// If not found, `result` will return `-libc::ENOENT`. #[derive(Debug)] pub struct PollRemove { - user_data: { u64 } + user_data: { u64 } => sqe.__bindgen_anon_2.addr, ;; } - pub const CODE = sys::IORING_OP_POLL_REMOVE; + /// Remove an existing [poll](PollAdd) request. + /// + /// If found, the `result` method of the `cqueue::Entry` will return 0. + /// If not found, `result` will return `-libc::ENOENT`. + pub struct PollRemoveSqe { } - pub fn build(self) -> Entry { - let PollRemove { user_data } = self; + pub const CODE = sys::IORING_OP_POLL_REMOVE; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = -1; - sqe.__bindgen_anon_2.addr = user_data as _; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { sqe.fd = -1; } + op_sqe: { self.sqe.fd = -1; } } ); @@ -375,31 +507,23 @@ opcode!( /// Sync a file segment with disk, equivalent to `sync_file_range(2)`. 
#[derive(Debug)] pub struct SyncFileRange { - fd: { impl sealed::UseFixed }, - len: { u32 }, + fd: { impl sealed::UseFixed } => sqe.fd, + len: { u32 } => sqe.len, ;; /// the offset method holds the offset in bytes - offset: libc::off64_t = 0, + offset: libc::off64_t = 0i64 => sqe.__bindgen_anon_1.off, /// the flags method holds the flags for the command - flags: u32 = 0 + flags: u32 = 0u32 => sqe.__bindgen_anon_3.sync_range_flags, } + /// Sync a file segment with disk, equivalent to `sync_file_range(2)`. + pub struct SyncFileRangeSqe { } + pub const CODE = sys::IORING_OP_SYNC_FILE_RANGE; - pub fn build(self) -> Entry { - let SyncFileRange { - fd, - len, offset, - flags - } = self; - - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.len = len as _; - sqe.__bindgen_anon_1.off = offset as _; - sqe.__bindgen_anon_3.sync_range_flags = flags; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); @@ -410,26 +534,30 @@ opcode!( /// structure, and flags holds the flags associated with the system call. #[derive(Debug)] pub struct SendMsg { - fd: { impl sealed::UseFixed }, - msg: { *const libc::msghdr }, + fd: { impl sealed::UseFixed } => sqe.fd, + msg: { *const libc::msghdr } => sqe.__bindgen_anon_2.addr, ;; - ioprio: u16 = 0, - flags: u32 = 0 + ioprio: u16 = 0u16 => sqe.ioprio, + flags: u32 = 0u32 => sqe.__bindgen_anon_3.msg_flags, } - pub const CODE = sys::IORING_OP_SENDMSG; + /// Send a message on a socket, equivalent to `send(2)`. + /// + /// fd must be set to the socket file descriptor, addr must contains a pointer to the msghdr + /// structure, and flags holds the flags associated with the system call. + pub struct SendMsgSqe { } - pub fn build(self) -> Entry { - let SendMsg { fd, msg, ioprio, flags } = self; + pub const CODE = sys::IORING_OP_SENDMSG; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.ioprio = ioprio; - sqe.__bindgen_anon_2.addr = msg as _; - sqe.len = 1; - sqe.__bindgen_anon_3.msg_flags = flags; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { + set_fd_flags!(sqe, self.fd); + sqe.len = 1; + } + op_sqe: { + set_fd_flags!(self.sqe, fd); + self.sqe.len = 1; + } } ); @@ -439,26 +567,29 @@ opcode!( /// See also the description of [`SendMsg`]. #[derive(Debug)] pub struct RecvMsg { - fd: { impl sealed::UseFixed }, - msg: { *mut libc::msghdr }, + fd: { impl sealed::UseFixed } => sqe.fd, + msg: { *mut libc::msghdr } => sqe.__bindgen_anon_2.addr, ;; - ioprio: u16 = 0, - flags: u32 = 0 + ioprio: u16 = 0u16 => sqe.ioprio, + flags: u32 = 0u32 => sqe.__bindgen_anon_3.msg_flags, } - pub const CODE = sys::IORING_OP_RECVMSG; + /// Receive a message on a socket, equivalent to `recvmsg(2)`. + /// + /// See also the description of [`SendMsg`]. 
+ pub struct RecvMsgSqe { } - pub fn build(self) -> Entry { - let RecvMsg { fd, msg, ioprio, flags } = self; + pub const CODE = sys::IORING_OP_RECVMSG; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.ioprio = ioprio; - sqe.__bindgen_anon_2.addr = msg as _; - sqe.len = 1; - sqe.__bindgen_anon_3.msg_flags = flags; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { + set_fd_flags!(sqe, self.fd); + sqe.len = 1; + } + op_sqe: { + set_fd_flags!(self.sqe, fd); + self.sqe.len = 1; + } } ); @@ -473,28 +604,37 @@ opcode!( /// If the timeout was cancelled before it expired, the request will complete with `-ECANCELED`. #[derive(Debug)] pub struct Timeout { - timespec: { *const types::Timespec }, + timespec: { *const types::Timespec } => sqe.__bindgen_anon_2.addr, ;; /// `count` may contain a completion event count. - count: u32 = 0, + count: u32 = 0u32 => sqe.__bindgen_anon_1.off, /// `flags` may contain [types::TimeoutFlags::ABS] for an absolute timeout value, or 0 for a relative timeout. - flags: types::TimeoutFlags = types::TimeoutFlags::empty() + flags: types::TimeoutFlags = types::TimeoutFlags::empty() => sqe.__bindgen_anon_3.timeout_flags, } - pub const CODE = sys::IORING_OP_TIMEOUT; + /// Register a timeout operation. + /// + /// A timeout will trigger a wakeup event on the completion ring for anyone waiting for events. + /// A timeout condition is met when either the specified timeout expires, or the specified number of events have completed. + /// Either condition will trigger the event. + /// The request will complete with `-ETIME` if the timeout got completed through expiration of the timer, + /// or 0 if the timeout got completed through requests completing on their own. + /// If the timeout was cancelled before it expired, the request will complete with `-ECANCELED`. + pub struct TimeoutSqe { } - pub fn build(self) -> Entry { - let Timeout { timespec, count, flags } = self; + pub const CODE = sys::IORING_OP_TIMEOUT; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = -1; - sqe.__bindgen_anon_2.addr = timespec as _; - sqe.len = 1; - sqe.__bindgen_anon_1.off = count as _; - sqe.__bindgen_anon_3.timeout_flags = flags.bits(); - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { + sqe.fd = -1; + sqe.len = 1; + } + op_sqe: { + let sqe = &mut self.sqe; + sqe.fd = -1; + sqe.len = 1; + } } ); @@ -503,69 +643,60 @@ opcode!( opcode!( /// Attempt to remove an existing [timeout operation](Timeout). pub struct TimeoutRemove { - user_data: { u64 }, + user_data: { u64 } => sqe.__bindgen_anon_2.addr, ;; - flags: types::TimeoutFlags = types::TimeoutFlags::empty() + flags: types::TimeoutFlags = types::TimeoutFlags::empty() => sqe.__bindgen_anon_3.timeout_flags, } - pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE; + /// Attempt to remove an existing [timeout operation](Timeout). + pub struct TimeoutRemoveSqe { } - pub fn build(self) -> Entry { - let TimeoutRemove { user_data, flags } = self; + pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = -1; - sqe.__bindgen_anon_2.addr = user_data as _; - sqe.__bindgen_anon_3.timeout_flags = flags.bits(); - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { sqe.fd = -1; } + op_sqe: { self.sqe.fd = -1; } } ); opcode!( /// Accept a new connection on a socket, equivalent to `accept4(2)`. 
pub struct Accept { - fd: { impl sealed::UseFixed }, - addr: { *mut libc::sockaddr }, - addrlen: { *mut libc::socklen_t }, + fd: { impl sealed::UseFixed } => sqe.fd, + addr: { *mut libc::sockaddr } => sqe.__bindgen_anon_2.addr, + addrlen: { *mut libc::socklen_t } => sqe.__bindgen_anon_1.addr2, ;; - flags: i32 = 0 + flags: i32 = 0i32 => sqe.__bindgen_anon_3.accept_flags, } - pub const CODE = sys::IORING_OP_ACCEPT; + /// Accept a new connection on a socket, equivalent to `accept4(2)`. + pub struct AcceptSqe { } - pub fn build(self) -> Entry { - let Accept { fd, addr, addrlen, flags } = self; + pub const CODE = sys::IORING_OP_ACCEPT; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.__bindgen_anon_2.addr = addr as _; - sqe.__bindgen_anon_1.addr2 = addrlen as _; - sqe.__bindgen_anon_3.accept_flags = flags as _; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); opcode!( /// Attempt to cancel an already issued request. pub struct AsyncCancel { - user_data: { u64 } + user_data: { u64 } => sqe.__bindgen_anon_2.addr, ;; // TODO flags } - pub const CODE = sys::IORING_OP_ASYNC_CANCEL; + /// Attempt to cancel an already issued request. + pub struct AsyncCancelSqe { } - pub fn build(self) -> Entry { - let AsyncCancel { user_data } = self; + pub const CODE = sys::IORING_OP_ASYNC_CANCEL; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = -1; - sqe.__bindgen_anon_2.addr = user_data as _; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { sqe.fd = -1; } + op_sqe: { self.sqe.fd = -1; } } ); @@ -574,46 +705,47 @@ opcode!( /// [`Flags::IO_LINK`](crate::squeue::Flags::IO_LINK) which is described below. /// Unlike [`Timeout`], [`LinkTimeout`] acts on the linked request, not the completion queue. pub struct LinkTimeout { - timespec: { *const types::Timespec }, + timespec: { *const types::Timespec } => sqe.__bindgen_anon_2.addr, ;; - flags: types::TimeoutFlags = types::TimeoutFlags::empty() + flags: types::TimeoutFlags = types::TimeoutFlags::empty() => sqe.__bindgen_anon_3.timeout_flags, } - pub const CODE = sys::IORING_OP_LINK_TIMEOUT; + /// This request must be linked with another request through + /// [`Flags::IO_LINK`](crate::squeue::Flags::IO_LINK) which is described below. + /// Unlike [`Timeout`], [`LinkTimeout`] acts on the linked request, not the completion queue. + pub struct LinkTimeoutSqe { } - pub fn build(self) -> Entry { - let LinkTimeout { timespec, flags } = self; + pub const CODE = sys::IORING_OP_LINK_TIMEOUT; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = -1; - sqe.__bindgen_anon_2.addr = timespec as _; - sqe.len = 1; - sqe.__bindgen_anon_3.timeout_flags = flags.bits(); - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { + sqe.fd = -1; + sqe.len = 1; + } + op_sqe: { + self.sqe.fd = -1; + self.sqe.fd = 1; + } } ); opcode!( /// Connect a socket, equivalent to `connect(2)`. pub struct Connect { - fd: { impl sealed::UseFixed }, - addr: { *const libc::sockaddr }, - addrlen: { libc::socklen_t } + fd: { impl sealed::UseFixed } => sqe.fd, + addr: { *const libc::sockaddr } => sqe.__bindgen_anon_2.addr, + addrlen: { libc::socklen_t } => sqe.__bindgen_anon_1.off, ;; } - pub const CODE = sys::IORING_OP_CONNECT; + /// Connect a socket, equivalent to `connect(2)`. 
+ pub struct ConnectSqe { } - pub fn build(self) -> Entry { - let Connect { fd, addr, addrlen } = self; + pub const CODE = sys::IORING_OP_CONNECT; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.__bindgen_anon_2.addr = addr as _; - sqe.__bindgen_anon_1.off = addrlen as _; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); @@ -622,70 +754,55 @@ opcode!( opcode!( /// Preallocate or deallocate space to a file, equivalent to `fallocate(2)`. pub struct Fallocate { - fd: { impl sealed::UseFixed }, - len: { libc::off_t }, + fd: { impl sealed::UseFixed } => sqe.fd, + len: { libc::off_t } => sqe.__bindgen_anon_2.addr, ;; - offset: libc::off_t = 0, - mode: i32 = 0 + offset: libc::off_t = 0i64 => sqe.__bindgen_anon_1.off, + mode: i32 = 0i32 => sqe.len, } - pub const CODE = sys::IORING_OP_FALLOCATE; + /// Preallocate or deallocate space to a file, equivalent to `fallocate(2)`. + pub struct FallocateSqe { } - pub fn build(self) -> Entry { - let Fallocate { fd, len, offset, mode } = self; + pub const CODE = sys::IORING_OP_FALLOCATE; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.__bindgen_anon_2.addr = len as _; - sqe.len = mode as _; - sqe.__bindgen_anon_1.off = offset as _; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); opcode!( /// Open a file, equivalent to `openat(2)`. pub struct OpenAt { - dirfd: { impl sealed::UseFd }, - pathname: { *const libc::c_char }, + dirfd: { impl sealed::UseFd } => sqe.fd, + pathname: { *const libc::c_char } => sqe.__bindgen_anon_2.addr, ;; - flags: i32 = 0, - mode: libc::mode_t = 0 + flags: i32 = 0i32 => sqe.__bindgen_anon_3.open_flags, + mode: libc::mode_t = 0u32 => sqe.len, } - pub const CODE = sys::IORING_OP_OPENAT; + /// Open a file, equivalent to `openat(2)`. + pub struct OpenAtSqe { } - pub fn build(self) -> Entry { - let OpenAt { dirfd, pathname, flags, mode } = self; + pub const CODE = sys::IORING_OP_OPENAT; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = dirfd; - sqe.__bindgen_anon_2.addr = pathname as _; - sqe.len = mode; - sqe.__bindgen_anon_3.open_flags = flags as _; - Entry(sqe) - } + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { } ); opcode!( /// Close a file descriptor, equivalent to `close(2)`. pub struct Close { - fd: { impl sealed::UseFd } + fd: { impl sealed::UseFd } => sqe.fd, ;; } - pub const CODE = sys::IORING_OP_CLOSE; + /// Close a file descriptor, equivalent to `close(2)`. + pub struct CloseSqe { } - pub fn build(self) -> Entry { - let Close { fd } = self; + pub const CODE = sys::IORING_OP_CLOSE; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = fd; - Entry(sqe) - } + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { } ); opcode!( @@ -693,55 +810,42 @@ opcode!( /// [`Submitter::register_files_update`](crate::Submitter::register_files_update) which then /// works in an async fashion, like the rest of the io_uring commands. 
pub struct FilesUpdate { - fds: { *const RawFd }, - len: { u32 }, + fds: { *const RawFd } => sqe.__bindgen_anon_2.addr, + len: { u32 } => sqe.len, ;; - offset: i32 = 0 + offset: i32 = 0i32 => sqe.__bindgen_anon_1.off, } - pub const CODE = sys::IORING_OP_FILES_UPDATE; + /// This command is an alternative to using + /// [`Submitter::register_files_update`](crate::Submitter::register_files_update) which then + /// works in an async fashion, like the rest of the io_uring commands. + pub struct FilesUpdateSqe { } - pub fn build(self) -> Entry { - let FilesUpdate { fds, len, offset } = self; + pub const CODE = sys::IORING_OP_FILES_UPDATE; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = -1; - sqe.__bindgen_anon_2.addr = fds as _; - sqe.len = len; - sqe.__bindgen_anon_1.off = offset as _; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { sqe.fd = -1; } + op_sqe: { self.sqe.fd = -1; } } ); opcode!( /// Get file status, equivalent to `statx(2)`. pub struct Statx { - dirfd: { impl sealed::UseFd }, - pathname: { *const libc::c_char }, - statxbuf: { *mut types::statx }, + dirfd: { impl sealed::UseFd } => sqe.fd, + pathname: { *const libc::c_char } => sqe.__bindgen_anon_2.addr, + statxbuf: { *mut types::statx } => sqe.__bindgen_anon_1.off, ;; - flags: i32 = 0, - mask: u32 = 0 + flags: i32 = 0i32 => sqe.__bindgen_anon_3.statx_flags, + mask: u32 = 0u32 => sqe.len, } + /// Get file status, equivalent to `statx(2)`. + pub struct StatxSqe { } + pub const CODE = sys::IORING_OP_STATX; - pub fn build(self) -> Entry { - let Statx { - dirfd, pathname, statxbuf, - flags, mask - } = self; - - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = dirfd; - sqe.__bindgen_anon_2.addr = pathname as _; - sqe.len = mask; - sqe.__bindgen_anon_1.off = statxbuf as _; - sqe.__bindgen_anon_3.statx_flags = flags as _; - Entry(sqe) - } + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { } ); opcode!( @@ -756,41 +860,38 @@ opcode!( /// /// Available since 5.6. pub struct Read { - fd: { impl sealed::UseFixed }, - buf: { *mut u8 }, - len: { u32 }, + fd: { impl sealed::UseFixed } => sqe.fd, + buf: { *mut u8 } => sqe.__bindgen_anon_2.addr, + len: { u32 } => sqe.len, ;; /// `offset` contains the read or write offset. /// /// If `fd` does not refer to a seekable file, `offset` must be set to zero. /// If `offsett` is set to `-1`, the offset will use (and advance) the file position, /// like the `read(2)` and `write(2)` system calls. - offset: libc::off_t = 0, - ioprio: u16 = 0, - rw_flags: types::RwFlags = 0, - buf_group: u16 = 0 + offset: libc::off_t = 0i64 => sqe.__bindgen_anon_1.off, + ioprio: u16 = 0u16 => sqe.ioprio, + rw_flags: types::RwFlags = 0i32 => sqe.__bindgen_anon_3.rw_flags, + buf_group: u16 = 0u16 => sqe.__bindgen_anon_4.buf_group, } + /// Issue the equivalent of a `pread(2)` or `pwrite(2)` system call + /// + /// * `fd` is the file descriptor to be operated on, + /// * `addr` contains the buffer in question, + /// * `len` contains the length of the IO operation, + /// + /// These are non-vectored versions of the `IORING_OP_READV` and `IORING_OP_WRITEV` opcodes. + /// See also `read(2)` and `write(2)` for the general description of the related system call. + /// + /// Available since 5.6. 
+ pub struct ReadSqe { } + pub const CODE = sys::IORING_OP_READ; - pub fn build(self) -> Entry { - let Read { - fd, - buf, len, offset, - ioprio, rw_flags, - buf_group - } = self; - - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.ioprio = ioprio; - sqe.__bindgen_anon_2.addr = buf as _; - sqe.len = len; - sqe.__bindgen_anon_1.off = offset as _; - sqe.__bindgen_anon_3.rw_flags = rw_flags; - sqe.__bindgen_anon_4.buf_group = buf_group; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); @@ -806,188 +907,162 @@ opcode!( /// /// Available since 5.6. pub struct Write { - fd: { impl sealed::UseFixed }, - buf: { *const u8 }, - len: { u32 }, + fd: { impl sealed::UseFixed } => sqe.fd, + buf: { *const u8 } => sqe.__bindgen_anon_2.addr, + len: { u32 } => sqe.len, ;; /// `offset` contains the read or write offset. /// /// If `fd` does not refer to a seekable file, `offset` must be set to zero. /// If `offsett` is set to `-1`, the offset will use (and advance) the file position, /// like the `read(2)` and `write(2)` system calls. - offset: libc::off_t = 0, - ioprio: u16 = 0, - rw_flags: types::RwFlags = 0 + offset: libc::off_t = 0i64 => sqe.__bindgen_anon_1.off, + ioprio: u16 = 0u16 => sqe.ioprio, + rw_flags: types::RwFlags = 0i32 => sqe.__bindgen_anon_3.rw_flags, } + /// Issue the equivalent of a `pread(2)` or `pwrite(2)` system call + /// + /// * `fd` is the file descriptor to be operated on, + /// * `addr` contains the buffer in question, + /// * `len` contains the length of the IO operation, + /// + /// These are non-vectored versions of the `IORING_OP_READV` and `IORING_OP_WRITEV` opcodes. + /// See also `read(2)` and `write(2)` for the general description of the related system call. + /// + /// Available since 5.6. + pub struct WriteSqe { } + pub const CODE = sys::IORING_OP_WRITE; - pub fn build(self) -> Entry { - let Write { - fd, - buf, len, offset, - ioprio, rw_flags - } = self; - - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.ioprio = ioprio; - sqe.__bindgen_anon_2.addr = buf as _; - sqe.len = len; - sqe.__bindgen_anon_1.off = offset as _; - sqe.__bindgen_anon_3.rw_flags = rw_flags; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); opcode!( /// Predeclare an access pattern for file data, equivalent to `posix_fadvise(2)`. pub struct Fadvise { - fd: { impl sealed::UseFixed }, - len: { libc::off_t }, - advice: { i32 }, + fd: { impl sealed::UseFixed } => sqe.fd, + len: { libc::off_t } => sqe.len, + advice: { i32 } => sqe.__bindgen_anon_3.fadvise_advice, ;; - offset: libc::off_t = 0, + offset: libc::off_t = 0i64 => sqe.__bindgen_anon_1.off, } - pub const CODE = sys::IORING_OP_FADVISE; + /// Predeclare an access pattern for file data, equivalent to `posix_fadvise(2)`. 
+ pub struct FadviseSqe { } - pub fn build(self) -> Entry { - let Fadvise { fd, len, advice, offset } = self; + pub const CODE = sys::IORING_OP_FADVISE; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.len = len as _; - sqe.__bindgen_anon_1.off = offset as _; - sqe.__bindgen_anon_3.fadvise_advice = advice as _; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); opcode!( /// Give advice about use of memory, equivalent to `madvise(2)`. pub struct Madvise { - addr: { *const libc::c_void }, - len: { libc::off_t }, - advice: { i32 }, + addr: { *const libc::c_void } => sqe.__bindgen_anon_2.addr, + len: { libc::off_t } => sqe.len, + advice: { i32 } => sqe.__bindgen_anon_3.fadvise_advice, ;; } - pub const CODE = sys::IORING_OP_MADVISE; + /// Give advice about use of memory, equivalent to `madvise(2)`. + pub struct MadviseSqe { } - pub fn build(self) -> Entry { - let Madvise { addr, len, advice } = self; + pub const CODE = sys::IORING_OP_MADVISE; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = -1; - sqe.__bindgen_anon_2.addr = addr as _; - sqe.len = len as _; - sqe.__bindgen_anon_3.fadvise_advice = advice as _; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { sqe.fd = -1; } + op_sqe: { self.sqe.fd = -1; } } ); opcode!( /// Send a message on a socket, equivalent to `send(2)`. pub struct Send { - fd: { impl sealed::UseFixed }, - buf: { *const u8 }, - len: { u32 }, + fd: { impl sealed::UseFixed } => sqe.fd, + buf: { *const u8 } => sqe.__bindgen_anon_2.addr, + len: { u32 } => sqe.len, ;; - flags: i32 = 0 + flags: i32 = 0i32 => sqe.__bindgen_anon_3.msg_flags, } - pub const CODE = sys::IORING_OP_SEND; + /// Send a message on a socket, equivalent to `send(2)`. + pub struct SendSqe { } - pub fn build(self) -> Entry { - let Send { fd, buf, len, flags } = self; + pub const CODE = sys::IORING_OP_SEND; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.__bindgen_anon_2.addr = buf as _; - sqe.len = len; - sqe.__bindgen_anon_3.msg_flags = flags as _; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); opcode!( /// Receive a message from a socket, equivalent to `recv(2)`. pub struct Recv { - fd: { impl sealed::UseFixed }, - buf: { *mut u8 }, - len: { u32 }, + fd: { impl sealed::UseFixed } => sqe.fd, + buf: { *mut u8 } => sqe.__bindgen_anon_2.addr, + len: { u32 } => sqe.len, ;; - flags: i32 = 0, - buf_group: u16 = 0 + flags: i32 = 0i32 => sqe.__bindgen_anon_3.msg_flags, + buf_group: u16 = 0u16 => sqe.__bindgen_anon_4.buf_group, } - pub const CODE = sys::IORING_OP_RECV; + /// Receive a message from a socket, equivalent to `recv(2)`. + pub struct RecvSqe { } - pub fn build(self) -> Entry { - let Recv { fd, buf, len, flags, buf_group } = self; + pub const CODE = sys::IORING_OP_RECV; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.__bindgen_anon_2.addr = buf as _; - sqe.len = len; - sqe.__bindgen_anon_3.msg_flags = flags as _; - sqe.__bindgen_anon_4.buf_group = buf_group; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); opcode!( /// Open a file, equivalent to `openat2(2)`. 
pub struct OpenAt2 { - dirfd: { impl sealed::UseFd }, - pathname: { *const libc::c_char }, - how: { *const types::OpenHow } + dirfd: { impl sealed::UseFd } => sqe.fd, + pathname: { *const libc::c_char } => sqe.__bindgen_anon_2.addr, + how: { *const types::OpenHow } => sqe.__bindgen_anon_1.off, ;; } - pub const CODE = sys::IORING_OP_OPENAT2; + /// Open a file, equivalent to `openat2(2)`. + pub struct OpenAt2Sqe { } - pub fn build(self) -> Entry { - let OpenAt2 { dirfd, pathname, how } = self; + pub const CODE = sys::IORING_OP_OPENAT2; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = dirfd; - sqe.__bindgen_anon_2.addr = pathname as _; - sqe.len = mem::size_of::() as _; - sqe.__bindgen_anon_1.off = how as _; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { sqe.len = mem::size_of::() as _; } + op_sqe: { self.sqe.len = mem::size_of::() as _;} } ); opcode!( /// Modify an epoll file descriptor, equivalent to `epoll_ctl(2)`. pub struct EpollCtl { - epfd: { impl sealed::UseFixed }, - fd: { impl sealed::UseFd }, - op: { i32 }, - ev: { *const types::epoll_event }, + epfd: { impl sealed::UseFixed } => sqe.fd, + fd: { impl sealed::UseFd } => sqe.__bindgen_anon_1.off, + op: { i32 } => sqe.len, + ev: { *const types::epoll_event } => sqe.__bindgen_anon_2.addr, ;; } - pub const CODE = sys::IORING_OP_EPOLL_CTL; + /// Modify an epoll file descriptor, equivalent to `epoll_ctl(2)`. + pub struct EpollCtlSqe { } - pub fn build(self) -> Entry { - let EpollCtl { epfd, fd, op, ev } = self; + pub const CODE = sys::IORING_OP_EPOLL_CTL; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = epfd); - sqe.__bindgen_anon_2.addr = ev as _; - sqe.len = op as _; - sqe.__bindgen_anon_1.off = fd as _; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.epfd); } + op_sqe: { set_fd_flags!(self.sqe, epfd); } } ); @@ -999,38 +1074,39 @@ opcode!( /// if `fd_in` refers to a pipe, `off_in` must be `-1`; /// The description of `off_in` also applied to `off_out`. pub struct Splice { - fd_in: { impl sealed::UseFixed }, - off_in: { i64 }, - fd_out: { impl sealed::UseFixed }, - off_out: { i64 }, - len: { u32 }, + fd_in: { impl sealed::UseFixed } => sqe.__bindgen_anon_5.splice_fd_in, + off_in: { i64 } => sqe.__bindgen_anon_2.splice_off_in, + fd_out: { impl sealed::UseFixed } => sqe.fd, + off_out: { i64 } => sqe.__bindgen_anon_1.off, + len: { u32 } => sqe.len, ;; /// see man `splice(2)` for description of flags. - flags: u32 = 0 + flags: u32 = 0u32 => sqe.__bindgen_anon_3.splice_flags, } - pub const CODE = sys::IORING_OP_SPLICE; - - pub fn build(self) -> Entry { - let Splice { fd_in, off_in, fd_out, off_out, len, mut flags } = self; + /// Splice data to/from a pipe, equivalent to `splice(2)`. + /// + /// if `fd_in` refers to a pipe, `off_in` must be `-1`; + /// The description of `off_in` also applied to `off_out`. 
+ pub struct SpliceSqe { } - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd_out); - sqe.len = len; - sqe.__bindgen_anon_1.off = off_out as _; + pub const CODE = sys::IORING_OP_SPLICE; - sqe.__bindgen_anon_5.splice_fd_in = match fd_in { - sealed::Target::Fd(fd) => fd, - sealed::Target::Fixed(i) => { - flags |= sys::SPLICE_F_FD_IN_FIXED; - i as _ + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { + set_fd_flags!(sqe, self.fd_out); + if matches!(self.fd_in, sealed::Target::Fixed(_)) { + sqe.__bindgen_anon_3.splice_flags = self.flags | sys::SPLICE_F_FD_IN_FIXED; } - }; - - sqe.__bindgen_anon_2.splice_off_in = off_in as _; - sqe.__bindgen_anon_3.splice_flags = flags; - Entry(sqe) + } + op_sqe: { + set_fd_flags!(self.sqe, fd_out); + if matches!(fd_in, sealed::Target::Fixed(_)) { + unsafe { + self.sqe.__bindgen_anon_3.splice_flags |= sys::SPLICE_F_FD_IN_FIXED; + } + } + } } ); @@ -1042,28 +1118,24 @@ opcode!( /// /// Requires the `unstable` feature. pub struct ProvideBuffers { - addr: { *mut u8 }, - len: { i32 }, - nbufs: { u16 }, - bgid: { u16 }, - bid: { u16 } + addr: { *mut u8 } => sqe.__bindgen_anon_2.addr, + len: { i32 } => sqe.len, + nbufs: { u16 } => sqe.fd, + bgid: { u16 } => sqe.__bindgen_anon_4.buf_group, + bid: { u16 } => sqe.__bindgen_anon_1.off, ;; } - pub const CODE = sys::IORING_OP_PROVIDE_BUFFERS; + /// Register `nbufs` buffers that each have the length `len` with ids starting from `big` in the + /// group `bgid` that can be used for any request. See + /// [`BUFFER_SELECT`](crate::squeue::Flags::BUFFER_SELECT) for more info. + /// + /// Requires the `unstable` feature. + pub struct ProvideBuffersSqe { } - pub fn build(self) -> Entry { - let ProvideBuffers { addr, len, nbufs, bgid, bid } = self; + pub const CODE = sys::IORING_OP_PROVIDE_BUFFERS; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = nbufs as _; - sqe.__bindgen_anon_2.addr = addr as _; - sqe.len = len as _; - sqe.__bindgen_anon_1.off = bid as _; - sqe.__bindgen_anon_4.buf_group = bgid; - Entry(sqe) - } + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { } ); #[cfg(feature = "unstable")] @@ -1073,22 +1145,20 @@ opcode!( /// /// Requires the `unstable` feature. pub struct RemoveBuffers { - nbufs: { u16 }, - bgid: { u16 } + nbufs: { u16 } => sqe.fd, + bgid: { u16 } => sqe.__bindgen_anon_4.buf_group, ;; } - pub const CODE = sys::IORING_OP_REMOVE_BUFFERS; + /// Remove some number of buffers from a buffer group. See + /// [`BUFFER_SELECT`](crate::squeue::Flags::BUFFER_SELECT) for more info. + /// + /// Requires the `unstable` feature. + pub struct RemoveBuffersSqe { } - pub fn build(self) -> Entry { - let RemoveBuffers { nbufs, bgid } = self; + pub const CODE = sys::IORING_OP_REMOVE_BUFFERS; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = nbufs as _; - sqe.__bindgen_anon_4.buf_group = bgid; - Entry(sqe) - } + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { } ); // === 5.8 === @@ -1099,35 +1169,35 @@ opcode!( /// /// Requires the `unstable` feature. 
pub struct Tee { - fd_in: { impl sealed::UseFixed }, - fd_out: { impl sealed::UseFixed }, - len: { u32 } + fd_in: { impl sealed::UseFixed } => sqe.__bindgen_anon_5.splice_fd_in, + fd_out: { impl sealed::UseFixed } => sqe.fd, + len: { u32 } => sqe.len, ;; - flags: u32 = 0 + flags: u32 = 0 => sqe.__bindgen_anon_3.splice_flags, } - pub const CODE = sys::IORING_OP_TEE; - - pub fn build(self) -> Entry { - let Tee { fd_in, fd_out, len, mut flags } = self; - - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; + /// Duplicate pipe content, equivalent to `tee(2)`. + /// + /// Requires the `unstable` feature. + pub struct TeeSqe { } - assign_fd!(sqe.fd = fd_out); - sqe.len = len; + pub const CODE = sys::IORING_OP_TEE; - sqe.__bindgen_anon_5.splice_fd_in = match fd_in { - sealed::Target::Fd(fd) => fd, - sealed::Target::Fixed(i) => { - flags |= sys::SPLICE_F_FD_IN_FIXED; - i as _ + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { + set_fd_flags!(sqe, self.fd_out); + if matches!(self.fd_in, sealed::Target::Fixed(_)) { + sqe.__bindgen_anon_3.splice_flags = self.flags | sys::SPLICE_F_FD_IN_FIXED; } - }; - - sqe.__bindgen_anon_3.splice_flags = flags; - - Entry(sqe) + } + op_sqe: { + set_fd_flags!(self.sqe, fd_out); + if matches!(fd_in, sealed::Target::Fixed(_)) { + unsafe { + self.sqe.__bindgen_anon_3.splice_flags |= sys::SPLICE_F_FD_IN_FIXED; + } + } + } } ); @@ -1136,76 +1206,53 @@ opcode!( #[cfg(feature = "unstable")] opcode!( pub struct Shutdown { - fd: { impl sealed::UseFixed }, - how: { i32 }, + fd: { impl sealed::UseFixed } => sqe.fd, + how: { i32 } => sqe.len, ;; } - pub const CODE = sys::IORING_OP_SHUTDOWN; + pub struct ShutdownSqe { } - pub fn build(self) -> Entry { - let Shutdown { fd, how } = self; + pub const CODE = sys::IORING_OP_SHUTDOWN; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - assign_fd!(sqe.fd = fd); - sqe.len = how as _; - Entry(sqe) + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { + op: { set_fd_flags!(sqe, self.fd); } + op_sqe: { set_fd_flags!(self.sqe, fd); } } ); #[cfg(feature = "unstable")] opcode!( pub struct RenameAt { - olddirfd: { impl sealed::UseFd }, - oldpath: { *const libc::c_char }, - newdirfd: { impl sealed::UseFd }, - newpath: { *const libc::c_char }, + olddirfd: { impl sealed::UseFd } => sqe.fd, + oldpath: { *const libc::c_char } => sqe.__bindgen_anon_2.addr, + newdirfd: { impl sealed::UseFd } => sqe.len, + newpath: { *const libc::c_char } => sqe.__bindgen_anon_1.off, ;; - flags: u32 = 0 + flags: u32 = 0u32 => sqe.__bindgen_anon_3.rename_flags, } + pub struct RenameAtSqe { } + pub const CODE = sys::IORING_OP_RENAMEAT; - pub fn build(self) -> Entry { - let RenameAt { - olddirfd, oldpath, - newdirfd, newpath, - flags - } = self; - - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = olddirfd; - sqe.__bindgen_anon_2.addr = oldpath as _; - sqe.len = newdirfd as _; - sqe.__bindgen_anon_1.off = newpath as _; - sqe.__bindgen_anon_3.rename_flags = flags; - Entry(sqe) - } + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { } ); #[cfg(feature = "unstable")] opcode!( pub struct UnlinkAt { - dirfd: { impl sealed::UseFd }, - pathname: { *const libc::c_char }, + dirfd: { impl sealed::UseFd } => sqe.fd, + pathname: { *const libc::c_char } => sqe.__bindgen_anon_2.addr, ;; - flags: i32 = 0 + flags: i32 = 0i32 => sqe.__bindgen_anon_3.unlink_flags, } - pub const CODE = sys::IORING_OP_UNLINKAT; + pub struct UnlinkAtSqe { } - pub fn build(self) -> Entry { - let UnlinkAt { dirfd, pathname, flags } = 
self; + pub const CODE = sys::IORING_OP_UNLINKAT; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = dirfd; - sqe.__bindgen_anon_2.addr = pathname as _; - sqe.__bindgen_anon_3.unlink_flags = flags as _; - Entry(sqe) - } + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { } ); // === 5.15 === @@ -1216,24 +1263,17 @@ opcode!( /// /// Requires the `unstable` feature. pub struct MkDirAt { - dirfd: { impl sealed::UseFd }, - pathname: { *const libc::c_char }, + dirfd: { impl sealed::UseFd } => sqe.fd, + pathname: { *const libc::c_char } => sqe.__bindgen_anon_2.addr, ;; - mode: libc::mode_t = 0 + mode: libc::mode_t = 0u32 => sqe.len, } - pub const CODE = sys::IORING_OP_MKDIRAT; + pub struct MkDirAtSqe { } - pub fn build(self) -> Entry { - let MkDirAt { dirfd, pathname, mode } = self; + pub const CODE = sys::IORING_OP_MKDIRAT; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = dirfd; - sqe.__bindgen_anon_2.addr = pathname as _; - sqe.len = mode; - Entry(sqe) - } + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { } ); #[cfg(feature = "unstable")] @@ -1242,24 +1282,20 @@ opcode!( /// /// Requires the `unstable` feature. pub struct SymlinkAt { - newdirfd: { impl sealed::UseFd }, - target: { *const libc::c_char }, - linkpath: { *const libc::c_char }, + newdirfd: { impl sealed::UseFd } => sqe.fd, + target: { *const libc::c_char } => sqe.__bindgen_anon_2.addr, + linkpath: { *const libc::c_char } => sqe.__bindgen_anon_1.addr2, ;; } - pub const CODE = sys::IORING_OP_SYMLINKAT; + /// Create a symlink, equivalent to `symlinkat2(2)`. + /// + /// Requires the `unstable` feature. + pub struct SymlinkAtSqe { } - pub fn build(self) -> Entry { - let SymlinkAt { newdirfd, target, linkpath } = self; + pub const CODE = sys::IORING_OP_SYMLINKAT; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = newdirfd; - sqe.__bindgen_anon_2.addr = target as _; - sqe.__bindgen_anon_1.addr2 = linkpath as _; - Entry(sqe) - } + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { } ); #[cfg(feature = "unstable")] @@ -1268,26 +1304,20 @@ opcode!( /// /// Requires the `unstable` feature. pub struct LinkAt { - olddirfd: { impl sealed::UseFd }, - oldpath: { *const libc::c_char }, - newdirfd: { impl sealed::UseFd }, - newpath: { *const libc::c_char }, + olddirfd: { impl sealed::UseFd } => sqe.fd, + oldpath: { *const libc::c_char } => sqe.__bindgen_anon_2.addr, + newdirfd: { impl sealed::UseFd } => sqe.len, + newpath: { *const libc::c_char } => sqe.__bindgen_anon_1.addr2 , ;; - flags: i32 = 0 + flags: i32 = 0i32 => sqe.__bindgen_anon_3.hardlink_flags, } - pub const CODE = sys::IORING_OP_LINKAT; + /// Create a hard link, equivalent to `linkat2(2)`. + /// + /// Requires the `unstable` feature. 
+ pub struct LinkAtSqe { } - pub fn build(self) -> Entry { - let LinkAt { olddirfd, oldpath, newdirfd, newpath, flags } = self; + pub const CODE = sys::IORING_OP_LINKAT; - let mut sqe = sqe_zeroed(); - sqe.opcode = Self::CODE; - sqe.fd = olddirfd as _; - sqe.__bindgen_anon_2.addr = oldpath as _; - sqe.len = newdirfd as _; - sqe.__bindgen_anon_1.addr2 = newpath as _; - sqe.__bindgen_anon_3.hardlink_flags = flags as _; - Entry(sqe) - } + fn set_special_fields(&mut self, sqe: &mut sys::io_uring_sqe) { } ); diff --git a/src/squeue.rs b/src/squeue.rs index 2afaa241..1eb408c2 100644 --- a/src/squeue.rs +++ b/src/squeue.rs @@ -4,6 +4,9 @@ use std::error::Error; use std::fmt::{self, Debug, Display, Formatter}; use std::sync::atomic; +use crate::opcode::sqe_zeroed; +#[cfg(feature = "unstable")] +use crate::opcode::PrepareSQE; use crate::sys; use crate::util::{unsync_load, Mmap}; @@ -34,6 +37,15 @@ pub struct SubmissionQueue<'a> { #[derive(Clone)] pub struct Entry(pub(crate) sys::io_uring_sqe); +/// Common options for Submission Queue Entry. +#[derive(Clone, Copy, Default, Debug)] +pub struct SqeCommonOptions { + pub user_data: u64, + pub personality: u16, + pub flags_set: u8, + pub flags_clear: u8, +} + bitflags! { /// Submission flags pub struct Flags: u8 { @@ -212,16 +224,10 @@ impl SubmissionQueue<'_> { /// be valid for the entire duration of the operation, otherwise it may cause memory problems. #[inline] pub unsafe fn push(&mut self, Entry(entry): &Entry) -> Result<(), PushError> { - if !self.is_full() { - *self - .queue - .sqes - .add((self.tail & self.queue.ring_mask) as usize) = *entry; - self.tail = self.tail.wrapping_add(1); - Ok(()) - } else { - Err(PushError) - } + let sqe = self.get_available_sqe(0)?; + *sqe = *entry; + self.move_forward(1); + Ok(()) } /// Attempts to push several [entries](Entry) into the queue. @@ -240,15 +246,134 @@ impl SubmissionQueue<'_> { } for Entry(entry) in entries { - *self + let sqe = self.get_available_sqe(0)?; + *sqe = *entry; + self.move_forward(1); + } + + Ok(()) + } +} + +#[cfg(not(feature = "unstable"))] +impl SubmissionQueue<'_> { + /// Try to get a mutable reference to the SQE at `next_available + offset`. + /// + /// # Safety + /// + /// The returned mutable reference has been zeroed-out. Developers must ensure that legal + /// values are set onto the SQE and call `move_forward()` to commit the prepared SQE. + #[inline] + unsafe fn get_available_sqe( + &mut self, + offset: u32, + ) -> Result<&mut sys::io_uring_sqe, PushError> { + if self.capacity() - self.len() <= offset as usize { + Err(PushError) + } else { + let sqe = &mut *self .queue .sqes - .add((self.tail & self.queue.ring_mask) as usize) = *entry; - self.tail = self.tail.wrapping_add(1); + .add((self.tail.wrapping_add(offset) & self.queue.ring_mask) as usize); + *sqe = sqe_zeroed(); + Ok(sqe) + } + } + + /// Move the submission queue forward by `count` steps. + /// + /// # Safety + /// + /// Developers must ensure that `count` is valid and the next `count` SQEs have been correctly + /// initialized. + #[inline] + unsafe fn move_forward(&mut self, count: u32) { + self.tail = self.tail.wrapping_add(count); + } +} + +#[cfg(feature = "unstable")] +impl SubmissionQueue<'_> { + /// Attempts to push an opcode into the submission queue. + /// If the queue is full, an error is returned. 
+ /// + /// # Safety + /// + /// Developers must ensure that parameters of the opcode (such as buffer) are valid and will + /// be valid for the entire duration of the operation, otherwise it may cause memory problems. + #[inline] + pub unsafe fn push_command<'a, T: PrepareSQE>( + &'a mut self, + opcode: &T, + options: Option<&SqeCommonOptions>, + ) -> Result<(), PushError> { + let sqe = self.get_available_sqe(0)?; + opcode.prepare(sqe); + options.map(|v| v.set(sqe)); + self.move_forward(1); + Ok(()) + } + + /// Attempts to push several opcodes into the queue. + /// If the queue does not have space for all of the entries, an error is returned. + /// + /// # Safety + /// + /// Developers must ensure that parameters of all the entries (such as buffer) are valid and + /// will be valid for the entire duration of the operation, otherwise it may cause memory + /// problems. + #[inline] + pub unsafe fn push_commands<'a, T: PrepareSQE>( + &'a mut self, + ops: &[(T, Option<&SqeCommonOptions>)], + ) -> Result<(), PushError> { + if self.capacity() - self.len() < ops.len() { + return Err(PushError); + } + + for (opcode, options) in ops { + let sqe = self.get_available_sqe(0)?; + opcode.prepare(sqe); + options.map(|v| v.set(sqe)); + self.move_forward(1); } Ok(()) } + + /// Try to get a mutable reference to the SQE at `next_available + offset`. + /// + /// # Safety + /// + /// The returned mutable reference has been zeroed-out. Developers must ensure that legal + /// values are set onto the SQE and call `move_forward()` to commit the prepared SQE. + #[inline] + pub unsafe fn get_available_sqe( + &mut self, + offset: u32, + ) -> Result<&mut sys::io_uring_sqe, PushError> { + if self.capacity() - self.len() <= offset as usize { + Err(PushError) + } else { + let sqe = &mut *self + .queue + .sqes + .add((self.tail.wrapping_add(offset) & self.queue.ring_mask) as usize); + *sqe = sqe_zeroed(); + Ok(sqe) + } + } + + /// Move the submission queue forward by `count` steps. + /// + /// # Safety + /// + /// Developers must ensure that `count` is valid and the next `count` SQEs have been correctly + /// initialized. + #[inline] + pub unsafe fn move_forward(&mut self, count: u32) { + self.tail = self.tail.wrapping_add(count); + } } impl Drop for SubmissionQueue<'_> { @@ -285,6 +410,60 @@ impl Entry { } } +impl SqeCommonOptions { + /// Create a new instance of `OptionValues`. + pub fn new(user_data: u64, personality: u16, flags_set: Flags, flags_clear: Flags) -> Self { + SqeCommonOptions { + user_data, + personality, + flags_set: flags_set.bits(), + flags_clear: flags_clear.bits(), + } + } + + /// Set the user data. + /// + /// This is an application-supplied value that will be passed straight through into the + /// [completion queue entry](crate::cqueue::Entry::user_data). + #[inline] + pub fn user_data(mut self, user_data: u64) -> Self { + self.user_data = user_data; + self + } + + /// Set the personality of this event. + /// + /// You can obtain a personality using + /// [`Submitter::register_personality`](crate::Submitter::register_personality). + #[inline] + pub fn personality(mut self, personality: u16) -> Self { + self.personality = personality; + self + } + + /// Mark the flags to set on the submission event's [flags](Flags). + #[inline] + pub fn set_flags(mut self, flags: Flags) -> Self { + self.flags_set |= flags.bits(); + self + } + + /// Mark the flags to cleared on the submission event's [flags](Flags). 
+ #[inline] + pub fn clear_flags(mut self, flags: Flags) -> Self { + self.flags_clear |= flags.bits(); + self + } + + /// Set common options for a submission queue entry. + pub fn set(&self, sqe: &mut sys::io_uring_sqe) { + sqe.personality = self.personality; + sqe.user_data = self.user_data; + sqe.flags |= self.flags_set; + sqe.flags &= !self.flags_clear; + } +} + /// An error pushing to the submission queue due to it being full. #[derive(Debug, Clone, PartialEq, Eq)] #[non_exhaustive]
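
For reference, the `unstable` submission path introduced by this patch can be driven end to end the same way the new test_nop_prepare test above does. The following is a minimal sketch, assuming the `unstable` feature is enabled and using only the APIs added in this patch (SqeCommonOptions and SubmissionQueue::push_command); it is an illustration, not part of the patch itself.

    use io_uring::{opcode, squeue::SqeCommonOptions, IoUring};

    fn main() -> std::io::Result<()> {
        let mut ring = IoUring::new(8)?;

        // Prepare a no-op request and the common per-SQE options
        // (user_data is echoed back on the completion entry).
        let nop = opcode::Nop::new();
        let opt = SqeCommonOptions::default().user_data(0x42);

        // push_command prepares the SQE directly in the ring instead of
        // building a squeue::Entry first and copying it in with push().
        unsafe {
            let mut sq = ring.submission();
            sq.push_command(&nop, Some(&opt))
                .expect("submission queue is full");
        }

        ring.submit_and_wait(1)?;

        // The completion entry carries the user data back unchanged,
        // and a successful Nop completes with result 0.
        let cqe = ring.completion().next().expect("completion queue is empty");
        assert_eq!(cqe.user_data(), 0x42);
        assert_eq!(cqe.result(), 0);
        Ok(())
    }

The zero-copy variant goes through get_available_sqe()/move_forward() with the generated *Sqe wrappers (NopSqe, ReadvSqe, and so on), as exercised by test_nop_prepare_sqe above.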