Commit 6bc4a8f

sctp: remove unnecessary drops and use precise padding (#381)

* add comments for QUEUE_BYTES_LIMIT and QUEUE_APPEND_LARGE
* remove unnecessary drop(sem_lock): lock will be dropped automatically
* use PADDING_MULTIPLE instead of 16
  contract: padding_needed <= PADDING_MULTIPLE

Refs #364 and #367
1 parent: 3be403f

3 files changed: 20 additions and 22 deletions

sctp/src/packet.rs

Lines changed: 1 addition & 1 deletion
@@ -165,7 +165,7 @@ impl Packet {
             let padding_needed = get_padding_size(writer.len());
             if padding_needed != 0 {
                 // padding needed if < 4 because we pad to 4
-                writer.extend_from_slice(&[0u8; 16][..padding_needed]);
+                writer.extend_from_slice(&[0u8; PADDING_MULTIPLE][..padding_needed]);
             }
         }
 
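The slice is always in bounds because get_padding_size only returns values in 0..PADDING_MULTIPLE, so padding_needed can never exceed PADDING_MULTIPLE; the old [0u8; 16] buffer was simply larger than it ever needed to be. A minimal standalone sketch of the call site (the writer setup is hypothetical, not code from the crate):

// Sketch only: PADDING_MULTIPLE and get_padding_size copied from sctp/src/util.rs.
const PADDING_MULTIPLE: usize = 4;

fn get_padding_size(len: usize) -> usize {
    // Result is always in 0..PADDING_MULTIPLE.
    (PADDING_MULTIPLE - (len % PADDING_MULTIPLE)) % PADDING_MULTIPLE
}

fn main() {
    // Hypothetical partially written packet buffer (7 bytes -> 1 byte of padding needed).
    let mut writer: Vec<u8> = vec![0xAA; 7];
    let padding_needed = get_padding_size(writer.len());
    if padding_needed != 0 {
        // Slicing a PADDING_MULTIPLE-sized zero array with ..padding_needed cannot panic,
        // which is exactly the contract the commit relies on.
        writer.extend_from_slice(&[0u8; PADDING_MULTIPLE][..padding_needed]);
    }
    assert_eq!(writer.len() % PADDING_MULTIPLE, 0);
}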

sctp/src/queue/pending_queue.rs

Lines changed: 18 additions & 20 deletions
@@ -1,18 +1,28 @@
+use tokio::sync::{Mutex, Semaphore};
+use util::sync::RwLock;
+
 use std::{
     collections::VecDeque,
     sync::atomic::{AtomicBool, AtomicUsize, Ordering},
 };
 
-use tokio::sync::{Mutex, Semaphore};
-use util::sync::RwLock;
-
 use crate::chunk::chunk_payload_data::ChunkPayloadData;
 
+// TODO: benchmark performance between multiple Atomic+Mutex vs one Mutex<PendingQueueInternal>
+
+// Some tests push a lot of data before starting to process any data...
+#[cfg(test)]
+const QUEUE_BYTES_LIMIT: usize = 128 * 1024 * 1024;
+/// Maximum size of the pending queue, in bytes.
+#[cfg(not(test))]
+const QUEUE_BYTES_LIMIT: usize = 128 * 1024;
+/// Total user data size, beyond which the packet will be split into chunks. The chunks will be
+/// added to the pending queue one by one.
+const QUEUE_APPEND_LARGE: usize = (QUEUE_BYTES_LIMIT * 2) / 3;
+
 /// Basic queue for either ordered or unordered chunks.
 pub(crate) type PendingBaseQueue = VecDeque<ChunkPayloadData>;
 
-// TODO: benchmark performance between multiple Atomic+Mutex vs one Mutex<PendingQueueInternal>
-
 /// A queue for both ordered and unordered chunks.
 #[derive(Debug)]
 pub(crate) struct PendingQueue {
@@ -39,14 +49,6 @@ impl Default for PendingQueue {
     }
 }
 
-// Some tests push a lot of data before starting to process any data...
-#[cfg(test)]
-const QUEUE_BYTES_LIMIT: usize = 128 * 1024 * 1024;
-#[cfg(not(test))]
-const QUEUE_BYTES_LIMIT: usize = 128 * 1024;
-
-const QUEUE_APPEND_LARGE: usize = (QUEUE_BYTES_LIMIT * 2) / 3;
-
 impl PendingQueue {
     pub(crate) fn new() -> Self {
         Self {
@@ -66,7 +68,7 @@ impl PendingQueue {
         let user_data_len = c.user_data.len();
 
         {
-            let sem_lock = self.semaphore_lock.lock().await;
+            let _sem_lock = self.semaphore_lock.lock().await;
             let permits = self.semaphore.acquire_many(user_data_len as u32).await;
             // unwrap ok because we never close the semaphore unless we have dropped self
             permits.unwrap().forget();
@@ -78,7 +80,6 @@ impl PendingQueue {
                 let mut ordered_queue = self.ordered_queue.write();
                 ordered_queue.push_back(c);
             }
-            drop(sem_lock);
         }
 
         self.n_bytes.fetch_add(user_data_len, Ordering::SeqCst);
@@ -100,22 +101,21 @@ impl PendingQueue {
         if total_user_data_len >= QUEUE_APPEND_LARGE {
             self.append_large(chunks).await
         } else {
-            let sem_lock = self.semaphore_lock.lock().await;
+            let _sem_lock = self.semaphore_lock.lock().await;
             let permits = self
                 .semaphore
                 .acquire_many(total_user_data_len as u32)
                 .await;
             // unwrap ok because we never close the semaphore unless we have dropped self
             permits.unwrap().forget();
             self.append_unlimited(chunks, total_user_data_len);
-            drop(sem_lock);
         }
     }
 
     // If this is a very large message we append chunks one by one to allow progress while we are appending
     async fn append_large(&self, chunks: Vec<ChunkPayloadData>) {
         // lock this for the whole duration
-        let sem_lock = self.semaphore_lock.lock().await;
+        let _sem_lock = self.semaphore_lock.lock().await;
 
         for chunk in chunks.into_iter() {
             let user_data_len = chunk.user_data.len();
@@ -133,8 +133,6 @@ impl PendingQueue {
             self.n_bytes.fetch_add(user_data_len, Ordering::SeqCst);
             self.queue_len.fetch_add(1, Ordering::SeqCst);
         }
-
-        drop(sem_lock);
     }
 
     /// Assumes that A) enough permits have been acquired and forget from the semaphore and that the semaphore_lock is held
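The removed drop(sem_lock) calls were redundant because a tokio MutexGuard releases its lock when it goes out of scope; renaming the binding to _sem_lock keeps the guard alive until the end of the block while silencing the unused-variable lint (a bare _ pattern would instead release the lock immediately). A small standalone sketch of the pattern, using a hypothetical critical_section function rather than the crate's own API:

use tokio::sync::Mutex;

// Hypothetical helper, not part of the sctp crate: keeps the lock for the whole inner block.
async fn critical_section(lock: &Mutex<()>) {
    {
        // The leading underscore only silences the "unused variable" warning; the guard
        // is still a live binding, so the mutex stays locked until the end of this block.
        let _sem_lock = lock.lock().await;
        // ... work that must happen while the lock is held ...
    } // guard dropped here automatically, so an explicit drop(...) is unnecessary
}

#[tokio::main]
async fn main() {
    let lock = Mutex::new(());
    critical_section(&lock).await;
}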

sctp/src/util.rs

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 use bytes::Bytes;
 use crc::{Crc, CRC_32_ISCSI};
 
-const PADDING_MULTIPLE: usize = 4;
+pub(crate) const PADDING_MULTIPLE: usize = 4;
 
 pub(crate) fn get_padding_size(len: usize) -> usize {
     (PADDING_MULTIPLE - (len % PADDING_MULTIPLE)) % PADDING_MULTIPLE
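As a sanity check on the contract cited in the commit message (padding_needed <= PADDING_MULTIPLE, with the strict bound padding_needed < PADDING_MULTIPLE actually holding), here is a hypothetical test that could sit at the bottom of sctp/src/util.rs; it is not part of this commit:

#[cfg(test)]
mod padding_contract_tests {
    use super::{get_padding_size, PADDING_MULTIPLE};

    // Hypothetical test: every length pads up to the next multiple of PADDING_MULTIPLE,
    // and the padding itself is always strictly smaller than PADDING_MULTIPLE.
    #[test]
    fn padding_is_bounded_and_aligns() {
        for len in 0..64 {
            let padding_needed = get_padding_size(len);
            assert!(padding_needed < PADDING_MULTIPLE);
            assert_eq!((len + padding_needed) % PADDING_MULTIPLE, 0);
        }
    }
}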
