Remove unsafe (#47)
* remove unsafe, bump deps, fix demos
Billy Messenger authored Dec 30, 2023
1 parent 75020d7 commit 643f58b
Showing 25 changed files with 234 additions and 278 deletions.
12 changes: 12 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,17 @@
# Version History

## Version 1.2.0 (2023-12-28)

### Breaking changes:

- The trait methods `Decoder::decode()` and `Encoder::encode()` are no longer marked unsafe. Refer to the new documentation on how these trait methods should be implemented.

### Other changes:

- Removed all unsafe code
- Bumped `rtrb` to version "0.3.0"
- Updated demos to use latest version of `egui`

## Version 1.1.0 (2023-07-11)

- Added the ability to decode MP4/AAC/ALAC files
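For readers skimming the changelog, a hedged sketch of what the breaking change looks like at the trait level (the `Encoder::encode()` change follows the same pattern). The stand-in `DataBlock` and the associated types are pared down to the pieces visible in the diffs below; only the removal of `unsafe` and the empty-channel contract are taken from this commit.

```rust
/// Stand-in for creek's `DataBlock<T>`; only the per-channel storage matters here.
pub struct DataBlock<T> {
    pub block: Vec<Vec<T>>,
}

/// The 1.1.x shape: channel data could be allocated but uninitialized, so the
/// implementor had to uphold an unsafe "always fill the block" contract.
pub trait DecoderOld {
    type T: Copy + Default + Send;
    type FatalError;
    unsafe fn decode(
        &mut self,
        data_block: &mut DataBlock<Self::T>,
    ) -> Result<(), Self::FatalError>;
}

/// The 1.2.0 shape: each channel `Vec` arrives empty and the implementor simply
/// appends decoded frames, so the method is safe to call and to implement.
pub trait DecoderNew {
    type T: Copy + Default + Send;
    type FatalError;
    fn decode(
        &mut self,
        data_block: &mut DataBlock<Self::T>,
    ) -> Result<(), Self::FatalError>;
}
```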
8 changes: 4 additions & 4 deletions Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "creek"
version = "1.1.1"
version = "1.2.0"
authors = ["Billy Messenger <[email protected]>"]
edition = "2021"
license = "MIT OR Apache-2.0"
@@ -57,9 +57,9 @@ decode-all = [
encode-wav = ["creek-encode-wav"]

[dependencies]
creek-core = { version = "0.1.1", path = "core" }
creek-decode-symphonia = { version = "0.2.1", path = "decode_symphonia", optional = true }
creek-encode-wav = { version = "0.1.1", path = "encode_wav", optional = true }
creek-core = { version = "0.2.0", path = "core" }
creek-decode-symphonia = { version = "0.3.0", path = "decode_symphonia", optional = true }
creek-encode-wav = { version = "0.2.0", path = "encode_wav", optional = true }

# Unoptimized builds result in prominent gaps of silence after cache misses in the demo player.
[profile.dev]
4 changes: 2 additions & 2 deletions core/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "creek-core"
version = "0.1.1"
version = "0.2.0"
authors = ["Billy Messenger <[email protected]>"]
edition = "2021"
license = "MIT OR Apache-2.0"
@@ -13,4 +13,4 @@ repository = "https://github.com/RustyDAW/creek"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
rtrb = "0.2"
rtrb = "0.3.0"
1 change: 1 addition & 0 deletions core/src/lib.rs
@@ -3,6 +3,7 @@
// TODO: #![warn(clippy::missing_panics_doc)]
#![warn(clippy::clone_on_ref_ptr)]
#![deny(trivial_numeric_casts)]
#![forbid(unsafe_code)]

use std::time;

40 changes: 22 additions & 18 deletions core/src/read/data.rs
@@ -4,22 +4,27 @@ pub struct DataBlock<T: Copy + Clone + Default + Send> {
}

impl<T: Copy + Clone + Default + Send> DataBlock<T> {
/// # Safety
///
/// Using an allocated but uninitialized [`Vec`] is safe because the block data
/// will be always filled before it is sent to be read by the client.
pub fn new(num_channels: usize, block_size: usize) -> Self {
let mut block: Vec<Vec<T>> = Vec::with_capacity(num_channels);
for _ in 0..num_channels {
let mut data: Vec<T> = Vec::with_capacity(block_size);
#[allow(clippy::uninit_vec)] // TODO
unsafe {
data.set_len(block_size)
};
block.push(data);
DataBlock {
block: (0..num_channels)
.map(|_| Vec::with_capacity(block_size))
.collect(),
}
}

pub fn clear(&mut self) {
for ch in self.block.iter_mut() {
ch.clear();
}
}

DataBlock { block }
pub(crate) fn ensure_correct_size(&mut self, block_size: usize) {
// If the decoder didn't fill enough frames, then fill the rest with zeros.
for ch in self.block.iter_mut() {
if ch.len() < block_size {
ch.resize(block_size, Default::default());
}
}
}
}

@@ -29,12 +34,11 @@ pub(crate) struct DataBlockCache<T: Copy + Clone + Default + Send> {

impl<T: Copy + Clone + Default + Send> DataBlockCache<T> {
pub(crate) fn new(num_channels: usize, num_prefetch_blocks: usize, block_size: usize) -> Self {
let mut blocks: Vec<DataBlock<T>> = Vec::with_capacity(num_prefetch_blocks);
for _ in 0..num_prefetch_blocks {
blocks.push(DataBlock::new(num_channels, block_size));
Self {
blocks: (0..num_prefetch_blocks)
.map(|_| DataBlock::new(num_channels, block_size))
.collect(),
}

Self { blocks }
}
}

24 changes: 10 additions & 14 deletions core/src/read/decoder.rs
@@ -52,22 +52,18 @@ pub trait Decoder: Sized + 'static {
/// set the read position to the end of the file instead of returning an error.
fn seek(&mut self, frame: usize) -> Result<(), Self::FatalError>;

/// Decode data into the `data_block` starting from the read position. This is streaming,
/// meaning the next call to `decode()` should pick up where the previous left off.
/// Decode data into the `data_block` starting from your current internal read position.
/// This is streaming, meaning the next call to `decode()` should pick up where the
/// previous one left off.
///
/// If the end of the file is reached, fill data up to the end of the file, then set the
/// read position to the last frame in the file and do nothing.
/// Fill each channel in the data block with `block_size` frames (you should
/// have gotten this value from `Decoder::new()`). If there isn't enough data left
/// because the end of the file has been reached, then only fill however many frames
/// are left. If the end of the file was already reached in a previous call to
/// `decode()`, then do nothing.
///
/// # Safety
///
/// This is marked as `unsafe` because a `data_block` may be uninitialized, causing
/// undefined behavior if data is not filled into the block. It is your responsibility to
/// always fill the block (unless the end of the file is reached, in which case the server
/// will tell the client to not read data past that frame).
unsafe fn decode(
&mut self,
data_block: &mut DataBlock<Self::T>,
) -> Result<(), Self::FatalError>;
/// When this method is called, each channel `Vec` in `data_block` will have a length of zero.
fn decode(&mut self, data_block: &mut DataBlock<Self::T>) -> Result<(), Self::FatalError>;

/// Return the current read position.
fn current_frame(&self) -> usize;
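The new documentation above leaves the fill pattern implicit, so here is a minimal sketch of it under stated assumptions: the block's channels are reachable as a `&mut [Vec<f32>]` (they arrive with length zero), `block_size` is the value returned from `Decoder::new()`, and the decoded source samples are already sitting in per-channel `Vec`s. This illustrates the contract; it is not creek's real decoder code.

```rust
/// Append up to `block_size` frames per channel, fewer if the end of the file
/// is reached, and nothing if the end was already reached earlier. The read
/// server pads any short channels with zeros afterwards (see
/// `ensure_correct_size()` in this diff).
fn fill_block(
    channels: &mut [Vec<f32>], // stands in for the data block's channel Vecs
    source: &[Vec<f32>],       // hypothetical decoded samples, one Vec per channel
    read_pos: &mut usize,      // current read position in frames
    block_size: usize,
) {
    let frames_left = source
        .first()
        .map(|ch| ch.len().saturating_sub(*read_pos))
        .unwrap_or(0);
    let frames_to_copy = frames_left.min(block_size);

    for (dst, src) in channels.iter_mut().zip(source.iter()) {
        // Channels start empty, so appending is all that is required.
        dst.extend_from_slice(&src[*read_pos..*read_pos + frames_to_copy]);
    }
    *read_pos += frames_to_copy;
}
```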
82 changes: 43 additions & 39 deletions core/src/read/read_stream.rs
@@ -767,6 +767,8 @@ impl<D: Decoder> ReadDiskStream<D> {
// This check should never fail because it can only be `None` in the destructor.
let heap = self.heap_data.as_mut().unwrap();

heap.read_buffer.clear();

// Get the first block of data.
let current_block_data = {
let current_block = &heap.prefetch_buffer[self.current_block_index];
@@ -791,22 +793,21 @@
}
};

for i in 0..heap.read_buffer.block.len() {
let read_buffer_part = &mut heap.read_buffer.block[i][0..first_len];

if let Some(block) = current_block_data {
let from_buffer_part = &block.block[i]
[self.current_frame_in_block..self.current_frame_in_block + first_len];

read_buffer_part.copy_from_slice(from_buffer_part);
} else {
// Output silence.
read_buffer_part[..first_len].fill_with(Default::default);
};
if let Some(block) = current_block_data {
for (read_buffer_ch, block_ch) in
heap.read_buffer.block.iter_mut().zip(block.block.iter())
{
read_buffer_ch.extend_from_slice(
&block_ch[self.current_frame_in_block
..self.current_frame_in_block + first_len],
);
}
} else {
// Output silence.
for ch in heap.read_buffer.block.iter_mut() {
ch.resize(ch.len() + first_len, Default::default());
}
}

// Keep this from growing indefinitely.
//self.current_block_start_frame = current_block_start_frame;
}

self.advance_to_next_block()?;
@@ -840,18 +841,17 @@
}
};

for i in 0..heap.read_buffer.block.len() {
let read_buffer_part =
&mut heap.read_buffer.block[i][first_len..first_len + second_len];

if let Some(block) = next_block_data {
let from_buffer_part = &block.block[i][0..second_len];

read_buffer_part.copy_from_slice(from_buffer_part);
} else {
// Output silence.
read_buffer_part[..second_len].fill_with(Default::default);
};
if let Some(block) = next_block_data {
for (read_buffer_ch, block_ch) in
heap.read_buffer.block.iter_mut().zip(block.block.iter())
{
read_buffer_ch.extend_from_slice(&block_ch[0..second_len]);
}
} else {
// Output silence.
for ch in heap.read_buffer.block.iter_mut() {
ch.resize(ch.len() + second_len, Default::default());
}
}

self.current_frame_in_block = second_len;
@@ -862,6 +862,8 @@
// This check should never fail because it can only be `None` in the destructor.
let heap = self.heap_data.as_mut().unwrap();

heap.read_buffer.clear();

// Get the first block of data.
let current_block_data = {
let current_block = &heap.prefetch_buffer[self.current_block_index];
@@ -886,18 +888,20 @@
}
};

for i in 0..heap.read_buffer.block.len() {
let read_buffer_part = &mut heap.read_buffer.block[i][0..frames];

if let Some(block) = current_block_data {
let from_buffer_part = &block.block[i]
[self.current_frame_in_block..self.current_frame_in_block + frames];

read_buffer_part.copy_from_slice(from_buffer_part);
} else {
// Output silence.
read_buffer_part[..frames].fill_with(Default::default);
};
if let Some(block) = current_block_data {
for (read_buffer_ch, block_ch) in
heap.read_buffer.block.iter_mut().zip(block.block.iter())
{
read_buffer_ch.extend_from_slice(
&block_ch
[self.current_frame_in_block..self.current_frame_in_block + frames],
);
}
} else {
// Output silence.
for ch in heap.read_buffer.block.iter_mut() {
ch.resize(ch.len() + frames, Default::default());
}
}
}

16 changes: 10 additions & 6 deletions core/src/read/server.rs
@@ -109,9 +109,11 @@ impl<D: Decoder> ReadServer<D> {
),
);

// Safe because we assume that the decoder will correctly fill the block
// with data.
let decode_res = unsafe { self.decoder.decode(&mut block) };
block.clear();

let decode_res = self.decoder.decode(&mut block);

block.ensure_correct_size(self.block_size);

match decode_res {
Ok(()) => {
@@ -181,9 +183,11 @@ impl<D: Decoder> ReadServer<D> {

// Fill the cache
for block in cache.blocks.iter_mut() {
// Safe because we assume that the decoder will correctly fill the block
// with data.
let decode_res = unsafe { self.decoder.decode(block) };
block.clear();

let decode_res = self.decoder.decode(block);

block.ensure_correct_size(self.block_size);

if let Err(e) = decode_res {
self.send_msg(ServerToClientMsg::FatalError(e));
28 changes: 10 additions & 18 deletions core/src/write/data.rs
@@ -2,29 +2,15 @@
pub struct WriteBlock<T: Copy + Clone + Default + Send> {
pub(crate) block: Vec<Vec<T>>,

pub(crate) written_frames: usize,
pub(crate) restart_count: usize,
}

impl<T: Copy + Clone + Default + Send> WriteBlock<T> {
/// # Safety
///
/// Using an allocated but uninitialized [`Vec`] is safe because the block data
/// will be always filled before it is sent to be written by the IO server.
pub fn new(num_channels: usize, block_size: usize) -> Self {
let mut block: Vec<Vec<T>> = Vec::with_capacity(num_channels);
for _ in 0..num_channels {
let mut data: Vec<T> = Vec::with_capacity(block_size);
#[allow(clippy::uninit_vec)] // TODO
unsafe {
data.set_len(block_size)
};
block.push(data);
}

WriteBlock {
block,
written_frames: 0,
block: (0..num_channels)
.map(|_| Vec::with_capacity(block_size))
.collect(),
restart_count: 0,
}
}
@@ -34,7 +20,13 @@ impl<T: Copy + Clone + Default + Send> WriteBlock<T> {
}

pub fn written_frames(&self) -> usize {
self.written_frames
self.block[0].len()
}

pub fn clear(&mut self) {
for ch in self.block.iter_mut() {
ch.clear();
}
}
}

12 changes: 1 addition & 11 deletions core/src/write/encoder.rs
@@ -64,9 +64,6 @@ pub trait Encoder: Sized + 'static {

/// Write a block of data to the file.
///
/// The block may contain less written frames than the length of the channel
/// `Vec`s, so be sure to only read up to `block.written_frames()`.
///
/// If the write was successful, return `WriteStatus::Ok`.
///
/// If the codec has a maximum file size (i.e. 4GB for WAV), then keep track of
@@ -75,14 +72,7 @@
/// the file name (i.e. "_001" for the first file, "_002" for the second, etc.)
/// This helper function `num_files_to_file_name_extension()` can be used to find
/// this extension.
///
/// # Safety
///
/// This is marked as `unsafe` because a `data_block` may be uninitialized, causing
/// undefined behavior if unwritten data from the block is read. Please use the value
/// from `write_block.num_frames()` to know how many frames in the block are valid.
/// (valid frames are from `[0..num_frames]`)
unsafe fn encode(
fn encode(
&mut self,
write_block: &WriteBlock<Self::T>,
) -> Result<WriteStatus, Self::FatalError>;
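Since the note about partially written blocks was dropped, here is a brief hedged sketch of the consequence for encoder implementors, using stand-in types rather than creek's real `WriteBlock` API: every sample held in a channel `Vec` is now valid data, and `written_frames()` (now derived from the first channel's length, per `core/src/write/data.rs` above) matches what the `Vec`s actually contain.

```rust
/// Stand-in for creek's `WriteBlock<T>`, reduced to what this sketch needs.
struct SketchWriteBlock {
    block: Vec<Vec<f32>>,
}

impl SketchWriteBlock {
    /// Mirrors the change in this commit: the frame count is the channel length.
    fn written_frames(&self) -> usize {
        self.block.first().map(|ch| ch.len()).unwrap_or(0)
    }
}

/// An encoder can iterate the channel Vecs directly instead of slicing them to
/// a separately tracked `written_frames` counter.
fn encode_sketch(write_block: &SketchWriteBlock) -> usize {
    let mut samples_written = 0;
    for ch in &write_block.block {
        samples_written += ch.len();
    }
    debug_assert_eq!(
        samples_written,
        write_block.written_frames() * write_block.block.len()
    );
    samples_written
}
```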
8 changes: 3 additions & 5 deletions core/src/write/server.rs
@@ -89,12 +89,10 @@ impl<E: Encoder> WriteServer<E> {
// Don't use this block if it is from a previous discarded stream.
if block.restart_count != self.restart_count {
// Clear and send block to be re-used by client.
block.written_frames = 0;
block.clear();
self.send_msg(ServerToClientMsg::NewWriteBlock { block });
} else {
// Safe because we assume that the encoder will not try to use any
// unwritten data.
let write_res = unsafe { self.encoder.encode(&block) };
let write_res = self.encoder.encode(&block);

match write_res {
Ok(status) => {
Expand All @@ -105,7 +103,7 @@ impl<E: Encoder> WriteServer<E> {
}

// Clear and send block to be re-used by client.
block.written_frames = 0;
block.clear();
self.send_msg(ServerToClientMsg::NewWriteBlock { block });
}
Err(e) => {