From 4ab23cf1c876b1a856bb4bc9efdb66cf5cd7cee5 Mon Sep 17 00:00:00 2001 From: Remo Senekowitsch Date: Wed, 14 Aug 2024 13:10:33 +0200 Subject: [PATCH] paasio: remove macros from tests part of https://github.com/exercism/rust/issues/1824 --- exercises/practice/paasio/tests/paasio.rs | 607 +++++++++++++++------- 1 file changed, 422 insertions(+), 185 deletions(-) diff --git a/exercises/practice/paasio/tests/paasio.rs b/exercises/practice/paasio/tests/paasio.rs index e5c6f5eac..da984be88 100644 --- a/exercises/practice/paasio/tests/paasio.rs +++ b/exercises/practice/paasio/tests/paasio.rs @@ -1,203 +1,440 @@ use std::io::{Error, ErrorKind, Read, Result, Write}; -/// test a few read scenarios -macro_rules! test_read { - ($(#[$attr:meta])* $modname:ident ($input:expr, $len:expr)) => { - mod $modname { - use std::io::{Read, BufReader}; - use paasio::*; - - const CHUNK_SIZE: usize = 2; - - $(#[$attr])* - #[test] - fn read_passthrough() { - let data = $input; - let len = $len; - let size = len(&data); - let mut reader = ReadStats::new(data); - - let mut buffer = Vec::with_capacity(size); - let qty_read = reader.read_to_end(&mut buffer); - - assert!(qty_read.is_ok()); - assert_eq!(size, qty_read.unwrap()); - assert_eq!(size, buffer.len()); - // 2: first to read all the data, second to check that - // there wasn't any more pending data which simply didn't - // fit into the existing buffer - assert_eq!(2, reader.reads()); - assert_eq!(size, reader.bytes_through()); - } - - $(#[$attr])* - #[test] - fn read_chunks() { - let data = $input; - let len = $len; - let size = len(&data); - let mut reader = ReadStats::new(data); - - let mut buffer = [0_u8; CHUNK_SIZE]; - let mut chunks_read = 0; - while reader.read(&mut buffer[..]).unwrap_or_else(|_| panic!("read failed at chunk {}", chunks_read+1)) > 0 { - chunks_read += 1; - } - - assert_eq!(size / CHUNK_SIZE + std::cmp::min(1, size % CHUNK_SIZE), chunks_read); - // we read once more than the number of chunks, because the final - // read returns 0 new bytes - assert_eq!(1+chunks_read, reader.reads()); - assert_eq!(size, reader.bytes_through()); - } - - $(#[$attr])* - #[test] - fn read_buffered_chunks() { - let data = $input; - let len = $len; - let size = len(&data); - let mut reader = BufReader::new(ReadStats::new(data)); - - let mut buffer = [0_u8; CHUNK_SIZE]; - let mut chunks_read = 0; - while reader.read(&mut buffer[..]).unwrap_or_else(|_| panic!("read failed at chunk {}", chunks_read+1)) > 0 { - chunks_read += 1; - } - - assert_eq!(size / CHUNK_SIZE + std::cmp::min(1, size % CHUNK_SIZE), chunks_read); - // the BufReader should smooth out the reads, collecting into - // a buffer and performing only two read operations: - // the first collects everything into the buffer, - // and the second ensures that no data remains - assert_eq!(2, reader.get_ref().reads()); - assert_eq!(size, reader.get_ref().bytes_through()); - } +#[test] +fn create_stats() { + let mut data: Vec = Vec::new(); + let _ = paasio::ReadStats::new(data.as_slice()); + let _ = paasio::WriteStats::new(data.as_mut_slice()); +} + +mod read_string { + use paasio::*; + use std::io::{BufReader, Read}; + + const CHUNK_SIZE: usize = 2; + + static INPUT: &[u8] = b"Twas brillig, and the slithy toves/Did gyre and gimble in the wabe:/All mimsy were the borogoves,/And the mome raths outgrabe."; + + #[test] + #[ignore] + fn read_passthrough() { + let data = INPUT; + let len = |d: &[u8]| d.len(); + let size = len(data); + let mut reader = ReadStats::new(data); + + let mut buffer = Vec::with_capacity(size); 
+ let qty_read = reader.read_to_end(&mut buffer); + + assert!(qty_read.is_ok()); + assert_eq!(size, qty_read.unwrap()); + assert_eq!(size, buffer.len()); + // 2: first to read all the data, second to check that + // there wasn't any more pending data which simply didn't + // fit into the existing buffer + assert_eq!(2, reader.reads()); + assert_eq!(size, reader.bytes_through()); + } + + #[test] + #[ignore] + fn read_chunks() { + let data = INPUT; + let len = |d: &[u8]| d.len(); + let size = len(data); + let mut reader = ReadStats::new(data); + + let mut buffer = [0_u8; CHUNK_SIZE]; + let mut chunks_read = 0; + while reader + .read(&mut buffer[..]) + .unwrap_or_else(|_| panic!("read failed at chunk {}", chunks_read + 1)) + > 0 + { + chunks_read += 1; } - }; + + assert_eq!( + size / CHUNK_SIZE + std::cmp::min(1, size % CHUNK_SIZE), + chunks_read + ); + // we read once more than the number of chunks, because the final + // read returns 0 new bytes + assert_eq!(1 + chunks_read, reader.reads()); + assert_eq!(size, reader.bytes_through()); + } + + #[test] + #[ignore] + fn read_buffered_chunks() { + let data = INPUT; + let len = |d: &[u8]| d.len(); + let size = len(data); + let mut reader = BufReader::new(ReadStats::new(data)); + + let mut buffer = [0_u8; CHUNK_SIZE]; + let mut chunks_read = 0; + while reader + .read(&mut buffer[..]) + .unwrap_or_else(|_| panic!("read failed at chunk {}", chunks_read + 1)) + > 0 + { + chunks_read += 1; + } + + assert_eq!( + size / CHUNK_SIZE + std::cmp::min(1, size % CHUNK_SIZE), + chunks_read + ); + // the BufReader should smooth out the reads, collecting into + // a buffer and performing only two read operations: + // the first collects everything into the buffer, + // and the second ensures that no data remains + assert_eq!(2, reader.get_ref().reads()); + assert_eq!(size, reader.get_ref().bytes_through()); + } } -/// test a few write scenarios -macro_rules! 
test_write { - ($(#[$attr:meta])* $modname:ident ($input:expr, $len:expr)) => { - mod $modname { - use std::io::{self, Write, BufWriter}; - use paasio::*; - - const CHUNK_SIZE: usize = 2; - $(#[$attr])* - #[test] - fn write_passthrough() { - let data = $input; - let len = $len; - let size = len(&data); - let mut writer = WriteStats::new(Vec::with_capacity(size)); - let written = writer.write(data); - assert!(written.is_ok()); - assert_eq!(size, written.unwrap()); - assert_eq!(size, writer.bytes_through()); - assert_eq!(1, writer.writes()); - assert_eq!(data, writer.get_ref().as_slice()); - } - - $(#[$attr])* - #[test] - fn sink_oneshot() { - let data = $input; - let len = $len; - let size = len(&data); - let mut writer = WriteStats::new(io::sink()); - let written = writer.write(data); - assert!(written.is_ok()); - assert_eq!(size, written.unwrap()); - assert_eq!(size, writer.bytes_through()); - assert_eq!(1, writer.writes()); - } - - $(#[$attr])* - #[test] - fn sink_windowed() { - let data = $input; - let len = $len; - let size = len(&data); - let mut writer = WriteStats::new(io::sink()); - - let mut chunk_count = 0; - for chunk in data.chunks(CHUNK_SIZE) { - chunk_count += 1; - let written = writer.write(chunk); - assert!(written.is_ok()); - assert_eq!(CHUNK_SIZE, written.unwrap()); - } - assert_eq!(size, writer.bytes_through()); - assert_eq!(chunk_count, writer.writes()); - } - - $(#[$attr])* - #[test] - fn sink_buffered_windowed() { - let data = $input; - let len = $len; - let size = len(&data); - let mut writer = BufWriter::new(WriteStats::new(io::sink())); - - for chunk in data.chunks(CHUNK_SIZE) { - let written = writer.write(chunk); - assert!(written.is_ok()); - assert_eq!(CHUNK_SIZE, written.unwrap()); - } - // at this point, nothing should have yet been passed through to - // our writer - assert_eq!(0, writer.get_ref().bytes_through()); - assert_eq!(0, writer.get_ref().writes()); - - // after flushing, everything should pass through in one go - assert!(writer.flush().is_ok()); - assert_eq!(size, writer.get_ref().bytes_through()); - assert_eq!(1, writer.get_ref().writes()); - } +mod write_string { + use paasio::*; + use std::io::{self, BufWriter, Write}; + + const CHUNK_SIZE: usize = 2; + + static INPUT: &[u8] = b"Beware the Jabberwock, my son!/The jaws that bite, the claws that catch!/Beware the Jubjub bird, and shun/The frumious Bandersnatch!"; + + #[test] + #[ignore] + fn write_passthrough() { + let data = INPUT; + let len = |d: &[u8]| d.len(); + let size = len(data); + let mut writer = WriteStats::new(Vec::with_capacity(size)); + let written = writer.write(data); + assert!(written.is_ok()); + assert_eq!(size, written.unwrap()); + assert_eq!(size, writer.bytes_through()); + assert_eq!(1, writer.writes()); + assert_eq!(data, writer.get_ref().as_slice()); + } + + #[test] + #[ignore] + fn sink_oneshot() { + let data = INPUT; + let len = |d: &[u8]| d.len(); + let size = len(data); + let mut writer = WriteStats::new(io::sink()); + let written = writer.write(data); + assert!(written.is_ok()); + assert_eq!(size, written.unwrap()); + assert_eq!(size, writer.bytes_through()); + assert_eq!(1, writer.writes()); + } + + #[test] + #[ignore] + fn sink_windowed() { + let data = INPUT; + let len = |d: &[u8]| d.len(); + let size = len(data); + let mut writer = WriteStats::new(io::sink()); + + let mut chunk_count = 0; + for chunk in data.chunks(CHUNK_SIZE) { + chunk_count += 1; + let written = writer.write(chunk); + assert!(written.is_ok()); + assert_eq!(CHUNK_SIZE, written.unwrap()); } - }; + 
assert_eq!(size, writer.bytes_through()); + assert_eq!(chunk_count, writer.writes()); + } + + #[test] + #[ignore] + fn sink_buffered_windowed() { + let data = INPUT; + let len = |d: &[u8]| d.len(); + let size = len(data); + let mut writer = BufWriter::new(WriteStats::new(io::sink())); + + for chunk in data.chunks(CHUNK_SIZE) { + let written = writer.write(chunk); + assert!(written.is_ok()); + assert_eq!(CHUNK_SIZE, written.unwrap()); + } + // at this point, nothing should have yet been passed through to + // our writer + assert_eq!(0, writer.get_ref().bytes_through()); + assert_eq!(0, writer.get_ref().writes()); + + // after flushing, everything should pass through in one go + assert!(writer.flush().is_ok()); + assert_eq!(size, writer.get_ref().bytes_through()); + assert_eq!(1, writer.get_ref().writes()); + } } -#[test] -fn create_stats() { - let mut data: Vec = Vec::new(); - let _ = paasio::ReadStats::new(data.as_slice()); - let _ = paasio::WriteStats::new(data.as_mut_slice()); +mod read_byte_literal { + use paasio::*; + use std::io::{BufReader, Read}; + + const CHUNK_SIZE: usize = 2; + + static INPUT: &[u8] = &[1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]; + + #[test] + #[ignore] + fn read_passthrough() { + let data = INPUT; + let len = |d: &[u8]| d.len(); + let size = len(data); + let mut reader = ReadStats::new(data); + + let mut buffer = Vec::with_capacity(size); + let qty_read = reader.read_to_end(&mut buffer); + + assert!(qty_read.is_ok()); + assert_eq!(size, qty_read.unwrap()); + assert_eq!(size, buffer.len()); + // 2: first to read all the data, second to check that + // there wasn't any more pending data which simply didn't + // fit into the existing buffer + assert_eq!(2, reader.reads()); + assert_eq!(size, reader.bytes_through()); + } + + #[test] + #[ignore] + fn read_chunks() { + let data = INPUT; + let len = |d: &[u8]| d.len(); + let size = len(data); + let mut reader = ReadStats::new(data); + + let mut buffer = [0_u8; CHUNK_SIZE]; + let mut chunks_read = 0; + while reader + .read(&mut buffer[..]) + .unwrap_or_else(|_| panic!("read failed at chunk {}", chunks_read + 1)) + > 0 + { + chunks_read += 1; + } + + assert_eq!( + size / CHUNK_SIZE + std::cmp::min(1, size % CHUNK_SIZE), + chunks_read + ); + // we read once more than the number of chunks, because the final + // read returns 0 new bytes + assert_eq!(1 + chunks_read, reader.reads()); + assert_eq!(size, reader.bytes_through()); + } + + #[test] + #[ignore] + fn read_buffered_chunks() { + let data = INPUT; + let len = |d: &[u8]| d.len(); + let size = len(data); + let mut reader = BufReader::new(ReadStats::new(data)); + + let mut buffer = [0_u8; CHUNK_SIZE]; + let mut chunks_read = 0; + while reader + .read(&mut buffer[..]) + .unwrap_or_else(|_| panic!("read failed at chunk {}", chunks_read + 1)) + > 0 + { + chunks_read += 1; + } + + assert_eq!( + size / CHUNK_SIZE + std::cmp::min(1, size % CHUNK_SIZE), + chunks_read + ); + // the BufReader should smooth out the reads, collecting into + // a buffer and performing only two read operations: + // the first collects everything into the buffer, + // and the second ensures that no data remains + assert_eq!(2, reader.get_ref().reads()); + assert_eq!(size, reader.get_ref().bytes_through()); + } } -test_read!(#[ignore] read_string ( - "Twas brillig, and the slithy toves/Did gyre and gimble in the wabe:/All mimsy were the borogoves,/And the mome raths outgrabe.".as_bytes(), - |d: &[u8]| d.len() -)); -test_write!(#[ignore] write_string ( - "Beware the Jabberwock, my son!/The jaws that 
bite, the claws that catch!/Beware the Jubjub bird, and shun/The frumious Bandersnatch!".as_bytes(), - |d: &[u8]| d.len() -)); +mod write_byte_literal { + use paasio::*; + use std::io::{self, BufWriter, Write}; + + const CHUNK_SIZE: usize = 2; + + static INPUT: &[u8] = &[ + 2_u8, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, + ]; -test_read!( + #[test] #[ignore] - read_byte_literal( - &[1_u8, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144][..], - |d: &[u8]| d.len() - ) -); -test_write!( + fn write_passthrough() { + let data = INPUT; + let len = |d: &[u8]| d.len(); + let size = len(data); + let mut writer = WriteStats::new(Vec::with_capacity(size)); + let written = writer.write(data); + assert!(written.is_ok()); + assert_eq!(size, written.unwrap()); + assert_eq!(size, writer.bytes_through()); + assert_eq!(1, writer.writes()); + assert_eq!(data, writer.get_ref().as_slice()); + } + + #[test] #[ignore] - write_byte_literal( - &[2_u8, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,][..], - |d: &[u8]| d.len() - ) -); + fn sink_oneshot() { + let data = INPUT; + let len = |d: &[u8]| d.len(); + let size = len(data); + let mut writer = WriteStats::new(io::sink()); + let written = writer.write(data); + assert!(written.is_ok()); + assert_eq!(size, written.unwrap()); + assert_eq!(size, writer.bytes_through()); + assert_eq!(1, writer.writes()); + } -test_read!( + #[test] #[ignore] - read_file( - ::std::fs::File::open("Cargo.toml").expect("Cargo.toml must be present"), - |f: &::std::fs::File| f.metadata().expect("metadata must be present").len() as usize - ) -); + fn sink_windowed() { + let data = INPUT; + let len = |d: &[u8]| d.len(); + let size = len(data); + let mut writer = WriteStats::new(io::sink()); + + let mut chunk_count = 0; + for chunk in data.chunks(CHUNK_SIZE) { + chunk_count += 1; + let written = writer.write(chunk); + assert!(written.is_ok()); + assert_eq!(CHUNK_SIZE, written.unwrap()); + } + assert_eq!(size, writer.bytes_through()); + assert_eq!(chunk_count, writer.writes()); + } + + #[test] + #[ignore] + fn sink_buffered_windowed() { + let data = INPUT; + let len = |d: &[u8]| d.len(); + let size = len(data); + let mut writer = BufWriter::new(WriteStats::new(io::sink())); + + for chunk in data.chunks(CHUNK_SIZE) { + let written = writer.write(chunk); + assert!(written.is_ok()); + assert_eq!(CHUNK_SIZE, written.unwrap()); + } + // at this point, nothing should have yet been passed through to + // our writer + assert_eq!(0, writer.get_ref().bytes_through()); + assert_eq!(0, writer.get_ref().writes()); + + // after flushing, everything should pass through in one go + assert!(writer.flush().is_ok()); + assert_eq!(size, writer.get_ref().bytes_through()); + assert_eq!(1, writer.get_ref().writes()); + } +} + +mod read_file { + use paasio::*; + use std::io::{BufReader, Read}; + + const CHUNK_SIZE: usize = 2; + + #[test] + #[ignore] + fn read_passthrough() { + let data = std::fs::File::open("Cargo.toml").expect("Cargo.toml must be present"); + let len = + |f: &::std::fs::File| f.metadata().expect("metadata must be present").len() as usize; + let size = len(&data); + let mut reader = ReadStats::new(data); + + let mut buffer = Vec::with_capacity(size); + let qty_read = reader.read_to_end(&mut buffer); + + assert!(qty_read.is_ok()); + assert_eq!(size, qty_read.unwrap()); + assert_eq!(size, buffer.len()); + // 2: first to read all the data, second to check that + // there wasn't any more pending data which simply didn't + // fit into the existing buffer + assert_eq!(2, 
reader.reads()); + assert_eq!(size, reader.bytes_through()); + } + + #[test] + #[ignore] + fn read_chunks() { + let data = std::fs::File::open("Cargo.toml").expect("Cargo.toml must be present"); + let len = + |f: &::std::fs::File| f.metadata().expect("metadata must be present").len() as usize; + let size = len(&data); + let mut reader = ReadStats::new(data); + + let mut buffer = [0_u8; CHUNK_SIZE]; + let mut chunks_read = 0; + while reader + .read(&mut buffer[..]) + .unwrap_or_else(|_| panic!("read failed at chunk {}", chunks_read + 1)) + > 0 + { + chunks_read += 1; + } + + assert_eq!( + size / CHUNK_SIZE + std::cmp::min(1, size % CHUNK_SIZE), + chunks_read + ); + // we read once more than the number of chunks, because the final + // read returns 0 new bytes + assert_eq!(1 + chunks_read, reader.reads()); + assert_eq!(size, reader.bytes_through()); + } + + #[test] + #[ignore] + fn read_buffered_chunks() { + let data = std::fs::File::open("Cargo.toml").expect("Cargo.toml must be present"); + let len = + |f: &::std::fs::File| f.metadata().expect("metadata must be present").len() as usize; + let size = len(&data); + let mut reader = BufReader::new(ReadStats::new(data)); + + let mut buffer = [0_u8; CHUNK_SIZE]; + let mut chunks_read = 0; + while reader + .read(&mut buffer[..]) + .unwrap_or_else(|_| panic!("read failed at chunk {}", chunks_read + 1)) + > 0 + { + chunks_read += 1; + } + + assert_eq!( + size / CHUNK_SIZE + std::cmp::min(1, size % CHUNK_SIZE), + chunks_read + ); + // the BufReader should smooth out the reads, collecting into + // a buffer and performing only two read operations: + // the first collects everything into the buffer, + // and the second ensures that no data remains + assert_eq!(2, reader.get_ref().reads()); + assert_eq!(size, reader.get_ref().bytes_through()); + } +} #[test] #[ignore]
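For context, every module expanded above exercises the same two types: paasio::ReadStats and paasio::WriteStats, wrappers that forward I/O to an inner reader or writer while counting calls and bytes. Below is a minimal sketch of that shape, inferred only from the calls the tests make (new, get_ref, reads, writes, bytes_through, plus the Read/Write impls required by BufReader/BufWriter); the field names and generic bounds are assumptions, not part of this patch, and a real solution may differ.

use std::io::{Read, Result, Write};

// Illustrative sketch only (not part of this diff): a counting wrapper of
// the kind these tests target. Field names and bounds are assumptions.
pub struct ReadStats<R> {
    inner: R,
    bytes_through: usize,
    reads: usize,
}

impl<R: Read> ReadStats<R> {
    pub fn new(wrapped: R) -> ReadStats<R> {
        ReadStats {
            inner: wrapped,
            bytes_through: 0,
            reads: 0,
        }
    }

    pub fn get_ref(&self) -> &R {
        &self.inner
    }

    pub fn bytes_through(&self) -> usize {
        self.bytes_through
    }

    pub fn reads(&self) -> usize {
        self.reads
    }
}

impl<R: Read> Read for ReadStats<R> {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
        // Every call is counted, including the final zero-byte read that
        // signals EOF; that is why the tests expect one more read than the
        // number of non-empty chunks (and two reads for read_to_end with a
        // pre-sized buffer).
        self.reads += 1;
        let n = self.inner.read(buf)?;
        self.bytes_through += n;
        Ok(n)
    }
}

pub struct WriteStats<W> {
    inner: W,
    bytes_through: usize,
    writes: usize,
}

impl<W: Write> WriteStats<W> {
    pub fn new(wrapped: W) -> WriteStats<W> {
        WriteStats {
            inner: wrapped,
            bytes_through: 0,
            writes: 0,
        }
    }

    pub fn get_ref(&self) -> &W {
        &self.inner
    }

    pub fn bytes_through(&self) -> usize {
        self.bytes_through
    }

    pub fn writes(&self) -> usize {
        self.writes
    }
}

impl<W: Write> Write for WriteStats<W> {
    fn write(&mut self, buf: &[u8]) -> Result<usize> {
        // With small chunked writes, BufWriter buffers everything and only
        // calls this on flush(), which is what the sink_buffered_windowed
        // tests rely on when they expect a single pass-through write.
        self.writes += 1;
        let n = self.inner.write(buf)?;
        self.bytes_through += n;
        Ok(n)
    }

    fn flush(&mut self) -> Result<()> {
        self.inner.flush()
    }
}

Keeping the bookkeeping inside the Read/Write impls, rather than in each test, is what lets the same assertions hold for byte slices, files, and io::sink() alike.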