From 3950ab9ec4f88041a06a354696079096269a71bd Mon Sep 17 00:00:00 2001 From: ramla-i Date: Fri, 9 Jun 2023 14:25:51 -0400 Subject: [PATCH 01/23] added trusted chunk submodule --- .gitmodules | 3 +++ libs/trusted_chunk | 1 + 2 files changed, 4 insertions(+) create mode 160000 libs/trusted_chunk diff --git a/.gitmodules b/.gitmodules index fa662cb6f2..c38d5c565b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -22,3 +22,6 @@ path = libs/core2 url = https://github.com/theseus-os/core2.git shallow = true +[submodule "libs/trusted_chunk"] + path = libs/trusted_chunk + url = https://github.com/Ramla-I/trusted_chunk diff --git a/libs/trusted_chunk b/libs/trusted_chunk new file mode 160000 index 0000000000..c863f11892 --- /dev/null +++ b/libs/trusted_chunk @@ -0,0 +1 @@ +Subproject commit c863f118929ee54034d3b8c7bf971fdd53efdf97 From 57a5de7125184330f2a6a2071cb490032116b11f Mon Sep 17 00:00:00 2001 From: ramla-i Date: Fri, 9 Jun 2023 16:55:24 -0400 Subject: [PATCH 02/23] compiles and runs with trusted chunk shim --- Cargo.lock | 49 ++ kernel/frame_allocator/Cargo.toml | 4 + .../frame_allocator/src/allocated_frames.rs | 205 ++++++ kernel/frame_allocator/src/lib.rs | 686 ++++++------------ kernel/frame_allocator/src/region.rs | 59 ++ .../src/static_array_rb_tree.rs | 4 + .../frame_allocator/src/trusted_chunk_shim.rs | 233 ++++++ kernel/mem_into_fns/Cargo.toml | 28 + kernel/mem_into_fns/src/lib.rs | 66 ++ kernel/memory/Cargo.toml | 1 + kernel/memory/src/lib.rs | 7 +- kernel/memory/src/paging/mapper.rs | 22 +- kernel/memory/src/paging/mod.rs | 5 - kernel/memory_structs/Cargo.toml | 1 + kernel/memory_structs/src/lib.rs | 12 +- libs/trusted_chunk | 2 +- 16 files changed, 906 insertions(+), 478 deletions(-) create mode 100644 kernel/frame_allocator/src/allocated_frames.rs create mode 100644 kernel/frame_allocator/src/region.rs create mode 100644 kernel/frame_allocator/src/trusted_chunk_shim.rs create mode 100644 kernel/mem_into_fns/Cargo.toml create mode 100644 kernel/mem_into_fns/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 99cd9c5782..dda0c89bc7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1186,8 +1186,10 @@ dependencies = [ "kernel_config", "log", "memory_structs", + "range_inclusive", "spin 0.9.4", "static_assertions", + "trusted_chunk", ] [[package]] @@ -1918,6 +1920,21 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ca88d725a0a943b096803bd34e73a4437208b6077654cc4ecb2947a5f91618d" +[[package]] +name = "mem_into_fns" +version = "0.1.0" +dependencies = [ + "core2", + "frame_allocator", + "log", + "memory_structs", + "page_table_entry", + "range_inclusive", + "spin 0.9.4", + "trusted_chunk", + "x86_64", +] + [[package]] name = "memchr" version = "2.4.1" @@ -1976,6 +1993,7 @@ dependencies = [ "kernel_config", "lazy_static", "log", + "mem_into_fns", "memory_aarch64", "memory_structs", "memory_x86_64", @@ -2027,6 +2045,7 @@ dependencies = [ "derive_more", "kernel_config", "paste", + "range_inclusive", "zerocopy", ] @@ -2816,6 +2835,21 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "prusti-contracts" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9775a28190bfc0dda44516b542082746cb43545016a27f2010cb0433ae668e20" +dependencies = [ + "prusti-contracts-proc-macros", +] + +[[package]] +name = "prusti-contracts-proc-macros" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc5a49e19cc2309c2f7babcceeed80dacf3e69c8ebfb03355c76d89a0f1d1ddd" + 
[[package]] name = "ps" version = "0.1.0" @@ -2927,6 +2961,11 @@ dependencies = [ "tsc", ] +[[package]] +name = "range_inclusive" +version = "0.1.0" +source = "git+https://github.com/Ramla-I/range_inclusive#7998070408bc72a226c5a025f7b7df0f29a0a3c9" + [[package]] name = "rangemap" version = "1.3.0" @@ -4193,6 +4232,16 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ee8fba06c1f4d0b396ef61a54530bb6b28f0dc61c38bc8bc5a5a48161e6282e" +[[package]] +name = "trusted_chunk" +version = "0.1.0" +dependencies = [ + "cfg-if 1.0.0", + "prusti-contracts", + "range_inclusive", + "spin 0.9.4", +] + [[package]] name = "tsc" version = "0.1.0" diff --git a/kernel/frame_allocator/Cargo.toml b/kernel/frame_allocator/Cargo.toml index d589b41e8b..a7e8d0277c 100644 --- a/kernel/frame_allocator/Cargo.toml +++ b/kernel/frame_allocator/Cargo.toml @@ -8,6 +8,7 @@ version = "0.1.0" spin = "0.9.4" intrusive-collections = "0.9.0" static_assertions = "1.1.0" +range_inclusive = {git = "https://github.com/Ramla-I/range_inclusive"} [dependencies.log] version = "0.4.8" @@ -18,5 +19,8 @@ path = "../kernel_config" [dependencies.memory_structs] path = "../memory_structs" +[dependencies.trusted_chunk] +path = "../../libs/trusted_chunk" + [lib] crate-type = ["rlib"] diff --git a/kernel/frame_allocator/src/allocated_frames.rs b/kernel/frame_allocator/src/allocated_frames.rs new file mode 100644 index 0000000000..d1225e4068 --- /dev/null +++ b/kernel/frame_allocator/src/allocated_frames.rs @@ -0,0 +1,205 @@ +use crate::{Chunk, MemoryRegionType, frame_is_in_list, FREE_GENERAL_FRAMES_LIST, FREE_RESERVED_FRAMES_LIST, RESERVED_REGIONS}; +use memory_structs::{FrameRange, Frame}; +use core::{fmt, ops::{Deref, DerefMut}, marker::PhantomData}; +use trusted_chunk::trusted_chunk::TrustedChunk; +use range_inclusive::RangeInclusiveIterator; + +/// Represents a range of allocated physical memory [`Frame`]s; derefs to [`FrameRange`]. +/// +/// These frames are not immediately accessible because they're not yet mapped +/// by any virtual memory pages. +/// You must do that separately in order to create a `MappedPages` type, +/// which can then be used to access the contents of these frames. +/// +/// This object represents ownership of the range of allocated physical frames; +/// if this object falls out of scope, its allocated frames will be auto-deallocated upon drop. +pub struct AllocatedFrames { + pub(crate) frames: Chunk, +} + +// AllocatedFrames must not be Cloneable, and it must not expose its inner frames as mutable. +assert_not_impl_any!(AllocatedFrames: DerefMut, Clone); + +impl Deref for AllocatedFrames { + type Target = FrameRange; + fn deref(&self) -> &FrameRange { + &self.frames + } +} +impl fmt::Debug for AllocatedFrames { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "AllocatedFrames({:?})", self.frames) + } +} + +impl AllocatedFrames { + /// Returns an empty AllocatedFrames object that performs no frame allocation. + /// Can be used as a placeholder, but will not permit any real usage. + pub const fn empty() -> AllocatedFrames { + AllocatedFrames { + frames: Chunk::empty() + } + } + + /// Merges the given `AllocatedFrames` object `other` into this `AllocatedFrames` object (`self`). + /// This is just for convenience and usability purposes, it performs no allocation or remapping. + /// + /// The given `other` must be physically contiguous with `self`, i.e., come immediately before or after `self`. 
+ /// That is, either `self.start == other.end + 1` or `self.end + 1 == other.start` must be true. + /// + /// If either of those conditions are met, `self` is modified and `Ok(())` is returned, + /// otherwise `Err(other)` is returned. + pub fn merge(&mut self, mut other: AllocatedFrames) -> Result<(), AllocatedFrames> { + let mut chunk = core::mem::replace(&mut other.frames, Chunk::empty()); + match self.frames.merge(chunk) { + Ok(_) => { + // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. + // This is not really necessary because it only contains an empty chunk. + core::mem::forget(other); + Ok(()) + }, + Err(other_chunk) => { + Err(AllocatedFrames{frames: other_chunk}) + } + } + } + + /// Splits this `AllocatedFrames` into two separate `AllocatedFrames` objects: + /// * `[beginning : at_frame - 1]` + /// * `[at_frame : end]` + /// + /// This function follows the behavior of [`core::slice::split_at()`], + /// thus, either one of the returned `AllocatedFrames` objects may be empty. + /// * If `at_frame == self.start`, the first returned `AllocatedFrames` object will be empty. + /// * If `at_frame == self.end + 1`, the second returned `AllocatedFrames` object will be empty. + /// + /// Returns an `Err` containing this `AllocatedFrames` if `at_frame` is otherwise out of bounds. + /// + /// [`core::slice::split_at()`]: https://doc.rust-lang.org/core/primitive.slice.html#method.split_at + pub fn split(mut self, at_frame: Frame) -> Result<(AllocatedFrames, AllocatedFrames), AllocatedFrames> { + let mut chunk = core::mem::replace(&mut self.frames, Chunk::empty()); + match chunk.split_at(at_frame) { + Ok((chunk1, chunk2)) => { + // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. + core::mem::forget(self); + Ok(( + AllocatedFrames{frames: chunk1}, + AllocatedFrames{frames: chunk2} + )) + }, + Err(chunk_not_split) => { + Err(AllocatedFrames{frames: chunk_not_split}) + } + } + } + + /// Returns an `AllocatedFrame` if this `AllocatedFrames` object contains only one frame. + /// + /// ## Panic + /// Panics if this `AllocatedFrame` contains multiple frames or zero frames. + pub fn as_allocated_frame(&self) -> AllocatedFrame { + assert!(self.size_in_frames() == 1); + AllocatedFrame { + frame: *self.start(), + _phantom: PhantomData, + } + } +} + +/// This function is a callback used to convert `UnmappedFrames` into `AllocatedFrames`. +/// `UnmappedFrames` represents frames that have been unmapped from a page that had +/// exclusively mapped them, indicating that no others pages have been mapped +/// to those same frames, and thus, they can be safely deallocated. +/// +/// This exists to break the cyclic dependency cycle between this crate and +/// the `page_table_entry` crate, since `page_table_entry` must depend on types +/// from this crate in order to enforce safety when modifying page table entries. 
+pub(crate) fn into_allocated_frames(tc: TrustedChunk, frames: FrameRange) -> AllocatedFrames { + let typ = if frame_is_in_list(&RESERVED_REGIONS.lock(), frames.start()) { + MemoryRegionType::Reserved + } else { + MemoryRegionType::Free + }; + AllocatedFrames { frames: Chunk::from_trusted_chunk(tc, frames, typ) } +} + +impl Drop for AllocatedFrames { + fn drop(&mut self) { + if self.size_in_frames() == 0 { return; } + + let (list, typ) = if frame_is_in_list(&RESERVED_REGIONS.lock(), self.start()) { + (&FREE_RESERVED_FRAMES_LIST, MemoryRegionType::Reserved) + } else { + (&FREE_GENERAL_FRAMES_LIST, MemoryRegionType::Free) + }; + // trace!("frame_allocator: deallocating {:?}, typ {:?}", self, typ); + + // Simply add the newly-deallocated chunk to the free frames list. + let mut locked_list = list.lock(); + let res = locked_list.insert(core::mem::replace(&mut self.frames, Chunk::empty())); + match res { + Ok(_inserted_free_chunk) => (), + Err(c) => error!("BUG: couldn't insert deallocated chunk {:?} into free frame list", c), + } + + // Here, we could optionally use above `_inserted_free_chunk` to merge the adjacent (contiguous) chunks + // before or after the newly-inserted free chunk. + // However, there's no *need* to do so until we actually run out of address space or until + // a requested address is in a chunk that needs to be merged. + // Thus, for performance, we save that for those future situations. + } +} + +impl<'f> IntoIterator for &'f AllocatedFrames { + type IntoIter = AllocatedFramesIter<'f>; + type Item = AllocatedFrame<'f>; + fn into_iter(self) -> Self::IntoIter { + AllocatedFramesIter { + _owner: self, + range: self.frames.clone().into_iter(), + } + } +} + +/// An iterator over each [`AllocatedFrame`] in a range of [`AllocatedFrames`]. +/// +/// We must implement our own iterator type here in order to tie the lifetime `'f` +/// of a returned `AllocatedFrame<'f>` type to the lifetime of its containing `AllocatedFrames`. +/// This is because the underlying type of `AllocatedFrames` is a [`FrameRange`], +/// which itself is a [`core::ops::RangeInclusive`] of [`Frame`]s, and unfortunately the +/// `RangeInclusive` type doesn't implement an immutable iterator. +/// +/// Iterating through a `RangeInclusive` actually modifies its own internal range, +/// so we must avoid doing that because it would break the semantics of a `FrameRange`. +/// In fact, this is why [`FrameRange`] only implements `IntoIterator` but +/// does not implement [`Iterator`] itself. +pub struct AllocatedFramesIter<'f> { + _owner: &'f AllocatedFrames, + range: RangeInclusiveIterator, +} +impl<'f> Iterator for AllocatedFramesIter<'f> { + type Item = AllocatedFrame<'f>; + fn next(&mut self) -> Option { + self.range.next().map(|frame| + AllocatedFrame { + frame, _phantom: PhantomData, + } + ) + } +} + +/// A reference to a single frame within a range of `AllocatedFrames`. +/// +/// The lifetime of this type is tied to the lifetime of its owning `AllocatedFrames`. 
+#[derive(Debug)] +pub struct AllocatedFrame<'f> { + frame: Frame, + _phantom: PhantomData<&'f Frame>, +} +impl<'f> Deref for AllocatedFrame<'f> { + type Target = Frame; + fn deref(&self) -> &Self::Target { + &self.frame + } +} +assert_not_impl_any!(AllocatedFrame: DerefMut, Clone); diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index 1ddfb52475..6563bd0e91 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -20,6 +20,7 @@ #![allow(clippy::blocks_in_if_conditions)] #![no_std] +#![feature(box_into_inner)] extern crate alloc; #[macro_use] extern crate log; @@ -28,13 +29,17 @@ extern crate memory_structs; extern crate spin; #[macro_use] extern crate static_assertions; extern crate intrusive_collections; +extern crate range_inclusive; +extern crate trusted_chunk; -#[cfg(test)] -mod test; +// #[cfg(test)] +// mod test; mod static_array_rb_tree; // mod static_array_linked_list; - +mod region; +mod trusted_chunk_shim; +mod allocated_frames; use core::{borrow::Borrow, cmp::{Ordering, min, max}, fmt, ops::{Deref, DerefMut}, marker::PhantomData}; use kernel_config::memory::*; @@ -42,10 +47,15 @@ use memory_structs::{PhysicalAddress, Frame, FrameRange}; use spin::Mutex; use intrusive_collections::Bound; use static_array_rb_tree::*; +use trusted_chunk::trusted_chunk::TrustedChunk; +use trusted_chunk_shim::*; +use region::*; +use range_inclusive::{RangeInclusive, RangeInclusiveIterator}; +pub use allocated_frames::*; const FRAME_SIZE: usize = PAGE_SIZE; -const MIN_FRAME: Frame = Frame::containing_address(PhysicalAddress::zero()); -const MAX_FRAME: Frame = Frame::containing_address(PhysicalAddress::new_canonical(usize::MAX)); +pub(crate) const MIN_FRAME: Frame = Frame::containing_address(PhysicalAddress::zero()); +pub(crate) const MAX_FRAME: Frame = Frame::containing_address(PhysicalAddress::new_canonical(usize::MAX)); // Note: we keep separate lists for "free, general-purpose" areas and "reserved" areas, as it's much faster. @@ -57,11 +67,11 @@ static FREE_RESERVED_FRAMES_LIST: Mutex> = Mutex::new(S /// The fixed list of all known regions that are available for general use. /// This does not indicate whether these regions are currently allocated, /// rather just where they exist and which regions are known to this allocator. -static GENERAL_REGIONS: Mutex> = Mutex::new(StaticArrayRBTree::empty()); +static GENERAL_REGIONS: Mutex> = Mutex::new(StaticArrayRBTree::empty()); /// The fixed list of all known regions that are reserved for specific purposes. /// This does not indicate whether these regions are currently allocated, /// rather just where they exist and which regions are known to this allocator. -static RESERVED_REGIONS: Mutex> = Mutex::new(StaticArrayRBTree::empty()); +static RESERVED_REGIONS: Mutex> = Mutex::new(StaticArrayRBTree::empty()); /// Initialize the frame allocator with the given list of available and reserved physical memory regions. 
@@ -80,7 +90,7 @@ static RESERVED_REGIONS: Mutex> = Mutex::new(StaticArra pub fn init( free_physical_memory_areas: F, reserved_physical_memory_areas: R, -) -> Result AllocatedFrames, &'static str> +) -> Result<(fn(RangeInclusive) -> TrustedChunk, fn(TrustedChunk, FrameRange) -> AllocatedFrames), &'static str> where P: Borrow, F: IntoIterator, R: IntoIterator + Clone, @@ -93,9 +103,10 @@ pub fn init( return Err("BUG: Frame allocator was already initialized, cannot be initialized twice."); } - let mut free_list: [Option; 32] = Default::default(); + // start with all lists using the `Region` type so we can merge and manipulate until we're sure we have non-overlapping regions + let mut free_list: [Option; 32] = Default::default(); let mut free_list_idx = 0; - + // Populate the list of free regions for general-purpose usage. for area in free_physical_memory_areas.into_iter() { let area = area.borrow(); @@ -107,11 +118,11 @@ pub fn init( reserved_physical_memory_areas.clone(), ); } + - - let mut reserved_list: [Option; 32] = Default::default(); + let mut reserved_list: [Option; 32] = Default::default(); for (i, area) in reserved_physical_memory_areas.into_iter().enumerate() { - reserved_list[i] = Some(Chunk { + reserved_list[i] = Some(Region { typ: MemoryRegionType::Reserved, frames: area.borrow().frames.clone(), }); @@ -119,9 +130,9 @@ pub fn init( let mut changed = true; while changed { - let mut temp_reserved_list: [Option; 32] = Default::default(); + let mut temp_reserved_list: [Option; 32] = Default::default(); changed = false; - + let mut temp_reserved_list_idx = 0; for i in 0..temp_reserved_list.len() { if let Some(mut current) = reserved_list[i].clone() { @@ -142,14 +153,14 @@ pub fn init( temp_reserved_list_idx += 1; } } - + reserved_list = temp_reserved_list; } - - + + // Finally, one last sanity check -- ensure no two regions overlap. let all_areas = free_list[..free_list_idx].iter().flatten() - .chain(reserved_list.iter().flatten()); + .chain(reserved_list.iter().flatten()); for (i, elem) in all_areas.clone().enumerate() { let next_idx = i + 1; for other in all_areas.clone().skip(next_idx) { @@ -161,12 +172,32 @@ pub fn init( } } - *FREE_GENERAL_FRAMES_LIST.lock() = StaticArrayRBTree::new(free_list.clone()); - *FREE_RESERVED_FRAMES_LIST.lock() = StaticArrayRBTree::new(reserved_list.clone()); + // Here, since we're sure we now have a list of regions that don't overlap, we can create lists of formally verified Chunks + let mut free_list_w_chunks: [Option; 32] = Default::default(); + let mut reserved_list_w_chunks: [Option; 32] = Default::default(); + for (i, elem) in reserved_list.iter().flatten().enumerate() { + reserved_list_w_chunks[i] = Some(Chunk::new( + MemoryRegionType::Reserved, + elem.frames.clone() + )?); + } + + for (i, elem) in free_list.iter().flatten().enumerate() { + free_list_w_chunks[i] = Some(Chunk::new( + MemoryRegionType::Free, + elem.frames.clone() + )?); + } + + *FREE_GENERAL_FRAMES_LIST.lock() = StaticArrayRBTree::new(free_list_w_chunks); + *FREE_RESERVED_FRAMES_LIST.lock() = StaticArrayRBTree::new(reserved_list_w_chunks); *GENERAL_REGIONS.lock() = StaticArrayRBTree::new(free_list); *RESERVED_REGIONS.lock() = StaticArrayRBTree::new(reserved_list); - Ok(into_allocated_frames) + // Register the callback to create a Chunk. + // This function is not formally-verified and we only call it in the code path for UnmappedFrames. 
+ // trusted_chunk_shim::INTO_VERIFIED_CHUNK_FUNC.call_once(|| trusted_chunk::init()); + Ok((trusted_chunk::init()?, into_allocated_frames)) } @@ -178,7 +209,7 @@ pub fn init( /// the given list of `reserved_physical_memory_areas`. fn check_and_add_free_region( area: &FrameRange, - free_list: &mut [Option; 32], + free_list: &mut [Option; 32], free_list_idx: &mut usize, reserved_physical_memory_areas: R, ) @@ -224,7 +255,7 @@ fn check_and_add_free_region( let new_area = FrameRange::new(current_start, current_end); if new_area.size_in_frames() > 0 { - free_list[*free_list_idx] = Some(Chunk { + free_list[*free_list_idx] = Some(Region { typ: MemoryRegionType::Free, frames: new_area, }); @@ -266,282 +297,6 @@ pub enum MemoryRegionType { Unknown, } -/// A range of contiguous frames. -/// -/// # Ordering and Equality -/// -/// `Chunk` implements the `Ord` trait, and its total ordering is ONLY based on -/// its **starting** `Frame`. This is useful so we can store `Chunk`s in a sorted collection. -/// -/// Similarly, `Chunk` implements equality traits, `Eq` and `PartialEq`, -/// both of which are also based ONLY on the **starting** `Frame` of the `Chunk`. -/// Thus, comparing two `Chunk`s with the `==` or `!=` operators may not work as expected. -/// since it ignores their actual range of frames. -#[derive(Debug, Clone, Eq)] -struct Chunk { - /// The type of this memory chunk, e.g., whether it's in a free or reserved region. - typ: MemoryRegionType, - /// The Frames covered by this chunk, an inclusive range. - frames: FrameRange, -} -impl Chunk { - fn as_allocated_frames(&self) -> AllocatedFrames { - AllocatedFrames { - frames: self.frames.clone(), - } - } - - /// Returns a new `Chunk` with an empty range of frames. - const fn empty() -> Chunk { - Chunk { - typ: MemoryRegionType::Unknown, - frames: FrameRange::empty(), - } - } -} -impl Deref for Chunk { - type Target = FrameRange; - fn deref(&self) -> &FrameRange { - &self.frames - } -} -impl Ord for Chunk { - fn cmp(&self, other: &Self) -> Ordering { - self.frames.start().cmp(other.frames.start()) - } -} -impl PartialOrd for Chunk { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} -impl PartialEq for Chunk { - fn eq(&self, other: &Self) -> bool { - self.frames.start() == other.frames.start() - } -} -impl Borrow for &'_ Chunk { - fn borrow(&self) -> &Frame { - self.frames.start() - } -} - - -/// Represents a range of allocated physical memory [`Frame`]s; derefs to [`FrameRange`]. -/// -/// These frames are not immediately accessible because they're not yet mapped -/// by any virtual memory pages. -/// You must do that separately in order to create a `MappedPages` type, -/// which can then be used to access the contents of these frames. -/// -/// This object represents ownership of the range of allocated physical frames; -/// if this object falls out of scope, its allocated frames will be auto-deallocated upon drop. -pub struct AllocatedFrames { - frames: FrameRange, -} - -// AllocatedFrames must not be Cloneable, and it must not expose its inner frames as mutable. -assert_not_impl_any!(AllocatedFrames: DerefMut, Clone); - -impl Deref for AllocatedFrames { - type Target = FrameRange; - fn deref(&self) -> &FrameRange { - &self.frames - } -} -impl fmt::Debug for AllocatedFrames { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "AllocatedFrames({:?})", self.frames) - } -} - -impl AllocatedFrames { - /// Returns an empty AllocatedFrames object that performs no frame allocation. 
- /// Can be used as a placeholder, but will not permit any real usage. - pub const fn empty() -> AllocatedFrames { - AllocatedFrames { - frames: FrameRange::empty() - } - } - - /// Merges the given `AllocatedFrames` object `other` into this `AllocatedFrames` object (`self`). - /// This is just for convenience and usability purposes, it performs no allocation or remapping. - /// - /// The given `other` must be physically contiguous with `self`, i.e., come immediately before or after `self`. - /// That is, either `self.start == other.end + 1` or `self.end + 1 == other.start` must be true. - /// - /// If either of those conditions are met, `self` is modified and `Ok(())` is returned, - /// otherwise `Err(other)` is returned. - pub fn merge(&mut self, other: AllocatedFrames) -> Result<(), AllocatedFrames> { - if *self.start() == *other.end() + 1 { - // `other` comes contiguously before `self` - self.frames = FrameRange::new(*other.start(), *self.end()); - } - else if *self.end() + 1 == *other.start() { - // `self` comes contiguously before `other` - self.frames = FrameRange::new(*self.start(), *other.end()); - } - else { - // non-contiguous - return Err(other); - } - - // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. - core::mem::forget(other); - Ok(()) - } - - /// Splits this `AllocatedFrames` into two separate `AllocatedFrames` objects: - /// * `[beginning : at_frame - 1]` - /// * `[at_frame : end]` - /// - /// This function follows the behavior of [`core::slice::split_at()`], - /// thus, either one of the returned `AllocatedFrames` objects may be empty. - /// * If `at_frame == self.start`, the first returned `AllocatedFrames` object will be empty. - /// * If `at_frame == self.end + 1`, the second returned `AllocatedFrames` object will be empty. - /// - /// Returns an `Err` containing this `AllocatedFrames` if `at_frame` is otherwise out of bounds. - /// - /// [`core::slice::split_at()`]: https://doc.rust-lang.org/core/primitive.slice.html#method.split_at - pub fn split(self, at_frame: Frame) -> Result<(AllocatedFrames, AllocatedFrames), AllocatedFrames> { - let end_of_first = at_frame - 1; - - let (first, second) = if at_frame == *self.start() && at_frame <= *self.end() { - let first = FrameRange::empty(); - let second = FrameRange::new(at_frame, *self.end()); - (first, second) - } - else if at_frame == (*self.end() + 1) && end_of_first >= *self.start() { - let first = FrameRange::new(*self.start(), *self.end()); - let second = FrameRange::empty(); - (first, second) - } - else if at_frame > *self.start() && end_of_first <= *self.end() { - let first = FrameRange::new(*self.start(), end_of_first); - let second = FrameRange::new(at_frame, *self.end()); - (first, second) - } - else { - return Err(self); - }; - - // ensure the original AllocatedFrames doesn't run its drop handler and free its frames. - core::mem::forget(self); - Ok(( - AllocatedFrames { frames: first }, - AllocatedFrames { frames: second }, - )) - } - - /// Returns an `AllocatedFrame` if this `AllocatedFrames` object contains only one frame. - /// - /// ## Panic - /// Panics if this `AllocatedFrame` contains multiple frames or zero frames. - pub fn as_allocated_frame(&self) -> AllocatedFrame { - assert!(self.size_in_frames() == 1); - AllocatedFrame { - frame: *self.start(), - _phantom: PhantomData, - } - } -} - -/// This function is a callback used to convert `UnmappedFrames` into `AllocatedFrames`. 
-/// `UnmappedFrames` represents frames that have been unmapped from a page that had -/// exclusively mapped them, indicating that no others pages have been mapped -/// to those same frames, and thus, they can be safely deallocated. -/// -/// This exists to break the cyclic dependency cycle between this crate and -/// the `page_table_entry` crate, since `page_table_entry` must depend on types -/// from this crate in order to enforce safety when modifying page table entries. -fn into_allocated_frames(frames: FrameRange) -> AllocatedFrames { - AllocatedFrames { frames } -} - -impl Drop for AllocatedFrames { - fn drop(&mut self) { - if self.size_in_frames() == 0 { return; } - - let (list, typ) = if contains_any(&RESERVED_REGIONS.lock(), &self.frames) { - (&FREE_RESERVED_FRAMES_LIST, MemoryRegionType::Reserved) - } else { - (&FREE_GENERAL_FRAMES_LIST, MemoryRegionType::Free) - }; - // trace!("frame_allocator: deallocating {:?}, typ {:?}", self, typ); - - // Simply add the newly-deallocated chunk to the free frames list. - let mut locked_list = list.lock(); - let res = locked_list.insert(Chunk { - typ, - frames: self.frames.clone(), - }); - match res { - Ok(_inserted_free_chunk) => (), - Err(c) => error!("BUG: couldn't insert deallocated chunk {:?} into free frame list", c), - } - - // Here, we could optionally use above `_inserted_free_chunk` to merge the adjacent (contiguous) chunks - // before or after the newly-inserted free chunk. - // However, there's no *need* to do so until we actually run out of address space or until - // a requested address is in a chunk that needs to be merged. - // Thus, for performance, we save that for those future situations. - } -} - -impl<'f> IntoIterator for &'f AllocatedFrames { - type IntoIter = AllocatedFramesIter<'f>; - type Item = AllocatedFrame<'f>; - fn into_iter(self) -> Self::IntoIter { - AllocatedFramesIter { - _owner: self, - range: self.frames.clone(), - } - } -} - -/// An iterator over each [`AllocatedFrame`] in a range of [`AllocatedFrames`]. -/// -/// We must implement our own iterator type here in order to tie the lifetime `'f` -/// of a returned `AllocatedFrame<'f>` type to the lifetime of its containing `AllocatedFrames`. -/// This is because the underlying type of `AllocatedFrames` is a [`FrameRange`], -/// which itself is a [`core::ops::RangeInclusive`] of [`Frame`]s, and unfortunately the -/// `RangeInclusive` type doesn't implement an immutable iterator. -/// -/// Iterating through a `RangeInclusive` actually modifies its own internal range, -/// so we must avoid doing that because it would break the semantics of a `FrameRange`. -/// In fact, this is why [`FrameRange`] only implements `IntoIterator` but -/// does not implement [`Iterator`] itself. -pub struct AllocatedFramesIter<'f> { - _owner: &'f AllocatedFrames, - range: FrameRange, -} -impl<'f> Iterator for AllocatedFramesIter<'f> { - type Item = AllocatedFrame<'f>; - fn next(&mut self) -> Option { - self.range.next().map(|frame| - AllocatedFrame { - frame, _phantom: PhantomData, - } - ) - } -} - -/// A reference to a single frame within a range of `AllocatedFrames`. -/// -/// The lifetime of this type is tied to the lifetime of its owning `AllocatedFrames`. 
-#[derive(Debug)] -pub struct AllocatedFrame<'f> { - frame: Frame, - _phantom: PhantomData<&'f Frame>, -} -impl<'f> Deref for AllocatedFrame<'f> { - type Target = Frame; - fn deref(&self) -> &Self::Target { - &self.frame - } -} -assert_not_impl_any!(AllocatedFrame: DerefMut, Clone); - /// A series of pending actions related to frame allocator bookkeeping, /// which may result in heap allocation. @@ -582,19 +337,22 @@ impl<'list> DeferredAllocAction<'list> { } impl<'list> Drop for DeferredAllocAction<'list> { fn drop(&mut self) { + let chunk1 = core::mem::replace(&mut self.free1, Chunk::empty()); + let chunk2 = core::mem::replace(&mut self.free2, Chunk::empty()); + // Insert all of the chunks, both allocated and free ones, into the list. - if self.free1.size_in_frames() > 0 { - match self.free1.typ { - MemoryRegionType::Free => { self.free_list.lock().insert(self.free1.clone()).unwrap(); } - MemoryRegionType::Reserved => { self.reserved_list.lock().insert(self.free1.clone()).unwrap(); } - _ => error!("BUG likely: DeferredAllocAction encountered free1 chunk {:?} of a type Unknown", self.free1), + if chunk1.size_in_frames() > 0 { + match chunk1.typ() { + MemoryRegionType::Free => { self.free_list.lock().insert(chunk1).unwrap(); } + MemoryRegionType::Reserved => { self.reserved_list.lock().insert(chunk1).unwrap(); } + _ => error!("BUG likely: DeferredAllocAction encountered free1 chunk {:?} of a type Unknown", chunk1), } } - if self.free2.size_in_frames() > 0 { - match self.free2.typ { - MemoryRegionType::Free => { self.free_list.lock().insert(self.free2.clone()).unwrap(); } - MemoryRegionType::Reserved => { self.reserved_list.lock().insert(self.free2.clone()).unwrap(); } - _ => error!("BUG likely: DeferredAllocAction encountered free2 chunk {:?} of a type Unknown", self.free2), + if chunk2.size_in_frames() > 0 { + match chunk2.typ() { + MemoryRegionType::Free => { self.free_list.lock().insert(chunk2).unwrap(); } + MemoryRegionType::Reserved => { self.reserved_list.lock().insert(chunk2).unwrap(); } + _ => error!("BUG likely: DeferredAllocAction encountered free2 chunk {:?} of a type Unknown", chunk2), }; } } @@ -604,23 +362,20 @@ impl<'list> Drop for DeferredAllocAction<'list> { /// Possible allocation errors. #[derive(Debug)] enum AllocationError { - /// The requested address was not free: it was already allocated. + /// The requested address was not free: it was already allocated, or is outside the range of this allocator. AddressNotFree(Frame, usize), - /// The requested address was outside the range of this allocator. - AddressNotFound(Frame, usize), /// The address space was full, or there was not a large-enough chunk /// or enough remaining chunks that could satisfy the requested allocation size. OutOfAddressSpace(usize), - /// The starting address was found, but not all successive contiguous frames were available. - ContiguousChunkNotFound(Frame, usize) + /// ToDo: remove + InternalError, } impl From for &'static str { fn from(alloc_err: AllocationError) -> &'static str { match alloc_err { - AllocationError::AddressNotFree(..) => "requested address was in use", - AllocationError::AddressNotFound(..) => "requested address was outside of this frame allocator's range", + AllocationError::AddressNotFree(..) => "address was in use or outside of this frame allocator's range", AllocationError::OutOfAddressSpace(..) => "out of physical address space", - AllocationError::ContiguousChunkNotFound(..) 
=> "only some of the requested frames were available", + AllocationError::InternalError => "problem with frame allocator logic", } } } @@ -643,7 +398,7 @@ fn find_specific_chunk( if let Some(chunk) = elem { if requested_frame >= *chunk.start() && requested_end_frame <= *chunk.end() { // Here: `chunk` was big enough and did contain the requested address. - return Ok(allocate_from_chosen_chunk(requested_frame, num_frames, &chunk.clone(), ValueRefMut::Array(elem))); + return allocate_from_chosen_chunk(requested_frame, num_frames, ValueRefMut::Array(elem)); } } } @@ -653,7 +408,7 @@ fn find_specific_chunk( if let Some(chunk) = cursor_mut.get().map(|w| w.deref().clone()) { if chunk.contains(&requested_frame) { if requested_end_frame <= *chunk.end() { - return Ok(allocate_from_chosen_chunk(requested_frame, num_frames, &chunk, ValueRefMut::RBTree(cursor_mut))); + return allocate_from_chosen_chunk(requested_frame, num_frames, ValueRefMut::RBTree(cursor_mut)); } else { // We found the chunk containing the requested address, but it was too small to cover all of the requested frames. // Let's try to merge the next-highest contiguous chunk to see if those two chunks together @@ -664,14 +419,16 @@ fn find_specific_chunk( // Requested address: {:?}, num_frames: {}, chunk: {:?}", // requested_frame, num_frames, chunk, // ); - let next_contiguous_chunk: Option = { + let initial_chunk_ref: Option> = { let next_cursor = cursor_mut.peek_next(); if let Some(next_chunk) = next_cursor.get().map(|w| w.deref()) { if *chunk.end() + 1 == *next_chunk.start() { // Here: next chunk was contiguous with the original chunk. if requested_end_frame <= *next_chunk.end() { // trace!("Frame allocator: found suitably-large contiguous next {:?} after initial too-small {:?}", next_chunk, chunk); - Some(next_chunk.clone()) + // We cannot clone a Chunk, so we return a reference to the first chunk, + // so that it can be removed and then we can remove the next chunk. + Some(ValueRefMut::RBTree(cursor_mut)) } else { todo!("Frame allocator: found chunk containing requested address, but it was too small. \ Theseus does not yet support merging more than two chunks during an allocation request. \ @@ -686,19 +443,25 @@ fn find_specific_chunk( } } else { trace!("Frame allocator: couldn't get next chunk above initial too-small {:?}", chunk); - trace!("Requesting new chunk starting at {:?}, num_frames: {}", *chunk.end() + 1, requested_end_frame.number() - chunk.end().number()); - return Err(AllocationError::ContiguousChunkNotFound(*chunk.end() + 1, requested_end_frame.number() - chunk.end().number())); + None } }; - if let Some(mut next_chunk) = next_contiguous_chunk { - // We found a suitable chunk that came contiguously after the initial too-small chunk. - // Remove the initial chunk (since we have a cursor pointing to it already) - // and "merge" it into this `next_chunk`. - let _removed_initial_chunk = cursor_mut.remove(); - // trace!("Frame allocator: removed suitably-large contiguous next {:?} after initial too-small {:?}", _removed_initial_chunk, chunk); - // Here, `cursor_mut` has been moved forward to point to the `next_chunk` now. 
- next_chunk.frames = FrameRange::new(*chunk.start(), *next_chunk.end()); - return Ok(allocate_from_chosen_chunk(requested_frame, num_frames, &next_chunk, ValueRefMut::RBTree(cursor_mut))); + + if let Some(initial_chunk_ref) = initial_chunk_ref { + // remove the first chunk + let initial_chunk = retrieve_chunk_from_ref(initial_chunk_ref).ok_or(AllocationError::InternalError)?; + + // now search for the next contiguous chunk, that we already know exists + let requested_contiguous_frame = *initial_chunk.end() + 1; + let mut cursor_mut = tree.upper_bound_mut(Bound::Included(&requested_contiguous_frame)); + if let Some(next_chunk) = cursor_mut.get().map(|w| w.deref()) { + if next_chunk.contains(&requested_contiguous_frame) { + // merge the next chunk into the intiial chunk + return adjust_chosen_chunk_contiguous(requested_frame, num_frames, initial_chunk, ValueRefMut::RBTree(cursor_mut)); + } else { + trace!("This should never fail, since we've already found a contiguous chunk."); + } + } } } } @@ -706,7 +469,7 @@ fn find_specific_chunk( } } - Err(AllocationError::AddressNotFound(requested_frame, num_frames)) + Err(AllocationError::AddressNotFree(requested_frame, num_frames)) } @@ -721,11 +484,11 @@ fn find_any_chunk( for elem in arr.iter_mut() { if let Some(chunk) = elem { // Skip chunks that are too-small or in the designated regions. - if chunk.size_in_frames() < num_frames || chunk.typ != MemoryRegionType::Free { + if chunk.size_in_frames() < num_frames || chunk.typ() != MemoryRegionType::Free { continue; } else { - return Ok(allocate_from_chosen_chunk(*chunk.start(), num_frames, &chunk.clone(), ValueRefMut::Array(elem))); + return allocate_from_chosen_chunk(*chunk.start(), num_frames, ValueRefMut::Array(elem)); } } } @@ -736,8 +499,8 @@ fn find_any_chunk( // This results in an O(1) allocation time in the general case, until all address ranges are already in use. let mut cursor = tree.upper_bound_mut(Bound::<&Chunk>::Unbounded); while let Some(chunk) = cursor.get().map(|w| w.deref()) { - if num_frames <= chunk.size_in_frames() && chunk.typ == MemoryRegionType::Free { - return Ok(allocate_from_chosen_chunk(*chunk.start(), num_frames, &chunk.clone(), ValueRefMut::RBTree(cursor))); + if num_frames <= chunk.size_in_frames() && chunk.typ() == MemoryRegionType::Free { + return allocate_from_chosen_chunk(*chunk.start(), num_frames, ValueRefMut::RBTree(cursor)); } warn!("Frame allocator: inefficient scenario: had to search multiple chunks \ (skipping {:?}) while trying to allocate {} frames at any address.", @@ -756,6 +519,24 @@ fn find_any_chunk( } +/// Removes a chunk from the RBTree. +/// `chosen_chunk_ref` is basically a wrapper over the cursor which stores the position of the chosen_chunk. +fn retrieve_chunk_from_ref(mut chosen_chunk_ref: ValueRefMut) -> Option { + // Remove the chosen chunk from the free frame list. + let removed_val = chosen_chunk_ref.remove(); + + let chosen_chunk = match removed_val { + RemovedValue::Array(c) => c, + RemovedValue::RBTree(option_chunk) => { + if let Some(boxed_chunk) = option_chunk { + Some(boxed_chunk.into_inner()) + } else { + None + } + } + }; + chosen_chunk +} /// The final part of the main allocation routine that splits the given chosen chunk /// into multiple smaller chunks, thereby "allocating" frames from it. 
@@ -765,105 +546,71 @@ fn find_any_chunk( fn allocate_from_chosen_chunk( start_frame: Frame, num_frames: usize, - chosen_chunk: &Chunk, mut chosen_chunk_ref: ValueRefMut, -) -> (AllocatedFrames, DeferredAllocAction<'static>) { - let (new_allocation, before, after) = split_chosen_chunk(start_frame, num_frames, chosen_chunk); - +) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { // Remove the chosen chunk from the free frame list. - let _removed_chunk = chosen_chunk_ref.remove(); + let chosen_chunk = retrieve_chunk_from_ref(chosen_chunk_ref).ok_or(AllocationError::InternalError)?; + + let (new_allocation, before, after) = chosen_chunk.split(start_frame, num_frames); // TODO: Re-use the allocated wrapper if possible, rather than allocate a new one entirely. // if let RemovedValue::RBTree(Some(wrapper_adapter)) = _removed_chunk { ... } - ( + Ok(( new_allocation.as_allocated_frames(), DeferredAllocAction::new(before, after), - ) + )) } -/// An inner function that breaks up the given chunk into multiple smaller chunks. -/// -/// Returns a tuple of three chunks: -/// 1. The `Chunk` containing the requested range of frames starting at `start_frame`. -/// 2. The range of frames in the `chosen_chunk` that came before the beginning of the requested frame range. -/// 3. The range of frames in the `chosen_chunk` that came after the end of the requested frame range. -fn split_chosen_chunk( +/// Merges the contiguous chunk given by `chunk2_ref` into `chunk1`. +/// Then allocates from the newly merged chunk. +fn adjust_chosen_chunk_contiguous( start_frame: Frame, num_frames: usize, - chosen_chunk: &Chunk, -) -> (Chunk, Option, Option) { - // The new allocated chunk might start in the middle of an existing chunk, - // so we need to break up that existing chunk into 3 possible chunks: before, newly-allocated, and after. - // - // Because Frames and PhysicalAddresses use saturating add/subtract, we need to double-check that - // we don't create overlapping duplicate Chunks at either the very minimum or the very maximum of the address space. - let new_allocation = Chunk { - typ: chosen_chunk.typ, - // The end frame is an inclusive bound, hence the -1. Parentheses are needed to avoid overflow. - frames: FrameRange::new(start_frame, start_frame + (num_frames - 1)), - }; - let before = if start_frame == MIN_FRAME { - None - } else { - Some(Chunk { - typ: chosen_chunk.typ, - frames: FrameRange::new(*chosen_chunk.start(), *new_allocation.start() - 1), - }) - }; - let after = if new_allocation.end() == &MAX_FRAME { - None - } else { - Some(Chunk { - typ: chosen_chunk.typ, - frames: FrameRange::new(*new_allocation.end() + 1, *chosen_chunk.end()), - }) - }; + mut initial_chunk: Chunk, + contiguous_chunk_ref: ValueRefMut, +) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { + let mut contiguous_chunk = retrieve_chunk_from_ref(contiguous_chunk_ref).ok_or(AllocationError::InternalError)?; - // some sanity checks -- these can be removed or disabled for better performance - if let Some(ref b) = before { - assert!(!new_allocation.contains(b.end())); - assert!(!b.contains(new_allocation.start())); - } - if let Some(ref a) = after { - assert!(!new_allocation.contains(a.start())); - assert!(!a.contains(new_allocation.end())); - } + initial_chunk.merge(contiguous_chunk).map_err(|_| { + trace!("contiguous chuynks couldn't ber merged, despite previous checks"); + //To Do: should we reinsert chunk to list here. 
+ AllocationError::InternalError + })?; + let (new_allocation, before, after) = initial_chunk.split(start_frame, num_frames); - (new_allocation, before, after) -} -/// Returns `true` if the given list contains *any* of the given `frames`. -fn contains_any( - list: &StaticArrayRBTree, - frames: &FrameRange, + Ok(( + new_allocation.as_allocated_frames(), + DeferredAllocAction::new(before, after), + )) +} + +/// Returns whether the given `Frame` is contained within the given `list`. +fn frame_is_in_list( + list: &StaticArrayRBTree, + frame: &Frame, ) -> bool { match &list.0 { Inner::Array(ref arr) => { for chunk in arr.iter().flatten() { - if chunk.overlap(frames).is_some() { + if chunk.contains(frame) { return true; } } } - Inner::RBTree(ref tree) => { - let mut cursor = tree.upper_bound(Bound::Included(frames.start())); - while let Some(chunk) = cursor.get() { - if chunk.start() > frames.end() { - // We're iterating in ascending order over a sorted tree, so we can stop - // looking for overlapping regions once we pass the end of `frames`. - break; - } - - if chunk.overlap(frames).is_some() { + Inner::RBTree(ref tree) => { + let cursor = tree.upper_bound(Bound::Included(frame)); + if let Some(chunk) = cursor.get().map(|w| w.deref()) { + if chunk.contains(frame) { return true; } - cursor.move_next(); } } } + false } @@ -912,7 +659,62 @@ fn add_reserved_region( } } - list.insert(Chunk { + list.insert(Chunk::new( + MemoryRegionType::Reserved, + frames.clone(), + )?).map_err(|_c| "BUG: Failed to insert non-overlapping frames into list.")?; + + Ok(frames) +} + + +/// Adds the given `frames` to the given `list` as a Chunk of reserved frames. +/// +/// Returns the range of **new** frames that were added to the list, +/// which will be a subset of the given input `frames`. +/// +/// Currently, this function adds no new frames at all if any frames within the given `frames` list +/// overlap any existing regions at all. +/// Handling partially-overlapping regions +fn add_reserved_region_to_region_list( + list: &mut StaticArrayRBTree, + frames: FrameRange, +) -> Result { + + // Check whether the reserved region overlaps any existing regions. + match &mut list.0 { + Inner::Array(ref mut arr) => { + for elem in arr.iter() { + if let Some(chunk) = elem { + if let Some(_overlap) = chunk.overlap(&frames) { + // trace!("Failed to add reserved region {:?} due to overlap {:?} with existing chunk {:?}", + // frames, _overlap, chunk + // ); + return Err("Failed to add reserved region that overlapped with existing reserved regions (array)."); + } + } + } + } + Inner::RBTree(ref mut tree) => { + let mut cursor_mut = tree.upper_bound_mut(Bound::Included(frames.start())); + while let Some(chunk) = cursor_mut.get().map(|w| w.deref()) { + if chunk.start() > frames.end() { + // We're iterating in ascending order over a sorted tree, + // so we can stop looking for overlapping regions once we pass the end of the new frames to add. 
+ break; + } + if let Some(_overlap) = chunk.overlap(&frames) { + // trace!("Failed to add reserved region {:?} due to overlap {:?} with existing chunk {:?}", + // frames, _overlap, chunk + // ); + return Err("Failed to add reserved region that overlapped with existing reserved regions (RBTree)."); + } + cursor_mut.move_next(); + } + } + } + + list.insert(Region { typ: MemoryRegionType::Reserved, frames: frames.clone(), }).map_err(|_c| "BUG: Failed to insert non-overlapping frames into list.")?; @@ -954,45 +756,36 @@ pub fn allocate_frames_deferred( if let Some(paddr) = requested_paddr { let start_frame = Frame::containing_address(paddr); + let end_frame = start_frame + (num_frames - 1); + // Try to allocate the frames at the specific address. let mut free_reserved_frames_list = FREE_RESERVED_FRAMES_LIST.lock(); - // First, attempt to allocate the requested frames from the free reserved list. - let first_allocation_attempt = find_specific_chunk(&mut free_reserved_frames_list, start_frame, num_frames); - let (requested_start_frame, requested_num_frames) = match first_allocation_attempt { - Ok(success) => return Ok(success), - Err(alloc_err) => match alloc_err { - AllocationError::AddressNotFound(..) => { - // If allocation failed, then the requested `start_frame` may be found in the general-purpose list - match find_specific_chunk(&mut FREE_GENERAL_FRAMES_LIST.lock(), start_frame, num_frames) { - Ok(result) => return Ok(result), - Err(AllocationError::AddressNotFound(..)) => (start_frame, num_frames), - Err(AllocationError::ContiguousChunkNotFound(..)) => { - // because we are searching the general frames list, it doesn't matter if part of the chunk was found - // since we only create new reserved frames. - trace!("Only part of the requested allocation was found in the general frames list."); - return Err(alloc_err).map_err(From::from); - } - Err(_other) => return Err(alloc_err).map_err(From::from), - } - }, - AllocationError::ContiguousChunkNotFound(f, numf) => (f, numf), - _ => return Err(alloc_err).map_err(From::from), + if let Ok(success) = find_specific_chunk(&mut free_reserved_frames_list, start_frame, num_frames) { + Ok(success) + } else { + // If allocation failed, then the requested `start_frame` may be found in the general-purpose list + // or may represent a new, previously-unknown reserved region that we must add. + // We first attempt to allocate it from the general-purpose free regions. + if let Ok(result) = find_specific_chunk(&mut FREE_GENERAL_FRAMES_LIST.lock(), start_frame, num_frames) { + Ok(result) + } + // If we failed to allocate the requested frames from the general list, + // we can add a new reserved region containing them, + // but ONLY if those frames are *NOT* in the general-purpose region. + else if { + let g = GENERAL_REGIONS.lock(); + !frame_is_in_list(&g, &start_frame) && !frame_is_in_list(&g, &end_frame) + } { + let frames = FrameRange::new(start_frame, end_frame); + let new_reserved_frames = add_reserved_region_to_region_list(&mut RESERVED_REGIONS.lock(), frames)?; + // If we successfully added a new reserved region, + // then add those frames to the actual list of *available* reserved regions. 
+ let _new_free_reserved_frames = add_reserved_region(&mut free_reserved_frames_list, new_reserved_frames.clone())?; + assert_eq!(new_reserved_frames, _new_free_reserved_frames); + find_specific_chunk(&mut free_reserved_frames_list, start_frame, num_frames) + } + else { + Err(AllocationError::AddressNotFree(start_frame, num_frames)) } - }; - - // If we failed to allocate the requested frames from the general list, - // we can add a new reserved region containing them, - // but ONLY if those frames are *NOT* in the general-purpose region. - let requested_frames = FrameRange::new(requested_start_frame, requested_start_frame + (requested_num_frames - 1)); - if !contains_any(&GENERAL_REGIONS.lock(), &requested_frames) { - let new_reserved_frames = add_reserved_region(&mut RESERVED_REGIONS.lock(), requested_frames)?; - // If we successfully added a new reserved region, - // then add those frames to the actual list of *available* reserved regions. - let _new_free_reserved_frames = add_reserved_region(&mut free_reserved_frames_list, new_reserved_frames.clone())?; - assert_eq!(new_reserved_frames, _new_free_reserved_frames); - find_specific_chunk(&mut free_reserved_frames_list, start_frame, num_frames) - } - else { - Err(AllocationError::AddressNotFree(start_frame, num_frames)) } } else { find_any_chunk(&mut FREE_GENERAL_FRAMES_LIST.lock(), num_frames) @@ -1065,6 +858,7 @@ pub fn allocate_frames_at(paddr: PhysicalAddress, num_frames: usize) -> Result Region { + Region { + typ: MemoryRegionType::Unknown, + frames: FrameRange::empty(), + } + } +} + +impl Deref for Region { + type Target = FrameRange; + fn deref(&self) -> &FrameRange { + &self.frames + } +} +impl Ord for Region { + fn cmp(&self, other: &Self) -> Ordering { + self.frames.start().cmp(other.frames.start()) + } +} +impl PartialOrd for Region { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} +impl PartialEq for Region { + fn eq(&self, other: &Self) -> bool { + self.frames.start() == other.frames.start() + } +} +impl Borrow for &'_ Region { + fn borrow(&self) -> &Frame { + self.frames.start() + } +} \ No newline at end of file diff --git a/kernel/frame_allocator/src/static_array_rb_tree.rs b/kernel/frame_allocator/src/static_array_rb_tree.rs index 36482dbc70..fa9fb638f1 100644 --- a/kernel/frame_allocator/src/static_array_rb_tree.rs +++ b/kernel/frame_allocator/src/static_array_rb_tree.rs @@ -42,6 +42,10 @@ impl Wrapper { inner: value, }) } + + pub(crate) fn into_inner(self) -> T { + self.inner + } } diff --git a/kernel/frame_allocator/src/trusted_chunk_shim.rs b/kernel/frame_allocator/src/trusted_chunk_shim.rs new file mode 100644 index 0000000000..d5f8d008e8 --- /dev/null +++ b/kernel/frame_allocator/src/trusted_chunk_shim.rs @@ -0,0 +1,233 @@ +//! A trusted wrapper over the verified Chunk. +//! Needed because verification fails on a trusted chunk that stores a FrameRange or RangeInclusive, +//! but succeeds with RangeInclusive. +//! +//! We should be able to remove this module and work directly with the verified crate in the foreseeable future. +//! 
All this model should do is amke sure that the start and end of the stored `frames` is equal to the start and end of the `verified_chunk` + +use alloc::collections::btree_map::Range; +use kernel_config::memory::PAGE_SIZE; +use memory_structs::{FrameRange, Frame, PhysicalAddress}; +use range_inclusive::RangeInclusive; +use crate::{MemoryRegionType, AllocatedFrames, MIN_FRAME, MAX_FRAME}; +use core::{borrow::Borrow, cmp::{Ordering, min, max}, fmt, ops::{Deref, DerefMut}}; +use spin::{Once, Mutex}; +use trusted_chunk::{ + trusted_chunk::*, + linked_list::List, + static_array::StaticArray, +}; + +static CHUNK_ALLOCATOR: Mutex = Mutex::new(TrustedChunkAllocator::new()); + +pub(crate) fn switch_chunk_allocator_to_heap_structure() { + let _ = CHUNK_ALLOCATOR.lock().switch_to_heap_allocated(); +} + +#[derive(Debug, Eq)] +pub struct Chunk { + /// The type of this memory chunk, e.g., whether it's in a free or reserved region. + typ: MemoryRegionType, + /// The Frames covered by this chunk, an inclusive range. + frames: FrameRange, + /// The actual verified chunk + verified_chunk: TrustedChunk +} + +assert_not_impl_any!(Chunk: DerefMut, Clone); + +impl Chunk { + pub(crate) fn new(typ: MemoryRegionType, frames: FrameRange) -> Result { + let verified_chunk = CHUNK_ALLOCATOR.lock().create_chunk(frames.to_range_inclusive()) + .map(|(chunk, _)| chunk) + .map_err(|chunk_error|{ + match chunk_error { + ChunkCreationError::Overlap(idx) => "Failed to create a verified chunk due to an overlap", + ChunkCreationError::NoSpace => "Before the heap is initialized, requested more chunks than there is space for (64)", + ChunkCreationError::InvalidRange => "Could not create a chunk for an empty range, use the empty() function" + } + })?; + + Ok(Chunk { + typ, + frames, + verified_chunk + }) + } + + pub(crate) fn from_trusted_chunk(verified_chunk: TrustedChunk, frames: FrameRange, typ: MemoryRegionType) -> Chunk { + Chunk { + typ, + frames, + verified_chunk + } + } + + pub(crate) fn frames(&self) -> FrameRange { + self.frames.clone() + } + + pub(crate) fn typ(&self) -> MemoryRegionType { + self.typ + } + + pub(crate) fn as_allocated_frames(self) -> AllocatedFrames { + AllocatedFrames { + frames: self, + } + } + + /// Returns a new `Chunk` with an empty range of frames. + pub(crate) const fn empty() -> Chunk { + Chunk { + typ: MemoryRegionType::Unknown, + frames: FrameRange::empty(), + verified_chunk: TrustedChunk::empty() + } + } + + pub(crate) fn merge(&mut self, mut other: Chunk) -> Result<(), Chunk> { + if self.is_empty() || other.is_empty() { + return Err(other); + } + + // take out the TrustedChunk from other + let other_verified_chunk = core::mem::replace(&mut other.verified_chunk, TrustedChunk::empty()); + + // merged the other TrustedChunk with self + // failure here means that the chunks cannot be merged + self.verified_chunk.merge(other_verified_chunk) + .map_err(|vchunk| { + let _ = core::mem::replace(&mut other.verified_chunk, vchunk); + other + })?; + + // use the newly merged TrustedChunk to update the frame range + self.frames = into_frame_range(&self.verified_chunk.frames()); + + Ok(()) + } + + /// An inner function that breaks up the given chunk into multiple smaller chunks. + /// + /// Returns a tuple of three chunks: + /// 1. The `Chunk` containing the requested range of frames starting at `start_frame`. + /// 2. The range of frames in the `self` that came before the beginning of the requested frame range. + /// 3. 
The range of frames in the `self` that came after the end of the requested frame range. + pub fn split( + mut self, + start_frame: Frame, + num_frames: usize, + ) -> (Chunk, Option, Option) { + if self.is_empty() { + return (self, None, None); + } + + // take out the TrustedChunk + let verified_chunk = core::mem::replace(&mut self.verified_chunk, TrustedChunk::empty()); + + let (before, new_allocation, after) = match verified_chunk.split(start_frame.number(), num_frames) { + Ok(x) => x, + Err(vchunk) => { + let _ = core::mem::replace(&mut self.verified_chunk, vchunk); + return (self, None, None); + } + }; + + (Chunk { + typ: self.typ, + frames: into_frame_range(&new_allocation.frames()), + verified_chunk: new_allocation + }, + before.and_then(|vchunk| + Some(Chunk{ + typ: self.typ, + frames: into_frame_range(&vchunk.frames()), + verified_chunk: vchunk + }) + ), + after.and_then(|vchunk| + Some(Chunk{ + typ: self.typ, + frames: into_frame_range(&vchunk.frames()), + verified_chunk: vchunk + }) + )) + } + + pub fn split_at(mut self, at_frame: Frame) -> Result<(Chunk, Chunk), Chunk> { + if self.is_empty() { + return Err(self); + } + let typ = self.typ; + + // take out the TrustedChunk + let verified_chunk = core::mem::replace(&mut self.verified_chunk, TrustedChunk::empty()); + + let (first, second) = verified_chunk.split_at(at_frame.number()) + .map_err(|vchunk| { + let _ = core::mem::replace(&mut self.verified_chunk, vchunk); + self + })?; + + Ok((Chunk { + typ, + frames: into_frame_range(&first.frames()), + verified_chunk: first + }, + Chunk { + typ, + frames: into_frame_range(&second.frames()), + verified_chunk: second + })) + } +} + +impl Deref for Chunk { + type Target = FrameRange; + fn deref(&self) -> &FrameRange { + &self.frames + } +} +impl Ord for Chunk { + fn cmp(&self, other: &Self) -> Ordering { + self.frames.start().cmp(other.frames.start()) + } +} +impl PartialOrd for Chunk { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} +impl PartialEq for Chunk { + fn eq(&self, other: &Self) -> bool { + self.frames.start() == other.frames.start() + } +} +impl Borrow for &'_ Chunk { + fn borrow(&self) -> &Frame { + self.frames.start() + } +} + + +fn into_frame_range(frames: &RangeInclusive) -> FrameRange { + let start = FrameNum{ frame: *frames.start() }.into_frame() + .expect("Verified chunk start was not a valid frame"); + + let end = FrameNum{ frame: *frames.end() }.into_frame() + .expect("Verified chunk end was not a valid frame"); + + FrameRange::new(start, end) +} + +struct FrameNum { + frame: usize +} + +impl FrameNum { + fn into_frame(&self) -> Option { + PhysicalAddress::new(self.frame * PAGE_SIZE) + .and_then(|addr| Some(Frame::containing_address(addr))) + } +} \ No newline at end of file diff --git a/kernel/mem_into_fns/Cargo.toml b/kernel/mem_into_fns/Cargo.toml new file mode 100644 index 0000000000..9ee373ddd4 --- /dev/null +++ b/kernel/mem_into_fns/Cargo.toml @@ -0,0 +1,28 @@ +[package] +authors = ["Ramla Ijaz "] +name = "mem_into_fns" +description = "trusted callbacks to recreate memory ranges from an unmapped PTE" +version = "0.1.0" + +[dependencies] +log = "0.4.8" +spin = "0.9.4" +core2 = { version = "0.4.0", default-features = false, features = ["alloc", "nightly"] } +x86_64 = "0.14.8" +range_inclusive = {git = "https://github.com/Ramla-I/range_inclusive"} + +[dependencies.page_table_entry] +path = "../page_table_entry" + +[dependencies.frame_allocator] +path = "../frame_allocator" + +[dependencies.trusted_chunk] +path = 
"../../libs/trusted_chunk" + +[dependencies.memory_structs] +path = "../memory_structs" + + +[lib] +crate-type = ["rlib"] \ No newline at end of file diff --git a/kernel/mem_into_fns/src/lib.rs b/kernel/mem_into_fns/src/lib.rs new file mode 100644 index 0000000000..312f638faa --- /dev/null +++ b/kernel/mem_into_fns/src/lib.rs @@ -0,0 +1,66 @@ +//! A full serial driver with more advanced I/O support, e.g., interrupt-based data receival. +//! +//! This crate builds on [`serial_port_basic`], which provides the lower-level types +//! and functions that enable simple interactions with serial ports. +//! This crate extends that functionality to provide interrupt handlers for receiving data +//! and handling data access in a deferred, asynchronous manner. +//! It also implements additional higher-level I/O traits for serial ports, +//! namely [`core2::io::Read`] and [`core2::io::Write`]. +//! +//! # Notes +//! Typically, drivers do not need to be designed in this split manner. +//! However, the serial port is the very earliest device to be initialized and used +//! in Theseus, as it acts as the backend output stream for Theseus's logger. + +#![no_std] +extern crate page_table_entry; +extern crate frame_allocator; +extern crate trusted_chunk; +extern crate memory_structs; +extern crate spin; +extern crate range_inclusive; + +use core::ops::{Deref}; +use page_table_entry::UnmappedFrames; +use frame_allocator::AllocatedFrames; +use trusted_chunk::trusted_chunk::TrustedChunk; +use memory_structs::FrameRange; +use spin::Once; +use range_inclusive::RangeInclusive; + +/// This is a private callback used to convert `UnmappedFrames` into `AllocatedFrames`. +/// +/// This exists to break the cyclic dependency cycle between `page_table_entry` and +/// `frame_allocator`, which depend on each other as such: +/// * `frame_allocator` needs to `impl Into for UnmappedFrames` +/// in order to allow unmapped exclusive frames to be safely deallocated +/// * `page_table_entry` needs to use the `AllocatedFrames` type in order to allow +/// page table entry values to be set safely to a real physical frame that is owned and exists. +/// +/// To get around that, the `frame_allocator::init()` function returns a callback +/// to its function that allows converting a range of unmapped frames back into `AllocatedFrames`, +/// which then allows them to be dropped and thus deallocated. +/// +/// This is safe because the frame allocator can only be initialized once, and also because +/// only this crate has access to that function callback and can thus guarantee +/// that it is only invoked for `UnmappedFrames`. 
+static INTO_ALLOCATED_FRAMES_FUNC: Once AllocatedFrames> = Once::new(); +static INTO_TRUSTED_CHUNK_FUNC: Once) -> TrustedChunk> = Once::new(); + + +pub fn init(into_trusted_chunk_fn: fn(RangeInclusive) -> TrustedChunk, into_alloc_frames_fn: fn(TrustedChunk, FrameRange) -> AllocatedFrames) { + INTO_TRUSTED_CHUNK_FUNC.call_once(|| into_trusted_chunk_fn); + INTO_ALLOCATED_FRAMES_FUNC.call_once(|| into_alloc_frames_fn); +} + +pub fn from_unmapped(unmapped_frames: UnmappedFrames) -> Result { + let frames = unmapped_frames.deref().clone(); + let tc = INTO_TRUSTED_CHUNK_FUNC.get() + .ok_or("BUG: Mapper::unmap(): the `INTO_TRUSTED_CHUNK_FUNC` callback was not initialized") + .map(|into_func| into_func(unmapped_frames.deref().to_range_inclusive()))?; + + INTO_ALLOCATED_FRAMES_FUNC.get() + .ok_or("BUG: Mapper::unmap(): the `INTO_ALLOCATED_FRAMES_FUNC` callback was not initialized") + .map(|into_func| into_func(tc, frames)) +} + diff --git a/kernel/memory/Cargo.toml b/kernel/memory/Cargo.toml index d40940c9dc..a24c654c61 100644 --- a/kernel/memory/Cargo.toml +++ b/kernel/memory/Cargo.toml @@ -24,6 +24,7 @@ page_allocator = { path = "../page_allocator" } frame_allocator = { path = "../frame_allocator" } no_drop = { path = "../no_drop" } owned_borrowed_trait = { path = "../../libs/owned_borrowed_trait" } +mem_into_fns = { path = "../mem_into_fns" } irq_safety = { git = "https://github.com/theseus-os/irq_safety" } diff --git a/kernel/memory/src/lib.rs b/kernel/memory/src/lib.rs index bf847c75d3..b1a2fc63ac 100644 --- a/kernel/memory/src/lib.rs +++ b/kernel/memory/src/lib.rs @@ -14,6 +14,7 @@ #![feature(ptr_internals)] extern crate alloc; +extern crate mem_into_fns; mod paging; pub use self::paging::{ @@ -251,7 +252,7 @@ pub fn init( reserved_index += 1; } - let into_alloc_frames_fn = frame_allocator::init(free_regions.iter().flatten(), reserved_regions.iter().flatten())?; + let (into_trusted_chunk_fn, into_alloc_frames_fn) = frame_allocator::init(free_regions.iter().flatten(), reserved_regions.iter().flatten())?; debug!("Initialized new frame allocator!"); frame_allocator::dump_frame_allocator_state(); @@ -270,8 +271,10 @@ pub fn init( debug!("Initialized new page allocator!"); page_allocator::dump_page_allocator_state(); + mem_into_fns::init(into_trusted_chunk_fn, into_alloc_frames_fn); + // Initialize paging, which creates a new page table and maps all of the current code/data sections into it. - paging::init(boot_info, kernel_stack_start, into_alloc_frames_fn) + paging::init(boot_info, kernel_stack_start) } /// Finishes initializing the memory management system after the heap is ready. diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs index 5387e3aa81..437a130767 100644 --- a/kernel/memory/src/paging/mapper.rs +++ b/kernel/memory/src/paging/mapper.rs @@ -35,24 +35,6 @@ use owned_borrowed_trait::{OwnedOrBorrowed, Owned, Borrowed}; #[cfg(target_arch = "x86_64")] use kernel_config::memory::ENTRIES_PER_PAGE_TABLE; -/// This is a private callback used to convert `UnmappedFrames` into `AllocatedFrames`. -/// -/// This exists to break the cyclic dependency cycle between `page_table_entry` and -/// `frame_allocator`, which depend on each other as such: -/// * `frame_allocator` needs to `impl Into for UnmappedFrames` -/// in order to allow unmapped exclusive frames to be safely deallocated -/// * `page_table_entry` needs to use the `AllocatedFrames` type in order to allow -/// page table entry values to be set safely to a real physical frame that is owned and exists. 
-/// -/// To get around that, the `frame_allocator::init()` function returns a callback -/// to its function that allows converting a range of unmapped frames back into `AllocatedFrames`, -/// which then allows them to be dropped and thus deallocated. -/// -/// This is safe because the frame allocator can only be initialized once, and also because -/// only this crate has access to that function callback and can thus guarantee -/// that it is only invoked for `UnmappedFrames`. -pub(super) static INTO_ALLOCATED_FRAMES_FUNC: Once AllocatedFrames> = Once::new(); - /// A convenience function to translate the given virtual address into a /// physical address using the currently-active page table. pub fn translate(virtual_address: VirtualAddress) -> Option { @@ -631,9 +613,7 @@ impl MappedPages { // freed from the newly-unmapped P1 PTE entry above. match unmapped_frames { UnmapResult::Exclusive(newly_unmapped_frames) => { - let newly_unmapped_frames = INTO_ALLOCATED_FRAMES_FUNC.get() - .ok_or("BUG: Mapper::unmap(): the `INTO_ALLOCATED_FRAMES_FUNC` callback was not initialized") - .map(|into_func| into_func(newly_unmapped_frames.deref().clone()))?; + let newly_unmapped_frames = mem_into_fns::from_unmapped(newly_unmapped_frames)?; if let Some(mut curr_frames) = current_frame_range.take() { match curr_frames.merge(newly_unmapped_frames) { diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs index 1761c52cb9..61d3df8c84 100644 --- a/kernel/memory/src/paging/mod.rs +++ b/kernel/memory/src/paging/mod.rs @@ -223,12 +223,7 @@ pub fn get_current_p4() -> Frame { pub fn init( boot_info: &impl BootInformation, stack_start_virt: VirtualAddress, - into_alloc_frames_fn: fn(FrameRange) -> AllocatedFrames, ) -> Result { - // Store the callback from `frame_allocator::init()` that allows the `Mapper` to convert - // `page_table_entry::UnmappedFrames` back into `AllocatedFrames`. - mapper::INTO_ALLOCATED_FRAMES_FUNC.call_once(|| into_alloc_frames_fn); - // bootstrap a PageTable from the currently-loaded page table let mut page_table = PageTable::from_current() .map_err(|_| "Failed to allocate frame for initial page table; is it merged with another section?")?; diff --git a/kernel/memory_structs/Cargo.toml b/kernel/memory_structs/Cargo.toml index bf1787139c..317859d922 100644 --- a/kernel/memory_structs/Cargo.toml +++ b/kernel/memory_structs/Cargo.toml @@ -12,6 +12,7 @@ derive_more = "0.99.0" paste = "1.0.5" kernel_config = { path = "../kernel_config" } +range_inclusive = {git = "https://github.com/Ramla-I/range_inclusive"} [lib] crate-type = ["rlib"] diff --git a/kernel/memory_structs/src/lib.rs b/kernel/memory_structs/src/lib.rs index 923ff73590..85838a74bc 100644 --- a/kernel/memory_structs/src/lib.rs +++ b/kernel/memory_structs/src/lib.rs @@ -12,12 +12,13 @@ use core::{ cmp::{min, max}, fmt, iter::Step, - ops::{Add, AddAssign, Deref, DerefMut, RangeInclusive, Sub, SubAssign} + ops::{Add, AddAssign, Deref, DerefMut, Sub, SubAssign} }; use kernel_config::memory::{MAX_PAGE_NUMBER, PAGE_SIZE}; use zerocopy::FromBytes; use paste::paste; use derive_more::*; +use range_inclusive::{RangeInclusive, RangeInclusiveIterator}; /// A macro for defining `VirtualAddress` and `PhysicalAddress` structs /// and implementing their common traits, which are generally identical. @@ -439,6 +440,11 @@ macro_rules! 
implement_page_frame_range { None } } + + #[doc = "Returns a `RangeInclusive` with the same bounds."] + pub fn to_range_inclusive(&self) -> RangeInclusive { + RangeInclusive::new(self.start().number(), self.end().number()) + } } impl fmt::Debug for $TypeName { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -458,9 +464,9 @@ macro_rules! implement_page_frame_range { } impl IntoIterator for $TypeName { type Item = $chunk; - type IntoIter = RangeInclusive<$chunk>; + type IntoIter = RangeInclusiveIterator<$chunk>; fn into_iter(self) -> Self::IntoIter { - self.0 + self.0.into_iter() } } diff --git a/libs/trusted_chunk b/libs/trusted_chunk index c863f11892..009fa7fc1e 160000 --- a/libs/trusted_chunk +++ b/libs/trusted_chunk @@ -1 +1 @@ -Subproject commit c863f118929ee54034d3b8c7bf971fdd53efdf97 +Subproject commit 009fa7fc1e8554fc0565a72bbdd90d0a14c82d35 From 461d9a4fff130756c29803dd30b27413291d4827 Mon Sep 17 00:00:00 2001 From: ramla-i Date: Tue, 13 Jun 2023 13:42:11 -0400 Subject: [PATCH 03/23] updated trusted chunk submodule --- libs/trusted_chunk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/trusted_chunk b/libs/trusted_chunk index 009fa7fc1e..b1dcda3ed7 160000 --- a/libs/trusted_chunk +++ b/libs/trusted_chunk @@ -1 +1 @@ -Subproject commit 009fa7fc1e8554fc0565a72bbdd90d0a14c82d35 +Subproject commit b1dcda3ed72b14e3a248c5c10f2cb89e3251b56e From bdcebddca4620e63b5a08510406b05a10fa02e50 Mon Sep 17 00:00:00 2001 From: ramla-i Date: Tue, 13 Jun 2023 15:36:06 -0400 Subject: [PATCH 04/23] comments and naming --- Cargo.lock | 32 +++++++++---------- .../src/static_array_rb_tree.rs | 1 + .../Cargo.toml | 2 +- .../src/lib.rs | 15 --------- kernel/memory/Cargo.toml | 2 +- kernel/memory/src/lib.rs | 4 +-- kernel/memory/src/paging/mapper.rs | 2 +- 7 files changed, 22 insertions(+), 36 deletions(-) rename kernel/{mem_into_fns => frame_range_callbacks}/Cargo.toml (95%) rename kernel/{mem_into_fns => frame_range_callbacks}/src/lib.rs (75%) diff --git a/Cargo.lock b/Cargo.lock index dda0c89bc7..8793286bc4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1192,6 +1192,21 @@ dependencies = [ "trusted_chunk", ] +[[package]] +name = "frame_range_callbacks" +version = "0.1.0" +dependencies = [ + "core2", + "frame_allocator", + "log", + "memory_structs", + "page_table_entry", + "range_inclusive", + "spin 0.9.4", + "trusted_chunk", + "x86_64", +] + [[package]] name = "framebuffer" version = "0.1.0" @@ -1920,21 +1935,6 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ca88d725a0a943b096803bd34e73a4437208b6077654cc4ecb2947a5f91618d" -[[package]] -name = "mem_into_fns" -version = "0.1.0" -dependencies = [ - "core2", - "frame_allocator", - "log", - "memory_structs", - "page_table_entry", - "range_inclusive", - "spin 0.9.4", - "trusted_chunk", - "x86_64", -] - [[package]] name = "memchr" version = "2.4.1" @@ -1989,11 +1989,11 @@ dependencies = [ "bitflags", "boot_info", "frame_allocator", + "frame_range_callbacks", "irq_safety", "kernel_config", "lazy_static", "log", - "mem_into_fns", "memory_aarch64", "memory_structs", "memory_x86_64", diff --git a/kernel/frame_allocator/src/static_array_rb_tree.rs b/kernel/frame_allocator/src/static_array_rb_tree.rs index fa9fb638f1..0f9858db77 100644 --- a/kernel/frame_allocator/src/static_array_rb_tree.rs +++ b/kernel/frame_allocator/src/static_array_rb_tree.rs @@ -43,6 +43,7 @@ impl Wrapper { }) } + /// Returns the inner value, consuming this wrapper. 
pub(crate) fn into_inner(self) -> T { self.inner } diff --git a/kernel/mem_into_fns/Cargo.toml b/kernel/frame_range_callbacks/Cargo.toml similarity index 95% rename from kernel/mem_into_fns/Cargo.toml rename to kernel/frame_range_callbacks/Cargo.toml index 9ee373ddd4..f0437a452f 100644 --- a/kernel/mem_into_fns/Cargo.toml +++ b/kernel/frame_range_callbacks/Cargo.toml @@ -1,6 +1,6 @@ [package] authors = ["Ramla Ijaz "] -name = "mem_into_fns" +name = "frame_range_callbacks" description = "trusted callbacks to recreate memory ranges from an unmapped PTE" version = "0.1.0" diff --git a/kernel/mem_into_fns/src/lib.rs b/kernel/frame_range_callbacks/src/lib.rs similarity index 75% rename from kernel/mem_into_fns/src/lib.rs rename to kernel/frame_range_callbacks/src/lib.rs index 312f638faa..995c7cfba6 100644 --- a/kernel/mem_into_fns/src/lib.rs +++ b/kernel/frame_range_callbacks/src/lib.rs @@ -1,17 +1,3 @@ -//! A full serial driver with more advanced I/O support, e.g., interrupt-based data receival. -//! -//! This crate builds on [`serial_port_basic`], which provides the lower-level types -//! and functions that enable simple interactions with serial ports. -//! This crate extends that functionality to provide interrupt handlers for receiving data -//! and handling data access in a deferred, asynchronous manner. -//! It also implements additional higher-level I/O traits for serial ports, -//! namely [`core2::io::Read`] and [`core2::io::Write`]. -//! -//! # Notes -//! Typically, drivers do not need to be designed in this split manner. -//! However, the serial port is the very earliest device to be initialized and used -//! in Theseus, as it acts as the backend output stream for Theseus's logger. - #![no_std] extern crate page_table_entry; extern crate frame_allocator; @@ -47,7 +33,6 @@ use range_inclusive::RangeInclusive; static INTO_ALLOCATED_FRAMES_FUNC: Once AllocatedFrames> = Once::new(); static INTO_TRUSTED_CHUNK_FUNC: Once) -> TrustedChunk> = Once::new(); - pub fn init(into_trusted_chunk_fn: fn(RangeInclusive) -> TrustedChunk, into_alloc_frames_fn: fn(TrustedChunk, FrameRange) -> AllocatedFrames) { INTO_TRUSTED_CHUNK_FUNC.call_once(|| into_trusted_chunk_fn); INTO_ALLOCATED_FRAMES_FUNC.call_once(|| into_alloc_frames_fn); diff --git a/kernel/memory/Cargo.toml b/kernel/memory/Cargo.toml index a24c654c61..08efb803c3 100644 --- a/kernel/memory/Cargo.toml +++ b/kernel/memory/Cargo.toml @@ -24,7 +24,7 @@ page_allocator = { path = "../page_allocator" } frame_allocator = { path = "../frame_allocator" } no_drop = { path = "../no_drop" } owned_borrowed_trait = { path = "../../libs/owned_borrowed_trait" } -mem_into_fns = { path = "../mem_into_fns" } +frame_range_callbacks = { path = "../frame_range_callbacks" } irq_safety = { git = "https://github.com/theseus-os/irq_safety" } diff --git a/kernel/memory/src/lib.rs b/kernel/memory/src/lib.rs index b1a2fc63ac..fae1fc80e5 100644 --- a/kernel/memory/src/lib.rs +++ b/kernel/memory/src/lib.rs @@ -14,7 +14,7 @@ #![feature(ptr_internals)] extern crate alloc; -extern crate mem_into_fns; +extern crate frame_range_callbacks; mod paging; pub use self::paging::{ @@ -271,7 +271,7 @@ pub fn init( debug!("Initialized new page allocator!"); page_allocator::dump_page_allocator_state(); - mem_into_fns::init(into_trusted_chunk_fn, into_alloc_frames_fn); + frame_range_callbacks::init(into_trusted_chunk_fn, into_alloc_frames_fn); // Initialize paging, which creates a new page table and maps all of the current code/data sections into it. 
paging::init(boot_info, kernel_stack_start) diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs index 437a130767..2e5972bf7b 100644 --- a/kernel/memory/src/paging/mapper.rs +++ b/kernel/memory/src/paging/mapper.rs @@ -613,7 +613,7 @@ impl MappedPages { // freed from the newly-unmapped P1 PTE entry above. match unmapped_frames { UnmapResult::Exclusive(newly_unmapped_frames) => { - let newly_unmapped_frames = mem_into_fns::from_unmapped(newly_unmapped_frames)?; + let newly_unmapped_frames = frame_range_callbacks::from_unmapped(newly_unmapped_frames)?; if let Some(mut curr_frames) = current_frame_range.take() { match curr_frames.merge(newly_unmapped_frames) { From e0ca2c942b68364c7c2f85e7ec506cd96162ab7f Mon Sep 17 00:00:00 2001 From: ramla-i Date: Tue, 13 Jun 2023 15:49:08 -0400 Subject: [PATCH 05/23] comments --- kernel/frame_range_callbacks/src/lib.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/kernel/frame_range_callbacks/src/lib.rs b/kernel/frame_range_callbacks/src/lib.rs index 995c7cfba6..bb45e5a3be 100644 --- a/kernel/frame_range_callbacks/src/lib.rs +++ b/kernel/frame_range_callbacks/src/lib.rs @@ -1,4 +1,10 @@ #![no_std] +//! This crate contains callbacks to create `TrustedChunk` objects and then `AllocatedFrames` objects from an `UnmappedFrames`. +//! It's required to avoid a cyclic dependency between the `frame_allocator` and `page_table_entry` crates. +//! +//! The public `from_unmapped()` function ensures that an `UnmappedFrames` object has to be consumed to run the callbacks, +/// making sure that it can only be called when a PTE has been unmapped. + extern crate page_table_entry; extern crate frame_allocator; extern crate trusted_chunk; @@ -14,6 +20,14 @@ use memory_structs::FrameRange; use spin::Once; use range_inclusive::RangeInclusive; +/// This is a private callback used to convert `UnmappedFrames` into a `TrustedChunk`. +/// The `TrustedChunk` is then used to create an `AllocatedFrames`. +/// +/// This is safe because the init function in the `trusted_chunk` crate returns this callback only once, +/// and only this crate has access to the callback. The callback function has been verified with the +/// invariant that the new `TrustedChunk` has the same bounds as the range passed as an argument. +static INTO_TRUSTED_CHUNK_FUNC: Once) -> TrustedChunk> = Once::new(); + /// This is a private callback used to convert `UnmappedFrames` into `AllocatedFrames`. /// /// This exists to break the cyclic dependency cycle between `page_table_entry` and @@ -31,7 +45,6 @@ use range_inclusive::RangeInclusive; /// only this crate has access to that function callback and can thus guarantee /// that it is only invoked for `UnmappedFrames`. 
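For context, a condensed sketch of how these callbacks are wired up at boot, based on the `kernel/memory/src/lib.rs` changes earlier in this series (the surrounding initialization code is elided here):

    // Sketch: frame_allocator::init() returns the two callbacks, which are then
    // registered with this crate before paging is initialized.
    let (into_trusted_chunk_fn, into_alloc_frames_fn) =
        frame_allocator::init(free_regions.iter().flatten(), reserved_regions.iter().flatten())?;
    frame_range_callbacks::init(into_trusted_chunk_fn, into_alloc_frames_fn);
    // From this point on, Mapper::unmap() can call frame_range_callbacks::from_unmapped()
    // to convert UnmappedFrames back into AllocatedFrames for deallocation.
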
static INTO_ALLOCATED_FRAMES_FUNC: Once AllocatedFrames> = Once::new(); -static INTO_TRUSTED_CHUNK_FUNC: Once) -> TrustedChunk> = Once::new(); pub fn init(into_trusted_chunk_fn: fn(RangeInclusive) -> TrustedChunk, into_alloc_frames_fn: fn(TrustedChunk, FrameRange) -> AllocatedFrames) { INTO_TRUSTED_CHUNK_FUNC.call_once(|| into_trusted_chunk_fn); From 3378834dbf308a4bde2484a55a84ddf0f4b7749b Mon Sep 17 00:00:00 2001 From: ramla-i Date: Tue, 13 Jun 2023 16:09:39 -0400 Subject: [PATCH 06/23] removed unnecessary check in frame allocator init --- kernel/frame_allocator/src/lib.rs | 32 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index 6563bd0e91..7f938afa20 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -157,20 +157,20 @@ pub fn init( reserved_list = temp_reserved_list; } - - // Finally, one last sanity check -- ensure no two regions overlap. - let all_areas = free_list[..free_list_idx].iter().flatten() - .chain(reserved_list.iter().flatten()); - for (i, elem) in all_areas.clone().enumerate() { - let next_idx = i + 1; - for other in all_areas.clone().skip(next_idx) { - if let Some(overlap) = elem.overlap(other) { - panic!("BUG: frame allocator free list had overlapping ranges: \n \t {:?} and {:?} overlap at {:?}", - elem, other, overlap, - ); - } - } - } + // We can remove this sanity check because the following code uses formally verified functions to ensure no two regions overlap. + // // Finally, one last sanity check -- ensure no two regions overlap. + // let all_areas = free_list[..free_list_idx].iter().flatten() + // .chain(reserved_list.iter().flatten()); + // for (i, elem) in all_areas.clone().enumerate() { + // let next_idx = i + 1; + // for other in all_areas.clone().skip(next_idx) { + // if let Some(overlap) = elem.overlap(other) { + // panic!("BUG: frame allocator free list had overlapping ranges: \n \t {:?} and {:?} overlap at {:?}", + // elem, other, overlap, + // ); + // } + // } + // } // Here, since we're sure we now have a list of regions that don't overlap, we can create lists of formally verified Chunks let mut free_list_w_chunks: [Option; 32] = Default::default(); @@ -194,9 +194,7 @@ pub fn init( *GENERAL_REGIONS.lock() = StaticArrayRBTree::new(free_list); *RESERVED_REGIONS.lock() = StaticArrayRBTree::new(reserved_list); - // Register the callback to create a Chunk. - // This function is not formally-verified and we only call it in the code path for UnmappedFrames. 
- // trusted_chunk_shim::INTO_VERIFIED_CHUNK_FUNC.call_once(|| trusted_chunk::init()); + // Register the callbacks to create a TrustedChunk and AllocatedFrames from an unmapped PTE Ok((trusted_chunk::init()?, into_allocated_frames)) } From 228a169ffb9446faca51321bcc8896a824110c82 Mon Sep 17 00:00:00 2001 From: ramla-i Date: Tue, 13 Jun 2023 17:02:51 -0400 Subject: [PATCH 07/23] allocated frames lib file matches latest theseus main, with only my own changes --- .../frame_allocator/src/allocated_frames.rs | 6 +- kernel/frame_allocator/src/lib.rs | 217 ++++++++++-------- 2 files changed, 123 insertions(+), 100 deletions(-) diff --git a/kernel/frame_allocator/src/allocated_frames.rs b/kernel/frame_allocator/src/allocated_frames.rs index d1225e4068..c7ea60614f 100644 --- a/kernel/frame_allocator/src/allocated_frames.rs +++ b/kernel/frame_allocator/src/allocated_frames.rs @@ -1,4 +1,4 @@ -use crate::{Chunk, MemoryRegionType, frame_is_in_list, FREE_GENERAL_FRAMES_LIST, FREE_RESERVED_FRAMES_LIST, RESERVED_REGIONS}; +use crate::{Chunk, MemoryRegionType, contains_any, FREE_GENERAL_FRAMES_LIST, FREE_RESERVED_FRAMES_LIST, RESERVED_REGIONS}; use memory_structs::{FrameRange, Frame}; use core::{fmt, ops::{Deref, DerefMut}, marker::PhantomData}; use trusted_chunk::trusted_chunk::TrustedChunk; @@ -115,7 +115,7 @@ impl AllocatedFrames { /// the `page_table_entry` crate, since `page_table_entry` must depend on types /// from this crate in order to enforce safety when modifying page table entries. pub(crate) fn into_allocated_frames(tc: TrustedChunk, frames: FrameRange) -> AllocatedFrames { - let typ = if frame_is_in_list(&RESERVED_REGIONS.lock(), frames.start()) { + let typ = if contains_any(&RESERVED_REGIONS.lock(), &frames) { MemoryRegionType::Reserved } else { MemoryRegionType::Free @@ -127,7 +127,7 @@ impl Drop for AllocatedFrames { fn drop(&mut self) { if self.size_in_frames() == 0 { return; } - let (list, typ) = if frame_is_in_list(&RESERVED_REGIONS.lock(), self.start()) { + let (list, typ) = if contains_any(&RESERVED_REGIONS.lock(), &self.frames) { (&FREE_RESERVED_FRAMES_LIST, MemoryRegionType::Reserved) } else { (&FREE_GENERAL_FRAMES_LIST, MemoryRegionType::Free) diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index 7f938afa20..f22e8475be 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -356,24 +356,32 @@ impl<'list> Drop for DeferredAllocAction<'list> { } } - /// Possible allocation errors. #[derive(Debug)] enum AllocationError { - /// The requested address was not free: it was already allocated, or is outside the range of this allocator. + /// The requested address was not free: it was already allocated. AddressNotFree(Frame, usize), + /// The requested address was outside the range of this allocator. + AddressNotFound(Frame, usize), /// The address space was full, or there was not a large-enough chunk /// or enough remaining chunks that could satisfy the requested allocation size. OutOfAddressSpace(usize), - /// ToDo: remove - InternalError, + /// The starting address was found, but not all successive contiguous frames were available. + ContiguousChunkNotFound(Frame, usize), + /// Failed to remove a chunk from the free list given a reference to it. + ChunkRemovalFailed, + /// Failed to merge or split a Chunk. + ChunkOperationFailed, } impl From for &'static str { fn from(alloc_err: AllocationError) -> &'static str { match alloc_err { - AllocationError::AddressNotFree(..) 
=> "address was in use or outside of this frame allocator's range", + AllocationError::AddressNotFree(..) => "requested address was in use", + AllocationError::AddressNotFound(..) => "requested address was outside of this frame allocator's range", AllocationError::OutOfAddressSpace(..) => "out of physical address space", - AllocationError::InternalError => "problem with frame allocator logic", + AllocationError::ContiguousChunkNotFound(..) => "only some of the requested frames were available", + AllocationError::ChunkRemovalFailed => "Failed to remove a Chunk from the free list, this is most likely due to some logical error", + AllocationError::ChunkOperationFailed => "A verified chunk function returned an error, this is most likely due to some logical error", } } } @@ -441,20 +449,21 @@ fn find_specific_chunk( } } else { trace!("Frame allocator: couldn't get next chunk above initial too-small {:?}", chunk); - None + trace!("Requesting new chunk starting at {:?}, num_frames: {}", *chunk.end() + 1, requested_end_frame.number() - chunk.end().number()); + return Err(AllocationError::ContiguousChunkNotFound(*chunk.end() + 1, requested_end_frame.number() - chunk.end().number())); } }; if let Some(initial_chunk_ref) = initial_chunk_ref { // remove the first chunk - let initial_chunk = retrieve_chunk_from_ref(initial_chunk_ref).ok_or(AllocationError::InternalError)?; + let initial_chunk = retrieve_chunk_from_ref(initial_chunk_ref).ok_or(AllocationError::ChunkRemovalFailed)?; // now search for the next contiguous chunk, that we already know exists let requested_contiguous_frame = *initial_chunk.end() + 1; let mut cursor_mut = tree.upper_bound_mut(Bound::Included(&requested_contiguous_frame)); if let Some(next_chunk) = cursor_mut.get().map(|w| w.deref()) { if next_chunk.contains(&requested_contiguous_frame) { - // merge the next chunk into the intiial chunk + // merge the next chunk into the initial chunk return adjust_chosen_chunk_contiguous(requested_frame, num_frames, initial_chunk, ValueRefMut::RBTree(cursor_mut)); } else { trace!("This should never fail, since we've already found a contiguous chunk."); @@ -467,7 +476,7 @@ fn find_specific_chunk( } } - Err(AllocationError::AddressNotFree(requested_frame, num_frames)) + Err(AllocationError::AddressNotFound(requested_frame, num_frames)) } @@ -526,11 +535,12 @@ fn retrieve_chunk_from_ref(mut chosen_chunk_ref: ValueRefMut) -> Option c, RemovedValue::RBTree(option_chunk) => { - if let Some(boxed_chunk) = option_chunk { - Some(boxed_chunk.into_inner()) - } else { - None - } + option_chunk.map(|c| c.into_inner()) + // if let Some(boxed_chunk) = option_chunk { + // Some(boxed_chunk.into_inner()) + // } else { + // None + // } } }; chosen_chunk @@ -547,7 +557,7 @@ fn allocate_from_chosen_chunk( mut chosen_chunk_ref: ValueRefMut, ) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { // Remove the chosen chunk from the free frame list. 
- let chosen_chunk = retrieve_chunk_from_ref(chosen_chunk_ref).ok_or(AllocationError::InternalError)?; + let chosen_chunk = retrieve_chunk_from_ref(chosen_chunk_ref).ok_or(AllocationError::ChunkRemovalFailed)?; let (new_allocation, before, after) = chosen_chunk.split(start_frame, num_frames); @@ -569,12 +579,12 @@ fn adjust_chosen_chunk_contiguous( mut initial_chunk: Chunk, contiguous_chunk_ref: ValueRefMut, ) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { - let mut contiguous_chunk = retrieve_chunk_from_ref(contiguous_chunk_ref).ok_or(AllocationError::InternalError)?; + let mut contiguous_chunk = retrieve_chunk_from_ref(contiguous_chunk_ref).ok_or(AllocationError::ChunkRemovalFailed)?; initial_chunk.merge(contiguous_chunk).map_err(|_| { - trace!("contiguous chuynks couldn't ber merged, despite previous checks"); + trace!("contiguous chunks couldn't be merged, despite previous checks"); //To Do: should we reinsert chunk to list here. - AllocationError::InternalError + AllocationError:: ChunkOperationFailed })?; let (new_allocation, before, after) = initial_chunk.split(start_frame, num_frames); @@ -586,29 +596,35 @@ fn adjust_chosen_chunk_contiguous( )) } -/// Returns whether the given `Frame` is contained within the given `list`. -fn frame_is_in_list( +/// Returns `true` if the given list contains *any* of the given `frames`. +fn contains_any( list: &StaticArrayRBTree, - frame: &Frame, + frames: &FrameRange, ) -> bool { match &list.0 { Inner::Array(ref arr) => { for chunk in arr.iter().flatten() { - if chunk.contains(frame) { + if chunk.overlap(frames).is_some() { return true; } } } - Inner::RBTree(ref tree) => { - let cursor = tree.upper_bound(Bound::Included(frame)); - if let Some(chunk) = cursor.get().map(|w| w.deref()) { - if chunk.contains(frame) { + Inner::RBTree(ref tree) => { + let mut cursor = tree.upper_bound(Bound::Included(frames.start())); + while let Some(chunk) = cursor.get() { + if chunk.start() > frames.end() { + // We're iterating in ascending order over a sorted tree, so we can stop + // looking for overlapping regions once we pass the end of `frames`. + break; + } + + if chunk.overlap(frames).is_some() { return true; } + cursor.move_next(); } } } - false } @@ -621,41 +637,42 @@ fn frame_is_in_list( /// Currently, this function adds no new frames at all if any frames within the given `frames` list /// overlap any existing regions at all. /// TODO: handle partially-overlapping regions by extending existing regions on either end. -fn add_reserved_region( +fn add_reserved_region_to_chunk_list( list: &mut StaticArrayRBTree, frames: FrameRange, ) -> Result { - - // Check whether the reserved region overlaps any existing regions. - match &mut list.0 { - Inner::Array(ref mut arr) => { - for chunk in arr.iter().flatten() { - if let Some(_overlap) = chunk.overlap(&frames) { - // trace!("Failed to add reserved region {:?} due to overlap {:?} with existing chunk {:?}", - // frames, _overlap, chunk - // ); - return Err("Failed to add reserved region that overlapped with existing reserved regions (array)."); - } - } - } - Inner::RBTree(ref mut tree) => { - let mut cursor_mut = tree.upper_bound_mut(Bound::Included(frames.start())); - while let Some(chunk) = cursor_mut.get().map(|w| w.deref()) { - if chunk.start() > frames.end() { - // We're iterating in ascending order over a sorted tree, - // so we can stop looking for overlapping regions once we pass the end of the new frames to add. 
- break; - } - if let Some(_overlap) = chunk.overlap(&frames) { - // trace!("Failed to add reserved region {:?} due to overlap {:?} with existing chunk {:?}", - // frames, _overlap, chunk - // ); - return Err("Failed to add reserved region that overlapped with existing reserved regions (RBTree)."); - } - cursor_mut.move_next(); - } - } - } + // We can remove this check because creating a Chunk will check for overlaps + + // // Check whether the reserved region overlaps any existing regions. + // match &mut list.0 { + // Inner::Array(ref mut arr) => { + // for chunk in arr.iter().flatten() { + // if let Some(_overlap) = chunk.overlap(&frames) { + // // trace!("Failed to add reserved region {:?} due to overlap {:?} with existing chunk {:?}", + // // frames, _overlap, chunk + // // ); + // return Err("Failed to add reserved region that overlapped with existing reserved regions (array)."); + // } + // } + // } + // Inner::RBTree(ref mut tree) => { + // let mut cursor_mut = tree.upper_bound_mut(Bound::Included(frames.start())); + // while let Some(chunk) = cursor_mut.get().map(|w| w.deref()) { + // if chunk.start() > frames.end() { + // // We're iterating in ascending order over a sorted tree, + // // so we can stop looking for overlapping regions once we pass the end of the new frames to add. + // break; + // } + // if let Some(_overlap) = chunk.overlap(&frames) { + // // trace!("Failed to add reserved region {:?} due to overlap {:?} with existing chunk {:?}", + // // frames, _overlap, chunk + // // ); + // return Err("Failed to add reserved region that overlapped with existing reserved regions (RBTree)."); + // } + // cursor_mut.move_next(); + // } + // } + // } list.insert(Chunk::new( MemoryRegionType::Reserved, @@ -682,14 +699,12 @@ fn add_reserved_region_to_region_list( // Check whether the reserved region overlaps any existing regions. match &mut list.0 { Inner::Array(ref mut arr) => { - for elem in arr.iter() { - if let Some(chunk) = elem { - if let Some(_overlap) = chunk.overlap(&frames) { - // trace!("Failed to add reserved region {:?} due to overlap {:?} with existing chunk {:?}", - // frames, _overlap, chunk - // ); - return Err("Failed to add reserved region that overlapped with existing reserved regions (array)."); - } + for chunk in arr.iter().flatten() { + if let Some(_overlap) = chunk.overlap(&frames) { + // trace!("Failed to add reserved region {:?} due to overlap {:?} with existing chunk {:?}", + // frames, _overlap, chunk + // ); + return Err("Failed to add reserved region that overlapped with existing reserved regions (array)."); } } } @@ -754,36 +769,44 @@ pub fn allocate_frames_deferred( if let Some(paddr) = requested_paddr { let start_frame = Frame::containing_address(paddr); - let end_frame = start_frame + (num_frames - 1); - // Try to allocate the frames at the specific address. let mut free_reserved_frames_list = FREE_RESERVED_FRAMES_LIST.lock(); - if let Ok(success) = find_specific_chunk(&mut free_reserved_frames_list, start_frame, num_frames) { - Ok(success) - } else { - // If allocation failed, then the requested `start_frame` may be found in the general-purpose list - // or may represent a new, previously-unknown reserved region that we must add. - // We first attempt to allocate it from the general-purpose free regions. 
- if let Ok(result) = find_specific_chunk(&mut FREE_GENERAL_FRAMES_LIST.lock(), start_frame, num_frames) { - Ok(result) - } - // If we failed to allocate the requested frames from the general list, - // we can add a new reserved region containing them, - // but ONLY if those frames are *NOT* in the general-purpose region. - else if { - let g = GENERAL_REGIONS.lock(); - !frame_is_in_list(&g, &start_frame) && !frame_is_in_list(&g, &end_frame) - } { - let frames = FrameRange::new(start_frame, end_frame); - let new_reserved_frames = add_reserved_region_to_region_list(&mut RESERVED_REGIONS.lock(), frames)?; - // If we successfully added a new reserved region, - // then add those frames to the actual list of *available* reserved regions. - let _new_free_reserved_frames = add_reserved_region(&mut free_reserved_frames_list, new_reserved_frames.clone())?; - assert_eq!(new_reserved_frames, _new_free_reserved_frames); - find_specific_chunk(&mut free_reserved_frames_list, start_frame, num_frames) - } - else { - Err(AllocationError::AddressNotFree(start_frame, num_frames)) + // First, attempt to allocate the requested frames from the free reserved list. + let first_allocation_attempt = find_specific_chunk(&mut free_reserved_frames_list, start_frame, num_frames); + let (requested_start_frame, requested_num_frames) = match first_allocation_attempt { + Ok(success) => return Ok(success), + Err(alloc_err) => match alloc_err { + AllocationError::AddressNotFound(..) => { + // If allocation failed, then the requested `start_frame` may be found in the general-purpose list + match find_specific_chunk(&mut FREE_GENERAL_FRAMES_LIST.lock(), start_frame, num_frames) { + Ok(result) => return Ok(result), + Err(AllocationError::AddressNotFound(..)) => (start_frame, num_frames), + Err(AllocationError::ContiguousChunkNotFound(..)) => { + // because we are searching the general frames list, it doesn't matter if part of the chunk was found + // since we only create new reserved frames. + trace!("Only part of the requested allocation was found in the general frames list."); + return Err(alloc_err).map_err(From::from); + } + Err(_other) => return Err(alloc_err).map_err(From::from), + } + }, + AllocationError::ContiguousChunkNotFound(f, numf) => (f, numf), + _ => return Err(alloc_err).map_err(From::from), } + }; + + // If we failed to allocate the requested frames from the general list, + // we can add a new reserved region containing them, + // but ONLY if those frames are *NOT* in the general-purpose region. 
+ let requested_frames = FrameRange::new(requested_start_frame, requested_start_frame + (requested_num_frames - 1)); + if !contains_any(&GENERAL_REGIONS.lock(), &requested_frames) { + // If we successfully create a new Chunk with verified functions, then add a new reserved region + let new_free_reserved_frames = add_reserved_region_to_chunk_list(&mut free_reserved_frames_list, requested_frames)?; + let _new_reserved_frames = add_reserved_region_to_region_list(&mut RESERVED_REGIONS.lock(), new_free_reserved_frames.clone())?; + assert_eq!(_new_reserved_frames, new_free_reserved_frames); + find_specific_chunk(&mut free_reserved_frames_list, start_frame, num_frames) + } + else { + Err(AllocationError::AddressNotFree(start_frame, num_frames)) } } else { find_any_chunk(&mut FREE_GENERAL_FRAMES_LIST.lock(), num_frames) @@ -878,4 +901,4 @@ pub fn dump_frame_allocator_state() { debug!("------------------ RESERVED REGIONS -----------------"); RESERVED_REGIONS.lock().iter().for_each(|e| debug!("\t {:?}", e) ); debug!("-----------------------------------------------------"); -} \ No newline at end of file +} From 0d5dbb5aebd42e577048ec61b6a53e48b745e22a Mon Sep 17 00:00:00 2001 From: Ramla Ijaz Date: Tue, 13 Jun 2023 20:58:22 -0400 Subject: [PATCH 08/23] chunk, region and AF match latest main code, removed warnings --- .../frame_allocator/src/allocated_frames.rs | 6 ++-- kernel/frame_allocator/src/lib.rs | 18 ++++++------ kernel/frame_allocator/src/region.rs | 3 +- .../frame_allocator/src/trusted_chunk_shim.rs | 28 +++++++++---------- kernel/memory/src/paging/mapper.rs | 3 +- kernel/memory/src/paging/mod.rs | 2 +- 6 files changed, 30 insertions(+), 30 deletions(-) diff --git a/kernel/frame_allocator/src/allocated_frames.rs b/kernel/frame_allocator/src/allocated_frames.rs index c7ea60614f..23a94b3ab9 100644 --- a/kernel/frame_allocator/src/allocated_frames.rs +++ b/kernel/frame_allocator/src/allocated_frames.rs @@ -50,7 +50,7 @@ impl AllocatedFrames { /// If either of those conditions are met, `self` is modified and `Ok(())` is returned, /// otherwise `Err(other)` is returned. pub fn merge(&mut self, mut other: AllocatedFrames) -> Result<(), AllocatedFrames> { - let mut chunk = core::mem::replace(&mut other.frames, Chunk::empty()); + let chunk = core::mem::replace(&mut other.frames, Chunk::empty()); match self.frames.merge(chunk) { Ok(_) => { // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. @@ -77,7 +77,7 @@ impl AllocatedFrames { /// /// [`core::slice::split_at()`]: https://doc.rust-lang.org/core/primitive.slice.html#method.split_at pub fn split(mut self, at_frame: Frame) -> Result<(AllocatedFrames, AllocatedFrames), AllocatedFrames> { - let mut chunk = core::mem::replace(&mut self.frames, Chunk::empty()); + let chunk = core::mem::replace(&mut self.frames, Chunk::empty()); match chunk.split_at(at_frame) { Ok((chunk1, chunk2)) => { // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. 
@@ -127,7 +127,7 @@ impl Drop for AllocatedFrames { fn drop(&mut self) { if self.size_in_frames() == 0 { return; } - let (list, typ) = if contains_any(&RESERVED_REGIONS.lock(), &self.frames) { + let (list, _typ) = if contains_any(&RESERVED_REGIONS.lock(), &self.frames) { (&FREE_RESERVED_FRAMES_LIST, MemoryRegionType::Reserved) } else { (&FREE_GENERAL_FRAMES_LIST, MemoryRegionType::Free) diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index f22e8475be..07b90e0e62 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -41,7 +41,7 @@ mod region; mod trusted_chunk_shim; mod allocated_frames; -use core::{borrow::Borrow, cmp::{Ordering, min, max}, fmt, ops::{Deref, DerefMut}, marker::PhantomData}; +use core::{borrow::Borrow, cmp::{min, max}, ops::Deref}; use kernel_config::memory::*; use memory_structs::{PhysicalAddress, Frame, FrameRange}; use spin::Mutex; @@ -50,12 +50,14 @@ use static_array_rb_tree::*; use trusted_chunk::trusted_chunk::TrustedChunk; use trusted_chunk_shim::*; use region::*; -use range_inclusive::{RangeInclusive, RangeInclusiveIterator}; +use range_inclusive::RangeInclusive; pub use allocated_frames::*; const FRAME_SIZE: usize = PAGE_SIZE; -pub(crate) const MIN_FRAME: Frame = Frame::containing_address(PhysicalAddress::zero()); -pub(crate) const MAX_FRAME: Frame = Frame::containing_address(PhysicalAddress::new_canonical(usize::MAX)); +#[allow(dead_code)] +const MIN_FRAME: Frame = Frame::containing_address(PhysicalAddress::zero()); +#[allow(dead_code)] +const MAX_FRAME: Frame = Frame::containing_address(PhysicalAddress::new_canonical(usize::MAX)); // Note: we keep separate lists for "free, general-purpose" areas and "reserved" areas, as it's much faster. @@ -410,7 +412,7 @@ fn find_specific_chunk( } } Inner::RBTree(ref mut tree) => { - let mut cursor_mut = tree.upper_bound_mut(Bound::Included(&requested_frame)); + let cursor_mut = tree.upper_bound_mut(Bound::Included(&requested_frame)); if let Some(chunk) = cursor_mut.get().map(|w| w.deref().clone()) { if chunk.contains(&requested_frame) { if requested_end_frame <= *chunk.end() { @@ -460,7 +462,7 @@ fn find_specific_chunk( // now search for the next contiguous chunk, that we already know exists let requested_contiguous_frame = *initial_chunk.end() + 1; - let mut cursor_mut = tree.upper_bound_mut(Bound::Included(&requested_contiguous_frame)); + let cursor_mut = tree.upper_bound_mut(Bound::Included(&requested_contiguous_frame)); if let Some(next_chunk) = cursor_mut.get().map(|w| w.deref()) { if next_chunk.contains(&requested_contiguous_frame) { // merge the next chunk into the initial chunk @@ -554,7 +556,7 @@ fn retrieve_chunk_from_ref(mut chosen_chunk_ref: ValueRefMut) -> Option, + chosen_chunk_ref: ValueRefMut, ) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { // Remove the chosen chunk from the free frame list. 
let chosen_chunk = retrieve_chunk_from_ref(chosen_chunk_ref).ok_or(AllocationError::ChunkRemovalFailed)?; @@ -579,7 +581,7 @@ fn adjust_chosen_chunk_contiguous( mut initial_chunk: Chunk, contiguous_chunk_ref: ValueRefMut, ) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { - let mut contiguous_chunk = retrieve_chunk_from_ref(contiguous_chunk_ref).ok_or(AllocationError::ChunkRemovalFailed)?; + let contiguous_chunk = retrieve_chunk_from_ref(contiguous_chunk_ref).ok_or(AllocationError::ChunkRemovalFailed)?; initial_chunk.merge(contiguous_chunk).map_err(|_| { trace!("contiguous chunks couldn't be merged, despite previous checks"); diff --git a/kernel/frame_allocator/src/region.rs b/kernel/frame_allocator/src/region.rs index 417189cee4..d19b4689e9 100644 --- a/kernel/frame_allocator/src/region.rs +++ b/kernel/frame_allocator/src/region.rs @@ -1,6 +1,6 @@ use memory_structs::{FrameRange, Frame}; use crate::MemoryRegionType; -use core::{borrow::Borrow, cmp::{Ordering, min, max}, fmt, ops::{Deref, DerefMut}}; +use core::{borrow::Borrow, cmp::Ordering, ops::Deref}; /// A region of contiguous frames. /// Only used for bookkeeping, not for allocation. @@ -15,6 +15,7 @@ use core::{borrow::Borrow, cmp::{Ordering, min, max}, fmt, ops::{Deref, DerefMut /// Thus, comparing two `Region`s with the `==` or `!=` operators may not work as expected. /// since it ignores their actual range of frames. #[derive(Debug, Clone, Eq)] +#[allow(dead_code)] pub struct Region { /// The type of this memory region, e.g., whether it's in a free or reserved region. pub(crate) typ: MemoryRegionType, diff --git a/kernel/frame_allocator/src/trusted_chunk_shim.rs b/kernel/frame_allocator/src/trusted_chunk_shim.rs index d5f8d008e8..a680c79d8a 100644 --- a/kernel/frame_allocator/src/trusted_chunk_shim.rs +++ b/kernel/frame_allocator/src/trusted_chunk_shim.rs @@ -3,25 +3,21 @@ //! but succeeds with RangeInclusive. //! //! We should be able to remove this module and work directly with the verified crate in the foreseeable future. -//! All this model should do is amke sure that the start and end of the stored `frames` is equal to the start and end of the `verified_chunk` +//! All this model should do is make sure that the start and end of the stored `frames` is equal to the start and end of the `verified_chunk` -use alloc::collections::btree_map::Range; use kernel_config::memory::PAGE_SIZE; use memory_structs::{FrameRange, Frame, PhysicalAddress}; use range_inclusive::RangeInclusive; -use crate::{MemoryRegionType, AllocatedFrames, MIN_FRAME, MAX_FRAME}; -use core::{borrow::Borrow, cmp::{Ordering, min, max}, fmt, ops::{Deref, DerefMut}}; -use spin::{Once, Mutex}; -use trusted_chunk::{ - trusted_chunk::*, - linked_list::List, - static_array::StaticArray, -}; +use crate::{MemoryRegionType, AllocatedFrames}; +use core::{borrow::Borrow, cmp::Ordering, ops::{Deref, DerefMut}}; +use spin::Mutex; +use trusted_chunk::trusted_chunk::*; static CHUNK_ALLOCATOR: Mutex = Mutex::new(TrustedChunkAllocator::new()); pub(crate) fn switch_chunk_allocator_to_heap_structure() { - let _ = CHUNK_ALLOCATOR.lock().switch_to_heap_allocated(); + CHUNK_ALLOCATOR.lock().switch_to_heap_allocated() + .expect("BUG: Failed to switch the chunk allocator to heap allocated. 
May have been called twice."); } #[derive(Debug, Eq)] @@ -42,7 +38,7 @@ impl Chunk { .map(|(chunk, _)| chunk) .map_err(|chunk_error|{ match chunk_error { - ChunkCreationError::Overlap(idx) => "Failed to create a verified chunk due to an overlap", + ChunkCreationError::Overlap(_idx) => "Failed to create a verified chunk due to an overlap", ChunkCreationError::NoSpace => "Before the heap is initialized, requested more chunks than there is space for (64)", ChunkCreationError::InvalidRange => "Could not create a chunk for an empty range, use the empty() function" } @@ -55,6 +51,8 @@ impl Chunk { }) } + /// Creates a new Chunk from a TrustedChunk and a FrameRange. + /// Only used within the allocated frames callback function. pub(crate) fn from_trusted_chunk(verified_chunk: TrustedChunk, frames: FrameRange, typ: MemoryRegionType) -> Chunk { Chunk { typ, @@ -63,9 +61,9 @@ impl Chunk { } } - pub(crate) fn frames(&self) -> FrameRange { - self.frames.clone() - } + // pub(crate) fn frames(&self) -> FrameRange { + // self.frames.clone() + // } pub(crate) fn typ(&self) -> MemoryRegionType { self.typ diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs index 2e5972bf7b..7bb33a7f6d 100644 --- a/kernel/memory/src/paging/mapper.rs +++ b/kernel/memory/src/paging/mapper.rs @@ -18,14 +18,13 @@ use core::{ slice, }; use log::{error, warn, debug, trace}; -use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page, Frame, FrameRange, AllocatedPages, AllocatedFrames}; +use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page, Frame, AllocatedPages, AllocatedFrames}; use crate::paging::{ get_current_p4, PageRange, table::{P4, UPCOMING_P4, Table, Level4}, }; use pte_flags::PteFlagsArch; -use spin::Once; use kernel_config::memory::PAGE_SIZE; use super::tlb_flush_virt_addr; use zerocopy::FromBytes; diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs index 61d3df8c84..67d46c4b3f 100644 --- a/kernel/memory/src/paging/mod.rs +++ b/kernel/memory/src/paging/mod.rs @@ -29,7 +29,7 @@ use core::{ }; use log::debug; use super::{ - Frame, FrameRange, PageRange, VirtualAddress, PhysicalAddress, + Frame, PageRange, VirtualAddress, PhysicalAddress, AllocatedPages, allocate_pages, AllocatedFrames, PteFlags, InitialMemoryMappings, tlb_flush_all, tlb_flush_virt_addr, get_p4, find_section_memory_bounds, From 5d407bbe27f69def9ee060b8d0f74ead012a3466 Mon Sep 17 00:00:00 2001 From: Ramla Ijaz Date: Tue, 13 Jun 2023 21:51:17 -0400 Subject: [PATCH 09/23] added typestate just for chunk type --- .../frame_allocator/src/allocated_frames.rs | 17 +- kernel/frame_allocator/src/frames.rs | 378 ++++++++++++++++++ kernel/frame_allocator/src/lib.rs | 61 +-- 3 files changed, 419 insertions(+), 37 deletions(-) create mode 100644 kernel/frame_allocator/src/frames.rs diff --git a/kernel/frame_allocator/src/allocated_frames.rs b/kernel/frame_allocator/src/allocated_frames.rs index 23a94b3ab9..a25e43aa25 100644 --- a/kernel/frame_allocator/src/allocated_frames.rs +++ b/kernel/frame_allocator/src/allocated_frames.rs @@ -1,8 +1,9 @@ -use crate::{Chunk, MemoryRegionType, contains_any, FREE_GENERAL_FRAMES_LIST, FREE_RESERVED_FRAMES_LIST, RESERVED_REGIONS}; +use crate::{MemoryRegionType, contains_any, FREE_GENERAL_FRAMES_LIST, FREE_RESERVED_FRAMES_LIST, RESERVED_REGIONS}; use memory_structs::{FrameRange, Frame}; use core::{fmt, ops::{Deref, DerefMut}, marker::PhantomData}; use trusted_chunk::trusted_chunk::TrustedChunk; use 
range_inclusive::RangeInclusiveIterator; +use crate::frames::*; /// Represents a range of allocated physical memory [`Frame`]s; derefs to [`FrameRange`]. /// @@ -14,7 +15,7 @@ use range_inclusive::RangeInclusiveIterator; /// This object represents ownership of the range of allocated physical frames; /// if this object falls out of scope, its allocated frames will be auto-deallocated upon drop. pub struct AllocatedFrames { - pub(crate) frames: Chunk, + pub(crate) frames: Frames<{FrameState::Unmapped}>, } // AllocatedFrames must not be Cloneable, and it must not expose its inner frames as mutable. @@ -37,7 +38,7 @@ impl AllocatedFrames { /// Can be used as a placeholder, but will not permit any real usage. pub const fn empty() -> AllocatedFrames { AllocatedFrames { - frames: Chunk::empty() + frames: Frames::empty() } } @@ -50,7 +51,7 @@ impl AllocatedFrames { /// If either of those conditions are met, `self` is modified and `Ok(())` is returned, /// otherwise `Err(other)` is returned. pub fn merge(&mut self, mut other: AllocatedFrames) -> Result<(), AllocatedFrames> { - let chunk = core::mem::replace(&mut other.frames, Chunk::empty()); + let chunk = core::mem::replace(&mut other.frames, Frames::empty()); match self.frames.merge(chunk) { Ok(_) => { // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. @@ -76,8 +77,8 @@ impl AllocatedFrames { /// Returns an `Err` containing this `AllocatedFrames` if `at_frame` is otherwise out of bounds. /// /// [`core::slice::split_at()`]: https://doc.rust-lang.org/core/primitive.slice.html#method.split_at - pub fn split(mut self, at_frame: Frame) -> Result<(AllocatedFrames, AllocatedFrames), AllocatedFrames> { - let chunk = core::mem::replace(&mut self.frames, Chunk::empty()); + pub fn split_at(mut self, at_frame: Frame) -> Result<(AllocatedFrames, AllocatedFrames), AllocatedFrames> { + let chunk = core::mem::replace(&mut self.frames, Frames::empty()); match chunk.split_at(at_frame) { Ok((chunk1, chunk2)) => { // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. @@ -120,7 +121,7 @@ pub(crate) fn into_allocated_frames(tc: TrustedChunk, frames: FrameRange) -> All } else { MemoryRegionType::Free }; - AllocatedFrames { frames: Chunk::from_trusted_chunk(tc, frames, typ) } + AllocatedFrames { frames: Frames::from_trusted_chunk(tc, frames, typ) } } impl Drop for AllocatedFrames { @@ -136,7 +137,7 @@ impl Drop for AllocatedFrames { // Simply add the newly-deallocated chunk to the free frames list. 
let mut locked_list = list.lock(); - let res = locked_list.insert(core::mem::replace(&mut self.frames, Chunk::empty())); + let res = locked_list.insert(core::mem::replace(&mut self.frames, Frames::empty())); match res { Ok(_inserted_free_chunk) => (), Err(c) => error!("BUG: couldn't insert deallocated chunk {:?} into free frame list", c), diff --git a/kernel/frame_allocator/src/frames.rs b/kernel/frame_allocator/src/frames.rs new file mode 100644 index 0000000000..2904115f73 --- /dev/null +++ b/kernel/frame_allocator/src/frames.rs @@ -0,0 +1,378 @@ +use kernel_config::memory::PAGE_SIZE; +use memory_structs::{FrameRange, Frame, PhysicalAddress}; +use range_inclusive::RangeInclusive; +use crate::{MemoryRegionType, MIN_FRAME, MAX_FRAME, RESERVED_REGIONS, FREE_GENERAL_FRAMES_LIST, FREE_RESERVED_FRAMES_LIST, contains_any}; +use core::{borrow::Borrow, cmp::{Ordering, min, max}, fmt, ops::{Deref, DerefMut}}; +use spin::{Once, Mutex}; +use trusted_chunk::{ + trusted_chunk::*, + linked_list::List, + static_array::StaticArray, +}; +use range_inclusive::RangeInclusiveIterator; +use core::marker::PhantomData; +use crate::allocated_frames::*; + +static CHUNK_ALLOCATOR: Mutex = Mutex::new(TrustedChunkAllocator::new()); + +pub(crate) fn switch_chunk_allocator_to_heap_structure() { + CHUNK_ALLOCATOR.lock().switch_to_heap_allocated() + .expect("BUG: Failed to switch the chunk allocator to heap allocated. May have been called twice."); +} + +// pub(crate) type AllocatedFrames = Frames<{FrameState::Unmapped}>; +// pub(crate) type MappedFrames = Frames<{FrameState::Mapped}>; + + +#[derive(PartialEq, Eq)] +pub enum FrameState { + Unmapped, +} + +#[derive(Debug, Eq)] +pub struct Frames { + /// The type of this memory chunk, e.g., whether it's in a free or reserved region. + typ: MemoryRegionType, + /// The Frames covered by this chunk, an inclusive range. + frames: FrameRange, + /// The actual verified chunk + verified_chunk: TrustedChunk +} + +assert_not_impl_any!(Frames<{FrameState::Unmapped}>: DerefMut, Clone); + +impl Frames<{FrameState::Unmapped}> { + pub(crate) fn new(typ: MemoryRegionType, frames: FrameRange) -> Result { + let verified_chunk = CHUNK_ALLOCATOR.lock().create_chunk(frames.to_range_inclusive()) + .map(|(chunk, _)| chunk) + .map_err(|chunk_error|{ + match chunk_error { + ChunkCreationError::Overlap(_idx) => "Failed to create a verified chunk due to an overlap", + ChunkCreationError::NoSpace => "Before the heap is initialized, requested more chunks than there is space for (64)", + ChunkCreationError::InvalidRange => "Could not create a chunk for an empty range, use the empty() function" + } + })?; + + Ok(Frames { + typ, + frames, + verified_chunk + }) + } + + /// Creates a new Chunk from a TrustedChunk and a FrameRange. + /// Only used within the allocated frames callback function. + pub(crate) fn from_trusted_chunk(verified_chunk: TrustedChunk, frames: FrameRange, typ: MemoryRegionType) -> Self { + Frames { + typ, + frames, + verified_chunk + } + } + + pub(crate) fn as_allocated_frames(self) -> AllocatedFrames { + AllocatedFrames { + frames: self, + } + } + // /// Returns an `AllocatedFrame` if this `AllocatedFrames` object contains only one frame. + // /// + // /// ## Panic + // /// Panics if this `AllocatedFrame` contains multiple frames or zero frames. 
+ // pub fn as_allocated_frame(&self) -> AllocatedFrame { + // assert!(self.size_in_frames() == 1); + // AllocatedFrame { + // frame: *self.start(), + // _phantom: PhantomData, + // } + // } +} + +// impl Drop for Frames { +// fn drop(&mut self) { +// if self.size_in_frames() == 0 { return; } + +// let unmapped_frames: Frames<{FrameState::Unmapped}> = Frames { +// typ: self.typ, +// frames: self.frames.clone(), +// verified_chunk: core::mem::replace(&mut self.verified_chunk, TrustedChunk::empty()) +// }; + +// let (list, _typ) = if contains_any(&RESERVED_REGIONS.lock(), &self.frames) { +// (&FREE_RESERVED_FRAMES_LIST, MemoryRegionType::Reserved) +// } else { +// (&FREE_GENERAL_FRAMES_LIST, MemoryRegionType::Free) +// }; +// // trace!("frame_allocator: deallocating {:?}, typ {:?}", self, typ); + +// // Simply add the newly-deallocated chunk to the free frames list. +// let mut locked_list = list.lock(); +// let res = locked_list.insert(unmapped_frames); +// match res { +// Ok(_inserted_free_chunk) => (), +// Err(c) => error!("BUG: couldn't insert deallocated chunk {:?} into free frame list", c), +// } + +// // Here, we could optionally use above `_inserted_free_chunk` to merge the adjacent (contiguous) chunks +// // before or after the newly-inserted free chunk. +// // However, there's no *need* to do so until we actually run out of address space or until +// // a requested address is in a chunk that needs to be merged. +// // Thus, for performance, we save that for those future situations. +// } +// } + +// impl<'f> IntoIterator for &'f Frames<{FrameState::Unmapped}> { +// type IntoIter = AllocatedFramesIter<'f>; +// type Item = AllocatedFrame<'f>; +// fn into_iter(self) -> Self::IntoIter { +// AllocatedFramesIter { +// _owner: self, +// range: self.frames.clone().into_iter(), +// } +// } +// } + +// /// An iterator over each [`AllocatedFrame`] in a range of [`Frames`]. +// /// +// /// We must implement our own iterator type here in order to tie the lifetime `'f` +// /// of a returned `AllocatedFrame<'f>` type to the lifetime of its containing `Frames`. +// /// This is because the underlying type of `Frames` is a [`FrameRange`], +// /// which itself is a [`core::ops::RangeInclusive`] of [`Frame`]s, and unfortunately the +// /// `RangeInclusive` type doesn't implement an immutable iterator. +// /// +// /// Iterating through a `RangeInclusive` actually modifies its own internal range, +// /// so we must avoid doing that because it would break the semantics of a `FrameRange`. +// /// In fact, this is why [`FrameRange`] only implements `IntoIterator` but +// /// does not implement [`Iterator`] itself. +// pub struct AllocatedFramesIter<'f> { +// _owner: &'f Frames<{FrameState::Unmapped}>, +// range: RangeInclusiveIterator, +// } +// impl<'f> Iterator for AllocatedFramesIter<'f> { +// type Item = AllocatedFrame<'f>; +// fn next(&mut self) -> Option { +// self.range.next().map(|frame| +// AllocatedFrame { +// frame, _phantom: PhantomData, +// } +// ) +// } +// } + +// /// A reference to a single frame within a range of `Frames`. +// /// +// /// The lifetime of this type is tied to the lifetime of its owning `Frames`. 
+// #[derive(Debug)] +// pub struct AllocatedFrame<'f> { +// frame: Frame, +// _phantom: PhantomData<&'f Frame>, +// } +// impl<'f> Deref for AllocatedFrame<'f> { +// type Target = Frame; +// fn deref(&self) -> &Self::Target { +// &self.frame +// } +// } +// assert_not_impl_any!(AllocatedFrame: DerefMut, Clone); + + +impl Frames { + pub(crate) fn frames(&self) -> FrameRange { + self.frames.clone() + } + + pub(crate) fn typ(&self) -> MemoryRegionType { + self.typ + } + + /// Returns a new `Frames` with an empty range of frames. + pub const fn empty() -> Frames { + Frames { + typ: MemoryRegionType::Unknown, + frames: FrameRange::empty(), + verified_chunk: TrustedChunk::empty() + } + } + + /// Merges the given `Frames` object `other` into this `Frames` object (`self`). + /// This is just for convenience and usability purposes, it performs no allocation or remapping. + /// + /// The given `other` must be physically contiguous with `self`, i.e., come immediately before or after `self`. + /// That is, either `self.start == other.end + 1` or `self.end + 1 == other.start` must be true. + /// + /// If either of those conditions are met, `self` is modified and `Ok(())` is returned, + /// otherwise `Err(other)` is returned. + pub fn merge(&mut self, mut other: Self) -> Result<(), Self> { + if self.is_empty() || other.is_empty() { + return Err(other); + } + + // take out the TrustedChunk from other + let other_verified_chunk = core::mem::replace(&mut other.verified_chunk, TrustedChunk::empty()); + + // merged the other TrustedChunk with self + // failure here means that the chunks cannot be merged + self.verified_chunk.merge(other_verified_chunk) + .map_err(|vchunk| { + let _ = core::mem::replace(&mut other.verified_chunk, vchunk); + other + })?; + + // use the newly merged TrustedChunk to update the frame range + self.frames = into_frame_range(&self.verified_chunk.frames()); + + Ok(()) + } + + /// An inner function that breaks up the given `Frames` into multiple smaller `Frames`. + /// + /// Returns a tuple of three `Frames`: + /// 1. The `Frames` containing the requested range of frames starting at `start_frame`. + /// 2. The range of frames in the `self` that came before the beginning of the requested frame range. + /// 3. The range of frames in the `self` that came after the end of the requested frame range. + pub fn split( + mut self, + start_frame: Frame, + num_frames: usize, + ) -> (Self, Option, Option) { + if self.is_empty() { + return (self, None, None); + } + + // take out the TrustedChunk + let verified_chunk = core::mem::replace(&mut self.verified_chunk, TrustedChunk::empty()); + + let (before, new_allocation, after) = match verified_chunk.split(start_frame.number(), num_frames) { + Ok(x) => x, + Err(vchunk) => { + let _ = core::mem::replace(&mut self.verified_chunk, vchunk); + return (self, None, None); + } + }; + + (Self { + typ: self.typ, + frames: into_frame_range(&new_allocation.frames()), + verified_chunk: new_allocation + }, + before.and_then(|vchunk| + Some(Self{ + typ: self.typ, + frames: into_frame_range(&vchunk.frames()), + verified_chunk: vchunk + }) + ), + after.and_then(|vchunk| + Some(Self{ + typ: self.typ, + frames: into_frame_range(&vchunk.frames()), + verified_chunk: vchunk + }) + )) + } + + /// Splits this `Frames` into two separate `Frames` objects: + /// * `[beginning : at_frame - 1]` + /// * `[at_frame : end]` + /// + /// This function follows the behavior of [`core::slice::split_at()`], + /// thus, either one of the returned `Frames` objects may be empty. 
+ /// * If `at_frame == self.start`, the first returned `Frames` object will be empty. + /// * If `at_frame == self.end + 1`, the second returned `Frames` object will be empty. + /// + /// Returns an `Err` containing this `Frames` if `at_frame` is otherwise out of bounds. + /// + /// [`core::slice::split_at()`]: https://doc.rust-lang.org/core/primitive.slice.html#method.split_at + pub fn split_at(mut self, at_frame: Frame) -> Result<(Self, Self), Self> { + if self.is_empty() { + return Err(self); + } + let typ = self.typ; + + // take out the TrustedChunk + let verified_chunk = core::mem::replace(&mut self.verified_chunk, TrustedChunk::empty()); + + let (first, second) = verified_chunk.split_at(at_frame.number()) + .map_err(|vchunk| { + let _ = core::mem::replace(&mut self.verified_chunk, vchunk); + self + })?; + + Ok((Self { + typ, + frames: into_frame_range(&first.frames()), + verified_chunk: first + }, + Self { + typ, + frames: into_frame_range(&second.frames()), + verified_chunk: second + })) + } +} + +impl Deref for Frames { + type Target = FrameRange; + fn deref(&self) -> &FrameRange { + &self.frames + } +} +impl Ord for Frames { + fn cmp(&self, other: &Self) -> Ordering { + self.frames.start().cmp(other.frames.start()) + } +} +impl PartialOrd for Frames { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} +impl PartialEq for Frames { + fn eq(&self, other: &Self) -> bool { + self.frames.start() == other.frames.start() + } +} +impl Borrow for &'_ Frames { + fn borrow(&self) -> &Frame { + self.frames.start() + } +} + + +fn into_frame_range(frames: &RangeInclusive) -> FrameRange { + let start = FrameNum{ frame: *frames.start() }.into_frame() + .expect("Verified chunk start was not a valid frame"); + + let end = FrameNum{ frame: *frames.end() }.into_frame() + .expect("Verified chunk end was not a valid frame"); + + FrameRange::new(start, end) +} + +struct FrameNum { + frame: usize +} + +impl FrameNum { + fn into_frame(&self) -> Option { + PhysicalAddress::new(self.frame * PAGE_SIZE) + .and_then(|addr| Some(Frame::containing_address(addr))) + } +} + +/// This function is a callback used to convert `UnmappedFrames` into `AllocatedFrames`. +/// `UnmappedFrames` represents frames that have been unmapped from a page that had +/// exclusively mapped them, indicating that no others pages have been mapped +/// to those same frames, and thus, they can be safely deallocated. +/// +/// This exists to break the cyclic dependency cycle between this crate and +/// the `page_table_entry` crate, since `page_table_entry` must depend on types +/// from this crate in order to enforce safety when modifying page table entries. 
+pub(crate) fn into_frames_unmapped_state(tc: TrustedChunk, frames: FrameRange) -> Frames<{FrameState::Unmapped}> { + let typ = if contains_any(&RESERVED_REGIONS.lock(), &frames) { + MemoryRegionType::Reserved + } else { + MemoryRegionType::Free + }; + Frames { typ, frames, verified_chunk: tc } +} \ No newline at end of file diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index 07b90e0e62..e4ecb5a6ad 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -21,6 +21,7 @@ #![allow(clippy::blocks_in_if_conditions)] #![no_std] #![feature(box_into_inner)] +#![feature(adt_const_params)] extern crate alloc; #[macro_use] extern crate log; @@ -38,17 +39,19 @@ extern crate trusted_chunk; mod static_array_rb_tree; // mod static_array_linked_list; mod region; -mod trusted_chunk_shim; +// mod trusted_chunk_shim; mod allocated_frames; +mod frames; use core::{borrow::Borrow, cmp::{min, max}, ops::Deref}; +use frames::*; use kernel_config::memory::*; use memory_structs::{PhysicalAddress, Frame, FrameRange}; use spin::Mutex; use intrusive_collections::Bound; use static_array_rb_tree::*; use trusted_chunk::trusted_chunk::TrustedChunk; -use trusted_chunk_shim::*; +// use trusted_chunk_shim::*; use region::*; use range_inclusive::RangeInclusive; pub use allocated_frames::*; @@ -62,9 +65,9 @@ const MAX_FRAME: Frame = Frame::containing_address(PhysicalAddress::new_canonica // Note: we keep separate lists for "free, general-purpose" areas and "reserved" areas, as it's much faster. /// The single, system-wide list of free physical memory frames available for general usage. -static FREE_GENERAL_FRAMES_LIST: Mutex> = Mutex::new(StaticArrayRBTree::empty()); +static FREE_GENERAL_FRAMES_LIST: Mutex>> = Mutex::new(StaticArrayRBTree::empty()); /// The single, system-wide list of free physical memory frames reserved for specific usage. -static FREE_RESERVED_FRAMES_LIST: Mutex> = Mutex::new(StaticArrayRBTree::empty()); +static FREE_RESERVED_FRAMES_LIST: Mutex>> = Mutex::new(StaticArrayRBTree::empty()); /// The fixed list of all known regions that are available for general use. /// This does not indicate whether these regions are currently allocated, @@ -175,17 +178,17 @@ pub fn init( // } // Here, since we're sure we now have a list of regions that don't overlap, we can create lists of formally verified Chunks - let mut free_list_w_chunks: [Option; 32] = Default::default(); - let mut reserved_list_w_chunks: [Option; 32] = Default::default(); + let mut free_list_w_chunks: [Option>; 32] = Default::default(); + let mut reserved_list_w_chunks: [Option>; 32] = Default::default(); for (i, elem) in reserved_list.iter().flatten().enumerate() { - reserved_list_w_chunks[i] = Some(Chunk::new( + reserved_list_w_chunks[i] = Some(Frames::new( MemoryRegionType::Reserved, elem.frames.clone() )?); } for (i, elem) in free_list.iter().flatten().enumerate() { - free_list_w_chunks[i] = Some(Chunk::new( + free_list_w_chunks[i] = Some(Frames::new( MemoryRegionType::Free, elem.frames.clone() )?); @@ -312,21 +315,21 @@ pub enum MemoryRegionType { /// with a `let _ = ...` binding to instantly drop it. pub struct DeferredAllocAction<'list> { /// A reference to the list into which we will insert the free general-purpose `Chunk`s. - free_list: &'list Mutex>, + free_list: &'list Mutex>>, /// A reference to the list into which we will insert the free "reserved" `Chunk`s. 
- reserved_list: &'list Mutex>, + reserved_list: &'list Mutex>>, /// A free chunk that needs to be added back to the free list. - free1: Chunk, + free1: Frames<{FrameState::Unmapped}>, /// Another free chunk that needs to be added back to the free list. - free2: Chunk, + free2: Frames<{FrameState::Unmapped}>, } impl<'list> DeferredAllocAction<'list> { fn new(free1: F1, free2: F2) -> DeferredAllocAction<'list> - where F1: Into>, - F2: Into>, + where F1: Into>>, + F2: Into>>, { - let free1 = free1.into().unwrap_or_else(Chunk::empty); - let free2 = free2.into().unwrap_or_else(Chunk::empty); + let free1 = free1.into().unwrap_or_else(Frames::empty); + let free2 = free2.into().unwrap_or_else(Frames::empty); DeferredAllocAction { free_list: &FREE_GENERAL_FRAMES_LIST, reserved_list: &FREE_RESERVED_FRAMES_LIST, @@ -337,8 +340,8 @@ impl<'list> DeferredAllocAction<'list> { } impl<'list> Drop for DeferredAllocAction<'list> { fn drop(&mut self) { - let chunk1 = core::mem::replace(&mut self.free1, Chunk::empty()); - let chunk2 = core::mem::replace(&mut self.free2, Chunk::empty()); + let chunk1 = core::mem::replace(&mut self.free1, Frames::empty()); + let chunk2 = core::mem::replace(&mut self.free2, Frames::empty()); // Insert all of the chunks, both allocated and free ones, into the list. if chunk1.size_in_frames() > 0 { @@ -392,7 +395,7 @@ impl From for &'static str { /// Searches the given `list` for the chunk that contains the range of frames from /// `requested_frame` to `requested_frame + num_frames`. fn find_specific_chunk( - list: &mut StaticArrayRBTree, + list: &mut StaticArrayRBTree>, requested_frame: Frame, num_frames: usize ) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { @@ -427,7 +430,7 @@ fn find_specific_chunk( // Requested address: {:?}, num_frames: {}, chunk: {:?}", // requested_frame, num_frames, chunk, // ); - let initial_chunk_ref: Option> = { + let initial_chunk_ref: Option>> = { let next_cursor = cursor_mut.peek_next(); if let Some(next_chunk) = next_cursor.get().map(|w| w.deref()) { if *chunk.end() + 1 == *next_chunk.start() { @@ -484,7 +487,7 @@ fn find_specific_chunk( /// Searches the given `list` for any chunk large enough to hold at least `num_frames`. fn find_any_chunk( - list: &mut StaticArrayRBTree, + list: &mut StaticArrayRBTree>, num_frames: usize ) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { // During the first pass, we ignore designated regions. @@ -506,7 +509,7 @@ fn find_any_chunk( // Because we allocate new frames by peeling them off from the beginning part of a chunk, // it's MUCH faster to start the search for free frames from higher addresses moving down. // This results in an O(1) allocation time in the general case, until all address ranges are already in use. - let mut cursor = tree.upper_bound_mut(Bound::<&Chunk>::Unbounded); + let mut cursor = tree.upper_bound_mut(Bound::<&Frames<{FrameState::Unmapped}>>::Unbounded); while let Some(chunk) = cursor.get().map(|w| w.deref()) { if num_frames <= chunk.size_in_frames() && chunk.typ() == MemoryRegionType::Free { return allocate_from_chosen_chunk(*chunk.start(), num_frames, ValueRefMut::RBTree(cursor)); @@ -530,7 +533,7 @@ fn find_any_chunk( /// Removes a chunk from the RBTree. /// `chosen_chunk_ref` is basically a wrapper over the cursor which stores the position of the chosen_chunk. 
-fn retrieve_chunk_from_ref(mut chosen_chunk_ref: ValueRefMut) -> Option { +fn retrieve_chunk_from_ref(mut chosen_chunk_ref: ValueRefMut>) -> Option> { // Remove the chosen chunk from the free frame list. let removed_val = chosen_chunk_ref.remove(); @@ -556,7 +559,7 @@ fn retrieve_chunk_from_ref(mut chosen_chunk_ref: ValueRefMut) -> Option, + chosen_chunk_ref: ValueRefMut>, ) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { // Remove the chosen chunk from the free frame list. let chosen_chunk = retrieve_chunk_from_ref(chosen_chunk_ref).ok_or(AllocationError::ChunkRemovalFailed)?; @@ -578,8 +581,8 @@ fn allocate_from_chosen_chunk( fn adjust_chosen_chunk_contiguous( start_frame: Frame, num_frames: usize, - mut initial_chunk: Chunk, - contiguous_chunk_ref: ValueRefMut, + mut initial_chunk: Frames<{FrameState::Unmapped}>, + contiguous_chunk_ref: ValueRefMut>, ) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { let contiguous_chunk = retrieve_chunk_from_ref(contiguous_chunk_ref).ok_or(AllocationError::ChunkRemovalFailed)?; @@ -640,7 +643,7 @@ fn contains_any( /// overlap any existing regions at all. /// TODO: handle partially-overlapping regions by extending existing regions on either end. fn add_reserved_region_to_chunk_list( - list: &mut StaticArrayRBTree, + list: &mut StaticArrayRBTree>, frames: FrameRange, ) -> Result { // We can remove this check because creating a Chunk will check for overlaps @@ -676,7 +679,7 @@ fn add_reserved_region_to_chunk_list( // } // } - list.insert(Chunk::new( + list.insert(Frames::new( MemoryRegionType::Reserved, frames.clone(), )?).map_err(|_c| "BUG: Failed to insert non-overlapping frames into list.")?; @@ -881,7 +884,7 @@ pub fn allocate_frames_at(paddr: PhysicalAddress, num_frames: usize) -> Result Date: Tue, 13 Jun 2023 22:32:27 -0400 Subject: [PATCH 10/23] allocated frames test module --- kernel/.DS_Store | Bin 0 -> 14340 bytes kernel/frame_allocator/src/lib.rs | 4 +- kernel/frame_allocator/src/test.rs | 114 ++++++++++++++--------------- kernel/ixgbe/.DS_Store | Bin 0 -> 6148 bytes 4 files changed, 59 insertions(+), 59 deletions(-) create mode 100644 kernel/.DS_Store create mode 100644 kernel/ixgbe/.DS_Store diff --git a/kernel/.DS_Store b/kernel/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..981921c41f715723891fe16d016250e06c4335db GIT binary patch literal 14340 zcmeHN&u<%55PsV@&e~3#q|kzpsB+;3RZ6QC4pgDQ0S*W$2oC%RcI+gp+TP$FaoW1rV-=Uro!ero4*t$FcP!zX221*7>2L28MoO^M^4d0g6l7W(e zl7Tq`tPfi_+3sZRL*Gj2z=cbA{>~-59JuFFo!@O`?M~J{^eukD5qT(D4~?=dM&#l6 zek+sP$=Zj$^>7zS&(M?g9A!I{NP5TjTO#hF9p9GMl7W(eJOgjxt2KAqb=}YnaP7H0 zx9?KiA7Dp*_+~HMfmgm%JMdVLcd+YET?-mIf&6!<3|f8#`V&ZxrN6$Fj-EY3dgR7| z^cR726}nSAK{%xDK=!vlcG>kI)rXIxK$156t~{(}k}15q6Ufp6Eqg)Bg2x0bCh#=@ zH}v~1T7Ck%fhO9gH8lfG@E-k75SejkF`tSo50u4g0ru3r+YKE)gd|t83UQR z4rKdav7cic+!%SXn8K=&>srkbm@$%@keq-6de7_{g8e<@!4RJZ=xgGR0}WurXdojq z4ad+#{1L7#cU;KGFN%?dV3~8LK#c_54SN(eG^z&6S~yc+GMfV%$%jTj%)aFIMm4E;mwyF|BJor>ig4v=-&Jv7`YsnI^{*WcdMl9v1ObeOvK#evbOf znM>A;j{023r|7vWXTcaVm`(6ChIf0AK1Tj=lrX;gGHSZeSTOdDh9+>HnLV^U{G~us z4{|Mt8ZygZ8-ktjCB1>GV61bLRliqUtr^r52xj6jdA9utLmF8TQAg8x2f>s;E3J@Hr zdl+k|JApLnf{~^bBln_}5?jAfw${NetoE^DY;yscxm~OQSz$CZA;+}`R}}m3+OGwAK7O`$GHI~Oz;d>j7LA>p zBxKY}{4qUxEPI*Fz^v0J$U&@k0xWsc{W6cdg){6Ks~qxI zzp41Eh|S1-+Y85=kz1L`39|u%8IV-hvN){lS{tkxxZCC`8d< z>k7MI*5;bqV5-0fV~n=-h!xPtu4-9uK^!9!%&x-3mx>AcH17krv+|vMXl#D>{?EU8 z0V-Bu2Q!@eg>2R2F5@oDQAE}J|Gk3fu=)mTROYggiy_Av?Q2Ya_6D~`o_dQ`r|kb5 
z#pN30Mp*j}#PT!7hghVMOF*LD1u;er8U5?10)8jK*>yfQ3KsP)KqH$BdIeU|57t#g z+)6VqqNeg?Rirta9icDIgoUG*o*3LR5S$0|_P`q>Sr^GZEK0!|M^zpJ9}K1`7{7-- zm2o%{yTnfG5l4u@o<9pKe4e;dt3z0Mpjf&17#-Xf@aX~9FF`znDXr^q_Kd(vSo_GK z)=S~uDTF*I`Pq2pID0tq$=o|d{0{nqwf6wy9adhq{#lQw; z_?Afex%4r4#JC!K&EVxZtL>37HHb}FxnK@~x2&CfHo|A1{;5pS3{H==%@t@MOTk`` zzAfEUzBM36f0zsEX_vK+Ibl#2i&3zZ_4Mp233iOG^9yx##Ih+Y3#t?OGCH1EsKY-u z@HIHp0?v$<6=+$wk`v~>Rm?4+Ds*!Ek|`}!k&%I zmBGR%eYCl9N!i?lZqEf*k-@mm$J}7+bLY8g^ZQo&y`($c+j#P|bTDougYHnkNv8{u58qD)opgFDoeVl7(X;D+h~rvZ z+uL~h-o2NvUVVP++SRM~pWnK7n8e1)YBjwtf2 zThH2A^L6gOMm1fJ%l-c&VXEB!m;3+OyUnP&W|v*=|D7xM|Nrj(f7#dT?FQO67Qg>L ztLkN2GEg$`KV%@PyuSPTEjTt?&xJnnUArxupWvj-4>)v(A8bv%6F>v%7T sPDtwMq?*FmhTl|#tf3g4n1923H#{d8T literal 0 HcmV?d00001 diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index e4ecb5a6ad..1211d4047d 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -33,8 +33,8 @@ extern crate intrusive_collections; extern crate range_inclusive; extern crate trusted_chunk; -// #[cfg(test)] -// mod test; +#[cfg(test)] +mod test; mod static_array_rb_tree; // mod static_array_linked_list; diff --git a/kernel/frame_allocator/src/test.rs b/kernel/frame_allocator/src/test.rs index d68ced8356..4bcb4697d2 100644 --- a/kernel/frame_allocator/src/test.rs +++ b/kernel/frame_allocator/src/test.rs @@ -14,10 +14,10 @@ impl PartialEq for AllocatedFrames { fn from_addr(start_addr: usize, end_addr: usize) -> AllocatedFrames { AllocatedFrames { - frames: FrameRange::new( + frames: Frames::new(MemoryRegionType::Free, FrameRange::new( Frame::containing_address(PhysicalAddress::new_canonical(start_addr)), Frame::containing_address(PhysicalAddress::new_canonical(end_addr)), - ) + )).unwrap() } } @@ -30,7 +30,7 @@ fn split_before_beginning() { let original = from_addr( 0x4275000, 0x4285000); let split_at = frame_addr(0x4274000); - let result = original.split(split_at); + let result = original.split_at(split_at); dbg!(&result); assert!(result.is_err()); } @@ -39,14 +39,14 @@ fn split_before_beginning() { fn split_at_beginning() { let original = from_addr( 0x4275000, 0x4285000); let split_at = frame_addr(0x4275000); - let first = AllocatedFrames::empty(); - let second = from_addr( 0x4275000, 0x4285000); + let first = FrameRange::empty(); + let second = FrameRange::new(frame_addr(0x4275000), frame_addr(0x4285000)); - let result = original.split(split_at); + let result = original.split_at(split_at); dbg!(&result); let (result1, result2) = result.unwrap(); - assert_eq!(result1, first); - assert_eq!(result2, second); + assert_eq!(result1.deref().clone(), first); + assert_eq!(result2.deref().clone(), second); } @@ -54,28 +54,28 @@ fn split_at_beginning() { fn split_at_middle() { let original = from_addr( 0x4275000, 0x4285000); let split_at = frame_addr( 0x427D000); - let first = from_addr( 0x4275000, 0x427C000); - let second = from_addr( 0x427D000, 0x4285000); + let first = FrameRange::new(frame_addr(0x4275000), frame_addr(0x427C000)); + let second = FrameRange::new( frame_addr(0x427D000), frame_addr(0x4285000)); - let result = original.split(split_at); + let result = original.split_at(split_at); dbg!(&result); let (result1, result2) = result.unwrap(); - assert_eq!(result1, first); - assert_eq!(result2, second); + assert_eq!(result1.deref().clone(), first); + assert_eq!(result2.deref().clone(), second); } #[test] fn split_at_end() { let original = from_addr( 0x4275000, 0x4285000); let split_at = frame_addr( 0x4285000); - let first = from_addr( 0x4275000, 
0x4284000); - let second = from_addr( 0x4285000, 0x4285000); + let first = FrameRange::new( frame_addr(0x4275000), frame_addr(0x4284000)); + let second = FrameRange::new( frame_addr(0x4285000), frame_addr(0x4285000)); - let result = original.split(split_at); + let result = original.split_at(split_at); dbg!(&result); let (result1, result2) = result.unwrap(); - assert_eq!(result1, first); - assert_eq!(result2, second); + assert_eq!(result1.deref().clone(), first); + assert_eq!(result2.deref().clone(), second); } @@ -83,14 +83,14 @@ fn split_at_end() { fn split_after_end() { let original = from_addr( 0x4275000, 0x4285000); let split_at = frame_addr( 0x4286000); - let first = from_addr( 0x4275000, 0x4285000); - let second = AllocatedFrames::empty(); + let first = FrameRange::new( frame_addr(0x4275000), frame_addr(0x4285000)); + let second = FrameRange::empty(); - let result = original.split(split_at); + let result = original.split_at(split_at); dbg!(&result); let (result1, result2) = result.unwrap(); - assert_eq!(result1, first); - assert_eq!(result2, second); + assert_eq!(result1.deref().clone(), first); + assert_eq!(result2.deref().clone(), second); } @@ -99,7 +99,7 @@ fn split_empty_at_zero() { let original = AllocatedFrames::empty(); let split_at = frame_addr(0x0000); - let result = original.split(split_at); + let result = original.split_at(split_at); dbg!(&result); assert!(result.is_err()); } @@ -109,7 +109,7 @@ fn split_empty_at_one() { let original = AllocatedFrames::empty(); let split_at = frame_addr(0x1000); - let result = original.split(split_at); + let result = original.split_at(split_at); dbg!(&result); assert!(result.is_err()); } @@ -119,7 +119,7 @@ fn split_empty_at_two() { let original = AllocatedFrames::empty(); let split_at = frame_addr(0x2000); - let result = original.split(split_at); + let result = original.split_at(split_at); dbg!(&result); assert!(result.is_err()); } @@ -129,57 +129,57 @@ fn split_empty_at_two() { #[test] fn split_at_beginning_zero() { let original = from_addr( 0x0, 0x5000); - let split_at = frame_addr(0x0); - let first = AllocatedFrames::empty(); - let second = from_addr(0x0, 0x5000); + let split_at = frame_addr(0x0); // leads to attempt to subtract with overflow + let first = FrameRange::empty(); + let second = FrameRange::new(frame_addr(0x0), frame_addr(0x5000)); - let result = original.split(split_at); + let result = original.split_at(split_at); dbg!(&result); let (result1, result2) = result.unwrap(); - assert_eq!(result1, first); - assert_eq!(result2, second); + assert_eq!(result1.deref().clone(), first); + assert_eq!(result2.deref().clone(), second); } #[test] fn split_at_beginning_one() { let original = from_addr( 0x0000, 0x5000); let split_at = frame_addr(0x1000); - let first = from_addr( 0x0000, 0x0000); - let second = from_addr( 0x1000, 0x5000); + let first = FrameRange::new( frame_addr(0x0000), frame_addr(0x0000)); + let second = FrameRange::new( frame_addr(0x1000), frame_addr(0x5000)); - let result = original.split(split_at); + let result = original.split_at(split_at); dbg!(&result); let (result1, result2) = result.unwrap(); - assert_eq!(result1, first); - assert_eq!(result2, second); + assert_eq!(result1.deref().clone(), first); + assert_eq!(result2.deref().clone(), second); } #[test] fn split_at_beginning_max_length_one() { let original = from_addr( 0xFFFF_FFFF_FFFF_F000, 0xFFFF_FFFF_FFFF_F000); let split_at = frame_addr(0xFFFF_FFFF_FFFF_F000); - let first = AllocatedFrames::empty(); - let second = from_addr(0xFFFF_FFFF_FFFF_F000, 
0xFFFF_FFFF_FFFF_F000); + let first = FrameRange::empty(); + let second = FrameRange::new(frame_addr(0xFFFF_FFFF_FFFF_F000), frame_addr(0xFFFF_FFFF_FFFF_F000)); - let result = original.split(split_at); + let result = original.split_at(split_at); dbg!(&result); let (result1, result2) = result.unwrap(); - assert_eq!(result1, first); - assert_eq!(result2, second); + assert_eq!(result1.deref().clone(), first); + assert_eq!(result2.deref().clone(), second); } #[test] fn split_at_end_max_length_two() { let original = from_addr( 0xFFFF_FFFF_FFFF_E000, 0xFFFF_FFFF_FFFF_F000); let split_at = frame_addr( 0xFFFF_FFFF_FFFF_F000); - let first = from_addr( 0xFFFF_FFFF_FFFF_E000, 0xFFFF_FFFF_FFFF_E000); - let second = from_addr( 0xFFFF_FFFF_FFFF_F000, 0xFFFF_FFFF_FFFF_F000); + let first = FrameRange::new( frame_addr(0xFFFF_FFFF_FFFF_E000), frame_addr(0xFFFF_FFFF_FFFF_E000)); + let second = FrameRange::new( frame_addr(0xFFFF_FFFF_FFFF_F000), frame_addr(0xFFFF_FFFF_FFFF_F000)); - let result = original.split(split_at); + let result = original.split_at(split_at); dbg!(&result); let (result1, result2) = result.unwrap(); - assert_eq!(result1, first); - assert_eq!(result2, second); + assert_eq!(result1.deref().clone(), first); + assert_eq!(result2.deref().clone(), second); } @@ -187,26 +187,26 @@ fn split_at_end_max_length_two() { fn split_after_end_max() { let original = from_addr( 0xFFFF_FFFF_FFFF_E000, 0xFFFF_FFFF_FFFF_E000); let split_at = frame_addr(0xFFFF_FFFF_FFFF_F000); - let first = from_addr( 0xFFFF_FFFF_FFFF_E000, 0xFFFF_FFFF_FFFF_E000); - let second = AllocatedFrames::empty(); + let first = FrameRange::new( frame_addr(0xFFFF_FFFF_FFFF_E000), frame_addr(0xFFFF_FFFF_FFFF_E000)); + let second = FrameRange::empty(); - let result = original.split(split_at); + let result = original.split_at(split_at); dbg!(&result); let (result1, result2) = result.unwrap(); - assert_eq!(result1, first); - assert_eq!(result2, second); + assert_eq!(result1.deref().clone(), first); + assert_eq!(result2.deref().clone(), second); } #[test] fn split_at_beginning_max() { let original = from_addr( 0xFFFF_FFFF_FFFF_E000, 0xFFFF_FFFF_FFFF_E000); let split_at = frame_addr(0xFFFF_FFFF_FFFF_E000); - let first = AllocatedFrames::empty(); - let second = from_addr(0xFFFF_FFFF_FFFF_E000, 0xFFFF_FFFF_FFFF_E000); + let first = FrameRange::empty(); + let second = FrameRange::new(frame_addr(0xFFFF_FFFF_FFFF_E000), frame_addr(0xFFFF_FFFF_FFFF_E000)); - let result = original.split(split_at); + let result = original.split_at(split_at); dbg!(&result); let (result1, result2) = result.unwrap(); - assert_eq!(result1, first); - assert_eq!(result2, second); + assert_eq!(result1.deref().clone(), first); + assert_eq!(result2.deref().clone(), second); } diff --git a/kernel/ixgbe/.DS_Store b/kernel/ixgbe/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..a4c4b45b73300795bdbe2d393d30b560c45d41a3 GIT binary patch literal 6148 zcmeHK%}yIJ5FRIKae}JmfCR_A^oF9KR7E{h+6FjqLt0c0P$j!0f|Zsv%5I2C2-??x zm*6>g0v-ou>`5d@dTk>#BaOfDc*eW_Z21QORD0Ol0;mIkMkOp&u~;C~PC6$g_EZp+ z?ZZF=axicK$y~HJmXQIPyBh332m?67nVZeueqv6e#6;Lmi{mG1k<+j;VGK=I0UHhsGo` zJB)tC{ZXgBwqw#fjMM(WCda)#hMXM5Y0r$>W|;Oe+fU#cw65xEr@k>Be{Z!my|1m- zWYZgOZ#T*QHknlQ`j@RA``wE_*W;VX?VTMaC|uZ9J~K|?9t|epDj1}RNzc$Tjh;pc z3IoD`FtC&i@Mfh~m-6rAr-T7v;D0he`-6u{=ra}$?bd;XeF7lTHPQ;&bX}wzkI`o= z9AXQKa;bwtvR}vkJsS?ncPOPWAPl@_pzNx3 zI{)|n{{DZRBsF0`7 Date: Wed, 14 Jun 2023 05:57:45 -0400 Subject: [PATCH 11/23] works with chunk as a type state, allocation error when AF is also the same typestate --- 
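The "chunk as a type state" approach referenced in the subject line can be sketched roughly as follows. This is only a minimal illustration of the const-generic typestate pattern (relying on the `adt_const_params` feature already enabled in this series); the `Mapped` variant and the elided fields are placeholders rather than the exact definitions in `frames.rs`:

#![feature(adt_const_params)]

/// The possible mapping states of a chunk of physical frames.
#[derive(PartialEq, Eq)]
pub enum FrameState {
    Unmapped,
    Mapped,
}

/// A chunk of physical frames whose mapping state is carried in the type itself.
pub struct Frames<const S: FrameState> {
    // frame range, region type, verified TrustedChunk, etc. (elided in this sketch)
}

impl Frames<{FrameState::Unmapped}> {
    // `new()`, `split()`, `split_at()`, `merge()`, and the conversion into
    // `AllocatedFrames` are implemented only for the `Unmapped` state, so the
    // compiler rejects code that tries to treat mapped frames as free ones.
}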
kernel/frame_allocator/src/frames.rs | 183 ++++++++++++++++++--------- kernel/frame_allocator/src/lib.rs | 3 + 2 files changed, 126 insertions(+), 60 deletions(-) diff --git a/kernel/frame_allocator/src/frames.rs b/kernel/frame_allocator/src/frames.rs index 2904115f73..983d149d04 100644 --- a/kernel/frame_allocator/src/frames.rs +++ b/kernel/frame_allocator/src/frames.rs @@ -1,14 +1,10 @@ use kernel_config::memory::PAGE_SIZE; use memory_structs::{FrameRange, Frame, PhysicalAddress}; use range_inclusive::RangeInclusive; -use crate::{MemoryRegionType, MIN_FRAME, MAX_FRAME, RESERVED_REGIONS, FREE_GENERAL_FRAMES_LIST, FREE_RESERVED_FRAMES_LIST, contains_any}; -use core::{borrow::Borrow, cmp::{Ordering, min, max}, fmt, ops::{Deref, DerefMut}}; -use spin::{Once, Mutex}; -use trusted_chunk::{ - trusted_chunk::*, - linked_list::List, - static_array::StaticArray, -}; +use crate::{MemoryRegionType,RESERVED_REGIONS, FREE_GENERAL_FRAMES_LIST, FREE_RESERVED_FRAMES_LIST, contains_any}; +use core::{borrow::Borrow, cmp::Ordering, ops::{Deref, DerefMut}}; +use spin::Mutex; +use trusted_chunk::trusted_chunk::*; use range_inclusive::RangeInclusiveIterator; use core::marker::PhantomData; use crate::allocated_frames::*; @@ -20,10 +16,6 @@ pub(crate) fn switch_chunk_allocator_to_heap_structure() { .expect("BUG: Failed to switch the chunk allocator to heap allocated. May have been called twice."); } -// pub(crate) type AllocatedFrames = Frames<{FrameState::Unmapped}>; -// pub(crate) type MappedFrames = Frames<{FrameState::Mapped}>; - - #[derive(PartialEq, Eq)] pub enum FrameState { Unmapped, @@ -52,22 +44,33 @@ impl Frames<{FrameState::Unmapped}> { ChunkCreationError::InvalidRange => "Could not create a chunk for an empty range, use the empty() function" } })?; + + assert!(frames.start().number() == verified_chunk.start()); + assert!(frames.end().number() == verified_chunk.end()); - Ok(Frames { + let f = Frames { typ, frames, verified_chunk - }) + }; + //warn!("new frames: {:?}", f); + + Ok(f) } /// Creates a new Chunk from a TrustedChunk and a FrameRange. /// Only used within the allocated frames callback function. pub(crate) fn from_trusted_chunk(verified_chunk: TrustedChunk, frames: FrameRange, typ: MemoryRegionType) -> Self { - Frames { + let f = Frames { typ, frames, verified_chunk - } + }; + assert!(f.frames.start().number() == f.verified_chunk.start()); + assert!(f.frames.end().number() == f.verified_chunk.end()); + //warn!("from trusted chunk: {:?}", f); + + f } pub(crate) fn as_allocated_frames(self) -> AllocatedFrames { @@ -75,6 +78,7 @@ impl Frames<{FrameState::Unmapped}> { frames: self, } } + // /// Returns an `AllocatedFrame` if this `AllocatedFrames` object contains only one frame. 
// /// // /// ## Panic @@ -92,18 +96,25 @@ impl Frames<{FrameState::Unmapped}> { // fn drop(&mut self) { // if self.size_in_frames() == 0 { return; } +// trace!("frame_allocator: dropping {:?}", self); // let unmapped_frames: Frames<{FrameState::Unmapped}> = Frames { // typ: self.typ, // frames: self.frames.clone(), // verified_chunk: core::mem::replace(&mut self.verified_chunk, TrustedChunk::empty()) // }; +// // assert!(unmapped_frames.start().number() == unmapped_frames.verified_chunk.start()); +// // assert!(unmapped_frames.end().number() == unmapped_frames.verified_chunk.end()); + +// // make sure the fields in the Frames match +// self.frames = FrameRange::empty(); + // let (list, _typ) = if contains_any(&RESERVED_REGIONS.lock(), &self.frames) { // (&FREE_RESERVED_FRAMES_LIST, MemoryRegionType::Reserved) // } else { // (&FREE_GENERAL_FRAMES_LIST, MemoryRegionType::Free) // }; -// // trace!("frame_allocator: deallocating {:?}, typ {:?}", self, typ); +// trace!("frame_allocator: deallocating {:?}, typ {:?}", self, _typ); // // Simply add the newly-deallocated chunk to the free frames list. // let mut locked_list = list.lock(); @@ -212,16 +223,27 @@ impl Frames { // merged the other TrustedChunk with self // failure here means that the chunks cannot be merged - self.verified_chunk.merge(other_verified_chunk) - .map_err(|vchunk| { - let _ = core::mem::replace(&mut other.verified_chunk, vchunk); - other - })?; - - // use the newly merged TrustedChunk to update the frame range - self.frames = into_frame_range(&self.verified_chunk.frames()); - - Ok(()) + match self.verified_chunk.merge(other_verified_chunk){ + Ok(_) => { + // use the newly merged TrustedChunk to update the frame range + self.frames = into_frame_range(&self.verified_chunk.frames()); + core::mem::forget(other); + assert!(self.frames.start().number() == self.verified_chunk.start()); + assert!(self.frames.end().number() == self.verified_chunk.end()); + //warn!("merge: {:?}", self); + + return Ok(()); + }, + Err(other_verified_chunk) => { + let _ = core::mem::replace(&mut other.verified_chunk, other_verified_chunk); + assert!(self.frames.start().number() == self.verified_chunk.start()); + assert!(self.frames.end().number() == self.verified_chunk.end()); + + assert!(other.frames.start().number() == other.verified_chunk.start()); + assert!(other.frames.end().number() == other.verified_chunk.end()); + return Err(other); + } + } } /// An inner function that breaks up the given `Frames` into multiple smaller `Frames`. 
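As a usage illustration of the `merge` semantics above, consider the following sketch; the addresses are arbitrary examples and the error handling is simplified (this is not code from the patch itself):

// Hypothetical example: merging two physically contiguous free chunks into one.
fn merge_two_contiguous_chunks() -> Result<(), &'static str> {
    let frame = |addr: usize| Frame::containing_address(PhysicalAddress::new_canonical(addr));

    let mut first = Frames::new(MemoryRegionType::Free, FrameRange::new(frame(0x1000), frame(0x4000)))?;
    let second    = Frames::new(MemoryRegionType::Free, FrameRange::new(frame(0x5000), frame(0x8000)))?;

    // `second` starts at the frame immediately after `first` ends, so the merge succeeds
    // and `first` now covers 0x1000..=0x8000. A non-contiguous `second` would have been
    // handed back unchanged as `Err(second)`.
    first.merge(second).map_err(|_unmerged| "chunks were not contiguous")?;
    assert_eq!(*first.start(), frame(0x1000));
    assert_eq!(*first.end(),   frame(0x8000));
    Ok(())
}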
@@ -246,29 +268,50 @@ impl Frames { Ok(x) => x, Err(vchunk) => { let _ = core::mem::replace(&mut self.verified_chunk, vchunk); + assert!(self.frames.start().number() == self.verified_chunk.start()); + assert!(self.frames.end().number() == self.verified_chunk.end()); return (self, None, None); } }; + + let typ = self.typ; + core::mem::forget(self); - (Self { - typ: self.typ, + let c1 = Self { + typ, frames: into_frame_range(&new_allocation.frames()), verified_chunk: new_allocation - }, - before.and_then(|vchunk| + }; + let c2 = before.and_then(|vchunk| Some(Self{ - typ: self.typ, + typ, frames: into_frame_range(&vchunk.frames()), verified_chunk: vchunk }) - ), - after.and_then(|vchunk| + ); + let c3 = after.and_then(|vchunk| Some(Self{ - typ: self.typ, + typ, frames: into_frame_range(&vchunk.frames()), verified_chunk: vchunk }) - )) + ); + assert!(c1.frames.start().number() == c1.verified_chunk.start()); + assert!(c1.frames.end().number() == c1.verified_chunk.end()); + + if let Some(c) = &c2 { + assert!(c.frames.start().number() == c.verified_chunk.start()); + assert!(c.frames.end().number() == c.verified_chunk.end()); + } + + if let Some(c) = &c3 { + assert!(c.frames.start().number() == c.verified_chunk.start()); + assert!(c.frames.end().number() == c.verified_chunk.end()); + } + + //warn!("split: {:?} {:?} {:?}", c1, c2, c3); + + (c1, c2, c3) } /// Splits this `Frames` into two separate `Frames` objects: @@ -292,22 +335,38 @@ impl Frames { // take out the TrustedChunk let verified_chunk = core::mem::replace(&mut self.verified_chunk, TrustedChunk::empty()); - let (first, second) = verified_chunk.split_at(at_frame.number()) - .map_err(|vchunk| { + let (first, second) = match verified_chunk.split_at(at_frame.number()){ + Ok((first, second)) => (first, second), + Err(vchunk) => { let _ = core::mem::replace(&mut self.verified_chunk, vchunk); - self - })?; + assert!(self.frames.start().number() == self.verified_chunk.start()); + assert!(self.frames.end().number() == self.verified_chunk.end()); + return Err(self); + } + }; + + core::mem::forget(self); - Ok((Self { + let c1 = Self { typ, frames: into_frame_range(&first.frames()), verified_chunk: first - }, - Self { + }; + let c2 = Self { typ, frames: into_frame_range(&second.frames()), verified_chunk: second - })) + }; + + assert!(c1.frames.start().number() == c1.verified_chunk.start()); + assert!(c1.frames.end().number() == c1.verified_chunk.end()); + + assert!(c2.frames.start().number() == c2.verified_chunk.start()); + assert!(c2.frames.end().number() == c2.verified_chunk.end()); + + //warn!("split at: {:?} {:?}", c1, c2); + + Ok((c1, c2)) } } @@ -360,19 +419,23 @@ impl FrameNum { } } -/// This function is a callback used to convert `UnmappedFrames` into `AllocatedFrames`. -/// `UnmappedFrames` represents frames that have been unmapped from a page that had -/// exclusively mapped them, indicating that no others pages have been mapped -/// to those same frames, and thus, they can be safely deallocated. -/// -/// This exists to break the cyclic dependency cycle between this crate and -/// the `page_table_entry` crate, since `page_table_entry` must depend on types -/// from this crate in order to enforce safety when modifying page table entries. 
-pub(crate) fn into_frames_unmapped_state(tc: TrustedChunk, frames: FrameRange) -> Frames<{FrameState::Unmapped}> { - let typ = if contains_any(&RESERVED_REGIONS.lock(), &frames) { - MemoryRegionType::Reserved - } else { - MemoryRegionType::Free - }; - Frames { typ, frames, verified_chunk: tc } -} \ No newline at end of file +// /// This function is a callback used to convert `UnmappedFrames` into `AllocatedFrames`. +// /// `UnmappedFrames` represents frames that have been unmapped from a page that had +// /// exclusively mapped them, indicating that no others pages have been mapped +// /// to those same frames, and thus, they can be safely deallocated. +// /// +// /// This exists to break the cyclic dependency cycle between this crate and +// /// the `page_table_entry` crate, since `page_table_entry` must depend on types +// /// from this crate in order to enforce safety when modifying page table entries. +// pub(crate) fn into_frames_unmapped_state(tc: TrustedChunk, frames: FrameRange) -> Frames<{FrameState::Unmapped}> { +// let typ = if contains_any(&RESERVED_REGIONS.lock(), &frames) { +// MemoryRegionType::Reserved +// } else { +// MemoryRegionType::Free +// }; +// let f = Frames { typ, frames, verified_chunk: tc }; +// assert!(f.frames.start().number() == f.verified_chunk.start()); +// assert!(f.frames.end().number() == f.verified_chunk.end()); +// //warn!("into frames unmapped: {:?} ", f); +// f +// } \ No newline at end of file diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index 1211d4047d..6125d84982 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -56,6 +56,9 @@ use region::*; use range_inclusive::RangeInclusive; pub use allocated_frames::*; +// pub type AllocatedFrames = Frames<{FrameState::Unmapped}>; +// pub use frames::AllocatedFrame; + const FRAME_SIZE: usize = PAGE_SIZE; #[allow(dead_code)] const MIN_FRAME: Frame = Frame::containing_address(PhysicalAddress::zero()); From bfcc2687f15b466b12e4c07e3417a6acff448a23 Mon Sep 17 00:00:00 2001 From: Ramla Ijaz Date: Wed, 14 Jun 2023 06:06:38 -0400 Subject: [PATCH 12/23] removed changes to other files --- kernel/.DS_Store | Bin 14340 -> 0 bytes kernel/cpu/src/x86_64.rs | 6 ------ kernel/gic/src/gic/dist_interface.rs | 20 -------------------- kernel/gic/src/gic/mod.rs | 12 ------------ kernel/gic/src/gic/redist_interface.rs | 7 ------- 5 files changed, 45 deletions(-) delete mode 100644 kernel/.DS_Store diff --git a/kernel/.DS_Store b/kernel/.DS_Store deleted file mode 100644 index 981921c41f715723891fe16d016250e06c4335db..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14340 zcmeHN&u<%55PsV@&e~3#q|kzpsB+;3RZ6QC4pgDQ0S*W$2oC%RcI+gp+TP$FaoW1rV-=Uro!ero4*t$FcP!zX221*7>2L28MoO^M^4d0g6l7W(e zl7Tq`tPfi_+3sZRL*Gj2z=cbA{>~-59JuFFo!@O`?M~J{^eukD5qT(D4~?=dM&#l6 zek+sP$=Zj$^>7zS&(M?g9A!I{NP5TjTO#hF9p9GMl7W(eJOgjxt2KAqb=}YnaP7H0 zx9?KiA7Dp*_+~HMfmgm%JMdVLcd+YET?-mIf&6!<3|f8#`V&ZxrN6$Fj-EY3dgR7| z^cR726}nSAK{%xDK=!vlcG>kI)rXIxK$156t~{(}k}15q6Ufp6Eqg)Bg2x0bCh#=@ zH}v~1T7Ck%fhO9gH8lfG@E-k75SejkF`tSo50u4g0ru3r+YKE)gd|t83UQR z4rKdav7cic+!%SXn8K=&>srkbm@$%@keq-6de7_{g8e<@!4RJZ=xgGR0}WurXdojq z4ad+#{1L7#cU;KGFN%?dV3~8LK#c_54SN(eG^z&6S~yc+GMfV%$%jTj%)aFIMm4E;mwyF|BJor>ig4v=-&Jv7`YsnI^{*WcdMl9v1ObeOvK#evbOf znM>A;j{023r|7vWXTcaVm`(6ChIf0AK1Tj=lrX;gGHSZeSTOdDh9+>HnLV^U{G~us z4{|Mt8ZygZ8-ktjCB1>GV61bLRliqUtr^r52xj6jdA9utLmF8TQAg8x2f>s;E3J@Hr zdl+k|JApLnf{~^bBln_}5?jAfw${NetoE^DY;yscxm~OQSz$CZA;+}`R}}m3+OGwAK7O`$GHI~Oz;d>j7LA>p 
zBxKY}{4qUxEPI*Fz^v0J$U&@k0xWsc{W6cdg){6Ks~qxI zzp41Eh|S1-+Y85=kz1L`39|u%8IV-hvN){lS{tkxxZCC`8d< z>k7MI*5;bqV5-0fV~n=-h!xPtu4-9uK^!9!%&x-3mx>AcH17krv+|vMXl#D>{?EU8 z0V-Bu2Q!@eg>2R2F5@oDQAE}J|Gk3fu=)mTROYggiy_Av?Q2Ya_6D~`o_dQ`r|kb5 z#pN30Mp*j}#PT!7hghVMOF*LD1u;er8U5?10)8jK*>yfQ3KsP)KqH$BdIeU|57t#g z+)6VqqNeg?Rirta9icDIgoUG*o*3LR5S$0|_P`q>Sr^GZEK0!|M^zpJ9}K1`7{7-- zm2o%{yTnfG5l4u@o<9pKe4e;dt3z0Mpjf&17#-Xf@aX~9FF`znDXr^q_Kd(vSo_GK z)=S~uDTF*I`Pq2pID0tq$=o|d{0{nqwf6wy9adhq{#lQw; z_?Afex%4r4#JC!K&EVxZtL>37HHb}FxnK@~x2&CfHo|A1{;5pS3{H==%@t@MOTk`` zzAfEUzBM36f0zsEX_vK+Ibl#2i&3zZ_4Mp233iOG^9yx##Ih+Y3#t?OGCH1EsKY-u z@HIHp0?v$<6=+$wk`v~>Rm?4+Ds*!Ek|`}!k&%I zmBGR%eYCl9N!i?lZqEf*k-@mm$J}7+bLY8g^ZQo&y`($c+j#P|bTDougYHnkNv8{u58qD)opgFDoeVl7(X;D+h~rvZ z+uL~h-o2NvUVVP++SRM~pWnK7n8e1)YBjwtf2 zThH2A^L6gOMm1fJ%l-c&VXEB!m;3+OyUnP&W|v*=|D7xM|Nrj(f7#dT?FQO67Qg>L ztLkN2GEg$`KV%@PyuSPTEjTt?&xJnnUArxupWvj-4>)v(A8bv%6F>v%7T sPDtwMq?*FmhTl|#tf3g4n1923H#{d8T diff --git a/kernel/cpu/src/x86_64.rs b/kernel/cpu/src/x86_64.rs index de158ef812..8df87e5f6b 100644 --- a/kernel/cpu/src/x86_64.rs +++ b/kernel/cpu/src/x86_64.rs @@ -12,12 +12,6 @@ impl From for CpuId { } } -impl From for ApicId { - fn from(cpu_id: CpuId) -> Self { - ApicId::try_from(cpu_id.value()).expect("An invalid CpuId was encountered") - } -} - impl TryFrom for CpuId { type Error = u32; fn try_from(raw_cpu_id: u32) -> Result { diff --git a/kernel/gic/src/gic/dist_interface.rs b/kernel/gic/src/gic/dist_interface.rs index 398568aa55..697cf35131 100644 --- a/kernel/gic/src/gic/dist_interface.rs +++ b/kernel/gic/src/gic/dist_interface.rs @@ -27,7 +27,6 @@ use cpu::MpidrValue; mod offset { use crate::{Offset32, Offset64}; pub(crate) const CTLR: Offset32 = Offset32::from_byte_offset(0x000); - pub(crate) const IIDR: Offset32 = Offset32::from_byte_offset(0x008); pub(crate) const IGROUPR: Offset32 = Offset32::from_byte_offset(0x080); pub(crate) const ISENABLER: Offset32 = Offset32::from_byte_offset(0x100); pub(crate) const ICENABLER: Offset32 = Offset32::from_byte_offset(0x180); @@ -137,16 +136,6 @@ pub fn send_ipi_gicv2(registers: &mut GicRegisters, int_num: u32, target: IpiTar registers.write_volatile(offset::SGIR, value); } -/// Deserialized content of the `IIDR` distributor register -pub struct Implementer { - /// Product Identifier of this distributor. - pub product_id: u8, - /// An arbitrary revision number defined by the implementer. - pub version: u8, - /// Contains the JEP106 code of the company that implemented the distributor. - pub implementer_jep106: u16, -} - impl super::ArmGic { pub(crate) fn distributor(&self) -> &GicRegisters { match self { @@ -162,15 +151,6 @@ impl super::ArmGic { } } - pub fn implementer(&self) -> Implementer { - let raw = self.distributor().read_volatile(offset::IIDR); - Implementer { - product_id: (raw >> 24) as _, - version: ((raw >> 12) & 0xff) as _, - implementer_jep106: (raw & 0xfff) as _, - } - } - /// Returns the destination of an SPI if it's valid, i.e. if it /// points to existing CPU(s). /// diff --git a/kernel/gic/src/gic/mod.rs b/kernel/gic/src/gic/mod.rs index 00f233f104..66cbd40e49 100644 --- a/kernel/gic/src/gic/mod.rs +++ b/kernel/gic/src/gic/mod.rs @@ -438,18 +438,6 @@ impl ArmGic { Self::V3( _) => cpu_interface_gicv3::set_minimum_priority(priority), } } - - /// Returns the internal ID of the redistributor (GICv3) - /// - /// Note #2: this is only provided for debugging purposes - /// Note #1: as a compatibility feature, on GICv2, the CPU index is returned. 
- pub fn get_cpu_interface_id(&self) -> u16 { - let i = get_current_cpu_redist_index(); - match self { - Self::V3(v3) => redist_interface::get_internal_id(&v3.redistributors[i].redistributor), - _ => i as _, - } - } } impl core::fmt::Debug for ArmGicV3RedistPages { diff --git a/kernel/gic/src/gic/redist_interface.rs b/kernel/gic/src/gic/redist_interface.rs index 7f14ae2907..da0bfd3b17 100644 --- a/kernel/gic/src/gic/redist_interface.rs +++ b/kernel/gic/src/gic/redist_interface.rs @@ -133,10 +133,3 @@ pub fn get_sgippi_priority(registers: &GicRegisters, int: InterruptNumber) -> Pr pub fn set_sgippi_priority(registers: &mut GicRegisters, int: InterruptNumber, prio: Priority) { registers.write_array_volatile::<4>(offset::SGIPPI_IPRIORITYR, int, (u8::MAX - prio) as u32); } - -/// Returns the internal ID of the redistributor -/// -/// Note: this is only provided for debugging purposes -pub fn get_internal_id(registers: &GicRegisters) -> u16 { - (registers.read_volatile_64(offset::TYPER) >> 8) as _ -} From 1977fc5ced93ef5ae6190a7849e7a58ff577ced0 Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Tue, 13 Jun 2023 14:51:17 -0700 Subject: [PATCH 13/23] page allocator: support allocating pages within an address range (#970) * Currently this is only used for allocating pages for new executable .text sections on aarch64, which itself is a workaround to enable runtime loading of crates (see #940). * Based on the limitations of aarch64's ISA (branch instructions), we reserve 128MiB of virtual address space for this purpose. * This 128MiB region is for executable .text sections only, and is contiguous with the base kernel image's .text section. * This is available but not used by default on x86_64 yet. --- kernel/kernel_config/src/memory.rs | 27 ++- kernel/memory/src/lib.rs | 11 +- kernel/mod_mgmt/src/lib.rs | 87 +++++--- .../nano_core/linker_higher_half-aarch64.ld | 12 ++ kernel/page_allocator/src/lib.rs | 189 ++++++++++++------ 5 files changed, 220 insertions(+), 106 deletions(-) diff --git a/kernel/kernel_config/src/memory.rs b/kernel/kernel_config/src/memory.rs index 596f666931..2a0b615899 100644 --- a/kernel/kernel_config/src/memory.rs +++ b/kernel/kernel_config/src/memory.rs @@ -1,4 +1,4 @@ -//! The basic virtual memory map that Theseus assumes. +//! The basic virtual address ranges (virtual memory map) defined by Theseus. //! //! Current P4 (top-level page table) mappings: //! * 511: kernel text sections. @@ -53,15 +53,15 @@ pub const TEMPORARY_PAGE_VIRT_ADDR: usize = MAX_VIRTUAL_ADDRESS; /// Value: 512. pub const ENTRIES_PER_PAGE_TABLE: usize = PAGE_SIZE / BYTES_PER_ADDR; -/// Value: 511. The 511th entry is used for kernel text sections +/// Value: 511. The 511th entry is used (in part) for kernel text sections. pub const KERNEL_TEXT_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 1; /// Value: 510. The 510th entry is used to recursively map the current P4 root page table frame -// such that it can be accessed and modified just like any other level of page table. +/// such that it can be accessed and modified just like any other level of page table. pub const RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 2; -/// Value: 509. The 509th entry is used for the kernel heap +/// Value: 509. The 509th entry is used for the kernel heap. pub const KERNEL_HEAP_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 3; /// Value: 508. 
The 508th entry is used to temporarily recursively map the P4 root page table frame -// of an upcoming (new) page table such that it can be accessed and modified. +/// of an upcoming (new) page table such that it can be accessed and modified. pub const UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 4; @@ -89,12 +89,9 @@ pub const KERNEL_OFFSET: usize = canonicalize(MAX_VIRTUAL_ADDRESS - (TWO_GIGABYT /// Actual value on x86_64: 0o177777_777_000_000_000_0000, or 0xFFFF_FF80_0000_0000 pub const KERNEL_TEXT_START: usize = canonicalize(KERNEL_TEXT_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); -/// The size in bytes, not in pages. -/// -/// the KERNEL_OFFSET starts at (MAX_ADDR - 2GiB), -/// and .text contains nano_core, so this is the -/// first 510GiB of the 511th P4 entry. -pub const KERNEL_TEXT_MAX_SIZE: usize = ADDRESSABILITY_PER_P4_ENTRY - TWO_GIGABYTES; +/// The start of the virtual address range covered by the 510th P4 entry, +/// i.e., [`RECURSIVE_P4_INDEX`]; +pub const RECURSIVE_P4_START: usize = canonicalize(RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); /// The higher-half heap gets the 512GB address range starting at the 509th P4 entry, /// which is the slot right below the recursive P4 entry (510). @@ -103,12 +100,12 @@ pub const KERNEL_HEAP_START: usize = canonicalize(KERNEL_HEAP_P4_INDEX << (P4_IN #[cfg(not(debug_assertions))] pub const KERNEL_HEAP_INITIAL_SIZE: usize = 64 * 1024 * 1024; // 64 MiB - #[cfg(debug_assertions)] pub const KERNEL_HEAP_INITIAL_SIZE: usize = 256 * 1024 * 1024; // 256 MiB, debug builds require more heap space. -/// the kernel heap gets the whole 509th P4 entry. +/// The kernel heap is allowed to grow to fill the entirety of its P4 entry. pub const KERNEL_HEAP_MAX_SIZE: usize = ADDRESSABILITY_PER_P4_ENTRY; -/// The system (page allocator) must not use addresses at or above this address. 
-pub const UPCOMING_PAGE_TABLE_RECURSIVE_MEMORY_START: usize = canonicalize(UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); +/// The start of the virtual address range covered by the 508th P4 entry, +/// i.e., [`UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX`]; +pub const UPCOMING_PAGE_TABLE_RECURSIVE_P4_START: usize = canonicalize(UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); diff --git a/kernel/memory/src/lib.rs b/kernel/memory/src/lib.rs index fae1fc80e5..aeb0978929 100644 --- a/kernel/memory/src/lib.rs +++ b/kernel/memory/src/lib.rs @@ -24,15 +24,8 @@ pub use self::paging::{ }; pub use memory_structs::*; -pub use page_allocator::{ - AllocatedPages, allocate_pages, allocate_pages_at, - allocate_pages_by_bytes, allocate_pages_by_bytes_at, -}; - -pub use frame_allocator::{ - AllocatedFrames, MemoryRegionType, PhysicalMemoryRegion, - allocate_frames, allocate_frames_at, allocate_frames_by_bytes_at, allocate_frames_by_bytes, -}; +pub use page_allocator::*; +pub use frame_allocator::*; #[cfg(target_arch = "x86_64")] use memory_x86_64::{ tlb_flush_virt_addr, tlb_flush_all, get_p4, find_section_memory_bounds, get_vga_mem_addr }; diff --git a/kernel/mod_mgmt/src/lib.rs b/kernel/mod_mgmt/src/lib.rs index 70ad66d5b5..89a8e1d932 100644 --- a/kernel/mod_mgmt/src/lib.rs +++ b/kernel/mod_mgmt/src/lib.rs @@ -14,7 +14,7 @@ use alloc::{ }; use spin::{Mutex, Once}; use xmas_elf::{ElfFile, sections::{SHF_ALLOC, SHF_EXECINSTR, SHF_TLS, SHF_WRITE, SectionData, ShType}, symbol_table::{Binding, Type}}; -use memory::{MmiRef, MemoryManagementInfo, VirtualAddress, MappedPages, PteFlags, allocate_pages_by_bytes, allocate_frames_by_bytes_at}; +use memory::{MmiRef, MemoryManagementInfo, VirtualAddress, MappedPages, PteFlags, allocate_pages_by_bytes, allocate_frames_by_bytes_at, PageRange, allocate_pages_by_bytes_in_range}; use bootloader_modules::BootloaderModule; use cow_arc::CowArc; use rustc_demangle::demangle; @@ -33,6 +33,7 @@ pub mod parse_nano_core; pub mod replace_nano_core_crates; mod serde; + /// The name of the directory that contains all of the CrateNamespace files. pub const NAMESPACES_DIRECTORY_NAME: &str = "namespaces"; @@ -2882,6 +2883,35 @@ struct SectionPages { } +/// The range of virtual addresses from which we allocate pages for executable .text sections. +/// +/// This is mostly an architecture-specific design choice (hopefully a temporary one): +/// * On aarch64, even with the large code model, we are not (yet) able to generate +/// code with branch instructions (call/jump) that can address instructions more than +/// 128 MiB away from the current instruction. +/// Thus, we restrict the range of .text section locations to ensure they are within 128 MiB. +/// At some point in the future, this will be a limitation, but not for a long, long time. +/// * On x86_64, this is not necessary, so the range is `None`. +pub const KERNEL_TEXT_ADDR_RANGE: Option = { + #[cfg(target_arch = "x86_64")] { + None + } + #[cfg(target_arch = "aarch64")] { + use {memory::Page, kernel_config::memory::KERNEL_OFFSET}; + + const ONE_MIB: usize = 0x10_0000; + let start_vaddr = VirtualAddress::new_canonical(KERNEL_OFFSET + ONE_MIB); + let end_vaddr = VirtualAddress::new_canonical(start_vaddr.value() + (128 * ONE_MIB) - 1); + Some(PageRange::new( + // the start of the base kernel image's .text section. + Page::containing_address(start_vaddr), + // the start of the base kernel image's .text section, plus 128 MiB. 
+ Page::containing_address(end_vaddr), + )) + } +}; + + /// Allocates and maps memory sufficient to hold the sections that are found in the given `ElfFile`. /// Only sections that are marked "allocated" (`ALLOC`) in the ELF object file will contribute to the mappings' sizes. fn allocate_section_pages(elf_file: &ElfFile, kernel_mmi_ref: &MmiRef) -> Result { @@ -2953,10 +2983,37 @@ fn allocate_section_pages(elf_file: &ElfFile, kernel_mmi_ref: &MmiRef) -> Result // trace!("\n\texec_bytes: {exec_bytes} {exec_bytes:#X}\n\tro_bytes: {ro_bytes} {ro_bytes:#X}\n\trw_bytes: {rw_bytes} {rw_bytes:#X}"); // Allocate contiguous virtual memory pages for each section and map them to random frames as writable. - // We must allocate these pages separately because they will have different flags later. - let executable_pages = if exec_bytes > 0 { Some(allocate_and_map_as_writable(exec_bytes, TEXT_SECTION_FLAGS, kernel_mmi_ref)?) } else { None }; - let read_only_pages = if ro_bytes > 0 { Some(allocate_and_map_as_writable(ro_bytes, RODATA_SECTION_FLAGS, kernel_mmi_ref)?) } else { None }; - let read_write_pages = if rw_bytes > 0 { Some(allocate_and_map_as_writable(rw_bytes, DATA_BSS_SECTION_FLAGS, kernel_mmi_ref)?) } else { None }; + // We must allocate these pages separately because they use different flags. + let alloc_sec = |size_in_bytes: usize, within_range: Option<&PageRange>, flags: PteFlags| { + let allocated_pages = if let Some(range) = within_range { + allocate_pages_by_bytes_in_range(size_in_bytes, range) + .map_err(|_| "Couldn't allocate pages in text section address range")? + } else { + allocate_pages_by_bytes(size_in_bytes) + .ok_or("Couldn't allocate pages for new section")? + }; + + kernel_mmi_ref.lock().page_table.map_allocated_pages( + allocated_pages, + flags.valid(true).writable(true) + ) + }; + + let executable_pages = if exec_bytes > 0 { + Some(alloc_sec(exec_bytes, KERNEL_TEXT_ADDR_RANGE.as_ref(), TEXT_SECTION_FLAGS)?) + } else { + None + }; + let read_only_pages = if ro_bytes > 0 { + Some(alloc_sec(ro_bytes, None, RODATA_SECTION_FLAGS)?) + } else { + None + }; + let read_write_pages = if rw_bytes > 0 { + Some(alloc_sec(rw_bytes, None, DATA_BSS_SECTION_FLAGS)?) + } else { + None + }; let range_tuple = |mp: MappedPages, size_in_bytes: usize| { let start = mp.start_address(); @@ -2971,26 +3028,6 @@ fn allocate_section_pages(elf_file: &ElfFile, kernel_mmi_ref: &MmiRef) -> Result } -/// A convenience function for allocating virtual pages and mapping them to random physical frames. -/// -/// The returned `MappedPages` will be at least as large as `size_in_bytes`, -/// rounded up to the nearest `Page` size, -/// and is mapped as writable along with the other specified `flags` -/// to ensure we can copy content into it. 
-fn allocate_and_map_as_writable( - size_in_bytes: usize, - flags: PteFlags, - kernel_mmi_ref: &MmiRef, -) -> Result { - let allocated_pages = allocate_pages_by_bytes(size_in_bytes) - .ok_or("Couldn't allocate_pages_by_bytes, out of virtual address space")?; - kernel_mmi_ref.lock().page_table.map_allocated_pages( - allocated_pages, - flags.valid(true).writable(true) - ) -} - - #[allow(dead_code)] fn dump_dependent_crates(krate: &LoadedCrate, prefix: String) { for weak_crate_ref in krate.crates_dependent_on_me() { diff --git a/kernel/nano_core/linker_higher_half-aarch64.ld b/kernel/nano_core/linker_higher_half-aarch64.ld index 16378cb193..4a6355f63c 100644 --- a/kernel/nano_core/linker_higher_half-aarch64.ld +++ b/kernel/nano_core/linker_higher_half-aarch64.ld @@ -27,6 +27,18 @@ SECTIONS { *(.text .text.*) } + /* + * Currently, we are unable to force aarch64 to emit branch (call/jump) instructions + * that are capable of addressing a destination instruction pointer more than 128MiB away, + * even when specifying the "large" code model with `-C code-model=large`. + * + * Thus, as a workaround, we reserve the 128MiB chunk of virtual address space that + * directly follows the initial base kernel image's executable .text section, + * ensuring it can only be used by the page allocator when allocating pages for + * newly-loaded .text sections. + */ + . = ALIGN(128M); + .rodata ALIGN(4K) : AT(ADDR(.rodata) - KERNEL_OFFSET) { *(.rodata .rodata.*) diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs index 236fd30b15..326ecec154 100644 --- a/kernel/page_allocator/src/lib.rs +++ b/kernel/page_allocator/src/lib.rs @@ -30,7 +30,7 @@ mod static_array_rb_tree; // mod static_array_linked_list; -use core::{borrow::Borrow, cmp::Ordering, fmt, ops::{Deref, DerefMut}}; +use core::{borrow::Borrow, cmp::{Ordering, max, min}, fmt, ops::{Deref, DerefMut}}; use kernel_config::memory::*; use memory_structs::{VirtualAddress, Page, PageRange}; use spin::{Mutex, Once}; @@ -53,7 +53,9 @@ static DESIGNATED_PAGES_LOW_END: Once = Once::new(); /// /// TODO: once the heap is fully dynamic and not dependent on static addresses, /// we can exclude the heap from the designated region. -static DESIGNATED_PAGES_HIGH_START: Page = Page::containing_address(VirtualAddress::new_canonical(UPCOMING_PAGE_TABLE_RECURSIVE_MEMORY_START)); +static DESIGNATED_PAGES_HIGH_START: Page = Page::containing_address( + VirtualAddress::new_canonical(UPCOMING_PAGE_TABLE_RECURSIVE_P4_START) +); const MIN_PAGE: Page = Page::containing_address(VirtualAddress::zero()); const MAX_PAGE: Page = Page::containing_address(VirtualAddress::new_canonical(MAX_VIRTUAL_ADDRESS)); @@ -69,45 +71,62 @@ static FREE_PAGE_LIST: Mutex> = Mutex::new(StaticArrayR /// lower designated region, which should be the ending address of the initial kernel image /// (a lower-half identity address). /// -/// The page allocator will only allocate addresses lower than `end_vaddr_of_low_designated_region` -/// if specifically requested. -/// General allocation requests for any virtual address will not use any address lower than that, -/// unless the rest of the entire virtual address space is already in use. +/// The page allocator considers two regions as "designated" regions. It will only allocate pages +/// within these designated regions if the specifically-requested address falls within them. +/// 1. The lower designated region is for identity-mapped bootloader content +/// and base kernel image sections, which is used during OS initialization. +/// 2. 
The higher designated region is for the same content, mapped to the higher half +/// of the address space. It also excludes the address ranges for the P4 entries that +/// Theseus uses for recursive page table mapping. +/// * See [`RECURSIVE_P4_INDEX`] and [`UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX`]. /// +/// General allocation requests for pages at any virtual address will not use +/// addresses within designated regions unless the entire address space is already in use, +/// which is an extraordinarily unlikely (i.e., basically impossible) situation. pub fn init(end_vaddr_of_low_designated_region: VirtualAddress) -> Result<(), &'static str> { assert!(end_vaddr_of_low_designated_region < DESIGNATED_PAGES_HIGH_START.start_address()); - let designated_low_end = DESIGNATED_PAGES_LOW_END.call_once(|| Page::containing_address(end_vaddr_of_low_designated_region)); - let designated_low_end = *designated_low_end; + let designated_low_end_page = DESIGNATED_PAGES_LOW_END.call_once( + || Page::containing_address(end_vaddr_of_low_designated_region) + ); + let designated_low_end = *designated_low_end_page; let initial_free_chunks = [ - // The first region contains all pages *below* the beginning of the 510th entry of P4. - // We split it up into three chunks just for ease, since it overlaps the designated regions. - Some(Chunk { + // The first region contains all pages from address zero to the end of the low designated region, + // which is generally reserved for identity-mapped bootloader stuff and base kernel image sections. + Some(Chunk { pages: PageRange::new( Page::containing_address(VirtualAddress::zero()), designated_low_end, ) }), - Some(Chunk { + // The second region contains the massive range from the end of the low designated region + // to the beginning of the high designated region, which comprises the majority of the address space. + // The beginning of the high designated region starts at the reserved P4 entry used to + // recursively map the "upcoming" page table (i.e., UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX). + Some(Chunk { pages: PageRange::new( designated_low_end + 1, DESIGNATED_PAGES_HIGH_START - 1, ) }), - Some(Chunk { + // Here, we skip the addresses covered by the `UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX`. + + // The third region contains the range of addresses reserved for the heap, + // which ends at the beginning of the addresses covered by the `RECURSIVE_P4_INDEX`, + Some(Chunk { pages: PageRange::new( - DESIGNATED_PAGES_HIGH_START, + Page::containing_address(VirtualAddress::new_canonical(KERNEL_HEAP_START)), // This is the page right below the beginning of the 510th entry of the top-level P4 page table. - Page::containing_address(VirtualAddress::new_canonical(KERNEL_TEXT_START - ADDRESSABILITY_PER_P4_ENTRY - 1)), + Page::containing_address(VirtualAddress::new_canonical(RECURSIVE_P4_START - 1)), ) }), + // Here, we skip the addresses covered by the `RECURSIVE_P4_INDEX`. - // The second region contains all pages *above* the end of the 510th entry of P4, i.e., starting at the 511th (last) entry of P4. - // This is fully covered by the second (higher) designated region. - Some(Chunk { + // The fourth region contains all pages in the 511th (last) entry of P4. 
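+    // This region is fully covered by the higher designated region.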
+ Some(Chunk { pages: PageRange::new( Page::containing_address(VirtualAddress::new_canonical(KERNEL_TEXT_START)), - Page::containing_address(VirtualAddress::new_canonical(MAX_VIRTUAL_ADDRESS)), + MAX_PAGE, ) }), None, None, None, None, @@ -309,7 +328,7 @@ impl Drop for AllocatedPages { /// that may result in heap allocation should occur. /// Such actions include adding chunks to lists of free pages or pages in use. /// -/// The vast majority of use cases don't care about such precise control, +/// The vast majority of use cases don't care about such precise control, /// so you can simply drop this struct at any time or ignore it /// with a `let _ = ...` binding to instantly drop it. pub struct DeferredAllocAction<'list> { @@ -344,14 +363,15 @@ impl<'list> Drop for DeferredAllocAction<'list> { } -/// Possible allocation errors. +/// Possible errors returned by the page allocator. #[derive(Debug)] -enum AllocationError { +pub enum AllocationError { /// The requested address was not free: it was already allocated, or is outside the range of this allocator. AddressNotFree(Page, usize), /// The address space was full, or there was not a large-enough chunk - /// or enough remaining chunks that could satisfy the requested allocation size. - OutOfAddressSpace(usize), + /// or enough remaining chunks (within the given `PageRange`, if any) + /// that could satisfy the requested allocation size. + OutOfAddressSpace(usize, Option), /// The allocator has not yet been initialized. NotInitialized, } @@ -359,7 +379,8 @@ impl From for &'static str { fn from(alloc_err: AllocationError) -> &'static str { match alloc_err { AllocationError::AddressNotFree(..) => "address was in use or outside of this page allocator's range", - AllocationError::OutOfAddressSpace(..) => "out of virtual address space", + AllocationError::OutOfAddressSpace(_, Some(_range)) => "out of virtual address space in specified range", + AllocationError::OutOfAddressSpace(_, None) => "out of virtual address space", AllocationError::NotInitialized => "the page allocator has not yet been initialized", } } @@ -429,38 +450,52 @@ fn find_specific_chunk( /// Searches the given `list` for any chunk large enough to hold at least `num_pages`. /// -/// It first attempts to find a suitable chunk **not** in the designated regions, +/// If a given range is specified, the returned `AllocatedPages` *must* exist +/// fully within that inclusive range of pages. +/// +/// If no range is specified, this function first attempts to find a suitable chunk +/// that is **not** within the designated regions, /// and only allocates from the designated regions as a backup option. fn find_any_chunk( list: &mut StaticArrayRBTree, - num_pages: usize + num_pages: usize, + within_range: Option<&PageRange>, ) -> Result<(AllocatedPages, DeferredAllocAction<'static>), AllocationError> { - let designated_low_end = DESIGNATED_PAGES_LOW_END.get().ok_or(AllocationError::NotInitialized)?; - - // During the first pass, we ignore designated regions. + let designated_low_end = DESIGNATED_PAGES_LOW_END.get() + .ok_or(AllocationError::NotInitialized)?; + let full_range = PageRange::new(*designated_low_end + 1, DESIGNATED_PAGES_HIGH_START - 1); + let range = within_range.unwrap_or(&full_range); + + // During the first pass, we only search within the given range. + // If no range was given, we search from the end of the low designated region + // to the start of the high designated region. 
match list.0 { Inner::Array(ref mut arr) => { for elem in arr.iter_mut() { if let Some(chunk) = elem { - // Skip chunks that are too-small or in the designated regions. - if chunk.size_in_pages() < num_pages || - chunk.start() <= designated_low_end || - chunk.end() >= &DESIGNATED_PAGES_HIGH_START - { - continue; - } - else { - return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::Array(elem)); + // Use max and min below to ensure that the range of pages we allocate from + // is within *both* the current chunk's bounds and the range's bounds. + let lowest_possible_start_page = *max(chunk.start(), range.start()); + let highest_possible_end_page = *min(chunk.end(), range.end()); + if lowest_possible_start_page + num_pages <= highest_possible_end_page { + return adjust_chosen_chunk( + lowest_possible_start_page, + num_pages, + &chunk.clone(), + ValueRefMut::Array(elem), + ); } + + // The early static array is not sorted, so we must iterate over all elements. } } } Inner::RBTree(ref mut tree) => { // NOTE: if RBTree had a `range_mut()` method, we could simply do the following: // ``` - // let eligible_chunks = tree.range( - // Bound::Excluded(&DESIGNATED_PAGES_LOW_END), - // Bound::Excluded(&DESIGNATED_PAGES_HIGH_START) + // let eligible_chunks = tree.range_mut( + // Bound::Included(range.start()), + // Bound::Included(range.end()) // ); // for c in eligible_chunks { ... } // ``` @@ -470,20 +505,35 @@ fn find_any_chunk( // Because we allocate new pages by peeling them off from the beginning part of a chunk, // it's MUCH faster to start the search for free pages from higher addresses moving down. // This results in an O(1) allocation time in the general case, until all address ranges are already in use. - let mut cursor = tree.upper_bound_mut(Bound::Excluded(&DESIGNATED_PAGES_HIGH_START)); + let mut cursor = tree.upper_bound_mut(Bound::Included(range.end())); while let Some(chunk) = cursor.get().map(|w| w.deref()) { - if chunk.start() <= designated_low_end { - break; // move on to searching through the designated regions + // Use max and min below to ensure that the range of pages we allocate from + // is within *both* the current chunk's bounds and the range's bounds. + let lowest_possible_start_page = *max(chunk.start(), range.start()); + let highest_possible_end_page = *min(chunk.end(), range.end()); + if lowest_possible_start_page + num_pages <= highest_possible_end_page { + return adjust_chosen_chunk( + lowest_possible_start_page, + num_pages, + &chunk.clone(), + ValueRefMut::RBTree(cursor) + ); } - if num_pages < chunk.size_in_pages() { - return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::RBTree(cursor)); + + if chunk.start() <= range.start() { + break; // move on to searching through the designated regions } - warn!("Page allocator: unlikely scenario: had to search multiple chunks while trying to allocate {} pages at any address.", num_pages); + warn!("page_allocator: unlikely scenario: had to search multiple chunks while trying to allocate {} pages in {:?}.", num_pages, range); cursor.move_prev(); } } } + // If we failed to find suitable pages within the given range, return an error. + if let Some(range) = within_range { + return Err(AllocationError::OutOfAddressSpace(num_pages, Some(range.clone()))); + } + // If we can't find any suitable chunks in the non-designated regions, then look in both designated regions. 
warn!("PageAllocator: unlikely scenario: non-designated chunks are all allocated, \ falling back to allocating {} pages from designated regions!", num_pages); @@ -510,8 +560,8 @@ fn find_any_chunk( // for c in eligible_chunks { ... } // ``` // - // However, RBTree doesn't have a `range_mut()` method, so we use two sets of cursors for manual iteration. - // The first cursor iterates over the lower designated region, from higher addresses to lower, down to zero. + // RBTree doesn't have a `range_mut()` method, so we use cursors for two rounds of iteration. + // The first iterates over the lower designated region, from higher addresses to lower, down to zero. let mut cursor = tree.upper_bound_mut(Bound::Included(designated_low_end)); while let Some(chunk) = cursor.get().map(|w| w.deref()) { if num_pages < chunk.size_in_pages() { @@ -520,7 +570,7 @@ fn find_any_chunk( cursor.move_prev(); } - // The second cursor iterates over the higher designated region, from the highest (max) address down to the designated region boundary. + // The second iterates over the higher designated region, from the highest (max) address down to the designated region boundary. let mut cursor = tree.upper_bound_mut::(Bound::Unbounded); while let Some(chunk) = cursor.get().map(|w| w.deref()) { if chunk.start() < &DESIGNATED_PAGES_HIGH_START { @@ -535,7 +585,7 @@ fn find_any_chunk( } } - Err(AllocationError::OutOfAddressSpace(num_pages)) + Err(AllocationError::OutOfAddressSpace(num_pages, None)) } @@ -624,6 +674,7 @@ fn adjust_chosen_chunk( pub fn allocate_pages_deferred( requested_vaddr: Option, num_pages: usize, + within_range: Option<&PageRange>, ) -> Result<(AllocatedPages, DeferredAllocAction<'static>), &'static str> { if num_pages == 0 { warn!("PageAllocator: requested an allocation of 0 pages... stupid!"); @@ -640,8 +691,9 @@ pub fn allocate_pages_deferred( if let Some(vaddr) = requested_vaddr { find_specific_chunk(&mut locked_list, Page::containing_address(vaddr), num_pages) } else { - find_any_chunk(&mut locked_list, num_pages) - }.map_err(From::from) // convert from AllocationError to &str + find_any_chunk(&mut locked_list, num_pages, within_range) + } + .map_err(From::from) // convert from AllocationError to &str } @@ -659,7 +711,7 @@ pub fn allocate_pages_by_bytes_deferred( num_bytes }; let num_pages = (actual_num_bytes + PAGE_SIZE - 1) / PAGE_SIZE; // round up - allocate_pages_deferred(requested_vaddr, num_pages) + allocate_pages_deferred(requested_vaddr, num_pages, None) } @@ -667,7 +719,7 @@ pub fn allocate_pages_by_bytes_deferred( /// /// See [`allocate_pages_deferred()`](fn.allocate_pages_deferred.html) for more details. pub fn allocate_pages(num_pages: usize) -> Option { - allocate_pages_deferred(None, num_pages) + allocate_pages_deferred(None, num_pages, None) .map(|(ap, _action)| ap) .ok() } @@ -699,7 +751,30 @@ pub fn allocate_pages_by_bytes_at(vaddr: VirtualAddress, num_bytes: usize) -> Re /// /// See [`allocate_pages_deferred()`](fn.allocate_pages_deferred.html) for more details. pub fn allocate_pages_at(vaddr: VirtualAddress, num_pages: usize) -> Result { - allocate_pages_deferred(Some(vaddr), num_pages) + allocate_pages_deferred(Some(vaddr), num_pages, None) + .map(|(ap, _action)| ap) +} + + +/// Allocates the given number of pages with the constraint that +/// they must be within the given inclusive `range` of pages. 
+pub fn allocate_pages_in_range( + num_pages: usize, + range: &PageRange, +) -> Result { + allocate_pages_deferred(None, num_pages, Some(range)) + .map(|(ap, _action)| ap) +} + + +/// Allocates pages with a size given in number of bytes with the constraint that +/// they must be within the given inclusive `range` of pages. +pub fn allocate_pages_by_bytes_in_range( + num_bytes: usize, + range: &PageRange, +) -> Result { + let num_pages = (num_bytes + PAGE_SIZE - 1) / PAGE_SIZE; // round up + allocate_pages_deferred(None, num_pages, Some(range)) .map(|(ap, _action)| ap) } From 87afde2c2f08053fcc2545f7181068805b83d4f5 Mon Sep 17 00:00:00 2001 From: Ramla Ijaz Date: Wed, 14 Jun 2023 06:30:52 -0400 Subject: [PATCH 14/23] should match with theseus_main now --- kernel/cpu/src/x86_64.rs | 6 ++++++ kernel/gic/src/gic/dist_interface.rs | 20 ++++++++++++++++++++ kernel/gic/src/gic/mod.rs | 12 ++++++++++++ kernel/gic/src/gic/redist_interface.rs | 7 +++++++ kernel/ixgbe/.DS_Store | Bin 6148 -> 0 bytes 5 files changed, 45 insertions(+) delete mode 100644 kernel/ixgbe/.DS_Store diff --git a/kernel/cpu/src/x86_64.rs b/kernel/cpu/src/x86_64.rs index 8df87e5f6b..de158ef812 100644 --- a/kernel/cpu/src/x86_64.rs +++ b/kernel/cpu/src/x86_64.rs @@ -12,6 +12,12 @@ impl From for CpuId { } } +impl From for ApicId { + fn from(cpu_id: CpuId) -> Self { + ApicId::try_from(cpu_id.value()).expect("An invalid CpuId was encountered") + } +} + impl TryFrom for CpuId { type Error = u32; fn try_from(raw_cpu_id: u32) -> Result { diff --git a/kernel/gic/src/gic/dist_interface.rs b/kernel/gic/src/gic/dist_interface.rs index 697cf35131..398568aa55 100644 --- a/kernel/gic/src/gic/dist_interface.rs +++ b/kernel/gic/src/gic/dist_interface.rs @@ -27,6 +27,7 @@ use cpu::MpidrValue; mod offset { use crate::{Offset32, Offset64}; pub(crate) const CTLR: Offset32 = Offset32::from_byte_offset(0x000); + pub(crate) const IIDR: Offset32 = Offset32::from_byte_offset(0x008); pub(crate) const IGROUPR: Offset32 = Offset32::from_byte_offset(0x080); pub(crate) const ISENABLER: Offset32 = Offset32::from_byte_offset(0x100); pub(crate) const ICENABLER: Offset32 = Offset32::from_byte_offset(0x180); @@ -136,6 +137,16 @@ pub fn send_ipi_gicv2(registers: &mut GicRegisters, int_num: u32, target: IpiTar registers.write_volatile(offset::SGIR, value); } +/// Deserialized content of the `IIDR` distributor register +pub struct Implementer { + /// Product Identifier of this distributor. + pub product_id: u8, + /// An arbitrary revision number defined by the implementer. + pub version: u8, + /// Contains the JEP106 code of the company that implemented the distributor. + pub implementer_jep106: u16, +} + impl super::ArmGic { pub(crate) fn distributor(&self) -> &GicRegisters { match self { @@ -151,6 +162,15 @@ impl super::ArmGic { } } + pub fn implementer(&self) -> Implementer { + let raw = self.distributor().read_volatile(offset::IIDR); + Implementer { + product_id: (raw >> 24) as _, + version: ((raw >> 12) & 0xff) as _, + implementer_jep106: (raw & 0xfff) as _, + } + } + /// Returns the destination of an SPI if it's valid, i.e. if it /// points to existing CPU(s). 
/// diff --git a/kernel/gic/src/gic/mod.rs b/kernel/gic/src/gic/mod.rs index 66cbd40e49..00f233f104 100644 --- a/kernel/gic/src/gic/mod.rs +++ b/kernel/gic/src/gic/mod.rs @@ -438,6 +438,18 @@ impl ArmGic { Self::V3( _) => cpu_interface_gicv3::set_minimum_priority(priority), } } + + /// Returns the internal ID of the redistributor (GICv3) + /// + /// Note #2: this is only provided for debugging purposes + /// Note #1: as a compatibility feature, on GICv2, the CPU index is returned. + pub fn get_cpu_interface_id(&self) -> u16 { + let i = get_current_cpu_redist_index(); + match self { + Self::V3(v3) => redist_interface::get_internal_id(&v3.redistributors[i].redistributor), + _ => i as _, + } + } } impl core::fmt::Debug for ArmGicV3RedistPages { diff --git a/kernel/gic/src/gic/redist_interface.rs b/kernel/gic/src/gic/redist_interface.rs index da0bfd3b17..7f14ae2907 100644 --- a/kernel/gic/src/gic/redist_interface.rs +++ b/kernel/gic/src/gic/redist_interface.rs @@ -133,3 +133,10 @@ pub fn get_sgippi_priority(registers: &GicRegisters, int: InterruptNumber) -> Pr pub fn set_sgippi_priority(registers: &mut GicRegisters, int: InterruptNumber, prio: Priority) { registers.write_array_volatile::<4>(offset::SGIPPI_IPRIORITYR, int, (u8::MAX - prio) as u32); } + +/// Returns the internal ID of the redistributor +/// +/// Note: this is only provided for debugging purposes +pub fn get_internal_id(registers: &GicRegisters) -> u16 { + (registers.read_volatile_64(offset::TYPER) >> 8) as _ +} diff --git a/kernel/ixgbe/.DS_Store b/kernel/ixgbe/.DS_Store deleted file mode 100644 index a4c4b45b73300795bdbe2d393d30b560c45d41a3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK%}yIJ5FRIKae}JmfCR_A^oF9KR7E{h+6FjqLt0c0P$j!0f|Zsv%5I2C2-??x zm*6>g0v-ou>`5d@dTk>#BaOfDc*eW_Z21QORD0Ol0;mIkMkOp&u~;C~PC6$g_EZp+ z?ZZF=axicK$y~HJmXQIPyBh332m?67nVZeueqv6e#6;Lmi{mG1k<+j;VGK=I0UHhsGo` zJB)tC{ZXgBwqw#fjMM(WCda)#hMXM5Y0r$>W|;Oe+fU#cw65xEr@k>Be{Z!my|1m- zWYZgOZ#T*QHknlQ`j@RA``wE_*W;VX?VTMaC|uZ9J~K|?9t|epDj1}RNzc$Tjh;pc z3IoD`FtC&i@Mfh~m-6rAr-T7v;D0he`-6u{=ra}$?bd;XeF7lTHPQ;&bX}wzkI`o= z9AXQKa;bwtvR}vkJsS?ncPOPWAPl@_pzNx3 zI{)|n{{DZRBsF0`7 Date: Wed, 14 Jun 2023 07:04:08 -0400 Subject: [PATCH 15/23] condensed files --- .../frame_allocator/src/allocated_frames.rs | 206 -------------- kernel/frame_allocator/src/frames.rs | 235 ++++----------- kernel/frame_allocator/src/lib.rs | 268 +++++++++++++++++- kernel/frame_allocator/src/region.rs | 60 ---- .../frame_allocator/src/trusted_chunk_shim.rs | 231 --------------- 5 files changed, 317 insertions(+), 683 deletions(-) delete mode 100644 kernel/frame_allocator/src/allocated_frames.rs delete mode 100644 kernel/frame_allocator/src/region.rs delete mode 100644 kernel/frame_allocator/src/trusted_chunk_shim.rs diff --git a/kernel/frame_allocator/src/allocated_frames.rs b/kernel/frame_allocator/src/allocated_frames.rs deleted file mode 100644 index a25e43aa25..0000000000 --- a/kernel/frame_allocator/src/allocated_frames.rs +++ /dev/null @@ -1,206 +0,0 @@ -use crate::{MemoryRegionType, contains_any, FREE_GENERAL_FRAMES_LIST, FREE_RESERVED_FRAMES_LIST, RESERVED_REGIONS}; -use memory_structs::{FrameRange, Frame}; -use core::{fmt, ops::{Deref, DerefMut}, marker::PhantomData}; -use trusted_chunk::trusted_chunk::TrustedChunk; -use range_inclusive::RangeInclusiveIterator; -use crate::frames::*; - -/// Represents a range of allocated physical memory [`Frame`]s; derefs to [`FrameRange`]. 
-/// -/// These frames are not immediately accessible because they're not yet mapped -/// by any virtual memory pages. -/// You must do that separately in order to create a `MappedPages` type, -/// which can then be used to access the contents of these frames. -/// -/// This object represents ownership of the range of allocated physical frames; -/// if this object falls out of scope, its allocated frames will be auto-deallocated upon drop. -pub struct AllocatedFrames { - pub(crate) frames: Frames<{FrameState::Unmapped}>, -} - -// AllocatedFrames must not be Cloneable, and it must not expose its inner frames as mutable. -assert_not_impl_any!(AllocatedFrames: DerefMut, Clone); - -impl Deref for AllocatedFrames { - type Target = FrameRange; - fn deref(&self) -> &FrameRange { - &self.frames - } -} -impl fmt::Debug for AllocatedFrames { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "AllocatedFrames({:?})", self.frames) - } -} - -impl AllocatedFrames { - /// Returns an empty AllocatedFrames object that performs no frame allocation. - /// Can be used as a placeholder, but will not permit any real usage. - pub const fn empty() -> AllocatedFrames { - AllocatedFrames { - frames: Frames::empty() - } - } - - /// Merges the given `AllocatedFrames` object `other` into this `AllocatedFrames` object (`self`). - /// This is just for convenience and usability purposes, it performs no allocation or remapping. - /// - /// The given `other` must be physically contiguous with `self`, i.e., come immediately before or after `self`. - /// That is, either `self.start == other.end + 1` or `self.end + 1 == other.start` must be true. - /// - /// If either of those conditions are met, `self` is modified and `Ok(())` is returned, - /// otherwise `Err(other)` is returned. - pub fn merge(&mut self, mut other: AllocatedFrames) -> Result<(), AllocatedFrames> { - let chunk = core::mem::replace(&mut other.frames, Frames::empty()); - match self.frames.merge(chunk) { - Ok(_) => { - // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. - // This is not really necessary because it only contains an empty chunk. - core::mem::forget(other); - Ok(()) - }, - Err(other_chunk) => { - Err(AllocatedFrames{frames: other_chunk}) - } - } - } - - /// Splits this `AllocatedFrames` into two separate `AllocatedFrames` objects: - /// * `[beginning : at_frame - 1]` - /// * `[at_frame : end]` - /// - /// This function follows the behavior of [`core::slice::split_at()`], - /// thus, either one of the returned `AllocatedFrames` objects may be empty. - /// * If `at_frame == self.start`, the first returned `AllocatedFrames` object will be empty. - /// * If `at_frame == self.end + 1`, the second returned `AllocatedFrames` object will be empty. - /// - /// Returns an `Err` containing this `AllocatedFrames` if `at_frame` is otherwise out of bounds. - /// - /// [`core::slice::split_at()`]: https://doc.rust-lang.org/core/primitive.slice.html#method.split_at - pub fn split_at(mut self, at_frame: Frame) -> Result<(AllocatedFrames, AllocatedFrames), AllocatedFrames> { - let chunk = core::mem::replace(&mut self.frames, Frames::empty()); - match chunk.split_at(at_frame) { - Ok((chunk1, chunk2)) => { - // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. 
- core::mem::forget(self); - Ok(( - AllocatedFrames{frames: chunk1}, - AllocatedFrames{frames: chunk2} - )) - }, - Err(chunk_not_split) => { - Err(AllocatedFrames{frames: chunk_not_split}) - } - } - } - - /// Returns an `AllocatedFrame` if this `AllocatedFrames` object contains only one frame. - /// - /// ## Panic - /// Panics if this `AllocatedFrame` contains multiple frames or zero frames. - pub fn as_allocated_frame(&self) -> AllocatedFrame { - assert!(self.size_in_frames() == 1); - AllocatedFrame { - frame: *self.start(), - _phantom: PhantomData, - } - } -} - -/// This function is a callback used to convert `UnmappedFrames` into `AllocatedFrames`. -/// `UnmappedFrames` represents frames that have been unmapped from a page that had -/// exclusively mapped them, indicating that no others pages have been mapped -/// to those same frames, and thus, they can be safely deallocated. -/// -/// This exists to break the cyclic dependency cycle between this crate and -/// the `page_table_entry` crate, since `page_table_entry` must depend on types -/// from this crate in order to enforce safety when modifying page table entries. -pub(crate) fn into_allocated_frames(tc: TrustedChunk, frames: FrameRange) -> AllocatedFrames { - let typ = if contains_any(&RESERVED_REGIONS.lock(), &frames) { - MemoryRegionType::Reserved - } else { - MemoryRegionType::Free - }; - AllocatedFrames { frames: Frames::from_trusted_chunk(tc, frames, typ) } -} - -impl Drop for AllocatedFrames { - fn drop(&mut self) { - if self.size_in_frames() == 0 { return; } - - let (list, _typ) = if contains_any(&RESERVED_REGIONS.lock(), &self.frames) { - (&FREE_RESERVED_FRAMES_LIST, MemoryRegionType::Reserved) - } else { - (&FREE_GENERAL_FRAMES_LIST, MemoryRegionType::Free) - }; - // trace!("frame_allocator: deallocating {:?}, typ {:?}", self, typ); - - // Simply add the newly-deallocated chunk to the free frames list. - let mut locked_list = list.lock(); - let res = locked_list.insert(core::mem::replace(&mut self.frames, Frames::empty())); - match res { - Ok(_inserted_free_chunk) => (), - Err(c) => error!("BUG: couldn't insert deallocated chunk {:?} into free frame list", c), - } - - // Here, we could optionally use above `_inserted_free_chunk` to merge the adjacent (contiguous) chunks - // before or after the newly-inserted free chunk. - // However, there's no *need* to do so until we actually run out of address space or until - // a requested address is in a chunk that needs to be merged. - // Thus, for performance, we save that for those future situations. - } -} - -impl<'f> IntoIterator for &'f AllocatedFrames { - type IntoIter = AllocatedFramesIter<'f>; - type Item = AllocatedFrame<'f>; - fn into_iter(self) -> Self::IntoIter { - AllocatedFramesIter { - _owner: self, - range: self.frames.clone().into_iter(), - } - } -} - -/// An iterator over each [`AllocatedFrame`] in a range of [`AllocatedFrames`]. -/// -/// We must implement our own iterator type here in order to tie the lifetime `'f` -/// of a returned `AllocatedFrame<'f>` type to the lifetime of its containing `AllocatedFrames`. -/// This is because the underlying type of `AllocatedFrames` is a [`FrameRange`], -/// which itself is a [`core::ops::RangeInclusive`] of [`Frame`]s, and unfortunately the -/// `RangeInclusive` type doesn't implement an immutable iterator. -/// -/// Iterating through a `RangeInclusive` actually modifies its own internal range, -/// so we must avoid doing that because it would break the semantics of a `FrameRange`. 
-/// In fact, this is why [`FrameRange`] only implements `IntoIterator` but -/// does not implement [`Iterator`] itself. -pub struct AllocatedFramesIter<'f> { - _owner: &'f AllocatedFrames, - range: RangeInclusiveIterator, -} -impl<'f> Iterator for AllocatedFramesIter<'f> { - type Item = AllocatedFrame<'f>; - fn next(&mut self) -> Option { - self.range.next().map(|frame| - AllocatedFrame { - frame, _phantom: PhantomData, - } - ) - } -} - -/// A reference to a single frame within a range of `AllocatedFrames`. -/// -/// The lifetime of this type is tied to the lifetime of its owning `AllocatedFrames`. -#[derive(Debug)] -pub struct AllocatedFrame<'f> { - frame: Frame, - _phantom: PhantomData<&'f Frame>, -} -impl<'f> Deref for AllocatedFrame<'f> { - type Target = Frame; - fn deref(&self) -> &Self::Target { - &self.frame - } -} -assert_not_impl_any!(AllocatedFrame: DerefMut, Clone); diff --git a/kernel/frame_allocator/src/frames.rs b/kernel/frame_allocator/src/frames.rs index 983d149d04..46d76d3632 100644 --- a/kernel/frame_allocator/src/frames.rs +++ b/kernel/frame_allocator/src/frames.rs @@ -1,13 +1,15 @@ +//! A range of unmapped frames that stores a verified `TrustedChunk`. +//! A `Frames` object is uncloneable and is the only way to access the range of frames it references. +//! +//! To Do: Merge AllocatedFrames into this typestate as well. + use kernel_config::memory::PAGE_SIZE; use memory_structs::{FrameRange, Frame, PhysicalAddress}; use range_inclusive::RangeInclusive; -use crate::{MemoryRegionType,RESERVED_REGIONS, FREE_GENERAL_FRAMES_LIST, FREE_RESERVED_FRAMES_LIST, contains_any}; +use crate::{MemoryRegionType, AllocatedFrames}; use core::{borrow::Borrow, cmp::Ordering, ops::{Deref, DerefMut}}; use spin::Mutex; use trusted_chunk::trusted_chunk::*; -use range_inclusive::RangeInclusiveIterator; -use core::marker::PhantomData; -use crate::allocated_frames::*; static CHUNK_ALLOCATOR: Mutex = Mutex::new(TrustedChunkAllocator::new()); @@ -21,11 +23,28 @@ pub enum FrameState { Unmapped, } +/// A range of contiguous frames. +/// Owning a `Frames` object gives ownership of the range of frames it references. +/// The `verified_chunk` field is a verified `TrustedChunk` that stores the actual frames, +/// and has the invariant that it does not overlap with any other `TrustedChunk` created by the +/// `CHUNK_ALLOCATOR`. +/// +/// # Ordering and Equality +/// +/// `Frames` implements the `Ord` trait, and its total ordering is ONLY based on +/// its **starting** `Frame`. This is useful so we can store `Frames` in a sorted collection. +/// +/// Similarly, `Frames` implements equality traits, `Eq` and `PartialEq`, +/// both of which are also based ONLY on the **starting** `Frame` of the `Frames`. +/// Thus, comparing two `Frames` with the `==` or `!=` operators may not work as expected. +/// since it ignores their actual range of frames. #[derive(Debug, Eq)] pub struct Frames { /// The type of this memory chunk, e.g., whether it's in a free or reserved region. typ: MemoryRegionType, - /// The Frames covered by this chunk, an inclusive range. + /// The Frames covered by this chunk, an inclusive range. Equal to the frames in the verified chunk. + /// Needed because verification fails on a trusted chunk that stores a FrameRange or RangeInclusive, + /// but succeeds with RangeInclusive. 
frames: FrameRange, /// The actual verified chunk verified_chunk: TrustedChunk @@ -45,8 +64,8 @@ impl Frames<{FrameState::Unmapped}> { } })?; - assert!(frames.start().number() == verified_chunk.start()); - assert!(frames.end().number() == verified_chunk.end()); + // assert!(frames.start().number() == verified_chunk.start()); + // assert!(frames.end().number() == verified_chunk.end()); let f = Frames { typ, @@ -54,7 +73,6 @@ impl Frames<{FrameState::Unmapped}> { verified_chunk }; //warn!("new frames: {:?}", f); - Ok(f) } @@ -66,10 +84,9 @@ impl Frames<{FrameState::Unmapped}> { frames, verified_chunk }; - assert!(f.frames.start().number() == f.verified_chunk.start()); - assert!(f.frames.end().number() == f.verified_chunk.end()); - //warn!("from trusted chunk: {:?}", f); - + // assert!(f.frames.start().number() == f.verified_chunk.start()); + // assert!(f.frames.end().number() == f.verified_chunk.end()); + // warn!("from trusted chunk: {:?}", f); f } @@ -78,116 +95,10 @@ impl Frames<{FrameState::Unmapped}> { frames: self, } } - - // /// Returns an `AllocatedFrame` if this `AllocatedFrames` object contains only one frame. - // /// - // /// ## Panic - // /// Panics if this `AllocatedFrame` contains multiple frames or zero frames. - // pub fn as_allocated_frame(&self) -> AllocatedFrame { - // assert!(self.size_in_frames() == 1); - // AllocatedFrame { - // frame: *self.start(), - // _phantom: PhantomData, - // } - // } } -// impl Drop for Frames { -// fn drop(&mut self) { -// if self.size_in_frames() == 0 { return; } - -// trace!("frame_allocator: dropping {:?}", self); -// let unmapped_frames: Frames<{FrameState::Unmapped}> = Frames { -// typ: self.typ, -// frames: self.frames.clone(), -// verified_chunk: core::mem::replace(&mut self.verified_chunk, TrustedChunk::empty()) -// }; - -// // assert!(unmapped_frames.start().number() == unmapped_frames.verified_chunk.start()); -// // assert!(unmapped_frames.end().number() == unmapped_frames.verified_chunk.end()); - -// // make sure the fields in the Frames match -// self.frames = FrameRange::empty(); - -// let (list, _typ) = if contains_any(&RESERVED_REGIONS.lock(), &self.frames) { -// (&FREE_RESERVED_FRAMES_LIST, MemoryRegionType::Reserved) -// } else { -// (&FREE_GENERAL_FRAMES_LIST, MemoryRegionType::Free) -// }; -// trace!("frame_allocator: deallocating {:?}, typ {:?}", self, _typ); - -// // Simply add the newly-deallocated chunk to the free frames list. -// let mut locked_list = list.lock(); -// let res = locked_list.insert(unmapped_frames); -// match res { -// Ok(_inserted_free_chunk) => (), -// Err(c) => error!("BUG: couldn't insert deallocated chunk {:?} into free frame list", c), -// } - -// // Here, we could optionally use above `_inserted_free_chunk` to merge the adjacent (contiguous) chunks -// // before or after the newly-inserted free chunk. -// // However, there's no *need* to do so until we actually run out of address space or until -// // a requested address is in a chunk that needs to be merged. -// // Thus, for performance, we save that for those future situations. -// } -// } - -// impl<'f> IntoIterator for &'f Frames<{FrameState::Unmapped}> { -// type IntoIter = AllocatedFramesIter<'f>; -// type Item = AllocatedFrame<'f>; -// fn into_iter(self) -> Self::IntoIter { -// AllocatedFramesIter { -// _owner: self, -// range: self.frames.clone().into_iter(), -// } -// } -// } - -// /// An iterator over each [`AllocatedFrame`] in a range of [`Frames`]. 
-// /// -// /// We must implement our own iterator type here in order to tie the lifetime `'f` -// /// of a returned `AllocatedFrame<'f>` type to the lifetime of its containing `Frames`. -// /// This is because the underlying type of `Frames` is a [`FrameRange`], -// /// which itself is a [`core::ops::RangeInclusive`] of [`Frame`]s, and unfortunately the -// /// `RangeInclusive` type doesn't implement an immutable iterator. -// /// -// /// Iterating through a `RangeInclusive` actually modifies its own internal range, -// /// so we must avoid doing that because it would break the semantics of a `FrameRange`. -// /// In fact, this is why [`FrameRange`] only implements `IntoIterator` but -// /// does not implement [`Iterator`] itself. -// pub struct AllocatedFramesIter<'f> { -// _owner: &'f Frames<{FrameState::Unmapped}>, -// range: RangeInclusiveIterator, -// } -// impl<'f> Iterator for AllocatedFramesIter<'f> { -// type Item = AllocatedFrame<'f>; -// fn next(&mut self) -> Option { -// self.range.next().map(|frame| -// AllocatedFrame { -// frame, _phantom: PhantomData, -// } -// ) -// } -// } - -// /// A reference to a single frame within a range of `Frames`. -// /// -// /// The lifetime of this type is tied to the lifetime of its owning `Frames`. -// #[derive(Debug)] -// pub struct AllocatedFrame<'f> { -// frame: Frame, -// _phantom: PhantomData<&'f Frame>, -// } -// impl<'f> Deref for AllocatedFrame<'f> { -// type Target = Frame; -// fn deref(&self) -> &Self::Target { -// &self.frame -// } -// } -// assert_not_impl_any!(AllocatedFrame: DerefMut, Clone); - - impl Frames { + #[allow(dead_code)] pub(crate) fn frames(&self) -> FrameRange { self.frames.clone() } @@ -228,19 +139,19 @@ impl Frames { // use the newly merged TrustedChunk to update the frame range self.frames = into_frame_range(&self.verified_chunk.frames()); core::mem::forget(other); - assert!(self.frames.start().number() == self.verified_chunk.start()); - assert!(self.frames.end().number() == self.verified_chunk.end()); + // assert!(self.frames.start().number() == self.verified_chunk.start()); + // assert!(self.frames.end().number() == self.verified_chunk.end()); //warn!("merge: {:?}", self); return Ok(()); }, Err(other_verified_chunk) => { let _ = core::mem::replace(&mut other.verified_chunk, other_verified_chunk); - assert!(self.frames.start().number() == self.verified_chunk.start()); - assert!(self.frames.end().number() == self.verified_chunk.end()); + // assert!(self.frames.start().number() == self.verified_chunk.start()); + // assert!(self.frames.end().number() == self.verified_chunk.end()); - assert!(other.frames.start().number() == other.verified_chunk.start()); - assert!(other.frames.end().number() == other.verified_chunk.end()); + // assert!(other.frames.start().number() == other.verified_chunk.start()); + // assert!(other.frames.end().number() == other.verified_chunk.end()); return Err(other); } } @@ -268,8 +179,8 @@ impl Frames { Ok(x) => x, Err(vchunk) => { let _ = core::mem::replace(&mut self.verified_chunk, vchunk); - assert!(self.frames.start().number() == self.verified_chunk.start()); - assert!(self.frames.end().number() == self.verified_chunk.end()); + // assert!(self.frames.start().number() == self.verified_chunk.start()); + // assert!(self.frames.end().number() == self.verified_chunk.end()); return (self, None, None); } }; @@ -296,19 +207,19 @@ impl Frames { verified_chunk: vchunk }) ); - assert!(c1.frames.start().number() == c1.verified_chunk.start()); - assert!(c1.frames.end().number() == 
c1.verified_chunk.end()); - if let Some(c) = &c2 { - assert!(c.frames.start().number() == c.verified_chunk.start()); - assert!(c.frames.end().number() == c.verified_chunk.end()); - } + // assert!(c1.frames.start().number() == c1.verified_chunk.start()); + // assert!(c1.frames.end().number() == c1.verified_chunk.end()); - if let Some(c) = &c3 { - assert!(c.frames.start().number() == c.verified_chunk.start()); - assert!(c.frames.end().number() == c.verified_chunk.end()); - } + // if let Some(c) = &c2 { + // assert!(c.frames.start().number() == c.verified_chunk.start()); + // assert!(c.frames.end().number() == c.verified_chunk.end()); + // } + // if let Some(c) = &c3 { + // assert!(c.frames.start().number() == c.verified_chunk.start()); + // assert!(c.frames.end().number() == c.verified_chunk.end()); + // } //warn!("split: {:?} {:?} {:?}", c1, c2, c3); (c1, c2, c3) @@ -339,8 +250,8 @@ impl Frames { Ok((first, second)) => (first, second), Err(vchunk) => { let _ = core::mem::replace(&mut self.verified_chunk, vchunk); - assert!(self.frames.start().number() == self.verified_chunk.start()); - assert!(self.frames.end().number() == self.verified_chunk.end()); + // assert!(self.frames.start().number() == self.verified_chunk.start()); + // assert!(self.frames.end().number() == self.verified_chunk.end()); return Err(self); } }; @@ -358,11 +269,11 @@ impl Frames { verified_chunk: second }; - assert!(c1.frames.start().number() == c1.verified_chunk.start()); - assert!(c1.frames.end().number() == c1.verified_chunk.end()); + // assert!(c1.frames.start().number() == c1.verified_chunk.start()); + // assert!(c1.frames.end().number() == c1.verified_chunk.end()); - assert!(c2.frames.start().number() == c2.verified_chunk.start()); - assert!(c2.frames.end().number() == c2.verified_chunk.end()); + // assert!(c2.frames.start().number() == c2.verified_chunk.start()); + // assert!(c2.frames.end().number() == c2.verified_chunk.end()); //warn!("split at: {:?} {:?}", c1, c2); @@ -399,43 +310,15 @@ impl Borrow for &'_ Frames { fn into_frame_range(frames: &RangeInclusive) -> FrameRange { - let start = FrameNum{ frame: *frames.start() }.into_frame() + let start = into_frame(*frames.start()) .expect("Verified chunk start was not a valid frame"); - let end = FrameNum{ frame: *frames.end() }.into_frame() + let end = into_frame(*frames.end()) .expect("Verified chunk end was not a valid frame"); - FrameRange::new(start, end) } -struct FrameNum { - frame: usize +fn into_frame(frame_num: usize) -> Option { + PhysicalAddress::new(frame_num * PAGE_SIZE) + .and_then(|addr| Some(Frame::containing_address(addr))) } - -impl FrameNum { - fn into_frame(&self) -> Option { - PhysicalAddress::new(self.frame * PAGE_SIZE) - .and_then(|addr| Some(Frame::containing_address(addr))) - } -} - -// /// This function is a callback used to convert `UnmappedFrames` into `AllocatedFrames`. -// /// `UnmappedFrames` represents frames that have been unmapped from a page that had -// /// exclusively mapped them, indicating that no others pages have been mapped -// /// to those same frames, and thus, they can be safely deallocated. -// /// -// /// This exists to break the cyclic dependency cycle between this crate and -// /// the `page_table_entry` crate, since `page_table_entry` must depend on types -// /// from this crate in order to enforce safety when modifying page table entries. 
-// pub(crate) fn into_frames_unmapped_state(tc: TrustedChunk, frames: FrameRange) -> Frames<{FrameState::Unmapped}> { -// let typ = if contains_any(&RESERVED_REGIONS.lock(), &frames) { -// MemoryRegionType::Reserved -// } else { -// MemoryRegionType::Free -// }; -// let f = Frames { typ, frames, verified_chunk: tc }; -// assert!(f.frames.start().number() == f.verified_chunk.start()); -// assert!(f.frames.end().number() == f.verified_chunk.end()); -// //warn!("into frames unmapped: {:?} ", f); -// f -// } \ No newline at end of file diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index 6125d84982..adca0da88a 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -38,12 +38,9 @@ mod test; mod static_array_rb_tree; // mod static_array_linked_list; -mod region; -// mod trusted_chunk_shim; -mod allocated_frames; mod frames; -use core::{borrow::Borrow, cmp::{min, max}, ops::Deref}; +use core::{borrow::Borrow, cmp::{min, max, Ordering}, fmt, ops::{Deref, DerefMut}, marker::PhantomData}; use frames::*; use kernel_config::memory::*; use memory_structs::{PhysicalAddress, Frame, FrameRange}; @@ -51,13 +48,8 @@ use spin::Mutex; use intrusive_collections::Bound; use static_array_rb_tree::*; use trusted_chunk::trusted_chunk::TrustedChunk; -// use trusted_chunk_shim::*; -use region::*; -use range_inclusive::RangeInclusive; -pub use allocated_frames::*; +use range_inclusive::{RangeInclusive, RangeInclusiveIterator}; -// pub type AllocatedFrames = Frames<{FrameState::Unmapped}>; -// pub use frames::AllocatedFrame; const FRAME_SIZE: usize = PAGE_SIZE; #[allow(dead_code)] @@ -303,6 +295,262 @@ pub enum MemoryRegionType { Unknown, } +/// A region of contiguous frames. +/// Only used for bookkeeping, not for allocation. +/// +/// # Ordering and Equality +/// +/// `Region` implements the `Ord` trait, and its total ordering is ONLY based on +/// its **starting** `Frame`. This is useful so we can store `Region`s in a sorted collection. +/// +/// Similarly, `Region` implements equality traits, `Eq` and `PartialEq`, +/// both of which are also based ONLY on the **starting** `Frame` of the `Region`. +/// Thus, comparing two `Region`s with the `==` or `!=` operators may not work as expected. +/// since it ignores their actual range of frames. +#[derive(Debug, Clone, Eq)] +#[allow(dead_code)] +pub struct Region { + /// The type of this memory region, e.g., whether it's in a free or reserved region. + pub(crate) typ: MemoryRegionType, + /// The Frames covered by this region, an inclusive range. + pub(crate) frames: FrameRange, +} +impl Region { + /// Returns a new `Region` with an empty range of frames. + pub fn empty() -> Region { + Region { + typ: MemoryRegionType::Unknown, + frames: FrameRange::empty(), + } + } +} + +impl Deref for Region { + type Target = FrameRange; + fn deref(&self) -> &FrameRange { + &self.frames + } +} +impl Ord for Region { + fn cmp(&self, other: &Self) -> Ordering { + self.frames.start().cmp(other.frames.start()) + } +} +impl PartialOrd for Region { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} +impl PartialEq for Region { + fn eq(&self, other: &Self) -> bool { + self.frames.start() == other.frames.start() + } +} +impl Borrow for &'_ Region { + fn borrow(&self) -> &Frame { + self.frames.start() + } +} + +/// Represents a range of allocated physical memory [`Frame`]s; derefs to [`FrameRange`]. 
+/// +/// These frames are not immediately accessible because they're not yet mapped +/// by any virtual memory pages. +/// You must do that separately in order to create a `MappedPages` type, +/// which can then be used to access the contents of these frames. +/// +/// This object represents ownership of the range of allocated physical frames; +/// if this object falls out of scope, its allocated frames will be auto-deallocated upon drop. +pub struct AllocatedFrames { + pub(crate) frames: Frames<{FrameState::Unmapped}>, +} + +// AllocatedFrames must not be Cloneable, and it must not expose its inner frames as mutable. +assert_not_impl_any!(AllocatedFrames: DerefMut, Clone); + +impl Deref for AllocatedFrames { + type Target = FrameRange; + fn deref(&self) -> &FrameRange { + &self.frames + } +} +impl fmt::Debug for AllocatedFrames { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "AllocatedFrames({:?})", self.frames) + } +} + +impl AllocatedFrames { + /// Returns an empty AllocatedFrames object that performs no frame allocation. + /// Can be used as a placeholder, but will not permit any real usage. + pub const fn empty() -> AllocatedFrames { + AllocatedFrames { + frames: Frames::empty() + } + } + + /// Merges the given `AllocatedFrames` object `other` into this `AllocatedFrames` object (`self`). + /// This is just for convenience and usability purposes, it performs no allocation or remapping. + /// + /// The given `other` must be physically contiguous with `self`, i.e., come immediately before or after `self`. + /// That is, either `self.start == other.end + 1` or `self.end + 1 == other.start` must be true. + /// + /// If either of those conditions are met, `self` is modified and `Ok(())` is returned, + /// otherwise `Err(other)` is returned. + pub fn merge(&mut self, mut other: AllocatedFrames) -> Result<(), AllocatedFrames> { + let chunk = core::mem::replace(&mut other.frames, Frames::empty()); + match self.frames.merge(chunk) { + Ok(_) => { + // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. + // This is not really necessary because it only contains an empty chunk. + core::mem::forget(other); + Ok(()) + }, + Err(other_chunk) => { + Err(AllocatedFrames{frames: other_chunk}) + } + } + } + + /// Splits this `AllocatedFrames` into two separate `AllocatedFrames` objects: + /// * `[beginning : at_frame - 1]` + /// * `[at_frame : end]` + /// + /// This function follows the behavior of [`core::slice::split_at()`], + /// thus, either one of the returned `AllocatedFrames` objects may be empty. + /// * If `at_frame == self.start`, the first returned `AllocatedFrames` object will be empty. + /// * If `at_frame == self.end + 1`, the second returned `AllocatedFrames` object will be empty. + /// + /// Returns an `Err` containing this `AllocatedFrames` if `at_frame` is otherwise out of bounds. + /// + /// [`core::slice::split_at()`]: https://doc.rust-lang.org/core/primitive.slice.html#method.split_at + pub fn split_at(mut self, at_frame: Frame) -> Result<(AllocatedFrames, AllocatedFrames), AllocatedFrames> { + let chunk = core::mem::replace(&mut self.frames, Frames::empty()); + match chunk.split_at(at_frame) { + Ok((chunk1, chunk2)) => { + // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. 
+ core::mem::forget(self); + Ok(( + AllocatedFrames{frames: chunk1}, + AllocatedFrames{frames: chunk2} + )) + }, + Err(chunk_not_split) => { + Err(AllocatedFrames{frames: chunk_not_split}) + } + } + } + + /// Returns an `AllocatedFrame` if this `AllocatedFrames` object contains only one frame. + /// + /// ## Panic + /// Panics if this `AllocatedFrame` contains multiple frames or zero frames. + pub fn as_allocated_frame(&self) -> AllocatedFrame { + assert!(self.size_in_frames() == 1); + AllocatedFrame { + frame: *self.start(), + _phantom: PhantomData, + } + } +} + +/// This function is a callback used to convert `UnmappedFrames` into `AllocatedFrames`. +/// `UnmappedFrames` represents frames that have been unmapped from a page that had +/// exclusively mapped them, indicating that no others pages have been mapped +/// to those same frames, and thus, they can be safely deallocated. +/// +/// This exists to break the cyclic dependency cycle between this crate and +/// the `page_table_entry` crate, since `page_table_entry` must depend on types +/// from this crate in order to enforce safety when modifying page table entries. +pub(crate) fn into_allocated_frames(tc: TrustedChunk, frames: FrameRange) -> AllocatedFrames { + let typ = if contains_any(&RESERVED_REGIONS.lock(), &frames) { + MemoryRegionType::Reserved + } else { + MemoryRegionType::Free + }; + AllocatedFrames { frames: Frames::from_trusted_chunk(tc, frames, typ) } +} + +impl Drop for AllocatedFrames { + fn drop(&mut self) { + if self.size_in_frames() == 0 { return; } + + let (list, _typ) = if contains_any(&RESERVED_REGIONS.lock(), &self.frames) { + (&FREE_RESERVED_FRAMES_LIST, MemoryRegionType::Reserved) + } else { + (&FREE_GENERAL_FRAMES_LIST, MemoryRegionType::Free) + }; + // trace!("frame_allocator: deallocating {:?}, typ {:?}", self, typ); + + // Simply add the newly-deallocated chunk to the free frames list. + let mut locked_list = list.lock(); + let res = locked_list.insert(core::mem::replace(&mut self.frames, Frames::empty())); + match res { + Ok(_inserted_free_chunk) => (), + Err(c) => error!("BUG: couldn't insert deallocated chunk {:?} into free frame list", c), + } + + // Here, we could optionally use above `_inserted_free_chunk` to merge the adjacent (contiguous) chunks + // before or after the newly-inserted free chunk. + // However, there's no *need* to do so until we actually run out of address space or until + // a requested address is in a chunk that needs to be merged. + // Thus, for performance, we save that for those future situations. + } +} + +impl<'f> IntoIterator for &'f AllocatedFrames { + type IntoIter = AllocatedFramesIter<'f>; + type Item = AllocatedFrame<'f>; + fn into_iter(self) -> Self::IntoIter { + AllocatedFramesIter { + _owner: self, + range: self.frames.clone().into_iter(), + } + } +} + +/// An iterator over each [`AllocatedFrame`] in a range of [`AllocatedFrames`]. +/// +/// We must implement our own iterator type here in order to tie the lifetime `'f` +/// of a returned `AllocatedFrame<'f>` type to the lifetime of its containing `AllocatedFrames`. +/// This is because the underlying type of `AllocatedFrames` is a [`FrameRange`], +/// which itself is a [`core::ops::RangeInclusive`] of [`Frame`]s, and unfortunately the +/// `RangeInclusive` type doesn't implement an immutable iterator. +/// +/// Iterating through a `RangeInclusive` actually modifies its own internal range, +/// so we must avoid doing that because it would break the semantics of a `FrameRange`. 
+/// In fact, this is why [`FrameRange`] only implements `IntoIterator` but +/// does not implement [`Iterator`] itself. +pub struct AllocatedFramesIter<'f> { + _owner: &'f AllocatedFrames, + range: RangeInclusiveIterator, +} +impl<'f> Iterator for AllocatedFramesIter<'f> { + type Item = AllocatedFrame<'f>; + fn next(&mut self) -> Option { + self.range.next().map(|frame| + AllocatedFrame { + frame, _phantom: PhantomData, + } + ) + } +} + +/// A reference to a single frame within a range of `AllocatedFrames`. +/// +/// The lifetime of this type is tied to the lifetime of its owning `AllocatedFrames`. +#[derive(Debug)] +pub struct AllocatedFrame<'f> { + frame: Frame, + _phantom: PhantomData<&'f Frame>, +} +impl<'f> Deref for AllocatedFrame<'f> { + type Target = Frame; + fn deref(&self) -> &Self::Target { + &self.frame + } +} +assert_not_impl_any!(AllocatedFrame: DerefMut, Clone); /// A series of pending actions related to frame allocator bookkeeping, /// which may result in heap allocation. diff --git a/kernel/frame_allocator/src/region.rs b/kernel/frame_allocator/src/region.rs deleted file mode 100644 index d19b4689e9..0000000000 --- a/kernel/frame_allocator/src/region.rs +++ /dev/null @@ -1,60 +0,0 @@ -use memory_structs::{FrameRange, Frame}; -use crate::MemoryRegionType; -use core::{borrow::Borrow, cmp::Ordering, ops::Deref}; - -/// A region of contiguous frames. -/// Only used for bookkeeping, not for allocation. -/// -/// # Ordering and Equality -/// -/// `Region` implements the `Ord` trait, and its total ordering is ONLY based on -/// its **starting** `Frame`. This is useful so we can store `Region`s in a sorted collection. -/// -/// Similarly, `Region` implements equality traits, `Eq` and `PartialEq`, -/// both of which are also based ONLY on the **starting** `Frame` of the `Region`. -/// Thus, comparing two `Region`s with the `==` or `!=` operators may not work as expected. -/// since it ignores their actual range of frames. -#[derive(Debug, Clone, Eq)] -#[allow(dead_code)] -pub struct Region { - /// The type of this memory region, e.g., whether it's in a free or reserved region. - pub(crate) typ: MemoryRegionType, - /// The Frames covered by this region, an inclusive range. - pub(crate) frames: FrameRange, -} -impl Region { - /// Returns a new `Region` with an empty range of frames. - pub fn empty() -> Region { - Region { - typ: MemoryRegionType::Unknown, - frames: FrameRange::empty(), - } - } -} - -impl Deref for Region { - type Target = FrameRange; - fn deref(&self) -> &FrameRange { - &self.frames - } -} -impl Ord for Region { - fn cmp(&self, other: &Self) -> Ordering { - self.frames.start().cmp(other.frames.start()) - } -} -impl PartialOrd for Region { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} -impl PartialEq for Region { - fn eq(&self, other: &Self) -> bool { - self.frames.start() == other.frames.start() - } -} -impl Borrow for &'_ Region { - fn borrow(&self) -> &Frame { - self.frames.start() - } -} \ No newline at end of file diff --git a/kernel/frame_allocator/src/trusted_chunk_shim.rs b/kernel/frame_allocator/src/trusted_chunk_shim.rs deleted file mode 100644 index a680c79d8a..0000000000 --- a/kernel/frame_allocator/src/trusted_chunk_shim.rs +++ /dev/null @@ -1,231 +0,0 @@ -//! A trusted wrapper over the verified Chunk. -//! Needed because verification fails on a trusted chunk that stores a FrameRange or RangeInclusive, -//! but succeeds with RangeInclusive. -//! -//! 
We should be able to remove this module and work directly with the verified crate in the foreseeable future. -//! All this model should do is make sure that the start and end of the stored `frames` is equal to the start and end of the `verified_chunk` - -use kernel_config::memory::PAGE_SIZE; -use memory_structs::{FrameRange, Frame, PhysicalAddress}; -use range_inclusive::RangeInclusive; -use crate::{MemoryRegionType, AllocatedFrames}; -use core::{borrow::Borrow, cmp::Ordering, ops::{Deref, DerefMut}}; -use spin::Mutex; -use trusted_chunk::trusted_chunk::*; - -static CHUNK_ALLOCATOR: Mutex = Mutex::new(TrustedChunkAllocator::new()); - -pub(crate) fn switch_chunk_allocator_to_heap_structure() { - CHUNK_ALLOCATOR.lock().switch_to_heap_allocated() - .expect("BUG: Failed to switch the chunk allocator to heap allocated. May have been called twice."); -} - -#[derive(Debug, Eq)] -pub struct Chunk { - /// The type of this memory chunk, e.g., whether it's in a free or reserved region. - typ: MemoryRegionType, - /// The Frames covered by this chunk, an inclusive range. - frames: FrameRange, - /// The actual verified chunk - verified_chunk: TrustedChunk -} - -assert_not_impl_any!(Chunk: DerefMut, Clone); - -impl Chunk { - pub(crate) fn new(typ: MemoryRegionType, frames: FrameRange) -> Result { - let verified_chunk = CHUNK_ALLOCATOR.lock().create_chunk(frames.to_range_inclusive()) - .map(|(chunk, _)| chunk) - .map_err(|chunk_error|{ - match chunk_error { - ChunkCreationError::Overlap(_idx) => "Failed to create a verified chunk due to an overlap", - ChunkCreationError::NoSpace => "Before the heap is initialized, requested more chunks than there is space for (64)", - ChunkCreationError::InvalidRange => "Could not create a chunk for an empty range, use the empty() function" - } - })?; - - Ok(Chunk { - typ, - frames, - verified_chunk - }) - } - - /// Creates a new Chunk from a TrustedChunk and a FrameRange. - /// Only used within the allocated frames callback function. - pub(crate) fn from_trusted_chunk(verified_chunk: TrustedChunk, frames: FrameRange, typ: MemoryRegionType) -> Chunk { - Chunk { - typ, - frames, - verified_chunk - } - } - - // pub(crate) fn frames(&self) -> FrameRange { - // self.frames.clone() - // } - - pub(crate) fn typ(&self) -> MemoryRegionType { - self.typ - } - - pub(crate) fn as_allocated_frames(self) -> AllocatedFrames { - AllocatedFrames { - frames: self, - } - } - - /// Returns a new `Chunk` with an empty range of frames. - pub(crate) const fn empty() -> Chunk { - Chunk { - typ: MemoryRegionType::Unknown, - frames: FrameRange::empty(), - verified_chunk: TrustedChunk::empty() - } - } - - pub(crate) fn merge(&mut self, mut other: Chunk) -> Result<(), Chunk> { - if self.is_empty() || other.is_empty() { - return Err(other); - } - - // take out the TrustedChunk from other - let other_verified_chunk = core::mem::replace(&mut other.verified_chunk, TrustedChunk::empty()); - - // merged the other TrustedChunk with self - // failure here means that the chunks cannot be merged - self.verified_chunk.merge(other_verified_chunk) - .map_err(|vchunk| { - let _ = core::mem::replace(&mut other.verified_chunk, vchunk); - other - })?; - - // use the newly merged TrustedChunk to update the frame range - self.frames = into_frame_range(&self.verified_chunk.frames()); - - Ok(()) - } - - /// An inner function that breaks up the given chunk into multiple smaller chunks. - /// - /// Returns a tuple of three chunks: - /// 1. The `Chunk` containing the requested range of frames starting at `start_frame`. 
- /// 2. The range of frames in the `self` that came before the beginning of the requested frame range. - /// 3. The range of frames in the `self` that came after the end of the requested frame range. - pub fn split( - mut self, - start_frame: Frame, - num_frames: usize, - ) -> (Chunk, Option, Option) { - if self.is_empty() { - return (self, None, None); - } - - // take out the TrustedChunk - let verified_chunk = core::mem::replace(&mut self.verified_chunk, TrustedChunk::empty()); - - let (before, new_allocation, after) = match verified_chunk.split(start_frame.number(), num_frames) { - Ok(x) => x, - Err(vchunk) => { - let _ = core::mem::replace(&mut self.verified_chunk, vchunk); - return (self, None, None); - } - }; - - (Chunk { - typ: self.typ, - frames: into_frame_range(&new_allocation.frames()), - verified_chunk: new_allocation - }, - before.and_then(|vchunk| - Some(Chunk{ - typ: self.typ, - frames: into_frame_range(&vchunk.frames()), - verified_chunk: vchunk - }) - ), - after.and_then(|vchunk| - Some(Chunk{ - typ: self.typ, - frames: into_frame_range(&vchunk.frames()), - verified_chunk: vchunk - }) - )) - } - - pub fn split_at(mut self, at_frame: Frame) -> Result<(Chunk, Chunk), Chunk> { - if self.is_empty() { - return Err(self); - } - let typ = self.typ; - - // take out the TrustedChunk - let verified_chunk = core::mem::replace(&mut self.verified_chunk, TrustedChunk::empty()); - - let (first, second) = verified_chunk.split_at(at_frame.number()) - .map_err(|vchunk| { - let _ = core::mem::replace(&mut self.verified_chunk, vchunk); - self - })?; - - Ok((Chunk { - typ, - frames: into_frame_range(&first.frames()), - verified_chunk: first - }, - Chunk { - typ, - frames: into_frame_range(&second.frames()), - verified_chunk: second - })) - } -} - -impl Deref for Chunk { - type Target = FrameRange; - fn deref(&self) -> &FrameRange { - &self.frames - } -} -impl Ord for Chunk { - fn cmp(&self, other: &Self) -> Ordering { - self.frames.start().cmp(other.frames.start()) - } -} -impl PartialOrd for Chunk { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} -impl PartialEq for Chunk { - fn eq(&self, other: &Self) -> bool { - self.frames.start() == other.frames.start() - } -} -impl Borrow for &'_ Chunk { - fn borrow(&self) -> &Frame { - self.frames.start() - } -} - - -fn into_frame_range(frames: &RangeInclusive) -> FrameRange { - let start = FrameNum{ frame: *frames.start() }.into_frame() - .expect("Verified chunk start was not a valid frame"); - - let end = FrameNum{ frame: *frames.end() }.into_frame() - .expect("Verified chunk end was not a valid frame"); - - FrameRange::new(start, end) -} - -struct FrameNum { - frame: usize -} - -impl FrameNum { - fn into_frame(&self) -> Option { - PhysicalAddress::new(self.frame * PAGE_SIZE) - .and_then(|addr| Some(Frame::containing_address(addr))) - } -} \ No newline at end of file From b0fb296bddec299a7724bdded1f354715ecedcd2 Mon Sep 17 00:00:00 2001 From: Ramla Ijaz Date: Wed, 14 Jun 2023 07:11:16 -0400 Subject: [PATCH 16/23] formatting --- kernel/frame_allocator/src/lib.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index adca0da88a..cb45f111b8 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -40,7 +40,7 @@ mod static_array_rb_tree; // mod static_array_linked_list; mod frames; -use core::{borrow::Borrow, cmp::{min, max, Ordering}, fmt, ops::{Deref, DerefMut}, marker::PhantomData}; 
+use core::{borrow::Borrow, cmp::{Ordering, min, max}, fmt, ops::{Deref, DerefMut}, marker::PhantomData}; use frames::*; use kernel_config::memory::*; use memory_structs::{PhysicalAddress, Frame, FrameRange}; @@ -311,9 +311,9 @@ pub enum MemoryRegionType { #[allow(dead_code)] pub struct Region { /// The type of this memory region, e.g., whether it's in a free or reserved region. - pub(crate) typ: MemoryRegionType, + typ: MemoryRegionType, /// The Frames covered by this region, an inclusive range. - pub(crate) frames: FrameRange, + frames: FrameRange, } impl Region { /// Returns a new `Region` with an empty range of frames. @@ -352,6 +352,7 @@ impl Borrow for &'_ Region { } } + /// Represents a range of allocated physical memory [`Frame`]s; derefs to [`FrameRange`]. /// /// These frames are not immediately accessible because they're not yet mapped @@ -362,7 +363,7 @@ impl Borrow for &'_ Region { /// This object represents ownership of the range of allocated physical frames; /// if this object falls out of scope, its allocated frames will be auto-deallocated upon drop. pub struct AllocatedFrames { - pub(crate) frames: Frames<{FrameState::Unmapped}>, + frames: Frames<{FrameState::Unmapped}>, } // AllocatedFrames must not be Cloneable, and it must not expose its inner frames as mutable. @@ -402,7 +403,6 @@ impl AllocatedFrames { match self.frames.merge(chunk) { Ok(_) => { // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. - // This is not really necessary because it only contains an empty chunk. core::mem::forget(other); Ok(()) }, @@ -462,7 +462,7 @@ impl AllocatedFrames { /// This exists to break the cyclic dependency cycle between this crate and /// the `page_table_entry` crate, since `page_table_entry` must depend on types /// from this crate in order to enforce safety when modifying page table entries. -pub(crate) fn into_allocated_frames(tc: TrustedChunk, frames: FrameRange) -> AllocatedFrames { +fn into_allocated_frames(tc: TrustedChunk, frames: FrameRange) -> AllocatedFrames { let typ = if contains_any(&RESERVED_REGIONS.lock(), &frames) { MemoryRegionType::Reserved } else { @@ -475,6 +475,7 @@ impl Drop for AllocatedFrames { fn drop(&mut self) { if self.size_in_frames() == 0 { return; } + // Should we remove these lines since we store the typ in Frames? let (list, _typ) = if contains_any(&RESERVED_REGIONS.lock(), &self.frames) { (&FREE_RESERVED_FRAMES_LIST, MemoryRegionType::Reserved) } else { @@ -552,6 +553,7 @@ impl<'f> Deref for AllocatedFrame<'f> { } assert_not_impl_any!(AllocatedFrame: DerefMut, Clone); + /// A series of pending actions related to frame allocator bookkeeping, /// which may result in heap allocation. /// @@ -612,6 +614,7 @@ impl<'list> Drop for DeferredAllocAction<'list> { } } + /// Possible allocation errors. #[derive(Debug)] enum AllocationError { From b551c147d127ba08c7b9badb71f8d744e3c8a0eb Mon Sep 17 00:00:00 2001 From: Ramla Ijaz Date: Wed, 14 Jun 2023 07:13:31 -0400 Subject: [PATCH 17/23] comments --- kernel/frame_allocator/src/lib.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index cb45f111b8..53b4f78bc6 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -512,6 +512,10 @@ impl<'f> IntoIterator for &'f AllocatedFrames { /// An iterator over each [`AllocatedFrame`] in a range of [`AllocatedFrames`]. /// +/// To Do: Description is no longer valid, since we have an iterator for RangeInclusive now. 
+/// but I still think it's useful to thave an `AllocatedFrames` iterator that ties the lifetime +/// of the `AllocatedFrame` to the original object. +/// /// We must implement our own iterator type here in order to tie the lifetime `'f` /// of a returned `AllocatedFrame<'f>` type to the lifetime of its containing `AllocatedFrames`. /// This is because the underlying type of `AllocatedFrames` is a [`FrameRange`], From 82fd670cefd36ad2bed5ac932bf971d07a7b0515 Mon Sep 17 00:00:00 2001 From: Ramla Ijaz Date: Wed, 14 Jun 2023 07:17:31 -0400 Subject: [PATCH 18/23] allow incomplete features --- kernel/frame_allocator/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index 53b4f78bc6..6b7c0e2fb9 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -21,6 +21,7 @@ #![allow(clippy::blocks_in_if_conditions)] #![no_std] #![feature(box_into_inner)] +#![allow(incomplete_features)] #![feature(adt_const_params)] extern crate alloc; From 1fa390a3c6f70bebc7f84d07d2f6204c8e8c3de0 Mon Sep 17 00:00:00 2001 From: Ramla Ijaz Date: Wed, 14 Jun 2023 08:04:40 -0400 Subject: [PATCH 19/23] one commit from main wasn't merged in, strange --- kernel/kernel_config/src/memory.rs | 27 +-- kernel/mod_mgmt/src/lib.rs | 87 +++----- .../nano_core/linker_higher_half-aarch64.ld | 12 -- kernel/page_allocator/src/lib.rs | 189 ++++++------------ 4 files changed, 97 insertions(+), 218 deletions(-) diff --git a/kernel/kernel_config/src/memory.rs b/kernel/kernel_config/src/memory.rs index 2a0b615899..596f666931 100644 --- a/kernel/kernel_config/src/memory.rs +++ b/kernel/kernel_config/src/memory.rs @@ -1,4 +1,4 @@ -//! The basic virtual address ranges (virtual memory map) defined by Theseus. +//! The basic virtual memory map that Theseus assumes. //! //! Current P4 (top-level page table) mappings: //! * 511: kernel text sections. @@ -53,15 +53,15 @@ pub const TEMPORARY_PAGE_VIRT_ADDR: usize = MAX_VIRTUAL_ADDRESS; /// Value: 512. pub const ENTRIES_PER_PAGE_TABLE: usize = PAGE_SIZE / BYTES_PER_ADDR; -/// Value: 511. The 511th entry is used (in part) for kernel text sections. +/// Value: 511. The 511th entry is used for kernel text sections pub const KERNEL_TEXT_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 1; /// Value: 510. The 510th entry is used to recursively map the current P4 root page table frame -/// such that it can be accessed and modified just like any other level of page table. +// such that it can be accessed and modified just like any other level of page table. pub const RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 2; -/// Value: 509. The 509th entry is used for the kernel heap. +/// Value: 509. The 509th entry is used for the kernel heap pub const KERNEL_HEAP_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 3; /// Value: 508. The 508th entry is used to temporarily recursively map the P4 root page table frame -/// of an upcoming (new) page table such that it can be accessed and modified. +// of an upcoming (new) page table such that it can be accessed and modified. 
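// Aside: a minimal, self-contained sketch of how these P4-index constants become virtual
// addresses. The PAGE_SHIFT and P4_INDEX_SHIFT values below are assumed x86_64 values (they
// are not shown in this hunk), and this canonicalize() is an illustrative sign-extension
// formulation, not the actual kernel_config implementation. Each P4 entry covers 2^39 bytes
// (512 GiB), and canonical addresses sign-extend bit 47 into the upper bits.
const PAGE_SHIFT: usize = 12;
const P4_INDEX_SHIFT: usize = 27; // 9 (P3) + 9 (P2) + 9 (P1) index bits
const fn canonicalize(addr: usize) -> usize {
    ((addr << 16) as isize >> 16) as usize // sign-extend bit 47 into bits 48..=63
}
fn main() {
    // 511th entry: kernel text; matches the 0xFFFF_FF80_0000_0000 value quoted in the hunk below.
    assert_eq!(canonicalize(511 << (P4_INDEX_SHIFT + PAGE_SHIFT)), 0xFFFF_FF80_0000_0000);
    // 510th entry: recursive P4 mapping, exactly one P4 entry (512 GiB) below the kernel text entry.
    assert_eq!(canonicalize(510 << (P4_INDEX_SHIFT + PAGE_SHIFT)), 0xFFFF_FF00_0000_0000);
}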
pub const UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 4; @@ -89,9 +89,12 @@ pub const KERNEL_OFFSET: usize = canonicalize(MAX_VIRTUAL_ADDRESS - (TWO_GIGABYT /// Actual value on x86_64: 0o177777_777_000_000_000_0000, or 0xFFFF_FF80_0000_0000 pub const KERNEL_TEXT_START: usize = canonicalize(KERNEL_TEXT_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); -/// The start of the virtual address range covered by the 510th P4 entry, -/// i.e., [`RECURSIVE_P4_INDEX`]; -pub const RECURSIVE_P4_START: usize = canonicalize(RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); +/// The size in bytes, not in pages. +/// +/// the KERNEL_OFFSET starts at (MAX_ADDR - 2GiB), +/// and .text contains nano_core, so this is the +/// first 510GiB of the 511th P4 entry. +pub const KERNEL_TEXT_MAX_SIZE: usize = ADDRESSABILITY_PER_P4_ENTRY - TWO_GIGABYTES; /// The higher-half heap gets the 512GB address range starting at the 509th P4 entry, /// which is the slot right below the recursive P4 entry (510). @@ -100,12 +103,12 @@ pub const KERNEL_HEAP_START: usize = canonicalize(KERNEL_HEAP_P4_INDEX << (P4_IN #[cfg(not(debug_assertions))] pub const KERNEL_HEAP_INITIAL_SIZE: usize = 64 * 1024 * 1024; // 64 MiB + #[cfg(debug_assertions)] pub const KERNEL_HEAP_INITIAL_SIZE: usize = 256 * 1024 * 1024; // 256 MiB, debug builds require more heap space. -/// The kernel heap is allowed to grow to fill the entirety of its P4 entry. +/// the kernel heap gets the whole 509th P4 entry. pub const KERNEL_HEAP_MAX_SIZE: usize = ADDRESSABILITY_PER_P4_ENTRY; -/// The start of the virtual address range covered by the 508th P4 entry, -/// i.e., [`UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX`]; -pub const UPCOMING_PAGE_TABLE_RECURSIVE_P4_START: usize = canonicalize(UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); +/// The system (page allocator) must not use addresses at or above this address. +pub const UPCOMING_PAGE_TABLE_RECURSIVE_MEMORY_START: usize = canonicalize(UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); diff --git a/kernel/mod_mgmt/src/lib.rs b/kernel/mod_mgmt/src/lib.rs index 89a8e1d932..70ad66d5b5 100644 --- a/kernel/mod_mgmt/src/lib.rs +++ b/kernel/mod_mgmt/src/lib.rs @@ -14,7 +14,7 @@ use alloc::{ }; use spin::{Mutex, Once}; use xmas_elf::{ElfFile, sections::{SHF_ALLOC, SHF_EXECINSTR, SHF_TLS, SHF_WRITE, SectionData, ShType}, symbol_table::{Binding, Type}}; -use memory::{MmiRef, MemoryManagementInfo, VirtualAddress, MappedPages, PteFlags, allocate_pages_by_bytes, allocate_frames_by_bytes_at, PageRange, allocate_pages_by_bytes_in_range}; +use memory::{MmiRef, MemoryManagementInfo, VirtualAddress, MappedPages, PteFlags, allocate_pages_by_bytes, allocate_frames_by_bytes_at}; use bootloader_modules::BootloaderModule; use cow_arc::CowArc; use rustc_demangle::demangle; @@ -33,7 +33,6 @@ pub mod parse_nano_core; pub mod replace_nano_core_crates; mod serde; - /// The name of the directory that contains all of the CrateNamespace files. pub const NAMESPACES_DIRECTORY_NAME: &str = "namespaces"; @@ -2883,35 +2882,6 @@ struct SectionPages { } -/// The range of virtual addresses from which we allocate pages for executable .text sections. -/// -/// This is mostly an architecture-specific design choice (hopefully a temporary one): -/// * On aarch64, even with the large code model, we are not (yet) able to generate -/// code with branch instructions (call/jump) that can address instructions more than -/// 128 MiB away from the current instruction. 
-/// Thus, we restrict the range of .text section locations to ensure they are within 128 MiB. -/// At some point in the future, this will be a limitation, but not for a long, long time. -/// * On x86_64, this is not necessary, so the range is `None`. -pub const KERNEL_TEXT_ADDR_RANGE: Option = { - #[cfg(target_arch = "x86_64")] { - None - } - #[cfg(target_arch = "aarch64")] { - use {memory::Page, kernel_config::memory::KERNEL_OFFSET}; - - const ONE_MIB: usize = 0x10_0000; - let start_vaddr = VirtualAddress::new_canonical(KERNEL_OFFSET + ONE_MIB); - let end_vaddr = VirtualAddress::new_canonical(start_vaddr.value() + (128 * ONE_MIB) - 1); - Some(PageRange::new( - // the start of the base kernel image's .text section. - Page::containing_address(start_vaddr), - // the start of the base kernel image's .text section, plus 128 MiB. - Page::containing_address(end_vaddr), - )) - } -}; - - /// Allocates and maps memory sufficient to hold the sections that are found in the given `ElfFile`. /// Only sections that are marked "allocated" (`ALLOC`) in the ELF object file will contribute to the mappings' sizes. fn allocate_section_pages(elf_file: &ElfFile, kernel_mmi_ref: &MmiRef) -> Result { @@ -2983,37 +2953,10 @@ fn allocate_section_pages(elf_file: &ElfFile, kernel_mmi_ref: &MmiRef) -> Result // trace!("\n\texec_bytes: {exec_bytes} {exec_bytes:#X}\n\tro_bytes: {ro_bytes} {ro_bytes:#X}\n\trw_bytes: {rw_bytes} {rw_bytes:#X}"); // Allocate contiguous virtual memory pages for each section and map them to random frames as writable. - // We must allocate these pages separately because they use different flags. - let alloc_sec = |size_in_bytes: usize, within_range: Option<&PageRange>, flags: PteFlags| { - let allocated_pages = if let Some(range) = within_range { - allocate_pages_by_bytes_in_range(size_in_bytes, range) - .map_err(|_| "Couldn't allocate pages in text section address range")? - } else { - allocate_pages_by_bytes(size_in_bytes) - .ok_or("Couldn't allocate pages for new section")? - }; - - kernel_mmi_ref.lock().page_table.map_allocated_pages( - allocated_pages, - flags.valid(true).writable(true) - ) - }; - - let executable_pages = if exec_bytes > 0 { - Some(alloc_sec(exec_bytes, KERNEL_TEXT_ADDR_RANGE.as_ref(), TEXT_SECTION_FLAGS)?) - } else { - None - }; - let read_only_pages = if ro_bytes > 0 { - Some(alloc_sec(ro_bytes, None, RODATA_SECTION_FLAGS)?) - } else { - None - }; - let read_write_pages = if rw_bytes > 0 { - Some(alloc_sec(rw_bytes, None, DATA_BSS_SECTION_FLAGS)?) - } else { - None - }; + // We must allocate these pages separately because they will have different flags later. + let executable_pages = if exec_bytes > 0 { Some(allocate_and_map_as_writable(exec_bytes, TEXT_SECTION_FLAGS, kernel_mmi_ref)?) } else { None }; + let read_only_pages = if ro_bytes > 0 { Some(allocate_and_map_as_writable(ro_bytes, RODATA_SECTION_FLAGS, kernel_mmi_ref)?) } else { None }; + let read_write_pages = if rw_bytes > 0 { Some(allocate_and_map_as_writable(rw_bytes, DATA_BSS_SECTION_FLAGS, kernel_mmi_ref)?) } else { None }; let range_tuple = |mp: MappedPages, size_in_bytes: usize| { let start = mp.start_address(); @@ -3028,6 +2971,26 @@ fn allocate_section_pages(elf_file: &ElfFile, kernel_mmi_ref: &MmiRef) -> Result } +/// A convenience function for allocating virtual pages and mapping them to random physical frames. 
+/// +/// The returned `MappedPages` will be at least as large as `size_in_bytes`, +/// rounded up to the nearest `Page` size, +/// and is mapped as writable along with the other specified `flags` +/// to ensure we can copy content into it. +fn allocate_and_map_as_writable( + size_in_bytes: usize, + flags: PteFlags, + kernel_mmi_ref: &MmiRef, +) -> Result { + let allocated_pages = allocate_pages_by_bytes(size_in_bytes) + .ok_or("Couldn't allocate_pages_by_bytes, out of virtual address space")?; + kernel_mmi_ref.lock().page_table.map_allocated_pages( + allocated_pages, + flags.valid(true).writable(true) + ) +} + + #[allow(dead_code)] fn dump_dependent_crates(krate: &LoadedCrate, prefix: String) { for weak_crate_ref in krate.crates_dependent_on_me() { diff --git a/kernel/nano_core/linker_higher_half-aarch64.ld b/kernel/nano_core/linker_higher_half-aarch64.ld index 4a6355f63c..16378cb193 100644 --- a/kernel/nano_core/linker_higher_half-aarch64.ld +++ b/kernel/nano_core/linker_higher_half-aarch64.ld @@ -27,18 +27,6 @@ SECTIONS { *(.text .text.*) } - /* - * Currently, we are unable to force aarch64 to emit branch (call/jump) instructions - * that are capable of addressing a destination instruction pointer more than 128MiB away, - * even when specifying the "large" code model with `-C code-model=large`. - * - * Thus, as a workaround, we reserve the 128MiB chunk of virtual address space that - * directly follows the initial base kernel image's executable .text section, - * ensuring it can only be used by the page allocator when allocating pages for - * newly-loaded .text sections. - */ - . = ALIGN(128M); - .rodata ALIGN(4K) : AT(ADDR(.rodata) - KERNEL_OFFSET) { *(.rodata .rodata.*) diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs index 326ecec154..236fd30b15 100644 --- a/kernel/page_allocator/src/lib.rs +++ b/kernel/page_allocator/src/lib.rs @@ -30,7 +30,7 @@ mod static_array_rb_tree; // mod static_array_linked_list; -use core::{borrow::Borrow, cmp::{Ordering, max, min}, fmt, ops::{Deref, DerefMut}}; +use core::{borrow::Borrow, cmp::Ordering, fmt, ops::{Deref, DerefMut}}; use kernel_config::memory::*; use memory_structs::{VirtualAddress, Page, PageRange}; use spin::{Mutex, Once}; @@ -53,9 +53,7 @@ static DESIGNATED_PAGES_LOW_END: Once = Once::new(); /// /// TODO: once the heap is fully dynamic and not dependent on static addresses, /// we can exclude the heap from the designated region. -static DESIGNATED_PAGES_HIGH_START: Page = Page::containing_address( - VirtualAddress::new_canonical(UPCOMING_PAGE_TABLE_RECURSIVE_P4_START) -); +static DESIGNATED_PAGES_HIGH_START: Page = Page::containing_address(VirtualAddress::new_canonical(UPCOMING_PAGE_TABLE_RECURSIVE_MEMORY_START)); const MIN_PAGE: Page = Page::containing_address(VirtualAddress::zero()); const MAX_PAGE: Page = Page::containing_address(VirtualAddress::new_canonical(MAX_VIRTUAL_ADDRESS)); @@ -71,62 +69,45 @@ static FREE_PAGE_LIST: Mutex> = Mutex::new(StaticArrayR /// lower designated region, which should be the ending address of the initial kernel image /// (a lower-half identity address). /// -/// The page allocator considers two regions as "designated" regions. It will only allocate pages -/// within these designated regions if the specifically-requested address falls within them. -/// 1. The lower designated region is for identity-mapped bootloader content -/// and base kernel image sections, which is used during OS initialization. -/// 2. 
The higher designated region is for the same content, mapped to the higher half -/// of the address space. It also excludes the address ranges for the P4 entries that -/// Theseus uses for recursive page table mapping. -/// * See [`RECURSIVE_P4_INDEX`] and [`UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX`]. +/// The page allocator will only allocate addresses lower than `end_vaddr_of_low_designated_region` +/// if specifically requested. +/// General allocation requests for any virtual address will not use any address lower than that, +/// unless the rest of the entire virtual address space is already in use. /// -/// General allocation requests for pages at any virtual address will not use -/// addresses within designated regions unless the entire address space is already in use, -/// which is an extraordinarily unlikely (i.e., basically impossible) situation. pub fn init(end_vaddr_of_low_designated_region: VirtualAddress) -> Result<(), &'static str> { assert!(end_vaddr_of_low_designated_region < DESIGNATED_PAGES_HIGH_START.start_address()); - let designated_low_end_page = DESIGNATED_PAGES_LOW_END.call_once( - || Page::containing_address(end_vaddr_of_low_designated_region) - ); - let designated_low_end = *designated_low_end_page; + let designated_low_end = DESIGNATED_PAGES_LOW_END.call_once(|| Page::containing_address(end_vaddr_of_low_designated_region)); + let designated_low_end = *designated_low_end; let initial_free_chunks = [ - // The first region contains all pages from address zero to the end of the low designated region, - // which is generally reserved for identity-mapped bootloader stuff and base kernel image sections. - Some(Chunk { + // The first region contains all pages *below* the beginning of the 510th entry of P4. + // We split it up into three chunks just for ease, since it overlaps the designated regions. + Some(Chunk { pages: PageRange::new( Page::containing_address(VirtualAddress::zero()), designated_low_end, ) }), - // The second region contains the massive range from the end of the low designated region - // to the beginning of the high designated region, which comprises the majority of the address space. - // The beginning of the high designated region starts at the reserved P4 entry used to - // recursively map the "upcoming" page table (i.e., UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX). - Some(Chunk { + Some(Chunk { pages: PageRange::new( designated_low_end + 1, DESIGNATED_PAGES_HIGH_START - 1, ) }), - // Here, we skip the addresses covered by the `UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX`. - - // The third region contains the range of addresses reserved for the heap, - // which ends at the beginning of the addresses covered by the `RECURSIVE_P4_INDEX`, - Some(Chunk { + Some(Chunk { pages: PageRange::new( - Page::containing_address(VirtualAddress::new_canonical(KERNEL_HEAP_START)), + DESIGNATED_PAGES_HIGH_START, // This is the page right below the beginning of the 510th entry of the top-level P4 page table. - Page::containing_address(VirtualAddress::new_canonical(RECURSIVE_P4_START - 1)), + Page::containing_address(VirtualAddress::new_canonical(KERNEL_TEXT_START - ADDRESSABILITY_PER_P4_ENTRY - 1)), ) }), - // Here, we skip the addresses covered by the `RECURSIVE_P4_INDEX`. - // The fourth region contains all pages in the 511th (last) entry of P4. - Some(Chunk { + // The second region contains all pages *above* the end of the 510th entry of P4, i.e., starting at the 511th (last) entry of P4. + // This is fully covered by the second (higher) designated region. 
+ Some(Chunk { pages: PageRange::new( Page::containing_address(VirtualAddress::new_canonical(KERNEL_TEXT_START)), - MAX_PAGE, + Page::containing_address(VirtualAddress::new_canonical(MAX_VIRTUAL_ADDRESS)), ) }), None, None, None, None, @@ -328,7 +309,7 @@ impl Drop for AllocatedPages { /// that may result in heap allocation should occur. /// Such actions include adding chunks to lists of free pages or pages in use. /// -/// The vast majority of use cases don't care about such precise control, +/// The vast majority of use cases don't care about such precise control, /// so you can simply drop this struct at any time or ignore it /// with a `let _ = ...` binding to instantly drop it. pub struct DeferredAllocAction<'list> { @@ -363,15 +344,14 @@ impl<'list> Drop for DeferredAllocAction<'list> { } -/// Possible errors returned by the page allocator. +/// Possible allocation errors. #[derive(Debug)] -pub enum AllocationError { +enum AllocationError { /// The requested address was not free: it was already allocated, or is outside the range of this allocator. AddressNotFree(Page, usize), /// The address space was full, or there was not a large-enough chunk - /// or enough remaining chunks (within the given `PageRange`, if any) - /// that could satisfy the requested allocation size. - OutOfAddressSpace(usize, Option), + /// or enough remaining chunks that could satisfy the requested allocation size. + OutOfAddressSpace(usize), /// The allocator has not yet been initialized. NotInitialized, } @@ -379,8 +359,7 @@ impl From for &'static str { fn from(alloc_err: AllocationError) -> &'static str { match alloc_err { AllocationError::AddressNotFree(..) => "address was in use or outside of this page allocator's range", - AllocationError::OutOfAddressSpace(_, Some(_range)) => "out of virtual address space in specified range", - AllocationError::OutOfAddressSpace(_, None) => "out of virtual address space", + AllocationError::OutOfAddressSpace(..) => "out of virtual address space", AllocationError::NotInitialized => "the page allocator has not yet been initialized", } } @@ -450,52 +429,38 @@ fn find_specific_chunk( /// Searches the given `list` for any chunk large enough to hold at least `num_pages`. /// -/// If a given range is specified, the returned `AllocatedPages` *must* exist -/// fully within that inclusive range of pages. -/// -/// If no range is specified, this function first attempts to find a suitable chunk -/// that is **not** within the designated regions, +/// It first attempts to find a suitable chunk **not** in the designated regions, /// and only allocates from the designated regions as a backup option. fn find_any_chunk( list: &mut StaticArrayRBTree, - num_pages: usize, - within_range: Option<&PageRange>, + num_pages: usize ) -> Result<(AllocatedPages, DeferredAllocAction<'static>), AllocationError> { - let designated_low_end = DESIGNATED_PAGES_LOW_END.get() - .ok_or(AllocationError::NotInitialized)?; - let full_range = PageRange::new(*designated_low_end + 1, DESIGNATED_PAGES_HIGH_START - 1); - let range = within_range.unwrap_or(&full_range); - - // During the first pass, we only search within the given range. - // If no range was given, we search from the end of the low designated region - // to the start of the high designated region. + let designated_low_end = DESIGNATED_PAGES_LOW_END.get().ok_or(AllocationError::NotInitialized)?; + + // During the first pass, we ignore designated regions. 
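// Aside: roughly the test that the first-pass search below applies to each candidate chunk,
// written as a self-contained helper on plain usize page numbers. The helper name and types
// are illustrative assumptions, not part of this patch: a chunk is usable in the first pass
// only if it is large enough and lies strictly between the low and high designated regions.
fn usable_in_first_pass(
    chunk: core::ops::RangeInclusive<usize>, // page numbers covered by one free chunk
    num_pages: usize,
    designated_low_end: usize,
    designated_high_start: usize,
) -> bool {
    let size = chunk.end() - chunk.start() + 1;
    size >= num_pages
        && *chunk.start() > designated_low_end
        && *chunk.end() < designated_high_start
}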
match list.0 { Inner::Array(ref mut arr) => { for elem in arr.iter_mut() { if let Some(chunk) = elem { - // Use max and min below to ensure that the range of pages we allocate from - // is within *both* the current chunk's bounds and the range's bounds. - let lowest_possible_start_page = *max(chunk.start(), range.start()); - let highest_possible_end_page = *min(chunk.end(), range.end()); - if lowest_possible_start_page + num_pages <= highest_possible_end_page { - return adjust_chosen_chunk( - lowest_possible_start_page, - num_pages, - &chunk.clone(), - ValueRefMut::Array(elem), - ); + // Skip chunks that are too-small or in the designated regions. + if chunk.size_in_pages() < num_pages || + chunk.start() <= designated_low_end || + chunk.end() >= &DESIGNATED_PAGES_HIGH_START + { + continue; + } + else { + return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::Array(elem)); } - - // The early static array is not sorted, so we must iterate over all elements. } } } Inner::RBTree(ref mut tree) => { // NOTE: if RBTree had a `range_mut()` method, we could simply do the following: // ``` - // let eligible_chunks = tree.range_mut( - // Bound::Included(range.start()), - // Bound::Included(range.end()) + // let eligible_chunks = tree.range( + // Bound::Excluded(&DESIGNATED_PAGES_LOW_END), + // Bound::Excluded(&DESIGNATED_PAGES_HIGH_START) // ); // for c in eligible_chunks { ... } // ``` @@ -505,35 +470,20 @@ fn find_any_chunk( // Because we allocate new pages by peeling them off from the beginning part of a chunk, // it's MUCH faster to start the search for free pages from higher addresses moving down. // This results in an O(1) allocation time in the general case, until all address ranges are already in use. - let mut cursor = tree.upper_bound_mut(Bound::Included(range.end())); + let mut cursor = tree.upper_bound_mut(Bound::Excluded(&DESIGNATED_PAGES_HIGH_START)); while let Some(chunk) = cursor.get().map(|w| w.deref()) { - // Use max and min below to ensure that the range of pages we allocate from - // is within *both* the current chunk's bounds and the range's bounds. - let lowest_possible_start_page = *max(chunk.start(), range.start()); - let highest_possible_end_page = *min(chunk.end(), range.end()); - if lowest_possible_start_page + num_pages <= highest_possible_end_page { - return adjust_chosen_chunk( - lowest_possible_start_page, - num_pages, - &chunk.clone(), - ValueRefMut::RBTree(cursor) - ); - } - - if chunk.start() <= range.start() { + if chunk.start() <= designated_low_end { break; // move on to searching through the designated regions } - warn!("page_allocator: unlikely scenario: had to search multiple chunks while trying to allocate {} pages in {:?}.", num_pages, range); + if num_pages < chunk.size_in_pages() { + return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::RBTree(cursor)); + } + warn!("Page allocator: unlikely scenario: had to search multiple chunks while trying to allocate {} pages at any address.", num_pages); cursor.move_prev(); } } } - // If we failed to find suitable pages within the given range, return an error. - if let Some(range) = within_range { - return Err(AllocationError::OutOfAddressSpace(num_pages, Some(range.clone()))); - } - // If we can't find any suitable chunks in the non-designated regions, then look in both designated regions. 
warn!("PageAllocator: unlikely scenario: non-designated chunks are all allocated, \ falling back to allocating {} pages from designated regions!", num_pages); @@ -560,8 +510,8 @@ fn find_any_chunk( // for c in eligible_chunks { ... } // ``` // - // RBTree doesn't have a `range_mut()` method, so we use cursors for two rounds of iteration. - // The first iterates over the lower designated region, from higher addresses to lower, down to zero. + // However, RBTree doesn't have a `range_mut()` method, so we use two sets of cursors for manual iteration. + // The first cursor iterates over the lower designated region, from higher addresses to lower, down to zero. let mut cursor = tree.upper_bound_mut(Bound::Included(designated_low_end)); while let Some(chunk) = cursor.get().map(|w| w.deref()) { if num_pages < chunk.size_in_pages() { @@ -570,7 +520,7 @@ fn find_any_chunk( cursor.move_prev(); } - // The second iterates over the higher designated region, from the highest (max) address down to the designated region boundary. + // The second cursor iterates over the higher designated region, from the highest (max) address down to the designated region boundary. let mut cursor = tree.upper_bound_mut::(Bound::Unbounded); while let Some(chunk) = cursor.get().map(|w| w.deref()) { if chunk.start() < &DESIGNATED_PAGES_HIGH_START { @@ -585,7 +535,7 @@ fn find_any_chunk( } } - Err(AllocationError::OutOfAddressSpace(num_pages, None)) + Err(AllocationError::OutOfAddressSpace(num_pages)) } @@ -674,7 +624,6 @@ fn adjust_chosen_chunk( pub fn allocate_pages_deferred( requested_vaddr: Option, num_pages: usize, - within_range: Option<&PageRange>, ) -> Result<(AllocatedPages, DeferredAllocAction<'static>), &'static str> { if num_pages == 0 { warn!("PageAllocator: requested an allocation of 0 pages... stupid!"); @@ -691,9 +640,8 @@ pub fn allocate_pages_deferred( if let Some(vaddr) = requested_vaddr { find_specific_chunk(&mut locked_list, Page::containing_address(vaddr), num_pages) } else { - find_any_chunk(&mut locked_list, num_pages, within_range) - } - .map_err(From::from) // convert from AllocationError to &str + find_any_chunk(&mut locked_list, num_pages) + }.map_err(From::from) // convert from AllocationError to &str } @@ -711,7 +659,7 @@ pub fn allocate_pages_by_bytes_deferred( num_bytes }; let num_pages = (actual_num_bytes + PAGE_SIZE - 1) / PAGE_SIZE; // round up - allocate_pages_deferred(requested_vaddr, num_pages, None) + allocate_pages_deferred(requested_vaddr, num_pages) } @@ -719,7 +667,7 @@ pub fn allocate_pages_by_bytes_deferred( /// /// See [`allocate_pages_deferred()`](fn.allocate_pages_deferred.html) for more details. pub fn allocate_pages(num_pages: usize) -> Option { - allocate_pages_deferred(None, num_pages, None) + allocate_pages_deferred(None, num_pages) .map(|(ap, _action)| ap) .ok() } @@ -751,30 +699,7 @@ pub fn allocate_pages_by_bytes_at(vaddr: VirtualAddress, num_bytes: usize) -> Re /// /// See [`allocate_pages_deferred()`](fn.allocate_pages_deferred.html) for more details. pub fn allocate_pages_at(vaddr: VirtualAddress, num_pages: usize) -> Result { - allocate_pages_deferred(Some(vaddr), num_pages, None) - .map(|(ap, _action)| ap) -} - - -/// Allocates the given number of pages with the constraint that -/// they must be within the given inclusive `range` of pages. 
-pub fn allocate_pages_in_range( - num_pages: usize, - range: &PageRange, -) -> Result { - allocate_pages_deferred(None, num_pages, Some(range)) - .map(|(ap, _action)| ap) -} - - -/// Allocates pages with a size given in number of bytes with the constraint that -/// they must be within the given inclusive `range` of pages. -pub fn allocate_pages_by_bytes_in_range( - num_bytes: usize, - range: &PageRange, -) -> Result { - let num_pages = (num_bytes + PAGE_SIZE - 1) / PAGE_SIZE; // round up - allocate_pages_deferred(None, num_pages, Some(range)) + allocate_pages_deferred(Some(vaddr), num_pages) .map(|(ap, _action)| ap) } From b32b88bebf6ace3bbf2f53485054ea8ecdda8553 Mon Sep 17 00:00:00 2001 From: Ramla Ijaz Date: Wed, 14 Jun 2023 08:05:57 -0400 Subject: [PATCH 20/23] retain my changes to memory --- kernel/memory/src/lib.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/kernel/memory/src/lib.rs b/kernel/memory/src/lib.rs index aeb0978929..fae1fc80e5 100644 --- a/kernel/memory/src/lib.rs +++ b/kernel/memory/src/lib.rs @@ -24,8 +24,15 @@ pub use self::paging::{ }; pub use memory_structs::*; -pub use page_allocator::*; -pub use frame_allocator::*; +pub use page_allocator::{ + AllocatedPages, allocate_pages, allocate_pages_at, + allocate_pages_by_bytes, allocate_pages_by_bytes_at, +}; + +pub use frame_allocator::{ + AllocatedFrames, MemoryRegionType, PhysicalMemoryRegion, + allocate_frames, allocate_frames_at, allocate_frames_by_bytes_at, allocate_frames_by_bytes, +}; #[cfg(target_arch = "x86_64")] use memory_x86_64::{ tlb_flush_virt_addr, tlb_flush_all, get_p4, find_section_memory_bounds, get_vga_mem_addr }; From a19984cc97761aebf74c4c4b78a3f83ebcfdb087 Mon Sep 17 00:00:00 2001 From: Ramla Ijaz Date: Wed, 14 Jun 2023 08:24:25 -0400 Subject: [PATCH 21/23] addressed clippy errors --- kernel/frame_allocator/src/frames.rs | 30 +++++++++++++--------------- kernel/frame_allocator/src/lib.rs | 10 ++-------- 2 files changed, 16 insertions(+), 24 deletions(-) diff --git a/kernel/frame_allocator/src/frames.rs b/kernel/frame_allocator/src/frames.rs index 46d76d3632..74c9bbb2b9 100644 --- a/kernel/frame_allocator/src/frames.rs +++ b/kernel/frame_allocator/src/frames.rs @@ -79,15 +79,14 @@ impl Frames<{FrameState::Unmapped}> { /// Creates a new Chunk from a TrustedChunk and a FrameRange. /// Only used within the allocated frames callback function. 
pub(crate) fn from_trusted_chunk(verified_chunk: TrustedChunk, frames: FrameRange, typ: MemoryRegionType) -> Self { - let f = Frames { + Frames { typ, frames, verified_chunk - }; + } // assert!(f.frames.start().number() == f.verified_chunk.start()); // assert!(f.frames.end().number() == f.verified_chunk.end()); // warn!("from trusted chunk: {:?}", f); - f } pub(crate) fn as_allocated_frames(self) -> AllocatedFrames { @@ -138,12 +137,11 @@ impl Frames { Ok(_) => { // use the newly merged TrustedChunk to update the frame range self.frames = into_frame_range(&self.verified_chunk.frames()); - core::mem::forget(other); + // core::mem::forget(other); // assert!(self.frames.start().number() == self.verified_chunk.start()); // assert!(self.frames.end().number() == self.verified_chunk.end()); //warn!("merge: {:?}", self); - - return Ok(()); + Ok(()) }, Err(other_verified_chunk) => { let _ = core::mem::replace(&mut other.verified_chunk, other_verified_chunk); @@ -152,7 +150,7 @@ impl Frames { // assert!(other.frames.start().number() == other.verified_chunk.start()); // assert!(other.frames.end().number() == other.verified_chunk.end()); - return Err(other); + Err(other) } } } @@ -186,26 +184,26 @@ impl Frames { }; let typ = self.typ; - core::mem::forget(self); + // core::mem::forget(self); let c1 = Self { typ, frames: into_frame_range(&new_allocation.frames()), verified_chunk: new_allocation }; - let c2 = before.and_then(|vchunk| - Some(Self{ + let c2 = before.map(|vchunk| + Self{ typ, frames: into_frame_range(&vchunk.frames()), verified_chunk: vchunk - }) + } ); - let c3 = after.and_then(|vchunk| - Some(Self{ + let c3 = after.map(|vchunk| + Self{ typ, frames: into_frame_range(&vchunk.frames()), verified_chunk: vchunk - }) + } ); // assert!(c1.frames.start().number() == c1.verified_chunk.start()); @@ -256,7 +254,7 @@ impl Frames { } }; - core::mem::forget(self); + // core::mem::forget(self); let c1 = Self { typ, @@ -320,5 +318,5 @@ fn into_frame_range(frames: &RangeInclusive) -> FrameRange { fn into_frame(frame_num: usize) -> Option { PhysicalAddress::new(frame_num * PAGE_SIZE) - .and_then(|addr| Some(Frame::containing_address(addr))) + .map(|addr| Frame::containing_address(addr)) } diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index 6b7c0e2fb9..3d038bbffc 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -796,18 +796,12 @@ fn retrieve_chunk_from_ref(mut chosen_chunk_ref: ValueRefMut c, RemovedValue::RBTree(option_chunk) => { option_chunk.map(|c| c.into_inner()) - // if let Some(boxed_chunk) = option_chunk { - // Some(boxed_chunk.into_inner()) - // } else { - // None - // } } - }; - chosen_chunk + } } /// The final part of the main allocation routine that splits the given chosen chunk From 1bd4facd007bc480edd108716695edc6c0bfddbb Mon Sep 17 00:00:00 2001 From: Ramla Ijaz Date: Wed, 14 Jun 2023 08:38:13 -0400 Subject: [PATCH 22/23] addressed clipp errors --- kernel/frame_allocator/src/frames.rs | 4 ++-- kernel/frame_allocator/src/lib.rs | 10 ++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/kernel/frame_allocator/src/frames.rs b/kernel/frame_allocator/src/frames.rs index 74c9bbb2b9..c7b9f2de72 100644 --- a/kernel/frame_allocator/src/frames.rs +++ b/kernel/frame_allocator/src/frames.rs @@ -89,7 +89,7 @@ impl Frames<{FrameState::Unmapped}> { // warn!("from trusted chunk: {:?}", f); } - pub(crate) fn as_allocated_frames(self) -> AllocatedFrames { + pub(crate) fn into_allocated_frames(self) -> 
AllocatedFrames { AllocatedFrames { frames: self, } @@ -318,5 +318,5 @@ fn into_frame_range(frames: &RangeInclusive) -> FrameRange { fn into_frame(frame_num: usize) -> Option { PhysicalAddress::new(frame_num * PAGE_SIZE) - .map(|addr| Frame::containing_address(addr)) + .map(Frame::containing_address) } diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index 3d038bbffc..213e62aa43 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -74,6 +74,8 @@ static GENERAL_REGIONS: Mutex> = Mutex::new(StaticArra /// rather just where they exist and which regions are known to this allocator. static RESERVED_REGIONS: Mutex> = Mutex::new(StaticArrayRBTree::empty()); +type IntoTrustedChunkFn = fn(RangeInclusive) -> TrustedChunk; +type IntoAllocatedFramesFn = fn(TrustedChunk, FrameRange) -> AllocatedFrames; /// Initialize the frame allocator with the given list of available and reserved physical memory regions. /// @@ -91,7 +93,7 @@ static RESERVED_REGIONS: Mutex> = Mutex::new(StaticArr pub fn init( free_physical_memory_areas: F, reserved_physical_memory_areas: R, -) -> Result<(fn(RangeInclusive) -> TrustedChunk, fn(TrustedChunk, FrameRange) -> AllocatedFrames), &'static str> +) -> Result<(IntoTrustedChunkFn, IntoAllocatedFramesFn), &'static str> where P: Borrow, F: IntoIterator, R: IntoIterator + Clone, @@ -675,7 +677,7 @@ fn find_specific_chunk( } Inner::RBTree(ref mut tree) => { let cursor_mut = tree.upper_bound_mut(Bound::Included(&requested_frame)); - if let Some(chunk) = cursor_mut.get().map(|w| w.deref().clone()) { + if let Some(chunk) = cursor_mut.get().map(|w| w.deref()) { if chunk.contains(&requested_frame) { if requested_end_frame <= *chunk.end() { return allocate_from_chosen_chunk(requested_frame, num_frames, ValueRefMut::RBTree(cursor_mut)); @@ -823,7 +825,7 @@ fn allocate_from_chosen_chunk( // if let RemovedValue::RBTree(Some(wrapper_adapter)) = _removed_chunk { ... } Ok(( - new_allocation.as_allocated_frames(), + new_allocation.into_allocated_frames(), DeferredAllocAction::new(before, after), )) @@ -849,7 +851,7 @@ fn adjust_chosen_chunk_contiguous( Ok(( - new_allocation.as_allocated_frames(), + new_allocation.into_allocated_frames(), DeferredAllocAction::new(before, after), )) } From 14efde91ae8325b66e1f3c7bfd068c7196dbcbbf Mon Sep 17 00:00:00 2001 From: ramla-i Date: Tue, 20 Jun 2023 15:48:15 -0400 Subject: [PATCH 23/23] merged AllocatedFrames into Frames --- kernel/frame_allocator/Cargo.toml | 1 + kernel/frame_allocator/src/frames.rs | 253 +++++++++++++++++++++++---- kernel/frame_allocator/src/lib.rs | 248 +++----------------------- kernel/frame_allocator/src/test.rs | 12 +- kernel/memory/src/paging/mapper.rs | 8 +- kernel/memory/src/paging/mod.rs | 6 +- kernel/memory/src/paging/table.rs | 2 +- kernel/page_table_entry/src/lib.rs | 4 +- 8 files changed, 254 insertions(+), 280 deletions(-) diff --git a/kernel/frame_allocator/Cargo.toml b/kernel/frame_allocator/Cargo.toml index a7e8d0277c..b8c2a9ce70 100644 --- a/kernel/frame_allocator/Cargo.toml +++ b/kernel/frame_allocator/Cargo.toml @@ -22,5 +22,6 @@ path = "../memory_structs" [dependencies.trusted_chunk] path = "../../libs/trusted_chunk" + [lib] crate-type = ["rlib"] diff --git a/kernel/frame_allocator/src/frames.rs b/kernel/frame_allocator/src/frames.rs index c7b9f2de72..fedf398895 100644 --- a/kernel/frame_allocator/src/frames.rs +++ b/kernel/frame_allocator/src/frames.rs @@ -1,16 +1,16 @@ //! A range of unmapped frames that stores a verified `TrustedChunk`. //! 
A `Frames` object is uncloneable and is the only way to access the range of frames it references. -//! -//! To Do: Merge AllocatedFrames into this typestate as well. use kernel_config::memory::PAGE_SIZE; use memory_structs::{FrameRange, Frame, PhysicalAddress}; use range_inclusive::RangeInclusive; -use crate::{MemoryRegionType, AllocatedFrames}; -use core::{borrow::Borrow, cmp::Ordering, ops::{Deref, DerefMut}}; +use crate::{MemoryRegionType, contains_any, RESERVED_REGIONS, FREE_GENERAL_FRAMES_LIST, FREE_RESERVED_FRAMES_LIST}; +use core::{borrow::Borrow, cmp::Ordering, ops::{Deref, DerefMut}, fmt}; use spin::Mutex; use trusted_chunk::trusted_chunk::*; +pub type AllocatedFrames = Frames<{FrameState::Unmapped}>; + static CHUNK_ALLOCATOR: Mutex = Mutex::new(TrustedChunkAllocator::new()); pub(crate) fn switch_chunk_allocator_to_heap_structure() { @@ -21,6 +21,7 @@ pub(crate) fn switch_chunk_allocator_to_heap_structure() { #[derive(PartialEq, Eq)] pub enum FrameState { Unmapped, + Mapped } /// A range of contiguous frames. @@ -28,6 +29,13 @@ pub enum FrameState { /// The `verified_chunk` field is a verified `TrustedChunk` that stores the actual frames, /// and has the invariant that it does not overlap with any other `TrustedChunk` created by the /// `CHUNK_ALLOCATOR`. +/// +/// The frames can be in an unmapped or mapped state. In the unmapped state, the frames are not +/// immediately accessible because they're not yet mapped by any virtual memory pages. +/// They are converted into a mapped state once they are used to create a `MappedPages` object. +/// +/// When a `Frames` object in an unmapped state is dropped, it is deallocated and returned to the free frames list. +/// We expect that `Frames` in a mapped state will never be dropped, but instead will be forgotten. /// /// # Ordering and Equality /// @@ -38,7 +46,7 @@ pub enum FrameState { /// both of which are also based ONLY on the **starting** `Frame` of the `Frames`. /// Thus, comparing two `Frames` with the `==` or `!=` operators may not work as expected. /// since it ignores their actual range of frames. -#[derive(Debug, Eq)] +#[derive(Eq)] pub struct Frames { /// The type of this memory chunk, e.g., whether it's in a free or reserved region. typ: MemoryRegionType, @@ -50,9 +58,15 @@ pub struct Frames { verified_chunk: TrustedChunk } +// Frames must not be Cloneable, and it must not expose its inner frames as mutable. assert_not_impl_any!(Frames<{FrameState::Unmapped}>: DerefMut, Clone); +assert_not_impl_any!(Frames<{FrameState::Mapped}>: DerefMut, Clone); + impl Frames<{FrameState::Unmapped}> { + /// Creates a new `Frames` object in an unmapped state. + /// If `frames` is empty, there is no space to store the new `Frames` information pre-heap intialization, + /// or a `TrustedChunk` already exists which overlaps with the given `frames`, then an error is returned. pub(crate) fn new(typ: MemoryRegionType, frames: FrameRange) -> Result { let verified_chunk = CHUNK_ALLOCATOR.lock().create_chunk(frames.to_range_inclusive()) .map(|(chunk, _)| chunk) @@ -72,30 +86,170 @@ impl Frames<{FrameState::Unmapped}> { frames, verified_chunk }; - //warn!("new frames: {:?}", f); + // warn!("NEW FRAMES: {:?}", f); Ok(f) } /// Creates a new Chunk from a TrustedChunk and a FrameRange. + /// It is expected that the range of `verified_chunk` is equal to `frames`. /// Only used within the allocated frames callback function. 
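// Aside: the "allocated frames callback function" mentioned above follows a simple
// register-a-function-pointer pattern, sketched here in a self-contained form. All names
// below are assumptions for illustration (this is not the real `page_table_entry` API),
// and the sketch assumes the `spin` crate for the `Once` cell.
use spin::Once;

pub struct AllocatedRange(core::ops::RangeInclusive<usize>);

// Lives in the lower-level crate; starts out empty until the allocator registers a converter.
static INTO_ALLOCATED: Once<fn(core::ops::RangeInclusive<usize>) -> AllocatedRange> = Once::new();

// The allocator crate registers its conversion function exactly once during init,
// which breaks the build-time dependency cycle between the two crates.
pub fn register_into_allocated(f: fn(core::ops::RangeInclusive<usize>) -> AllocatedRange) {
    INTO_ALLOCATED.call_once(|| f);
}

// Later, frames unmapped from an exclusive mapping can be handed back through the callback.
pub fn reclaim(frames: core::ops::RangeInclusive<usize>) -> Option<AllocatedRange> {
    INTO_ALLOCATED.get().map(|convert| convert(frames))
}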
pub(crate) fn from_trusted_chunk(verified_chunk: TrustedChunk, frames: FrameRange, typ: MemoryRegionType) -> Self { - Frames { + let f = Frames { typ, frames, verified_chunk - } + }; + // assert!(f.frames.start().number() == f.verified_chunk.start()); // assert!(f.frames.end().number() == f.verified_chunk.end()); - // warn!("from trusted chunk: {:?}", f); + // warn!("FROM TRUSTED CHUNK: {:?}", f); + f + } + + /// Consumes the `Frames` in an unmapped state and converts them to `Frames` in a mapped state. + /// This should only be called once a `MappedPages` has been created from the `Frames`. + pub fn into_mapped_frames(mut self) -> Frames<{FrameState::Mapped}> { + let typ = self.typ; + let (frame_range, chunk) = self.replace_with_empty(); + core::mem::forget(self); + + Frames { + typ: typ, + frames: frame_range, + verified_chunk: chunk + } } - pub(crate) fn into_allocated_frames(self) -> AllocatedFrames { - AllocatedFrames { - frames: self, + /// Returns an `UnmappedFrame` if this `Frames<{FrameState::Unmapped}>` object contains only one frame. + /// + /// ## Panic + /// Panics if this `AllocatedFrame` contains multiple frames or zero frames. + pub fn as_unmapped_frame(&self) -> UnmappedFrame { + assert!(self.size_in_frames() == 1); + UnmappedFrame { + frame: *self.start(), + _phantom: core::marker::PhantomData, + } + } +} + + +/// This function is a callback used to convert `UnmappedFrames` into `Frames<{FrameState::Unmapped}>`. +/// `UnmappedFrames` represents frames that have been unmapped from a page that had +/// exclusively mapped them, indicating that no others pages have been mapped +/// to those same frames, and thus, they can be safely deallocated. +/// +/// This exists to break the cyclic dependency cycle between this crate and +/// the `page_table_entry` crate, since `page_table_entry` must depend on types +/// from this crate in order to enforce safety when modifying page table entries. +pub(crate) fn into_allocated_frames(tc: TrustedChunk, frames: FrameRange) -> Frames<{FrameState::Unmapped}> { + let typ = if contains_any(&RESERVED_REGIONS.lock(), &frames) { + MemoryRegionType::Reserved + } else { + MemoryRegionType::Free + }; + Frames::from_trusted_chunk(tc, frames, typ) +} + +impl Drop for Frames { + fn drop(&mut self) { + match S { + FrameState::Unmapped => { + if self.size_in_frames() == 0 { return; } + // trace!("FRAMES DROP {:?}", self); + + let (frames, verified_chunk) = self.replace_with_empty(); + let unmapped_frames: Frames<{FrameState::Unmapped}> = Frames { + typ: self.typ, + frames, + verified_chunk, + }; + + // Should we remove these lines since we store the typ in Frames? + let (list, _typ) = if contains_any(&RESERVED_REGIONS.lock(), &unmapped_frames) { + (&FREE_RESERVED_FRAMES_LIST, MemoryRegionType::Reserved) + } else { + (&FREE_GENERAL_FRAMES_LIST, MemoryRegionType::Free) + }; + + // Simply add the newly-deallocated chunk to the free frames list. + let mut locked_list = list.lock(); + let res = locked_list.insert(unmapped_frames); + match res { + Ok(_inserted_free_chunk) => (), + Err(c) => error!("BUG: couldn't insert deallocated chunk {:?} into free frame list", c), + } + + // Here, we could optionally use above `_inserted_free_chunk` to merge the adjacent (contiguous) chunks + // before or after the newly-inserted free chunk. + // However, there's no *need* to do so until we actually run out of address space or until + // a requested address is in a chunk that needs to be merged. + // Thus, for performance, we save that for those future situations. 
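// Aside: an eager version of that deferred merge would look roughly like the hypothetical,
// commented-out sketch below, reusing the `merge` method defined later in this file to
// coalesce the newly inserted chunk with a contiguous neighbor:
//
//     if let Some(next) = locked_list.remove_contiguous_after(&_inserted_free_chunk) {
//         let _ = _inserted_free_chunk.merge(next); // contiguous by construction
//     }
//
// `remove_contiguous_after` is an imagined helper, not an existing method; as explained
// above, the deferred approach avoids paying this cost on every deallocation.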
+ } + FrameState::Mapped => panic!("We should never drop a mapped frame! It should be forgotten instead."), + } + } +} + +impl<'f> IntoIterator for &'f Frames<{FrameState::Unmapped}> { + type IntoIter = UnmappedFramesIter<'f>; + type Item = UnmappedFrame<'f>; + fn into_iter(self) -> Self::IntoIter { + UnmappedFramesIter { + _owner: self, + range: self.frames.clone().into_iter(), } } } +/// An iterator over each [`UnmappedFrame`] in a range of [`Frames<{FrameState::Unmapped}>`]. +/// +/// To Do: Description is no longer valid, since we have an iterator for RangeInclusive now. +/// but I still think it's useful to have a `Frames<{FrameState::Unmapped}>` iterator that ties the lifetime +/// of the `UnmappedFrame` to the original object. +/// +/// We must implement our own iterator type here in order to tie the lifetime `'f` +/// of a returned `UnmappedFrame<'f>` type to the lifetime of its containing `Frames<{FrameState::Unmapped}>`. +/// This is because the underlying type of `Frames<{FrameState::Unmapped}>` is a [`FrameRange`], +/// which itself is a [`core::ops::RangeInclusive`] of [`Frame`]s, and unfortunately the +/// `RangeInclusive` type doesn't implement an immutable iterator. +/// +/// Iterating through a `RangeInclusive` actually modifies its own internal range, +/// so we must avoid doing that because it would break the semantics of a `FrameRange`. +/// In fact, this is why [`FrameRange`] only implements `IntoIterator` but +/// does not implement [`Iterator`] itself. +pub struct UnmappedFramesIter<'f> { + _owner: &'f Frames<{FrameState::Unmapped}>, + range: range_inclusive::RangeInclusiveIterator, +} +impl<'f> Iterator for UnmappedFramesIter<'f> { + type Item = UnmappedFrame<'f>; + fn next(&mut self) -> Option { + self.range.next().map(|frame| + UnmappedFrame { + frame, _phantom: core::marker::PhantomData, + } + ) + } +} + +/// A reference to a single frame within a range of `Frames<{FrameState::Unmapped}>`. +/// +/// The lifetime of this type is tied to the lifetime of its owning `Frames<{FrameState::Unmapped}>`. +#[derive(Debug)] +pub struct UnmappedFrame<'f> { + frame: Frame, + _phantom: core::marker::PhantomData<&'f Frame>, +} +impl<'f> Deref for UnmappedFrame<'f> { + type Target = Frame; + fn deref(&self) -> &Self::Target { + &self.frame + } +} +assert_not_impl_any!(UnmappedFrame: DerefMut, Clone); + + impl Frames { #[allow(dead_code)] pub(crate) fn frames(&self) -> FrameRange { @@ -107,6 +261,7 @@ impl Frames { } /// Returns a new `Frames` with an empty range of frames. + /// Can be used as a placeholder, but will not permit any real usage. pub const fn empty() -> Frames { Frames { typ: MemoryRegionType::Unknown, @@ -115,6 +270,15 @@ impl Frames { } } + /// Returns the `frames` and `verified_chunk` fields of this `Frames` object, + /// and replaces them with an empty range of frames and an empty `TrustedChunk`. + /// It's a convenience function to make sure these two fields are always changed together. + fn replace_with_empty(&mut self) -> (FrameRange, TrustedChunk) { + let chunk = core::mem::replace(&mut self.verified_chunk, TrustedChunk::empty()); + let frame_range = core::mem::replace(&mut self.frames, FrameRange::empty()); + (frame_range, chunk) + } + /// Merges the given `Frames` object `other` into this `Frames` object (`self`). /// This is just for convenience and usability purposes, it performs no allocation or remapping. 
/// @@ -124,12 +288,13 @@ impl Frames { /// If either of those conditions are met, `self` is modified and `Ok(())` is returned, /// otherwise `Err(other)` is returned. pub fn merge(&mut self, mut other: Self) -> Result<(), Self> { + // To Do: Check if we actually need this or does the verified merge function take care of this condition if self.is_empty() || other.is_empty() { return Err(other); } // take out the TrustedChunk from other - let other_verified_chunk = core::mem::replace(&mut other.verified_chunk, TrustedChunk::empty()); + let (other_frame_range, other_verified_chunk) = other.replace_with_empty(); // merged the other TrustedChunk with self // failure here means that the chunks cannot be merged @@ -137,14 +302,16 @@ impl Frames { Ok(_) => { // use the newly merged TrustedChunk to update the frame range self.frames = into_frame_range(&self.verified_chunk.frames()); - // core::mem::forget(other); + core::mem::forget(other); // assert!(self.frames.start().number() == self.verified_chunk.start()); // assert!(self.frames.end().number() == self.verified_chunk.end()); - //warn!("merge: {:?}", self); + // warn!("merge: {:?}", self); Ok(()) }, Err(other_verified_chunk) => { - let _ = core::mem::replace(&mut other.verified_chunk, other_verified_chunk); + other.frames = other_frame_range; + other.verified_chunk = other_verified_chunk; + // assert!(self.frames.start().number() == self.verified_chunk.start()); // assert!(self.frames.end().number() == self.verified_chunk.end()); @@ -155,12 +322,15 @@ impl Frames { } } - /// An inner function that breaks up the given `Frames` into multiple smaller `Frames`. + /// Splits up the given `Frames` into multiple smaller `Frames`. /// /// Returns a tuple of three `Frames`: /// 1. The `Frames` containing the requested range of frames starting at `start_frame`. /// 2. The range of frames in the `self` that came before the beginning of the requested frame range. /// 3. The range of frames in the `self` that came after the end of the requested frame range. + /// + /// If `start_frame` is not contained within `self` or `num_frames` results in an end frame greater than the end of `self`, + /// then `self` is not changed and we return (self, None, None). 
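// Aside: the same three-way split performed on plain usize ranges, as a self-contained
// illustration of the contract documented above. The helper name and plain-range types are
// assumptions for this sketch, not part of this patch; `num` is assumed to be at least 1.
fn split_plain(
    range: core::ops::RangeInclusive<usize>,
    start: usize,
    num: usize,
) -> Result<
    (Option<core::ops::RangeInclusive<usize>>, core::ops::RangeInclusive<usize>, Option<core::ops::RangeInclusive<usize>>),
    core::ops::RangeInclusive<usize>,
> {
    let end = start + num - 1;
    if start < *range.start() || end > *range.end() {
        return Err(range); // mirrors handing `self` back unchanged
    }
    let before = (start > *range.start()).then(|| *range.start()..=start - 1);
    let after = (end < *range.end()).then(|| end + 1..=*range.end());
    Ok((before, start..=end, after))
}
// e.g. split_plain(0x100..=0x1FF, 0x180, 0x40)
//      == Ok((Some(0x100..=0x17F), 0x180..=0x1BF, Some(0x1C0..=0x1FF)))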
pub fn split( mut self, start_frame: Frame, @@ -171,36 +341,35 @@ impl Frames { } // take out the TrustedChunk - let verified_chunk = core::mem::replace(&mut self.verified_chunk, TrustedChunk::empty()); + let (frame_range, verified_chunk) = self.replace_with_empty(); let (before, new_allocation, after) = match verified_chunk.split(start_frame.number(), num_frames) { Ok(x) => x, Err(vchunk) => { - let _ = core::mem::replace(&mut self.verified_chunk, vchunk); + self.frames = frame_range; + self.verified_chunk = vchunk; + // assert!(self.frames.start().number() == self.verified_chunk.start()); // assert!(self.frames.end().number() == self.verified_chunk.end()); return (self, None, None); } }; - - let typ = self.typ; - // core::mem::forget(self); let c1 = Self { - typ, + typ: self.typ, frames: into_frame_range(&new_allocation.frames()), verified_chunk: new_allocation }; let c2 = before.map(|vchunk| Self{ - typ, + typ: self.typ, frames: into_frame_range(&vchunk.frames()), verified_chunk: vchunk } ); let c3 = after.map(|vchunk| Self{ - typ, + typ: self.typ, frames: into_frame_range(&vchunk.frames()), verified_chunk: vchunk } @@ -218,7 +387,8 @@ impl Frames { // assert!(c.frames.start().number() == c.verified_chunk.start()); // assert!(c.frames.end().number() == c.verified_chunk.end()); // } - //warn!("split: {:?} {:?} {:?}", c1, c2, c3); + // warn!("split: {:?} {:?} {:?}", c1, c2, c3); + core::mem::forget(self); (c1, c2, c3) } @@ -239,30 +409,29 @@ impl Frames { if self.is_empty() { return Err(self); } - let typ = self.typ; // take out the TrustedChunk - let verified_chunk = core::mem::replace(&mut self.verified_chunk, TrustedChunk::empty()); + let (frame_range, verified_chunk) = self.replace_with_empty(); let (first, second) = match verified_chunk.split_at(at_frame.number()){ Ok((first, second)) => (first, second), Err(vchunk) => { - let _ = core::mem::replace(&mut self.verified_chunk, vchunk); + self.frames = frame_range; + self.verified_chunk = vchunk; + // assert!(self.frames.start().number() == self.verified_chunk.start()); // assert!(self.frames.end().number() == self.verified_chunk.end()); return Err(self); } }; - - // core::mem::forget(self); let c1 = Self { - typ, + typ: self.typ, frames: into_frame_range(&first.frames()), verified_chunk: first }; let c2 = Self { - typ, + typ: self.typ, frames: into_frame_range(&second.frames()), verified_chunk: second }; @@ -273,7 +442,8 @@ impl Frames { // assert!(c2.frames.start().number() == c2.verified_chunk.start()); // assert!(c2.frames.end().number() == c2.verified_chunk.end()); - //warn!("split at: {:?} {:?}", c1, c2); + // warn!("split at: {:?} {:?}", c1, c2); + core::mem::forget(self); Ok((c1, c2)) } @@ -295,17 +465,30 @@ impl PartialOrd for Frames { Some(self.cmp(other)) } } +// To Do: will this be an issue as now this applies to Chunk as well as AllocatedFrames +#[cfg(not(test))] impl PartialEq for Frames { fn eq(&self, other: &Self) -> bool { self.frames.start() == other.frames.start() } } +#[cfg(test)] +impl PartialEq for Frames { + fn eq(&self, other: &Self) -> bool { + self.frames == other.frames + } +} impl Borrow for &'_ Frames { fn borrow(&self) -> &Frame { self.frames.start() } } - +impl fmt::Debug for Frames { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Frames({:?}, {:?}, TrustedChunk:{{ start: {:#X}, end: {:#X} }})", self.typ, self.frames, + self.verified_chunk.frames().start() * PAGE_SIZE, self.verified_chunk.frames().end()* PAGE_SIZE) + } +} fn into_frame_range(frames: &RangeInclusive) -> FrameRange { 
let start = into_frame(*frames.start()) diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs index 213e62aa43..14711be342 100644 --- a/kernel/frame_allocator/src/lib.rs +++ b/kernel/frame_allocator/src/lib.rs @@ -33,7 +33,6 @@ extern crate spin; extern crate intrusive_collections; extern crate range_inclusive; extern crate trusted_chunk; - #[cfg(test)] mod test; @@ -41,7 +40,7 @@ mod static_array_rb_tree; // mod static_array_linked_list; mod frames; -use core::{borrow::Borrow, cmp::{Ordering, min, max}, fmt, ops::{Deref, DerefMut}, marker::PhantomData}; +use core::{borrow::Borrow, cmp::{Ordering, min, max}, ops::Deref}; use frames::*; use kernel_config::memory::*; use memory_structs::{PhysicalAddress, Frame, FrameRange}; @@ -49,8 +48,8 @@ use spin::Mutex; use intrusive_collections::Bound; use static_array_rb_tree::*; use trusted_chunk::trusted_chunk::TrustedChunk; -use range_inclusive::{RangeInclusive, RangeInclusiveIterator}; - +use range_inclusive::RangeInclusive; +pub use frames::{AllocatedFrames, UnmappedFrame}; const FRAME_SIZE: usize = PAGE_SIZE; #[allow(dead_code)] @@ -75,7 +74,7 @@ static GENERAL_REGIONS: Mutex> = Mutex::new(StaticArra static RESERVED_REGIONS: Mutex> = Mutex::new(StaticArrayRBTree::empty()); type IntoTrustedChunkFn = fn(RangeInclusive) -> TrustedChunk; -type IntoAllocatedFramesFn = fn(TrustedChunk, FrameRange) -> AllocatedFrames; +type IntoAllocatedFramesFn = fn(TrustedChunk, FrameRange) -> Frames<{FrameState::Unmapped}>; /// Initialize the frame allocator with the given list of available and reserved physical memory regions. /// @@ -89,7 +88,7 @@ type IntoAllocatedFramesFn = fn(TrustedChunk, FrameRange) -> AllocatedFrames; /// ## Return /// Upon success, this function returns a callback function that allows the caller /// (the memory subsystem init function) to convert a range of unmapped frames -/// back into an [`AllocatedFrames`] object. +/// back into an [`Frames<{FrameState::Unmapped}>`] object. pub fn init( free_physical_memory_areas: F, reserved_physical_memory_areas: R, @@ -198,7 +197,7 @@ pub fn init( *RESERVED_REGIONS.lock() = StaticArrayRBTree::new(reserved_list); // Register the callbacks to create a TrustedChunk and AllocatedFrames from an unmapped PTE - Ok((trusted_chunk::init()?, into_allocated_frames)) + Ok((trusted_chunk::init()?, frames::into_allocated_frames)) } @@ -356,211 +355,6 @@ impl Borrow for &'_ Region { } -/// Represents a range of allocated physical memory [`Frame`]s; derefs to [`FrameRange`]. -/// -/// These frames are not immediately accessible because they're not yet mapped -/// by any virtual memory pages. -/// You must do that separately in order to create a `MappedPages` type, -/// which can then be used to access the contents of these frames. -/// -/// This object represents ownership of the range of allocated physical frames; -/// if this object falls out of scope, its allocated frames will be auto-deallocated upon drop. -pub struct AllocatedFrames { - frames: Frames<{FrameState::Unmapped}>, -} - -// AllocatedFrames must not be Cloneable, and it must not expose its inner frames as mutable. 
-assert_not_impl_any!(AllocatedFrames: DerefMut, Clone); - -impl Deref for AllocatedFrames { - type Target = FrameRange; - fn deref(&self) -> &FrameRange { - &self.frames - } -} -impl fmt::Debug for AllocatedFrames { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "AllocatedFrames({:?})", self.frames) - } -} - -impl AllocatedFrames { - /// Returns an empty AllocatedFrames object that performs no frame allocation. - /// Can be used as a placeholder, but will not permit any real usage. - pub const fn empty() -> AllocatedFrames { - AllocatedFrames { - frames: Frames::empty() - } - } - - /// Merges the given `AllocatedFrames` object `other` into this `AllocatedFrames` object (`self`). - /// This is just for convenience and usability purposes, it performs no allocation or remapping. - /// - /// The given `other` must be physically contiguous with `self`, i.e., come immediately before or after `self`. - /// That is, either `self.start == other.end + 1` or `self.end + 1 == other.start` must be true. - /// - /// If either of those conditions are met, `self` is modified and `Ok(())` is returned, - /// otherwise `Err(other)` is returned. - pub fn merge(&mut self, mut other: AllocatedFrames) -> Result<(), AllocatedFrames> { - let chunk = core::mem::replace(&mut other.frames, Frames::empty()); - match self.frames.merge(chunk) { - Ok(_) => { - // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. - core::mem::forget(other); - Ok(()) - }, - Err(other_chunk) => { - Err(AllocatedFrames{frames: other_chunk}) - } - } - } - - /// Splits this `AllocatedFrames` into two separate `AllocatedFrames` objects: - /// * `[beginning : at_frame - 1]` - /// * `[at_frame : end]` - /// - /// This function follows the behavior of [`core::slice::split_at()`], - /// thus, either one of the returned `AllocatedFrames` objects may be empty. - /// * If `at_frame == self.start`, the first returned `AllocatedFrames` object will be empty. - /// * If `at_frame == self.end + 1`, the second returned `AllocatedFrames` object will be empty. - /// - /// Returns an `Err` containing this `AllocatedFrames` if `at_frame` is otherwise out of bounds. - /// - /// [`core::slice::split_at()`]: https://doc.rust-lang.org/core/primitive.slice.html#method.split_at - pub fn split_at(mut self, at_frame: Frame) -> Result<(AllocatedFrames, AllocatedFrames), AllocatedFrames> { - let chunk = core::mem::replace(&mut self.frames, Frames::empty()); - match chunk.split_at(at_frame) { - Ok((chunk1, chunk2)) => { - // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. - core::mem::forget(self); - Ok(( - AllocatedFrames{frames: chunk1}, - AllocatedFrames{frames: chunk2} - )) - }, - Err(chunk_not_split) => { - Err(AllocatedFrames{frames: chunk_not_split}) - } - } - } - - /// Returns an `AllocatedFrame` if this `AllocatedFrames` object contains only one frame. - /// - /// ## Panic - /// Panics if this `AllocatedFrame` contains multiple frames or zero frames. - pub fn as_allocated_frame(&self) -> AllocatedFrame { - assert!(self.size_in_frames() == 1); - AllocatedFrame { - frame: *self.start(), - _phantom: PhantomData, - } - } -} - -/// This function is a callback used to convert `UnmappedFrames` into `AllocatedFrames`. -/// `UnmappedFrames` represents frames that have been unmapped from a page that had -/// exclusively mapped them, indicating that no others pages have been mapped -/// to those same frames, and thus, they can be safely deallocated. 
-/// -/// This exists to break the cyclic dependency cycle between this crate and -/// the `page_table_entry` crate, since `page_table_entry` must depend on types -/// from this crate in order to enforce safety when modifying page table entries. -fn into_allocated_frames(tc: TrustedChunk, frames: FrameRange) -> AllocatedFrames { - let typ = if contains_any(&RESERVED_REGIONS.lock(), &frames) { - MemoryRegionType::Reserved - } else { - MemoryRegionType::Free - }; - AllocatedFrames { frames: Frames::from_trusted_chunk(tc, frames, typ) } -} - -impl Drop for AllocatedFrames { - fn drop(&mut self) { - if self.size_in_frames() == 0 { return; } - - // Should we remove these lines since we store the typ in Frames? - let (list, _typ) = if contains_any(&RESERVED_REGIONS.lock(), &self.frames) { - (&FREE_RESERVED_FRAMES_LIST, MemoryRegionType::Reserved) - } else { - (&FREE_GENERAL_FRAMES_LIST, MemoryRegionType::Free) - }; - // trace!("frame_allocator: deallocating {:?}, typ {:?}", self, typ); - - // Simply add the newly-deallocated chunk to the free frames list. - let mut locked_list = list.lock(); - let res = locked_list.insert(core::mem::replace(&mut self.frames, Frames::empty())); - match res { - Ok(_inserted_free_chunk) => (), - Err(c) => error!("BUG: couldn't insert deallocated chunk {:?} into free frame list", c), - } - - // Here, we could optionally use above `_inserted_free_chunk` to merge the adjacent (contiguous) chunks - // before or after the newly-inserted free chunk. - // However, there's no *need* to do so until we actually run out of address space or until - // a requested address is in a chunk that needs to be merged. - // Thus, for performance, we save that for those future situations. - } -} - -impl<'f> IntoIterator for &'f AllocatedFrames { - type IntoIter = AllocatedFramesIter<'f>; - type Item = AllocatedFrame<'f>; - fn into_iter(self) -> Self::IntoIter { - AllocatedFramesIter { - _owner: self, - range: self.frames.clone().into_iter(), - } - } -} - -/// An iterator over each [`AllocatedFrame`] in a range of [`AllocatedFrames`]. -/// -/// To Do: Description is no longer valid, since we have an iterator for RangeInclusive now. -/// but I still think it's useful to thave an `AllocatedFrames` iterator that ties the lifetime -/// of the `AllocatedFrame` to the original object. -/// -/// We must implement our own iterator type here in order to tie the lifetime `'f` -/// of a returned `AllocatedFrame<'f>` type to the lifetime of its containing `AllocatedFrames`. -/// This is because the underlying type of `AllocatedFrames` is a [`FrameRange`], -/// which itself is a [`core::ops::RangeInclusive`] of [`Frame`]s, and unfortunately the -/// `RangeInclusive` type doesn't implement an immutable iterator. -/// -/// Iterating through a `RangeInclusive` actually modifies its own internal range, -/// so we must avoid doing that because it would break the semantics of a `FrameRange`. -/// In fact, this is why [`FrameRange`] only implements `IntoIterator` but -/// does not implement [`Iterator`] itself. -pub struct AllocatedFramesIter<'f> { - _owner: &'f AllocatedFrames, - range: RangeInclusiveIterator, -} -impl<'f> Iterator for AllocatedFramesIter<'f> { - type Item = AllocatedFrame<'f>; - fn next(&mut self) -> Option { - self.range.next().map(|frame| - AllocatedFrame { - frame, _phantom: PhantomData, - } - ) - } -} - -/// A reference to a single frame within a range of `AllocatedFrames`. -/// -/// The lifetime of this type is tied to the lifetime of its owning `AllocatedFrames`. 
-#[derive(Debug)] -pub struct AllocatedFrame<'f> { - frame: Frame, - _phantom: PhantomData<&'f Frame>, -} -impl<'f> Deref for AllocatedFrame<'f> { - type Target = Frame; - fn deref(&self) -> &Self::Target { - &self.frame - } -} -assert_not_impl_any!(AllocatedFrame: DerefMut, Clone); - - /// A series of pending actions related to frame allocator bookkeeping, /// which may result in heap allocation. /// @@ -659,7 +453,7 @@ fn find_specific_chunk( list: &mut StaticArrayRBTree>, requested_frame: Frame, num_frames: usize -) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { +) -> Result<(Frames<{FrameState::Unmapped}>, DeferredAllocAction<'static>), AllocationError> { // The end frame is an inclusive bound, hence the -1. Parentheses are needed to avoid overflow. let requested_end_frame = requested_frame + (num_frames - 1); @@ -750,7 +544,7 @@ fn find_specific_chunk( fn find_any_chunk( list: &mut StaticArrayRBTree>, num_frames: usize -) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { +) -> Result<(Frames<{FrameState::Unmapped}>, DeferredAllocAction<'static>), AllocationError> { // During the first pass, we ignore designated regions. match list.0 { Inner::Array(ref mut arr) => { @@ -809,13 +603,13 @@ fn retrieve_chunk_from_ref(mut chosen_chunk_ref: ValueRefMut` /// from (part of) that chunk, ranging from `start_frame` to `start_frame + num_frames`. fn allocate_from_chosen_chunk( start_frame: Frame, num_frames: usize, chosen_chunk_ref: ValueRefMut>, -) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { +) -> Result<(Frames<{FrameState::Unmapped}>, DeferredAllocAction<'static>), AllocationError> { // Remove the chosen chunk from the free frame list. let chosen_chunk = retrieve_chunk_from_ref(chosen_chunk_ref).ok_or(AllocationError::ChunkRemovalFailed)?; @@ -825,7 +619,7 @@ fn allocate_from_chosen_chunk( // if let RemovedValue::RBTree(Some(wrapper_adapter)) = _removed_chunk { ... } Ok(( - new_allocation.into_allocated_frames(), + new_allocation, //.into_allocated_frames(), DeferredAllocAction::new(before, after), )) @@ -838,7 +632,7 @@ fn adjust_chosen_chunk_contiguous( num_frames: usize, mut initial_chunk: Frames<{FrameState::Unmapped}>, contiguous_chunk_ref: ValueRefMut>, -) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> { +) -> Result<(Frames<{FrameState::Unmapped}>, DeferredAllocAction<'static>), AllocationError> { let contiguous_chunk = retrieve_chunk_from_ref(contiguous_chunk_ref).ok_or(AllocationError::ChunkRemovalFailed)?; initial_chunk.merge(contiguous_chunk).map_err(|_| { @@ -851,7 +645,7 @@ fn adjust_chosen_chunk_contiguous( Ok(( - new_allocation.into_allocated_frames(), + new_allocation, //.into_allocated_frames(), DeferredAllocAction::new(before, after), )) } @@ -1000,14 +794,14 @@ fn add_reserved_region_to_region_list( /// optionally at the requested starting `PhysicalAddress`. /// /// This simply reserves a range of frames; it does not perform any memory mapping. -/// Thus, the memory represented by the returned `AllocatedFrames` isn't directly accessible +/// Thus, the memory represented by the returned `Frames<{FrameState::Unmapped}>` isn't directly accessible /// until you map virtual pages to them. /// /// Allocation is based on a red-black tree and is thus `O(log(n))`. /// Fragmentation isn't cleaned up until we're out of address space, but that's not really a big deal. 
/// /// # Arguments -/// * `requested_paddr`: if `Some`, the returned `AllocatedFrames` will start at the `Frame` +/// * `requested_paddr`: if `Some`, the returned `Frames<{FrameState::Unmapped}>` will start at the `Frame` /// containing this `PhysicalAddress`. /// If `None`, the first available `Frame` range will be used, starting at any random physical address. /// * `num_frames`: the number of `Frame`s to be allocated. @@ -1021,7 +815,7 @@ fn add_reserved_region_to_region_list( pub fn allocate_frames_deferred( requested_paddr: Option, num_frames: usize, -) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), &'static str> { +) -> Result<(Frames<{FrameState::Unmapped}>, DeferredAllocAction<'static>), &'static str> { if num_frames == 0 { warn!("frame_allocator: requested an allocation of 0 frames... stupid!"); return Err("cannot allocate zero frames"); @@ -1081,7 +875,7 @@ pub fn allocate_frames_deferred( pub fn allocate_frames_by_bytes_deferred( requested_paddr: Option, num_bytes: usize, -) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), &'static str> { +) -> Result<(Frames<{FrameState::Unmapped}>, DeferredAllocAction<'static>), &'static str> { let actual_num_bytes = if let Some(paddr) = requested_paddr { num_bytes + (paddr.value() % FRAME_SIZE) } else { @@ -1095,7 +889,7 @@ pub fn allocate_frames_by_bytes_deferred( /// Allocates the given number of frames with no constraints on the starting physical address. /// /// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. -pub fn allocate_frames(num_frames: usize) -> Option { +pub fn allocate_frames(num_frames: usize) -> Option> { allocate_frames_deferred(None, num_frames) .map(|(af, _action)| af) .ok() @@ -1107,7 +901,7 @@ pub fn allocate_frames(num_frames: usize) -> Option { /// /// This function still allocates whole frames by rounding up the number of bytes. /// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. -pub fn allocate_frames_by_bytes(num_bytes: usize) -> Option { +pub fn allocate_frames_by_bytes(num_bytes: usize) -> Option> { allocate_frames_by_bytes_deferred(None, num_bytes) .map(|(af, _action)| af) .ok() @@ -1118,7 +912,7 @@ pub fn allocate_frames_by_bytes(num_bytes: usize) -> Option { /// /// This function still allocates whole frames by rounding up the number of bytes. /// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. -pub fn allocate_frames_by_bytes_at(paddr: PhysicalAddress, num_bytes: usize) -> Result { +pub fn allocate_frames_by_bytes_at(paddr: PhysicalAddress, num_bytes: usize) -> Result, &'static str> { allocate_frames_by_bytes_deferred(Some(paddr), num_bytes) .map(|(af, _action)| af) } @@ -1127,7 +921,7 @@ pub fn allocate_frames_by_bytes_at(paddr: PhysicalAddress, num_bytes: usize) -> /// Allocates the given number of frames starting at (inclusive of) the frame containing the given `PhysicalAddress`. /// /// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details. -pub fn allocate_frames_at(paddr: PhysicalAddress, num_frames: usize) -> Result { +pub fn allocate_frames_at(paddr: PhysicalAddress, num_frames: usize) -> Result, &'static str> { allocate_frames_deferred(Some(paddr), num_frames) .map(|(af, _action)| af) } diff --git a/kernel/frame_allocator/src/test.rs b/kernel/frame_allocator/src/test.rs index 4bcb4697d2..8a4518dcd2 100644 --- a/kernel/frame_allocator/src/test.rs +++ b/kernel/frame_allocator/src/test.rs @@ -1,4 +1,6 @@ //! 
Tests for the AllocatedFrames type, mainly the `split` method. +//! These tests have to be run individually because running them all at once leads to overlaps between `TrustedChunk`s +//! which will return an error. extern crate std; @@ -6,19 +8,11 @@ use self::std::dbg; use super::*; -impl PartialEq for AllocatedFrames { - fn eq(&self, other: &Self) -> bool { - self.frames == other.frames - } -} - fn from_addr(start_addr: usize, end_addr: usize) -> AllocatedFrames { - AllocatedFrames { - frames: Frames::new(MemoryRegionType::Free, FrameRange::new( + AllocatedFrames::new(MemoryRegionType::Free, FrameRange::new( Frame::containing_address(PhysicalAddress::new_canonical(start_addr)), Frame::containing_address(PhysicalAddress::new_canonical(end_addr)), )).unwrap() - } } fn frame_addr(addr: usize) -> Frame { diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs index 7bb33a7f6d..957738b650 100644 --- a/kernel/memory/src/paging/mapper.rs +++ b/kernel/memory/src/paging/mapper.rs @@ -240,7 +240,8 @@ impl Mapper { // there is no easy/efficient way to store a dynamic list of non-contiguous frames (would require Vec). // This is okay because we will deallocate each of these frames when this MappedPages object is dropped // and each of the page table entries for its pages are cleared. - core::mem::forget(frames); + let mapped_frames = frames.into_mapped_frames(); // mark the frames as mapped + core::mem::forget(mapped_frames); Ok(mapped_pages) } @@ -277,8 +278,9 @@ impl Mapper { return Err("map_allocated_pages(): page was already in use"); } - p1[page.p1_index()].set_entry(af.as_allocated_frame(), actual_flags); - core::mem::forget(af); // we currently forget frames allocated here since we don't yet have a way to track them. + p1[page.p1_index()].set_entry(af.as_unmapped_frame(), actual_flags); + let mapped_frames = af.into_mapped_frames(); // mark the frame as mapped + core::mem::forget(mapped_frames); // we currently forget frames allocated here since we don't yet have a way to track them. } Ok(MappedPages { diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs index 67d46c4b3f..b4bc0b82b8 100644 --- a/kernel/memory/src/paging/mod.rs +++ b/kernel/memory/src/paging/mod.rs @@ -110,7 +110,7 @@ impl PageTable { temporary_page.with_table_and_frame(|new_table, frame| { new_table.zero(); new_table[RECURSIVE_P4_INDEX].set_entry( - frame.as_allocated_frame(), + frame.as_unmapped_frame(), PteFlagsArch::new().valid(true).writable(true), ); })?; @@ -153,11 +153,11 @@ impl PageTable { // Overwrite upcoming page table recursive mapping. 
temporary_page.with_table_and_frame(|table, frame| { self.p4_mut()[UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry( - frame.as_allocated_frame(), + frame.as_unmapped_frame(), PteFlagsArch::new().valid(true).writable(true), ); table[UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry( - frame.as_allocated_frame(), + frame.as_unmapped_frame(), PteFlagsArch::new().valid(true).writable(true), ); })?; diff --git a/kernel/memory/src/paging/table.rs b/kernel/memory/src/paging/table.rs index 95a70c5353..a74800ef1a 100644 --- a/kernel/memory/src/paging/table.rs +++ b/kernel/memory/src/paging/table.rs @@ -140,7 +140,7 @@ impl Table { assert!(!is_huge(&self[index].flags()), "mapping code does not support huge pages"); let af = frame_allocator::allocate_frames(1).expect("next_table_create(): no frames available"); self[index].set_entry( - af.as_allocated_frame(), + af.as_unmapped_frame(), flags.valid(true).writable(true), // must be valid and writable on x86_64 ); self.next_table_mut(index).unwrap().zero(); diff --git a/kernel/page_table_entry/src/lib.rs b/kernel/page_table_entry/src/lib.rs index a9116b6c3c..1829a03309 100644 --- a/kernel/page_table_entry/src/lib.rs +++ b/kernel/page_table_entry/src/lib.rs @@ -15,7 +15,7 @@ use core::ops::Deref; use memory_structs::{Frame, FrameRange, PhysicalAddress}; use zerocopy::FromBytes; -use frame_allocator::AllocatedFrame; +use frame_allocator::UnmappedFrame; use pte_flags::{PteFlagsArch, PTE_FRAME_MASK}; /// A page table entry, which is a `u64` value under the hood. @@ -90,7 +90,7 @@ impl PageTableEntry { /// This is the actual mapping action that informs the MMU of a new mapping. /// /// Note: this performs no checks about the current value of this page table entry. - pub fn set_entry(&mut self, frame: AllocatedFrame, flags: PteFlagsArch) { + pub fn set_entry(&mut self, frame: UnmappedFrame, flags: PteFlagsArch) { self.0 = (frame.start_address().value() as u64) | flags.bits(); }
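Taken together, the hunks in `mapper.rs`, `table.rs`, and `page_table_entry` apply a single pattern: a frame is handed to the page table entry while it is still in its unmapped state, and only afterwards is the owning object converted into its mapped state (and currently forgotten, since mapped frames are not yet tracked). A minimal sketch of that pattern, not part of the patch; `p1`, `page`, and `actual_flags` are assumed to exist as they do in the surrounding `Mapper` code:

    // Hedged sketch of the allocate -> set_entry -> into_mapped_frames flow used above.
    // `p1`, `page`, and `actual_flags` are assumed from the surrounding Mapper code.
    let af = frame_allocator::allocate_frames(1)
        .ok_or("map_allocated_pages(): couldn't allocate a frame")?;
    // The PTE is written while the frame is still in the unmapped state.
    p1[page.p1_index()].set_entry(af.as_unmapped_frame(), actual_flags);
    // Only afterwards is the owner transitioned to the mapped state; it is forgotten
    // because mapped frames are not yet tracked by any owning object.
    let mapped_frames = af.into_mapped_frames();
    core::mem::forget(mapped_frames);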