Skip to content

WIP: Page allocation fixes, first moves towards aarch64 user space page table handling #52

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 15 commits into
base: main
Choose a base branch
from
53 changes: 29 additions & 24 deletions aarch64/lib/kernel.ld
Original file line number Diff line number Diff line change
Expand Up @@ -8,28 +8,34 @@ SECTIONS {
/* Entrypoint for Raspberry Pi will be at 0x80000 */
. = ${LOAD-ADDRESS};

boottext = .;

/* Group boottext and text */
PROVIDE(boottext = .);
.text.boot : ALIGN(4096) {
*(.boottext .bootdata)
}
. = ALIGN(4096);
eboottext = .;
PROVIDE(eboottext = .);

text = .;
PROVIDE(text = .);
.text : ALIGN(4096) {
*(.text* .stub .gnu.linkonce.t.*)
}
. = ALIGN(4096);
etext = .;
. = ALIGN(2097152);
PROVIDE(etext = .);


rodata = .;
.rodata : ALIGN(4096) {
/* RO data is in a separate page to other data */
PROVIDE(rodata = .);
.rodata : ALIGN(2097152) {
*(.rodata* .gnu.linkonce.r.*)
}
. = ALIGN(4096);
erodata = .;
. = ALIGN(2097152);
PROVIDE(erodata = .);

data = .;

/* Group data and bss */
PROVIDE(data = .);
.data : ALIGN(4096) {
*(.data*)
}
Expand All @@ -40,27 +46,26 @@ SECTIONS {
*(.got.plt)
}
. = ALIGN(4096);
edata = .;
PROVIDE(edata = .);

bss = .;
PROVIDE(bss = .);
.bss : ALIGN(4096) {
*(.bss*)
*(COMMON)
}
. = ALIGN(4096);
ebss = .;
. = ALIGN(2097152);
PROVIDE(ebss = .);

/* Reserve section for early pagetables. Align to 2MiB to allow us to map
as a 2MiB page. Note that this won't be needed once we transition to
recursive pagetables.
TODO Just use the heap when we enable recursive pagetables? */
. = ALIGN(2 * 1024 * 1024);
early_pagetables = .;
. += 2 * 1024 * 1024;

/* Reserve section for early pagetables. */
. = ALIGN(4096);
early_pagetables = .;
. += 32 * 4096;
eearly_pagetables = .;

end = .;



PROVIDE(end = .);

/DISCARD/ : {
*(.eh_frame .note.GNU-stack)
}
Expand Down
1 change: 1 addition & 0 deletions aarch64/src/kmem.rs
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ pub fn total_kernel_range() -> PhysRange {
PhysRange(from_virt_to_physaddr(base_addr())..from_virt_to_physaddr(end_addr()))
}

// TODO Meh, this is only valid if it's been mapped as an offset - should probably remove
pub const fn physaddr_as_virt(pa: PhysAddr) -> usize {
(pa.addr() as usize).wrapping_add(KZERO)
}
Expand Down
164 changes: 153 additions & 11 deletions aarch64/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,24 +15,29 @@ mod mailbox;
mod pagealloc;
mod param;
mod registers;
mod swtch;
mod trap;
mod uartmini;
mod uartpl011;
mod vm;

extern crate alloc;

use crate::kmem::from_virt_to_physaddr;
use crate::vm::kernel_root;
use alloc::boxed::Box;
use core::ptr;
use kmem::{boottext_range, bss_range, data_range, rodata_range, text_range, total_kernel_range};
use param::KZERO;
use port::fdt::DeviceTree;
use port::mem::PhysRange;
use port::println;
use vm::PageTable;
use vm::{Entry, RootPageTable, RootPageTableType, VaMapping};

#[cfg(not(test))]
core::arch::global_asm!(include_str!("l.S"));

static mut KPGTBL: PageTable = PageTable::empty();
static mut KERNEL_PAGETABLE: RootPageTable = RootPageTable::empty();
static mut USER_PAGETABLE: RootPageTable = RootPageTable::empty();

unsafe fn print_memory_range(name: &str, range: &PhysRange) {
let size = range.size();
Expand Down Expand Up @@ -118,27 +123,164 @@ pub extern "C" fn main9(dtb_va: usize) {
// Map address space accurately using rust VM code to manage page tables
unsafe {
let dtb_range = PhysRange::with_len(from_virt_to_physaddr(dtb_va).addr(), dt.size());
vm::init(&mut *ptr::addr_of_mut!(KPGTBL), dtb_range, mailbox::get_arm_memory());
vm::switch(&*ptr::addr_of!(KPGTBL));
vm::init_kernel_page_tables(
&mut *ptr::addr_of_mut!(KERNEL_PAGETABLE),
dtb_range,
mailbox::get_arm_memory(),
);
vm::switch(&*ptr::addr_of!(KERNEL_PAGETABLE), RootPageTableType::Kernel);

vm::init_user_page_tables(&mut *ptr::addr_of_mut!(USER_PAGETABLE));
vm::switch(&*ptr::addr_of!(USER_PAGETABLE), RootPageTableType::User);
}

// From this point we can use the global allocator

print_memory_info();

if let Ok(page) = pagealloc::allocate() {
println!("page addr: {:#016x}", page.data().as_ptr() as *const _ as u64);
vm::print_recursive_tables(RootPageTableType::Kernel);
vm::print_recursive_tables(RootPageTableType::User);

{
let page_table = unsafe { &mut *ptr::addr_of_mut!(KERNEL_PAGETABLE) };
let entry = Entry::rw_kernel_data();
for i in 0..3 {
let alloc_result = pagealloc::allocate_virtpage(
page_table,
"testkernel",
entry,
VaMapping::Offset(KZERO),
RootPageTableType::Kernel,
);
match alloc_result {
Ok(_allocated_page) => {
// let pa = allocated_page.pa;
// let va = allocated_page.page.data().as_ptr() as *const _ as u64;
// println!("page pa: {pa:?} va: {va:#016x}");

//allocated_page.clear();

//let mapped_range =
// let kpgtable = unsafe { &mut *ptr::addr_of_mut!(KPGTBL) };
// kpgtable.map_phys_range(range, *flags, *page_size).expect("dynamic mapping failed");
// let range = PhysRange::new(pa, pa + PAGE_SIZE_4K as u64);
// let entry = Entry::rw_user_text();
// let page_size = PageSize::Page4K;

// //let kpgtable = unsafe { &mut *ptr::addr_of_mut!(KPGTBL) };
// //kernel_root().map_phys_range(&range, entry, page_size).expect("dynamic mapping failed");
}
Err(err) => {
println!("Error allocating page in kernel space ({i}): {:?}", err);
break;
}
}
}
}

kernel_root().print_recursive_tables();
vm::print_recursive_tables(RootPageTableType::Kernel);
vm::print_recursive_tables(RootPageTableType::User);

println!("Now try user space");

{
let page_table = unsafe { &mut *ptr::addr_of_mut!(USER_PAGETABLE) };
let entry = Entry::rw_user_text();
for i in 0..100 {
let alloc_result = pagealloc::allocate_virtpage(
page_table,
"testuser",
entry,
VaMapping::Addr((i + 1) * 4096),
RootPageTableType::User,
);
match alloc_result {
Ok(_allocated_page) => {}
Err(err) => {
println!("Error allocating page in user space ({i}): {:?}", err);
break;
}
}
}
}

vm::print_recursive_tables(RootPageTableType::Kernel);
vm::print_recursive_tables(RootPageTableType::User);

// test_sysexit();

let _b = Box::new("ddododo");

println!("looping now");

#[allow(clippy::empty_loop)]
loop {}
}

// struct Proc {}

// static mut PROC: Proc = Proc {};

// fn test_sysexit() {
// // TODO
// // Jump to user mode (EL0)
// // Return to kernel mode (EL1)
// // Create and switch process stack

// // Populate process
// // - page for program code
// // - syscall to exit
// // - page for stack
// // We need to jump to user mode (EL0)
// // svc jumps to supervisor mode (EL1)

// // point to proc page table
// // switch to process
// // point to kernel page table

// // For this hack, we don't need to change page tables.
// // Instead, we will:
// // 1. create a buffer for our process
// // 2. copy code to sysexit
// // 3. context switch to the process
// // Machine code and assembly to call syscall exit
// // 00 00 80 D2 ; mov x0, #0
// // 21 00 80 D2 ; mov x1, #1
// // 01 00 00 D4 ; svc #0
// let proc_text_bytes: [u8; 12] =
// [0x00, 0x00, 0x80, 0xd2, 0x21, 0x00, 0x80, 0xd2, 0x01, 0x00, 0x00, 0xd4];
// let proc_textbuf = unsafe {
// core::slice::from_raw_parts_mut(
// alloc::alloc::alloc_zeroed(Layout::from_size_align_unchecked(4096, 4096)),
// 4096,
// )
// };
// proc_textbuf[..proc_text_bytes.len()].copy_from_slice(&proc_text_bytes);

// let proc_stack_buffer =
// unsafe { alloc::alloc::alloc_zeroed(Layout::from_size_align_unchecked(4096, 4096)) };
// let proc_stack = unsafe { core::slice::from_raw_parts_mut(proc_stack_buffer, 4096) };

// // Initialise a Context struct on the process stack, at the end of the proc_stack_buffer.
// let proc_stack_initial_ctx =
// unsafe { proc_stack_buffer.add(4096 - size_of::<swtch::Context>()) };
// let proc_context_ptr: *mut swtch::Context =
// proc_stack_initial_ctx as *const _ as *mut swtch::Context;

// // TODO Set up proc stack
// // Need to push a context object onto the stack, with x30 populated at the
// // address of proc_textbuf
// let proc_context_ref: &mut swtch::Context = unsafe { &mut *proc_context_ptr };
// proc_context_ref.set_stack_pointer(&proc_context_ptr as *const _ as u64);
// proc_context_ref.set_return(&proc_textbuf.as_ptr() as *const _ as u64);

// // let mut foo: *mut swtch::Context = &mut context1;
// let mut kernel_context: *mut swtch::Context = null_mut();
// let kernel_context_ptr: *mut *mut swtch::Context = &mut kernel_context;

// println!("proc ctx: {:?}", proc_context_ref);

// //context2.set_return(&proc_textbuf as *const _ as u64);
// unsafe { swtch::swtch(kernel_context_ptr, &*proc_context_ptr) };

// //println!("x30: {:#016x}", proc_context_ref.x30);
// }

mod runtime;
Loading