diff --git a/src/arch/mod.rs b/src/arch/mod.rs index 98246127..af80165b 100644 --- a/src/arch/mod.rs +++ b/src/arch/mod.rs @@ -1,8 +1,10 @@ + #[cfg(target_arch = "arm")] mod arm32; #[cfg(target_arch = "riscv64")] mod riscv64; + use cfg_if::cfg_if; use crate::mm; @@ -12,10 +14,10 @@ pub type MemoryImpl = riscv64::sv39::PageTable; #[cfg(target_arch = "riscv64")] pub type InterruptsImpl = riscv64::interrupts::Interrupts; -pub fn new_arch() -> impl Architecture { +pub fn new_arch(info: usize) -> impl Architecture { cfg_if! { if #[cfg(target_arch = "riscv64")] { - riscv64::Riscv64::new() + riscv64::Riscv64::new(info) } else if #[cfg(target_arch = "arm")] { arm32::Arm32::new() } else { @@ -26,6 +28,11 @@ pub fn new_arch() -> impl Architecture { pub trait Architecture { unsafe extern "C" fn _start() -> !; + + fn new(info: usize) -> Self; + + fn for_all_memory_regions<F: FnMut(&mut dyn Iterator<Item = (usize, usize)>)>(&self, f: F); + fn for_all_reserved_memory_regions<F: FnMut(&mut dyn Iterator<Item = (usize, usize)>)>(&self, f: F); } pub trait ArchitectureMemory { diff --git a/src/arch/riscv64/mod.rs b/src/arch/riscv64/mod.rs index 2d502770..15989790 100644 --- a/src/arch/riscv64/mod.rs +++ b/src/arch/riscv64/mod.rs @@ -7,12 +7,9 @@ pub mod interrupts; pub mod registers; pub mod sv39; -pub struct Riscv64 {} -impl Riscv64 { - pub fn new() -> Self { - Self {} - } +pub struct Riscv64 { + device_tree: fdt::Fdt<'static>, } impl Architecture for Riscv64 { @@ -21,4 +18,32 @@ impl Architecture for Riscv64 { unsafe extern "C" fn _start() -> !
{ asm!("la sp, STACK_START", "call k_main", options(noreturn)); } + + fn new(info: usize) -> Self { + let device_tree = unsafe { fdt::Fdt::from_ptr(info as *const u8).unwrap() }; + + Self { device_tree } + } + + fn for_all_memory_regions<F: FnMut(&mut dyn Iterator<Item = (usize, usize)>)>(&self, mut f: F) { + let memory = self.device_tree.memory(); + let mut regions = memory + .regions() + .map(|region| { + (region.starting_address as usize, region.size.unwrap_or(0)) + }); + + f(&mut regions); + } + + fn for_all_reserved_memory_regions<F: FnMut(&mut dyn Iterator<Item = (usize, usize)>)>(&self, mut f: F) { + let reserved_memory = self.device_tree.find_node("/reserved-memory").unwrap(); + + let mut regions = reserved_memory + .children() + .flat_map(|child| child.reg().unwrap()) + .map(|region| (region.starting_address as usize, region.size.unwrap_or(0))); + + f(&mut regions); + } } diff --git a/src/main.rs b/src/main.rs index 62c5659f..a018f351 100644 --- a/src/main.rs +++ b/src/main.rs @@ -4,6 +4,8 @@ #![feature(fn_align)] #![feature(naked_functions)] #![feature(custom_test_frameworks)] +#![feature(associated_type_defaults)] +#![feature(type_alias_impl_trait)] #![test_runner(crate::kernel_tests::runner)] #![reexport_test_harness_main = "ktests_launch"] @@ -28,7 +30,7 @@ extern "C" fn k_main(_core_id: usize, device_tree_ptr: usize) -> ! { #[cfg(test)] ktests_launch(); - let _arch = arch::new_arch(); + let mut arch = arch::new_arch(device_tree_ptr); kprintln!("GoOSe is booting"); @@ -43,9 +45,7 @@ extern "C" fn k_main(_core_id: usize, device_tree_ptr: usize) -> !
{ } plic.set_threshold(0); - let device_tree = unsafe { fdt::Fdt::from_ptr(device_tree_ptr as *const u8).unwrap() }; - - let mut memory = mm::MemoryManager::<arch::MemoryImpl>::new(&device_tree); + let mut memory = mm::MemoryManager::<arch::MemoryImpl>::new(&arch); memory.map_address_space(); kprintln!("[OK] Setup virtual memory"); diff --git a/src/mm/mod.rs b/src/mm/mod.rs index 54bfac55..c65c013b 100644 --- a/src/mm/mod.rs +++ b/src/mm/mod.rs @@ -32,17 +32,18 @@ pub fn is_kernel_page(base: usize) -> bool { base >= kernel_start && base < kernel_end } -// TODO: shall this be moved to arch/riscv (in the MemoryImpl) ? -pub fn is_reserved_page(base: usize, device_tree: &fdt::Fdt) -> bool { - let reserved_memory = device_tree.find_node("/reserved-memory").unwrap(); - let mut reserved_pages = reserved_memory - .children() - .flat_map(|child| child.reg().unwrap()) - .map(|region| (region.starting_address as usize, region.size.unwrap_or(0))); - - reserved_pages.any(|(region_start, region_size)| { - base >= region_start && base <= (region_start + region_size) - }) +pub fn is_reserved_page(base: usize, arch: &impl arch::Architecture) -> bool { + let mut is_res = false; + + arch.for_all_reserved_memory_regions(|regions| { + is_res = regions + .map(|(start, size)| (start, size)) // this is a weird hack to fix a type error.
+ .any(|(region_start, region_size)| { + base >= region_start && base <= (region_start + region_size) + }) + }); + + return is_res; } pub struct MemoryManager<'alloc, T: arch::ArchitectureMemory> { @@ -51,12 +52,12 @@ } impl<'alloc, T: arch::ArchitectureMemory> MemoryManager<'alloc, T> { - pub fn new(device_tree: &fdt::Fdt) -> Self { + pub fn new(arch: &impl arch::Architecture) -> Self { let mut page_manager = - page_manager::PageManager::from_device_tree(&device_tree, T::get_page_size()); - let arch = T::new(&mut page_manager); + page_manager::PageManager::from_arch_info(arch, T::get_page_size()); + let arch_mem = T::new(&mut page_manager); - Self { page_manager, arch } + Self { page_manager, arch: arch_mem } } fn map(&mut self, to: usize, from: usize, perms: Permissions) { diff --git a/src/mm/page_manager.rs b/src/mm/page_manager.rs index 65a2f0e4..157a55fb 100644 --- a/src/mm/page_manager.rs +++ b/src/mm/page_manager.rs @@ -2,6 +2,7 @@ use super::page_alloc::{AllocatorError, PageAllocator}; use crate::kprintln; use crate::mm; use core::mem; +use crate::Architecture; #[derive(Debug, PartialEq)] pub enum PageKind { @@ -55,18 +56,19 @@ pub struct PageManager<'a> { impl<'a> PageManager<'a> { fn count_pages( - regions: impl Iterator<Item = fdt::standard_nodes::MemoryRegion>, + arch: &impl Architecture, page_size: usize, ) -> usize { - regions - .map(|region| { - (region.starting_address, unsafe { - region.starting_address.add(region.size.unwrap_or(0)) - }) - }) - .map(|(start, end)| (start as usize, end as usize)) - .flat_map(|(start, end)| (start..end).step_by(page_size)) - .count() + let mut count = 0; + + arch.for_all_memory_regions(|regions| { + count = regions + .map(|(start, end)| (start as usize, end as usize)) + .flat_map(|(start, end)| (start..end).step_by(page_size)) + .count(); + }); + + count } fn align_up(addr: usize, alignment: usize) -> usize { @@ -75,12 +77,12 @@ impl<'a> PageManager<'a> { fn phys_addr_to_physical_page(
phys_addr: usize, - device_tree: &fdt::Fdt, + arch: &impl Architecture, page_size: usize, ) -> PhysicalPage { let kind = if mm::is_kernel_page(phys_addr) { PageKind::Kernel - } else if mm::is_reserved_page(phys_addr, device_tree) { + } else if mm::is_reserved_page(phys_addr, arch) { PageKind::Reserved } else { PageKind::Unused @@ -94,46 +96,43 @@ impl<'a> PageManager<'a> { } /// Look for `pages_needed` contiguous unused pages, beware of pages that are in use by the - /// kernel. + /// kernel or reserved by opensbi. fn find_contiguous_unused_pages( - device_tree: &fdt::Fdt, + arch: &impl Architecture, pages_needed: usize, page_size: usize, ) -> Option<usize> { - let memory = device_tree.memory(); - - let physical_pages = memory - .regions() - .map(|region| { - (region.starting_address, unsafe { - region.starting_address.add(region.size.unwrap_or(0)) - }) - }) - .map(|(start, end)| (start as usize, end as usize)) - .flat_map(|(start, end)| (start..end).step_by(page_size)) - .map(|base| Self::phys_addr_to_physical_page(base, device_tree, page_size)); - - let mut first_page_addr: usize = 0; - let mut consecutive_pages: usize = 0; + let mut found = None; - for page in physical_pages { - if consecutive_pages == 0 { - first_page_addr = page.base; - } + arch.for_all_memory_regions(|regions| { + let physical_pages = regions + .flat_map(|(addr, size)| (addr..addr + size).step_by(page_size)); - if page.is_used() { - consecutive_pages = 0; - continue; - } + let mut first_page_addr: usize = 0; + let mut consecutive_pages: usize = 0; - consecutive_pages += 1; + for page in physical_pages { + if consecutive_pages == 0 { + first_page_addr = page; + } + + if mm::is_kernel_page(page) || mm::is_reserved_page(page, arch) { + consecutive_pages = 0; + continue; + } - if consecutive_pages == pages_needed { - return Some(first_page_addr); + consecutive_pages += 1; + + if consecutive_pages == pages_needed { + found = Some(first_page_addr); + return + } } - } - None + return; + }); + + found } ///
TLDR: Initialize a [`PageAllocator`] from the device tree. @@ -143,10 +142,8 @@ impl<'a> PageManager<'a> { /// - Second (in [`Self::find_contiguous_unused_pages`]), look through our pages for a contiguous /// space large enough to hold all our metadata. /// - Lastly store our metadata there, and mark pages as unused or kernel. - pub fn from_device_tree(device_tree: &fdt::Fdt, page_size: usize) -> Self { - let memory_node = device_tree.memory(); - - let page_count = Self::count_pages(memory_node.regions(), page_size); + pub fn from_arch_info(arch: &impl Architecture, page_size: usize) -> Self { + let page_count = Self::count_pages(arch, page_size); let metadata_size = page_count * mem::size_of::<PhysicalPage>(); let pages_needed = Self::align_up(metadata_size, page_size) / page_size; kprintln!("total of {:?} pages", page_count); @@ -154,31 +151,27 @@ impl<'a> PageManager<'a> { kprintln!("pages_needed: {:?}", pages_needed); let metadata_addr = - Self::find_contiguous_unused_pages(device_tree, pages_needed, page_size).unwrap(); + Self::find_contiguous_unused_pages(arch, pages_needed, page_size).unwrap(); kprintln!("metadata_addr: {:X}", metadata_addr); let metadata: &mut [PhysicalPage] = unsafe { core::slice::from_raw_parts_mut(metadata_addr as *mut _, page_count) }; - let physical_pages = memory_node - .regions() - .map(|region| { - (region.starting_address, unsafe { - region.starting_address.add(region.size.unwrap_or(0)) - }) - }) - .map(|(start, end)| (start as usize, end as usize)) - .flat_map(|(start, end)| (start..end).step_by(page_size)) - .map(|base| Self::phys_addr_to_physical_page(base, device_tree, page_size)); - - for (i, page) in physical_pages.enumerate() { - metadata[i] = page; - } + arch.for_all_memory_regions(|regions| { + let physical_pages = regions + .flat_map(|(start, end)| (start..end).step_by(page_size)) + .map(|base| Self::phys_addr_to_physical_page(base, arch, page_size)); - return Self { + for (i, page) in physical_pages.enumerate() { + metadata[i] = page;
+ } + }); + + + Self { metadata, page_size, - }; + } } pub fn page_size(&self) -> usize {