From cba7073ae53cff2651b042b76d2f7b590c93d4b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Neuh=C3=A4user?= Date: Thu, 19 Mar 2026 08:57:39 +0100 Subject: [PATCH] refactor: organize code --- src/aarch64/mmu.rs | 293 ++++++++-------------------- src/aarch64/mmu/physical_mapping.rs | 89 +++++++++ src/configuration.rs | 46 ++--- src/lib.rs | 5 +- src/main.rs | 6 +- 5 files changed, 197 insertions(+), 242 deletions(-) create mode 100644 src/aarch64/mmu/physical_mapping.rs diff --git a/src/aarch64/mmu.rs b/src/aarch64/mmu.rs index 7f2566d..2bdd297 100644 --- a/src/aarch64/mmu.rs +++ b/src/aarch64/mmu.rs @@ -1,9 +1,12 @@ -use core::panic; - use core::mem::size_of; use nova_error::NovaError; -use crate::get_current_el; +use crate::{ + aarch64::mmu::physical_mapping::{ + reserve_block, reserve_block_explicit, reserve_page, reserve_page_explicit, + }, + get_current_el, +}; unsafe extern "C" { static mut __translation_table_l2_start: u64; @@ -51,6 +54,16 @@ pub const KERNEL_VIRTUAL_MEM_SPACE: usize = 0xFFFF_FF80_0000_0000; pub const STACK_START_ADDR: usize = !KERNEL_VIRTUAL_MEM_SPACE & (!0xF); +mod physical_mapping; + +type VirtAddr = usize; +type PhysAddr = usize; + +pub enum PhysSource { + Any, + Explicit(PhysAddr), +} + #[repr(align(4096))] pub struct PageTable([u64; TABLE_ENTRY_COUNT]); @@ -59,122 +72,85 @@ pub static mut TRANSLATIONTABLE_TTBR0: PageTable = PageTable([0; 512]); #[no_mangle] pub static mut TRANSLATIONTABLE_TTBR1: PageTable = PageTable([0; 512]); -static mut PAGING_BITMAP: [u64; MAX_PAGE_COUNT / 64] = [0; MAX_PAGE_COUNT / 64]; - /// Allocate a memory block of `size` starting at `virtual_address`. 
pub fn allocate_memory( - mut virtual_address: usize, - mut size: usize, - additional_flags: u64, + virtual_address: usize, + size_bytes: usize, + phys: PhysSource, + flags: u64, ) -> Result<(), NovaError> { if !virtual_address.is_multiple_of(GRANULARITY) { return Err(NovaError::Misalignment); } - - let level1_blocks = size / LEVEL1_BLOCK_SIZE; - size %= LEVEL1_BLOCK_SIZE; - let level2_blocks = size / LEVEL2_BLOCK_SIZE; - size %= LEVEL2_BLOCK_SIZE; - let level3_pages = size / GRANULARITY; - if !size.is_multiple_of(GRANULARITY) { + if !size_bytes.is_multiple_of(GRANULARITY) { return Err(NovaError::InvalidGranularity); } - if level1_blocks > 0 { - todo!("Currently not supported"); - } - let base_table = if virtual_address & KERNEL_VIRTUAL_MEM_SPACE > 0 { core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR1) } else { core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0) }; - for _ in 0..level2_blocks { - alloc_block_l2(virtual_address, base_table, additional_flags)?; - virtual_address += LEVEL2_BLOCK_SIZE; + match phys { + PhysSource::Any => map_range_dynamic(virtual_address, size_bytes, base_table, flags), + PhysSource::Explicit(phys_addr) => { + map_range_explicit(virtual_address, phys_addr, size_bytes, base_table, flags) + } } - for _ in 0..level3_pages { - alloc_page(virtual_address, base_table, additional_flags)?; - virtual_address += GRANULARITY; +} + +fn map_range_explicit( + mut virt: VirtAddr, + mut phys: PhysAddr, + size_bytes: usize, + base: *mut PageTable, + flags: u64, +) -> Result<(), NovaError> { + let mut remaining = size_bytes; + + while virt % LEVEL2_BLOCK_SIZE != 0 { + map_page(virt, phys, base, flags)?; + virt += GRANULARITY; + phys += GRANULARITY; + remaining -= GRANULARITY; + } + + while remaining >= LEVEL2_BLOCK_SIZE { + map_l2_block(virt, phys, base, flags)?; + virt += LEVEL2_BLOCK_SIZE; + phys += LEVEL2_BLOCK_SIZE; + remaining -= LEVEL2_BLOCK_SIZE; + } + + while remaining > 0 { + map_page(virt, phys, base, flags)?; + virt += GRANULARITY; + phys += 
GRANULARITY; + remaining -= GRANULARITY; } Ok(()) } -/// Allocate a memory block of `size` starting at `virtual_address`, -/// with explicit physical_address. -/// -/// Note: This can be used when mapping predefined regions. -pub fn allocate_memory_explicit( - mut virtual_address: usize, - mut size: usize, - mut physical_address: usize, - additional_flags: u64, +fn map_range_dynamic( + mut virt: PhysAddr, + size_bytes: usize, + base: *mut PageTable, + flags: u64, ) -> Result<(), NovaError> { - if !virtual_address.is_multiple_of(GRANULARITY) { - return Err(NovaError::Misalignment); - } - if !physical_address.is_multiple_of(GRANULARITY) { - return Err(NovaError::Misalignment); + let mut remaining = size_bytes; + + while remaining >= LEVEL2_BLOCK_SIZE { + map_l2_block(virt, reserve_block(), base, flags)?; + virt += LEVEL2_BLOCK_SIZE; + remaining -= LEVEL2_BLOCK_SIZE; } - let level1_blocks = size / LEVEL1_BLOCK_SIZE; - size %= LEVEL1_BLOCK_SIZE; - let mut level2_blocks = size / LEVEL2_BLOCK_SIZE; - size %= LEVEL2_BLOCK_SIZE; - let mut level3_pages = size / GRANULARITY; - if !size.is_multiple_of(GRANULARITY) { - return Err(NovaError::InvalidGranularity); - } - - if level1_blocks > 0 { - todo!("Currently not supported"); - } - - let l2_alignment = (physical_address % LEVEL2_BLOCK_SIZE) / GRANULARITY; - if l2_alignment != 0 { - let l3_diff = LEVEL2_BLOCK_SIZE / GRANULARITY - l2_alignment; - if l3_diff > level3_pages { - level2_blocks -= 1; - level3_pages += TABLE_ENTRY_COUNT; - } - - level3_pages -= l3_diff; - - for _ in 0..l3_diff { - alloc_page_explicit( - virtual_address, - physical_address, - core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0), - additional_flags, - )?; - - virtual_address += GRANULARITY; - physical_address += GRANULARITY; - } - } - - for _ in 0..level2_blocks { - alloc_block_l2_explicit( - virtual_address, - physical_address, - core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0), - additional_flags, - )?; - virtual_address += LEVEL2_BLOCK_SIZE; - 
physical_address += LEVEL2_BLOCK_SIZE; - } - - for _ in 0..level3_pages { - alloc_page_explicit( - virtual_address, - physical_address, - core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0), - additional_flags, - )?; - virtual_address += GRANULARITY; - physical_address += GRANULARITY; + while remaining > 0 { + map_page(virt, reserve_page(), base, flags)?; + virt += GRANULARITY; + remaining -= GRANULARITY; } Ok(()) @@ -232,20 +208,6 @@ fn map_page( Ok(()) } -// Allocate a level 2 block. -pub fn alloc_block_l2( - virtual_addr: usize, - base_table_ptr: *mut PageTable, - additional_flags: u64, -) -> Result<(), NovaError> { - map_l2_block( - virtual_addr, - reserve_block(), - base_table_ptr, - additional_flags, - ) -} - // Allocate a level 2 block, at a explicit `physical_address`. pub fn alloc_block_l2_explicit( virtual_addr: usize, @@ -290,10 +252,10 @@ pub fn map_l2_block( Ok(()) } -pub fn reserve_range_explicit( - start_physical_address: usize, - end_physical_address: usize, -) -> Result<(), NovaError> { +pub fn reserve_range( + start_physical_address: PhysAddr, + end_physical_address: PhysAddr, +) -> Result<PhysAddr, NovaError> { let mut size = end_physical_address - start_physical_address; let l1_blocks = size / LEVEL1_BLOCK_SIZE; size %= LEVEL1_BLOCK_SIZE; @@ -320,57 +282,7 @@ pub fn reserve_range_explicit( addr += GRANULARITY; } - Ok(()) -} - -fn reserve_page() -> usize { - if let Some(address) = find_unallocated_page() { - let page = address / GRANULARITY; - let word_index = page / 64; - unsafe { PAGING_BITMAP[word_index] |= 1 << (page % 64) }; - return address; - } - panic!("Out of Memory!"); -} - -fn reserve_page_explicit(physical_address: usize) -> Result<(), NovaError> { - let page = physical_address / GRANULARITY; - let word_index = page / 64; - - if unsafe { PAGING_BITMAP[word_index] } & (1 << (page % 64)) > 0 { - return Err(NovaError::Paging); - } - - unsafe { PAGING_BITMAP[word_index] |= 1 << (page % 64) }; - Ok(()) -} - -fn reserve_block() -> usize { - if let Some(start) = 
find_contiguous_free_bitmap_words(L2_BLOCK_BITMAP_WORDS) { - for j in 0..L2_BLOCK_BITMAP_WORDS { - unsafe { PAGING_BITMAP[start + j] = u64::MAX }; - } - return start * 64 * GRANULARITY; - } - - panic!("Out of Memory!"); -} - -fn reserve_block_explicit(physical_address: usize) -> Result<(), NovaError> { - let page = physical_address / GRANULARITY; - for i in 0..L2_BLOCK_BITMAP_WORDS { - unsafe { - if PAGING_BITMAP[(page / 64) + i] != 0 { - return Err(NovaError::Paging); - } - }; - } - for i in 0..L2_BLOCK_BITMAP_WORDS { - unsafe { - PAGING_BITMAP[(page / 64) + i] = u64::MAX; - }; - } - Ok(()) + Ok(start_physical_address) } fn create_block_descriptor_entry(physical_address: usize, additional_flags: u64) -> u64 { @@ -401,18 +313,6 @@ fn virtual_address_to_table_offset(virtual_addr: usize) -> (usize, usize, usize) (l1_off, l2_off, l3_off) } -/// Debugging function to navigate the translation tables. -#[allow(unused_variables)] -pub fn sim_l3_access(addr: usize) { - unsafe { - let entry1 = TRANSLATIONTABLE_TTBR0.0[addr / LEVEL1_BLOCK_SIZE]; - let table2 = &mut *(entry_phys(entry1 as usize) as *mut PageTable); - let entry2 = table2.0[(addr % LEVEL1_BLOCK_SIZE) / LEVEL2_BLOCK_SIZE]; - let table3 = &mut *(entry_phys(entry2 as usize) as *mut PageTable); - let _entry3 = table3.0[(addr % LEVEL2_BLOCK_SIZE) / GRANULARITY]; - } -} - /// Navigate the table tree, by following given offsets. This function /// allocates new tables if required. 
fn navigate_table( @@ -451,49 +351,14 @@ fn next_table(table_ptr: *mut PageTable, offset: usize) -> Result<*mut PageTable } } -fn find_unallocated_page() -> Option<usize> { - for (i, entry) in unsafe { PAGING_BITMAP }.iter().enumerate() { - if *entry != u64::MAX { - for offset in 0..64 { - if entry >> offset & 0b1 == 0 { - return Some((i * 64 + offset) * GRANULARITY); - } - } - } - } - None -} - -fn find_contiguous_free_bitmap_words(required_words: usize) -> Option<usize> { - let mut run_start = 0; - let mut run_len = 0; - - for (i, entry) in unsafe { PAGING_BITMAP }.iter().enumerate() { - if *entry == 0 { - if run_len == 0 { - run_start = i; - } - run_len += 1; - - if run_len == required_words { - return Some(run_start); - } - } else { - run_len = 0; - } - } - - None -} - /// Extracts the physical address out of an table entry. #[inline] -fn entry_phys(entry: usize) -> usize { +fn entry_phys(entry: usize) -> PhysAddr { entry & 0x0000_FFFF_FFFF_F000 } #[inline] -fn entry_table_addr(entry: usize) -> usize { +fn entry_table_addr(entry: usize) -> VirtAddr { if get_current_el() == 1 { phys_table_to_kernel_space(entry_phys(entry)) } else { @@ -503,6 +368,6 @@ fn entry_table_addr(entry: usize) -> usize { /// Extracts the physical address out of an table entry. 
#[inline] -fn phys_table_to_kernel_space(entry: usize) -> usize { +fn phys_table_to_kernel_space(entry: usize) -> VirtAddr { entry | TRANSLATION_TABLE_BASE_ADDR } diff --git a/src/aarch64/mmu/physical_mapping.rs b/src/aarch64/mmu/physical_mapping.rs new file mode 100644 index 0000000..4dba6f0 --- /dev/null +++ b/src/aarch64/mmu/physical_mapping.rs @@ -0,0 +1,89 @@ +use crate::aarch64::mmu::{PhysAddr, GRANULARITY, L2_BLOCK_BITMAP_WORDS, MAX_PAGE_COUNT}; +use nova_error::NovaError; + +static mut PAGING_BITMAP: [u64; MAX_PAGE_COUNT / 64] = [0; MAX_PAGE_COUNT / 64]; + +pub fn reserve_page() -> PhysAddr { + if let Some(address) = find_unallocated_page() { + let page = address / GRANULARITY; + let word_index = page / 64; + unsafe { PAGING_BITMAP[word_index] |= 1 << (page % 64) }; + return address; + } + panic!("Out of Memory!"); +} + +pub fn reserve_page_explicit(physical_address: usize) -> Result<PhysAddr, NovaError> { + let page = physical_address / GRANULARITY; + let word_index = page / 64; + + if unsafe { PAGING_BITMAP[word_index] } & (1 << (page % 64)) > 0 { + return Err(NovaError::Paging); + } + + unsafe { PAGING_BITMAP[word_index] |= 1 << (page % 64) }; + Ok(physical_address) +} + +pub fn reserve_block() -> usize { + if let Some(start) = find_contiguous_free_bitmap_words(L2_BLOCK_BITMAP_WORDS) { + for j in 0..L2_BLOCK_BITMAP_WORDS { + unsafe { PAGING_BITMAP[start + j] = u64::MAX }; + } + return start * 64 * GRANULARITY; + } + + panic!("Out of Memory!"); +} + +pub fn reserve_block_explicit(physical_address: usize) -> Result<(), NovaError> { + let page = physical_address / GRANULARITY; + for i in 0..L2_BLOCK_BITMAP_WORDS { + unsafe { + if PAGING_BITMAP[(page / 64) + i] != 0 { + return Err(NovaError::Paging); + } + }; + } + for i in 0..L2_BLOCK_BITMAP_WORDS { + unsafe { + PAGING_BITMAP[(page / 64) + i] = u64::MAX; + }; + } + Ok(()) +} + +fn find_unallocated_page() -> Option<PhysAddr> { + for (i, entry) in unsafe { PAGING_BITMAP }.iter().enumerate() { + if *entry != u64::MAX { + for offset in 0..64 
{ + if entry >> offset & 0b1 == 0 { + return Some((i * 64 + offset) * GRANULARITY); + } + } + } + } + None +} + +fn find_contiguous_free_bitmap_words(required_words: usize) -> Option<usize> { + let mut run_start = 0; + let mut run_len = 0; + + for (i, entry) in unsafe { PAGING_BITMAP }.iter().enumerate() { + if *entry == 0 { + if run_len == 0 { + run_start = i; + } + run_len += 1; + + if run_len == required_words { + return Some(run_start); + } + } else { + run_len = 0; + } + } + + None +} diff --git a/src/configuration.rs b/src/configuration.rs index 9496034..8f2b250 100644 --- a/src/configuration.rs +++ b/src/configuration.rs @@ -32,20 +32,19 @@ const AS: u64 = 0b1 << 36; // configure an ASID size of 16 bits pub static TCR_EL1_CONF: u64 = IPS | TG0 | TG1 | T0SZ | T1SZ | SH0 | SH1 | AS; pub mod mmu { - use crate::{ aarch64::mmu::{ - alloc_block_l2, alloc_block_l2_explicit, map_l2_block, reserve_range_explicit, + alloc_block_l2_explicit, allocate_memory, map_l2_block, reserve_range, PhysSource, DEVICE_MEM, EL0_ACCESSIBLE, KERNEL_VIRTUAL_MEM_SPACE, LEVEL1_BLOCK_SIZE, LEVEL2_BLOCK_SIZE, NORMAL_MEM, PXN, READ_ONLY, STACK_START_ADDR, - TRANSLATIONTABLE_TTBR0, TRANSLATIONTABLE_TTBR1, UXN, WRITABLE, + TRANSLATIONTABLE_TTBR0, UXN, WRITABLE, }, PERIPHERAL_BASE, }; #[no_mangle] static EL1_STACK_TOP: usize = STACK_START_ADDR | KERNEL_VIRTUAL_MEM_SPACE; - const EL1_STACK_BOTTOM: usize = EL1_STACK_TOP - LEVEL2_BLOCK_SIZE * 2; + const EL1_STACK_SIZE: usize = LEVEL2_BLOCK_SIZE * 2; extern "C" { static _data: u64; @@ -58,53 +57,54 @@ pub mod mmu { let kernel_end = unsafe { &__kernel_end } as *const _ as usize; let user_space_end = unsafe { &_end } as *const _ as usize; - reserve_range_explicit(0x0, user_space_end).unwrap(); + reserve_range(0x0, user_space_end).unwrap(); for addr in (0..shared_segment_end).step_by(LEVEL2_BLOCK_SIZE) { - let _ = map_l2_block( + map_l2_block( addr, addr, core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0), EL0_ACCESSIBLE | READ_ONLY | NORMAL_MEM, - ); + ) + 
.unwrap(); } for addr in (shared_segment_end..kernel_end).step_by(LEVEL2_BLOCK_SIZE) { - let _ = map_l2_block( + map_l2_block( addr, addr, core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0), WRITABLE | UXN | NORMAL_MEM, - ); + ) + .unwrap(); } for addr in (kernel_end..user_space_end).step_by(LEVEL2_BLOCK_SIZE) { - let _ = map_l2_block( + map_l2_block( addr, addr, core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0), EL0_ACCESSIBLE | WRITABLE | PXN | NORMAL_MEM, - ); + ) + .unwrap(); } for addr in (PERIPHERAL_BASE..LEVEL1_BLOCK_SIZE).step_by(LEVEL2_BLOCK_SIZE) { - let _ = alloc_block_l2_explicit( + alloc_block_l2_explicit( addr, addr, core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0), EL0_ACCESSIBLE | WRITABLE | UXN | PXN | DEVICE_MEM, - ); + ) + .unwrap(); } - for addr in (EL1_STACK_BOTTOM..EL1_STACK_TOP) - .rev() - .step_by(LEVEL2_BLOCK_SIZE) - { - let _ = alloc_block_l2( - addr, - core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR1), - WRITABLE | NORMAL_MEM, - ); - } + allocate_memory( + EL1_STACK_TOP - EL1_STACK_SIZE + 0x10, + EL1_STACK_SIZE, + PhysSource::Any, + WRITABLE | NORMAL_MEM, + ) + .unwrap(); } } diff --git a/src/lib.rs b/src/lib.rs index 057fea7..f5358c7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,7 +14,8 @@ use heap::Heap; use crate::{ aarch64::mmu::{ - allocate_memory, KERNEL_VIRTUAL_MEM_SPACE, LEVEL2_BLOCK_SIZE, NORMAL_MEM, UXN, WRITABLE, + allocate_memory, PhysSource, KERNEL_VIRTUAL_MEM_SPACE, LEVEL2_BLOCK_SIZE, NORMAL_MEM, UXN, + WRITABLE, }, interrupt_handlers::initialize_interrupt_handler, logger::DefaultLogger, @@ -33,7 +34,7 @@ pub unsafe fn init_kernel_heap() { let start = core::ptr::addr_of_mut!(__kernel_end) as usize | KERNEL_VIRTUAL_MEM_SPACE; let size = LEVEL2_BLOCK_SIZE * 2; - allocate_memory(start, size, NORMAL_MEM | UXN | WRITABLE).unwrap(); + allocate_memory(start, size, PhysSource::Any, NORMAL_MEM | UXN | WRITABLE).unwrap(); let heap = core::ptr::addr_of_mut!(GLOBAL_ALLOCATOR); (*heap).init(start, start + size); } diff --git a/src/main.rs 
b/src/main.rs index 169deb3..25d07b0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -12,7 +12,7 @@ extern crate alloc; use alloc::vec::Vec; use nova::{ aarch64::{ - mmu::{allocate_memory_explicit, EL0_ACCESSIBLE, NORMAL_MEM, PXN, UXN, WRITABLE}, + mmu::{allocate_memory, PhysSource, EL0_ACCESSIBLE, NORMAL_MEM, PXN, UXN, WRITABLE}, registers::{daif, read_id_aa64mmfr0_el1}, }, configuration::mmu::initialize_mmu_translation_tables, @@ -102,10 +102,10 @@ pub extern "C" fn kernel_main() -> ! { // Frame Buffer memory range // TODO: this is just temporary - allocate_memory_explicit( + allocate_memory( 0x3c100000, 1080 * 1920 * 4, - 0x3c100000, + PhysSource::Explicit(0x3c100000), NORMAL_MEM | PXN | UXN | WRITABLE | EL0_ACCESSIBLE, ) .unwrap();