feat: Update MMU configuration and memory allocation functions

2026-03-15 01:17:31 +01:00
parent 34a73f0095
commit e84ce6ab91
7 changed files with 439 additions and 112 deletions

View File

@@ -49,6 +49,7 @@ SECTIONS {
__stack_end_el0 = .;
}
. = ALIGN(2M);
_end = .;
}
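The new _end symbol exposes the 2 MiB-aligned end of the image to Rust. A minimal sketch of how such a linker symbol is typically consumed (illustrative, not part of this commit; only the symbol's address is meaningful, never its value):

    // Illustrative only: reading a linker-provided symbol from Rust.
    unsafe extern "C" {
        static _end: u64;
    }

    fn kernel_image_end() -> usize {
        // Taking the address only; reading _end itself would be meaningless.
        unsafe { (&raw const _end) as usize }
    }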

View File

@@ -1,9 +1,7 @@
use core::u64::MAX;
use core::{panic, u64::MAX};
use nova_error::NovaError;
use crate::{println, PERIPHERAL_BASE};
unsafe extern "C" {
static mut __translation_table_l2_start: u64;
static __stack_start_el0: u64;
@@ -14,26 +12,30 @@ unsafe extern "C" {
const BLOCK: u64 = 0b01;
const TABLE: u64 = 0b11;
const EL0_ACCESSIBLE: u64 = 1 << 6;
pub const EL0_ACCESSIBLE: u64 = 1 << 6;
const WRITABLE: u64 = 0 << 7;
const READ_ONLY: u64 = 1 << 7;
pub const WRITABLE: u64 = 0 << 7;
pub const READ_ONLY: u64 = 1 << 7;
const ACCESS_FLAG: u64 = 1 << 10;
const INNER_SHAREABILITY: u64 = 0b11 << 8;
const NORMAL_MEM: u64 = 0 << 2;
const DEVICE_MEM: u64 = 1 << 2;
pub const NORMAL_MEM: u64 = 0 << 2;
pub const DEVICE_MEM: u64 = 1 << 2;
/// Disallow EL1 Execution.
const PXN: u64 = 1 << 53;
pub const PXN: u64 = 1 << 53;
/// Disallow EL0 Execution.
const UXN: u64 = 1 << 54;
pub const UXN: u64 = 1 << 54;
const GRANULARITY: usize = 4 * 1024;
pub const GRANULARITY: usize = 4 * 1024;
const TABLE_ENTRY_COUNT: usize = GRANULARITY / size_of::<u64>(); // 512 entries per table
const LEVEL2_BLOCK_SIZE: usize = TABLE_ENTRY_COUNT * GRANULARITY;
pub const LEVEL1_BLOCK_SIZE: usize = TABLE_ENTRY_COUNT * TABLE_ENTRY_COUNT * GRANULARITY; // 1 GiB
pub const LEVEL2_BLOCK_SIZE: usize = TABLE_ENTRY_COUNT * GRANULARITY; // 2 MiB
const L2_BLOCK_BITMAP_WORDS: usize = LEVEL2_BLOCK_SIZE / (64 * GRANULARITY);
const MAX_PAGE_COUNT: usize = 1 * 1024 * 1024 * 1024 / GRANULARITY;
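For reference: with a 4 KiB granule a table holds 4096 / 8 = 512 entries, so an L2 block spans 512 * 4 KiB = 2 MiB, an L1 block 512 * 2 MiB = 1 GiB, and one L2 block covers exactly 8 bitmap words of 64 pages each. Compile-time checks along these lines could sit next to the definitions (illustrative, not in the commit):

    const _: () = assert!(TABLE_ENTRY_COUNT == 512);
    const _: () = assert!(LEVEL2_BLOCK_SIZE == 2 * 1024 * 1024); // 2 MiB
    const _: () = assert!(LEVEL1_BLOCK_SIZE == 1024 * 1024 * 1024); // 1 GiB
    const _: () = assert!(L2_BLOCK_BITMAP_WORDS == 8); // 8 words * 64 bits = 512 pages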
#[repr(align(4096))]
@@ -41,121 +43,365 @@ pub struct PageTable([u64; TABLE_ENTRY_COUNT]);
#[no_mangle]
pub static mut TRANSLATIONTABLE_TTBR0: PageTable = PageTable([0; 512]);
pub static mut TRANSLATIONTABLE_TTBR0_L2_0: PageTable = PageTable([0; 512]);
static mut PAGING_BITMAP: [u64; MAX_PAGE_COUNT / 64] = [0; MAX_PAGE_COUNT / 64];
pub fn init_translation_table() {
unsafe {
TRANSLATIONTABLE_TTBR0.0[0] =
table_descriptor_entry(&raw mut TRANSLATIONTABLE_TTBR0_L2_0 as usize);
println!("{}", &raw mut TRANSLATIONTABLE_TTBR0_L2_0 as u64);
println!("{}", TRANSLATIONTABLE_TTBR0.0[0] & 0x0000_FFFF_FFFF_F000);
for i in 0..512 {
let addr = 0x0 + (i * LEVEL2_BLOCK_SIZE);
if addr < &_data as *const _ as usize {
let _ = alloc_block_l2(
addr,
&TRANSLATIONTABLE_TTBR0,
EL0_ACCESSIBLE | READ_ONLY | NORMAL_MEM,
);
} else if addr < &__kernel_end as *const _ as usize {
let _ = alloc_block_l2(addr, &TRANSLATIONTABLE_TTBR0, WRITABLE | UXN | NORMAL_MEM);
} else if addr < PERIPHERAL_BASE {
let _ = alloc_block_l2(
addr,
&TRANSLATIONTABLE_TTBR0,
EL0_ACCESSIBLE | WRITABLE | PXN | NORMAL_MEM,
);
} else {
let _ = alloc_block_l2(
addr,
&TRANSLATIONTABLE_TTBR0,
EL0_ACCESSIBLE | WRITABLE | UXN | PXN | DEVICE_MEM,
);
};
}
println!("Done");
}
}
pub fn alloc_page() -> Result<usize, NovaError> {
find_unallocated_page()
}
fn find_unallocated_page() -> Result<usize, NovaError> {
for (i, entry) in unsafe { PAGING_BITMAP }.iter().enumerate() {
if *entry != u64::MAX {
for offset in 0..64 {
if entry >> offset & 0b1 == 0 {
return Ok((i * 64 + offset) * GRANULARITY);
}
}
}
}
Err(NovaError::Paging)
}
pub fn alloc_block_l2(
virtual_addr: usize,
base_table: &PageTable,
additional_flags: u64,
) -> Result<(), NovaError> {
let physical_address = find_unallocated_block_l2()?;
let l2_off = virtual_addr / GRANULARITY / TABLE_ENTRY_COUNT;
let l1_off = l2_off / TABLE_ENTRY_COUNT;
let l2_table =
unsafe { &mut *((base_table.0[l1_off] & 0x0000_FFFF_FFFF_F000) as *mut PageTable) };
let new_entry = create_block_descriptor_entry(physical_address, additional_flags);
l2_table.0[l2_off] = new_entry;
allocate_block_l2(physical_address);
Ok(())
}
pub fn allocate_memory(
mut virtual_address: usize,
mut size: usize,
additional_flags: u64,
) -> Result<(), NovaError> {
if virtual_address % GRANULARITY != 0 {
return Err(NovaError::Misalignment);
}
let level1_blocks = size / LEVEL1_BLOCK_SIZE;
size %= LEVEL1_BLOCK_SIZE;
let level2_blocks = size / LEVEL2_BLOCK_SIZE;
size %= LEVEL2_BLOCK_SIZE;
let level3_pages = size / GRANULARITY;
if size % GRANULARITY != 0 {
return Err(NovaError::InvalidGranularity);
}
if level1_blocks > 0 {
todo!("Currently not supported");
}
for _ in 0..level2_blocks {
unsafe {
alloc_block_l2(
virtual_address,
&mut TRANSLATIONTABLE_TTBR0,
additional_flags,
)?;
}
virtual_address += LEVEL2_BLOCK_SIZE;
}
for _ in 0..level3_pages {
unsafe {
alloc_page(
virtual_address,
&mut TRANSLATIONTABLE_TTBR0,
additional_flags,
)?;
}
virtual_address += GRANULARITY;
}
Ok(())
}
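allocate_memory carves a request into as many 2 MiB level-2 blocks as fit, then maps the page-aligned remainder as 4 KiB pages (1 GiB level-1 blocks are still a todo!()). A host-runnable sketch of just that decomposition, with the constants restated:

    // Standalone sketch of the size decomposition (hypothetical helper, not in the commit).
    fn split_request(mut size: usize) -> (usize, usize, usize) {
        const GRANULARITY: usize = 4 * 1024;
        const LEVEL2_BLOCK_SIZE: usize = 512 * GRANULARITY;
        const LEVEL1_BLOCK_SIZE: usize = 512 * LEVEL2_BLOCK_SIZE;
        let l1 = size / LEVEL1_BLOCK_SIZE;
        size %= LEVEL1_BLOCK_SIZE;
        let l2 = size / LEVEL2_BLOCK_SIZE;
        size %= LEVEL2_BLOCK_SIZE;
        (l1, l2, size / GRANULARITY)
    }

    fn main() {
        // 5 MiB => zero 1 GiB blocks, two 2 MiB blocks, 256 pages of 4 KiB.
        assert_eq!(split_request(5 * 1024 * 1024), (0, 2, 256));
    }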
pub fn allocate_memory_explicit(
mut virtual_address: usize,
mut size: usize,
mut physical_address: usize,
additional_flags: u64,
) -> Result<(), NovaError> {
if virtual_address % GRANULARITY != 0 {
return Err(NovaError::Misalignment);
}
let level1_blocks = size / LEVEL1_BLOCK_SIZE;
size %= LEVEL1_BLOCK_SIZE;
let level2_blocks = size / LEVEL2_BLOCK_SIZE;
size %= LEVEL2_BLOCK_SIZE;
let level3_pages = size / GRANULARITY;
if size % GRANULARITY != 0 {
return Err(NovaError::InvalidGranularity);
}
if level1_blocks > 0 {
todo!("Currently not supported");
}
for _ in 0..level2_blocks {
unsafe {
alloc_block_l2_explicit(
virtual_address,
physical_address,
&mut TRANSLATIONTABLE_TTBR0,
additional_flags,
)?;
}
virtual_address += LEVEL2_BLOCK_SIZE;
physical_address += LEVEL2_BLOCK_SIZE;
}
for _ in 0..level3_pages {
unsafe {
alloc_page_explicit(
virtual_address,
physical_address,
&mut TRANSLATIONTABLE_TTBR0,
additional_flags,
)?;
}
virtual_address += GRANULARITY;
physical_address += GRANULARITY;
}
Ok(())
}
fn find_unallocated_block_l2() -> Result<usize, NovaError> {
let mut count = 0;
for (i, entry) in unsafe { PAGING_BITMAP }.iter().enumerate() {
if *entry == 0 {
count += 1;
} else {
count = 0;
}
if count == 8 {
return Ok((i - 7) * 64 * GRANULARITY);
}
}
Err(NovaError::Paging)
}
pub fn alloc_page(
virtual_address: usize,
base_table: &mut PageTable,
additional_flags: u64,
) -> Result<(), NovaError> {
map_page(
virtual_address,
reserve_page(),
base_table,
additional_flags,
)
}
pub fn alloc_page_explicit(
virtual_address: usize,
physical_address: usize,
base_table: &mut PageTable,
additional_flags: u64,
) -> Result<(), NovaError> {
reserve_page_explicit(physical_address)?;
map_page(
virtual_address,
physical_address,
base_table,
additional_flags,
)
}
fn allocate_block_l2(physical_address: usize) {
let page = physical_address / GRANULARITY;
for i in 0..8 {
unsafe { PAGING_BITMAP[(page / 64) + i] = MAX };
}
}
fn map_page(
virtual_address: usize,
physical_address: usize,
base_table: &mut PageTable,
additional_flags: u64,
) -> Result<(), NovaError> {
let (l1_off, l2_off, l3_off) = virtual_address_to_table_offset(virtual_address);
let table = navigate_table(base_table, [l1_off, l2_off, 0], 2)?;
if table.0[l3_off] & 0b11 > 0 {
return Err(NovaError::Paging);
}
table.0[l3_off] = create_block_descriptor_entry(physical_address, additional_flags);
Ok(())
}
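map_page installs the leaf entry with create_block_descriptor_entry, whose type bits are BLOCK (0b01). Note that on AArch64 a level 3 descriptor is only valid with bits[1:0] == 0b11 (page); 0b01 is a block and only valid at levels 1 and 2, so the encoding on the 4 KiB path may be worth double-checking against the ARM ARM. A small host-runnable reminder of the type-bit scheme:

    // Illustrative: how the low two descriptor bits are decoded on AArch64.
    fn entry_kind(entry: u64) -> &'static str {
        match entry & 0b11 {
            0b00 | 0b10 => "invalid",
            0b01 => "block (levels 1-2 only)",
            _ => "table (levels 0-2) or page (level 3)",
        }
    }

    fn main() {
        assert_eq!(entry_kind(0), "invalid");
        assert_eq!(entry_kind(0x4020_0000 | 0b01), "block (levels 1-2 only)");
        assert_eq!(entry_kind(0x4020_3000 | 0b11), "table (levels 0-2) or page (level 3)");
    }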
pub fn alloc_block_l2(
virtual_addr: usize,
base_table: &mut PageTable,
additional_flags: u64,
) -> Result<(), NovaError> {
map_l2_block(virtual_addr, reserve_block(), base_table, additional_flags)
}
pub fn alloc_block_l2_explicit(
virtual_addr: usize,
physical_address: usize,
base_table: &mut PageTable,
additional_flags: u64,
) -> Result<(), NovaError> {
reserve_block_explicit(physical_address)?;
map_l2_block(virtual_addr, physical_address, base_table, additional_flags)
}
pub fn map_l2_block(
virtual_addr: usize,
physical_address: usize,
base_table: &mut PageTable,
additional_flags: u64,
) -> Result<(), NovaError> {
let (l1_off, l2_off, _) = virtual_address_to_table_offset(virtual_addr);
let table = navigate_table(base_table, [l1_off, 0, 0], 1)?;
// Verify virtual address is available.
if table.0[l2_off] & 0b11 != 0 {
return Err(NovaError::Paging);
}
let new_entry = create_block_descriptor_entry(physical_address, additional_flags);
table.0[l2_off] = new_entry;
Ok(())
}
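map_l2_block refuses to overwrite a live entry (any non-zero type bits) before installing the 2 MiB-masked physical address. A toy, host-runnable version of that bookkeeping on a bare array (names and flag value assumed):

    // Toy sketch of the map_l2_block occupancy check and install.
    fn install_l2(table: &mut [u64; 512], l2_off: usize, phys: u64, flags: u64) -> Result<(), ()> {
        if table[l2_off] & 0b11 != 0 {
            return Err(()); // virtual address already mapped
        }
        table[l2_off] = (phys & 0x0000_FFFF_FFE0_0000) | 0b01 /* BLOCK */ | flags;
        Ok(())
    }

    fn main() {
        let mut table = [0u64; 512];
        assert!(install_l2(&mut table, 1, 0x0020_0000, 1 << 10).is_ok());
        assert!(install_l2(&mut table, 1, 0x0040_0000, 1 << 10).is_err()); // occupied
    }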
pub fn reserve_range_explicit(
start_physical_address: usize,
end_physical_address: usize,
) -> Result<(), NovaError> {
let mut size = end_physical_address - start_physical_address;
let l1_blocks = size / LEVEL1_BLOCK_SIZE;
size %= LEVEL1_BLOCK_SIZE;
let l2_blocks = size / LEVEL2_BLOCK_SIZE;
size %= LEVEL2_BLOCK_SIZE;
let l3_pages = size / GRANULARITY;
if size % GRANULARITY != 0 {
return Err(NovaError::Misalignment);
}
if l1_blocks > 0 {
todo!();
}
let mut addr = start_physical_address;
for _ in 0..l2_blocks {
reserve_block_explicit(addr)?;
addr += LEVEL2_BLOCK_SIZE;
}
for _ in 0..l3_pages {
reserve_page_explicit(addr)?;
addr += GRANULARITY;
}
Ok(())
}
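reserve_range_explicit only validates the range length; it does not check that start_physical_address itself is 2 MiB-aligned when whole blocks are reserved, so a companion check like the following could be useful (an assumption sketched here, not something this commit performs):

    // Illustrative, host-runnable alignment probe.
    fn block_aligned(addr: usize) -> bool {
        const LEVEL2_BLOCK_SIZE: usize = 2 * 1024 * 1024;
        addr % LEVEL2_BLOCK_SIZE == 0
    }

    fn main() {
        assert!(block_aligned(0x4000_0000));
        assert!(!block_aligned(0x10_1000)); // page-aligned but not block-aligned
    }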
fn reserve_page() -> usize {
if let Some(address) = find_unallocated_page() {
let page = address / GRANULARITY;
let word_index = page / 64;
unsafe { PAGING_BITMAP[word_index] |= 1 << (page % 64) };
return address;
}
panic!("Out of Memory!");
}
fn reserve_page_explicit(physical_address: usize) -> Result<(), NovaError> {
let page = physical_address / GRANULARITY;
let word_index = page / 64;
if unsafe { PAGING_BITMAP[word_index] } & (1 << (page % 64)) > 0 {
return Err(NovaError::Paging);
}
unsafe { PAGING_BITMAP[word_index] |= 1 << (page % 64) };
Ok(())
}
fn reserve_block() -> usize {
if let Some(start) = find_contiguous_free_bitmap_words(L2_BLOCK_BITMAP_WORDS) {
for j in 0..L2_BLOCK_BITMAP_WORDS {
unsafe { PAGING_BITMAP[start + j] = MAX };
}
return start * 64 * GRANULARITY;
}
panic!("Out of Memory!");
}
fn reserve_block_explicit(physical_address: usize) -> Result<(), NovaError> {
let page = physical_address / GRANULARITY;
for i in 0..L2_BLOCK_BITMAP_WORDS {
unsafe {
if PAGING_BITMAP[(page / 64) + i] != 0 {
return Err(NovaError::Paging);
}
};
}
for i in 0..L2_BLOCK_BITMAP_WORDS {
unsafe {
PAGING_BITMAP[(page / 64) + i] = MAX;
};
}
Ok(())
}
fn create_block_descriptor_entry(addr: usize, additional_flags: u64) -> u64 {
let pxn = 0 << 53; // Privileged execute never
let uxn = 0 << 54; // Unprivileged execute never
(addr as u64 & 0x0000_FFFF_FFE0_0000)
| BLOCK
| ACCESS_FLAG
| pxn
| uxn
| INNER_SHAREABILITY
| additional_flags
}
fn create_block_descriptor_entry(physical_address: usize, additional_flags: u64) -> u64 {
(physical_address as u64 & 0x0000_FFFF_FFE0_0000)
| BLOCK
| ACCESS_FLAG
| INNER_SHAREABILITY
| additional_flags
}
pub fn table_descriptor_entry(addr: usize) -> u64 {
fn create_table_descriptor_entry(addr: usize) -> u64 {
0 | (addr as u64 & 0x0000_FFFF_FFFF_F000) | TABLE
}
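The block descriptor keeps the output address in bits [47:21] and ORs in the type, Access Flag, shareability and the caller's attribute bits. A standalone worked example, constants restated from above:

    // Worked example of the block-descriptor layout.
    const BLOCK: u64 = 0b01;
    const ACCESS_FLAG: u64 = 1 << 10;
    const INNER_SHAREABILITY: u64 = 0b11 << 8;
    const READ_ONLY: u64 = 1 << 7;

    fn main() {
        let entry = (0x4060_0000u64 & 0x0000_FFFF_FFE0_0000)
            | BLOCK
            | ACCESS_FLAG
            | INNER_SHAREABILITY
            | READ_ONLY;
        assert_eq!(entry & 0x0000_FFFF_FFE0_0000, 0x4060_0000); // output address, bits [47:21]
        assert_eq!(entry & 0b11, BLOCK); // type bits
        assert_ne!(entry & ACCESS_FLAG, 0); // no Access Flag fault on first use
    }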
fn virtual_address_to_table_offset(virtual_addr: usize) -> (usize, usize, usize) {
let absolute_page_off = virtual_addr / GRANULARITY;
let l3_off = absolute_page_off % TABLE_ENTRY_COUNT;
let l2_off = (absolute_page_off / TABLE_ENTRY_COUNT) % TABLE_ENTRY_COUNT;
let l1_off = (absolute_page_off / TABLE_ENTRY_COUNT / TABLE_ENTRY_COUNT) % TABLE_ENTRY_COUNT;
(l1_off, l2_off, l3_off)
}
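With a 4 KiB granule each index is a 9-bit slice of the virtual address: l3 = VA[20:12], l2 = VA[29:21], l1 = VA[38:30]. A host-runnable check of the math for the example VA 0x4020_3000:

    fn main() {
        const GRANULARITY: usize = 4 * 1024;
        const TABLE_ENTRY_COUNT: usize = 512;
        let va = 0x4020_3000usize;
        let page = va / GRANULARITY; // 0x40203
        let l3 = page % TABLE_ENTRY_COUNT;
        let l2 = (page / TABLE_ENTRY_COUNT) % TABLE_ENTRY_COUNT;
        let l1 = (page / TABLE_ENTRY_COUNT / TABLE_ENTRY_COUNT) % TABLE_ENTRY_COUNT;
        assert_eq!((l1, l2, l3), (1, 1, 3));
    }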
fn navigate_table(
initial_table: &mut PageTable,
offsets: [usize; 3],
offsets_size: usize,
) -> Result<&mut PageTable, NovaError> {
let root_table_ptr = initial_table as *mut PageTable;
let mut table = initial_table;
for i in 0..offsets_size {
let offset = offsets[i];
match table.0[offset] & 0b11 {
0 => {
let new_table_addr = reserve_page();
table.0[offset] = create_table_descriptor_entry(new_table_addr);
table =
unsafe { &mut *(get_table_entry_address(table.0[offset]) as *mut PageTable) };
map_page(
new_table_addr,
new_table_addr,
unsafe { &mut *root_table_ptr },
NORMAL_MEM | WRITABLE | PXN | UXN,
)?;
}
1 => return Err(NovaError::Paging),
3 => {
table =
unsafe { &mut *(get_table_entry_address(table.0[offset]) as *mut PageTable) }
}
_ => panic!(),
};
}
Ok(table)
}
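navigate_table descends one level per offset: an empty slot (type bits 00) gets a fresh table page that is linked in and identity-mapped so it stays writable, a block entry (01) aborts the walk, and a table entry (11) is followed. A toy two-level walk showing the same dispatch, using indices instead of physical addresses (host-runnable):

    // Toy two-level walk mirroring the dispatch on the type bits.
    fn walk(l1: &[u64; 512], tables: &[[u64; 512]], l1_off: usize, l2_off: usize) -> Option<u64> {
        match l1[l1_off] & 0b11 {
            0b11 => {
                let idx = (l1[l1_off] >> 2) as usize; // toy: payload is a table index
                Some(tables[idx][l2_off])
            }
            _ => None, // invalid or block: nothing to descend into
        }
    }

    fn main() {
        let mut l1 = [0u64; 512];
        let mut l2 = [[0u64; 512]; 1];
        l2[0][7] = 0xABCD;
        l1[1] = (0 << 2) | 0b11; // table entry pointing at tables[0]
        assert_eq!(walk(&l1, &l2, 1, 7), Some(0xABCD));
        assert_eq!(walk(&l1, &l2, 2, 7), None);
    }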
fn find_unallocated_page() -> Option<usize> {
for (i, entry) in unsafe { PAGING_BITMAP }.iter().enumerate() {
if *entry != u64::MAX {
for offset in 0..64 {
if entry >> offset & 0b1 == 0 {
return Some((i * 64 + offset) * GRANULARITY);
}
}
}
}
None
}
fn find_contiguous_free_bitmap_words(required_words: usize) -> Option<usize> {
let mut run_start = 0;
let mut run_len = 0;
for (i, entry) in unsafe { PAGING_BITMAP }.iter().enumerate() {
if *entry == 0 {
if run_len == 0 {
run_start = i;
}
run_len += 1;
if run_len == required_words {
return Some(run_start);
}
} else {
run_len = 0;
}
}
None
}
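The scan is word-granular: it looks for required_words consecutive all-zero u64 words, i.e. runs of 64 free pages. Note that the returned run is first-fit and not necessarily aligned to an 8-word (2 MiB) boundary. A host-runnable sketch:

    // Sketch of the word-level first-fit scan.
    fn find_run(bitmap: &[u64], required_words: usize) -> Option<usize> {
        let mut run_start = 0;
        let mut run_len = 0;
        for (i, word) in bitmap.iter().enumerate() {
            if *word == 0 {
                if run_len == 0 {
                    run_start = i;
                }
                run_len += 1;
                if run_len == required_words {
                    return Some(run_start);
                }
            } else {
                run_len = 0;
            }
        }
        None
    }

    fn main() {
        let bitmap = [u64::MAX, 0, 0, 0, 0, 0, 0, 0, 0, u64::MAX];
        // Eight free words starting at index 1 => a 2 MiB block fits there.
        assert_eq!(find_run(&bitmap, 8), Some(1));
        assert_eq!(find_run(&bitmap, 16), None);
    }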
#[inline]
fn get_table_entry_address(entry: u64) -> u64 {
entry & 0x0000_FFFF_FFFF_F000
}

View File

@@ -31,3 +31,64 @@ const AS: u64 = 0b1 << 36; // configure an ASID size of 16 bits
#[no_mangle]
pub static TCR_EL1_CONF: u64 = IPS | TG0 | TG1 | T0SZ | T1SZ | SH0 | SH1 | EPD1 | AS;
pub mod mmu {
use crate::{
aarch64::mmu::{
alloc_block_l2_explicit, map_l2_block, reserve_range_explicit, DEVICE_MEM,
EL0_ACCESSIBLE, LEVEL1_BLOCK_SIZE, LEVEL2_BLOCK_SIZE, NORMAL_MEM, PXN, READ_ONLY,
TRANSLATIONTABLE_TTBR0, UXN, WRITABLE,
},
PERIPHERAL_BASE,
};
extern "C" {
static _data: u64;
static _end: u64;
static __kernel_end: u64;
}
pub fn initialize_mmu_translation_tables() {
let shared_segment_end = unsafe { &_data } as *const _ as usize;
let kernel_end = unsafe { &__kernel_end } as *const _ as usize;
let user_space_end = unsafe { &_end } as *const _ as usize;
reserve_range_explicit(0x0, user_space_end).unwrap();
for addr in (0..shared_segment_end).step_by(LEVEL2_BLOCK_SIZE) {
let _ = map_l2_block(
addr,
addr,
unsafe { &mut TRANSLATIONTABLE_TTBR0 },
EL0_ACCESSIBLE | READ_ONLY | NORMAL_MEM,
);
}
for addr in (shared_segment_end..kernel_end).step_by(LEVEL2_BLOCK_SIZE) {
let _ = map_l2_block(
addr,
addr,
unsafe { &mut TRANSLATIONTABLE_TTBR0 },
WRITABLE | UXN | NORMAL_MEM,
);
}
for addr in (kernel_end..user_space_end).step_by(LEVEL2_BLOCK_SIZE) {
let _ = map_l2_block(
addr,
addr,
unsafe { &mut TRANSLATIONTABLE_TTBR0 },
EL0_ACCESSIBLE | WRITABLE | PXN | NORMAL_MEM,
);
}
for addr in (PERIPHERAL_BASE..LEVEL1_BLOCK_SIZE).step_by(LEVEL2_BLOCK_SIZE) {
let _ = alloc_block_l2_explicit(
addr,
addr,
unsafe { &mut TRANSLATIONTABLE_TTBR0 },
EL0_ACCESSIBLE | WRITABLE | UXN | PXN | DEVICE_MEM,
);
}
}
}
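The initializer identity-maps four regions with distinct attributes and leaves the gap between the end of user space and the peripherals unmapped. A condensed, host-runnable sketch of that policy (the bounds here are made-up example values; the real ones come from the linker symbols and PERIPHERAL_BASE):

    #[derive(Debug, PartialEq)]
    enum Region { Shared, Kernel, User, Device }

    fn classify(
        addr: usize,
        data: usize,
        kernel_end: usize,
        user_end: usize,
        peripheral_base: usize,
    ) -> Option<Region> {
        if addr < data {
            Some(Region::Shared) // EL0_ACCESSIBLE | READ_ONLY | NORMAL_MEM
        } else if addr < kernel_end {
            Some(Region::Kernel) // WRITABLE | UXN | NORMAL_MEM
        } else if addr < user_end {
            Some(Region::User) // EL0_ACCESSIBLE | WRITABLE | PXN | NORMAL_MEM
        } else if addr >= peripheral_base {
            Some(Region::Device) // EL0_ACCESSIBLE | WRITABLE | UXN | PXN | DEVICE_MEM
        } else {
            None // gap between user space and the peripherals stays unmapped
        }
    }

    fn main() {
        let (data, kernel_end, user_end, base) = (0x8_0000, 0x20_0000, 0x40_0000, 0x3F00_0000);
        assert_eq!(classify(0x0, data, kernel_end, user_end, base), Some(Region::Shared));
        assert_eq!(classify(0x100_0000, data, kernel_end, user_end, base), None);
        assert_eq!(classify(0x3F20_0000, data, kernel_end, user_end, base), Some(Region::Device));
    }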

View File

@@ -24,8 +24,8 @@ pub struct FrameBuffer {
pixel_depth: u32, // Bits per pixel
pitch: u32, // Pixels per row
rows: u32, // Rows
start_addr: *mut u32,
size: u32, //Bytes
pub start_addr: *mut u32,
pub size: u32, //Bytes
}
pub const RED: u32 = 0x00FF0000;
@@ -37,8 +37,11 @@ pub const YELLOW: u32 = 0x00FFFF00;
impl FrameBuffer {
pub fn draw_pixel(&self, x: u32, y: u32, color: u32) {
let offset = x + y * self.pitch;
if x >= self.pitch || y >= self.rows {
return;
}
unsafe {
write_volatile(self.start_addr.add(offset as usize), color);
write_volatile(self.start_addr.byte_add(4 * offset as usize), color);
}
}
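For a *mut u32, start_addr.add(offset) and start_addr.byte_add(4 * offset) address the same pixel, and the new guard drops out-of-range coordinates before the volatile write. The same math on an ordinary buffer (host-runnable):

    fn draw_pixel(buf: &mut [u32], pitch: u32, rows: u32, x: u32, y: u32, color: u32) {
        if x >= pitch || y >= rows {
            return; // same guard as above: silently drop out-of-range pixels
        }
        let offset = (x + y * pitch) as usize;
        buf[offset] = color;
    }

    fn main() {
        let (pitch, rows) = (4u32, 2u32);
        let mut buf = vec![0u32; (pitch * rows) as usize];
        draw_pixel(&mut buf, pitch, rows, 1, 1, 0x00FF_0000);
        assert_eq!(buf[5], 0x00FF_0000);
        draw_pixel(&mut buf, pitch, rows, 9, 9, 0xFFFF_FFFF); // out of range, ignored
    }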

View File

@@ -35,7 +35,7 @@ pub unsafe fn init_heap() {
#[panic_handler]
fn panic(_panic: &PanicInfo) -> ! {
loop {
println!("Panic");
println!("Panic: {}", _panic.message());
}
}

View File

@@ -10,12 +10,15 @@ use core::{
extern crate alloc;
use alloc::boxed::Box;
use nova::{
aarch64::{
mmu::init_translation_table,
mmu::{
allocate_memory_explicit, EL0_ACCESSIBLE, LEVEL2_BLOCK_SIZE, NORMAL_MEM, PXN, UXN,
WRITABLE,
},
registers::{daif, read_id_aa64mmfr0_el1},
},
configuration::mmu::initialize_mmu_translation_tables,
framebuffer::{FrameBuffer, BLUE, GREEN, RED},
get_current_el, init_heap,
interrupt_handlers::{enable_irq_source, IRQSource},
@@ -67,7 +70,18 @@ pub extern "C" fn main() -> ! {
unsafe {
init_heap();
init_translation_table();
initialize_mmu_translation_tables();
// Frame Buffer memory range
// TODO: this is just temporary
// TODO: Investigate why the size is off
allocate_memory_explicit(
0x3c100000,
1080 * 1920 * 4 + LEVEL2_BLOCK_SIZE + LEVEL2_BLOCK_SIZE,
0x3c100000,
NORMAL_MEM | PXN | UXN | WRITABLE | EL0_ACCESSIBLE,
)
.unwrap();
configure_mmu_el1();
};
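Arithmetic check on the size used above, restated standalone: 1080 * 1920 * 4 = 8,294,400 bytes (exactly 2025 pages), so with the two extra LEVEL2_BLOCK_SIZE paddings the request decomposes into five 2 MiB blocks plus 489 pages and passes the granularity check:

    fn main() {
        const GRANULARITY: usize = 4 * 1024;
        const LEVEL2_BLOCK_SIZE: usize = 512 * GRANULARITY;
        let size = 1080 * 1920 * 4 + 2 * LEVEL2_BLOCK_SIZE;
        assert_eq!(size % GRANULARITY, 0); // passes the misalignment check
        assert_eq!(size / LEVEL2_BLOCK_SIZE, 5); // five 2 MiB blocks...
        assert_eq!((size % LEVEL2_BLOCK_SIZE) / GRANULARITY, 489); // ...plus 489 pages
    }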
@@ -117,22 +131,22 @@ pub extern "C" fn el0() -> ! {
let fb = FrameBuffer::default();
for i in 0..1080 {
fb.draw_pixel(50, i, RED);
}
fb.draw_square(500, 500, 600, 700, RED);
fb.draw_square_fill(800, 800, 900, 900, GREEN);
fb.draw_square_fill(1000, 800, 1200, 700, BLUE);
fb.draw_square_fill(900, 100, 800, 150, RED | BLUE);
fb.draw_string("Hello World! :D\nTest next Line", 500, 5, 3, BLUE);
fb.draw_function(cos, 100, 101, RED);
fb.draw_function(cos, 0, 101, RED);
loop {
let temp = mailbox::read_soc_temp([0]).unwrap();
println!("{} °C", temp[1] / 1000);
blink_gpio(SpecificGpio::OnboardLed as u8, 500);
let b = Box::new([1, 2, 3, 4]);
println!("{:?}", b);
}
}

View File

@@ -8,5 +8,7 @@ pub enum NovaError {
Mailbox,
HeapFull,
EmptyHeapSegmentNotAllowed,
Misalignment,
InvalidGranularity,
Paging,
}