Compare commits

...

3 Commits

SHA1       Message                         Date
bc9091f942 fix: linter                     2026-03-15 14:17:23 +01:00
dfb4d094a3 docs: add code documentation    2026-03-15 13:18:12 +01:00
95a5037b91 fix: Level 3 translation fault  2026-03-15 12:27:54 +01:00
7 changed files with 196 additions and 118 deletions

View File

@@ -1,4 +1,4 @@
-use core::{panic, u64::MAX};
+use core::panic;
 use nova_error::NovaError;
@@ -11,10 +11,14 @@ unsafe extern "C" {
 const BLOCK: u64 = 0b01;
 const TABLE: u64 = 0b11;
+const PAGE: u64 = 0b11;
+/// Allow EL0 to access this section
 pub const EL0_ACCESSIBLE: u64 = 1 << 6;
+/// Allow a page or block to be written.
 pub const WRITABLE: u64 = 0 << 7;
+/// Disallow a page or block to be written.
 pub const READ_ONLY: u64 = 1 << 7;
 const ACCESS_FLAG: u64 = 1 << 10;
@@ -37,7 +41,7 @@ pub const LEVEL2_BLOCK_SIZE: usize = TABLE_ENTRY_COUNT * GRANULARITY;
 const L2_BLOCK_BITMAP_WORDS: usize = LEVEL2_BLOCK_SIZE / (64 * GRANULARITY);
-const MAX_PAGE_COUNT: usize = 1 * 1024 * 1024 * 1024 / GRANULARITY;
+const MAX_PAGE_COUNT: usize = 1024 * 1024 * 1024 / GRANULARITY;
 #[repr(align(4096))]
 pub struct PageTable([u64; TABLE_ENTRY_COUNT]);
@@ -46,12 +50,13 @@ pub static mut TRANSLATIONTABLE_TTBR0: PageTable = PageTable([0; 512]);
 static mut PAGING_BITMAP: [u64; MAX_PAGE_COUNT / 64] = [0; MAX_PAGE_COUNT / 64];
+/// Allocate a memory block of `size` starting at `virtual_address`.
 pub fn allocate_memory(
     mut virtual_address: usize,
     mut size: usize,
     additional_flags: u64,
 ) -> Result<(), NovaError> {
-    if virtual_address % GRANULARITY != 0 {
+    if !virtual_address.is_multiple_of(GRANULARITY) {
         return Err(NovaError::Misalignment);
     }
@@ -60,7 +65,7 @@ pub fn allocate_memory(
     let level2_blocks = size / LEVEL2_BLOCK_SIZE;
     size %= LEVEL2_BLOCK_SIZE;
     let level3_pages = size / GRANULARITY;
-    if size % GRANULARITY != 0 {
+    if !size.is_multiple_of(GRANULARITY) {
         return Err(NovaError::InvalidGranularity);
     }
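Several `x % n != 0` guards become `!x.is_multiple_of(n)` in this comparison, presumably what the `fix: linter` commit addresses (Clippy suggests this rewrite). `is_multiple_of` is a method on unsigned integers in recent stable Rust; a standalone behaviour sketch, not project code:

    fn main() {
        let granularity: usize = 4096;
        assert!(0x4000_usize.is_multiple_of(granularity)); // same as 0x4000 % 4096 == 0
        assert!(!0x4001_usize.is_multiple_of(granularity));
        // Unlike `%`, a zero divisor does not panic: only 0 is a multiple of 0.
        assert!(!5_usize.is_multiple_of(0));
        assert!(0_usize.is_multiple_of(0));
    }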
@@ -69,44 +74,45 @@ pub fn allocate_memory(
     }
     for _ in 0..level2_blocks {
-        unsafe {
-            alloc_block_l2(
-                virtual_address,
-                &mut TRANSLATIONTABLE_TTBR0,
-                additional_flags,
-            )?;
-        }
+        alloc_block_l2(
+            virtual_address,
+            core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
+            additional_flags,
+        )?;
         virtual_address += LEVEL2_BLOCK_SIZE;
     }
     for _ in 0..level3_pages {
-        unsafe {
-            alloc_page(
-                virtual_address,
-                &mut TRANSLATIONTABLE_TTBR0,
-                additional_flags,
-            )?;
-        }
+        alloc_page(
+            virtual_address,
+            core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
+            additional_flags,
+        )?;
         virtual_address += GRANULARITY;
     }
     Ok(())
 }
+/// Allocate a memory block of `size` starting at `virtual_address`,
+/// with an explicit `physical_address`.
+///
+/// Note: This can be used when mapping predefined regions.
 pub fn allocate_memory_explicit(
     mut virtual_address: usize,
     mut size: usize,
     mut physical_address: usize,
     additional_flags: u64,
 ) -> Result<(), NovaError> {
-    if virtual_address % GRANULARITY != 0 {
+    if !virtual_address.is_multiple_of(GRANULARITY) {
         return Err(NovaError::Misalignment);
     }
     let level1_blocks = size / LEVEL1_BLOCK_SIZE;
     size %= LEVEL1_BLOCK_SIZE;
-    let level2_blocks = size / LEVEL2_BLOCK_SIZE;
+    let mut level2_blocks = size / LEVEL2_BLOCK_SIZE;
     size %= LEVEL2_BLOCK_SIZE;
-    let level3_pages = size / GRANULARITY;
-    if size % GRANULARITY != 0 {
+    let mut level3_pages = size / GRANULARITY;
+    if !size.is_multiple_of(GRANULARITY) {
         return Err(NovaError::InvalidGranularity);
     }
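The other recurring change is replacing `&mut TRANSLATIONTABLE_TTBR0` with `core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0)` and passing `*mut PageTable` down the call chain. Taking a `&mut` to a `static mut` is what the `static_mut_refs` lint complains about; creating a raw pointer forms no intermediate reference, needs no `unsafe` block on current stable Rust (which is why the surrounding `unsafe { ... }` wrappers disappear here), and confines the unsafety to the dereference inside the callee. A minimal sketch of the pattern with illustrative names, not the kernel's real items:

    static mut TABLE: [u64; 4] = [0; 4];

    fn set_entry(table: *mut [u64; 4], idx: usize, value: u64) {
        // SAFETY: the caller guarantees exclusive access to the table.
        unsafe { (*table)[idx] = value };
    }

    fn demo() {
        // No `&mut TABLE` is ever formed, so the static_mut_refs lint stays quiet.
        set_entry(core::ptr::addr_of_mut!(TABLE), 0, 0xFF);
    }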
@@ -114,27 +120,46 @@ pub fn allocate_memory_explicit(
         todo!("Currently not supported");
     }
+    let l2_alignment = (physical_address % LEVEL2_BLOCK_SIZE) / GRANULARITY;
+    if l2_alignment != 0 {
+        let l3_diff = LEVEL2_BLOCK_SIZE / GRANULARITY - l2_alignment;
+        if l3_diff > level3_pages {
+            level2_blocks -= 1;
+            level3_pages += TABLE_ENTRY_COUNT;
+        }
+        level3_pages -= l3_diff;
+        for _ in 0..l3_diff {
+            alloc_page_explicit(
+                virtual_address,
+                physical_address,
+                core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
+                additional_flags,
+            )?;
+            virtual_address += GRANULARITY;
+            physical_address += GRANULARITY;
+        }
+    }
     for _ in 0..level2_blocks {
-        unsafe {
-            alloc_block_l2_explicit(
-                virtual_address,
-                physical_address,
-                &mut TRANSLATIONTABLE_TTBR0,
-                additional_flags,
-            )?;
-        }
+        alloc_block_l2_explicit(
+            virtual_address,
+            physical_address,
+            core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
+            additional_flags,
+        )?;
         virtual_address += LEVEL2_BLOCK_SIZE;
         physical_address += LEVEL2_BLOCK_SIZE;
     }
     for _ in 0..level3_pages {
-        unsafe {
-            alloc_page_explicit(
-                virtual_address,
-                physical_address,
-                &mut TRANSLATIONTABLE_TTBR0,
-                additional_flags,
-            )?;
-        }
+        alloc_page_explicit(
+            virtual_address,
+            physical_address,
+            core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
+            additional_flags,
+        )?;
         virtual_address += GRANULARITY;
         physical_address += GRANULARITY;
     }
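The new block at the top of this hunk handles a `physical_address` that is page-aligned but not block-aligned: it maps single pages up to the next level 2 block boundary, borrowing one block's worth of pages when the request is smaller than the gap, and only then switches to block mappings. A worked example with assumed values (4 KiB granule, 512-entry tables, so a 2 MiB level 2 block), matching the framebuffer mapping later in this comparison:

    const GRANULARITY: usize = 4096;
    const TABLE_ENTRY_COUNT: usize = 512;
    const LEVEL2_BLOCK_SIZE: usize = TABLE_ENTRY_COUNT * GRANULARITY; // 2 MiB

    fn main() {
        let physical_address: usize = 0x3c10_0000; // 1 MiB into its 2 MiB block
        let l2_alignment = (physical_address % LEVEL2_BLOCK_SIZE) / GRANULARITY;
        let l3_diff = LEVEL2_BLOCK_SIZE / GRANULARITY - l2_alignment;
        assert_eq!(l2_alignment, 256); // 256 pages past the last block boundary
        assert_eq!(l3_diff, 256);      // 256 single pages to map before blocks resume
    }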
@@ -142,9 +167,10 @@ pub fn allocate_memory_explicit(
     Ok(())
 }
+/// Allocate a single page.
 pub fn alloc_page(
     virtual_address: usize,
-    base_table: &mut PageTable,
+    base_table: *mut PageTable,
     additional_flags: u64,
 ) -> Result<(), NovaError> {
     map_page(
@@ -155,10 +181,11 @@ pub fn alloc_page(
     )
 }
+/// Allocate a single page at an explicit `physical_address`.
 pub fn alloc_page_explicit(
     virtual_address: usize,
     physical_address: usize,
-    base_table: &mut PageTable,
+    base_table: *mut PageTable,
     additional_flags: u64,
 ) -> Result<(), NovaError> {
     reserve_page_explicit(physical_address)?;
@@ -173,49 +200,70 @@ pub fn alloc_page_explicit(
 fn map_page(
     virtual_address: usize,
     physical_address: usize,
-    base_table: &mut PageTable,
+    base_table_ptr: *mut PageTable,
     additional_flags: u64,
 ) -> Result<(), NovaError> {
     let (l1_off, l2_off, l3_off) = virtual_address_to_table_offset(virtual_address);
-    let table = navigate_table(base_table, [l1_off, l2_off, 0], 2)?;
+    let offsets = [l1_off, l2_off];
+    let table_ptr = navigate_table(base_table_ptr, &offsets)?;
+    let table = unsafe { &mut *table_ptr };
     if table.0[l3_off] & 0b11 > 0 {
         return Err(NovaError::Paging);
     }
-    table.0[l3_off] = create_block_descriptor_entry(physical_address, additional_flags);
+    table.0[l3_off] = create_page_descriptor_entry(physical_address, additional_flags);
     Ok(())
 }
+// Allocate a level 2 block.
 pub fn alloc_block_l2(
     virtual_addr: usize,
-    base_table: &mut PageTable,
+    base_table_ptr: *mut PageTable,
     additional_flags: u64,
 ) -> Result<(), NovaError> {
-    map_l2_block(virtual_addr, reserve_block(), base_table, additional_flags)
+    map_l2_block(
+        virtual_addr,
+        reserve_block(),
+        base_table_ptr,
+        additional_flags,
+    )
 }
+// Allocate a level 2 block at an explicit `physical_address`.
 pub fn alloc_block_l2_explicit(
     virtual_addr: usize,
     physical_address: usize,
-    base_table: &mut PageTable,
+    base_table_ptr: *mut PageTable,
     additional_flags: u64,
 ) -> Result<(), NovaError> {
+    if !physical_address.is_multiple_of(LEVEL2_BLOCK_SIZE) {
+        return Err(NovaError::Misalignment);
+    }
     reserve_block_explicit(physical_address)?;
-    map_l2_block(virtual_addr, physical_address, base_table, additional_flags)
+    map_l2_block(
+        virtual_addr,
+        physical_address,
+        base_table_ptr,
+        additional_flags,
+    )
 }
 pub fn map_l2_block(
     virtual_addr: usize,
     physical_address: usize,
-    base_table: &mut PageTable,
+    base_table_ptr: *mut PageTable,
     additional_flags: u64,
 ) -> Result<(), NovaError> {
     let (l1_off, l2_off, _) = virtual_address_to_table_offset(virtual_addr);
-    let table = navigate_table(base_table, [l1_off, 0, 0], 1)?;
+    let offsets = [l1_off];
+    let table_ptr = navigate_table(base_table_ptr, &offsets)?;
+    let table = unsafe { &mut *table_ptr };
     // Verify virtual address is available.
     if table.0[l2_off] & 0b11 != 0 {
@@ -228,6 +276,7 @@ pub fn map_l2_block(
     Ok(())
 }
 pub fn reserve_range_explicit(
     start_physical_address: usize,
     end_physical_address: usize,
@@ -239,7 +288,7 @@ pub fn reserve_range_explicit(
     size %= LEVEL2_BLOCK_SIZE;
     let l3_pages = size / GRANULARITY;
-    if size % GRANULARITY != 0 {
+    if !size.is_multiple_of(GRANULARITY) {
         return Err(NovaError::Misalignment);
     }
@@ -286,7 +335,7 @@ fn reserve_page_explicit(physical_address: usize) -> Result<(), NovaError> {
 fn reserve_block() -> usize {
     if let Some(start) = find_contiguous_free_bitmap_words(L2_BLOCK_BITMAP_WORDS) {
         for j in 0..L2_BLOCK_BITMAP_WORDS {
-            unsafe { PAGING_BITMAP[start + j] = MAX };
+            unsafe { PAGING_BITMAP[start + j] = u64::MAX };
         }
         return start * 64 * GRANULARITY;
     }
@@ -305,22 +354,30 @@ fn reserve_block_explicit(physical_address: usize) -> Result<(), NovaError> {
     }
     for i in 0..L2_BLOCK_BITMAP_WORDS {
         unsafe {
-            PAGING_BITMAP[(page / 64) + i] = MAX;
+            PAGING_BITMAP[(page / 64) + i] = u64::MAX;
         };
     }
     Ok(())
 }
 fn create_block_descriptor_entry(physical_address: usize, additional_flags: u64) -> u64 {
-    (physical_address as u64 & 0x0000_FFFF_FFE0_0000)
+    (physical_address as u64 & 0x0000_FFFF_FFFF_F000)
         | BLOCK
         | ACCESS_FLAG
         | INNER_SHAREABILITY
         | additional_flags
 }
+fn create_page_descriptor_entry(physical_address: usize, additional_flags: u64) -> u64 {
+    (physical_address as u64 & 0x0000_FFFF_FFFF_F000)
+        | PAGE
+        | ACCESS_FLAG
+        | INNER_SHAREABILITY
+        | additional_flags
+}
 fn create_table_descriptor_entry(addr: usize) -> u64 {
-    0 | (addr as u64 & 0x0000_FFFF_FFFF_F000) | TABLE
+    (addr as u64 & 0x0000_FFFF_FFFF_F000) | TABLE
 }
 fn virtual_address_to_table_offset(virtual_addr: usize) -> (usize, usize, usize) {
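This hunk is the heart of the `fix: Level 3 translation fault` commit. In the AArch64 translation table format, bits[1:0] of an entry select its type: `0b01` encodes a block mapping and is only valid at levels 1 and 2, while at level 3 that encoding is treated as invalid; a valid level 3 (page) entry must have bits[1:0] = `0b11`. The old `map_page` built level 3 entries with `create_block_descriptor_entry`, so the walker decoded them as invalid and every access through them faulted at level 3. The new `create_page_descriptor_entry` also masks the output address to 4 KiB granularity rather than the 2 MiB block mask the old code used. A short summary of the encodings involved (architectural facts, not project code):

    // bits[1:0] of an AArch64 stage 1 descriptor
    const INVALID: u64 = 0b00; // bit 0 clear: no mapping, access faults
    const BLOCK: u64 = 0b01;   // levels 1-2 only: maps a whole block (2 MiB at level 2 with a 4 KiB granule)
    const TABLE: u64 = 0b11;   // levels 0-2: points at the next-level table
    const PAGE: u64 = 0b11;    // level 3: maps a single page; 0b01 here decodes as invalid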
@@ -331,22 +388,46 @@ fn virtual_address_to_table_offset(virtual_addr: usize) -> (usize, usize, usize)
     (l1_off, l2_off, l3_off)
 }
+/// Debugging function to navigate the translation tables.
+#[allow(unused_variables)]
+pub fn sim_l3_access(addr: usize) {
+    unsafe {
+        let entry1 = TRANSLATIONTABLE_TTBR0.0[addr / LEVEL1_BLOCK_SIZE];
+        let table2 = &mut *(entry_phys(entry1) as *mut PageTable);
+        let entry2 = table2.0[(addr % LEVEL1_BLOCK_SIZE) / LEVEL2_BLOCK_SIZE];
+        let table3 = &mut *(entry_phys(entry2) as *mut PageTable);
+        let _entry3 = table3.0[(addr % LEVEL2_BLOCK_SIZE) / GRANULARITY];
+    }
+}
+/// Navigate the table tree by following the given offsets. This function
+/// allocates new tables if required.
 fn navigate_table(
-    initial_table: &mut PageTable,
-    offsets: [usize; 3],
-    offsets_size: usize,
-) -> Result<&mut PageTable, NovaError> {
-    let root_table_ptr = initial_table as *mut PageTable;
-    let mut table = initial_table;
-    for i in 0..offsets_size {
-        let offset = offsets[i];
+    initial_table_ptr: *mut PageTable,
+    offsets: &[usize],
+) -> Result<*mut PageTable, NovaError> {
+    let root_table_ptr = initial_table_ptr;
+    let mut table = initial_table_ptr;
+    for offset in offsets {
+        table = next_table(table, *offset, root_table_ptr)?;
+    }
+    Ok(table)
+}
+/// Get the next table one level down.
+///
+/// If the table doesn't exist, a page will be allocated for it.
+fn next_table(
+    table_ptr: *mut PageTable,
+    offset: usize,
+    root_table_ptr: *mut PageTable,
+) -> Result<*mut PageTable, NovaError> {
+    let table = unsafe { &mut *table_ptr };
     match table.0[offset] & 0b11 {
         0 => {
             let new_table_addr = reserve_page();
             table.0[offset] = create_table_descriptor_entry(new_table_addr);
-            table =
-                unsafe { &mut *(get_table_entry_address(table.0[offset]) as *mut PageTable) };
             map_page(
                 new_table_addr,
@@ -354,16 +435,13 @@ fn navigate_table(
                 unsafe { &mut *root_table_ptr },
                 NORMAL_MEM | WRITABLE | PXN | UXN,
             )?;
+            Ok(entry_phys(table.0[offset]) as *mut PageTable)
         }
-        1 => return Err(NovaError::Paging),
-        3 => {
-            table =
-                unsafe { &mut *(get_table_entry_address(table.0[offset]) as *mut PageTable) }
-        }
-        _ => panic!(),
-    };
-    }
-    Ok(table)
+        1 => Err(NovaError::Paging),
+        3 => Ok(entry_phys(table.0[offset]) as *mut PageTable),
+        _ => unreachable!(),
+    }
 }
 fn find_unallocated_page() -> Option<usize> {
@@ -401,7 +479,8 @@ fn find_contiguous_free_bitmap_words(required_words: usize) -> Option<usize> {
     None
 }
+/// Extracts the physical address out of a table entry.
 #[inline]
-fn get_table_entry_address(entry: u64) -> u64 {
+fn entry_phys(entry: u64) -> u64 {
     entry & 0x0000_FFFF_FFFF_F000
 }
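The table walk is also reshaped: `navigate_table` now takes a slice of offsets instead of a fixed `[usize; 3]` plus a length, returns a raw table pointer, and factors the per-level step into `next_table` (allocate a fresh table on `0b00`, follow the table descriptor on `0b11`, error on a block entry). Hypothetical call shapes, matching how `map_page` and `map_l2_block` use it above:

    // let l3_table: *mut PageTable = navigate_table(root_ptr, &[l1_off, l2_off])?; // walk L1 -> L2, get the L3 table
    // let l2_table: *mut PageTable = navigate_table(root_ptr, &[l1_off])?;         // walk L1 only, get the L2 table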

View File

@@ -59,7 +59,7 @@ pub mod mmu {
         let _ = map_l2_block(
             addr,
             addr,
-            unsafe { &mut TRANSLATIONTABLE_TTBR0 },
+            core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
             EL0_ACCESSIBLE | READ_ONLY | NORMAL_MEM,
         );
     }
@@ -68,7 +68,7 @@ pub mod mmu {
         let _ = map_l2_block(
             addr,
             addr,
-            unsafe { &mut TRANSLATIONTABLE_TTBR0 },
+            core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
             WRITABLE | UXN | NORMAL_MEM,
         );
     }
@@ -77,7 +77,7 @@ pub mod mmu {
         let _ = map_l2_block(
             addr,
             addr,
-            unsafe { &mut TRANSLATIONTABLE_TTBR0 },
+            core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
             EL0_ACCESSIBLE | WRITABLE | PXN | NORMAL_MEM,
         );
     }
@@ -86,7 +86,7 @@ pub mod mmu {
         let _ = alloc_block_l2_explicit(
             addr,
             addr,
-            unsafe { &mut TRANSLATIONTABLE_TTBR0 },
+            core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
             EL0_ACCESSIBLE | WRITABLE | UXN | PXN | DEVICE_MEM,
         );
     }

View File

@@ -82,7 +82,7 @@ unsafe extern "C" fn rust_irq_handler() {
         println!("Return register address: {:#x}", read_esr_el1());
     }
-    if let Some(handler_vec) = unsafe { INTERRUPT_HANDLERS.as_ref() } {
+    if let Some(handler_vec) = unsafe { &*core::ptr::addr_of_mut!(INTERRUPT_HANDLERS) } {
         for handler in handler_vec {
             if (pending_irqs & (1 << (handler.source.clone() as u32))) != 0 {
                 (handler.function)();
@@ -222,7 +222,7 @@ pub fn initialize_interrupt_handler() {
 }
 pub fn register_interrupt_handler(source: IRQSource, function: fn()) {
-    if let Some(handler_vec) = unsafe { INTERRUPT_HANDLERS.as_mut() } {
+    if let Some(handler_vec) = unsafe { &mut *core::ptr::addr_of_mut!(INTERRUPT_HANDLERS) } {
         handler_vec.push(InterruptHandlers { source, function });
     }
 }

View File

@@ -31,13 +31,11 @@ macro_rules! log {
 }
 pub fn log(args: fmt::Arguments) {
-    unsafe {
-        if let Some(logger) = LOGGER.as_mut() {
+    if let Some(logger) = unsafe { &mut *core::ptr::addr_of_mut!(LOGGER) } {
         logger.write_str("\n").unwrap();
         logger.write_fmt(args).unwrap();
         logger.flush();
     }
-    }
 }
 pub fn set_logger(logger: Box<dyn Logger>) {

View File

@@ -1,6 +1,5 @@
 #![no_main]
 #![no_std]
-#![feature(asm_experimental_arch)]
 #![allow(static_mut_refs)]
 #![allow(clippy::missing_safety_doc)]
 use core::{
@@ -13,8 +12,7 @@ extern crate alloc;
 use nova::{
     aarch64::{
         mmu::{
-            allocate_memory_explicit, EL0_ACCESSIBLE, LEVEL2_BLOCK_SIZE, NORMAL_MEM, PXN, UXN,
-            WRITABLE,
+            allocate_memory_explicit, sim_l3_access, EL0_ACCESSIBLE, NORMAL_MEM, PXN, UXN, WRITABLE,
         },
         registers::{daif, read_id_aa64mmfr0_el1},
     },
@@ -74,14 +72,14 @@ pub extern "C" fn main() -> ! {
         initialize_mmu_translation_tables();
         // Frame Buffer memory range
         // TODO: this is just temporary
-        // TODO: Investigate why the size is off
         allocate_memory_explicit(
             0x3c100000,
-            1080 * 1920 * 4 + LEVEL2_BLOCK_SIZE + LEVEL2_BLOCK_SIZE,
+            1080 * 1920 * 4,
             0x3c100000,
             NORMAL_MEM | PXN | UXN | WRITABLE | EL0_ACCESSIBLE,
         )
         .unwrap();
+        sim_l3_access(0x3c100000);
         configure_mmu_el1();
     };
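With the alignment handling added to `allocate_memory_explicit`, the framebuffer can now be mapped with its exact size instead of being padded by two level 2 blocks; the removed TODO about the size being "off" was presumably papering over the fact that 0x3c100000 is not block-aligned. Back-of-the-envelope numbers, assuming a 4 KiB granule:

    fn main() {
        let fb_bytes: usize = 1080 * 1920 * 4;                // 8_294_400 bytes
        assert!(fb_bytes.is_multiple_of(4096));               // exactly 2_025 pages of 4 KiB
        assert_eq!(0x3c10_0000_usize % 0x20_0000, 0x10_0000); // base sits 1 MiB into a 2 MiB block
    }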
@@ -108,7 +106,11 @@ pub extern "C" fn kernel_main() -> ! {
     nova::initialize_kernel();
     println!("Exception Level: {}", get_current_el());
     daif::unmask_all();
+    let fb = FrameBuffer::default();
+    for i in 0..1080 {
+        fb.draw_pixel(50, i, RED);
+    }
     unsafe {
         el1_to_el0();
     };
@@ -131,7 +133,7 @@ pub extern "C" fn el0() -> ! {
     let fb = FrameBuffer::default();
-    for i in 0..1080 {
+    for i in 600..1080 {
         fb.draw_pixel(50, i, RED);
     }
     fb.draw_square(500, 500, 600, 700, RED);

View File

@@ -23,5 +23,6 @@ pub fn reboot_system() {
             PM_PASSWORD | (pm_rstc_val & PM_RSTC_WRCFG_CLR) | PM_RSTC_WRCFG_FULL_RESET,
         );
     }
+    #[allow(clippy::empty_loop)]
     loop {}
 }

View File

@@ -100,7 +100,7 @@ fn test_merging_free_sections() {
     );
     let root_header = heap.start_address;
-    let root_header_start_size = unsafe { (*root_header).size };
+    let _root_header_start_size = unsafe { (*root_header).size };
     let malloc1 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
     let malloc_header_before = unsafe { *Heap::get_header_ref_from_data_pointer(malloc1) };
@@ -135,14 +135,13 @@ fn test_first_fit() {
     );
     let root_header = heap.start_address;
-    let root_header_start_size = unsafe { (*root_header).size };
+    let _root_header_start_size = unsafe { (*root_header).size };
     let malloc1 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
-    let malloc2 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
+    let _malloc2 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
     let malloc3 = heap.malloc(MIN_BLOCK_SIZE * 3).unwrap();
     let malloc4 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
-    unsafe {
     assert!(heap.free(malloc1).is_ok());
     assert!(heap.free(malloc3).is_ok());
     let malloc5 = heap.malloc(MIN_BLOCK_SIZE * 2).unwrap();
@@ -161,5 +160,4 @@ fn test_first_fit() {
     // Malloc7 takes slot of Malloc1
     let malloc7 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
     assert_eq!(malloc1, malloc7);
-    }
 }