Compare commits

3 Commits

Author SHA1 Message Date
778b3ed80c feat: move EL0 stack to virtual space 2026-03-19 10:43:45 +01:00
cba7073ae5 refactor: organize code 2026-03-19 08:57:39 +01:00
Alexander Neuhäuser f78388ee2c feat: implement MMU core functionality
* feat: Implement a basic MMU configuration

* feat: Enhance MMU by separating sections and configuring permissions

* feat: Update MMU configuration and memory allocation functions

* fix: Level 3 translation fault

* docs: add code documentation

* fix: linter

* feat: map translation tables to kernel space

* feat: move el1 stack to kernel VA space

* feat: use virtual memory for heap allocation

* docs: update Readme
2026-03-17 19:30:45 +01:00
19 changed files with 623 additions and 543 deletions


@@ -14,8 +14,9 @@ NovaOS is an expository project where I build a kernel from scratch for a Raspberry Pi
- Communicate with peripherals via mailboxes ✓
- Frame Buffer ✓
- Heap Memory allocation ✓
- MMU
- SVC instructions
- Kernel Independent Applications
- Multi Core
- Dynamic clock speed
- Multiprocessing

link.ld

@@ -4,53 +4,43 @@ SECTIONS {
.text ALIGN(4) : {
KEEP(*(.text._start))
*(.text .text.*)
. = ALIGN(4K);
__text_end = .;
}
.rodata : {
*(.rodata .rodata.*)
}
.data ALIGN(2M) : {
_data = .;
.data : {
*(.data .data.*)
}
.bss (NOLOAD) : {
. = ALIGN(16);
.bss ALIGN(16) (NOLOAD) : {
__bss_start = .;
*(.bss .bss.*)
*(COMMON)
__bss_end = .;
}
.vector_table ALIGN(2048) : {
. = ALIGN(2M);
__share_end = .;
.vector_table ALIGN(2K) : {
KEEP(*(.vector_table))
}
.heap ALIGN(16): {
__heap_start = .;
. += 100K; /* 100 KiB */
__heap_end = .;
}
/* EL2 stack */
.stack ALIGN(16): {
__stack_start = .;
. += 10K; /* 10 KiB stack */
. += 100K; /* 100 KiB stack */
. = ALIGN(16);
__stack_end = .;
}
. = ALIGN(2M);
__kernel_end = .;
.stack_el0 : {
__stack_start_el0 = .;
. += 10K; /* 10 KiB stack */
__stack_end_el0 = .;
}
. = ALIGN(2M);
_end = .;
}
__bss_size = (__bss_end - __bss_start) >> 3;
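Note: the symbols defined here (`__kernel_end`, `__stack_start_el0`, `__share_end`, ...) are consumed from Rust via `extern "C"` statics, where the *address* of the symbol, not its contents, carries the value. A minimal sketch of that pattern, mirroring the declarations that appear later in this diff:

```rust
// Sketch of the linker-symbol pattern used throughout this PR. The symbol's
// address is the value assigned in link.ld, so it is taken with addr_of! and
// cast to usize; reading the static's contents would be meaningless.
unsafe extern "C" {
    static __kernel_end: u64;
    static __stack_start_el0: u64;
}

fn kernel_end() -> usize {
    unsafe { core::ptr::addr_of!(__kernel_end) as usize }
}

fn el0_stack_start() -> usize {
    unsafe { core::ptr::addr_of!(__stack_start_el0) as usize }
}
```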


@@ -1,20 +1,23 @@
use core::{panic, u64::MAX};
use core::mem::size_of;
use nova_error::NovaError;
unsafe extern "C" {
static mut __translation_table_l2_start: u64;
static __stack_start_el0: u64;
static __kernel_end: u64;
static _data: u64;
}
use crate::{
aarch64::mmu::physical_mapping::{
reserve_block, reserve_block_explicit, reserve_page, reserve_page_explicit,
},
get_current_el,
};
const BLOCK: u64 = 0b01;
const TABLE: u64 = 0b11;
const PAGE: u64 = 0b11;
/// Allow EL0 to access this section
pub const EL0_ACCESSIBLE: u64 = 1 << 6;
/// Allow a page or block to be written.
pub const WRITABLE: u64 = 0 << 7;
/// Disallow a page or block to be written.
pub const READ_ONLY: u64 = 1 << 7;
const ACCESS_FLAG: u64 = 1 << 10;
@@ -37,114 +40,119 @@ pub const LEVEL2_BLOCK_SIZE: usize = TABLE_ENTRY_COUNT * GRANULARITY;
const L2_BLOCK_BITMAP_WORDS: usize = LEVEL2_BLOCK_SIZE / (64 * GRANULARITY);
const MAX_PAGE_COUNT: usize = 1 * 1024 * 1024 * 1024 / GRANULARITY;
const MAX_PAGE_COUNT: usize = 1024 * 1024 * 1024 / GRANULARITY;
const TRANSLATION_TABLE_BASE_ADDR: usize = 0xFFFF_FF82_0000_0000;
pub const KERNEL_VIRTUAL_MEM_SPACE: usize = 0xFFFF_FF80_0000_0000;
pub const STACK_START_ADDR: usize = !KERNEL_VIRTUAL_MEM_SPACE & (!0xF);
mod physical_mapping;
type VirtAddr = usize;
type PhysAddr = usize;
pub enum PhysSource {
Any,
Explicit(PhysAddr),
}
#[repr(align(4096))]
pub struct PageTable([u64; TABLE_ENTRY_COUNT]);
#[no_mangle]
pub static mut TRANSLATIONTABLE_TTBR0: PageTable = PageTable([0; 512]);
#[no_mangle]
pub static mut TRANSLATIONTABLE_TTBR1: PageTable = PageTable([0; 512]);
static mut PAGING_BITMAP: [u64; MAX_PAGE_COUNT / 64] = [0; MAX_PAGE_COUNT / 64];
/// Allocate a memory block of `size` starting at `virtual_address`.
pub fn allocate_memory(
mut virtual_address: usize,
mut size: usize,
additional_flags: u64,
virtual_address: usize,
size_bytes: usize,
phys: PhysSource,
flags: u64,
) -> Result<(), NovaError> {
if virtual_address % GRANULARITY != 0 {
if !virtual_address.is_multiple_of(GRANULARITY) {
return Err(NovaError::Misalignment);
}
let level1_blocks = size / LEVEL1_BLOCK_SIZE;
size %= LEVEL1_BLOCK_SIZE;
let level2_blocks = size / LEVEL2_BLOCK_SIZE;
size %= LEVEL2_BLOCK_SIZE;
let level3_pages = size / GRANULARITY;
if size % GRANULARITY != 0 {
if !size_bytes.is_multiple_of(GRANULARITY) {
return Err(NovaError::InvalidGranularity);
}
if level1_blocks > 0 {
todo!("Currently not supported");
let base_table = if virtual_address & KERNEL_VIRTUAL_MEM_SPACE > 0 {
core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR1)
} else {
core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0)
};
match phys {
PhysSource::Any => map_range_dynamic(virtual_address, size_bytes, base_table, flags),
PhysSource::Explicit(phys_addr) => {
map_range_explicit(virtual_address, phys_addr, size_bytes, base_table, flags)
}
}
}
for _ in 0..level2_blocks {
unsafe {
alloc_block_l2(
virtual_address,
&mut TRANSLATIONTABLE_TTBR0,
additional_flags,
)?;
}
virtual_address += LEVEL2_BLOCK_SIZE;
}
for _ in 0..level3_pages {
unsafe {
alloc_page(
virtual_address,
&mut TRANSLATIONTABLE_TTBR0,
additional_flags,
)?;
}
virtual_address += GRANULARITY;
}
Ok(())
}
pub fn allocate_memory_explicit(
mut virtual_address: usize,
mut size: usize,
mut physical_address: usize,
additional_flags: u64,
fn map_range_explicit(
mut virt: VirtAddr,
mut phys: PhysAddr,
size_bytes: usize,
base: *mut PageTable,
flags: u64,
) -> Result<(), NovaError> {
if virtual_address % GRANULARITY != 0 {
return Err(NovaError::Misalignment);
let mut remaining = size_bytes;
while virt % LEVEL2_BLOCK_SIZE != 0 {
map_page(virt, phys, base, flags)?;
(virt, _) = virt.overflowing_add(GRANULARITY);
phys += GRANULARITY;
remaining -= GRANULARITY;
}
let level1_blocks = size / LEVEL1_BLOCK_SIZE;
size %= LEVEL1_BLOCK_SIZE;
let level2_blocks = size / LEVEL2_BLOCK_SIZE;
size %= LEVEL2_BLOCK_SIZE;
let level3_pages = size / GRANULARITY;
if size % GRANULARITY != 0 {
return Err(NovaError::InvalidGranularity);
while remaining >= LEVEL2_BLOCK_SIZE {
map_l2_block(virt, phys, base, flags)?;
(virt, _) = virt.overflowing_add(LEVEL2_BLOCK_SIZE);
phys += LEVEL2_BLOCK_SIZE;
remaining -= LEVEL2_BLOCK_SIZE;
}
if level1_blocks > 0 {
todo!("Currently not supported");
}
for _ in 0..level2_blocks {
unsafe {
alloc_block_l2_explicit(
virtual_address,
physical_address,
&mut TRANSLATIONTABLE_TTBR0,
additional_flags,
)?;
}
virtual_address += LEVEL2_BLOCK_SIZE;
physical_address += LEVEL2_BLOCK_SIZE;
}
for _ in 0..level3_pages {
unsafe {
alloc_page_explicit(
virtual_address,
physical_address,
&mut TRANSLATIONTABLE_TTBR0,
additional_flags,
)?;
}
virtual_address += GRANULARITY;
physical_address += GRANULARITY;
while remaining > 0 {
map_page(virt, phys, base, flags)?;
(virt, _) = virt.overflowing_add(GRANULARITY);
phys += GRANULARITY;
remaining -= GRANULARITY;
}
Ok(())
}
fn map_range_dynamic(
mut virt: VirtAddr,
size_bytes: usize,
base: *mut PageTable,
flags: u64,
) -> Result<(), NovaError> {
let mut remaining = size_bytes;
while remaining >= LEVEL2_BLOCK_SIZE {
map_l2_block(virt, reserve_block(), base, flags)?;
(virt, _) = virt.overflowing_add(LEVEL2_BLOCK_SIZE);
remaining -= LEVEL2_BLOCK_SIZE;
}
while remaining > 0 {
map_page(virt, reserve_page(), base, flags)?;
(virt, _) = virt.overflowing_add(GRANULARITY);
remaining -= GRANULARITY;
}
Ok(())
}
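For reference, a usage sketch of the reworked `allocate_memory` entry point, with paths and flags as they appear elsewhere in this diff (this mirrors the frame-buffer mapping in the configuration module, so it assumes that module's constants):

```rust
use nova::aarch64::mmu::{
    allocate_memory, PhysSource, EL0_ACCESSIBLE, NORMAL_MEM, PXN, UXN, WRITABLE,
};
use nova_error::NovaError;

fn map_framebuffer() -> Result<(), NovaError> {
    // 1080 * 1920 * 4 = 8_294_400 bytes = 2025 pages of 4 KiB, so the
    // granularity check passes; the Explicit variant maps VA == PA here.
    allocate_memory(
        0x3c10_0000,
        1080 * 1920 * 4,
        PhysSource::Explicit(0x3c10_0000),
        NORMAL_MEM | PXN | UXN | WRITABLE | EL0_ACCESSIBLE,
    )
}
```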
/// Allocate a single page.
pub fn alloc_page(
virtual_address: usize,
base_table: &mut PageTable,
base_table: *mut PageTable,
additional_flags: u64,
) -> Result<(), NovaError> {
map_page(
@@ -155,10 +163,11 @@ pub fn alloc_page(
)
}
/// Allocate a single page at an explicit `physical_address`.
pub fn alloc_page_explicit(
virtual_address: usize,
physical_address: usize,
base_table: &mut PageTable,
base_table: *mut PageTable,
additional_flags: u64,
) -> Result<(), NovaError> {
reserve_page_explicit(physical_address)?;
@@ -170,52 +179,59 @@ pub fn alloc_page_explicit(
)
}
fn map_page(
pub fn map_page(
virtual_address: usize,
physical_address: usize,
base_table: &mut PageTable,
base_table_ptr: *mut PageTable,
additional_flags: u64,
) -> Result<(), NovaError> {
let (l1_off, l2_off, l3_off) = virtual_address_to_table_offset(virtual_address);
let table = navigate_table(base_table, [l1_off, l2_off, 0], 2)?;
let offsets = [l1_off, l2_off];
let table_ptr = navigate_table(base_table_ptr, &offsets)?;
let table = unsafe { &mut *table_ptr };
if table.0[l3_off] & 0b11 > 0 {
return Err(NovaError::Paging);
}
table.0[l3_off] = create_block_descriptor_entry(physical_address, additional_flags);
table.0[l3_off] = create_page_descriptor_entry(physical_address, additional_flags);
Ok(())
}
pub fn alloc_block_l2(
virtual_addr: usize,
base_table: &mut PageTable,
additional_flags: u64,
) -> Result<(), NovaError> {
map_l2_block(virtual_addr, reserve_block(), base_table, additional_flags)
}
/// Allocate a level 2 block at an explicit `physical_address`.
pub fn alloc_block_l2_explicit(
virtual_addr: usize,
physical_address: usize,
base_table: &mut PageTable,
base_table_ptr: *mut PageTable,
additional_flags: u64,
) -> Result<(), NovaError> {
if !physical_address.is_multiple_of(LEVEL2_BLOCK_SIZE) {
return Err(NovaError::Misalignment);
}
reserve_block_explicit(physical_address)?;
map_l2_block(virtual_addr, physical_address, base_table, additional_flags)
map_l2_block(
virtual_addr,
physical_address,
base_table_ptr,
additional_flags,
)
}
pub fn map_l2_block(
virtual_addr: usize,
physical_address: usize,
base_table: &mut PageTable,
base_table_ptr: *mut PageTable,
additional_flags: u64,
) -> Result<(), NovaError> {
let (l1_off, l2_off, _) = virtual_address_to_table_offset(virtual_addr);
let offsets = [l1_off];
let table_ptr = navigate_table(base_table_ptr, &offsets)?;
let table = navigate_table(base_table, [l1_off, 0, 0], 1)?;
let table = unsafe { &mut *table_ptr };
// Verify virtual address is available.
if table.0[l2_off] & 0b11 != 0 {
@@ -228,10 +244,11 @@ pub fn map_l2_block(
Ok(())
}
pub fn reserve_range_explicit(
start_physical_address: usize,
end_physical_address: usize,
) -> Result<(), NovaError> {
pub fn reserve_range(
start_physical_address: PhysAddr,
end_physical_address: PhysAddr,
) -> Result<PhysAddr, NovaError> {
let mut size = end_physical_address - start_physical_address;
let l1_blocks = size / LEVEL1_BLOCK_SIZE;
size %= LEVEL1_BLOCK_SIZE;
@@ -239,7 +256,7 @@ pub fn reserve_range_explicit(
size %= LEVEL2_BLOCK_SIZE;
let l3_pages = size / GRANULARITY;
if size % GRANULARITY != 0 {
if !size.is_multiple_of(GRANULARITY) {
return Err(NovaError::Misalignment);
}
@@ -258,150 +275,92 @@ pub fn reserve_range_explicit(
addr += GRANULARITY;
}
Ok(())
}
fn reserve_page() -> usize {
if let Some(address) = find_unallocated_page() {
let page = address / GRANULARITY;
let word_index = page / 64;
unsafe { PAGING_BITMAP[word_index] |= 1 << (page % 64) };
return address;
}
panic!("Out of Memory!");
}
fn reserve_page_explicit(physical_address: usize) -> Result<(), NovaError> {
let page = physical_address / GRANULARITY;
let word_index = page / 64;
if unsafe { PAGING_BITMAP[word_index] } & (1 << (page % 64)) > 0 {
return Err(NovaError::Paging);
}
unsafe { PAGING_BITMAP[word_index] |= 1 << (page % 64) };
Ok(())
}
fn reserve_block() -> usize {
if let Some(start) = find_contiguous_free_bitmap_words(L2_BLOCK_BITMAP_WORDS) {
for j in 0..L2_BLOCK_BITMAP_WORDS {
unsafe { PAGING_BITMAP[start + j] = MAX };
}
return start * 64 * GRANULARITY;
}
panic!("Out of Memory!");
}
fn reserve_block_explicit(physical_address: usize) -> Result<(), NovaError> {
let page = physical_address / GRANULARITY;
for i in 0..L2_BLOCK_BITMAP_WORDS {
unsafe {
if PAGING_BITMAP[(page / 64) + i] != 0 {
return Err(NovaError::Paging);
}
};
}
for i in 0..L2_BLOCK_BITMAP_WORDS {
unsafe {
PAGING_BITMAP[(page / 64) + i] = MAX;
};
}
Ok(())
Ok(start_physical_address)
}
fn create_block_descriptor_entry(physical_address: usize, additional_flags: u64) -> u64 {
(physical_address as u64 & 0x0000_FFFF_FFE0_0000)
(physical_address as u64 & 0x0000_FFFF_FFFF_F000)
| BLOCK
| ACCESS_FLAG
| INNER_SHAREABILITY
| additional_flags
}
fn create_page_descriptor_entry(physical_address: usize, additional_flags: u64) -> u64 {
(physical_address as u64 & 0x0000_FFFF_FFFF_F000)
| PAGE
| ACCESS_FLAG
| INNER_SHAREABILITY
| additional_flags
}
fn create_table_descriptor_entry(addr: usize) -> u64 {
0 | (addr as u64 & 0x0000_FFFF_FFFF_F000) | TABLE
(addr as u64 & 0x0000_FFFF_FFFF_F000) | TABLE
}
fn virtual_address_to_table_offset(virtual_addr: usize) -> (usize, usize, usize) {
let absolute_page_off = virtual_addr / GRANULARITY;
let absolute_page_off = (virtual_addr & !KERNEL_VIRTUAL_MEM_SPACE) / GRANULARITY;
let l3_off = absolute_page_off % TABLE_ENTRY_COUNT;
let l2_off = (absolute_page_off / TABLE_ENTRY_COUNT) % TABLE_ENTRY_COUNT;
let l1_off = (absolute_page_off / TABLE_ENTRY_COUNT / TABLE_ENTRY_COUNT) % TABLE_ENTRY_COUNT;
(l1_off, l2_off, l3_off)
}
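To make the offset arithmetic concrete, here is a host-runnable version using the constants from this file (4 KiB granule, 512-entry tables) and one worked address:

```rust
const GRANULARITY: usize = 4096;
const TABLE_ENTRY_COUNT: usize = 512;
const KERNEL_VIRTUAL_MEM_SPACE: usize = 0xFFFF_FF80_0000_0000;

fn table_offsets(va: usize) -> (usize, usize, usize) {
    // Strip the TTBR1 prefix first, exactly as the new code above does.
    let page = (va & !KERNEL_VIRTUAL_MEM_SPACE) / GRANULARITY;
    let l3 = page % TABLE_ENTRY_COUNT;
    let l2 = (page / TABLE_ENTRY_COUNT) % TABLE_ENTRY_COUNT;
    let l1 = (page / TABLE_ENTRY_COUNT / TABLE_ENTRY_COUNT) % TABLE_ENTRY_COUNT;
    (l1, l2, l3)
}

fn main() {
    // The peripheral base 0x3F20_0000 is page 258_560: L1 entry 0,
    // L2 entry 505, L3 entry 0.
    assert_eq!(table_offsets(0x3F20_0000), (0, 505, 0));
    // A kernel-space alias decomposes to the same offsets once the
    // 0xFFFF_FF80_0000_0000 prefix is masked off.
    assert_eq!(table_offsets(0xFFFF_FF80_3F20_0000), (0, 505, 0));
}
```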
/// Navigate the table tree by following the given offsets. This function
/// allocates new tables if required.
fn navigate_table(
initial_table: &mut PageTable,
offsets: [usize; 3],
offsets_size: usize,
) -> Result<&mut PageTable, NovaError> {
let root_table_ptr = initial_table as *mut PageTable;
let mut table = initial_table;
for i in 0..offsets_size {
let offset = offsets[i];
match table.0[offset] & 0b11 {
0 => {
let new_table_addr = reserve_page();
table.0[offset] = create_table_descriptor_entry(new_table_addr);
table =
unsafe { &mut *(get_table_entry_address(table.0[offset]) as *mut PageTable) };
map_page(
new_table_addr,
new_table_addr,
unsafe { &mut *root_table_ptr },
NORMAL_MEM | WRITABLE | PXN | UXN,
)?;
}
1 => return Err(NovaError::Paging),
3 => {
table =
unsafe { &mut *(get_table_entry_address(table.0[offset]) as *mut PageTable) }
}
_ => panic!(),
};
initial_table_ptr: *mut PageTable,
offsets: &[usize],
) -> Result<*mut PageTable, NovaError> {
let mut table = initial_table_ptr;
for offset in offsets {
table = next_table(table, *offset)?;
}
Ok(table)
}
fn find_unallocated_page() -> Option<usize> {
for (i, entry) in unsafe { PAGING_BITMAP }.iter().enumerate() {
if *entry != u64::MAX {
for offset in 0..64 {
if entry >> offset & 0b1 == 0 {
return Some((i * 64 + offset) * GRANULARITY);
}
}
}
}
None
}
/// Get the next table one level down.
///
/// If the table doesn't exist, a page is allocated for it.
fn next_table(table_ptr: *mut PageTable, offset: usize) -> Result<*mut PageTable, NovaError> {
let table = unsafe { &mut *table_ptr };
match table.0[offset] & 0b11 {
0 => {
let new_phys_page_table_address = reserve_page();
fn find_contiguous_free_bitmap_words(required_words: usize) -> Option<usize> {
let mut run_start = 0;
let mut run_len = 0;
table.0[offset] = create_table_descriptor_entry(new_phys_page_table_address);
map_page(
phys_table_to_kernel_space(new_phys_page_table_address),
new_phys_page_table_address,
&raw mut TRANSLATIONTABLE_TTBR1,
NORMAL_MEM | WRITABLE | PXN | UXN,
)?;
for (i, entry) in unsafe { PAGING_BITMAP }.iter().enumerate() {
if *entry == 0 {
if run_len == 0 {
run_start = i;
Ok(entry_table_addr(table.0[offset] as usize) as *mut PageTable)
}
run_len += 1;
if run_len == required_words {
return Some(run_start);
}
} else {
run_len = 0;
1 => Err(NovaError::Paging),
3 => Ok(entry_table_addr(table.0[offset] as usize) as *mut PageTable),
_ => unreachable!(),
}
}
None
/// Extracts the physical address out of a table entry.
#[inline]
fn entry_phys(entry: usize) -> PhysAddr {
entry & 0x0000_FFFF_FFFF_F000
}
#[inline]
fn get_table_entry_address(entry: u64) -> u64 {
entry & 0x0000_FFFF_FFFF_F000
fn entry_table_addr(entry: usize) -> VirtAddr {
if get_current_el() == 1 {
phys_table_to_kernel_space(entry_phys(entry))
} else {
entry_phys(entry)
}
}
/// Map a physical table address into the kernel's translation-table window.
#[inline]
fn phys_table_to_kernel_space(entry: usize) -> VirtAddr {
entry | TRANSLATION_TABLE_BASE_ADDR
}
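A small sketch of the descriptor encoding used above. The PAGE, ACCESS_FLAG, and address-mask values are copied from this file; the INNER_SHAREABILITY value is an assumption (SH[1:0] at bits [9:8] set to 0b11), since its definition is outside this hunk:

```rust
const PAGE: u64 = 0b11;
const ACCESS_FLAG: u64 = 1 << 10;
const INNER_SHAREABILITY: u64 = 0b11 << 8; // assumed: SH[1:0] = inner shareable
const ADDR_MASK: u64 = 0x0000_FFFF_FFFF_F000; // output address, bits [47:12]

fn page_descriptor(pa: u64, flags: u64) -> u64 {
    (pa & ADDR_MASK) | PAGE | ACCESS_FLAG | INNER_SHAREABILITY | flags
}

fn main() {
    let d = page_descriptor(0x3c10_0000, 1 << 6 /* EL0_ACCESSIBLE */);
    assert_eq!(d & ADDR_MASK, 0x3c10_0000); // the page address survives the mask
    assert_eq!(d & 0b11, PAGE);             // low bits mark a valid page entry
}
```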


@@ -0,0 +1,89 @@
use crate::aarch64::mmu::{PhysAddr, GRANULARITY, L2_BLOCK_BITMAP_WORDS, MAX_PAGE_COUNT};
use nova_error::NovaError;
static mut PAGING_BITMAP: [u64; MAX_PAGE_COUNT / 64] = [0; MAX_PAGE_COUNT / 64];
pub fn reserve_page() -> PhysAddr {
if let Some(address) = find_unallocated_page() {
let page = address / GRANULARITY;
let word_index = page / 64;
unsafe { PAGING_BITMAP[word_index] |= 1 << (page % 64) };
return address;
}
panic!("Out of Memory!");
}
pub fn reserve_page_explicit(physical_address: usize) -> Result<PhysAddr, NovaError> {
let page = physical_address / GRANULARITY;
let word_index = page / 64;
if unsafe { PAGING_BITMAP[word_index] } & (1 << (page % 64)) > 0 {
return Err(NovaError::Paging);
}
unsafe { PAGING_BITMAP[word_index] |= 1 << (page % 64) };
Ok(physical_address)
}
pub fn reserve_block() -> usize {
if let Some(start) = find_contiguous_free_bitmap_words(L2_BLOCK_BITMAP_WORDS) {
for j in 0..L2_BLOCK_BITMAP_WORDS {
unsafe { PAGING_BITMAP[start + j] = u64::MAX };
}
return start * 64 * GRANULARITY;
}
panic!("Out of Memory!");
}
pub fn reserve_block_explicit(physical_address: usize) -> Result<(), NovaError> {
let page = physical_address / GRANULARITY;
for i in 0..L2_BLOCK_BITMAP_WORDS {
unsafe {
if PAGING_BITMAP[(page / 64) + i] != 0 {
return Err(NovaError::Paging);
}
};
}
for i in 0..L2_BLOCK_BITMAP_WORDS {
unsafe {
PAGING_BITMAP[(page / 64) + i] = u64::MAX;
};
}
Ok(())
}
fn find_unallocated_page() -> Option<usize> {
for (i, entry) in unsafe { PAGING_BITMAP }.iter().enumerate() {
if *entry != u64::MAX {
for offset in 0..64 {
if entry >> offset & 0b1 == 0 {
return Some((i * 64 + offset) * GRANULARITY);
}
}
}
}
None
}
fn find_contiguous_free_bitmap_words(required_words: usize) -> Option<usize> {
let mut run_start = 0;
let mut run_len = 0;
for (i, entry) in unsafe { PAGING_BITMAP }.iter().enumerate() {
if *entry == 0 {
if run_len == 0 {
run_start = i;
}
run_len += 1;
if run_len == required_words {
return Some(run_start);
}
} else {
run_len = 0;
}
}
None
}
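The scan logic above, extracted into a host-runnable form over a local slice (the kernel version works on the static `PAGING_BITMAP`); `trailing_ones()` locates the same first clear bit as the inner `for offset in 0..64` loop:

```rust
// Host-runnable sketch of the first-fit page scan above.
fn find_unallocated_page(bitmap: &[u64], granularity: usize) -> Option<usize> {
    for (i, word) in bitmap.iter().enumerate() {
        if *word != u64::MAX {
            let bit = word.trailing_ones() as usize; // index of first zero bit
            return Some((i * 64 + bit) * granularity);
        }
    }
    None
}

fn main() {
    let mut bitmap = [u64::MAX, 0b0111, 0];
    // Word 0 is full; word 1 has bits 0..=2 set, so page 64 + 3 is free.
    assert_eq!(find_unallocated_page(&bitmap, 4096), Some(67 * 4096));
    bitmap[1] |= 1 << 3; // reserve it, as reserve_page() would
    assert_eq!(find_unallocated_page(&bitmap, 4096), Some(68 * 4096));
}
```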


@@ -50,9 +50,9 @@ psr!(ESR_EL1, u32);
psr!(SPSR_EL1, u32);
psr!(ELR_EL1, u32);
psr!(ELR_EL1, u64);
psr!(SCTLR_EL1, u32);
psr!(SCTLR_EL1, u64);
pub fn read_exception_source_el() -> u32 {
read_spsr_el1() & 0b1111

src/config.S (new file)

@@ -0,0 +1,194 @@
.section .text.config
.align 4
.global el2_to_el1
el2_to_el1:
mov x0, #(1 << 31)
msr HCR_EL2, x0
// Set SPSR_EL2: return to EL1h
mov x0, #(0b0101)
msr SPSR_EL2, x0
// Set return address to kernel_main
adrp x0, kernel_main
add x0, x0, :lo12:kernel_main
msr ELR_EL2, x0
// Set SP_EL1 to stack base
adrp x0, EL1_STACK_TOP
ldr x1, [x0, :lo12:EL1_STACK_TOP]
msr SP_EL1, x1
// Set VBAR_EL1 to vector table
adrp x0, vector_table
add x0, x0, :lo12:vector_table
msr VBAR_EL1, x0
isb
adrp x0, SCTLR_EL1_CONF
ldr x1, [x0, :lo12:SCTLR_EL1_CONF]
msr SCTLR_EL1, x1
isb
// SIMD should not be trapped
mrs x0, CPACR_EL1
mov x1, #(0b11<<20)
orr x0,x0, x1
msr CPACR_EL1,x0
isb
// Return to EL1
eret
.section .text.config
.align 4
.global configure_mmu_el1
configure_mmu_el1:
// Configure MMU
adrp x0, TCR_EL1_CONF
ldr x1, [x0, :lo12:TCR_EL1_CONF]
msr TCR_EL1, x1
isb
// MAIR0: Normal Mem.
// MAIR1: Device Mem.
mov x0, #0x04FF
msr MAIR_EL1, x0
isb
// Configure translation table
adrp x0, TRANSLATIONTABLE_TTBR0
add x1, x0, :lo12:TRANSLATIONTABLE_TTBR0
msr TTBR0_EL1, x1
adrp x0, TRANSLATIONTABLE_TTBR1
add x1, x0, :lo12:TRANSLATIONTABLE_TTBR1
msr TTBR1_EL1, x1
tlbi vmalle1
dsb ish
isb
ret
.align 4
.global el1_to_el0
el1_to_el0:
// Set SPSR_EL1: return to EL0t
mov x0, #(0b0000)
msr SPSR_EL1, x0
// Set return address to el0
ldr x0, =el0
msr ELR_EL1, x0
// Set SP_EL1 to stack base
adrp x0, EL0_STACK_TOP
ldr x1, [x0, :lo12:EL0_STACK_TOP]
msr SP_EL0, x1
isb
// Return to EL0
eret
.align 4
irq_handler:
sub sp, sp, #176
stp x0, x1, [sp, #0]
stp x2, x3, [sp, #16]
stp x4, x5, [sp, #32]
stp x6, x7, [sp, #48]
stp x8, x9, [sp, #64]
stp x10, x11, [sp, #80]
stp x12, x13, [sp, #96]
stp x14, x15, [sp, #112]
stp x16, x17, [sp, #128]
stp x18, x29, [sp, #144]
stp x30, xzr, [sp, #160]
bl rust_irq_handler
ldp x0, x1, [sp, #0]
ldp x2, x3, [sp, #16]
ldp x4, x5, [sp, #32]
ldp x6, x7, [sp, #48]
ldp x8, x9, [sp, #64]
ldp x10, x11, [sp, #80]
ldp x12, x13, [sp, #96]
ldp x14, x15, [sp, #112]
ldp x16, x17, [sp, #128]
ldp x18, x29, [sp, #144]
ldp x30, xzr, [sp, #160]
add sp, sp, #176
eret
.align 4
synchronous_interrupt_imm_lower_aarch64:
sub sp, sp, #176
stp x0, x1, [sp, #0]
stp x2, x3, [sp, #16]
stp x4, x5, [sp, #32]
stp x6, x7, [sp, #48]
stp x8, x9, [sp, #64]
stp x10, x11, [sp, #80]
stp x12, x13, [sp, #96]
stp x14, x15, [sp, #112]
stp x16, x17, [sp, #128]
stp x18, x29, [sp, #144]
stp x30, xzr, [sp, #160]
bl rust_synchronous_interrupt_imm_lower_aarch64
ldp x0, x1, [sp, #0]
ldp x2, x3, [sp, #16]
ldp x4, x5, [sp, #32]
ldp x6, x7, [sp, #48]
ldp x8, x9, [sp, #64]
ldp x10, x11, [sp, #80]
ldp x12, x13, [sp, #96]
ldp x14, x15, [sp, #112]
ldp x16, x17, [sp, #128]
ldp x18, x29, [sp, #144]
ldp x30, xzr, [sp, #160]
add sp, sp, #176
eret
.align 4
synchronous_interrupt_no_el_change:
sub sp, sp, #176
stp x0, x1, [sp, #0]
stp x2, x3, [sp, #16]
stp x4, x5, [sp, #32]
stp x6, x7, [sp, #48]
stp x8, x9, [sp, #64]
stp x10, x11, [sp, #80]
stp x12, x13, [sp, #96]
stp x14, x15, [sp, #112]
stp x16, x17, [sp, #128]
stp x18, x29, [sp, #144]
stp x30, xzr, [sp, #160]
bl rust_synchronous_interrupt_no_el_change
ldp x0, x1, [sp, #0]
ldp x2, x3, [sp, #16]
ldp x4, x5, [sp, #32]
ldp x6, x7, [sp, #48]
ldp x8, x9, [sp, #64]
ldp x10, x11, [sp, #80]
ldp x12, x13, [sp, #96]
ldp x14, x15, [sp, #112]
ldp x16, x17, [sp, #128]
ldp x18, x29, [sp, #144]
ldp x30, xzr, [sp, #160]
add sp, sp, #176
eret
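`el2_to_el1` and `el1_to_el0` only take effect at the `eret`; on the Rust side the current level is read back from `CurrentEL`, bits [3:2]. A sketch of what the repo's `get_current_el` helper presumably does (the helper appears in this diff, its body does not):

```rust
use core::arch::asm;

/// Read the current exception level from CurrentEL, bits [3:2].
/// Sketch only: assumed to match the repo's get_current_el helper.
fn get_current_el() -> u64 {
    let current_el: u64;
    unsafe { asm!("mrs {}, CurrentEL", out(reg) current_el) };
    (current_el >> 2) & 0b11
}
```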


@@ -23,72 +23,107 @@ const SH0: u64 = 0b11 << 12; // Inner shareable
const TG1: u64 = 0b10 << 30; // 4KB granularity EL1
const T1SZ: u64 = 25 << 16; // T1SZ = 25 -> 64 - 25 = 39 bits of VA for TTBR1
const EPD1: u64 = 0b1 << 23; // Trigger translation fault when using TTBR1_EL1
const SH1: u64 = 0b11 << 28; // Inner shareable
const IPS: u64 = 0b000 << 32; // 32 bits of PA space -> up to 4GiB
const AS: u64 = 0b1 << 36; // configure an ASID size of 16 bits
#[no_mangle]
pub static TCR_EL1_CONF: u64 = IPS | TG0 | TG1 | T0SZ | T1SZ | SH0 | SH1 | EPD1 | AS;
pub static TCR_EL1_CONF: u64 = IPS | TG0 | TG1 | T0SZ | T1SZ | SH0 | SH1 | AS;
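Dropping `EPD1` is the functional change here: TTBR1 walks are now enabled for the kernel half. With `T1SZ = 25`, TTBR1 covers the top 2^39 bytes of the address space, which is exactly where `KERNEL_VIRTUAL_MEM_SPACE` starts; a quick host-side check:

```rust
fn main() {
    let t1sz = 25u32;
    let region = 1u64 << (64 - t1sz);       // 2^39 bytes of TTBR1-translated VA
    let ttbr1_base = u64::MAX - region + 1; // lowest VA translated by TTBR1
    assert_eq!(ttbr1_base, 0xFFFF_FF80_0000_0000); // == KERNEL_VIRTUAL_MEM_SPACE
}
```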
pub mod mmu {
use crate::{
aarch64::mmu::{
alloc_block_l2_explicit, map_l2_block, reserve_range_explicit, DEVICE_MEM,
EL0_ACCESSIBLE, LEVEL1_BLOCK_SIZE, LEVEL2_BLOCK_SIZE, NORMAL_MEM, PXN, READ_ONLY,
alloc_block_l2_explicit, allocate_memory, map_l2_block, map_page, reserve_range,
PhysSource, DEVICE_MEM, EL0_ACCESSIBLE, GRANULARITY, KERNEL_VIRTUAL_MEM_SPACE,
LEVEL1_BLOCK_SIZE, LEVEL2_BLOCK_SIZE, NORMAL_MEM, PXN, READ_ONLY, STACK_START_ADDR,
TRANSLATIONTABLE_TTBR0, UXN, WRITABLE,
},
PERIPHERAL_BASE,
};
#[no_mangle]
static EL1_STACK_TOP: usize = STACK_START_ADDR | KERNEL_VIRTUAL_MEM_SPACE;
const EL1_STACK_SIZE: usize = LEVEL2_BLOCK_SIZE * 2;
#[no_mangle]
static EL0_STACK_TOP: usize = STACK_START_ADDR;
const EL0_STACK_SIZE: usize = LEVEL2_BLOCK_SIZE * 2;
extern "C" {
static _data: u64;
static _end: u64;
static __text_end: u64;
static __share_end: u64;
static __kernel_end: u64;
}
pub fn initialize_mmu_translation_tables() {
let shared_segment_end = unsafe { &_data } as *const _ as usize;
let text_end = unsafe { &__text_end } as *const _ as usize;
let shared_segment_end = unsafe { &__share_end } as *const _ as usize;
let kernel_end = unsafe { &__kernel_end } as *const _ as usize;
let user_space_end = unsafe { &_end } as *const _ as usize;
reserve_range_explicit(0x0, user_space_end).unwrap();
reserve_range(0x0, kernel_end).unwrap();
for addr in (0..shared_segment_end).step_by(LEVEL2_BLOCK_SIZE) {
let _ = map_l2_block(
for addr in (0..text_end).step_by(GRANULARITY) {
map_page(
addr,
addr,
unsafe { &mut TRANSLATIONTABLE_TTBR0 },
core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
EL0_ACCESSIBLE | READ_ONLY | NORMAL_MEM,
);
)
.unwrap();
}
for addr in (text_end..shared_segment_end).step_by(GRANULARITY) {
map_page(
addr,
addr,
core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
EL0_ACCESSIBLE | WRITABLE | NORMAL_MEM,
)
.unwrap();
}
for addr in (shared_segment_end..kernel_end).step_by(LEVEL2_BLOCK_SIZE) {
let _ = map_l2_block(
map_l2_block(
addr,
addr,
unsafe { &mut TRANSLATIONTABLE_TTBR0 },
core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
WRITABLE | UXN | NORMAL_MEM,
);
}
for addr in (kernel_end..user_space_end).step_by(LEVEL2_BLOCK_SIZE) {
let _ = map_l2_block(
addr,
addr,
unsafe { &mut TRANSLATIONTABLE_TTBR0 },
EL0_ACCESSIBLE | WRITABLE | PXN | NORMAL_MEM,
);
)
.unwrap();
}
for addr in (PERIPHERAL_BASE..LEVEL1_BLOCK_SIZE).step_by(LEVEL2_BLOCK_SIZE) {
let _ = alloc_block_l2_explicit(
alloc_block_l2_explicit(
addr,
addr,
unsafe { &mut TRANSLATIONTABLE_TTBR0 },
core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
EL0_ACCESSIBLE | WRITABLE | UXN | PXN | DEVICE_MEM,
);
}
)
.unwrap();
}
// Frame Buffer memory range
allocate_memory(
0x3c100000,
1080 * 1920 * 4,
PhysSource::Explicit(0x3c100000),
NORMAL_MEM | PXN | UXN | WRITABLE | EL0_ACCESSIBLE,
)
.unwrap();
allocate_memory(
EL1_STACK_TOP - EL1_STACK_SIZE + 0x10,
EL1_STACK_SIZE,
PhysSource::Any,
WRITABLE | NORMAL_MEM,
)
.unwrap();
allocate_memory(
EL0_STACK_TOP - EL0_STACK_SIZE + 0x10,
EL0_STACK_SIZE,
PhysSource::Any,
WRITABLE | EL0_ACCESSIBLE | NORMAL_MEM,
)
.unwrap();
}
}
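The stack-top constants fall out of `STACK_START_ADDR = !KERNEL_VIRTUAL_MEM_SPACE & !0xF`: the EL0 stack tops out at the last 16-byte-aligned address of the user (TTBR0) region, and OR-ing the kernel prefix back in puts the EL1 stack at the same offset in the TTBR1 region. The `+ 0x10` in the allocation start keeps the top address itself inside the mapped range. Checked on a 64-bit host:

```rust
fn main() {
    const KERNEL_VIRTUAL_MEM_SPACE: usize = 0xFFFF_FF80_0000_0000;
    const STACK_START_ADDR: usize = !KERNEL_VIRTUAL_MEM_SPACE & !0xF;

    // EL0 stack top: end of the 2^39-byte user region, 16-byte aligned.
    assert_eq!(STACK_START_ADDR, 0x0000_007F_FFFF_FFF0);
    // EL1 stack top: the same offset inside the kernel (TTBR1) region.
    assert_eq!(
        STACK_START_ADDR | KERNEL_VIRTUAL_MEM_SPACE,
        0xFFFF_FFFF_FFFF_FFF0
    );
}
```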


@@ -41,7 +41,7 @@ impl FrameBuffer {
return;
}
unsafe {
write_volatile(self.start_addr.byte_add(4 * offset as usize), color);
write_volatile(self.start_addr.add(offset as usize), color);
}
}
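The framebuffer change is behavior-preserving: `add` on a typed pointer advances by elements, so on a `*mut u32` it already scales by 4 and the explicit `byte_add(4 * offset)` was redundant. A quick demonstration:

```rust
fn main() {
    let buf = [0u32; 8];
    let p = buf.as_ptr();
    let offset = 3usize;
    // add() on a *const u32 steps in 4-byte elements, so these agree:
    assert_eq!(unsafe { p.byte_add(4 * offset) }, unsafe { p.add(offset) });
}
```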


@@ -82,7 +82,7 @@ unsafe extern "C" fn rust_irq_handler() {
println!("Return register address: {:#x}", read_esr_el1());
}
if let Some(handler_vec) = unsafe { INTERRUPT_HANDLERS.as_ref() } {
if let Some(handler_vec) = unsafe { &*core::ptr::addr_of_mut!(INTERRUPT_HANDLERS) } {
for handler in handler_vec {
if (pending_irqs & (1 << (handler.source.clone() as u32))) != 0 {
(handler.function)();
@@ -222,7 +222,7 @@ pub fn initialize_interrupt_handler() {
}
pub fn register_interrupt_handler(source: IRQSource, function: fn()) {
if let Some(handler_vec) = unsafe { INTERRUPT_HANDLERS.as_mut() } {
if let Some(handler_vec) = unsafe { &mut *core::ptr::addr_of_mut!(INTERRUPT_HANDLERS) } {
handler_vec.push(InterruptHandlers { source, function });
}
}
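Both hunks swap `STATIC.as_mut()` for a raw-pointer round-trip; this is the pattern Rust now pushes for `static mut` (the `static_mut_refs` lint), avoiding a direct `&mut` to the static. A minimal standalone version of the same pattern, with a hypothetical static standing in for `INTERRUPT_HANDLERS`:

```rust
// Form a raw pointer with addr_of_mut! and only then create the reference.
static mut HANDLER_COUNT: Option<u32> = None;

fn register() {
    // Sound only while no other reference to HANDLER_COUNT is live
    // (single core, interrupts masked -- the kernel's situation here).
    if let Some(count) = unsafe { &mut *core::ptr::addr_of_mut!(HANDLER_COUNT) } {
        *count += 1;
    }
}

fn main() {
    unsafe { HANDLER_COUNT = Some(0) };
    register();
}
```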


@@ -12,24 +12,31 @@ use core::{
use heap::Heap;
use crate::{interrupt_handlers::initialize_interrupt_handler, logger::DefaultLogger};
use crate::{
aarch64::mmu::{
allocate_memory, PhysSource, KERNEL_VIRTUAL_MEM_SPACE, LEVEL2_BLOCK_SIZE, NORMAL_MEM, UXN,
WRITABLE,
},
interrupt_handlers::initialize_interrupt_handler,
logger::DefaultLogger,
};
static PERIPHERAL_BASE: usize = 0x3F00_0000;
unsafe extern "C" {
unsafe static mut __heap_start: u8;
unsafe static mut __heap_end: u8;
unsafe static mut __kernel_end: u8;
}
#[global_allocator]
pub static mut GLOBAL_ALLOCATOR: Heap = Heap::empty();
pub unsafe fn init_heap() {
let start = core::ptr::addr_of_mut!(__heap_start) as usize;
let end = core::ptr::addr_of_mut!(__heap_end) as usize;
pub unsafe fn init_kernel_heap() {
let start = core::ptr::addr_of_mut!(__kernel_end) as usize | KERNEL_VIRTUAL_MEM_SPACE;
let size = LEVEL2_BLOCK_SIZE * 2;
allocate_memory(start, size, PhysSource::Any, NORMAL_MEM | UXN | WRITABLE).unwrap();
let heap = core::ptr::addr_of_mut!(GLOBAL_ALLOCATOR);
(*heap).init(start, end);
(*heap).init(start, start + size);
}
#[panic_handler]
@@ -46,7 +53,6 @@ pub mod configuration;
pub mod framebuffer;
pub mod interrupt_handlers;
pub mod logger;
pub mod timer;
pub mod pi3;
@@ -73,6 +79,7 @@ pub fn get_current_el() -> u64 {
}
pub fn initialize_kernel() {
unsafe { init_kernel_heap() };
logger::set_logger(Box::new(DefaultLogger));
initialize_interrupt_handler();
}
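`init_kernel_heap` now maps the heap on demand instead of using a linker-reserved region: two L2 blocks at `__kernel_end | KERNEL_VIRTUAL_MEM_SPACE`, backed by whatever physical blocks the bitmap hands out. The size arithmetic, using the constants from the MMU module:

```rust
fn main() {
    const GRANULARITY: usize = 4096; // 4 KiB pages
    const TABLE_ENTRY_COUNT: usize = 512;
    const LEVEL2_BLOCK_SIZE: usize = TABLE_ENTRY_COUNT * GRANULARITY; // 2 MiB
    // The new kernel heap spans two L2 blocks = 4 MiB
    // (the old linker-script heap was 100 KiB).
    assert_eq!(LEVEL2_BLOCK_SIZE * 2, 4 * 1024 * 1024);
}
```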


@@ -31,14 +31,12 @@ macro_rules! log {
}
pub fn log(args: fmt::Arguments) {
unsafe {
if let Some(logger) = LOGGER.as_mut() {
if let Some(logger) = unsafe { &mut *core::ptr::addr_of_mut!(LOGGER) } {
logger.write_str("\n").unwrap();
logger.write_fmt(args).unwrap();
logger.flush();
}
}
}
pub fn set_logger(logger: Box<dyn Logger>) {
unsafe {


@@ -1,6 +1,5 @@
#![no_main]
#![no_std]
#![feature(asm_experimental_arch)]
#![allow(static_mut_refs)]
#![allow(clippy::missing_safety_doc)]
use core::{
@@ -10,17 +9,12 @@ use core::{
extern crate alloc;
use alloc::vec::Vec;
use nova::{
aarch64::{
mmu::{
allocate_memory_explicit, EL0_ACCESSIBLE, LEVEL2_BLOCK_SIZE, NORMAL_MEM, PXN, UXN,
WRITABLE,
},
registers::{daif, read_id_aa64mmfr0_el1},
},
aarch64::registers::{daif, read_id_aa64mmfr0_el1},
configuration::mmu::initialize_mmu_translation_tables,
framebuffer::{FrameBuffer, BLUE, GREEN, RED},
get_current_el, init_heap,
get_current_el,
interrupt_handlers::{enable_irq_source, IRQSource},
peripherals::{
gpio::{
@@ -29,11 +23,13 @@ use nova::{
},
uart::uart_init,
},
pi3::mailbox,
println,
};
global_asm!(include_str!("vector.S"));
global_asm!(include_str!("config.S"));
static mut FRAMEBUFFER: Option<FrameBuffer> = None;
extern "C" {
fn el2_to_el1();
@@ -69,23 +65,14 @@ pub extern "C" fn main() -> ! {
println!("Exception level: {}", get_current_el());
unsafe {
init_heap();
initialize_mmu_translation_tables();
// Frame Buffer memory range
// TODO: this is just temporary
// TODO: Investigate why the size is off
allocate_memory_explicit(
0x3c100000,
1080 * 1920 * 4 + LEVEL2_BLOCK_SIZE + LEVEL2_BLOCK_SIZE,
0x3c100000,
NORMAL_MEM | PXN | UXN | WRITABLE | EL0_ACCESSIBLE,
)
.unwrap();
configure_mmu_el1();
println!("MMU initialized...");
};
println!("AA64 {:064b}", read_id_aa64mmfr0_el1());
println!("Register: AA64MMFR0_EL1: {:064b}", read_id_aa64mmfr0_el1());
println!("Moving El2->EL1");
unsafe { FRAMEBUFFER = Some(FrameBuffer::default()) };
unsafe {
el2_to_el1();
@@ -105,7 +92,14 @@ unsafe fn zero_bss() {
#[no_mangle]
pub extern "C" fn kernel_main() -> ! {
println!("Kernel Start...");
nova::initialize_kernel();
let mut test_vector = Vec::new();
for i in 0..20 {
test_vector.push(i);
}
println!("heap allocation test: {:?}", test_vector);
println!("Exception Level: {}", get_current_el());
daif::unmask_all();
@@ -129,10 +123,9 @@ pub extern "C" fn el0() -> ! {
enable_irq_source(IRQSource::UartInt);
let fb = FrameBuffer::default();
if let Some(fb) = unsafe { FRAMEBUFFER.as_mut() } {
for i in 0..1080 {
fb.draw_pixel(50, i, RED);
fb.draw_pixel(50, i, BLUE);
}
fb.draw_square(500, 500, 600, 700, RED);
fb.draw_square_fill(800, 800, 900, 900, GREEN);
@@ -141,10 +134,14 @@ pub extern "C" fn el0() -> ! {
fb.draw_string("Hello World! :D\nTest next Line", 500, 5, 3, BLUE);
fb.draw_function(cos, 0, 101, RED);
}
loop {
let temp = mailbox::read_soc_temp([0]).unwrap();
println!("{} °C", temp[1] / 1000);
// TODO: Mailbox requires a physical address. The stack is now in VA space, causing an issue.
// Fix with SVCs?
// let temp = mailbox::read_soc_temp([0]).unwrap();
// println!("{} °C", temp[1] / 1000);
blink_gpio(SpecificGpio::OnboardLed as u8, 500);
}
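On the commented-out mailbox call: the property interface takes a physical buffer address, and a buffer on the now virtually-mapped stack no longer has VA == PA. One possible direction (hypothetical, not part of this PR) is to keep the buffer in a static, which still lives in the identity-mapped kernel image:

```rust
// Hypothetical workaround sketch, not code from this PR: a statically
// allocated, 16-byte-aligned mailbox buffer. Statics live in the kernel
// image, which this PR still identity-maps, so the buffer's address remains
// usable as a physical address for the mailbox.
#[repr(C, align(16))]
struct MailboxBuffer([u32; 36]);

static mut MAILBOX_BUFFER: MailboxBuffer = MailboxBuffer([0; 36]);

fn mailbox_buffer_phys_addr() -> usize {
    core::ptr::addr_of_mut!(MAILBOX_BUFFER) as usize
}
```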


@@ -2,7 +2,7 @@ use core::result::Result;
use core::result::Result::Ok;
use core::sync::atomic::{compiler_fence, Ordering};
use crate::timer::{delay_nops, sleep_ms};
use crate::pi3::timer::{delay_nops, sleep_ms};
use crate::{read_address, write_address};
const GPFSEL_BASE: u32 = 0x3F20_0000;


@@ -1,2 +1,3 @@
pub mod mailbox;
pub mod power_management;
pub mod timer;


@@ -23,5 +23,6 @@ pub fn reboot_system() {
PM_PASSWORD | (pm_rstc_val & PM_RSTC_WRCFG_CLR) | PM_RSTC_WRCFG_FULL_RESET,
);
}
#[allow(clippy::empty_loop)]
loop {}
}


@@ -1,13 +1,12 @@
.global v_table
.section .vector_table , "ax"
.extern irq_handler
.macro ventry label
.align 7
.align 11
b \label
.endm
.section .vector_table , "ax"
.global vector_table
vector_table:
ventry .
ventry .
@@ -28,190 +27,3 @@ vector_table:
ventry .
ventry .
ventry .
.align 4
.global el2_to_el1
el2_to_el1:
mov x0, #(1 << 31)
msr HCR_EL2, x0
// Set SPSR_EL2: return to EL1h
mov x0, #(0b0101)
msr SPSR_EL2, x0
// Set return address to kernel_main
ldr x0, =kernel_main
msr ELR_EL2, x0
// Set SP_EL1 to stack base
ldr x0, =__stack_end
msr SP_EL1, x0
// Set VBAR_EL1 to vector table
adr x0, vector_table
msr VBAR_EL1, x0
isb
adrp x0, SCTLR_EL1_CONF
ldr x1, [x0, :lo12:SCTLR_EL1_CONF]
msr SCTLR_EL1, x1
isb
// SIMD should not be trapped
mrs x0, CPACR_EL1
mov x1, #(0b11<<20)
orr x0,x0, x1
msr CPACR_EL1,x0
isb
// Return to EL1
eret
.align 4
.global configure_mmu_el1
configure_mmu_el1:
// Configure MMU
adrp x0, TCR_EL1_CONF
ldr x1, [x0, :lo12:TCR_EL1_CONF]
msr TCR_EL1, x1
isb
// MAIR0: Normal Mem.
// MAIR1: Device Mem.
mov x0, #0x04FF
msr MAIR_EL1, x0
isb
// Configure translation table
adrp x0, TRANSLATIONTABLE_TTBR0
add x1, x0, :lo12:TRANSLATIONTABLE_TTBR0
msr TTBR0_EL1, x1
msr TTBR1_EL1, x1
tlbi vmalle1
dsb ish
isb
ret
.align 4
.global el1_to_el0
el1_to_el0:
// Set SPSR_EL1: return to EL0t
mov x0, #(0b0000)
msr SPSR_EL1, x0
// Set return address to el0
ldr x0, =el0
msr ELR_EL1, x0
// Set SP_EL1 to stack base
ldr x0, =__stack_end_el0
msr SP_EL0, x0
isb
// Return to EL0
eret
.align 4
irq_handler:
sub sp, sp, #176
stp x0, x1, [sp, #0]
stp x2, x3, [sp, #16]
stp x4, x5, [sp, #32]
stp x6, x7, [sp, #48]
stp x8, x9, [sp, #64]
stp x10, x11, [sp, #80]
stp x12, x13, [sp, #96]
stp x14, x15, [sp, #112]
stp x16, x17, [sp, #128]
stp x18, x29, [sp, #144]
stp x30, xzr, [sp, #160]
bl rust_irq_handler
ldp x0, x1, [sp, #0]
ldp x2, x3, [sp, #16]
ldp x4, x5, [sp, #32]
ldp x6, x7, [sp, #48]
ldp x8, x9, [sp, #64]
ldp x10, x11, [sp, #80]
ldp x12, x13, [sp, #96]
ldp x14, x15, [sp, #112]
ldp x16, x17, [sp, #128]
ldp x18, x29, [sp, #144]
ldp x30, xzr, [sp, #160]
add sp, sp, #176
eret
.align 4
synchronous_interrupt_imm_lower_aarch64:
sub sp, sp, #176
stp x0, x1, [sp, #0]
stp x2, x3, [sp, #16]
stp x4, x5, [sp, #32]
stp x6, x7, [sp, #48]
stp x8, x9, [sp, #64]
stp x10, x11, [sp, #80]
stp x12, x13, [sp, #96]
stp x14, x15, [sp, #112]
stp x16, x17, [sp, #128]
stp x18, x29, [sp, #144]
stp x30, xzr, [sp, #160]
bl rust_synchronous_interrupt_imm_lower_aarch64
ldp x0, x1, [sp, #0]
ldp x2, x3, [sp, #16]
ldp x4, x5, [sp, #32]
ldp x6, x7, [sp, #48]
ldp x8, x9, [sp, #64]
ldp x10, x11, [sp, #80]
ldp x12, x13, [sp, #96]
ldp x14, x15, [sp, #112]
ldp x16, x17, [sp, #128]
ldp x18, x29, [sp, #144]
ldp x30, xzr, [sp, #160]
add sp, sp, #176
eret
.align 4
synchronous_interrupt_no_el_change:
sub sp, sp, #176
stp x0, x1, [sp, #0]
stp x2, x3, [sp, #16]
stp x4, x5, [sp, #32]
stp x6, x7, [sp, #48]
stp x8, x9, [sp, #64]
stp x10, x11, [sp, #80]
stp x12, x13, [sp, #96]
stp x14, x15, [sp, #112]
stp x16, x17, [sp, #128]
stp x18, x29, [sp, #144]
stp x30, xzr, [sp, #160]
bl rust_synchronous_interrupt_no_el_change
ldp x0, x1, [sp, #0]
ldp x2, x3, [sp, #16]
ldp x4, x5, [sp, #32]
ldp x6, x7, [sp, #48]
ldp x8, x9, [sp, #64]
ldp x10, x11, [sp, #80]
ldp x12, x13, [sp, #96]
ldp x14, x15, [sp, #112]
ldp x16, x17, [sp, #128]
ldp x18, x29, [sp, #144]
ldp x30, xzr, [sp, #160]
add sp, sp, #176
eret


@@ -11,6 +11,4 @@ qemu-system-aarch64 \
-cpu cortex-a53 \
-serial stdio \
-sd ../sd.img \
-display none \
-kernel ../target/aarch64-unknown-none/debug/kernel8.img \
-s -S


@@ -100,7 +100,7 @@ fn test_merging_free_sections() {
);
let root_header = heap.start_address;
let root_header_start_size = unsafe { (*root_header).size };
let _root_header_start_size = unsafe { (*root_header).size };
let malloc1 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
let malloc_header_before = unsafe { *Heap::get_header_ref_from_data_pointer(malloc1) };
@@ -135,14 +135,13 @@ fn test_first_fit() {
);
let root_header = heap.start_address;
let root_header_start_size = unsafe { (*root_header).size };
let _root_header_start_size = unsafe { (*root_header).size };
let malloc1 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
let malloc2 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
let _malloc2 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
let malloc3 = heap.malloc(MIN_BLOCK_SIZE * 3).unwrap();
let malloc4 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
unsafe {
assert!(heap.free(malloc1).is_ok());
assert!(heap.free(malloc3).is_ok());
let malloc5 = heap.malloc(MIN_BLOCK_SIZE * 2).unwrap();
@@ -162,4 +161,3 @@ fn test_first_fit() {
let malloc7 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
assert_eq!(malloc1, malloc7);
}
}