diff --git a/src/heap.rs b/src/heap.rs
index 3263161..31b01b1 100644
--- a/src/heap.rs
+++ b/src/heap.rs
@@ -1,6 +1,8 @@
+#![allow(static_mut_refs)]
+
 use core::{
     alloc::GlobalAlloc,
-    ptr::{self, null, null_mut, read_volatile, write_volatile},
+    ptr::{self, null_mut, read_volatile},
 };

 use crate::NovaError;
@@ -11,7 +13,7 @@ extern "C" {
     static mut __heap_end: u8;
 }

-#[repr(C)]
+#[repr(C, align(16))]
 pub struct HeapHeader {
     pub next: *mut HeapHeader,
     before: *mut HeapHeader,
@@ -22,121 +24,177 @@ pub struct HeapHeader {
 const HEAP_HEADER_SIZE: usize = size_of::<HeapHeader>();
 const MIN_BLOCK_SIZE: usize = 16;

+// TODO: This implementation has to be reevaluated once multiprocessing is
+// introduced. A spinlock could be a solution, but it has problems of its own:
+// https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
+pub static mut HEAP: Heap = Heap {
+    start_address: &raw mut __heap_start as *mut HeapHeader,
+    end_address: &raw mut __heap_end as *mut HeapHeader,
+    raw_size: 0,
+};
+
+// TODO: investigate whether there is a better alternative to this
+pub unsafe fn init_global_heap() {
+    HEAP.init();
+}
+
 #[derive(Default)]
 pub struct Novalloc;

 unsafe impl GlobalAlloc for Novalloc {
     unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 {
-        malloc(layout.size()).unwrap()
+        // The GlobalAlloc contract reports failure through a null pointer,
+        // so don't panic inside the allocator. NOTE: `layout.align()` is
+        // ignored; every block is 16-byte aligned.
+        HEAP.malloc(layout.size()).unwrap_or(null_mut())
     }

-    unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) {
-        free(ptr).unwrap();
+    unsafe fn dealloc(&self, ptr: *mut u8, _: core::alloc::Layout) {
+        HEAP.free(ptr).unwrap();
     }
 }

 #[global_allocator]
 static GLOBAL_ALLOCATOR: Novalloc = Novalloc;

-pub fn init_heap() {
-    unsafe {
-        let heap_end = &raw const __heap_end as usize;
-        let heap_start = &raw const __heap_start as usize;
-
-        ptr::write(
-            &raw const __heap_start as *mut HeapHeader,
-            HeapHeader {
-                next: null_mut(),
-                before: null_mut(),
-                size: heap_end - heap_start - HEAP_HEADER_SIZE,
-                free: true,
-            },
-        );
-    }
-}
-
-pub fn malloc(mut size: usize) -> Result<*mut u8, NovaError> {
-    let mut head = &raw const __heap_start as *mut HeapHeader;
-
-    if size == 0 {
-        return Err(NovaError::EmptyHeapNotAllowed);
-    }
-
-    if size < MIN_BLOCK_SIZE {
-        size = MIN_BLOCK_SIZE;
-    }
-
-    // Align size to the next 16 bytes
-    size += (16 - (size % 16)) % 16;
-
-    unsafe {
-        // Find First-Fit memory segment
-        while !(*head).free || size > (*head).size {
-            if (*head).next.is_null() {
-                return Err(NovaError::HeapFull);
-            }
-            head = (*head).next;
-        }
-
-        // Return entire block WITHOUT generating a new header
-        // if the current block doesn't have enough space to hold: requested size + HEAP_HEADER_SIZE + MIN_BLOCK_SIZE
-        if (*head).size < size + HEAP_HEADER_SIZE + MIN_BLOCK_SIZE {
-            (*head).free = false;
-            return Ok(head.byte_add(HEAP_HEADER_SIZE) as *mut u8);
-        }
-
-        let byte_offset = HEAP_HEADER_SIZE + size;
-        let new_address = head.byte_add(byte_offset);
-
-        // Handle case where fragmenting center free space
-        let next = (*head).next;
-        if !(*head).next.is_null() {
-            (*next).before = new_address;
-        }
-
-        ptr::write(
-            new_address as *mut HeapHeader,
-            HeapHeader {
-                next,
-                before: head,
-                size: (*head).size - size - HEAP_HEADER_SIZE,
-                free: true,
-            },
-        );
-        (*head).next = new_address;
-        (*head).free = false;
-        (*head).size = size;
-
-        let data_start_address = head.byte_add(HEAP_HEADER_SIZE);
-
-        Ok(data_start_address as *mut u8)
-    }
-}
-
+pub struct Heap {
+    start_address: *mut HeapHeader,
+    end_address: *mut HeapHeader,
+    raw_size: usize,
+}
+
+impl Heap {
+    pub fn new(heap_start: usize, heap_end: usize) -> Self {
+        let mut instance = Self {
+            // Cast the address values directly: `&raw const heap_start`
+            // would point at the local parameter, not at the heap.
+            start_address: heap_start as *mut HeapHeader,
+            end_address: heap_end as *mut HeapHeader,
+            raw_size: heap_end - heap_start,
+        };
+        instance.init();
+        instance
+    }
+
+    fn init(&mut self) {
+        self.raw_size = self.end_address as usize - self.start_address as usize;
+
+        // Write the initial header: a single free block spanning the whole heap.
+        unsafe {
+            ptr::write(
+                self.start_address,
+                HeapHeader {
+                    next: null_mut(),
+                    before: null_mut(),
+                    size: self.raw_size - HEAP_HEADER_SIZE,
+                    free: true,
+                },
+            );
+        }
+    }
+
+    unsafe fn find_first_fit(&self, size: usize) -> Result<*mut HeapHeader, NovaError> {
+        let mut current = self.start_address;
+        while !fits(size, current) {
+            // Check `current` here, not the list head, or the walk would
+            // inspect the wrong node and never report a full heap.
+            if (*current).next.is_null() {
+                return Err(NovaError::HeapFull);
+            }
+            current = (*current).next;
+        }
+        Ok(current)
+    }
+
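+    /// First-fit allocation: returns a pointer to a data area of at least
+    /// `size` bytes (16-byte aligned, provided `__heap_start` itself is
+    /// 16-byte aligned), splitting the chosen block when worthwhile.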
+    pub fn malloc(&self, mut size: usize) -> Result<*mut u8, NovaError> {
+        if size == 0 {
+            return Err(NovaError::EmptyHeapNotAllowed);
+        }
+
+        if size < MIN_BLOCK_SIZE {
+            size = MIN_BLOCK_SIZE;
+        }
+
+        // Round the size up to the next multiple of 16 bytes, e.g. a request
+        // for 20 bytes becomes 32, while an already-aligned 32 stays 32.
+        size += (16 - (size % 16)) % 16;
+
+        unsafe {
+            // Find the first free segment that fits (first-fit)
+            let current = self.find_first_fit(size)?;
+
+            // Hand out the entire block WITHOUT generating a new header if
+            // splitting would not leave room for another header plus a
+            // minimum-sized block behind the requested size.
+            if (*current).size < size + HEAP_HEADER_SIZE + MIN_BLOCK_SIZE {
+                (*current).free = false;
+                return Ok(current.byte_add(HEAP_HEADER_SIZE) as *mut u8);
+            }
+
+            let byte_offset = HEAP_HEADER_SIZE + size;
+            let new_address = current.byte_add(byte_offset);
+
+            // When splitting a block in the middle of the list, relink the
+            // successor's back pointer to the new free remainder.
+            let next = (*current).next;
+            if !(*current).next.is_null() {
+                (*next).before = new_address;
+            }
+
+            ptr::write(
+                new_address as *mut HeapHeader,
+                HeapHeader {
+                    next,
+                    before: current,
+                    size: (*current).size - size - HEAP_HEADER_SIZE,
+                    free: true,
+                },
+            );
+            (*current).next = new_address;
+            (*current).free = false;
+            (*current).size = size;
+
+            let data_start_address = current.byte_add(HEAP_HEADER_SIZE);
+
+            Ok(data_start_address as *mut u8)
+        }
+    }
+
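+    /// Frees the block whose data area starts at `pointer`, merging it with
+    /// free neighbouring segments to limit fragmentation.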
+    pub fn free(&self, pointer: *mut u8) -> Result<(), NovaError> {
+        let mut segment = unsafe { pointer.sub(HEAP_HEADER_SIZE) as *mut HeapHeader };
+        unsafe {
+            // If the previous segment is free: delete this header, add this
+            // segment's size to the predecessor, fix the links, and continue
+            // from the predecessor.
+            if !(*segment).before.is_null() && (*(*segment).before).free {
+                let before_head = (*segment).before;
+                (*before_head).size += (*segment).size + HEAP_HEADER_SIZE;
+                delete_header(segment);
+                segment = before_head;
+            }
+            // If the next segment is free: delete its header, absorb its
+            // size, and fix the links.
+            if !(*segment).next.is_null() && (*(*segment).next).free {
+                let next_head = (*segment).next;
+                (*segment).size += (*next_head).size + HEAP_HEADER_SIZE;
+                delete_header(next_head);
+            }
+            // Finally, mark the (possibly merged) segment as free.
+            (*segment).free = true;
+        }
+
+        Ok(())
+    }
+
+    pub fn traverse_heap(&self) {
+        let mut pointer_address = self.start_address;
+        loop {
+            let head = unsafe { read_volatile(pointer_address) };
+            // Print as usize: casting a 64-bit pointer to u32 would truncate it.
+            println!("Header {:#x}", pointer_address as usize);
+            println!("free: {}", head.free);
+            println!("size: {}", head.size);
+            println!("hasNext: {}", !head.next.is_null());
+            println!();
+            if !head.next.is_null() {
+                pointer_address = head.next;
+            } else {
+                println!("---------------");
+                return;
+            }
+        }
+    }
+}

-pub fn free(pointer: *mut u8) -> Result<(), NovaError> {
-    let mut head = unsafe { pointer.sub(HEAP_HEADER_SIZE) as *mut HeapHeader };
-    unsafe {
-        // IF prev is free:
-        // Delete header, add size to previous and fix pointers.
-        // Move Head left
-        if !(*head).before.is_null() && (*(*head).before).free {
-            let before_head = (*head).before;
-            (*before_head).size += (*head).size + HEAP_HEADER_SIZE;
-            delete_header(head);
-            head = before_head;
-        }
-        // IF next is free:
-        // Delete next header and merge size, fix pointers
-        if !(*head).next.is_null() && (*(*head).next).free {
-            let next_head = (*head).next;
-            (*head).size += (*next_head).size + HEAP_HEADER_SIZE;
-            delete_header(next_head);
-        }
-        // Neither: Set free
-        (*head).free = true;
-    }
-
-    Ok(())
+// A segment can satisfy a request only if it is free and large enough.
+unsafe fn fits(size: usize, header: *mut HeapHeader) -> bool {
+    (*header).free && size <= (*header).size
 }

 unsafe fn delete_header(header: *mut HeapHeader) {
@@ -151,21 +209,3 @@ unsafe fn delete_header(header: *mut HeapHeader) {
         (*next).before = before;
     }
 }
-
-pub fn traverse_heap_tree() {
-    let mut pointer_address = &raw const __heap_start as *const HeapHeader;
-    loop {
-        let head = unsafe { read_volatile(pointer_address) };
-        println!("Header {:#x}", pointer_address as u32);
-        println!("free: {}", head.free);
-        println!("size: {}", head.size);
-        println!("hasNext: {}", !head.next.is_null());
-        println!();
-        if !head.next.is_null() {
-            pointer_address = head.next;
-        } else {
-            println!("---------------");
-            return;
-        }
-    }
-}
diff --git a/src/main.rs b/src/main.rs
index f2c21d9..5f778ea 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,7 +1,7 @@
 #![no_main]
 #![no_std]
 #![feature(asm_experimental_arch)]
+#![allow(static_mut_refs)]

 use core::{
     arch::{asm, global_asm},
     panic::PanicInfo,
@@ -12,7 +12,7 @@ extern crate alloc;

 use nova::{
     framebuffer::{FrameBuffer, BLUE, GREEN, RED},
-    heap::init_heap,
+    heap::{init_global_heap, HEAP},
     irq_interrupt::enable_irq_source,
     mailbox::mb_read_soc_temp,
     peripherals::{
@@ -88,8 +88,7 @@ unsafe fn zero_bss() {
 pub extern "C" fn kernel_main() -> ! {
     println!("EL: {}", get_current_el());

-    // Initialize the first heap header
-    init_heap();
+    heap_test();

     sleep_us(500_000);

@@ -117,11 +116,28 @@ pub extern "C" fn kernel_main() -> ! {
     }
 }

+fn heap_test() {
+    unsafe {
+        init_global_heap();
+        let a = HEAP.malloc(32).unwrap();
+        let b = HEAP.malloc(64).unwrap();
+        let c = HEAP.malloc(128).unwrap();
+        // Never freed below, so the trailing free space stays a separate block.
+        let _ = HEAP.malloc(256).unwrap();
+        HEAP.traverse_heap();
+        // Free out of order to exercise coalescing with both neighbours.
+        HEAP.free(b).unwrap();
+        HEAP.traverse_heap();
+        HEAP.free(a).unwrap();
+        HEAP.traverse_heap();
+        HEAP.free(c).unwrap();
+        HEAP.traverse_heap();
+    }
+}
+
 fn cos(x: u32) -> f64 {
     libm::cos(x as f64 * 0.1) * 20.0
 }

-pub fn get_current_el() -> u64 {
+fn get_current_el() -> u64 {
     let el: u64;
     unsafe {
         asm!(
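
Reviewer note, not part of the patch: with `#[global_allocator]` registered,
the allocator can also be exercised through ordinary `alloc` containers
instead of raw `HEAP.malloc`/`HEAP.free` calls. A minimal sketch, assuming
`init_global_heap()` has already run; `allocator_smoke_test` is a hypothetical
helper, not a function from this patch:

    fn allocator_smoke_test() {
        use alloc::vec::Vec;
        // Vec routes allocation through Novalloc::alloc / Novalloc::dealloc.
        let mut v: Vec<u32> = Vec::with_capacity(4);
        v.extend_from_slice(&[1, 2, 3, 4]);
        assert_eq!(v.iter().sum::<u32>(), 10);
        drop(v); // the freed block should coalesce with free neighbours
    }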