implement heap allocator tests

This commit is contained in:
Alexander Neuhäuser
2025-12-20 17:40:45 +01:00
committed by GitHub
parent 82fa03d48e
commit 36bc1f3315
20 changed files with 730 additions and 347 deletions

View File

@@ -1,9 +1,10 @@
static SCTLR_EL1_MMU_DISABLED: u64 = 0 << 0; //M
static SCTLR_EL1_MMU_DISABLED: u64 = 0; //M
static SCTLR_EL1_DATA_CACHE_DISABLED: u64 = 0 << 2; //C
static SCTLR_EL1_INSTRUCTION_CACHE_DISABLED: u64 = 0 << 12; //I
static SCTLR_EL1_LITTLE_ENDIAN_EL0: u64 = 0 << 24; //E0E
static SCTLR_EL1_LITTLE_ENDIAN_EL1: u64 = 0 << 25; //EE
#[allow(clippy::identity_op)]
static SCTLR_EL1_RES: u64 = (0 << 6) | (1 << 11) | (0 << 17) | (1 << 20) | (1 << 22); //Res0 & Res1
#[no_mangle]

View File

@@ -16,6 +16,7 @@ const SET_PIXEL_ORDER: u32 = 0x0004_8006;
const GET_PITCH: u32 = 0x000_40008;
const SET_FB_OFFSET: u32 = 0x0004_8009;
#[allow(dead_code)]
pub struct FrameBuffer {
pixel_depth: u32, // Bits per pixel
pitch: u32, // Pixel per row
@@ -31,74 +32,6 @@ pub const ORANGE: u32 = 0x00FFA500;
pub const YELLOW: u32 = 0x00FFFF00;
impl FrameBuffer {
pub fn new() -> Self {
let mut mailbox = Mailbox([0; 36]);
mailbox.0[0] = 35 * 4;
mailbox.0[1] = 0;
mailbox.0[2] = SET_PHYSICAL_DISPLAY_WH;
mailbox.0[3] = 8;
mailbox.0[4] = 8;
mailbox.0[5] = 1920;
mailbox.0[6] = 1080;
mailbox.0[7] = SET_VIRTUAL_DISPLAY_WH;
mailbox.0[8] = 8;
mailbox.0[9] = 8;
mailbox.0[10] = 1920;
mailbox.0[11] = 1080;
mailbox.0[12] = SET_PIXEL_DEPTH;
mailbox.0[13] = 4;
mailbox.0[14] = 4;
mailbox.0[15] = 32; // 32 bit per pixel
mailbox.0[16] = SET_PIXEL_ORDER;
mailbox.0[17] = 4;
mailbox.0[18] = 4;
mailbox.0[19] = 0x0; // RGB
mailbox.0[20] = SET_FB_OFFSET;
mailbox.0[21] = 8;
mailbox.0[22] = 8;
mailbox.0[23] = 0; // X in pixels
mailbox.0[24] = 0; // Y in pixels
mailbox.0[25] = ALLOCATE_BUFFER;
mailbox.0[26] = 8;
mailbox.0[27] = 4;
mailbox.0[28] = 4096; // Alignment
mailbox.0[29] = 0;
mailbox.0[30] = GET_PITCH;
mailbox.0[31] = 4;
mailbox.0[32] = 0;
mailbox.0[33] = 0;
mailbox.0[34] = 0; // End tag
// TODO: validate responses
let addr = core::ptr::addr_of!(mailbox.0[0]) as u32;
write_mailbox(8, addr);
let _ = read_mailbox(8);
if mailbox.0[1] == 0 {
println!("Failed");
}
mailbox.0[28] &= 0x3FFFFFFF;
Self {
pixel_depth: mailbox.0[15],
pitch: mailbox.0[33] / (mailbox.0[15] / 8),
rows: mailbox.0[29] / mailbox.0[33],
start_addr: mailbox.0[28] as *mut u32,
size: mailbox.0[29],
}
}
pub fn draw_pixel(&self, x: u32, y: u32, color: u32) {
let offset = x + y * self.pitch;
unsafe {
@@ -109,6 +42,7 @@ impl FrameBuffer {
/*Bresenham's line algorithm
TODO: check if its possible to optimize y1==y2 case (ARM neon?)
*/
#[allow(clippy::collapsible_else_if)]
pub fn draw_line(&self, x1: u32, y1: u32, x2: u32, y2: u32, color: u32) {
if x1 == x2 {
for y in y1..=y2 {
@@ -218,7 +152,7 @@ impl FrameBuffer {
}
fn draw_ascii(&self, x: u32, y: u32, char: usize, scale: u32, color: u32) {
for (y_offset, row) in (&BASIC_LEGACY[char]).iter().enumerate() {
for (y_offset, row) in BASIC_LEGACY[char].iter().enumerate() {
for bit in 0..8 {
match row & (1 << bit) {
0 => {}
@@ -241,3 +175,73 @@ impl FrameBuffer {
}
}
}
/// Ask the GPU (VideoCore property mailbox, channel 8) for a 1920x1080,
/// 32-bpp frame buffer and wrap the response in a `FrameBuffer`.
impl Default for FrameBuffer {
// Property message layout (u32 words): [0]=total size in bytes, [1]=request
// code (0 = request), then a run of tags — each tag is
// (tag id, value-buffer size, request/response size, value words...) —
// terminated by a 0 end tag. The GPU writes its answers back in place.
fn default() -> Self {
let mut mailbox = Mailbox([0; 36]);
mailbox.0[0] = 35 * 4; // total message size: 35 words in bytes
mailbox.0[1] = 0; // request code; GPU overwrites with a response code
mailbox.0[2] = SET_PHYSICAL_DISPLAY_WH;
mailbox.0[3] = 8;
mailbox.0[4] = 8;
mailbox.0[5] = 1920; // physical width in pixels
mailbox.0[6] = 1080; // physical height in pixels
mailbox.0[7] = SET_VIRTUAL_DISPLAY_WH;
mailbox.0[8] = 8;
mailbox.0[9] = 8;
mailbox.0[10] = 1920; // virtual width in pixels
mailbox.0[11] = 1080; // virtual height in pixels
mailbox.0[12] = SET_PIXEL_DEPTH;
mailbox.0[13] = 4;
mailbox.0[14] = 4;
mailbox.0[15] = 32; // 32 bit per pixel
mailbox.0[16] = SET_PIXEL_ORDER;
mailbox.0[17] = 4;
mailbox.0[18] = 4;
mailbox.0[19] = 0x0; // RGB
mailbox.0[20] = SET_FB_OFFSET;
mailbox.0[21] = 8;
mailbox.0[22] = 8;
mailbox.0[23] = 0; // X in pixels
mailbox.0[24] = 0; // Y in pixels
mailbox.0[25] = ALLOCATE_BUFFER;
mailbox.0[26] = 8;
mailbox.0[27] = 4;
mailbox.0[28] = 4096; // Alignment; GPU replaces this with the buffer address
mailbox.0[29] = 0; // GPU replaces this with the buffer size in bytes
mailbox.0[30] = GET_PITCH;
mailbox.0[31] = 4;
mailbox.0[32] = 0;
mailbox.0[33] = 0; // GPU replaces this with the pitch (bytes per row)
mailbox.0[34] = 0; // End tag
// TODO: validate responses
let addr = core::ptr::addr_of!(mailbox.0[0]) as u32;
write_mailbox(8, addr);
let _ = read_mailbox(8);
// Word 1 still 0 means the GPU never processed the request
if mailbox.0[1] == 0 {
println!("Failed");
}
// Mask off the upper bits to convert the GPU bus address to an ARM
// physical address — presumably clearing the 0xC0000000 alias; confirm
// against the videocore address-map docs.
mailbox.0[28] &= 0x3FFFFFFF;
Self {
pixel_depth: mailbox.0[15],
// pitch response is bytes per row; divide by bytes-per-pixel for pixels
pitch: mailbox.0[33] / (mailbox.0[15] / 8),
// total buffer size / bytes per row = number of rows
rows: mailbox.0[29] / mailbox.0[33],
start_addr: mailbox.0[28] as *mut u32,
size: mailbox.0[29],
}
}
}

View File

@@ -1,216 +0,0 @@
#![allow(static_mut_refs)]
use core::{
alloc::GlobalAlloc,
ptr::{self, null_mut, read_volatile},
};
use crate::NovaError;
extern crate alloc;
extern "C" {
static mut __heap_start: u8;
static mut __heap_end: u8;
}
// Block header placed immediately before every heap payload. Headers form an
// address-ordered doubly linked list; `align(16)` keeps the payload that
// follows each header 16-byte aligned.
#[repr(C, align(16))]
pub struct HeapHeader {
pub next: *mut HeapHeader, // next header by address; null at list end
before: *mut HeapHeader, // previous header by address; null at list head
pub size: usize, // payload size in bytes, excluding this header
free: bool, // true while the block is available for allocation
}
const HEAP_HEADER_SIZE: usize = size_of::<HeapHeader>();
// Smallest payload tracked; also the allocation granularity.
const MIN_BLOCK_SIZE: usize = 16;
// TODO: This implementation has to be reevaluated when implementing multiprocessing
// Spinlock could be a solution but has its issues:
// https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
// Global heap over the linker-provided region [__heap_start, __heap_end).
// raw_size is recomputed by init(), so 0 here is only a placeholder.
pub static mut HEAP: Heap = Heap {
start_address: &raw mut __heap_start as *mut HeapHeader,
end_address: &raw mut __heap_end as *mut HeapHeader,
raw_size: 0,
};
// TODO: investigate if there is a better alternative to this
/// Initialize the global `HEAP` (writes the initial free-block header).
///
/// # Safety
/// Must be called exactly once, before the first allocation; `HEAP` is a
/// mutable static with no locking, so this is single-core only (see TODO above).
pub unsafe fn init_global_heap() {
HEAP.init();
}
/// Zero-sized allocator front-end; all state lives in the global `HEAP`.
#[derive(Default)]
pub struct Novalloc;
unsafe impl GlobalAlloc for Novalloc {
unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 {
HEAP.malloc(layout.size()).unwrap()
}
unsafe fn dealloc(&self, ptr: *mut u8, _: core::alloc::Layout) {
HEAP.free(ptr).unwrap();
}
}
// Registers Novalloc as the process-wide allocator backing Box, Vec, etc.
#[global_allocator]
static GLOBAL_ALLOCATOR: Novalloc = Novalloc;
/// First-fit free-list allocator over a fixed memory region.
pub struct Heap {
start_address: *mut HeapHeader, // base of the managed region / first header
end_address: *mut HeapHeader, // one past the end of the managed region
raw_size: usize, // total region size in bytes, headers included
}
impl Heap {
    /// Create and initialize a heap managing the bytes in `[heap_start, heap_end)`.
    ///
    /// Fixed: the previous version stored `&raw const heap_start` — the address
    /// of the *stack-allocated parameter* — instead of the parameter's value,
    /// so the heap pointed into the caller's stack frame.
    pub fn new(heap_start: usize, heap_end: usize) -> Self {
        let mut instance = Self {
            start_address: heap_start as *mut HeapHeader,
            end_address: heap_end as *mut HeapHeader,
            raw_size: heap_end - heap_start,
        };
        instance.init();
        instance
    }
    /// Write a single free-block header at the start of the region covering
    /// the whole heap (minus the header itself). Called once before first use.
    fn init(&mut self) {
        self.raw_size = self.end_address as usize - self.start_address as usize;
        unsafe {
            // SAFETY: the region is assumed writable and at least
            // HEAP_HEADER_SIZE bytes long — TODO confirm the linker script
            // guarantees this for __heap_start/__heap_end.
            ptr::write(
                self.start_address,
                HeapHeader {
                    next: null_mut(),
                    before: null_mut(),
                    size: self.raw_size - HEAP_HEADER_SIZE,
                    free: true,
                },
            );
        }
    }
    /// First-fit search: return the first free block whose payload can hold
    /// `size` bytes, or `NovaError::HeapFull` if none fits.
    ///
    /// Fixed: the end-of-list test previously inspected
    /// `(*self.start_address).next` instead of `(*current).next`, so the walk
    /// could either bail out too early or step past the last block and
    /// dereference a null pointer.
    unsafe fn find_first_fit(&self, size: usize) -> Result<*mut HeapHeader, NovaError> {
        let mut current = self.start_address;
        while !fits(size, current) {
            if (*current).next.is_null() {
                return Err(NovaError::HeapFull);
            }
            current = (*current).next;
        }
        Ok(current)
    }
    /// Allocate `size` bytes and return a pointer to the payload.
    ///
    /// Requests are rounded up to `MIN_BLOCK_SIZE` and then to a multiple of
    /// 16 so payloads stay 16-byte aligned (headers are `align(16)`).
    /// Zero-sized requests are rejected with `EmptyHeapSegmentNotAllowed`.
    pub fn malloc(&self, mut size: usize) -> Result<*mut u8, NovaError> {
        if size == 0 {
            return Err(NovaError::EmptyHeapSegmentNotAllowed);
        }
        if size < MIN_BLOCK_SIZE {
            size = MIN_BLOCK_SIZE;
        }
        // Align size to the next 16 bytes
        size += (16 - (size % 16)) % 16;
        unsafe {
            // Find First-Fit memory segment
            let current = self.find_first_fit(size)?;
            // Return entire block WITHOUT generating a new header
            // if the current block doesn't have enough space to hold: requested size + HEAP_HEADER_SIZE + MIN_BLOCK_SIZE
            if (*current).size < size + HEAP_HEADER_SIZE + MIN_BLOCK_SIZE {
                (*current).free = false;
                return Ok(current.byte_add(HEAP_HEADER_SIZE) as *mut u8);
            }
            Self::fragment_segment(current, size);
            let data_start_address = current.byte_add(HEAP_HEADER_SIZE);
            Ok(data_start_address as *mut u8)
        }
    }
    /// Split `current` (free and large enough) into an allocated block of
    /// `size` bytes plus a new free block holding the remainder, wiring the
    /// new header into the doubly linked list.
    unsafe fn fragment_segment(current: *mut HeapHeader, size: usize) {
        let byte_offset = HEAP_HEADER_SIZE + size;
        let new_address = current.byte_add(byte_offset);
        // Handle case where fragmenting center free space
        let next = (*current).next;
        if !(*current).next.is_null() {
            (*next).before = new_address;
        }
        ptr::write(
            new_address as *mut HeapHeader,
            HeapHeader {
                next,
                before: current,
                size: (*current).size - size - HEAP_HEADER_SIZE,
                free: true,
            },
        );
        (*current).next = new_address;
        (*current).free = false;
        (*current).size = size;
    }
    /// Return a payload pointer previously handed out by `malloc` to the free
    /// list, coalescing with a free predecessor and/or successor so adjacent
    /// free blocks merge into one.
    pub fn free(&self, pointer: *mut u8) -> Result<(), NovaError> {
        let mut segment = unsafe { pointer.sub(HEAP_HEADER_SIZE) as *mut HeapHeader };
        unsafe {
            // IF prev is free:
            // Delete header, add size to previous and fix pointers.
            // Move Head left
            if !(*segment).before.is_null() && (*(*segment).before).free {
                let before_head = (*segment).before;
                (*before_head).size += (*segment).size + HEAP_HEADER_SIZE;
                delete_header(segment);
                segment = before_head;
            }
            // IF next is free:
            // Delete next header and merge size, fix pointers
            if !(*segment).next.is_null() && (*(*segment).next).free {
                let next_head = (*segment).next;
                (*segment).size += (*next_head).size + HEAP_HEADER_SIZE;
                delete_header(next_head);
            }
            // Neither: Set free
            (*segment).free = true;
        }
        Ok(())
    }
    /// Debug aid: dump every block header (address, free flag, payload size)
    /// by walking the list from the start of the heap.
    pub fn traverse_heap(&self) {
        let mut pointer_address = self.start_address;
        loop {
            let head = unsafe { read_volatile(pointer_address) };
            println!("Header {:#x}", pointer_address as u32);
            println!("free: {}", head.free);
            println!("size: {}", head.size);
            println!("hasNext: {}", !head.next.is_null());
            println!("");
            if !head.next.is_null() {
                pointer_address = head.next;
            } else {
                println!("---------------");
                return;
            }
        }
    }
}
/// A block can satisfy a request of `size` bytes iff it is free and its
/// payload is at least that large.
unsafe fn fits(size: usize, header: *mut HeapHeader) -> bool {
    let block = &*header;
    block.free && block.size >= size
}
/// Unlink `header` from the doubly linked block list by re-wiring its
/// neighbors' pointers around it (either neighbor may be null).
unsafe fn delete_header(header: *mut HeapHeader) {
    let prev = (*header).before;
    let succ = (*header).next;
    if !prev.is_null() {
        (*prev).next = succ;
    }
    if !succ.is_null() {
        (*succ).before = prev;
    }
}

View File

@@ -6,7 +6,6 @@ use core::{
use crate::{
mmio_read, mmio_write,
peripherals::gpio::{blink_gpio, SpecificGpio},
print,
timer::sleep_s,
};
@@ -54,7 +53,7 @@ fn esr_uart_dump() {
let esr: u32;
unsafe {
asm!(
"mrs {esr}, ESR_EL1",
"mrs {esr:x}, ESR_EL1",
esr = out(reg) esr
);
}
@@ -80,6 +79,7 @@ fn handle_gpio_interrupt() {
let val = read_gpio_event_detect_status(i);
if val {
#[allow(clippy::single_match)]
match i {
26 => print!("Button Pressed"),
_ => {}

View File

@@ -1,6 +1,34 @@
#![no_std]
#![allow(clippy::missing_safety_doc)]
use core::{
panic::PanicInfo,
ptr::{read_volatile, write_volatile},
};
use core::ptr::{read_volatile, write_volatile};
use heap::Heap;
unsafe extern "C" {
unsafe static mut __heap_start: u8;
unsafe static mut __heap_end: u8;
}
#[global_allocator]
pub static mut GLOBAL_ALLOCATOR: Heap = Heap::empty();
/// Initialize the global allocator over the linker-provided heap region.
///
/// # Safety
/// Must run once, before the first heap allocation. The `addr_of_mut!` dance
/// avoids creating references to the mutable statics.
pub unsafe fn init_heap() {
let start = core::ptr::addr_of_mut!(__heap_start) as usize;
let end = core::ptr::addr_of_mut!(__heap_end) as usize;
let heap = core::ptr::addr_of_mut!(GLOBAL_ALLOCATOR);
(*heap).init(start, end);
}
// Bare-metal panic strategy: there is no OS to unwind to, so report and spin.
// NOTE(review): the println! sits inside the loop, so "Panic" is re-printed
// forever — confirm that is intended rather than printing once then halting.
#[panic_handler]
fn panic(_panic: &PanicInfo) -> ! {
loop {
println!("Panic");
}
}
#[macro_export]
macro_rules! print {
@@ -23,7 +51,6 @@ pub mod peripherals;
pub mod configuration;
pub mod framebuffer;
pub mod heap;
pub mod irq_interrupt;
pub mod mailbox;
pub mod timer;
@@ -35,10 +62,3 @@ pub fn mmio_read(address: u32) -> u32 {
/// Volatile 32-bit store to a memory-mapped peripheral register; volatile
/// keeps the compiler from eliding or reordering the hardware access.
/// NOTE(review): any u32 is accepted — callers are trusted to pass a valid
/// MMIO address, so this safe wrapper hides a real safety obligation.
pub fn mmio_write(address: u32, data: u32) {
unsafe { write_volatile(address as *mut u32, data) }
}
/// Kernel-wide error type.
#[derive(Debug)]
pub enum NovaError {
Mailbox, // mailbox request was not processed / returned an error
HeapFull, // no free heap block large enough for the request
EmptyHeapSegmentNotAllowed, // a zero-byte allocation was requested
}

View File

@@ -1,9 +1,10 @@
use crate::{mmio_read, mmio_write, NovaError};
use crate::{mmio_read, mmio_write};
use nova_error::NovaError;
const MBOX_BASE: u32 = 0x3F00_0000 + 0xB880;
// MB0
const MBOX_READ: u32 = MBOX_BASE + 0x00;
const MBOX_READ: u32 = MBOX_BASE;
const MBOX_STATUS: u32 = MBOX_BASE + 0x18;
// MB1
@@ -51,7 +52,7 @@ macro_rules! mailbox_command {
return Err(NovaError::Mailbox);
}
let mut out = [0u32; $response_len / 4]; // TODO: Can this be improved?
let mut out = [0u32; $response_len / 4];
out.copy_from_slice(&mailbox[5..(5 + $response_len / 4)]);
Ok(out)
}

View File

@@ -2,17 +2,18 @@
#![no_std]
#![feature(asm_experimental_arch)]
#![allow(static_mut_refs)]
#![allow(clippy::missing_safety_doc)]
use core::{
arch::{asm, global_asm},
panic::PanicInfo,
ptr::write_volatile,
};
extern crate alloc;
use alloc::boxed::Box;
use nova::{
framebuffer::{FrameBuffer, BLUE, GREEN, RED},
heap::{init_global_heap, HEAP},
init_heap,
irq_interrupt::enable_irq_source,
mailbox::mb_read_soc_temp,
peripherals::{
@@ -34,15 +35,8 @@ extern "C" {
static mut __bss_end: u32;
}
#[panic_handler]
fn panic(_panic: &PanicInfo) -> ! {
loop {
println!("Panic");
}
}
#[no_mangle]
#[link_section = ".text._start"]
#[cfg_attr(not(test), link_section = ".text._start")]
pub unsafe extern "C" fn _start() {
// Set the stack pointer
asm!(
@@ -88,7 +82,10 @@ unsafe fn zero_bss() {
pub extern "C" fn kernel_main() -> ! {
println!("EL: {}", get_current_el());
heap_test();
unsafe {
init_heap();
heap_test();
};
sleep_us(500_000);
@@ -98,7 +95,7 @@ pub extern "C" fn kernel_main() -> ! {
gpio_pull_up(26);
set_falling_edge_detect(26, true);
let fb = FrameBuffer::new();
let fb = FrameBuffer::default();
fb.draw_square(500, 500, 600, 700, RED);
fb.draw_square_fill(800, 800, 900, 900, GREEN);
@@ -116,21 +113,9 @@ pub extern "C" fn kernel_main() -> ! {
}
}
fn heap_test() {
unsafe {
init_global_heap();
let a = HEAP.malloc(32).unwrap();
let b = HEAP.malloc(64).unwrap();
let c = HEAP.malloc(128).unwrap();
let _ = HEAP.malloc(256).unwrap();
HEAP.traverse_heap();
HEAP.free(b).unwrap();
HEAP.traverse_heap();
HEAP.free(a).unwrap();
HEAP.traverse_heap();
HEAP.free(c).unwrap();
HEAP.traverse_heap();
}
/// Smoke-test the global allocator: one boxed-array allocation plus a print.
/// # Safety
/// Requires `init_heap()` to have run first; presumably marked `unsafe` only
/// to document that precondition — the body itself has no unsafe operations.
unsafe fn heap_test() {
let b = Box::new([1, 2, 3, 4]);
println!("{:?}", b);
}
fn cos(x: u32) -> f64 {

View File

@@ -170,7 +170,7 @@ pub fn set_rising_edge_detect(gpio: u8, enable: bool) {
mmio_write(register_addr, new_val);
}
pub fn blink_gpio(gpio: u8, duration_ms: u32) {
pub fn blink_gpio(gpio: u8, duration_ms: u64) {
let _ = gpio_high(gpio);
sleep_ms(duration_ms);

View File

@@ -1,32 +1,61 @@
use crate::mmio_read;
use core::{hint::spin_loop, ptr::read_volatile};
const TIMER_CLO: u32 = 0x3F00_3004;
const TIMER_CLOCK_LO: u32 = 0x3F00_3004;
const TIMER_CLOCK_HI: u32 = 0x3F00_3008;
fn read_clo() -> u32 {
mmio_read(TIMER_CLO)
fn read_timer_32() -> u32 {
unsafe { read_volatile(TIMER_CLOCK_LO as *const u32) }
}
/// Read the full 64-bit free-running system timer.
///
/// Reads HI, LO, HI and retries until both HI reads agree, so a LO-counter
/// roll-over between the reads cannot produce a torn 64-bit value.
fn read_timer_64() -> u64 {
loop {
let clock_hi1 = unsafe { read_volatile(TIMER_CLOCK_HI as *const u32) };
let clock_lo = unsafe { read_volatile(TIMER_CLOCK_LO as *const u32) };
let clock_hi2 = unsafe { read_volatile(TIMER_CLOCK_HI as *const u32) };
// account for roll over during read
if clock_hi1 == clock_hi2 {
return ((clock_hi1 as u64) << 32) | clock_lo as u64;
}
}
}
/// Sleep for `us` microseconds
pub fn sleep_us(us: u32) {
let start = read_clo();
while read_clo() - start < us {
unsafe { core::arch::asm!("nop") }
pub fn sleep_us(us: u64) {
if us < u32::MAX as u64 {
sleep_us_u32(us as u32);
} else {
sleep_us_u64(us);
}
}
/// Busy-wait until `us` microseconds have passed on the 32-bit counter.
/// `wrapping_sub` keeps the elapsed-time math correct across roll-over.
fn sleep_us_u32(us: u32) {
    let begin = read_timer_32();
    loop {
        let elapsed = read_timer_32().wrapping_sub(begin);
        if elapsed >= us {
            break;
        }
        spin_loop();
    }
}
/// Busy-wait until `us` microseconds have passed on the 64-bit counter
/// (used for delays too long for the 32-bit path).
fn sleep_us_u64(us: u64) {
    let begin = read_timer_64();
    loop {
        if read_timer_64().wrapping_sub(begin) >= us {
            return;
        }
        spin_loop();
    }
}
/// Sleep for `ms` milliseconds
pub fn sleep_ms(ms: u32) {
sleep_us(ms * 1000);
pub fn sleep_ms(ms: u64) {
sleep_us(ms * 1_000);
}
/// Sleep for `s` seconds
pub fn sleep_s(s: u32) {
sleep_us(s * 1000);
pub fn sleep_s(s: u64) {
sleep_ms(s * 1_000);
}
/// Wait for `count` operations to pass
pub fn delay_nops(count: u32) {
for _ in 0..count {
unsafe { core::arch::asm!("nop") }
spin_loop()
}
}