implement heap allocator tests

This commit is contained in:
Alexander Neuhäuser
2025-12-20 17:40:45 +01:00
committed by GitHub
parent 82fa03d48e
commit 36bc1f3315
20 changed files with 730 additions and 347 deletions

View File

@@ -1,5 +1,2 @@
[build]
target = "aarch64-unknown-none"
[target.aarch64-unknown-none]
rustflags = ["-C", "link-arg=-Tlink.ld"]

View File

@@ -21,8 +21,7 @@ jobs:
- name: Run format check
run: cargo fmt --check
- name: Run lint
run: cargo clippy -- -D warnings
run: cargo clippy --target aarch64-unknown-none -- -D warnings
build:
runs-on: ubuntu-latest
@@ -33,4 +32,16 @@ jobs:
- name: Add AArch64 Target
run: rustup target add aarch64-unknown-none
- name: Build
run: cargo build --verbose
run: cargo build --verbose --target aarch64-unknown-none
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install rustfmt for nightly
run: rustup component add --toolchain nightly-x86_64-unknown-linux-gnu rustfmt clippy
- name: Add AArch64 Target
run: rustup target add aarch64-unknown-none
- name: Heap Workspace Test
run: cargo test -p heap

1
.gitignore vendored
View File

@@ -3,3 +3,4 @@ kernel8.img
.env
sd.img
settings.json
.DS_Store

152
Cargo.lock generated
View File

@@ -2,6 +2,38 @@
# It is not intended for manual editing.
version = 4
[[package]]
name = "cfg-if"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
[[package]]
name = "getrandom"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasip2",
]
[[package]]
name = "heap"
version = "0.1.0"
dependencies = [
"nova_error",
"rand",
]
[[package]]
name = "libc"
version = "0.2.178"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091"
[[package]]
name = "libm"
version = "0.2.15"
@@ -12,5 +44,125 @@ checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de"
name = "nova"
version = "0.1.0"
dependencies = [
"heap",
"libm",
"nova_error",
]
[[package]]
name = "nova_error"
version = "0.1.0"
[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
"zerocopy",
]
[[package]]
name = "proc-macro2"
version = "1.0.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f"
dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "5.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
[[package]]
name = "rand"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
dependencies = [
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38"
dependencies = [
"getrandom",
]
[[package]]
name = "syn"
version = "2.0.111"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "unicode-ident"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
[[package]]
name = "wasip2"
version = "1.0.1+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7"
dependencies = [
"wit-bindgen",
]
[[package]]
name = "wit-bindgen"
version = "0.46.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59"
[[package]]
name = "zerocopy"
version = "0.8.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.8.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a"
dependencies = [
"proc-macro2",
"quote",
"syn",
]

View File

@@ -14,3 +14,11 @@ panic = "abort"
[dependencies]
libm = "0.2.15"
heap = {path = "heap"}
nova_error = {path = "nova_error"}
[workspace]
members = [ "nova_error",
"heap"
]

0
heap/.cargo/config.toml Normal file
View File

10
heap/Cargo.toml Normal file
View File

@@ -0,0 +1,10 @@
[package]
name = "heap"
version = "0.1.0"
edition = "2024"
[dependencies]
nova_error = {path = "../nova_error"}
[dev-dependencies]
rand = "0.9.2"

200
heap/src/lib.rs Normal file
View File

@@ -0,0 +1,200 @@
#![allow(static_mut_refs)]
#![cfg_attr(not(test), no_std)]
use core::{
alloc::GlobalAlloc,
mem::size_of,
prelude::v1::*,
ptr::{self, null_mut},
result::Result,
};
use nova_error::NovaError;
extern crate alloc;
/// Per-block bookkeeping header placed immediately before every payload.
/// Blocks form an address-ordered doubly linked list starting at
/// `Heap::start_address`.
#[repr(C, align(16))]
#[derive(Clone, Copy)]
pub struct HeapHeader {
    // Next block header in address order; `None` for the last block.
    next: Option<*mut HeapHeader>,
    // Previous block header in address order; `None` for the first block.
    before: Option<*mut HeapHeader>,
    // Payload size in bytes (does NOT include this header).
    size: usize,
    // Whether the block is available for allocation.
    free: bool,
}
// Per-block overhead in bytes (32 on 64-bit targets due to align(16) padding).
const HEAP_HEADER_SIZE: usize = size_of::<HeapHeader>();
// Smallest payload ever handed out; also the allocation granularity.
const MIN_BLOCK_SIZE: usize = 16;
/// First-fit free-list allocator over a fixed, caller-provided memory range.
pub struct Heap {
    // Address of the first block header (start of the managed range).
    pub start_address: *mut HeapHeader,
    // Address of the last usable byte of the range — `init` treats the end
    // bound as inclusive (it adds +1 when computing `raw_size`).
    pub end_address: *mut HeapHeader,
    // Total managed bytes, headers included.
    pub raw_size: usize,
}
impl Heap {
    /// Creates an unusable placeholder heap (null range). `init` must be
    /// called before any allocation; suitable for `static` initialization.
    pub const fn empty() -> Self {
        Self {
            start_address: null_mut(),
            end_address: null_mut(),
            raw_size: 0,
        }
    }
    /// Initializes the allocator over `heap_start..=heap_end` (end bound
    /// inclusive) by writing one all-spanning free block header at the start.
    ///
    /// NOTE(review): assumes `heap_start` is 16-byte aligned, since
    /// `HeapHeader` is `align(16)` — confirm for every caller.
    pub fn init(&mut self, heap_start: usize, heap_end: usize) {
        self.start_address = heap_start as *mut HeapHeader;
        self.end_address = heap_end as *mut HeapHeader;
        // `heap_end` is the address of the last usable byte, hence the +1.
        self.raw_size = heap_end - heap_start + 1;
        unsafe {
            ptr::write(
                self.start_address,
                HeapHeader {
                    next: None,
                    before: None,
                    size: self.raw_size - HEAP_HEADER_SIZE,
                    free: true,
                },
            );
        }
    }
    /// Walks the block list from the start and returns the first free block
    /// whose payload size is at least `size`.
    ///
    /// Errors with `NovaError::HeapFull` when the list is exhausted.
    unsafe fn find_first_fit(&self, size: usize) -> Result<*mut HeapHeader, NovaError> {
        let mut current = self.start_address;
        unsafe {
            while !fits(size, current) {
                if let Some(next) = (*current).next {
                    current = next;
                } else {
                    return Err(NovaError::HeapFull);
                }
            }
        }
        Ok(current)
    }
    /// Allocates at least `size` bytes and returns a pointer to the payload
    /// (16-byte aligned, directly after the block header).
    ///
    /// The request is rounded up to `MIN_BLOCK_SIZE` and then to the next
    /// 16-byte multiple. Errors: `EmptyHeapSegmentNotAllowed` for `size == 0`,
    /// `HeapFull` when no free block fits.
    pub fn malloc(&self, mut size: usize) -> Result<*mut u8, NovaError> {
        if size == 0 {
            return Err(NovaError::EmptyHeapSegmentNotAllowed);
        }
        if size < MIN_BLOCK_SIZE {
            size = MIN_BLOCK_SIZE;
        }
        // Align size to the next 16 bytes
        size += (16 - (size % 16)) % 16;
        unsafe {
            // Find First-Fit memory segment
            let current = self.find_first_fit(size)?;
            // Return entire block WITHOUT generating a new header
            // if the current block doesn't have enough space to hold: requested size + HEAP_HEADER_SIZE + MIN_BLOCK_SIZE
            if (*current).size < size + HEAP_HEADER_SIZE + MIN_BLOCK_SIZE {
                (*current).free = false;
                return Ok(current.byte_add(HEAP_HEADER_SIZE) as *mut u8);
            }
            Self::fragment_segment(current, size);
            let data_start_address = current.byte_add(HEAP_HEADER_SIZE);
            Ok(data_start_address as *mut u8)
        }
    }
    /// Splits `current` (a free block known to be large enough) into an
    /// occupied block of exactly `size` payload bytes plus a new free
    /// remainder block, splicing the remainder into the linked list.
    unsafe fn fragment_segment(current: *mut HeapHeader, size: usize) {
        // The remainder header starts right after the new payload.
        let byte_offset = HEAP_HEADER_SIZE + size;
        let new_address = unsafe { current.byte_add(byte_offset) };
        // Handle case where fragmenting center free space
        unsafe {
            let next = (*current).next;
            if let Some(next) = next {
                (*next).before = Some(new_address);
            }
            ptr::write(
                new_address,
                HeapHeader {
                    next,
                    before: Some(current),
                    // Remainder payload = old payload minus new payload and
                    // minus the newly inserted header.
                    size: (*current).size - byte_offset,
                    free: true,
                },
            );
            (*current).next = Some(new_address);
            (*current).free = false;
            (*current).size = size;
        }
    }
    /// Releases an allocation previously returned by `malloc`, eagerly
    /// coalescing with free neighbours on both sides.
    ///
    /// NOTE(review): `pointer` is not validated — passing anything that did
    /// not come from this heap corrupts the free list. Currently always
    /// returns `Ok(())`; the `Result` is reserved for future validation.
    pub fn free(&self, pointer: *mut u8) -> Result<(), NovaError> {
        let mut segment = Self::get_header_ref_from_data_pointer(pointer);
        unsafe {
            // IF prev is free:
            // Delete header, add size to previous and fix pointers.
            // Move Head left
            if let Some(before_head) = (*segment).before
                && (*before_head).free
            {
                (*before_head).size += (*segment).size + HEAP_HEADER_SIZE;
                delete_header(segment);
                segment = before_head;
            }
            // IF next is free:
            // Delete next header and merge size, fix pointers
            if let Some(next_head) = (*segment).next
                && (*next_head).free
            {
                (*segment).size += (*next_head).size + HEAP_HEADER_SIZE;
                delete_header(next_head);
            }
            // Neither: Set free
            (*segment).free = true;
        }
        Ok(())
    }
    /// Recovers the block header address from a payload pointer (the header
    /// sits exactly `HEAP_HEADER_SIZE` bytes before the payload).
    const fn get_header_ref_from_data_pointer(pointer: *mut u8) -> *mut HeapHeader {
        unsafe { pointer.sub(HEAP_HEADER_SIZE) as *mut HeapHeader }
    }
}
unsafe impl GlobalAlloc for Heap {
    /// Allocates per the `GlobalAlloc` contract.
    ///
    /// Failure is signalled by returning a null pointer instead of panicking:
    /// unwinding out of a global allocator is undefined behavior. The heap
    /// only guarantees 16-byte alignment (headers are `align(16)` and the
    /// payload follows the header), so stricter layouts are refused.
    unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 {
        if layout.align() > 16 {
            return core::ptr::null_mut();
        }
        self.malloc(layout.size()).unwrap_or(core::ptr::null_mut())
    }
    /// Returns a block to the heap. `free` currently never fails; the
    /// `Result` is discarded rather than unwrapped so the allocator can
    /// never unwind.
    unsafe fn dealloc(&self, ptr: *mut u8, _: core::alloc::Layout) {
        let _ = self.free(ptr);
    }
}
// SAFETY(review): `Heap` holds raw pointers and mutates the free list with no
// locking. This is only sound while all allocation happens from a single core
// with no reentrant use from interrupt handlers — confirm before enabling
// multiprocessing (see the spinlock TODO that accompanied the old allocator).
unsafe impl Sync for Heap {}
unsafe fn fits(size: usize, header: *mut HeapHeader) -> bool {
unsafe { (*header).free && size <= (*header).size }
}
/// Unlinks `header` from the doubly linked block list by pointing its
/// neighbours (when present) at each other. The header bytes themselves are
/// left in place; callers merge the block's size into a neighbour.
unsafe fn delete_header(header: *mut HeapHeader) {
    unsafe {
        let (prev, next) = ((*header).before, (*header).next);
        if let Some(p) = prev {
            (*p).next = next;
        }
        if let Some(n) = next {
            (*n).before = prev;
        }
    }
}
#[cfg(test)]
mod tests;

165
heap/src/tests.rs Normal file
View File

@@ -0,0 +1,165 @@
use super::*;
use rand::{self, random_range};
extern crate std;
static HEAP_SIZE: usize = 1024;
#[test]
fn test_heap_allocation() {
    // Backing storage for the heap under test. `HeapHeader` is `align(16)`,
    // but `Box<[u8; N]>` only guarantees alignment 1, so writing the header
    // into a bare byte box would be UB — wrap the bytes in an aligned struct.
    #[repr(C, align(16))]
    struct Arena([u8; HEAP_SIZE]);
    let arena = Box::new(Arena([0u8; HEAP_SIZE]));
    let mut heap = Heap::empty();
    heap.init(
        &arena.0[0] as *const u8 as usize,
        &arena.0[HEAP_SIZE - 1] as *const u8 as usize,
    );
    let root_header = heap.start_address;
    // Start at 1 (malloc(0) is rejected) and leave room for a trailing
    // header plus minimum block, so the allocation always fragments and the
    // `next` header asserted on below is guaranteed to exist.
    let malloc_size = random_range(1..=(HEAP_SIZE - 2 * HEAP_HEADER_SIZE - MIN_BLOCK_SIZE));
    let malloc = heap.malloc(malloc_size).unwrap();
    let malloc_header = Heap::get_header_ref_from_data_pointer(malloc);
    // First fit: the very first allocation must reuse the root block.
    assert_eq!(root_header, malloc_header);
    unsafe {
        let actual_alloc_size = (*malloc_header).size;
        let actual_raw_size = actual_alloc_size + HEAP_HEADER_SIZE;
        // Verify sizing: rounded up and 16-byte granular.
        assert!(actual_alloc_size >= malloc_size);
        assert_eq!(actual_alloc_size % MIN_BLOCK_SIZE, 0);
        // Verify section is occupied
        assert!(!(*malloc_header).free);
        // Verify the remainder header was created directly after the payload.
        let next = (*malloc_header).next.unwrap();
        assert_eq!(malloc_header.byte_add(actual_raw_size), next);
        assert!((*next).free);
        assert_eq!((*malloc_header).next.unwrap(), next);
        assert_eq!((*next).before.unwrap(), malloc_header);
        // The remainder block owns everything left after both headers.
        assert_eq!((*next).size, HEAP_SIZE - actual_raw_size - HEAP_HEADER_SIZE)
    }
}
#[test]
fn test_full_heap() {
    // 16-byte-aligned arena: `Box<[u8; N]>` alone does not guarantee the
    // alignment `HeapHeader` (align(16)) requires.
    #[repr(C, align(16))]
    struct Arena([u8; HEAP_SIZE]);
    let arena = Box::new(Arena([0u8; HEAP_SIZE]));
    let mut heap = Heap::empty();
    heap.init(
        &arena.0[0] as *const u8 as usize,
        &arena.0[HEAP_SIZE - 1] as *const u8 as usize,
    );
    // Claim every usable byte in a single exact-fit allocation.
    let malloc_size = HEAP_SIZE - HEAP_HEADER_SIZE;
    let malloc = heap.malloc(malloc_size).unwrap();
    let malloc_header = Heap::get_header_ref_from_data_pointer(malloc);
    unsafe {
        assert!(!(*malloc_header).free);
        // An exact fit must not create a remainder block.
        assert!((*malloc_header).next.is_none());
    }
    // With the heap exhausted, any further allocation must fail.
    let malloc2 = heap.malloc(MIN_BLOCK_SIZE);
    assert!(malloc2.is_err());
}
#[test]
fn test_freeing_root() {
    // 16-byte-aligned arena; see test_heap_allocation for why a plain
    // `Box<[u8; N]>` is not enough.
    #[repr(C, align(16))]
    struct Arena([u8; HEAP_SIZE]);
    let arena = Box::new(Arena([0u8; HEAP_SIZE]));
    let mut heap = Heap::empty();
    heap.init(
        &arena.0[0] as *const u8 as usize,
        &arena.0[HEAP_SIZE - 1] as *const u8 as usize,
    );
    let root_header = heap.start_address;
    let root_header_start_size = unsafe { (*root_header).size };
    // Start at 1: malloc(0) is rejected with an error. Staying below half
    // the heap guarantees the allocation fragments, so `next` exists.
    let malloc_size = random_range(1..((HEAP_SIZE - HEAP_HEADER_SIZE) / 2));
    let malloc = heap.malloc(malloc_size).unwrap();
    let malloc_header = Heap::get_header_ref_from_data_pointer(malloc);
    unsafe {
        assert!(!(*malloc_header).free);
        assert!((*malloc_header).size >= malloc_size);
        assert!((*root_header).next.is_some());
        // Freeing the only allocation must coalesce the heap back into a
        // single all-spanning free block of the original size.
        assert!(heap.free(malloc).is_ok());
        assert_eq!((*root_header).size, root_header_start_size);
        assert!((*root_header).next.is_none());
    }
}
#[test]
fn test_merging_free_sections() {
    // 16-byte-aligned arena; `HeapHeader` is align(16).
    #[repr(C, align(16))]
    struct Arena([u8; HEAP_SIZE]);
    let arena = Box::new(Arena([0u8; HEAP_SIZE]));
    let mut heap = Heap::empty();
    heap.init(
        &arena.0[0] as *const u8 as usize,
        &arena.0[HEAP_SIZE - 1] as *const u8 as usize,
    );
    // Three adjacent allocations; the third keeps the free tail block away
    // so the merge below happens strictly between blocks 1 and 2.
    let malloc1 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
    let malloc_header_before = unsafe { *Heap::get_header_ref_from_data_pointer(malloc1) };
    let malloc2 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
    let _ = heap.malloc(MIN_BLOCK_SIZE).unwrap();
    unsafe {
        // Freeing block 1 flips its flag but cannot merge: both neighbours
        // are still occupied, so its size is unchanged.
        assert!(heap.free(malloc1).is_ok());
        let malloc_header_free = *Heap::get_header_ref_from_data_pointer(malloc1);
        assert_ne!(malloc_header_before.free, malloc_header_free.free);
        assert_eq!(malloc_header_before.size, malloc_header_free.size);
        // Freeing block 2 must merge its payload AND header into block 1.
        assert!(heap.free(malloc2).is_ok());
        let malloc_header_merge = *Heap::get_header_ref_from_data_pointer(malloc1);
        assert!(malloc_header_merge.free);
        assert_eq!(
            malloc_header_merge.size,
            malloc_header_free.size + MIN_BLOCK_SIZE + HEAP_HEADER_SIZE
        );
    }
}
#[test]
fn test_first_fit() {
    // 16-byte-aligned arena; `HeapHeader` is align(16).
    #[repr(C, align(16))]
    struct Arena([u8; HEAP_SIZE]);
    let arena = Box::new(Arena([0u8; HEAP_SIZE]));
    let mut heap = Heap::empty();
    heap.init(
        &arena.0[0] as *const u8 as usize,
        &arena.0[HEAP_SIZE - 1] as *const u8 as usize,
    );
    // Layout: [1][2][3 = 3 units][4][free tail]; then punch holes at 1 and 3.
    let malloc1 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
    let _malloc2 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
    let malloc3 = heap.malloc(MIN_BLOCK_SIZE * 3).unwrap();
    let malloc4 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
    unsafe {
        assert!(heap.free(malloc1).is_ok());
        assert!(heap.free(malloc3).is_ok());
        // Request too big for hole 1, so first fit must land in hole 3.
        let malloc5 = heap.malloc(MIN_BLOCK_SIZE * 2).unwrap();
        let malloc1_header = *Heap::get_header_ref_from_data_pointer(malloc1);
        // First free block stays empty
        assert!(malloc1_header.free);
        // New allocation takes the first fit aka. malloc3
        assert_eq!(malloc5, malloc3);
        // If no free slot could be found, append to the end
        let malloc6 = heap.malloc(MIN_BLOCK_SIZE * 2).unwrap();
        assert!(malloc6 > malloc4);
        // Malloc7 takes slot of Malloc1
        let malloc7 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
        assert_eq!(malloc1, malloc7);
    }
}

4
nova_error/Cargo.toml Normal file
View File

@@ -0,0 +1,4 @@
[package]
name = "nova_error"
version = "0.1.0"
edition = "2024"

11
nova_error/src/lib.rs Normal file
View File

@@ -0,0 +1,11 @@
#![no_std]
use core::fmt::Debug;
use core::prelude::rust_2024::derive;
/// Error type shared across the workspace crates.
#[derive(Debug)]
pub enum NovaError {
    // A mailbox property transaction failed (response code check).
    Mailbox,
    // The heap has no free block large enough for the request.
    HeapFull,
    // `malloc` was called with a size of 0, which is not allowed.
    EmptyHeapSegmentNotAllowed,
}

View File

@@ -1,9 +1,10 @@
static SCTLR_EL1_MMU_DISABLED: u64 = 0 << 0; //M
static SCTLR_EL1_MMU_DISABLED: u64 = 0; //M
static SCTLR_EL1_DATA_CACHE_DISABLED: u64 = 0 << 2; //C
static SCTLR_EL1_INSTRUCTION_CACHE_DISABLED: u64 = 0 << 12; //I
static SCTLR_EL1_LITTLE_ENDIAN_EL0: u64 = 0 << 24; //E0E
static SCTLR_EL1_LITTLE_ENDIAN_EL1: u64 = 0 << 25; //EE
#[allow(clippy::identity_op)]
static SCTLR_EL1_RES: u64 = (0 << 6) | (1 << 11) | (0 << 17) | (1 << 20) | (1 << 22); //Res0 & Res1
#[no_mangle]

View File

@@ -16,6 +16,7 @@ const SET_PIXEL_ORDER: u32 = 0x0004_8006;
const GET_PITCH: u32 = 0x000_40008;
const SET_FB_OFFSET: u32 = 0x0004_8009;
#[allow(dead_code)]
pub struct FrameBuffer {
pixel_depth: u32, // Bits per pixel
pitch: u32, // Pixel per row
@@ -31,74 +32,6 @@ pub const ORANGE: u32 = 0x00FFA500;
pub const YELLOW: u32 = 0x00FFFF00;
impl FrameBuffer {
pub fn new() -> Self {
let mut mailbox = Mailbox([0; 36]);
mailbox.0[0] = 35 * 4;
mailbox.0[1] = 0;
mailbox.0[2] = SET_PHYSICAL_DISPLAY_WH;
mailbox.0[3] = 8;
mailbox.0[4] = 8;
mailbox.0[5] = 1920;
mailbox.0[6] = 1080;
mailbox.0[7] = SET_VIRTUAL_DISPLAY_WH;
mailbox.0[8] = 8;
mailbox.0[9] = 8;
mailbox.0[10] = 1920;
mailbox.0[11] = 1080;
mailbox.0[12] = SET_PIXEL_DEPTH;
mailbox.0[13] = 4;
mailbox.0[14] = 4;
mailbox.0[15] = 32; // 32 bit per pixel
mailbox.0[16] = SET_PIXEL_ORDER;
mailbox.0[17] = 4;
mailbox.0[18] = 4;
mailbox.0[19] = 0x0; // RGB
mailbox.0[20] = SET_FB_OFFSET;
mailbox.0[21] = 8;
mailbox.0[22] = 8;
mailbox.0[23] = 0; // X in pixels
mailbox.0[24] = 0; // Y in pixels
mailbox.0[25] = ALLOCATE_BUFFER;
mailbox.0[26] = 8;
mailbox.0[27] = 4;
mailbox.0[28] = 4096; // Alignment
mailbox.0[29] = 0;
mailbox.0[30] = GET_PITCH;
mailbox.0[31] = 4;
mailbox.0[32] = 0;
mailbox.0[33] = 0;
mailbox.0[34] = 0; // End tag
// TODO: validate responses
let addr = core::ptr::addr_of!(mailbox.0[0]) as u32;
write_mailbox(8, addr);
let _ = read_mailbox(8);
if mailbox.0[1] == 0 {
println!("Failed");
}
mailbox.0[28] &= 0x3FFFFFFF;
Self {
pixel_depth: mailbox.0[15],
pitch: mailbox.0[33] / (mailbox.0[15] / 8),
rows: mailbox.0[29] / mailbox.0[33],
start_addr: mailbox.0[28] as *mut u32,
size: mailbox.0[29],
}
}
pub fn draw_pixel(&self, x: u32, y: u32, color: u32) {
let offset = x + y * self.pitch;
unsafe {
@@ -109,6 +42,7 @@ impl FrameBuffer {
/*Bresenham's line algorithm
TODO: check if its possible to optimize y1==y2 case (ARM neon?)
*/
#[allow(clippy::collapsible_else_if)]
pub fn draw_line(&self, x1: u32, y1: u32, x2: u32, y2: u32, color: u32) {
if x1 == x2 {
for y in y1..=y2 {
@@ -218,7 +152,7 @@ impl FrameBuffer {
}
fn draw_ascii(&self, x: u32, y: u32, char: usize, scale: u32, color: u32) {
for (y_offset, row) in (&BASIC_LEGACY[char]).iter().enumerate() {
for (y_offset, row) in BASIC_LEGACY[char].iter().enumerate() {
for bit in 0..8 {
match row & (1 << bit) {
0 => {}
@@ -241,3 +175,73 @@ impl FrameBuffer {
}
}
}
impl Default for FrameBuffer {
    /// Configures a 1920x1080, 32-bit-per-pixel framebuffer via the
    /// VideoCore mailbox property interface (channel 8) and builds the
    /// handle from the in-place responses.
    fn default() -> Self {
        // Property message layout: [0] total size in bytes, [1] request code,
        // then per tag: tag id, value buffer size, request/response code,
        // value words; terminated by a 0 end tag.
        let mut mailbox = Mailbox([0; 36]);
        mailbox.0[0] = 35 * 4;
        mailbox.0[1] = 0;
        mailbox.0[2] = SET_PHYSICAL_DISPLAY_WH;
        mailbox.0[3] = 8;
        mailbox.0[4] = 8;
        mailbox.0[5] = 1920;
        mailbox.0[6] = 1080;
        mailbox.0[7] = SET_VIRTUAL_DISPLAY_WH;
        mailbox.0[8] = 8;
        mailbox.0[9] = 8;
        mailbox.0[10] = 1920;
        mailbox.0[11] = 1080;
        mailbox.0[12] = SET_PIXEL_DEPTH;
        mailbox.0[13] = 4;
        mailbox.0[14] = 4;
        mailbox.0[15] = 32; // 32 bit per pixel
        mailbox.0[16] = SET_PIXEL_ORDER;
        mailbox.0[17] = 4;
        mailbox.0[18] = 4;
        mailbox.0[19] = 0x0; // RGB
        mailbox.0[20] = SET_FB_OFFSET;
        mailbox.0[21] = 8;
        mailbox.0[22] = 8;
        mailbox.0[23] = 0; // X in pixels
        mailbox.0[24] = 0; // Y in pixels
        mailbox.0[25] = ALLOCATE_BUFFER;
        mailbox.0[26] = 8;
        mailbox.0[27] = 4;
        mailbox.0[28] = 4096; // Alignment
        mailbox.0[29] = 0;
        mailbox.0[30] = GET_PITCH;
        mailbox.0[31] = 4;
        mailbox.0[32] = 0;
        mailbox.0[33] = 0;
        mailbox.0[34] = 0; // End tag
        // TODO: validate responses
        let addr = core::ptr::addr_of!(mailbox.0[0]) as u32;
        write_mailbox(8, addr);
        let _ = read_mailbox(8);
        // NOTE(review): slot [1] holds the response code and 0 means
        // "request not processed"; the success code is a distinct non-zero
        // value, so this only catches one failure mode — confirm and tighten.
        if mailbox.0[1] == 0 {
            println!("Failed");
        }
        // Mask the upper bits of the returned buffer address — presumably
        // converting a VideoCore bus address to an ARM-visible one; confirm
        // the mask for this board.
        mailbox.0[28] &= 0x3FFFFFFF;
        Self {
            // Response value of the SET_PIXEL_DEPTH tag (bits per pixel).
            pixel_depth: mailbox.0[15],
            // Pitch response is in bytes per row; divide by bytes per pixel.
            pitch: mailbox.0[33] / (mailbox.0[15] / 8),
            // Total buffer size divided by bytes per row.
            rows: mailbox.0[29] / mailbox.0[33],
            start_addr: mailbox.0[28] as *mut u32,
            size: mailbox.0[29],
        }
    }
}

View File

@@ -1,216 +0,0 @@
#![allow(static_mut_refs)]
use core::{
alloc::GlobalAlloc,
ptr::{self, null_mut, read_volatile},
};
use crate::NovaError;
extern crate alloc;
extern "C" {
static mut __heap_start: u8;
static mut __heap_end: u8;
}
#[repr(C, align(16))]
pub struct HeapHeader {
pub next: *mut HeapHeader,
before: *mut HeapHeader,
pub size: usize,
free: bool,
}
const HEAP_HEADER_SIZE: usize = size_of::<HeapHeader>();
const MIN_BLOCK_SIZE: usize = 16;
// TODO: This implementation has to be reevaluated when implementing multiprocessing
// Spinlock could be a solution but has its issues:
// https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
pub static mut HEAP: Heap = Heap {
start_address: &raw mut __heap_start as *mut HeapHeader,
end_address: &raw mut __heap_end as *mut HeapHeader,
raw_size: 0,
};
// TODO: investigate if there is a better alternative to this
pub unsafe fn init_global_heap() {
HEAP.init();
}
#[derive(Default)]
pub struct Novalloc;
unsafe impl GlobalAlloc for Novalloc {
unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 {
HEAP.malloc(layout.size()).unwrap()
}
unsafe fn dealloc(&self, ptr: *mut u8, _: core::alloc::Layout) {
HEAP.free(ptr).unwrap();
}
}
#[global_allocator]
static GLOBAL_ALLOCATOR: Novalloc = Novalloc;
pub struct Heap {
start_address: *mut HeapHeader,
end_address: *mut HeapHeader,
raw_size: usize,
}
impl Heap {
pub fn new(heap_start: usize, heap_end: usize) -> Self {
let mut instance = Self {
start_address: &raw const heap_start as *mut HeapHeader,
end_address: &raw const heap_end as *mut HeapHeader,
raw_size: heap_end - heap_start,
};
instance.init();
instance
}
fn init(&mut self) {
self.raw_size = self.end_address as usize - self.start_address as usize;
unsafe {
ptr::write(
self.start_address,
HeapHeader {
next: null_mut(),
before: null_mut(),
size: self.raw_size - HEAP_HEADER_SIZE,
free: true,
},
);
}
}
unsafe fn find_first_fit(&self, size: usize) -> Result<*mut HeapHeader, NovaError> {
let mut current = self.start_address;
while !fits(size, current) {
if (*self.start_address).next.is_null() {
return Err(NovaError::HeapFull);
}
current = (*current).next;
}
Ok(current)
}
pub fn malloc(&self, mut size: usize) -> Result<*mut u8, NovaError> {
if size == 0 {
return Err(NovaError::EmptyHeapSegmentNotAllowed);
}
if size < MIN_BLOCK_SIZE {
size = MIN_BLOCK_SIZE;
}
// Align size to the next 16 bytes
size += (16 - (size % 16)) % 16;
unsafe {
// Find First-Fit memory segment
let current = self.find_first_fit(size)?;
// Return entire block WITHOUT generating a new header
// if the current block doesn't have enough space to hold: requested size + HEAP_HEADER_SIZE + MIN_BLOCK_SIZE
if (*current).size < size + HEAP_HEADER_SIZE + MIN_BLOCK_SIZE {
(*current).free = false;
return Ok(current.byte_add(HEAP_HEADER_SIZE) as *mut u8);
}
Self::fragment_segment(current, size);
let data_start_address = current.byte_add(HEAP_HEADER_SIZE);
Ok(data_start_address as *mut u8)
}
}
unsafe fn fragment_segment(current: *mut HeapHeader, size: usize) {
let byte_offset = HEAP_HEADER_SIZE + size;
let new_address = current.byte_add(byte_offset);
// Handle case where fragmenting center free space
let next = (*current).next;
if !(*current).next.is_null() {
(*next).before = new_address;
}
ptr::write(
new_address as *mut HeapHeader,
HeapHeader {
next,
before: current,
size: (*current).size - size - HEAP_HEADER_SIZE,
free: true,
},
);
(*current).next = new_address;
(*current).free = false;
(*current).size = size;
}
pub fn free(&self, pointer: *mut u8) -> Result<(), NovaError> {
let mut segment = unsafe { pointer.sub(HEAP_HEADER_SIZE) as *mut HeapHeader };
unsafe {
// IF prev is free:
// Delete header, add size to previous and fix pointers.
// Move Head left
if !(*segment).before.is_null() && (*(*segment).before).free {
let before_head = (*segment).before;
(*before_head).size += (*segment).size + HEAP_HEADER_SIZE;
delete_header(segment);
segment = before_head;
}
// IF next is free:
// Delete next header and merge size, fix pointers
if !(*segment).next.is_null() && (*(*segment).next).free {
let next_head = (*segment).next;
(*segment).size += (*next_head).size + HEAP_HEADER_SIZE;
delete_header(next_head);
}
// Neither: Set free
(*segment).free = true;
}
Ok(())
}
pub fn traverse_heap(&self) {
let mut pointer_address = self.start_address;
loop {
let head = unsafe { read_volatile(pointer_address) };
println!("Header {:#x}", pointer_address as u32);
println!("free: {}", head.free);
println!("size: {}", head.size);
println!("hasNext: {}", !head.next.is_null());
println!("");
if !head.next.is_null() {
pointer_address = head.next;
} else {
println!("---------------");
return;
}
}
}
}
unsafe fn fits(size: usize, header: *mut HeapHeader) -> bool {
(*header).free && size <= (*header).size
}
unsafe fn delete_header(header: *mut HeapHeader) {
let before = (*header).before;
let next = (*header).next;
if !before.is_null() {
(*before).next = next;
}
if !next.is_null() {
(*next).before = before;
}
}

View File

@@ -6,7 +6,6 @@ use core::{
use crate::{
mmio_read, mmio_write,
peripherals::gpio::{blink_gpio, SpecificGpio},
print,
timer::sleep_s,
};
@@ -54,7 +53,7 @@ fn esr_uart_dump() {
let esr: u32;
unsafe {
asm!(
"mrs {esr}, ESR_EL1",
"mrs {esr:x}, ESR_EL1",
esr = out(reg) esr
);
}
@@ -80,6 +79,7 @@ fn handle_gpio_interrupt() {
let val = read_gpio_event_detect_status(i);
if val {
#[allow(clippy::single_match)]
match i {
26 => print!("Button Pressed"),
_ => {}

View File

@@ -1,6 +1,34 @@
#![no_std]
#![allow(clippy::missing_safety_doc)]
use core::{
panic::PanicInfo,
ptr::{read_volatile, write_volatile},
};
use core::ptr::{read_volatile, write_volatile};
use heap::Heap;
unsafe extern "C" {
unsafe static mut __heap_start: u8;
unsafe static mut __heap_end: u8;
}
#[global_allocator]
pub static mut GLOBAL_ALLOCATOR: Heap = Heap::empty();
pub unsafe fn init_heap() {
let start = core::ptr::addr_of_mut!(__heap_start) as usize;
let end = core::ptr::addr_of_mut!(__heap_end) as usize;
let heap = core::ptr::addr_of_mut!(GLOBAL_ALLOCATOR);
(*heap).init(start, end);
}
#[panic_handler]
fn panic(_panic: &PanicInfo) -> ! {
loop {
println!("Panic");
}
}
#[macro_export]
macro_rules! print {
@@ -23,7 +51,6 @@ pub mod peripherals;
pub mod configuration;
pub mod framebuffer;
pub mod heap;
pub mod irq_interrupt;
pub mod mailbox;
pub mod timer;
@@ -35,10 +62,3 @@ pub fn mmio_read(address: u32) -> u32 {
pub fn mmio_write(address: u32, data: u32) {
unsafe { write_volatile(address as *mut u32, data) }
}
#[derive(Debug)]
pub enum NovaError {
Mailbox,
HeapFull,
EmptyHeapSegmentNotAllowed,
}

View File

@@ -1,9 +1,10 @@
use crate::{mmio_read, mmio_write, NovaError};
use crate::{mmio_read, mmio_write};
use nova_error::NovaError;
const MBOX_BASE: u32 = 0x3F00_0000 + 0xB880;
// MB0
const MBOX_READ: u32 = MBOX_BASE + 0x00;
const MBOX_READ: u32 = MBOX_BASE;
const MBOX_STATUS: u32 = MBOX_BASE + 0x18;
// MB1
@@ -51,7 +52,7 @@ macro_rules! mailbox_command {
return Err(NovaError::Mailbox);
}
let mut out = [0u32; $response_len / 4]; // TODO: Can this be improved?
let mut out = [0u32; $response_len / 4];
out.copy_from_slice(&mailbox[5..(5 + $response_len / 4)]);
Ok(out)
}

View File

@@ -2,17 +2,18 @@
#![no_std]
#![feature(asm_experimental_arch)]
#![allow(static_mut_refs)]
#![allow(clippy::missing_safety_doc)]
use core::{
arch::{asm, global_asm},
panic::PanicInfo,
ptr::write_volatile,
};
extern crate alloc;
use alloc::boxed::Box;
use nova::{
framebuffer::{FrameBuffer, BLUE, GREEN, RED},
heap::{init_global_heap, HEAP},
init_heap,
irq_interrupt::enable_irq_source,
mailbox::mb_read_soc_temp,
peripherals::{
@@ -34,15 +35,8 @@ extern "C" {
static mut __bss_end: u32;
}
#[panic_handler]
fn panic(_panic: &PanicInfo) -> ! {
loop {
println!("Panic");
}
}
#[no_mangle]
#[link_section = ".text._start"]
#[cfg_attr(not(test), link_section = ".text._start")]
pub unsafe extern "C" fn _start() {
// Set the stack pointer
asm!(
@@ -88,7 +82,10 @@ unsafe fn zero_bss() {
pub extern "C" fn kernel_main() -> ! {
println!("EL: {}", get_current_el());
heap_test();
unsafe {
init_heap();
heap_test();
};
sleep_us(500_000);
@@ -98,7 +95,7 @@ pub extern "C" fn kernel_main() -> ! {
gpio_pull_up(26);
set_falling_edge_detect(26, true);
let fb = FrameBuffer::new();
let fb = FrameBuffer::default();
fb.draw_square(500, 500, 600, 700, RED);
fb.draw_square_fill(800, 800, 900, 900, GREEN);
@@ -116,21 +113,9 @@ pub extern "C" fn kernel_main() -> ! {
}
}
fn heap_test() {
unsafe {
init_global_heap();
let a = HEAP.malloc(32).unwrap();
let b = HEAP.malloc(64).unwrap();
let c = HEAP.malloc(128).unwrap();
let _ = HEAP.malloc(256).unwrap();
HEAP.traverse_heap();
HEAP.free(b).unwrap();
HEAP.traverse_heap();
HEAP.free(a).unwrap();
HEAP.traverse_heap();
HEAP.free(c).unwrap();
HEAP.traverse_heap();
}
unsafe fn heap_test() {
let b = Box::new([1, 2, 3, 4]);
println!("{:?}", b);
}
fn cos(x: u32) -> f64 {

View File

@@ -170,7 +170,7 @@ pub fn set_rising_edge_detect(gpio: u8, enable: bool) {
mmio_write(register_addr, new_val);
}
pub fn blink_gpio(gpio: u8, duration_ms: u32) {
pub fn blink_gpio(gpio: u8, duration_ms: u64) {
let _ = gpio_high(gpio);
sleep_ms(duration_ms);

View File

@@ -1,32 +1,61 @@
use crate::mmio_read;
use core::{hint::spin_loop, ptr::read_volatile};
const TIMER_CLO: u32 = 0x3F00_3004;
const TIMER_CLOCK_LO: u32 = 0x3F00_3004;
const TIMER_CLOCK_HI: u32 = 0x3F00_3008;
fn read_clo() -> u32 {
mmio_read(TIMER_CLO)
fn read_timer_32() -> u32 {
unsafe { read_volatile(TIMER_CLOCK_LO as *const u32) }
}
fn read_timer_64() -> u64 {
loop {
let clock_hi1 = unsafe { read_volatile(TIMER_CLOCK_HI as *const u32) };
let clock_lo = unsafe { read_volatile(TIMER_CLOCK_LO as *const u32) };
let clock_hi2 = unsafe { read_volatile(TIMER_CLOCK_HI as *const u32) };
// account for roll over during read
if clock_hi1 == clock_hi2 {
return ((clock_hi1 as u64) << 32) | clock_lo as u64;
}
}
}
/// Sleep for `us` microseconds
pub fn sleep_us(us: u32) {
let start = read_clo();
while read_clo() - start < us {
unsafe { core::arch::asm!("nop") }
pub fn sleep_us(us: u64) {
if us < u32::MAX as u64 {
sleep_us_u32(us as u32);
} else {
sleep_us_u64(us);
}
}
fn sleep_us_u32(us: u32) {
let start = read_timer_32();
while read_timer_32().wrapping_sub(start) < us {
spin_loop();
}
}
fn sleep_us_u64(us: u64) {
let start = read_timer_64();
while read_timer_64().wrapping_sub(start) < us {
spin_loop();
}
}
/// Sleep for `ms` milliseconds
pub fn sleep_ms(ms: u32) {
sleep_us(ms * 1000);
pub fn sleep_ms(ms: u64) {
sleep_us(ms * 1_000);
}
/// Sleep for `s` seconds
pub fn sleep_s(s: u32) {
sleep_us(s * 1000);
pub fn sleep_s(s: u64) {
sleep_ms(s * 1_000);
}
/// Wait for `count` operations to pass
pub fn delay_nops(count: u32) {
for _ in 0..count {
unsafe { core::arch::asm!("nop") }
spin_loop()
}
}