5 Commits

Author SHA1 Message Date
Alexander Neuhäuser
e712593dae feat: Application manager (#8)
* feat: map text area to kernel memory space, first application_manager
implementation

* feat: start application via application_manager

* feat: terminal, start app by ID

* feat: support CLI args, by applying System V ABI concepts

* refactor: rename terminal to console. Minor cleanup

* docs: README.md
2026-03-27 15:15:49 +01:00
Alexander Neuhäuser
f33100d36b feat: UART terminal (#7)
* feat: basic terminal

* feat: execute application via terminal
2026-03-24 17:50:28 +01:00
Alexander Neuhäuser
34a66ff87a feat: implement first SVC mailbox instruction (#6)
* refactor: organize code

* feat: move EL0 stack to virtual space

* wip

* feat: Enable EL0 basic mailbox access via SVCs

* refactor: move irq interrupts
2026-03-22 12:25:43 +01:00
Alexander Neuhäuser
f78388ee2c feat: implement MMU core functionality
* feat: Implement a basic MMU configuration

* feat: Enhance MMU by separating sections and configuring permissions

* feat: Update MMU configuration and memory allocation functions

* fix: Level 3 translation fault

* docs: add code documentation

* fix: linter

* feat: map translation tables to kernel space

* feat: move el1 stack to kernel VA space

* feat: use virtual memory for heap allocation

* docs: update Readme
2026-03-17 19:30:45 +01:00
55f410e2bb Refactor and reorganize project structure 2026-03-04 11:23:27 +01:00
40 changed files with 1769 additions and 770 deletions

7
.gitignore vendored
View File

@@ -4,3 +4,10 @@ kernel8.img
sd.img
settings.json
.DS_Store
.venv
.nvimlog
__pycache__
.pytest_cache
build/

42
.vscode/launch.json vendored
View File

@@ -3,7 +3,7 @@
"compounds": [
{
"name": "Run QEMU + Attach LLDB",
"configurations": ["Attach LLDB"],
"configurations": ["LLDB"],
"preLaunchTask": "Run QEMU"
}
],
@@ -33,13 +33,47 @@
],
"preLaunchTask": "Run QEMU"
},
{
"name": "Attach LLDB",
"name": "Attach to QEMU (AArch64) wo. window",
"type": "cppdbg",
"request": "launch",
"program": "${workspaceFolder}/target/aarch64-unknown-none/debug/nova",
"miDebuggerServerAddress": "localhost:1234",
"miDebuggerPath": "gdb",
"cwd": "${workspaceFolder}",
"stopAtEntry": true,
"externalConsole": false,
"MIMode": "gdb",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
},
{
"description": "Show assembly on stop",
"text": "set disassemble-next-line on",
"ignoreFailures": true
}
],
"preLaunchTask": "Run QEMU wo window"
},
{
"name": "LLDB",
"type": "lldb",
"request": "attach",
"debugServer": 1234,
"program": "${workspaceFolder}/target/aarch64-unknown-none/debug/nova",
"preLaunchTask": "Run QEMU",
"stopOnEntry": true,
"processCreateCommands": ["gdb-remote localhost:1234"]
},
{
"name": "NVIM LLDB",
"type": "codelldb",
"request": "attach",
"program": "${workspaceFolder}/target/aarch64-unknown-none/debug/nova",
"preLaunchTask": "Run QEMU",
"stopOnEntry": true,
"processCreateCommands": ["gdb-remote localhost:1234"]
}

33
.vscode/tasks.json vendored
View File

@@ -14,9 +14,38 @@
{
"label": "Run QEMU",
"type": "shell",
"command": "llvm-objcopy -O binary target/aarch64-unknown-none/debug/nova target/aarch64-unknown-none/debug/kernel8.img && qemu-system-aarch64 -M raspi3b -cpu cortex-a53 -serial stdio -sd sd.img -kernel ${workspaceFolder}/target/aarch64-unknown-none/debug/kernel8.img -S -s -m 1024",
"command": "llvm-objcopy -O binary target/aarch64-unknown-none/debug/nova target/aarch64-unknown-none/debug/kernel8.img && echo Starting QEMU&qemu-system-aarch64 -M raspi3b -cpu cortex-a53 -serial stdio -sd sd.img -kernel ${workspaceFolder}/target/aarch64-unknown-none/debug/kernel8.img -S -s -m 1024",
"isBackground": true,
"dependsOn": ["Build"]
"dependsOn": ["Build"],
"problemMatcher": {
"pattern": {
"regexp": "^(Starting QEMU)",
"line": 1,
},
"background": {
"activeOnStart": true,
"beginsPattern": "^(Starting QEMU)",
"endsPattern": "^(Starting QEMU)"
}
}
},
{
"label": "Run QEMU wo window",
"type": "shell",
"command": "llvm-objcopy -O binary target/aarch64-unknown-none/debug/nova target/aarch64-unknown-none/debug/kernel8.img && echo Starting QEMU&qemu-system-aarch64 -M raspi3b -cpu cortex-a53 -display none -serial stdio -sd sd.img -kernel ${workspaceFolder}/target/aarch64-unknown-none/debug/kernel8.img -S -s -m 1024",
"isBackground": true,
"dependsOn": ["Build"],
"problemMatcher": {
"pattern": {
"regexp": "^(Starting QEMU)",
"line": 1,
},
"background": {
"activeOnStart": true,
"beginsPattern": "^(Starting QEMU)",
"endsPattern": "^(Starting QEMU)"
}
}
}
]
}

39
Cargo.lock generated
View File

@@ -40,19 +40,43 @@ version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de"
[[package]]
name = "lock_api"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965"
dependencies = [
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
[[package]]
name = "nova"
version = "0.1.0"
dependencies = [
"heap",
"libm",
"log",
"nova_error",
"paste",
"spin",
]
[[package]]
name = "nova_error"
version = "0.1.0"
[[package]]
name = "paste"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]]
name = "ppv-lite86"
version = "0.2.21"
@@ -115,6 +139,21 @@ dependencies = [
"getrandom",
]
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "spin"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591"
dependencies = [
"lock_api",
]
[[package]]
name = "syn"
version = "2.0.111"

View File

@@ -14,11 +14,15 @@ panic = "abort"
[dependencies]
libm = "0.2.15"
heap = {path = "heap"}
nova_error = {path = "nova_error"}
heap = {path = "workspace/heap"}
nova_error = {path = "workspace/nova_error"}
paste = "1.0.15"
log = "0.4.29"
spin = "0.10.0"
[workspace]
members = [ "nova_error",
"heap"
members = [
"workspace/nova_error",
"workspace/heap",
]

View File

@@ -2,8 +2,6 @@
NovaOS is an expository project where I build a kernel from scratch for a Raspberry Pi 3 B+.
[Technical write-up](https://leafnova.net/projects/pi3_kernel/)
## Features
- Delay and sleep ✓
@@ -14,8 +12,11 @@ NovaOS is a expository project where I build a kernel from scratch for a Raspber
- Communicate with peripherals via mailboxes ✓
- Frame Buffer ✓
- Heap Memory allocation ✓
- MMU ✓
- SVC instructions ~
- Basic Console over UART ~
- Multi Applications ~
- Multi Core
- Dynamic clock speed
- MMU
- Kernel Independent Applications
- Multiprocessing
- Basic Terminal over UART

41
link.ld
View File

@@ -5,51 +5,42 @@ SECTIONS {
KEEP(*(.text._start))
*(.text .text.*)
}
.vector_table ALIGN(2K) : {
KEEP(*(.vector_t))
}
. = ALIGN(4K);
__text_end = .;
.rodata : {
*(.rodata .rodata.*)
}
.data : {
_data = .;
.data : {
*(.data .data.*)
}
.bss (NOLOAD) : {
. = ALIGN(16);
.bss ALIGN(16) (NOLOAD) : {
__bss_start = .;
*(.bss .bss.*)
*(COMMON)
__bss_end = .;
}
.vector_table ALIGN(2048) : {
KEEP(*(.vector_table))
}
. = ALIGN(2M);
.heap : ALIGN(16)
{
__heap_start = .;
. += 0x10000; #10kB
__heap_end = .;
}
__share_end = .;
.stack : ALIGN(16)
{
# EL2 Stack
.stack ALIGN(16): {
__stack_start = .;
. += 0x10000; #10kB stack
. += 100K; #100kB stack
. = ALIGN(16);
__stack_end = .;
}
.stack_el0 : ALIGN(16)
{
__stack_start_el0 = .;
. += 0x10000; #10kB stack
__stack_end_el0 = .;
}
. = ALIGN(2M);
_end = .;
__kernel_end = .;
}
__bss_size = (__bss_end - __bss_start) >> 3;

439
src/aarch64/mmu.rs Normal file
View File

@@ -0,0 +1,439 @@
use core::mem::size_of;
use nova_error::NovaError;
use crate::{
aarch64::mmu::physical_mapping::{
reserve_block, reserve_block_explicit, reserve_page, reserve_page_explicit,
},
get_current_el,
};
const BLOCK: u64 = 0b01;
const TABLE: u64 = 0b11;
const PAGE: u64 = 0b11;
/// Allow EL0 to access this section
pub const EL0_ACCESSIBLE: u64 = 1 << 6;
/// Allow a page or block to be written.
pub const WRITABLE: u64 = 0 << 7;
/// Disallow a page or block to be written.
pub const READ_ONLY: u64 = 1 << 7;
const ACCESS_FLAG: u64 = 1 << 10;
const INNER_SHAREABILITY: u64 = 0b11 << 8;
pub const NORMAL_MEM: u64 = 0 << 2;
pub const DEVICE_MEM: u64 = 1 << 2;
/// Disallow EL1 Execution.
pub const PXN: u64 = 1 << 53;
/// Disallow EL0 Execution.
pub const UXN: u64 = 1 << 54;
pub const GRANULARITY: usize = 4 * 1024;
const TABLE_ENTRY_COUNT: usize = GRANULARITY / size_of::<u64>(); // 2MiB
pub const LEVEL1_BLOCK_SIZE: usize = TABLE_ENTRY_COUNT * TABLE_ENTRY_COUNT * GRANULARITY;
pub const LEVEL2_BLOCK_SIZE: usize = TABLE_ENTRY_COUNT * GRANULARITY;
const L2_BLOCK_BITMAP_WORDS: usize = LEVEL2_BLOCK_SIZE / (64 * GRANULARITY);
const MAX_PAGE_COUNT: usize = 1024 * 1024 * 1024 / GRANULARITY;
const TRANSLATION_TABLE_BASE_ADDR: usize = 0xFFFF_FF82_0000_0000;
#[no_mangle]
pub static KERNEL_VIRTUAL_MEM_SPACE: usize = 0xFFFF_FF80_0000_0000;
pub const STACK_START_ADDR: usize = !KERNEL_VIRTUAL_MEM_SPACE & (!0xF);
pub mod physical_mapping;
pub type VirtAddr = usize;
pub type PhysAddr = usize;
/// A single 64-bit descriptor in a translation table.
#[derive(Clone, Copy)]
pub struct TableEntry {
    value: u64,
}

impl TableEntry {
    /// Mask selecting the output-address bits of a descriptor
    /// (4 KiB-aligned, 48-bit physical address space).
    const ADDR_MASK: u64 = 0x0000_FFFF_FFFF_F000;

    /// An entry whose valid bits are clear (not mapped).
    pub fn invalid() -> Self {
        Self { value: 0 }
    }

    /// Descriptor pointing at a next-level translation table.
    fn table_descriptor(addr: PhysAddr) -> Self {
        Self {
            value: (addr as u64 & Self::ADDR_MASK) | TABLE,
        }
    }

    /// Descriptor mapping a whole block of memory.
    fn block_descriptor(physical_address: usize, additional_flags: u64) -> Self {
        let base = physical_address as u64 & Self::ADDR_MASK;
        Self {
            value: base | BLOCK | ACCESS_FLAG | INNER_SHAREABILITY | additional_flags,
        }
    }

    /// Descriptor mapping a single page.
    fn page_descriptor(physical_address: usize, additional_flags: u64) -> Self {
        let base = physical_address as u64 & Self::ADDR_MASK;
        Self {
            value: base | PAGE | ACCESS_FLAG | INNER_SHAREABILITY | additional_flags,
        }
    }

    /// True when neither of the two low "valid" bits is set.
    fn is_invalid(self) -> bool {
        self.value & 0b11 == 0
    }

    /// Output address stored in this descriptor.
    #[inline]
    fn address(self) -> PhysAddr {
        (self.value & Self::ADDR_MASK) as usize
    }
}
/// Where the physical backing for a mapping comes from.
pub enum PhysSource {
    /// Let the frame allocator pick any free physical memory.
    Any,
    /// Use exactly this physical address.
    Explicit(PhysAddr),
}
/// One 4 KiB translation table: 512 descriptors, page-aligned as the MMU requires.
#[repr(align(4096))]
pub struct PageTable(pub [TableEntry; TABLE_ENTRY_COUNT]);
// NOTE(review): this is not a conventional Iterator — `next()` rescans the
// table and always returns the FIRST invalid slot's index (a table offset,
// not a VirtAddr, despite the Item alias), so repeated calls yield the same
// value until that slot is filled. Callers use it as "find a free slot";
// consider renaming to a plain method.
impl Iterator for PageTable {
    type Item = VirtAddr;
    fn next(&mut self) -> Option<Self::Item> {
        // Linear scan for the first unused descriptor.
        for (offset, entity) in self.0.iter().enumerate() {
            if entity.is_invalid() {
                return Some(offset);
            }
        }
        None
    }
}
#[no_mangle]
pub static mut TRANSLATIONTABLE_TTBR0: PageTable = PageTable([TableEntry { value: 0 }; 512]);
#[no_mangle]
pub static mut TRANSLATIONTABLE_TTBR1: PageTable = PageTable([TableEntry { value: 0 }; 512]);
/// Allocate a memory block of `size_bytes` starting at `virtual_address`.
///
/// Both `virtual_address` and `size_bytes` must be page-aligned. Addresses
/// carrying the kernel-space prefix are mapped through TTBR1, everything
/// else through TTBR0. With [`PhysSource::Any`] the backing frames come from
/// the frame allocator; with [`PhysSource::Explicit`] the given physical
/// range is used as-is.
pub fn allocate_memory(
    virtual_address: usize,
    size_bytes: usize,
    phys: PhysSource,
    flags: u64,
) -> Result<(), NovaError> {
    if !virtual_address.is_multiple_of(GRANULARITY) {
        return Err(NovaError::Misalignment);
    }
    if !size_bytes.is_multiple_of(GRANULARITY) {
        return Err(NovaError::InvalidGranularity);
    }
    // Kernel-space addresses (high bits set) live in TTBR1, user space in TTBR0.
    let base_table = if virtual_address & KERNEL_VIRTUAL_MEM_SPACE > 0 {
        core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR1)
    } else {
        core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0)
    };
    match phys {
        PhysSource::Any => map_range_dynamic(virtual_address, size_bytes, base_table, flags),
        PhysSource::Explicit(phys_addr) => {
            map_range_explicit(virtual_address, phys_addr, size_bytes, base_table, flags)
        }
    }
}
/// Map the physical range starting at `phys` onto the virtual range starting
/// at `virt`, `size_bytes` long (page-granular).
///
/// Leading pages are mapped individually until `virt` reaches a level-2
/// block boundary, then whole 2 MiB blocks are used, then the tail is mapped
/// page by page again.
///
/// NOTE(review): block mapping implicitly assumes `phys` is block-aligned
/// whenever `virt` is — the block descriptor silently masks low address bits
/// otherwise. Confirm all callers keep virt/phys co-aligned.
fn map_range_explicit(
    mut virt: VirtAddr,
    mut phys: PhysAddr,
    size_bytes: usize,
    base: *mut PageTable,
    flags: u64,
) -> Result<(), NovaError> {
    let mut remaining = size_bytes;
    // Page-by-page until virt hits a 2 MiB boundary.
    while !virt.is_multiple_of(LEVEL2_BLOCK_SIZE) && remaining > 0 {
        map_page(virt, phys, base, flags)?;
        // overflowing_add: wrapping at the top of the VA range is tolerated
        // here — presumably intended for the highest kernel addresses; TODO confirm.
        (virt, _) = virt.overflowing_add(GRANULARITY);
        phys += GRANULARITY;
        remaining -= GRANULARITY;
    }
    // Whole level-2 (2 MiB) blocks.
    while remaining >= LEVEL2_BLOCK_SIZE {
        map_l2_block(virt, phys, base, flags)?;
        (virt, _) = virt.overflowing_add(LEVEL2_BLOCK_SIZE);
        phys += LEVEL2_BLOCK_SIZE;
        remaining -= LEVEL2_BLOCK_SIZE;
    }
    // Remaining tail pages.
    while remaining > 0 {
        map_page(virt, phys, base, flags)?;
        (virt, _) = virt.overflowing_add(GRANULARITY);
        phys += GRANULARITY;
        remaining -= GRANULARITY;
    }
    Ok(())
}
fn map_range_dynamic(
mut virt: PhysAddr,
size_bytes: usize,
base: *mut PageTable,
flags: u64,
) -> Result<(), NovaError> {
let mut remaining = size_bytes;
while remaining >= LEVEL2_BLOCK_SIZE {
map_l2_block(virt, reserve_block(), base, flags)?;
(virt, _) = virt.overflowing_add(LEVEL2_BLOCK_SIZE);
remaining -= LEVEL2_BLOCK_SIZE;
}
while remaining > 0 {
map_page(virt, reserve_page(), base, flags)?;
(virt, _) = virt.overflowing_add(GRANULARITY);
remaining -= GRANULARITY;
}
Ok(())
}
/// Allocate a single page: reserve a free physical frame and map it at
/// `virtual_address` in the tree rooted at `base_table`.
pub fn alloc_page(
    virtual_address: VirtAddr,
    base_table: *mut PageTable,
    additional_flags: u64,
) -> Result<(), NovaError> {
    map_page(
        virtual_address,
        reserve_page(),
        base_table,
        additional_flags,
    )
}
/// Find a free (unmapped) page-sized slot inside the kernel-space level-2
/// block starting at `start`, and return that slot's virtual address.
///
/// `start` must be 2 MiB-block aligned. Navigates TTBR1 (creating missing
/// intermediate tables) and scans the level-3 table for the first invalid
/// entry. Does not map anything itself.
///
/// NOTE(review): "kerne" in the name is a typo for "kernel", but the function
/// is public and has callers, so renaming is an API change.
pub fn find_free_kerne_page_in_block(start: VirtAddr) -> Result<VirtAddr, NovaError> {
    if !start.is_multiple_of(LEVEL2_BLOCK_SIZE) {
        return Err(NovaError::Misalignment);
    }
    let (off1, off2, _) = virtual_address_to_table_offset(start);
    let offsets = [off1, off2];
    let table = unsafe {
        &mut *navigate_table(
            core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR1),
            &offsets,
            true,
        )?
    };
    // PageTable::next() yields the first free slot index (see the Iterator impl).
    if let Some(offset) = table.next() {
        return Ok(start + (offset * GRANULARITY));
    }
    Err(NovaError::OutOfMeomory)
}
/// Allocate a single page at an explicit `physical_address`.
///
/// Marks the frame as reserved in the frame allocator first (failing if it
/// is already taken), then maps `virtual_address` onto it.
pub fn alloc_page_explicit(
    virtual_address: usize,
    physical_address: usize,
    base_table: *mut PageTable,
    additional_flags: u64,
) -> Result<(), NovaError> {
    reserve_page_explicit(physical_address)?;
    map_page(
        virtual_address,
        physical_address,
        base_table,
        additional_flags,
    )
}
/// Insert a level-3 page descriptor mapping `virtual_address` ->
/// `physical_address` into the tree rooted at `base_table_ptr`, creating
/// intermediate tables as needed.
///
/// Fails if the slot is already mapped. Does NOT reserve the physical frame
/// in the allocator — callers do that themselves.
pub fn map_page(
    virtual_address: usize,
    physical_address: usize,
    base_table_ptr: *mut PageTable,
    additional_flags: u64,
) -> Result<(), NovaError> {
    let (l1_off, l2_off, l3_off) = virtual_address_to_table_offset(virtual_address);
    let offsets = [l1_off, l2_off];
    // Walk (and if needed create) the L1 and L2 tables down to the L3 table.
    let table_ptr = navigate_table(base_table_ptr, &offsets, true)?;
    let table = unsafe { &mut *table_ptr };
    if !table.0[l3_off].is_invalid() {
        return Err(NovaError::Paging("Page already occupied."));
    }
    table.0[l3_off] = TableEntry::page_descriptor(physical_address, additional_flags);
    Ok(())
}
/// Allocate a level-2 (2 MiB) block at an explicit `physical_address`.
///
/// The address must be block-aligned. Reserves the whole block in the frame
/// allocator, then installs the block mapping.
pub fn alloc_block_l2_explicit(
    virtual_addr: usize,
    physical_address: usize,
    base_table_ptr: *mut PageTable,
    additional_flags: u64,
) -> Result<(), NovaError> {
    if !physical_address.is_multiple_of(LEVEL2_BLOCK_SIZE) {
        return Err(NovaError::Misalignment);
    }
    reserve_block_explicit(physical_address)?;
    map_l2_block(
        virtual_addr,
        physical_address,
        base_table_ptr,
        additional_flags,
    )
}
/// Install a level-2 block descriptor mapping 2 MiB at `virtual_addr` onto
/// `physical_address`, creating the level-1 table entry if needed.
///
/// Fails if the level-2 slot is already mapped. Does NOT reserve the
/// physical frames — callers do that themselves.
pub fn map_l2_block(
    virtual_addr: usize,
    physical_address: usize,
    base_table_ptr: *mut PageTable,
    additional_flags: u64,
) -> Result<(), NovaError> {
    let (l1_off, l2_off, _) = virtual_address_to_table_offset(virtual_addr);
    let offsets = [l1_off];
    let table_ptr = navigate_table(base_table_ptr, &offsets, true)?;
    let table = unsafe { &mut *table_ptr };
    // Verify virtual address is available.
    if !table.0[l2_off].is_invalid() {
        return Err(NovaError::Paging("Block already occupied."));
    }
    let new_entry = TableEntry::block_descriptor(physical_address, additional_flags);
    table.0[l2_off] = new_entry;
    Ok(())
}
/// Mark the physical range `[start_physical_address, end_physical_address)`
/// as reserved in the frame allocator.
///
/// The range length must be a multiple of the page size. Ranges spanning a
/// level-1-sized chunk (1 GiB) are not supported yet.
pub fn reserve_range(
    start_physical_address: PhysAddr,
    end_physical_address: PhysAddr,
) -> Result<PhysAddr, NovaError> {
    let total = end_physical_address - start_physical_address;
    // LEVEL1/LEVEL2 sizes are multiples of GRANULARITY, so checking the full
    // length up front is equivalent to checking the sub-page remainder.
    if !total.is_multiple_of(GRANULARITY) {
        return Err(NovaError::Misalignment);
    }
    if total / LEVEL1_BLOCK_SIZE > 0 {
        todo!();
    }
    let l2_blocks = total / LEVEL2_BLOCK_SIZE;
    let l3_pages = (total % LEVEL2_BLOCK_SIZE) / GRANULARITY;
    let mut addr = start_physical_address;
    for _ in 0..l2_blocks {
        reserve_block_explicit(addr)?;
        addr += LEVEL2_BLOCK_SIZE;
    }
    for _ in 0..l3_pages {
        reserve_page_explicit(addr)?;
        addr += GRANULARITY;
    }
    Ok(start_physical_address)
}
/// Split a virtual address into its (L1, L2, L3) translation-table indices,
/// ignoring the kernel-space prefix bits.
fn virtual_address_to_table_offset(virtual_addr: usize) -> (usize, usize, usize) {
    let page_index = (virtual_addr & !KERNEL_VIRTUAL_MEM_SPACE) / GRANULARITY;
    let level3 = page_index % TABLE_ENTRY_COUNT;
    let rest = page_index / TABLE_ENTRY_COUNT;
    let level2 = rest % TABLE_ENTRY_COUNT;
    let level1 = (rest / TABLE_ENTRY_COUNT) % TABLE_ENTRY_COUNT;
    (level1, level2, level3)
}
/// Navigate the table tree, by following given offsets. This function
/// allocates new tables if required (when `create_missing` is set).
fn navigate_table(
    initial_table_ptr: *mut PageTable,
    offsets: &[usize],
    create_missing: bool,
) -> Result<*mut PageTable, NovaError> {
    // Fold the offsets over next_table, stopping at the first error.
    offsets.iter().try_fold(initial_table_ptr, |table, &offset| {
        next_table(table, offset, create_missing)
    })
}
/// Get the next table one level down.
///
/// If the table doesn't exist and `create_missing` is set, a fresh physical
/// frame is reserved for it, installed as a table descriptor, and also
/// mapped into the kernel's table window so EL1 can write to it.
fn next_table(
    table_ptr: *mut PageTable,
    offset: usize,
    create_missing: bool,
) -> Result<*mut PageTable, NovaError> {
    let table = unsafe { &mut *table_ptr };
    // Dispatch on the descriptor's low two "type" bits.
    match table.0[offset].value & 0b11 {
        // 0b00: not present.
        0 => {
            if !create_missing {
                return Err(NovaError::Paging("No table defined."));
            }
            let new_phys_page_table_address = reserve_page();
            table.0[offset] = TableEntry::table_descriptor(new_phys_page_table_address);
            // Map the new table into kernel space so it stays writable
            // once the MMU is on.
            map_page(
                phys_table_to_kernel_space(new_phys_page_table_address),
                new_phys_page_table_address,
                &raw mut TRANSLATIONTABLE_TTBR1,
                NORMAL_MEM | WRITABLE | PXN | UXN,
            )?;
            Ok(resolve_table_addr(table.0[offset].address()) as *mut PageTable)
        }
        // 0b01: a block descriptor occupies this slot — can't descend.
        1 => Err(NovaError::Paging(
            "Can't navigate table due to block mapping.",
        )),
        // 0b11: a valid table descriptor.
        3 => Ok(resolve_table_addr(table.0[offset].address()) as *mut PageTable),
        _ => unreachable!(),
    }
}
/// Converts a physical table address and returns the corresponding virtual address depending on EL.
///
/// - `== EL0` -> panic
/// - `== EL1` -> 0xFFFFFF82XXXXXXXX
/// - `>= EL2` -> physical address
#[inline]
fn resolve_table_addr(physical_address: PhysAddr) -> VirtAddr {
    // Read the current exception level once and branch on it — the original
    // queried the system register a second time in the EL1 branch.
    match get_current_el() {
        el if el >= 2 => physical_address,
        1 => phys_table_to_kernel_space(physical_address),
        _ => panic!("Access to table entries is forbidden in EL0."),
    }
}
/// Returns the kernel-space virtual alias of a physical translation-table
/// address by OR-ing in `TRANSLATION_TABLE_BASE_ADDR` (the tables' linear
/// mapping window). The previous doc comment ("extracts the physical
/// address") described the opposite direction.
#[inline]
fn phys_table_to_kernel_space(entry: usize) -> VirtAddr {
    entry | TRANSLATION_TABLE_BASE_ADDR
}

View File

@@ -0,0 +1,95 @@
use crate::aarch64::mmu::{PhysAddr, GRANULARITY, L2_BLOCK_BITMAP_WORDS, MAX_PAGE_COUNT};
use nova_error::NovaError;
/// Bitmap over all physical page frames: a set bit means "frame reserved".
struct PagingMap {
    // One u64 word covers 64 frames.
    bitmap: [u64; MAX_PAGE_COUNT / 64],
}
// Global frame-allocator state. Accessed without synchronization —
// NOTE(review): fine while single-core; needs locking before multi-core.
static mut PAGING_BITMAP: PagingMap = PagingMap {
    bitmap: [0; MAX_PAGE_COUNT / 64],
};
/// Reserve the first free physical page frame and return its address.
///
/// # Panics
/// Panics when no free frame is left.
pub fn reserve_page() -> PhysAddr {
    if let Some(address) = find_unallocated_page() {
        let page = address / GRANULARITY;
        let word_index = page / 64;
        // Mark the frame as taken.
        unsafe { PAGING_BITMAP.bitmap[word_index] |= 1 << (page % 64) };
        return address;
    }
    panic!("Out of Memory!");
}
/// Reserve the specific physical frame containing `physical_address`.
///
/// Fails if that frame is already marked as taken; returns the address on
/// success.
pub fn reserve_page_explicit(physical_address: usize) -> Result<PhysAddr, NovaError> {
    let page = physical_address / GRANULARITY;
    let word_index = page / 64;
    if unsafe { PAGING_BITMAP.bitmap[word_index] } & (1 << (page % 64)) > 0 {
        return Err(NovaError::Paging("Page PA already taken."));
    }
    unsafe { PAGING_BITMAP.bitmap[word_index] |= 1 << (page % 64) };
    Ok(physical_address)
}
/// Reserve a free, word-aligned run of frames big enough for one level-2
/// block (2 MiB) and return its physical base address.
///
/// # Panics
/// Panics when no sufficiently large free run exists.
pub fn reserve_block() -> usize {
    if let Some(start) = find_contiguous_free_bitmap_words(L2_BLOCK_BITMAP_WORDS) {
        // Mark every word of the run as fully taken.
        for j in 0..L2_BLOCK_BITMAP_WORDS {
            unsafe { PAGING_BITMAP.bitmap[start + j] = u64::MAX };
        }
        return start * 64 * GRANULARITY;
    }
    panic!("Out of Memory!");
}
/// Reserve the level-2 block (2 MiB of frames) starting at `physical_address`.
///
/// Fails if any frame inside the block is already taken.
/// NOTE(review): the word-granular indexing assumes `physical_address` is
/// block-aligned (so `page` is a multiple of 64); callers currently check
/// this — confirm before relaxing.
pub fn reserve_block_explicit(physical_address: usize) -> Result<(), NovaError> {
    let page = physical_address / GRANULARITY;
    // First pass: verify the whole run is free.
    for i in 0..L2_BLOCK_BITMAP_WORDS {
        unsafe {
            if PAGING_BITMAP.bitmap[(page / 64) + i] != 0 {
                return Err(NovaError::Paging("Block PA already taken."));
            }
        };
    }
    // Second pass: mark it taken.
    for i in 0..L2_BLOCK_BITMAP_WORDS {
        unsafe {
            PAGING_BITMAP.bitmap[(page / 64) + i] = u64::MAX;
        };
    }
    Ok(())
}
/// Scan the bitmap for the first clear bit and return the physical address
/// of the corresponding page, or `None` when every frame is taken.
fn find_unallocated_page() -> Option<usize> {
    // Iterate the bitmap by reference: `unsafe { PAGING_BITMAP.bitmap }`
    // copied the whole array (MAX_PAGE_COUNT / 8 bytes) onto the stack on
    // every call.
    let bitmap = unsafe { &(*core::ptr::addr_of!(PAGING_BITMAP)).bitmap };
    for (i, entry) in bitmap.iter().enumerate() {
        if *entry != u64::MAX {
            // trailing_ones() is the index of the lowest clear bit, which
            // matches the original bit-0-upwards scan.
            let offset = entry.trailing_ones() as usize;
            return Some((i * 64 + offset) * GRANULARITY);
        }
    }
    None
}
/// Find `required_words` consecutive all-zero words in the paging bitmap and
/// return the index of the first one, or `None` if no such run exists.
fn find_contiguous_free_bitmap_words(required_words: usize) -> Option<usize> {
    let mut run_start = 0;
    let mut run_len = 0;
    // Iterate by reference — the original `unsafe { PAGING_BITMAP.bitmap }`
    // copied the entire array onto the stack on every call.
    let bitmap = unsafe { &(*core::ptr::addr_of!(PAGING_BITMAP)).bitmap };
    for (i, entry) in bitmap.iter().enumerate() {
        if *entry == 0 {
            if run_len == 0 {
                run_start = i;
            }
            run_len += 1;
            if run_len == required_words {
                return Some(run_start);
            }
        } else {
            run_len = 0;
        }
    }
    None
}

2
src/aarch64/mod.rs Normal file
View File

@@ -0,0 +1,2 @@
pub mod mmu;
pub mod registers;

59
src/aarch64/registers.rs Normal file
View File

@@ -0,0 +1,59 @@
use core::arch::asm;
/// Helpers for the DAIF exception-mask bits (Debug, SError/Abort, IRQ, FIQ).
pub mod daif {
    use core::arch::asm;
    /// Mask all four exception classes.
    #[inline(always)]
    pub fn mask_all() {
        unsafe { asm!("msr DAIFSet, #0xf", options(nomem, nostack)) }
    }
    /// Unmask all four exception classes.
    #[inline(always)]
    pub fn unmask_all() {
        unsafe { asm!("msr DAIFClr, #0xf", options(nomem, nostack)) }
    }
    /// Mask IRQs only (bit 1 of the DAIF immediate).
    #[inline(always)]
    pub fn mask_irq() {
        unsafe { asm!("msr DAIFSet, #0x2", options(nomem, nostack)) }
    }
    /// Unmask IRQs only.
    #[inline(always)]
    pub fn unmask_irq() {
        unsafe { asm!("msr DAIFClr, #0x2", options(nomem, nostack)) }
    }
}
/// Generates a `read_<name>()` accessor that reads system register `$name`
/// via `mrs` into a value of type `$t` (e.g. `psr!(TCR_EL1, u64)` produces
/// `read_tcr_el1() -> u64`).
#[macro_export]
macro_rules! psr {
    ($name:ident, $t:tt) => {
        paste::item! {
            pub fn [<read_ $name:lower>]() -> $t {
                let buf: $t;
                unsafe {
                    asm!(
                        concat!("mrs {0:x}, ", stringify!($name)),
                        out(reg) buf
                    );
                }
                buf
            }
        }
    };
}
psr!(TCR_EL1, u64);
psr!(ID_AA64MMFR0_EL1, u64);
psr!(ESR_EL1, u32);
psr!(SPSR_EL1, u32);
psr!(ELR_EL1, u64);
psr!(SCTLR_EL1, u64);
/// Exception level the last exception was taken from, decoded from the low
/// four bits of SPSR_EL1 (the M[3:0] mode field — e.g. 0b0101 is EL1h, as
/// used in config.S).
pub fn read_exception_source_el() -> u32 {
    read_spsr_el1() & 0b1111
}

145
src/application_manager.rs Normal file
View File

@@ -0,0 +1,145 @@
use crate::{
aarch64::mmu::{
find_free_kerne_page_in_block, map_page, physical_mapping::reserve_page, PageTable,
TableEntry, VirtAddr, NORMAL_MEM, TRANSLATIONTABLE_TTBR0, TRANSLATIONTABLE_TTBR1, WRITABLE,
},
configuration::memory_mapping::{APPLICATION_TRANSLATION_TABLE_VA, EL0_STACK_TOP},
};
use alloc::vec::Vec;
use core::{arch::asm, mem, ptr::write_volatile};
use log::error;
use nova_error::NovaError;
use spin::Mutex;
/// Registry of loaded applications.
struct AppManager {
    // `None` until `initialize_app_manager` runs — the const constructor
    // cannot allocate, so the Vec is created lazily once the heap is up.
    apps: Option<Vec<Application>>,
}
impl AppManager {
    const fn new() -> Self {
        Self { apps: None }
    }
}
// SAFETY(review): Application holds a raw pointer (`table_ptr`), which makes
// AppManager !Send by default; Send is asserted here so it can live in the
// global Mutex. Presumably sound while single-core only — confirm before
// enabling additional cores.
unsafe impl Send for AppManager {}
/// A runnable EL0 program.
pub struct Application {
    // Physical address of the app's own TTBR0 level-1 translation table.
    pub table_ptr: *mut TableEntry,
    // Virtual entry point the CPU erets to.
    pub start_addr: usize,
    // Current top of the EL0 stack being prepared.
    pub stack_pointer: usize,
}
impl Application {
    /// Create a new application that will start executing at `start_addr`.
    ///
    /// Reserves one physical frame for the application's own level-1
    /// translation table, maps it into kernel space so it can be written,
    /// and (temporarily) copies the kernel's TTBR0 table so kernel and app
    /// share the identity-mapped regions.
    pub fn new(start_addr: VirtAddr) -> Self {
        let physical_addr = reserve_page();
        let virtual_address =
            find_free_kerne_page_in_block(APPLICATION_TRANSLATION_TABLE_VA).unwrap();
        map_page(
            virtual_address,
            physical_addr,
            core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR1),
            NORMAL_MEM | WRITABLE,
        )
        .unwrap();
        // TODO: Temporary solution, while kernel and app share some memory regions
        #[allow(static_mut_refs)]
        unsafe {
            let table = &mut *(virtual_address as *mut PageTable);
            table.0 = TRANSLATIONTABLE_TTBR0.0;
        }
        Self {
            table_ptr: physical_addr as *mut TableEntry,
            start_addr,
            stack_pointer: EL0_STACK_TOP,
        }
    }

    /// Program the EL1 registers that control the upcoming `eret` into EL0.
    ///
    /// # Safety
    /// Clobbers ELR_EL1, SPSR_EL1, SP_EL0 and TTBR0_EL1; must only be called
    /// immediately before `eret`.
    pub unsafe fn configure_registers(&self) {
        asm!("msr ELR_EL1, {}", in(reg) self.start_addr);
        asm!("msr SPSR_EL1, {0:x}", in(reg) 0);
        asm!("msr SP_EL0, {0:x}", in(reg) self.stack_pointer);
        asm!("msr TTBR0_EL1, {}", in(reg) self.table_ptr as usize);
    }

    /// Starts an application.
    ///
    /// `ELR_EL1` -> Exception Link Register (starting virtual address)
    /// `SPSR_EL1` -> Saved Program State Register (settings for `eret` behaviour)
    /// `SP_EL0` -> Stack Pointer Register (virtual_address of stack Pointer)
    /// `TTBR0_EL1` -> Translation Table base Register Register
    pub fn start(&mut self, args: Vec<&str>) {
        let size = args.len();
        let argv = self.construct_initial_stack(args);
        unsafe {
            self.configure_registers();
            // System V ABI entry convention: x0 = argc, x1 = argv.
            asm!("", in("x0") size, in("x1") argv);
            asm!("eret");
        }
    }

    /// Initializes the stack based on the System V ABI.
    ///
    /// Layout (growing downwards): the argument strings are copied first,
    /// each NUL-terminated as a C `argv` entry requires, then the argv
    /// pointer array is written in ASCENDING order followed by a
    /// terminating NULL pointer. Returns the address of `argv[0]` and
    /// leaves `self.stack_pointer` 16-byte aligned as AArch64 requires.
    ///
    /// Fixes over the previous version: pointers were written downwards
    /// from the returned `argv`, so `argv[1..]` pointed at garbage; strings
    /// had no NUL terminator; the array had no NULL sentinel; and the final
    /// SP was not re-aligned after the pointer writes.
    fn construct_initial_stack(&mut self, args: Vec<&str>) -> usize {
        let argc = args.len();
        let mut arg_addresses = Vec::with_capacity(argc);
        // Copy the argument strings onto the stack, NUL-terminated.
        for value in args {
            self.stack_pointer -= value.len() + 1;
            let pointer = self.stack_pointer as *mut u8;
            unsafe {
                core::ptr::copy(value.as_ptr(), pointer, value.len());
                write_volatile(pointer.add(value.len()), 0u8);
            }
            arg_addresses.push(pointer);
        }
        // TODO: Auxiliry vector entry
        // TODO: Environment pointers
        // Reserve room for argv[0..=argc] (last slot is the NULL terminator)
        // and align down so SP stays 16-byte aligned at entry.
        self.stack_pointer -= (argc + 1) * mem::size_of::<*const u8>();
        self.stack_pointer = align_down(self.stack_pointer, 16);
        let argv = self.stack_pointer;
        // argv[i] must sit at argv + i * size_of::<*const u8>().
        let mut slot = argv as *mut *const u8;
        for addr in arg_addresses {
            unsafe {
                write_volatile(slot, addr);
                slot = slot.add(1);
            }
        }
        unsafe { write_volatile(slot, core::ptr::null()) };
        argv
    }
}
/// Round `sp` down to the nearest multiple of `align`.
/// `align` must be a power of two.
fn align_down(sp: usize, align: usize) -> usize {
    let remainder = sp & (align - 1);
    sp - remainder
}
// Global application registry, protected by a spinlock.
static APP_MANAGER: Mutex<AppManager> = Mutex::new(AppManager::new());
/// Create the application list. Must run after the heap allocator is ready,
/// since the const constructor cannot allocate.
pub fn initialize_app_manager() {
    let mut guard = APP_MANAGER.lock();
    guard.apps = Some(Vec::new());
}
/// Register an application, handing ownership to the manager.
///
/// Fails if `initialize_app_manager` has not been called yet.
pub fn add_app(app: Application) -> Result<(), NovaError> {
    if let Some(app_list) = APP_MANAGER.lock().apps.as_mut() {
        app_list.push(app);
        Ok(())
    } else {
        Err(NovaError::General("AppManager not initalized."))
    }
}
/// Start the application registered at `index`, forwarding `args`.
///
/// On success this never returns: `Application::start` ends in `eret` into
/// EL0. NOTE(review): the APP_MANAGER spinlock guard is still held when the
/// `eret` happens, so any later call that locks the manager will deadlock —
/// confirm whether this is intended.
pub fn start_app(index: usize, args: Vec<&str>) -> Result<(), NovaError> {
    if let Some(app) = APP_MANAGER
        .lock()
        .apps
        .as_mut()
        .and_then(|am| am.get_mut(index))
    {
        app.start(args);
        unreachable!()
    } else {
        error!("Unable to start app due to invalid App ID.");
        Err(NovaError::General("Invalid app id."))
    }
}

80
src/config.S Normal file
View File

@@ -0,0 +1,80 @@
.section .text.config
.align 4
.global el2_to_el1
el2_to_el1:
mov x0, #(1 << 31)
msr HCR_EL2, x0
// Set SPSR_EL2: return to EL1h
mov x0, #(0b0101)
msr SPSR_EL2, x0
// Set return address to kernel_main
adrp x0, KERNEL_VIRTUAL_MEM_SPACE
ldr x1, [x0, :lo12:KERNEL_VIRTUAL_MEM_SPACE]
adrp x0, kernel_main
add x0, x0, :lo12:kernel_main
orr x0, x0, x1
msr ELR_EL2, x0
// Set SP_EL1 to stack base
adrp x0, EL1_STACK_TOP
ldr x1, [x0, :lo12:EL1_STACK_TOP]
msr SP_EL1, x1
// Set VBAR_EL1 to vector table
adrp x0, vector_table
add x0, x0, :lo12:vector_table
msr VBAR_EL1, x0
isb
adrp x0, SCTLR_EL1_CONF
ldr x1, [x0, :lo12:SCTLR_EL1_CONF]
msr SCTLR_EL1, x1
isb
// SIMD should not be trapped
mrs x0, CPACR_EL1
mov x1, #(0b11<<20)
orr x0,x0, x1
msr CPACR_EL1,x0
isb
// Return to EL1
eret
.section .text.config
.align 4
.global configure_mmu_el1
configure_mmu_el1:
// Configure MMU
adrp x0, TCR_EL1_CONF
ldr x1, [x0, :lo12:TCR_EL1_CONF]
msr TCR_EL1, x1
isb
// MAIR0: Normal Mem.
// MAIR1: Device Mem.
mov x0, #0x04FF
msr MAIR_EL1, x0
isb
// Configure translation table
adrp x0, TRANSLATIONTABLE_TTBR0
add x1, x0, :lo12:TRANSLATIONTABLE_TTBR0
msr TTBR0_EL1, x1
adrp x0, TRANSLATIONTABLE_TTBR1
add x1, x0, :lo12:TRANSLATIONTABLE_TTBR1
msr TTBR1_EL1, x1
tlbi vmalle1
dsb ish
isb
ret

View File

@@ -1,16 +1,34 @@
static SCTLR_EL1_MMU_DISABLED: u64 = 0; //M
static SCTLR_EL1_DATA_CACHE_DISABLED: u64 = 0 << 2; //C
static SCTLR_EL1_INSTRUCTION_CACHE_DISABLED: u64 = 0 << 12; //I
static SCTLR_EL1_LITTLE_ENDIAN_EL0: u64 = 0 << 24; //E0E
static SCTLR_EL1_LITTLE_ENDIAN_EL1: u64 = 0 << 25; //EE
const SCTLR_EL1_MMU_ENABLED: u64 = 1; //M
const SCTLR_EL1_DATA_CACHE_DISABLED: u64 = 0 << 2; //C
const SCTLR_EL1_INSTRUCTION_CACHE_DISABLED: u64 = 0 << 12; //I
const SCTLR_EL1_LITTLE_ENDIAN_EL0: u64 = 0 << 24; //E0E
const SCTLR_EL1_LITTLE_ENDIAN_EL1: u64 = 0 << 25; //EE
const SCTLR_EL1_SPAN: u64 = 1 << 23; //SPAN
#[allow(clippy::identity_op)]
static SCTLR_EL1_RES: u64 = (0 << 6) | (1 << 11) | (0 << 17) | (1 << 20) | (1 << 22); //Res0 & Res1
const SCTLR_EL1_RES: u64 = (0 << 6) | (1 << 11) | (0 << 17) | (1 << 20) | (1 << 22); //Res0 & Res1
#[no_mangle]
pub static SCTLR_EL1_CONF: u64 = SCTLR_EL1_MMU_DISABLED
pub static SCTLR_EL1_CONF: u64 = SCTLR_EL1_MMU_ENABLED
| SCTLR_EL1_DATA_CACHE_DISABLED
| SCTLR_EL1_INSTRUCTION_CACHE_DISABLED
| SCTLR_EL1_LITTLE_ENDIAN_EL0
| SCTLR_EL1_LITTLE_ENDIAN_EL1
| SCTLR_EL1_RES;
| SCTLR_EL1_RES
| SCTLR_EL1_SPAN;
const TG0: u64 = 0b00 << 14; // 4KB granularity EL0
const T0SZ: u64 = 25; // 25 Bits of TTBR select -> 39 Bits of VA
const SH0: u64 = 0b11 << 12; // Inner shareable
const TG1: u64 = 0b10 << 30; // 4KB granularity EL1
const T1SZ: u64 = 25 << 16; // 25 Bits of TTBR select -> 39 Bits of VA
const SH1: u64 = 0b11 << 28; // Inner sharable
const IPS: u64 = 0b000 << 32; // 32 bits of PA space -> up to 4GiB
const AS: u64 = 0b1 << 36; // configure an ASID size of 16 bits
#[no_mangle]
pub static TCR_EL1_CONF: u64 = IPS | TG0 | TG1 | T0SZ | T1SZ | SH0 | SH1 | AS;
pub mod memory_mapping;

View File

@@ -0,0 +1,127 @@
use crate::{
aarch64::mmu::{
alloc_block_l2_explicit, allocate_memory, map_page, physical_mapping::reserve_page,
reserve_range, PhysAddr, PhysSource, VirtAddr, DEVICE_MEM, EL0_ACCESSIBLE, GRANULARITY,
KERNEL_VIRTUAL_MEM_SPACE, LEVEL1_BLOCK_SIZE, LEVEL2_BLOCK_SIZE, NORMAL_MEM, PXN, READ_ONLY,
STACK_START_ADDR, TRANSLATIONTABLE_TTBR0, TRANSLATIONTABLE_TTBR1, UXN, WRITABLE,
},
PERIPHERAL_BASE,
};
#[no_mangle]
static EL1_STACK_TOP: usize = STACK_START_ADDR | KERNEL_VIRTUAL_MEM_SPACE;
const EL1_STACK_SIZE: usize = LEVEL2_BLOCK_SIZE * 2;
#[no_mangle]
pub static EL0_STACK_TOP: usize = STACK_START_ADDR;
pub const EL0_STACK_SIZE: usize = LEVEL2_BLOCK_SIZE * 2;
pub const MAILBOX_VIRTUAL_ADDRESS: VirtAddr = 0xFFFF_FF81_FFFF_E000;
pub static mut MAILBOX_PHYSICAL_ADDRESS: Option<PhysAddr> = None;
// TODO: Currently limited to 512 applications, more than enough, but has to be kept
// in mind
pub const APPLICATION_TRANSLATION_TABLE_VA: VirtAddr = 0xFFFF_FF81_FE00_0000;
extern "C" {
static __text_end: u64;
static __share_end: u64;
static __kernel_end: u64;
}
pub fn initialize_mmu_translation_tables() {
let text_end = unsafe { &__text_end } as *const _ as usize;
let shared_segment_end = unsafe { &__share_end } as *const _ as usize;
let kernel_end = unsafe { &__kernel_end } as *const _ as usize;
reserve_range(0x0, kernel_end).unwrap();
for addr in (0..text_end).step_by(GRANULARITY) {
map_page(
addr,
addr,
core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
EL0_ACCESSIBLE | READ_ONLY | NORMAL_MEM,
)
.unwrap();
}
for addr in (0..text_end).step_by(GRANULARITY) {
map_page(
addr | KERNEL_VIRTUAL_MEM_SPACE,
addr,
core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR1),
READ_ONLY | NORMAL_MEM,
)
.unwrap();
}
for addr in (text_end..shared_segment_end).step_by(GRANULARITY) {
map_page(
addr,
addr,
core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
EL0_ACCESSIBLE | WRITABLE | NORMAL_MEM,
)
.unwrap();
}
for addr in (text_end..shared_segment_end).step_by(GRANULARITY) {
map_page(
addr | KERNEL_VIRTUAL_MEM_SPACE,
addr,
core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR1),
EL0_ACCESSIBLE | WRITABLE | NORMAL_MEM,
)
.unwrap();
}
for addr in (PERIPHERAL_BASE..LEVEL1_BLOCK_SIZE).step_by(LEVEL2_BLOCK_SIZE) {
alloc_block_l2_explicit(
addr,
addr,
core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
EL0_ACCESSIBLE | WRITABLE | UXN | PXN | DEVICE_MEM,
)
.unwrap();
}
// Frame Buffer memory range
allocate_memory(
0x3c100000,
1080 * 1920 * 4,
PhysSource::Explicit(0x3c100000),
NORMAL_MEM | PXN | UXN | WRITABLE | EL0_ACCESSIBLE,
)
.unwrap();
// Allocate EL1 stack
allocate_memory(
EL1_STACK_TOP - EL1_STACK_SIZE + 0x10,
EL1_STACK_SIZE,
PhysSource::Any,
WRITABLE | NORMAL_MEM,
)
.unwrap();
// Allocate EL0 stack
allocate_memory(
EL0_STACK_TOP - EL0_STACK_SIZE + 0x10,
EL0_STACK_SIZE,
PhysSource::Any,
WRITABLE | EL0_ACCESSIBLE | NORMAL_MEM,
)
.unwrap();
// Allocate Mailbox buffer
{
let addr = reserve_page();
unsafe { MAILBOX_PHYSICAL_ADDRESS = Some(addr) };
allocate_memory(
MAILBOX_VIRTUAL_ADDRESS,
GRANULARITY,
PhysSource::Explicit(addr),
WRITABLE | NORMAL_MEM,
)
.unwrap();
}
}

92
src/console.rs Normal file
View File

@@ -0,0 +1,92 @@
use alloc::string::String;
use crate::{
application_manager::start_app,
interrupt_handlers::irq::{register_interrupt_handler, IRQSource},
peripherals::uart::read_uart_data,
pi3::mailbox::read_soc_temp,
print, println,
};
// Global singleton console instance; created by `init_terminal` and
// mutated from the UART RX interrupt handler (single-core kernel).
pub static mut TERMINAL: Option<Terminal> = None;
// Line-oriented UART console: accumulates typed characters until Enter,
// then parses and executes the buffered line as a command.
pub struct Terminal {
    // Characters typed since the last executed command (the current line).
    input: String,
}
impl Default for Terminal {
    // Delegates to `Terminal::new` (empty input buffer).
    fn default() -> Self {
        Self::new()
    }
}
impl Terminal {
    /// Creates a terminal with an empty input line buffer.
    pub fn new() -> Self {
        Self {
            input: String::new(),
        }
    }

    /// Redraws the prompt followed by the current (unsubmitted) input.
    fn flush(&mut self) {
        print!("\n> {}", self.input);
    }

    /// Parses and executes the current input line, then clears it.
    ///
    /// Supported commands:
    /// * `temp` — prints the SoC temperature read via the mailbox.
    /// * `app <id> [args…]` — starts the application with the given ID,
    ///   forwarding the remaining whitespace-separated tokens as arguments.
    fn exec(&mut self) {
        print!("\n");
        // Move the line out of the buffer (leaving it empty for the next
        // command) instead of cloning it and clearing twice.
        let line = core::mem::take(&mut self.input);
        let mut parts = line.split(" ");
        // `split` always yields at least one (possibly empty) token.
        match parts.next().unwrap() {
            "temp" => {
                println!("{}", read_soc_temp([0]).unwrap()[1]);
            }
            "app" => {
                if let Some(app_id) = parts.next().and_then(|a| a.parse::<usize>().ok()) {
                    let args = parts.collect();
                    let _ = start_app(app_id, args);
                } else {
                    println!("App ID not set.");
                }
            }
            _ => {
                // Bug fix: the original printed `self.input`, which had
                // already been cleared, so the message always showed an
                // empty command name. Print the captured line instead.
                println!("Unknown command: \"{}\"", line);
            }
        }
    }
}
/// Creates the global `TERMINAL` instance and hooks it up to UART RX IRQs.
pub fn init_terminal() {
    unsafe { TERMINAL = Some(Terminal::new()) };
    register_terminal_interrupt_handler();
}
/// UART receive-interrupt handler feeding the global terminal.
///
/// Carriage return (Enter) executes the buffered line and redraws the
/// prompt; every other character is appended to the line and echoed back.
/// NOTE(review): there is no backspace/delete handling — mistyped
/// characters cannot be removed from the buffer.
fn terminal_uart_rx_interrupt_handler() {
    let input = read_uart_data();
    #[allow(static_mut_refs)]
    if let Some(term) = unsafe { TERMINAL.as_mut() } {
        match input {
            '\r' => {
                term.exec();
                term.flush();
            }
            _ => {
                term.input.push(input);
                // Echo the character so the user sees what they typed.
                print!("{}", input);
            }
        }
    }
}
/// Redraws the prompt of the global terminal, if it has been initialized.
pub fn flush_terminal() {
    #[allow(static_mut_refs)]
    match unsafe { TERMINAL.as_mut() } {
        Some(term) => term.flush(),
        None => {}
    }
}
/// Wires the terminal's RX handler to the UART IRQ line.
fn register_terminal_interrupt_handler() {
    register_interrupt_handler(IRQSource::UartInt, terminal_uart_rx_interrupt_handler);
}

View File

@@ -4,10 +4,8 @@ mod bitmaps;
use bitmaps::BASIC_LEGACY;
use crate::{
mailbox::{read_mailbox, write_mailbox},
println,
};
use crate::pi3::mailbox::{read_mailbox, write_mailbox};
use log::error;
#[repr(align(16))]
struct Mailbox([u32; 36]);
@@ -24,8 +22,8 @@ pub struct FrameBuffer {
pixel_depth: u32, // Bits per pixel
pitch: u32, // Pixel per row
rows: u32, // Rows
start_addr: *mut u32,
size: u32, //Bytes
pub start_addr: *mut u32,
pub size: u32, //Bytes
}
pub const RED: u32 = 0x00FF0000;
@@ -37,6 +35,9 @@ pub const YELLOW: u32 = 0x00FFFF00;
impl FrameBuffer {
pub fn draw_pixel(&self, x: u32, y: u32, color: u32) {
let offset = x + y * self.pitch;
if x >= self.pitch || y >= self.rows {
return;
}
unsafe {
write_volatile(self.start_addr.add(offset as usize), color);
}
@@ -234,7 +235,7 @@ impl Default for FrameBuffer {
let _ = read_mailbox(8);
if mailbox.0[1] == 0 {
println!("Failed");
error!("Mailbox request was not processed!");
}
mailbox.0[28] &= 0x3FFFFFFF;

View File

@@ -1,16 +1,10 @@
use core::arch::asm;
use alloc::vec::Vec;
use crate::{
aarch64::registers::{daif::mask_all, read_esr_el1, read_exception_source_el},
get_current_el,
interrupt_handlers::daif::unmask_irq,
peripherals::{
gpio::{read_gpio_event_detect_status, reset_gpio_event_detect_status},
uart::clear_uart_interrupt_state,
},
println, read_address, write_address,
};
use log::debug;
const INTERRUPT_BASE: u32 = 0x3F00_B000;
const IRQ_PENDING_BASE: u32 = INTERRUPT_BASE + 0x204;
@@ -19,30 +13,29 @@ const DISABLE_IRQ_BASE: u32 = INTERRUPT_BASE + 0x21C;
const GPIO_PENDING_BIT_OFFSET: u64 = 0b1111 << 49;
struct InterruptHandlers {
source: IRQSource,
function: fn(),
}
// TODO: replace with hashmap and check for better alternatives for option
static mut INTERRUPT_HANDLERS: Option<Vec<InterruptHandlers>> = None;
#[derive(Clone)]
#[repr(u32)]
pub enum IRQSource {
AuxInt = 29,
I2cSpiSlvInt = 44,
Pwa0 = 45,
Pwa1 = 46,
Smi = 48,
GpioInt0 = 49,
GpioInt1 = 50,
GpioInt2 = 51,
GpioInt3 = 52,
I2cInt = 53,
SpiInt = 54,
PcmInt = 55,
UartInt = 57,
#[repr(C)]
pub struct TrapFrame {
pub x0: u64,
pub x1: u64,
pub x2: u64,
pub x3: u64,
pub x4: u64,
pub x5: u64,
pub x6: u64,
pub x7: u64,
pub x8: u64,
pub x9: u64,
pub x10: u64,
pub x11: u64,
pub x12: u64,
pub x13: u64,
pub x14: u64,
pub x15: u64,
pub x16: u64,
pub x17: u64,
pub x18: u64,
pub x29: u64,
pub x30: u64,
}
/// Representation of the ESR_ELx registers
@@ -66,215 +59,25 @@ impl From<u32> for EsrElX {
}
}
#[no_mangle]
unsafe extern "C" fn rust_irq_handler() {
daif::mask_all();
let pending_irqs = get_irq_pending_sources();
if pending_irqs & GPIO_PENDING_BIT_OFFSET != 0 {
handle_gpio_interrupt();
let source_el = get_exception_return_exception_level() >> 2;
println!("Source EL: {}", source_el);
println!("Current EL: {}", get_current_el());
println!("Return register address: {:#x}", get_elr_el1());
}
if let Some(handler_vec) = unsafe { INTERRUPT_HANDLERS.as_ref() } {
for handler in handler_vec {
if (pending_irqs & (1 << (handler.source.clone() as u32))) != 0 {
(handler.function)();
clear_interrupt_for_source(handler.source.clone());
}
}
}
}
pub mod irq;
pub mod synchronous;
#[no_mangle]
unsafe extern "C" fn rust_synchronous_interrupt_no_el_change() {
daif::mask_all();
mask_all();
let source_el = get_exception_return_exception_level() >> 2;
println!("--------Sync Exception in EL{}--------", source_el);
println!("No EL change");
println!("Current EL: {}", get_current_el());
println!("{:?}", EsrElX::from(get_esr_el1()));
println!("Return register address: {:#x}", get_elr_el1());
println!("-------------------------------------");
let source_el = read_exception_source_el() >> 2;
debug!("--------Sync Exception in EL{}--------", source_el);
debug!("No EL change");
debug!("Current EL: {}", get_current_el());
debug!("{:?}", EsrElX::from(read_esr_el1()));
debug!("Return register address: {:#x}", read_esr_el1());
debug!("-------------------------------------");
}
/// Synchronous Exception Handler
///
/// Lower Exception level, where the implemented level
/// immediately lower than the target level is using
/// AArch64.
#[no_mangle]
unsafe extern "C" fn rust_synchronous_interrupt_imm_lower_aarch64() {
daif::mask_all();
let source_el = get_exception_return_exception_level() >> 2;
println!("--------Sync Exception in EL{}--------", source_el);
println!("Exception escalated to EL {}", get_current_el());
println!("Current EL: {}", get_current_el());
let esr = EsrElX::from(get_esr_el1());
println!("{:?}", EsrElX::from(esr));
println!("Return register address: {:#x}", get_elr_el1());
match esr.ec {
0b100100 => {
println!("Cause: Data Abort from a lower Exception level");
}
_ => {}
}
println!("-------------------------------------");
set_return_to_kernel_main();
}
fn clear_interrupt_for_source(source: IRQSource) {
match source {
IRQSource::UartInt => clear_uart_interrupt_state(),
_ => {}
}
}
fn set_return_to_kernel_main() {
fn set_return_to_kernel_loop() {
unsafe {
asm!("ldr x0, =kernel_main", "msr ELR_EL1, x0");
asm!("ldr x0, =kernel_loop", "msr ELR_EL1, x0");
asm!("mov x0, #(0b0101)", "msr SPSR_EL1, x0");
}
}
fn get_exception_return_exception_level() -> u32 {
let spsr: u32;
unsafe {
asm!("mrs {0:x}, SPSR_EL1", out(reg) spsr);
}
spsr & 0b1111
}
/// Read the syndrome information that caused an exception
///
/// ESR = Exception Syndrome Register
fn get_esr_el1() -> u32 {
let esr: u32;
unsafe {
asm!(
"mrs {esr:x}, ESR_EL1",
esr = out(reg) esr
);
}
esr
}
/// Read the return address
///
/// ELR = Exception Link Registers
fn get_elr_el1() -> u32 {
let elr: u32;
unsafe {
asm!(
"mrs {esr:x}, ELR_EL1",
esr = out(reg) elr
);
}
elr
}
fn handle_gpio_interrupt() {
println!("Interrupt");
for i in 0..=53u32 {
let val = read_gpio_event_detect_status(i);
if val {
#[allow(clippy::single_match)]
match i {
26 => {
println!("Button Pressed");
}
_ => {}
}
// Reset GPIO Interrupt handler by writing a 1
reset_gpio_event_detect_status(i);
}
}
unmask_irq();
}
/// Enables IRQ Source
pub fn enable_irq_source(state: IRQSource) {
let nr = state as u32;
let register = ENABLE_IRQ_BASE + 4 * (nr / 32);
let register_offset = nr % 32;
let current = unsafe { read_address(register) };
let mask = 0b1 << register_offset;
let new_val = current | mask;
unsafe { write_address(register, new_val) };
}
/// Disable IRQ Source
pub fn disable_irq_source(state: IRQSource) {
let nr = state as u32;
let register = DISABLE_IRQ_BASE + 4 * (nr / 32);
let register_offset = nr % 32;
let current = unsafe { read_address(register) };
let mask = 0b1 << register_offset;
let new_val = current | mask;
unsafe { write_address(register, new_val) };
}
/// Read current IRQ Source status
pub fn read_irq_source_status(state: IRQSource) -> u32 {
let nr = state as u32;
let register = ENABLE_IRQ_BASE + 4 * (nr / 32);
let register_offset = nr % 32;
(unsafe { read_address(register) } >> register_offset) & 0b1
}
/// Status if a IRQ Source is pending
pub fn is_irq_source_pending(state: IRQSource) -> bool {
let nr = state as u32;
let register = IRQ_PENDING_BASE + 4 * (nr / 32);
let register_offset = nr % 32;
((unsafe { read_address(register) } >> register_offset) & 0b1) != 0
}
/// Status if a IRQ Source is pending
pub fn get_irq_pending_sources() -> u64 {
let mut pending = unsafe { read_address(IRQ_PENDING_BASE + 4) as u64 } << 32;
pending |= unsafe { read_address(IRQ_PENDING_BASE) as u64 };
pending
}
pub mod daif {
use core::arch::asm;
#[inline(always)]
pub fn mask_all() {
unsafe { asm!("msr DAIFSet, #0xf", options(nomem, nostack)) }
}
#[inline(always)]
pub fn unmask_all() {
unsafe { asm!("msr DAIFClr, #0xf", options(nomem, nostack)) }
}
#[inline(always)]
pub fn mask_irq() {
unsafe { asm!("msr DAIFSet, #0x2", options(nomem, nostack)) }
}
#[inline(always)]
pub fn unmask_irq() {
unsafe { asm!("msr DAIFClr, #0x2", options(nomem, nostack)) }
}
}
pub fn initialize_interrupt_handler() {
unsafe { INTERRUPT_HANDLERS = Some(Vec::new()) };
}
pub fn register_interrupt_handler(source: IRQSource, function: fn()) {
if let Some(handler_vec) = unsafe { INTERRUPT_HANDLERS.as_mut() } {
handler_vec.push(InterruptHandlers { source, function });
}
}

View File

@@ -0,0 +1,152 @@
use crate::aarch64::registers::read_esr_el1;
use crate::{
aarch64::registers::{
daif::{mask_all, unmask_irq},
read_exception_source_el,
},
get_current_el,
interrupt_handlers::{
DISABLE_IRQ_BASE, ENABLE_IRQ_BASE, GPIO_PENDING_BIT_OFFSET, IRQ_PENDING_BASE,
},
peripherals::{
gpio::{read_gpio_event_detect_status, reset_gpio_event_detect_status},
uart::clear_uart_interrupt_state,
},
read_address, write_address,
};
use alloc::vec::Vec;
use log::{debug, info};
// One registered IRQ callback: the source line it listens on plus the
// function to invoke when that line is pending.
struct InterruptHandlers {
    source: IRQSource,
    function: fn(),
}
// TODO: replace with hashmap and check for better alternatives for option
// Global registry of IRQ callbacks; `None` until
// `initialize_interrupt_handler` has run.
static mut INTERRUPT_HANDLERS: Option<Vec<InterruptHandlers>> = None;
/// IRQ source numbers, used as bit positions in the combined 64-bit
/// pending word returned by `get_irq_pending_sources`.
/// NOTE(review): values appear to follow the BCM283x interrupt register
/// layout — confirm against the peripheral datasheet.
#[derive(Clone)]
#[repr(u32)]
pub enum IRQSource {
    AuxInt = 29,
    I2cSpiSlvInt = 44,
    Pwa0 = 45,
    Pwa1 = 46,
    Smi = 48,
    GpioInt0 = 49,
    GpioInt1 = 50,
    GpioInt2 = 51,
    GpioInt3 = 52,
    I2cInt = 53,
    SpiInt = 54,
    PcmInt = 55,
    UartInt = 57,
}
/// Allocates the (initially empty) global IRQ handler registry.
/// Must run before any `register_interrupt_handler` call takes effect.
#[inline(always)]
pub fn initialize_interrupt_handler() {
    unsafe { INTERRUPT_HANDLERS = Some(Vec::new()) };
}
/// Registers `function` to be invoked whenever `source` raises an IRQ.
///
/// Silently does nothing if `initialize_interrupt_handler` has not run.
pub fn register_interrupt_handler(source: IRQSource, function: fn()) {
    // SAFETY: single mutable access via raw pointer to the registry.
    // NOTE(review): assumes no IRQ dispatch races this push — confirm.
    let registry = unsafe { &mut *core::ptr::addr_of_mut!(INTERRUPT_HANDLERS) };
    if let Some(handlers) = registry.as_mut() {
        handlers.push(InterruptHandlers { source, function });
    }
}
/// Top-level IRQ entry point, called from the assembly vector table.
///
/// Masks all interrupts, handles latched GPIO events, then dispatches to
/// every registered handler whose IRQ line is pending and acknowledges it.
#[no_mangle]
unsafe extern "C" fn rust_irq_handler() {
    mask_all();
    let pending_irqs = get_irq_pending_sources();
    if pending_irqs & GPIO_PENDING_BIT_OFFSET != 0 {
        handle_gpio_interrupt();
        let source_el = read_exception_source_el() >> 2;
        debug!("Source EL: {}", source_el);
        debug!("Current EL: {}", get_current_el());
        // Bug fix: this value is read from ESR_EL1 (exception syndrome),
        // not ELR_EL1, so label it as such instead of "return address".
        debug!("ESR_EL1: {:#x}", read_esr_el1());
    }
    if let Some(handler_vec) = unsafe { &*core::ptr::addr_of_mut!(INTERRUPT_HANDLERS) } {
        for handler in handler_vec {
            if (pending_irqs & (1 << (handler.source.clone() as u32))) != 0 {
                (handler.function)();
                clear_interrupt_for_source(handler.source.clone());
            }
        }
    }
}
/// Scans all GPIO lines for latched events, reacts to known ones, and
/// acknowledges each by writing its event-detect status back.
fn handle_gpio_interrupt() {
    debug!("GPIO interrupt triggered");
    for pin in 0..=53u32 {
        if !read_gpio_event_detect_status(pin) {
            continue;
        }
        if pin == 26 {
            info!("Button Pressed");
        }
        // Reset GPIO Interrupt handler by writing a 1
        reset_gpio_event_detect_status(pin);
    }
    // NOTE(review): confirm unmasking IRQs here (rather than in the
    // caller, which masked all interrupts) is intentional.
    unmask_irq();
}
/// Enables the given IRQ source in the interrupt controller.
pub fn enable_irq_source(state: IRQSource) {
    let irq_number = state as u32;
    // Each enable register covers 32 IRQ lines; select register and bit.
    let register = ENABLE_IRQ_BASE + (irq_number / 32) * 4;
    let bit = 0b1 << (irq_number % 32);
    unsafe {
        let value = read_address(register);
        write_address(register, value | bit);
    }
}
/// Disables the given IRQ source in the interrupt controller.
pub fn disable_irq_source(state: IRQSource) {
    let irq_number = state as u32;
    // Each disable register covers 32 IRQ lines; select register and bit.
    let register = DISABLE_IRQ_BASE + (irq_number / 32) * 4;
    let bit = 0b1 << (irq_number % 32);
    unsafe {
        let value = read_address(register);
        write_address(register, value | bit);
    }
}
/// Returns 1 if the given IRQ source is currently enabled, else 0.
pub fn read_irq_source_status(state: IRQSource) -> u32 {
    let irq_number = state as u32;
    let register = ENABLE_IRQ_BASE + (irq_number / 32) * 4;
    let value = unsafe { read_address(register) };
    (value >> (irq_number % 32)) & 0b1
}
/// Returns whether the given IRQ source is currently pending.
pub fn is_irq_source_pending(state: IRQSource) -> bool {
    let irq_number = state as u32;
    let register = IRQ_PENDING_BASE + (irq_number / 32) * 4;
    let value = unsafe { read_address(register) };
    (value >> (irq_number % 32)) & 0b1 == 1
}
/// Returns all pending-IRQ bits as one 64-bit word: sources 32–63 in the
/// upper half, sources 0–31 in the lower half.
pub fn get_irq_pending_sources() -> u64 {
    // Read order preserved from the original: upper word first.
    let high = unsafe { read_address(IRQ_PENDING_BASE + 4) } as u64;
    let low = unsafe { read_address(IRQ_PENDING_BASE) } as u64;
    (high << 32) | low
}
fn clear_interrupt_for_source(source: IRQSource) {
match source {
IRQSource::UartInt => clear_uart_interrupt_state(),
_ => {
todo!()
}
}
}

View File

@@ -0,0 +1,116 @@
use crate::{
aarch64::registers::{daif::mask_all, read_elr_el1, read_esr_el1, read_exception_source_el},
get_current_el,
interrupt_handlers::{set_return_to_kernel_loop, EsrElX, TrapFrame},
pi3::mailbox,
};
use log::{debug, error, warn};
/// Synchronous Exception Handler
///
/// Source is a lower Exception level, where the implemented level
/// immediately lower than the target level is using
/// AArch64.
///
/// Returns the value to be placed in `x0` on exception return (the SVC
/// result for syscalls, `0` otherwise).
#[no_mangle]
unsafe extern "C" fn rust_synchronous_interrupt_imm_lower_aarch64(frame: &mut TrapFrame) -> usize {
    mask_all();
    let esr: EsrElX = EsrElX::from(read_esr_el1());
    debug!("Synchronous interrupt from lower EL triggered");
    log_sync_exception();
    // Dispatch on the Exception Class (EC) field of ESR_EL1.
    match esr.ec {
        0b100100 => {
            error!("Data Abort from a lower Exception level");
            error!("Cause: {}", decode_data_abort(esr.iss as usize));
        }
        0b010101 => {
            debug!("SVC instruction execution in AArch64");
            // Syscalls return their result directly; every other case
            // falls through to the "unhandled" path below.
            return handle_svc(frame);
        }
        0b100010 => {
            error!("PC alignment fault.");
        }
        _ => {
            error!("Synchronous interrupt: Unknown Error Code: {:b}", esr.ec);
        }
    }
    // Unhandled: redirect the exception return into the kernel loop so
    // we never resume the faulting application code.
    warn!("UnhandledException -> Returning to kernel...");
    set_return_to_kernel_loop();
    0
}
/// Translates the Data Fault Status Code (low 6 bits of a Data Abort
/// ISS) into a human-readable description.
fn decode_data_abort(iss: usize) -> &'static str {
    let dfsc = iss & 0x3F;
    match dfsc {
        0x00 => "Address size fault, level 0",
        0x01 => "Address size fault, level 1",
        0x02 => "Address size fault, level 2",
        0x03 => "Address size fault, level 3",
        0x04 => "Translation fault, level 0",
        0x05 => "Translation fault, level 1",
        0x06 => "Translation fault, level 2",
        0x07 => "Translation fault, level 3",
        0x09 => "Access flag fault, level 1",
        0x0A => "Access flag fault, level 2",
        0x0B => "Access flag fault, level 3",
        0x0D => "Permission fault, level 1",
        0x0E => "Permission fault, level 2",
        0x0F => "Permission fault, level 3",
        0x10 => "Synchronous External abort, not on translation table walk",
        0x14 => "Synchronous External abort, on translation table walk, level 0",
        0x15 => "Synchronous External abort, on translation table walk, level 1",
        0x16 => "Synchronous External abort, on translation table walk, level 2",
        0x17 => "Synchronous External abort, on translation table walk, level 3",
        0x18 => {
            "Synchronous parity or ECC error on memory access, not on translation table walk"
        }
        0x1C => "Synchronous parity or ECC error on translation table walk, level 0",
        0x1D => "Synchronous parity or ECC error on translation table walk, level 1",
        0x1E => "Synchronous parity or ECC error on translation table walk, level 2",
        0x1F => "Synchronous parity or ECC error on translation table walk, level 3",
        0x21 => "Alignment fault",
        0x30 => "TLB conflict abort",
        0x31 => "Unsupported atomic hardware update fault",
        0x34 => "IMPLEMENTATION DEFINED fault (Lockdown)",
        0x35 => "IMPLEMENTATION DEFINED fault (Unsupported Exclusive or Atomic access)",
        0x3D => "Section Domain Fault",
        0x3E => "Page Domain Fault",
        _ => "Reserved / Unknown",
    }
}
/// Dispatches an SVC (syscall) by number, taken from `x8` per the
/// AArch64 calling convention; the returned value is handed back in `x0`.
fn handle_svc(frame: &mut TrapFrame) -> usize {
    match frame.x8 {
        0 => {
            // Syscall 0: program exit — redirect the exception return to
            // the kernel loop instead of resuming the application.
            debug!("Program exited!");
            set_return_to_kernel_loop();
            0
        }
        67 => {
            // Syscall 67: read the SoC temperature via the mailbox.
            // NOTE(review): `unwrap` panics the kernel if the mailbox
            // request fails — consider returning an error code instead.
            let response = mailbox::read_soc_temp([0]).unwrap();
            response[1] as usize
        }
        // Unknown syscall numbers are silently ignored and return 0.
        _ => 0,
    }
}
/// Logs the standard diagnostic block for a synchronous exception:
/// source EL, current EL, decoded syndrome, and return address (ELR_EL1).
fn log_sync_exception() {
    // The source EL sits in bits [3:2] of the saved program status.
    let source_el = read_exception_source_el() >> 2;
    debug!("--------Sync Exception in EL{}--------", source_el);
    debug!("Exception escalated to EL {}", get_current_el());
    debug!("Current EL: {}", get_current_el());
    let esr: EsrElX = EsrElX::from(read_esr_el1());
    debug!("{:?}", esr);
    debug!("Return address: {:#x}", read_elr_el1());
    debug!("-------------------------------------");
}

View File

@@ -1,227 +0,0 @@
use core::arch::asm;
use crate::{
get_current_el,
irq_interrupt::daif::unmask_irq,
mmio_read, mmio_write,
peripherals::gpio::{read_gpio_event_detect_status, reset_gpio_event_detect_status},
};
const INTERRUPT_BASE: u32 = 0x3F00_B000;
const IRQ_PENDING_BASE: u32 = INTERRUPT_BASE + 0x204;
const ENABLE_IRQ_BASE: u32 = INTERRUPT_BASE + 0x210;
const DISABLE_IRQ_BASE: u32 = INTERRUPT_BASE + 0x21C;
#[repr(u32)]
pub enum IRQState {
AuxInt = 29,
I2cSpiSlvInt = 44,
Pwa0 = 45,
Pwa1 = 46,
Smi = 48,
GpioInt0 = 49,
GpioInt1 = 50,
GpioInt2 = 51,
GpioInt3 = 52,
I2cInt = 53,
SpiInt = 54,
PcmInt = 55,
UartInt = 57,
}
/// Decoded view of an ESR_ELx (Exception Syndrome Register) value.
///
/// Reference: D1.10.4
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
struct EsrElX {
    /// Exception Class, bits [31:26].
    ec: u32,
    /// Instruction Length flag, bit [25].
    il: u32,
    /// Instruction Specific Syndrome, bits [24:0].
    iss: u32,
}

impl From<u32> for EsrElX {
    fn from(raw: u32) -> Self {
        let ec = raw >> 26;
        let il = (raw >> 25) & 0b1;
        let iss = raw & 0x1FF_FFFF;
        EsrElX { ec, il, iss }
    }
}
#[no_mangle]
unsafe extern "C" fn rust_irq_handler() {
daif::mask_all();
handle_gpio_interrupt();
let source_el = get_exception_return_exception_level() >> 2;
println!("Source EL: {}", source_el);
println!("Current EL: {}", get_current_el());
println!("Return register address: {:#x}", get_elr_el1());
}
#[no_mangle]
unsafe extern "C" fn rust_synchronous_interrupt_no_el_change() {
daif::mask_all();
let source_el = get_exception_return_exception_level() >> 2;
println!("--------Sync Exception in EL{}--------", source_el);
println!("No EL change");
println!("Current EL: {}", get_current_el());
println!("{:?}", EsrElX::from(get_esr_el1()));
println!("Return register address: {:#x}", get_elr_el1());
println!("-------------------------------------");
}
/// Synchronous Exception Handler
///
/// Lower Exception level, where the implemented level
/// immediately lower than the target level is using
/// AArch64.
#[no_mangle]
unsafe extern "C" fn rust_synchronous_interrupt_imm_lower_aarch64() {
daif::mask_all();
let source_el = get_exception_return_exception_level() >> 2;
println!("--------Sync Exception in EL{}--------", source_el);
println!("Exception escalated to EL {}", get_current_el());
println!("Current EL: {}", get_current_el());
let esr = EsrElX::from(get_esr_el1());
println!("{:?}", EsrElX::from(esr));
println!("Return register address: {:#x}", get_elr_el1());
match esr.ec {
0b100100 => {
println!("Cause: Data Abort from a lower Exception level");
}
_ => {}
}
println!("-------------------------------------");
set_return_to_kernel_main();
}
fn set_return_to_kernel_main() {
unsafe {
asm!("ldr x0, =kernel_main", "msr ELR_EL1, x0");
asm!("mov x0, #(0b0101)", "msr SPSR_EL1, x0");
}
}
/// Reads the exception level an exception was taken from.
///
/// Returns the raw SPSR_EL1 mode bits M[3:0]; callers shift right by 2
/// to obtain the EL number.
fn get_exception_return_exception_level() -> u32 {
    let spsr: u32;
    unsafe {
        asm!("mrs {0:x}, SPSR_EL1", out(reg) spsr);
    }
    spsr & 0b1111
}
/// Read the syndrome information that caused an exception
///
/// ESR = Exception Syndrome Register
fn get_esr_el1() -> u32 {
    let esr: u32;
    unsafe {
        // Read ESR_EL1 into a 32-bit value (the syndrome fields decoded
        // by `EsrElX` all live in the low 32 bits).
        asm!(
            "mrs {esr:x}, ESR_EL1",
            esr = out(reg) esr
        );
    }
    esr
}
/// Read the return address
///
/// ELR = Exception Link Registers
///
/// NOTE(review): ELR_EL1 is a 64-bit register but is read into a `u32`
/// here, truncating the upper half of the return address — confirm all
/// return addresses fit in 32 bits before relying on this value.
fn get_elr_el1() -> u32 {
    let elr: u32;
    unsafe {
        asm!(
            "mrs {esr:x}, ELR_EL1",
            esr = out(reg) elr
        );
    }
    elr
}
fn handle_gpio_interrupt() {
println!("Interrupt");
for i in 0..=53u32 {
let val = read_gpio_event_detect_status(i);
if val {
#[allow(clippy::single_match)]
match i {
26 => {
println!("Button Pressed");
}
_ => {}
}
// Reset GPIO Interrupt handler by writing a 1
reset_gpio_event_detect_status(i);
}
}
unmask_irq();
}
/// Enables IRQ Source
pub fn enable_irq_source(state: IRQState) {
let nr = state as u32;
let register = ENABLE_IRQ_BASE + 4 * (nr / 32);
let register_offset = nr % 32;
let current = mmio_read(register);
let mask = 0b1 << register_offset;
let new_val = current | mask;
mmio_write(register, new_val);
}
/// Disable IRQ Source
pub fn disable_irq_source(state: IRQState) {
let nr = state as u32;
let register = DISABLE_IRQ_BASE + 4 * (nr / 32);
let register_offset = nr % 32;
let current = mmio_read(register);
let mask = 0b1 << register_offset;
let new_val = current | mask;
mmio_write(register, new_val);
}
/// Read current IRQ Source status
pub fn read_irq_source_status(state: IRQState) -> u32 {
let nr = state as u32;
let register = ENABLE_IRQ_BASE + 4 * (nr / 32);
let register_offset = nr % 32;
(mmio_read(register) >> register_offset) & 0b1
}
/// Status if a IRQ Source is enabled
pub fn read_irq_pending(state: IRQState) -> bool {
let nr = state as u32;
let register = IRQ_PENDING_BASE + 4 * (nr / 32);
let register_offset = nr % 32;
((mmio_read(register) >> register_offset) & 0b1) != 0
}
/// Helpers for the DAIF exception-mask bits
/// (D = debug, A = SError, I = IRQ, F = FIQ).
pub mod daif {
    use core::arch::asm;

    /// Masks all four exception classes (DAIFSet immediate 0xF).
    #[inline(always)]
    pub fn mask_all() {
        unsafe { asm!("msr DAIFSet, #0xf", options(nomem, nostack)) }
    }

    /// Unmasks all four exception classes.
    #[inline(always)]
    pub fn unmask_all() {
        unsafe { asm!("msr DAIFClr, #0xf", options(nomem, nostack)) }
    }

    /// Masks only IRQs (the I bit, immediate 0x2).
    #[inline(always)]
    pub fn mask_irq() {
        unsafe { asm!("msr DAIFSet, #0x2", options(nomem, nostack)) }
    }

    /// Unmasks only IRQs.
    #[inline(always)]
    pub fn unmask_irq() {
        unsafe { asm!("msr DAIFClr, #0x2", options(nomem, nostack)) }
    }
}

View File

@@ -3,51 +3,64 @@
extern crate alloc;
use alloc::boxed::Box;
use core::{
arch::asm,
panic::PanicInfo,
ptr::{read_volatile, write_volatile},
};
use log::LevelFilter;
use log::{Level, Metadata, Record};
use heap::Heap;
use crate::{interrupt_handlers::initialize_interrupt_handler, logger::DefaultLogger};
use crate::{
aarch64::mmu::{
allocate_memory, PhysSource, KERNEL_VIRTUAL_MEM_SPACE, LEVEL2_BLOCK_SIZE, NORMAL_MEM, UXN,
WRITABLE,
},
application_manager::initialize_app_manager,
console::{flush_terminal, init_terminal},
interrupt_handlers::irq::initialize_interrupt_handler,
pi3::timer::sleep_s,
};
static PERIPHERAL_BASE: u32 = 0x3F00_0000;
static LOGGER: UartLogger = UartLogger;
static PERIPHERAL_BASE: usize = 0x3F00_0000;
unsafe extern "C" {
unsafe static mut __heap_start: u8;
unsafe static mut __heap_end: u8;
unsafe static mut __kernel_end: u8;
}
#[global_allocator]
pub static mut GLOBAL_ALLOCATOR: Heap = Heap::empty();
pub unsafe fn init_heap() {
let start = core::ptr::addr_of_mut!(__heap_start) as usize;
let end = core::ptr::addr_of_mut!(__heap_end) as usize;
pub unsafe fn initialize_kernel_heap() {
let start = core::ptr::addr_of_mut!(__kernel_end) as usize | KERNEL_VIRTUAL_MEM_SPACE;
let size = LEVEL2_BLOCK_SIZE * 2;
allocate_memory(start, size, PhysSource::Any, NORMAL_MEM | UXN | WRITABLE).unwrap();
let heap = core::ptr::addr_of_mut!(GLOBAL_ALLOCATOR);
(*heap).init(start, end);
(*heap).init(start, start + size);
}
#[panic_handler]
fn panic(_panic: &PanicInfo) -> ! {
loop {
println!("Panic");
println!("Panic: {}", _panic.message());
sleep_s(1);
}
}
pub mod peripherals;
pub mod aarch64;
pub mod configuration;
pub mod framebuffer;
pub mod interrupt_handlers;
pub mod logger;
pub mod mailbox;
pub mod power_management;
pub mod timer;
pub mod application_manager;
pub mod console;
pub mod pi3;
#[inline(always)]
pub unsafe fn read_address(address: u32) -> u32 {
@@ -72,6 +85,33 @@ pub fn get_current_el() -> u64 {
}
pub fn initialize_kernel() {
logger::set_logger(Box::new(DefaultLogger));
unsafe { initialize_kernel_heap() };
initialize_interrupt_handler();
initialize_app_manager();
init_terminal();
}
// Logger backend that forwards `log` crate records to the UART console.
struct UartLogger;

impl log::Log for UartLogger {
    // Accept every record at Debug level or above.
    fn enabled(&self, metadata: &Metadata) -> bool {
        metadata.level() <= Level::Debug
    }

    fn log(&self, record: &Record) {
        if self.enabled(record.metadata()) {
            println!("{} - {}", record.level(), record.args());
            // Redraw the terminal prompt after Info-and-above output so
            // log lines do not leave the prompt clobbered.
            if record.level() <= Level::Info {
                flush_terminal();
            }
        }
    }

    // Output is unbuffered; each record is written immediately.
    fn flush(&self) {}
}
/// Installs `LOGGER` as the global `log` backend with `Debug` max level.
///
/// Panics if a global logger has already been set.
pub fn init_logger() {
    log::set_logger(&LOGGER)
        .map(|()| log::set_max_level(LevelFilter::Debug))
        .unwrap();
}

View File

@@ -1,47 +0,0 @@
use core::fmt::Write;
use alloc::{boxed::Box, fmt};
use crate::peripherals::uart;
static mut LOGGER: Option<Box<dyn Logger>> = None;
pub trait Logger: Write + Sync {
fn flush(&mut self);
}
pub struct DefaultLogger;
impl Logger for DefaultLogger {
fn flush(&mut self) {}
}
impl Write for DefaultLogger {
fn write_str(&mut self, s: &str) -> core::fmt::Result {
uart::Uart.write_str(s)
}
}
#[macro_export]
macro_rules! log {
() => {};
($($arg:tt)*) => {
$crate::logger::log(format_args!($($arg)*))
};
}
/// Writes `args` to the active logger (if one is set), preceded by a
/// newline, then flushes it. Does nothing before `set_logger` is called.
pub fn log(args: fmt::Arguments) {
    unsafe {
        if let Some(logger) = LOGGER.as_mut() {
            logger.write_str("\n").unwrap();
            logger.write_fmt(args).unwrap();
            logger.flush();
        }
    }
}
/// Replaces the global logger used by the `log!` macro.
pub fn set_logger(logger: Box<dyn Logger>) {
    unsafe {
        LOGGER = Some(logger);
    }
}

View File

@@ -1,21 +1,23 @@
#![no_main]
#![no_std]
#![feature(asm_experimental_arch)]
#![allow(static_mut_refs)]
#![allow(clippy::missing_safety_doc)]
use core::{
arch::{asm, global_asm},
ptr::write_volatile,
};
use log::{debug, info};
extern crate alloc;
use alloc::boxed::Box;
use alloc::{slice, vec::Vec};
use nova::{
aarch64::registers::{daif, read_id_aa64mmfr0_el1},
application_manager::{add_app, Application},
configuration::memory_mapping::initialize_mmu_translation_tables,
framebuffer::{FrameBuffer, BLUE, GREEN, RED},
get_current_el, init_heap,
interrupt_handlers::{daif, enable_irq_source, IRQSource},
log, mailbox,
get_current_el, init_logger,
interrupt_handlers::irq::{enable_irq_source, IRQSource},
peripherals::{
gpio::{
blink_gpio, gpio_pull_up, set_falling_edge_detect, set_gpio_function, GPIOFunction,
@@ -23,14 +25,18 @@ use nova::{
},
uart::uart_init,
},
println,
pi3::timer::sleep_s,
print, println,
};
global_asm!(include_str!("vector.S"));
global_asm!(include_str!("config.S"));
static mut FRAMEBUFFER: Option<FrameBuffer> = None;
extern "C" {
fn el2_to_el1();
fn el1_to_el0();
fn configure_mmu_el1();
static mut __bss_start: u32;
static mut __bss_end: u32;
}
@@ -56,15 +62,23 @@ pub extern "C" fn main() -> ! {
// Set ACT Led to Outout
let _ = set_gpio_function(21, GPIOFunction::Output);
init_logger();
println!("Hello World!");
println!("Exception level: {}", get_current_el());
info!("Hello World!");
info!("Current exception level: {}", get_current_el());
info!("initializing MMU...");
initialize_mmu_translation_tables();
unsafe { configure_mmu_el1() };
info!("MMU configured!");
debug!("Register: AA64MMFR0_EL1: {:064b}", read_id_aa64mmfr0_el1());
info!("Moving El2->EL1");
unsafe { FRAMEBUFFER = Some(FrameBuffer::default()) };
unsafe {
asm!("mrs x0, SCTLR_EL1",);
el2_to_el1();
}
#[allow(clippy::empty_loop)]
loop {}
}
@@ -78,24 +92,50 @@ unsafe fn zero_bss() {
}
#[no_mangle]
pub extern "C" fn kernel_main() -> ! {
pub extern "C" fn kernel_main() {
nova::initialize_kernel();
println!("Kernel Main");
println!("Exception Level: {}", get_current_el());
info!("Kernel Initialized...");
info!("Current exception Level: {}", get_current_el());
let mut test_vector = Vec::new();
for i in 0..20 {
test_vector.push(i);
}
debug!("heap allocation test: {:?}", test_vector);
enable_irq_source(IRQSource::UartInt);
let app = Application::new(el0 as *const () as usize);
add_app(app).unwrap();
kernel_loop();
}
#[no_mangle]
pub extern "C" fn kernel_loop() {
daif::unmask_all();
unsafe {
init_heap();
el1_to_el0();
};
#[allow(clippy::empty_loop)]
loop {}
}
#[no_mangle]
pub extern "C" fn el0() -> ! {
pub unsafe extern "C" fn el0(argc: usize, argv: *const *const u8) {
println!("Jumped into EL0");
println!("num: {}", argc);
println!("argv: {:?}", argv);
let raw_args = unsafe { slice::from_raw_parts(argv, argc) };
let first_arg = raw_args
.iter()
.map(|&arg_ptr| {
if arg_ptr.is_null() {
return "";
}
let c_str = unsafe { core::ffi::CStr::from_ptr(arg_ptr) };
let str_slice = c_str.to_str().unwrap();
str_slice
})
.next();
sleep_s(1);
// Set GPIO 26 to Input
enable_irq_source(IRQSource::GpioInt0); //26 is on the first GPIO bank
@@ -103,27 +143,45 @@ pub extern "C" fn el0() -> ! {
gpio_pull_up(26);
set_falling_edge_detect(26, true);
enable_irq_source(IRQSource::UartInt);
if let Some(fb) = unsafe { FRAMEBUFFER.as_mut() } {
for i in 0..1080 {
fb.draw_pixel(50, i, BLUE);
}
fb.draw_square(500, 500, 600, 700, RED);
fb.draw_square_fill(800, 800, 900, 900, GREEN);
fb.draw_square_fill(1000, 800, 1200, 700, BLUE);
fb.draw_square_fill(900, 100, 800, 150, RED | BLUE);
fb.draw_string("Hello World! :D\nTest next Line", 500, 5, 3, BLUE);
let fb = FrameBuffer::default();
fb.draw_square(500, 500, 600, 700, RED);
fb.draw_square_fill(800, 800, 900, 900, GREEN);
fb.draw_square_fill(1000, 800, 1200, 700, BLUE);
fb.draw_square_fill(900, 100, 800, 150, RED | BLUE);
fb.draw_string("Hello World! :D\nTest next Line", 500, 5, 3, BLUE);
fb.draw_function(cos, 100, 101, RED);
loop {
let temp = mailbox::read_soc_temp([0]).unwrap();
log!("{} °C", temp[1] / 1000);
blink_gpio(SpecificGpio::OnboardLed as u8, 500);
let b = Box::new([1, 2, 3, 4]);
log!("{:?}", b);
fb.draw_function(cos, 0, 101, RED);
}
let _temp = syscall(67);
if let Some(num) = first_arg.and_then(|val| val.parse::<usize>().ok()) {
println!("Calculting prime to: {}", num);
for i in 3..num {
let mut is_prime = true;
for j in 3..i {
if i == j {
continue;
}
if i % j == 0 {
is_prime = false;
}
}
if is_prime {
print!("{} ", i);
}
}
println!("");
} else {
println!("Input NaN");
}
blink_gpio(SpecificGpio::OnboardLed as u8, 500);
syscall(0);
}
fn cos(x: u32) -> f64 {
@@ -136,3 +194,17 @@ fn enable_uart() {
let _ = set_gpio_function(15, GPIOFunction::Alternative0);
uart_init();
}
/// Issues a supervisor call (`svc #0`) with the syscall number in `x8`;
/// the kernel's result is returned in `x0`.
///
/// NOTE(review): only `x0` is declared as an output — confirm the EL1
/// SVC handler preserves all other caller-visible registers.
pub fn syscall(nr: u64) -> u64 {
    let ret: u64;
    unsafe {
        asm!(
            "svc #0",
            in("x8") nr,
            lateout("x0") ret,
        );
    }
    ret
}

View File

@@ -2,7 +2,7 @@ use core::result::Result;
use core::result::Result::Ok;
use core::sync::atomic::{compiler_fence, Ordering};
use crate::timer::{delay_nops, sleep_ms};
use crate::pi3::timer::{delay_nops, sleep_ms};
use crate::{read_address, write_address};
const GPFSEL_BASE: u32 = 0x3F20_0000;

View File

@@ -3,7 +3,7 @@ use core::{
fmt::{self, Write},
};
use crate::{println, read_address, write_address};
use crate::{read_address, write_address};
const BAUD: u32 = 115200;
const UART_CLK: u32 = 48_000_000;
@@ -118,11 +118,13 @@ fn uart_fifo_enable(enable: bool) {
unsafe { write_address(UART0_LCRH, lcrh) };
}
/// Enables the UART receive interrupt.
/// NOTE(review): this writes IMSC outright rather than read-modify-write,
/// so any other previously set interrupt mask bits are cleared — confirm
/// that is intended.
#[inline(always)]
fn uart_enable_rx_interrupt() {
    unsafe { write_address(UART0_IMSC, UART0_IMSC_RXIM) };
}
/// Set UART word length and set FIFO status
#[inline(always)]
fn uart_set_lcrh(wlen: u32, enable_fifo: bool) {
let mut value = (wlen & 0b11) << 5;
if enable_fifo {
@@ -131,10 +133,12 @@ fn uart_set_lcrh(wlen: u32, enable_fifo: bool) {
unsafe { write_address(UART0_LCRH, value) };
}
/// Reads one byte from the UART data register and returns it as a char.
/// NOTE(review): error flags in DR bits [11:8] are discarded, so framing
/// or overrun errors go undetected here.
#[inline(always)]
pub fn read_uart_data() -> char {
    (unsafe { read_address(UART0_DR) } & 0xFF) as u8 as char
}
#[inline(always)]
pub fn clear_uart_interrupt_state() {
unsafe {
write_address(UART0_ICR, 1 << 4);

View File

@@ -1,4 +1,9 @@
use crate::{read_address, write_address};
use core::slice;
use crate::{
aarch64::mmu::GRANULARITY, configuration::memory_mapping::MAILBOX_PHYSICAL_ADDRESS,
configuration::memory_mapping::MAILBOX_VIRTUAL_ADDRESS, read_address, write_address,
};
use nova_error::NovaError;
const MBOX_BASE: u32 = 0x3F00_0000 + 0xB880;
@@ -31,8 +36,9 @@ macro_rules! mailbox_command {
pub fn $name(
request_data: [u32; $request_len / 4],
) -> Result<[u32; $response_len / 4], NovaError> {
let mut mailbox =
[0u32; (HEADER_LENGTH + max!($request_len, $response_len) + FOOTER_LENGTH) / 4];
let mailbox = unsafe {
slice::from_raw_parts_mut(MAILBOX_VIRTUAL_ADDRESS as *mut u32, GRANULARITY / 4)
};
mailbox[0] = (HEADER_LENGTH + max!($request_len, $response_len) + FOOTER_LENGTH) as u32; // Total length in Bytes
mailbox[1] = 0; // Request
mailbox[2] = $tag; // Command Tag
@@ -42,9 +48,9 @@ macro_rules! mailbox_command {
mailbox[5..(5 + ($request_len / 4))].copy_from_slice(&request_data);
mailbox[(5 + ($request_len / 4))..].fill(0);
let addr = core::ptr::addr_of!(mailbox[0]) as u32;
//let addr = core::ptr::addr_of!(mailbox[0]) as u32;
write_mailbox(8, addr);
write_mailbox(8, unsafe { MAILBOX_PHYSICAL_ADDRESS.unwrap() } as u32);
let _ = read_mailbox(8);

3
src/pi3/mod.rs Normal file
View File

@@ -0,0 +1,3 @@
pub mod mailbox;
pub mod power_management;
pub mod timer;

View File

@@ -3,7 +3,7 @@ use core::ptr::{read_volatile, write_volatile};
use crate::PERIPHERAL_BASE;
/// Power Management Base
static PM_BASE: u32 = PERIPHERAL_BASE + 0x10_0000;
static PM_BASE: u32 = PERIPHERAL_BASE as u32 + 0x10_0000;
static PM_RSTC: u32 = PM_BASE + 0x1c;
static PM_WDOG: u32 = PM_BASE + 0x24;
@@ -23,5 +23,6 @@ pub fn reboot_system() {
PM_PASSWORD | (pm_rstc_val & PM_RSTC_WRCFG_CLR) | PM_RSTC_WRCFG_FULL_RESET,
);
}
#[allow(clippy::empty_loop)]
loop {}
}

View File

@@ -1,53 +0,0 @@
use core::fmt::Write;
use alloc::string::String;
use nova::{
interrupt_handlers::register_interrupt_handler, logger::Logger,
peripherals::uart::read_uart_data, print, println,
};
/// Line-buffered console over UART: output accumulates in `buffer` and
/// the pending input line is re-echoed after each flush.
pub struct Terminal {
    // Output text accumulated via `Write::write_str` until flushed.
    buffer: String,
    // Current not-yet-submitted input line, re-printed after "> " on flush.
    input: String,
}
impl Terminal {
    /// Create a terminal with empty output and input buffers.
    pub fn new() -> Self {
        let buffer = String::new();
        let input = String::new();
        Self { buffer, input }
    }

    /// Emit the buffered output, re-echo the prompt with the pending
    /// input line, then reset the output buffer for the next batch.
    fn flush(&mut self) {
        println!("{}", self.buffer);
        print!("> {}", self.input);
        self.buffer.clear();
    }
}
impl Write for Terminal {
    /// Append formatted text to the output buffer; this sink never fails.
    fn write_str(&mut self, s: &str) -> core::fmt::Result {
        self.buffer.push_str(s);
        Ok(())
    }
}
impl Logger for Terminal {
    /// Flush for the `Logger` trait: print buffered output, re-echo the
    /// prompt with the pending input, and clear the buffer.
    // NOTE(review): byte-for-byte duplicate of the inherent
    // `Terminal::flush`; consider delegating to it.
    fn flush(&mut self) {
        println!("{}", self.buffer);
        print!("> {}", self.input);
        self.buffer.clear();
    }
}
/// UART RX interrupt handler: echo each received character to the console.
fn terminal_uart_rx_interrupt_handler() {
    let received = read_uart_data();
    print!("{}", received);
}
/// Install the terminal's RX echo handler for the UART interrupt source,
/// so received characters are printed as they arrive.
pub fn register_terminal_interrupt_handler() {
    register_interrupt_handler(
        nova::interrupt_handlers::IRQSource::UartInt,
        terminal_uart_rx_interrupt_handler,
    );
}

View File

@@ -1,83 +1,36 @@
.global vector_table
.section .vector_t , "ax"
.extern irq_handler
.macro ventry label
.align 7
.align 7
b \label
.endm
.section .vector_table, "ax"
.global vector_table
vector_table:
// Exceptions from current EL using SP_EL0
ventry .
ventry .
ventry .
ventry .
ventry synchronous_interrupt_no_el_change // Synchronous Exception 0x200
ventry irq_handler // IRQ(Interrupt Request) 0x280
// Exceptions from the current EL using SP_ELx
ventry synchronous_interrupt_no_el_change // Synchronous Exception 0x200
ventry irq_handler // IRQ(Interrupt Request) 0x280
ventry . // FIQ(Fast Interrupt Request) 0x300
ventry . // SError 0x580
// Exceptions from lower EL AArch64
ventry synchronous_interrupt_imm_lower_aarch64 // Synchronous Exception 0x400
ventry irq_handler // IRQ(Interrupt Request) 0x480
ventry . // FIQ(Fast Interrupt Request) 0x500
ventry . // SError 0x580
// Exceptions from lower EL AArch32
ventry .
ventry .
ventry .
ventry .
ventry synchronous_interrupt_imm_lower_aarch64
ventry irq_handler
.align 4
// Drop from EL2 to EL1 and enter kernel_main with a fresh EL1 stack.
.global el2_to_el1
el2_to_el1:
// HCR_EL2.RW (bit 31): EL1 executes in AArch64 state
mov x0, #(1 << 31)
msr HCR_EL2, x0
// Set SPSR_EL2: return to EL1h (EL1 using SP_EL1)
mov x0, #(0b0101)
msr SPSR_EL2, x0
// Set return address to kernel_main
ldr x0, =kernel_main
msr ELR_EL2, x0
// Set SP_EL1 to stack base
ldr x0, =__stack_end
msr SP_EL1, x0
// Set VBAR_EL1 to vector table
adr x0, vector_table
msr VBAR_EL1, x0
// Initialise SCTLR_EL1 from the SCTLR_EL1_CONF constant
// NOTE(review): whether this disables or enables the MMU depends on
// that constant's value — confirm against its definition.
ldr x0, =SCTLR_EL1_CONF
msr sctlr_el1, x0
// CPACR_EL1.FPEN (bits 21:20) = 0b11: don't trap SIMD/FP at EL0/EL1
mrs x0, CPACR_EL1
mov x1, #(0b11<<20)
orr x0,x0, x1
msr CPACR_EL1,x0
// Return to EL1 (jumps to kernel_main via ELR_EL2)
eret
.align 4
// Drop from EL1 to EL0 and enter the `el0` entry point with its own stack.
.global el1_to_el0
el1_to_el0:
// Set SPSR_EL1: return to EL0t (EL0 using SP_EL0)
mov x0, #(0b0000)
msr SPSR_EL1, x0
// Set return address to el0
ldr x0, =el0
msr ELR_EL1, x0
// Set SP_EL0 to the EL0 stack base (original comment wrongly said SP_EL1)
ldr x0, =__stack_end_el0
msr SP_EL0, x0
// Return to EL0 (jumps to el0 via ELR_EL1)
eret
.align 4
irq_handler:
@@ -126,7 +79,9 @@ synchronous_interrupt_imm_lower_aarch64:
stp x18, x29, [sp, #144]
stp x30, xzr, [sp, #160]
mov x0, sp
bl rust_synchronous_interrupt_imm_lower_aarch64
str x0, [sp, #0]
ldp x0, x1, [sp, #0]
ldp x2, x3, [sp, #16]
@@ -158,7 +113,9 @@ synchronous_interrupt_no_el_change:
stp x18, x29, [sp, #144]
stp x30, xzr, [sp, #160]
mov x0, sp
bl rust_synchronous_interrupt_no_el_change
str x0, [sp, #0]
ldp x0, x1, [sp, #0]
ldp x2, x3, [sp, #16]

View File

@@ -1,3 +1,5 @@
set -e
cargo build --target aarch64-unknown-none --release
cd "$(dirname "$0")"

View File

@@ -1,3 +1,5 @@
set -e
cargo build --target aarch64-unknown-none
cd "$(dirname "$0")"
@@ -9,6 +11,4 @@ qemu-system-aarch64 \
-cpu cortex-a53 \
-serial stdio \
-sd ../sd.img \
-display none \
-kernel ../target/aarch64-unknown-none/debug/kernel8.img \
-s -S
-kernel ../target/aarch64-unknown-none/debug/kernel8.img -S -s

View File

@@ -39,6 +39,10 @@ impl Heap {
}
}
/// Total raw size of the heap region in bytes.
// NOTE(review): takes `self` by value, which consumes the heap unless
// `Heap` is `Copy` — the conventional getter signature would be `&self`;
// confirm against the `Heap` type definition.
pub fn size(self) -> usize {
    self.raw_size
}
pub fn init(&mut self, heap_start: usize, heap_end: usize) {
self.start_address = heap_start as *mut HeapHeader;
self.end_address = heap_end as *mut HeapHeader;

View File

@@ -29,7 +29,7 @@ fn test_heap_allocation() {
assert_eq!(actual_alloc_size % MIN_BLOCK_SIZE, 0);
// Verify section is occupied
assert!((*malloc_header).free == false);
assert!(!(*malloc_header).free);
// Verify next header has been created
let next = (*malloc_header).next.unwrap();
@@ -55,7 +55,7 @@ fn test_full_heap() {
let malloc = heap.malloc(malloc_size).unwrap();
let malloc_header = Heap::get_header_ref_from_data_pointer(malloc);
unsafe {
assert_eq!((*malloc_header).free, false);
assert!(!(*malloc_header).free);
assert!((*malloc_header).next.is_none());
}
@@ -79,7 +79,7 @@ fn test_freeing_root() {
let malloc = heap.malloc(malloc_size).unwrap();
let malloc_header = Heap::get_header_ref_from_data_pointer(malloc);
unsafe {
assert_eq!((*malloc_header).free, false);
assert!(!(*malloc_header).free);
assert!((*malloc_header).size >= malloc_size);
assert!((*root_header).next.is_some());
@@ -100,7 +100,7 @@ fn test_merging_free_sections() {
);
let root_header = heap.start_address;
let root_header_start_size = unsafe { (*root_header).size };
let _root_header_start_size = unsafe { (*root_header).size };
let malloc1 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
let malloc_header_before = unsafe { *Heap::get_header_ref_from_data_pointer(malloc1) };
@@ -135,31 +135,29 @@ fn test_first_fit() {
);
let root_header = heap.start_address;
let root_header_start_size = unsafe { (*root_header).size };
let _root_header_start_size = unsafe { (*root_header).size };
let malloc1 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
let malloc2 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
let _malloc2 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
let malloc3 = heap.malloc(MIN_BLOCK_SIZE * 3).unwrap();
let malloc4 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
unsafe {
assert!(heap.free(malloc1).is_ok());
assert!(heap.free(malloc3).is_ok());
let malloc5 = heap.malloc(MIN_BLOCK_SIZE * 2).unwrap();
let malloc1_header = unsafe { *Heap::get_header_ref_from_data_pointer(malloc1) };
assert!(heap.free(malloc1).is_ok());
assert!(heap.free(malloc3).is_ok());
let malloc5 = heap.malloc(MIN_BLOCK_SIZE * 2).unwrap();
let malloc1_header = unsafe { *Heap::get_header_ref_from_data_pointer(malloc1) };
// First free block stays empty
assert!(malloc1_header.free);
// First free block stays empty
assert!(malloc1_header.free);
// New allocation takes the first fit aka. malloc3
assert_eq!(malloc5, malloc3);
// New allocation takes the first fit aka. malloc3
assert_eq!(malloc5, malloc3);
// If no free slot could be found, append to the end
let malloc6 = heap.malloc(MIN_BLOCK_SIZE * 2).unwrap();
assert!(malloc6 > malloc4);
// If no free slot could be found, append to the end
let malloc6 = heap.malloc(MIN_BLOCK_SIZE * 2).unwrap();
assert!(malloc6 > malloc4);
// Malloc7 takes slot of Malloc1
let malloc7 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
assert_eq!(malloc1, malloc7);
}
// Malloc7 takes slot of Malloc1
let malloc7 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
assert_eq!(malloc1, malloc7);
}

View File

@@ -5,7 +5,12 @@ use core::prelude::rust_2024::derive;
/// Kernel-wide error type returned by fallible nova operations.
#[derive(Debug)]
pub enum NovaError {
    /// Generic error carrying a static description.
    General(&'static str),
    // Presumably a failed mailbox transaction — confirm at use sites.
    Mailbox,
    // Presumably no free block large enough for an allocation — confirm.
    HeapFull,
    EmptyHeapSegmentNotAllowed,
    Misalignment,
    InvalidGranularity,
    /// Paging/MMU error carrying a static description.
    Paging(&'static str),
    // NOTE(review): typo — should be `OutOfMemory`. Renaming a public
    // variant breaks callers, so it is flagged here rather than fixed.
    OutOfMeomory,
}