Compare commits

7 Commits

30 changed files with 876 additions and 383 deletions

1
.gitignore vendored
View File

@@ -4,3 +4,4 @@ kernel8.img
sd.img sd.img
settings.json settings.json
.DS_Store .DS_Store
.venv

25
.vscode/launch.json vendored
View File

@@ -33,6 +33,31 @@
], ],
"preLaunchTask": "Run QEMU" "preLaunchTask": "Run QEMU"
}, },
{
"name": "Attach to QEMU (AArch64) wo. window",
"type": "cppdbg",
"request": "launch",
"program": "${workspaceFolder}/target/aarch64-unknown-none/debug/nova",
"miDebuggerServerAddress": "localhost:1234",
"miDebuggerPath": "gdb",
"cwd": "${workspaceFolder}",
"stopAtEntry": true,
"externalConsole": false,
"MIMode": "gdb",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
},
{
"description": "Show assembly on stop",
"text": "set disassemble-next-line on",
"ignoreFailures": true
}
],
"preLaunchTask": "Run QEMU wo window"
},
{ {
"name": "Attach LLDB", "name": "Attach LLDB",

33
.vscode/tasks.json vendored
View File

@@ -14,9 +14,38 @@
{ {
"label": "Run QEMU", "label": "Run QEMU",
"type": "shell", "type": "shell",
"command": "llvm-objcopy -O binary target/aarch64-unknown-none/debug/nova target/aarch64-unknown-none/debug/kernel8.img && qemu-system-aarch64 -M raspi3b -cpu cortex-a53 -serial stdio -sd sd.img -kernel ${workspaceFolder}/target/aarch64-unknown-none/debug/kernel8.img -S -s -m 1024", "command": "llvm-objcopy -O binary target/aarch64-unknown-none/debug/nova target/aarch64-unknown-none/debug/kernel8.img && echo Starting QEMU&qemu-system-aarch64 -M raspi3b -cpu cortex-a53 -serial stdio -sd sd.img -kernel ${workspaceFolder}/target/aarch64-unknown-none/debug/kernel8.img -S -s -m 1024",
"isBackground": true, "isBackground": true,
"dependsOn": ["Build"] "dependsOn": ["Build"],
"problemMatcher": {
"pattern": {
"regexp": "^(Starting QEMU)",
"line": 1,
},
"background": {
"activeOnStart": true,
"beginsPattern": "^(Starting QEMU)",
"endsPattern": "^(Starting QEMU)"
}
}
},
{
"label": "Run QEMU wo window",
"type": "shell",
"command": "llvm-objcopy -O binary target/aarch64-unknown-none/debug/nova target/aarch64-unknown-none/debug/kernel8.img && echo Starting QEMU&qemu-system-aarch64 -M raspi3b -cpu cortex-a53 -display none -serial stdio -sd sd.img -kernel ${workspaceFolder}/target/aarch64-unknown-none/debug/kernel8.img -S -s -m 1024",
"isBackground": true,
"dependsOn": ["Build"],
"problemMatcher": {
"pattern": {
"regexp": "^(Starting QEMU)",
"line": 1,
},
"background": {
"activeOnStart": true,
"beginsPattern": "^(Starting QEMU)",
"endsPattern": "^(Starting QEMU)"
}
}
} }
] ]
} }

7
Cargo.lock generated
View File

@@ -47,12 +47,19 @@ dependencies = [
"heap", "heap",
"libm", "libm",
"nova_error", "nova_error",
"paste",
] ]
[[package]] [[package]]
name = "nova_error" name = "nova_error"
version = "0.1.0" version = "0.1.0"
[[package]]
name = "paste"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]] [[package]]
name = "ppv-lite86" name = "ppv-lite86"
version = "0.2.21" version = "0.2.21"

View File

@@ -14,11 +14,13 @@ panic = "abort"
[dependencies] [dependencies]
libm = "0.2.15" libm = "0.2.15"
heap = {path = "heap"} heap = {path = "workspace/heap"}
nova_error = {path = "nova_error"} nova_error = {path = "workspace/nova_error"}
paste = "1.0.15"
[workspace] [workspace]
members = [ "nova_error", members = [
"heap" "workspace/nova_error",
"workspace/heap",
] ]

View File

@@ -14,8 +14,9 @@ NovaOS is an expository project where I build a kernel from scratch for a Raspber
- Communicate with peripherals via mailboxes ✓ - Communicate with peripherals via mailboxes ✓
- Frame Buffer ✓ - Frame Buffer ✓
- Heap Memory allocation ✓ - Heap Memory allocation ✓
- MMU
- SVC instructions
- Multi Core - Multi Core
- Dynamic clock speed - Dynamic clock speed
- MMU
- Multiprocessing - Multiprocessing
- Basic Terminal over UART - Basic Terminal over UART

23
link.ld
View File

@@ -10,7 +10,7 @@ SECTIONS {
*(.rodata .rodata.*) *(.rodata .rodata.*)
} }
.data : { .data ALIGN(2M) : {
_data = .; _data = .;
*(.data .data.*) *(.data .data.*)
} }
@@ -27,28 +27,29 @@ SECTIONS {
KEEP(*(.vector_table)) KEEP(*(.vector_table))
} }
.heap : ALIGN(16) .heap ALIGN(16): {
{
__heap_start = .; __heap_start = .;
. += 0x10000; #10kB . += 100K; #100kB
__heap_end = .; __heap_end = .;
} }
.stack : ALIGN(16) .stack ALIGN(16): {
{
__stack_start = .; __stack_start = .;
. += 0x10000; #10kB stack . += 10K; #10kB stack
__stack_end = .; __stack_end = .;
} }
.stack_el0 : ALIGN(16) . = ALIGN(2M);
{
__kernel_end = .;
.stack_el0 : {
__stack_start_el0 = .; __stack_start_el0 = .;
. += 0x10000; #10kB stack . += 10K; #10kB stack
__stack_end_el0 = .; __stack_end_el0 = .;
} }
. = ALIGN(2M);
_end = .; _end = .;
} }

486
src/aarch64/mmu.rs Normal file
View File

@@ -0,0 +1,486 @@
use core::panic;
use nova_error::NovaError;
// Linker-provided symbols (see link.ld); only their addresses are meaningful.
unsafe extern "C" {
    static mut __translation_table_l2_start: u64;
    static __stack_start_el0: u64;
    static __kernel_end: u64;
    static _data: u64;
}

// Descriptor type bits (bits [1:0] of a translation table entry).
const BLOCK: u64 = 0b01; // block descriptor (levels 1 and 2)
const TABLE: u64 = 0b11; // table descriptor (points at next-level table)
const PAGE: u64 = 0b11; // page descriptor (level 3)
/// Allow EL0 to access this section
pub const EL0_ACCESSIBLE: u64 = 1 << 6;
/// Allow a page or block to be written.
pub const WRITABLE: u64 = 0 << 7;
/// Disallow a page or block to be written.
pub const READ_ONLY: u64 = 1 << 7;
// AF bit: must be set or the first access to the mapping faults.
const ACCESS_FLAG: u64 = 1 << 10;
// SH bits [9:8] = 0b11: inner shareable.
const INNER_SHAREABILITY: u64 = 0b11 << 8;
pub const NORMAL_MEM: u64 = 0 << 2; // AttrIndx selecting normal memory
pub const DEVICE_MEM: u64 = 1 << 2; // AttrIndx selecting device memory
/// Disallow EL1 Execution.
pub const PXN: u64 = 1 << 53;
/// Disallow EL0 Execution.
pub const UXN: u64 = 1 << 54;
pub const GRANULARITY: usize = 4 * 1024;
const TABLE_ENTRY_COUNT: usize = GRANULARITY / size_of::<u64>(); // 512 entries per table
pub const LEVEL1_BLOCK_SIZE: usize = TABLE_ENTRY_COUNT * TABLE_ENTRY_COUNT * GRANULARITY; // 1 GiB
pub const LEVEL2_BLOCK_SIZE: usize = TABLE_ENTRY_COUNT * GRANULARITY; // 2 MiB
// Bitmap words spanned by one level-2 block (1 bit per page, 64 pages per word).
const L2_BLOCK_BITMAP_WORDS: usize = LEVEL2_BLOCK_SIZE / (64 * GRANULARITY);
// Number of pages covering 1 GiB of physical memory.
const MAX_PAGE_COUNT: usize = 1024 * 1024 * 1024 / GRANULARITY;

/// A single 4 KiB translation table (512 64-bit entries).
#[repr(align(4096))]
pub struct PageTable([u64; TABLE_ENTRY_COUNT]);

// Root level-1 table referenced by TTBR0 (name exported for the asm setup code).
#[no_mangle]
pub static mut TRANSLATIONTABLE_TTBR0: PageTable = PageTable([0; 512]);
// One bit per physical page; set = reserved/mapped.
static mut PAGING_BITMAP: [u64; MAX_PAGE_COUNT / 64] = [0; MAX_PAGE_COUNT / 64];
/// Map `size` bytes of freshly reserved physical memory at `virtual_address`.
///
/// `virtual_address` must be page aligned and `size` a multiple of
/// `GRANULARITY`. Large runs are mapped with 2 MiB level-2 blocks, the
/// remainder with 4 KiB pages. Fails on misalignment, bad granularity, or
/// when a target table entry is already populated.
pub fn allocate_memory(
    mut virtual_address: usize,
    mut size: usize,
    additional_flags: u64,
) -> Result<(), NovaError> {
    if !virtual_address.is_multiple_of(GRANULARITY) {
        return Err(NovaError::Misalignment);
    }
    // Split the request into level-1 blocks, level-2 blocks and single pages.
    let level1_blocks = size / LEVEL1_BLOCK_SIZE;
    size %= LEVEL1_BLOCK_SIZE;
    let level2_blocks = size / LEVEL2_BLOCK_SIZE;
    size %= LEVEL2_BLOCK_SIZE;
    let level3_pages = size / GRANULARITY;
    if !size.is_multiple_of(GRANULARITY) {
        return Err(NovaError::InvalidGranularity);
    }
    if level1_blocks > 0 {
        todo!("Currently not supported");
    }
    let base = core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0);
    // Map the 2 MiB chunks first, then the trailing pages.
    let mut remaining_blocks = level2_blocks;
    while remaining_blocks > 0 {
        alloc_block_l2(virtual_address, base, additional_flags)?;
        virtual_address += LEVEL2_BLOCK_SIZE;
        remaining_blocks -= 1;
    }
    let mut remaining_pages = level3_pages;
    while remaining_pages > 0 {
        alloc_page(virtual_address, base, additional_flags)?;
        virtual_address += GRANULARITY;
        remaining_pages -= 1;
    }
    Ok(())
}
/// Allocate a memory block of `size` starting at `virtual_address`,
/// with explicit physical_address.
///
/// Note: This can be used when mapping predefined regions.
///
/// The physical range is marked reserved in the paging bitmap. Leading 4 KiB
/// pages are mapped until `physical_address` reaches a 2 MiB boundary, then
/// whole level-2 blocks, then the trailing pages. Fixes the previous
/// version, which underflowed `level2_blocks` (and over-mapped) whenever the
/// request was smaller than the distance to the next 2 MiB boundary.
pub fn allocate_memory_explicit(
    mut virtual_address: usize,
    size: usize,
    mut physical_address: usize,
    additional_flags: u64,
) -> Result<(), NovaError> {
    // Both addresses must be page aligned (the physical check is new; a
    // misaligned physical address would corrupt the alignment arithmetic).
    if !virtual_address.is_multiple_of(GRANULARITY)
        || !physical_address.is_multiple_of(GRANULARITY)
    {
        return Err(NovaError::Misalignment);
    }
    if !size.is_multiple_of(GRANULARITY) {
        return Err(NovaError::InvalidGranularity);
    }
    if size / LEVEL1_BLOCK_SIZE > 0 {
        todo!("Currently not supported");
    }
    let base = core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0);
    let mut total_pages = size / GRANULARITY;

    // Pages needed until `physical_address` is 2 MiB aligned (0 if aligned),
    // capped at the request size so a short request never over-maps.
    let to_boundary =
        ((LEVEL2_BLOCK_SIZE - physical_address % LEVEL2_BLOCK_SIZE) / GRANULARITY)
            % TABLE_ENTRY_COUNT;
    let leading_pages = to_boundary.min(total_pages);
    for _ in 0..leading_pages {
        alloc_page_explicit(virtual_address, physical_address, base, additional_flags)?;
        virtual_address += GRANULARITY;
        physical_address += GRANULARITY;
    }
    total_pages -= leading_pages;

    // Whole 2 MiB blocks (physical address is now block aligned).
    for _ in 0..total_pages / TABLE_ENTRY_COUNT {
        alloc_block_l2_explicit(virtual_address, physical_address, base, additional_flags)?;
        virtual_address += LEVEL2_BLOCK_SIZE;
        physical_address += LEVEL2_BLOCK_SIZE;
    }

    // Trailing pages.
    for _ in 0..total_pages % TABLE_ENTRY_COUNT {
        alloc_page_explicit(virtual_address, physical_address, base, additional_flags)?;
        virtual_address += GRANULARITY;
        physical_address += GRANULARITY;
    }
    Ok(())
}
/// Reserve a free physical page and map it at `virtual_address`.
pub fn alloc_page(
    virtual_address: usize,
    base_table: *mut PageTable,
    additional_flags: u64,
) -> Result<(), NovaError> {
    // Grab the next free physical page, then wire it into the tables.
    let physical_address = reserve_page();
    map_page(virtual_address, physical_address, base_table, additional_flags)
}
/// Allocate a single page at an explicit `physical_address`.
///
/// Fails if the physical page is already reserved or the slot is occupied.
pub fn alloc_page_explicit(
    virtual_address: usize,
    physical_address: usize,
    base_table: *mut PageTable,
    additional_flags: u64,
) -> Result<(), NovaError> {
    // Claim the frame first so a double reservation is caught early.
    reserve_page_explicit(physical_address)?;
    map_page(virtual_address, physical_address, base_table, additional_flags)
}
/// Write a level-3 page descriptor mapping `virtual_address` to
/// `physical_address`, extending the table tree below `base_table_ptr`
/// as needed. Fails with `NovaError::Paging` when the slot is in use.
fn map_page(
    virtual_address: usize,
    physical_address: usize,
    base_table_ptr: *mut PageTable,
    additional_flags: u64,
) -> Result<(), NovaError> {
    let (l1_off, l2_off, l3_off) = virtual_address_to_table_offset(virtual_address);
    let l3_table = unsafe { &mut *navigate_table(base_table_ptr, &[l1_off, l2_off])? };
    match l3_table.0[l3_off] & 0b11 {
        // Empty slot: install the new descriptor.
        0 => {
            l3_table.0[l3_off] =
                create_page_descriptor_entry(physical_address, additional_flags);
            Ok(())
        }
        // Anything else is a double mapping.
        _ => Err(NovaError::Paging),
    }
}
/// Reserve a free 2 MiB region and map it as a level-2 block.
pub fn alloc_block_l2(
    virtual_addr: usize,
    base_table_ptr: *mut PageTable,
    additional_flags: u64,
) -> Result<(), NovaError> {
    let physical_address = reserve_block();
    map_l2_block(virtual_addr, physical_address, base_table_ptr, additional_flags)
}
/// Map a level-2 block at an explicit, 2 MiB aligned `physical_address`.
pub fn alloc_block_l2_explicit(
    virtual_addr: usize,
    physical_address: usize,
    base_table_ptr: *mut PageTable,
    additional_flags: u64,
) -> Result<(), NovaError> {
    // A block descriptor can only point at a block-aligned region.
    if !physical_address.is_multiple_of(LEVEL2_BLOCK_SIZE) {
        return Err(NovaError::Misalignment);
    }
    // Mark the whole region as used before touching the tables.
    reserve_block_explicit(physical_address)?;
    map_l2_block(virtual_addr, physical_address, base_table_ptr, additional_flags)
}
/// Write a level-2 block descriptor for `virtual_addr` -> `physical_address`
/// without touching the paging bitmap.
pub fn map_l2_block(
    virtual_addr: usize,
    physical_address: usize,
    base_table_ptr: *mut PageTable,
    additional_flags: u64,
) -> Result<(), NovaError> {
    let (l1_off, l2_off, _) = virtual_address_to_table_offset(virtual_addr);
    let l2_table = unsafe { &mut *navigate_table(base_table_ptr, &[l1_off])? };
    // The slot must be empty; anything else is a double mapping.
    if l2_table.0[l2_off] & 0b11 != 0 {
        return Err(NovaError::Paging);
    }
    l2_table.0[l2_off] = create_block_descriptor_entry(physical_address, additional_flags);
    Ok(())
}
pub fn reserve_range_explicit(
start_physical_address: usize,
end_physical_address: usize,
) -> Result<(), NovaError> {
let mut size = end_physical_address - start_physical_address;
let l1_blocks = size / LEVEL1_BLOCK_SIZE;
size %= LEVEL1_BLOCK_SIZE;
let l2_blocks = size / LEVEL2_BLOCK_SIZE;
size %= LEVEL2_BLOCK_SIZE;
let l3_pages = size / GRANULARITY;
if !size.is_multiple_of(GRANULARITY) {
return Err(NovaError::Misalignment);
}
if l1_blocks > 0 {
todo!();
}
let mut addr = start_physical_address;
for _ in 0..l2_blocks {
reserve_block_explicit(addr)?;
addr += LEVEL2_BLOCK_SIZE;
}
for _ in 0..l3_pages {
reserve_page_explicit(addr)?;
addr += GRANULARITY;
}
Ok(())
}
/// Claim the lowest free physical page and return its address.
///
/// Panics when the bitmap has no free page left.
fn reserve_page() -> usize {
    let address = find_unallocated_page().expect("Out of Memory!");
    let page = address / GRANULARITY;
    // Set the page's bit in the bitmap.
    unsafe { PAGING_BITMAP[page / 64] |= 1 << (page % 64) };
    address
}
fn reserve_page_explicit(physical_address: usize) -> Result<(), NovaError> {
let page = physical_address / GRANULARITY;
let word_index = page / 64;
if unsafe { PAGING_BITMAP[word_index] } & (1 << (page % 64)) > 0 {
return Err(NovaError::Paging);
}
unsafe { PAGING_BITMAP[word_index] |= 1 << (page % 64) };
Ok(())
}
/// Claim a free 2 MiB region (aligned to whole bitmap words) and return
/// its physical address.
///
/// Panics when no sufficiently large free run exists.
fn reserve_block() -> usize {
    let Some(start) = find_contiguous_free_bitmap_words(L2_BLOCK_BITMAP_WORDS) else {
        panic!("Out of Memory!");
    };
    // Mark every page of the block as used.
    for word in 0..L2_BLOCK_BITMAP_WORDS {
        unsafe { PAGING_BITMAP[start + word] = u64::MAX };
    }
    start * 64 * GRANULARITY
}
fn reserve_block_explicit(physical_address: usize) -> Result<(), NovaError> {
let page = physical_address / GRANULARITY;
for i in 0..L2_BLOCK_BITMAP_WORDS {
unsafe {
if PAGING_BITMAP[(page / 64) + i] != 0 {
return Err(NovaError::Paging);
}
};
}
for i in 0..L2_BLOCK_BITMAP_WORDS {
unsafe {
PAGING_BITMAP[(page / 64) + i] = u64::MAX;
};
}
Ok(())
}
/// Build a level-1/2 block descriptor for `physical_address`.
fn create_block_descriptor_entry(physical_address: usize, additional_flags: u64) -> u64 {
    let base = physical_address as u64 & 0x0000_FFFF_FFFF_F000;
    base | additional_flags | INNER_SHAREABILITY | ACCESS_FLAG | BLOCK
}
/// Build a level-3 page descriptor for `physical_address`.
fn create_page_descriptor_entry(physical_address: usize, additional_flags: u64) -> u64 {
    let base = physical_address as u64 & 0x0000_FFFF_FFFF_F000;
    base | additional_flags | INNER_SHAREABILITY | ACCESS_FLAG | PAGE
}
/// Build a table descriptor pointing at the next-level table at `addr`.
fn create_table_descriptor_entry(addr: usize) -> u64 {
    TABLE | (addr as u64 & 0x0000_FFFF_FFFF_F000)
}
/// Split a virtual address into its (level-1, level-2, level-3) table
/// indices for a 4 KiB granule with 512-entry tables.
fn virtual_address_to_table_offset(virtual_addr: usize) -> (usize, usize, usize) {
    let page = virtual_addr / GRANULARITY;
    let l3 = page % TABLE_ENTRY_COUNT;
    let l2 = (page / TABLE_ENTRY_COUNT) % TABLE_ENTRY_COUNT;
    let l1 = (page / (TABLE_ENTRY_COUNT * TABLE_ENTRY_COUNT)) % TABLE_ENTRY_COUNT;
    (l1, l2, l3)
}
/// Debugging function to navigate the translation tables.
///
/// Performs a software walk of all three levels for `addr`, loading each
/// entry the way the MMU would. Assumes every level is populated;
/// NOTE(review): entry1/entry2 are dereferenced as table pointers — confirm
/// the walked region is table-mapped (not block-mapped) before calling.
#[allow(unused_variables)]
pub fn sim_l3_access(addr: usize) {
    unsafe {
        // Level 1: index by 1 GiB slot.
        let entry1 = TRANSLATIONTABLE_TTBR0.0[addr / LEVEL1_BLOCK_SIZE];
        let table2 = &mut *(entry_phys(entry1) as *mut PageTable);
        // Level 2: index by 2 MiB slot within the 1 GiB region.
        let entry2 = table2.0[(addr % LEVEL1_BLOCK_SIZE) / LEVEL2_BLOCK_SIZE];
        let table3 = &mut *(entry_phys(entry2) as *mut PageTable);
        // Level 3: the 4 KiB page entry.
        let _entry3 = table3.0[(addr % LEVEL2_BLOCK_SIZE) / GRANULARITY];
    }
}
/// Navigate the table tree, by following given offsets. This function
/// allocates new tables if required.
fn navigate_table(
    initial_table_ptr: *mut PageTable,
    offsets: &[usize],
) -> Result<*mut PageTable, NovaError> {
    // Descend one level per offset, bailing out on the first error.
    offsets.iter().try_fold(initial_table_ptr, |current, &offset| {
        next_table(current, offset, initial_table_ptr)
    })
}
/// Get the next table one level down.
///
/// If the table doesn't exist, a page is allocated for it and identity
/// mapped (writable, non-executable) through the root table so the kernel
/// can keep editing it.
fn next_table(
    table_ptr: *mut PageTable,
    offset: usize,
    root_table_ptr: *mut PageTable,
) -> Result<*mut PageTable, NovaError> {
    let table = unsafe { &mut *table_ptr };
    match table.0[offset] & 0b11 {
        // Empty slot: allocate a fresh table page and install a descriptor.
        0 => {
            let new_table_addr = reserve_page();
            table.0[offset] = create_table_descriptor_entry(new_table_addr);
            map_page(
                new_table_addr,
                new_table_addr,
                root_table_ptr,
                NORMAL_MEM | WRITABLE | PXN | UXN,
            )?;
            Ok(entry_phys(table.0[offset]) as *mut PageTable)
        }
        // Table descriptor: follow it.
        3 => Ok(entry_phys(table.0[offset]) as *mut PageTable),
        // 0b01 is a block (cannot descend); 0b10 is not a valid descriptor.
        // The previous version hit `unreachable!()` on 0b10, panicking on a
        // corrupted table instead of reporting an error.
        _ => Err(NovaError::Paging),
    }
}
/// Find the lowest physical page whose bitmap bit is clear.
///
/// Iterates the bitmap by reference. The previous version wrote
/// `unsafe { PAGING_BITMAP }`, which copies the whole array by value —
/// 32 KiB (MAX_PAGE_COUNT / 64 words of 8 bytes) onto a kernel stack that
/// link.ld sizes at only 10 KiB — on every call.
fn find_unallocated_page() -> Option<usize> {
    // SAFETY: single-core, interrupts-masked access to the static bitmap.
    let bitmap = unsafe { &*core::ptr::addr_of!(PAGING_BITMAP) };
    for (i, &word) in bitmap.iter().enumerate() {
        if word != u64::MAX {
            // Lowest clear bit of this word.
            let offset = (!word).trailing_zeros() as usize;
            return Some((i * 64 + offset) * GRANULARITY);
        }
    }
    None
}
/// Find `required_words` consecutive all-zero bitmap words; returns the
/// index of the first word of the run, or `None` if no such run exists.
///
/// Iterates by reference instead of materialising the 32 KiB bitmap on the
/// stack per call, as the previous `unsafe { PAGING_BITMAP }` copy did.
fn find_contiguous_free_bitmap_words(required_words: usize) -> Option<usize> {
    // SAFETY: single-core, interrupts-masked access to the static bitmap.
    let bitmap = unsafe { &*core::ptr::addr_of!(PAGING_BITMAP) };
    let mut run_start = 0;
    let mut run_len = 0;
    for (i, &word) in bitmap.iter().enumerate() {
        if word == 0 {
            if run_len == 0 {
                run_start = i;
            }
            run_len += 1;
            if run_len == required_words {
                return Some(run_start);
            }
        } else {
            // A used word breaks the run.
            run_len = 0;
        }
    }
    None
}
/// Extracts the physical address out of a table entry.
///
/// Keeps the output-address field (bits [47:12]) and drops the descriptor
/// type and attribute bits.
#[inline]
fn entry_phys(entry: u64) -> u64 {
    const ADDRESS_MASK: u64 = 0x0000_FFFF_FFFF_F000;
    entry & ADDRESS_MASK
}

2
src/aarch64/mod.rs Normal file
View File

@@ -0,0 +1,2 @@
pub mod mmu;
pub mod registers;

59
src/aarch64/registers.rs Normal file
View File

@@ -0,0 +1,59 @@
use core::arch::asm;
/// Helpers for the DAIF exception-mask bits (Debug, SError, IRQ, FIQ).
pub mod daif {
    use core::arch::asm;

    /// Mask all four exception classes (D, A, I and F).
    #[inline(always)]
    pub fn mask_all() {
        unsafe { asm!("msr DAIFSet, #0xf", options(nomem, nostack)) }
    }

    /// Unmask all four exception classes.
    #[inline(always)]
    pub fn unmask_all() {
        unsafe { asm!("msr DAIFClr, #0xf", options(nomem, nostack)) }
    }

    /// Mask IRQs only (the I bit).
    #[inline(always)]
    pub fn mask_irq() {
        unsafe { asm!("msr DAIFSet, #0x2", options(nomem, nostack)) }
    }

    /// Unmask IRQs only (the I bit).
    #[inline(always)]
    pub fn unmask_irq() {
        unsafe { asm!("msr DAIFClr, #0x2", options(nomem, nostack)) }
    }
}
/// Generate a `read_<name>` accessor for a system register.
///
/// `psr!(TCR_EL1, u64)` expands (via the `paste` crate) to
/// `pub fn read_tcr_el1() -> u64` performing an `mrs` read of TCR_EL1.
/// The `{0:x}` template modifier always names the full 64-bit x register;
/// for a 32-bit `$t` the upper 32 bits of the register value are discarded.
#[macro_export]
macro_rules! psr {
    ($name:ident, $t:tt) => {
        paste::item! {
            pub fn [<read_ $name:lower>]() -> $t {
                let buf: $t;
                unsafe {
                    asm!(
                        concat!("mrs {0:x}, ", stringify!($name)),
                        out(reg) buf
                    );
                }
                buf
            }
        }
    };
}
// System-register accessors generated by the `psr!` macro above.
psr!(TCR_EL1, u64);
psr!(ID_AA64MMFR0_EL1, u64);
psr!(ESR_EL1, u32);
psr!(SPSR_EL1, u32);
// NOTE(review): ELR_EL1 holds a 64-bit return address; reading it as u32
// truncates the upper half — confirm kernel addresses always fit 32 bits.
psr!(ELR_EL1, u32);
psr!(SCTLR_EL1, u32);
/// Extract the mode field M[3:0] from SPSR_EL1.
///
/// Callers shift the result right by 2 to obtain the exception level the
/// exception was taken from.
pub fn read_exception_source_el() -> u32 {
    read_spsr_el1() & 0b1111
}

View File

@@ -1,16 +1,94 @@
static SCTLR_EL1_MMU_DISABLED: u64 = 0; //M const SCTLR_EL1_MMU_ENABLED: u64 = 1; //M
static SCTLR_EL1_DATA_CACHE_DISABLED: u64 = 0 << 2; //C const SCTLR_EL1_DATA_CACHE_DISABLED: u64 = 0 << 2; //C
static SCTLR_EL1_INSTRUCTION_CACHE_DISABLED: u64 = 0 << 12; //I const SCTLR_EL1_INSTRUCTION_CACHE_DISABLED: u64 = 0 << 12; //I
static SCTLR_EL1_LITTLE_ENDIAN_EL0: u64 = 0 << 24; //E0E const SCTLR_EL1_LITTLE_ENDIAN_EL0: u64 = 0 << 24; //E0E
static SCTLR_EL1_LITTLE_ENDIAN_EL1: u64 = 0 << 25; //EE const SCTLR_EL1_LITTLE_ENDIAN_EL1: u64 = 0 << 25; //EE
const SCTLR_EL1_SPAN: u64 = 1 << 23; //SPAN
#[allow(clippy::identity_op)] #[allow(clippy::identity_op)]
static SCTLR_EL1_RES: u64 = (0 << 6) | (1 << 11) | (0 << 17) | (1 << 20) | (1 << 22); //Res0 & Res1 const SCTLR_EL1_RES: u64 = (0 << 6) | (1 << 11) | (0 << 17) | (1 << 20) | (1 << 22); //Res0 & Res1
#[no_mangle] #[no_mangle]
pub static SCTLR_EL1_CONF: u64 = SCTLR_EL1_MMU_DISABLED pub static SCTLR_EL1_CONF: u64 = SCTLR_EL1_MMU_ENABLED
| SCTLR_EL1_DATA_CACHE_DISABLED | SCTLR_EL1_DATA_CACHE_DISABLED
| SCTLR_EL1_INSTRUCTION_CACHE_DISABLED | SCTLR_EL1_INSTRUCTION_CACHE_DISABLED
| SCTLR_EL1_LITTLE_ENDIAN_EL0 | SCTLR_EL1_LITTLE_ENDIAN_EL0
| SCTLR_EL1_LITTLE_ENDIAN_EL1 | SCTLR_EL1_LITTLE_ENDIAN_EL1
| SCTLR_EL1_RES; | SCTLR_EL1_RES
| SCTLR_EL1_SPAN;
// TCR_EL1 translation-control fields.
const TG0: u64 = 0b00 << 14; // 4KB granularity EL0 (TTBR0 granule)
const T0SZ: u64 = 25; // 25 Bits of TTBR select -> 39 Bits of VA
const SH0: u64 = 0b11 << 12; // Inner shareable
const TG1: u64 = 0b10 << 30; // 4KB granularity EL1 (TTBR1 granule)
const T1SZ: u64 = 25 << 16; // 25 Bits of TTBR select -> 39 Bits of VA
const EPD1: u64 = 0b1 << 23; // Trigger translation fault when using TTBR1_EL1
const SH1: u64 = 0b11 << 28; // Inner sharable
const IPS: u64 = 0b000 << 32; // 32 bits of PA space -> up to 4GiB
const AS: u64 = 0b1 << 36; // configure an ASID size of 16 bits
// Combined TCR_EL1 value, loaded by the assembly startup code (no_mangle).
#[no_mangle]
pub static TCR_EL1_CONF: u64 = IPS | TG0 | TG1 | T0SZ | T1SZ | SH0 | SH1 | EPD1 | AS;
/// Boot-time identity mapping of the kernel and peripherals into TTBR0.
pub mod mmu {
    use crate::{
        aarch64::mmu::{
            alloc_block_l2_explicit, map_l2_block, reserve_range_explicit, DEVICE_MEM,
            EL0_ACCESSIBLE, LEVEL1_BLOCK_SIZE, LEVEL2_BLOCK_SIZE, NORMAL_MEM, PXN, READ_ONLY,
            TRANSLATIONTABLE_TTBR0, UXN, WRITABLE,
        },
        PERIPHERAL_BASE,
    };

    // Linker symbols delimiting the kernel's memory regions (see link.ld);
    // only their addresses are used.
    extern "C" {
        static _data: u64;
        static _end: u64;
        static __kernel_end: u64;
    }

    /// Build the identity mapping used once the MMU is enabled:
    /// text/rodata (read-only, EL0-visible), kernel data (EL1-only,
    /// writable), the user region, and the peripheral MMIO window as
    /// device memory.
    pub fn initialize_mmu_translation_tables() {
        let shared_segment_end = unsafe { &_data } as *const _ as usize;
        let kernel_end = unsafe { &__kernel_end } as *const _ as usize;
        let user_space_end = unsafe { &_end } as *const _ as usize;
        // Mark every frame the kernel image occupies as used before the
        // allocator starts handing out pages.
        reserve_range_explicit(0x0, user_space_end).unwrap();
        // Code + read-only data: visible to EL0, not writable.
        for addr in (0..shared_segment_end).step_by(LEVEL2_BLOCK_SIZE) {
            let _ = map_l2_block(
                addr,
                addr,
                core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
                EL0_ACCESSIBLE | READ_ONLY | NORMAL_MEM,
            );
        }
        // Kernel data region: writable, never executable from EL0.
        for addr in (shared_segment_end..kernel_end).step_by(LEVEL2_BLOCK_SIZE) {
            let _ = map_l2_block(
                addr,
                addr,
                core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
                WRITABLE | UXN | NORMAL_MEM,
            );
        }
        // User-space region: EL0 read/write, not executable from EL1.
        for addr in (kernel_end..user_space_end).step_by(LEVEL2_BLOCK_SIZE) {
            let _ = map_l2_block(
                addr,
                addr,
                core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
                EL0_ACCESSIBLE | WRITABLE | PXN | NORMAL_MEM,
            );
        }
        // Peripheral MMIO window: device memory, no execution at any EL.
        // NOTE(review): mapping errors are deliberately discarded with
        // `let _ =` throughout — confirm overlaps are expected and benign.
        for addr in (PERIPHERAL_BASE..LEVEL1_BLOCK_SIZE).step_by(LEVEL2_BLOCK_SIZE) {
            let _ = alloc_block_l2_explicit(
                addr,
                addr,
                core::ptr::addr_of_mut!(TRANSLATIONTABLE_TTBR0),
                EL0_ACCESSIBLE | WRITABLE | UXN | PXN | DEVICE_MEM,
            );
        }
    }
}

View File

@@ -5,7 +5,7 @@ mod bitmaps;
use bitmaps::BASIC_LEGACY; use bitmaps::BASIC_LEGACY;
use crate::{ use crate::{
mailbox::{read_mailbox, write_mailbox}, pi3::mailbox::{read_mailbox, write_mailbox},
println, println,
}; };
#[repr(align(16))] #[repr(align(16))]
@@ -24,8 +24,8 @@ pub struct FrameBuffer {
pixel_depth: u32, // Bits per pixel pixel_depth: u32, // Bits per pixel
pitch: u32, // Pixel per row pitch: u32, // Pixel per row
rows: u32, // Rows rows: u32, // Rows
start_addr: *mut u32, pub start_addr: *mut u32,
size: u32, //Bytes pub size: u32, //Bytes
} }
pub const RED: u32 = 0x00FF0000; pub const RED: u32 = 0x00FF0000;
@@ -37,8 +37,11 @@ pub const YELLOW: u32 = 0x00FFFF00;
impl FrameBuffer { impl FrameBuffer {
pub fn draw_pixel(&self, x: u32, y: u32, color: u32) { pub fn draw_pixel(&self, x: u32, y: u32, color: u32) {
let offset = x + y * self.pitch; let offset = x + y * self.pitch;
if x >= self.pitch || y >= self.rows {
return;
}
unsafe { unsafe {
write_volatile(self.start_addr.add(offset as usize), color); write_volatile(self.start_addr.byte_add(4 * offset as usize), color);
} }
} }

View File

@@ -3,8 +3,11 @@ use core::arch::asm;
use alloc::vec::Vec; use alloc::vec::Vec;
use crate::{ use crate::{
aarch64::registers::{
daif::{mask_all, unmask_irq},
read_elr_el1, read_esr_el1, read_exception_source_el,
},
get_current_el, get_current_el,
interrupt_handlers::daif::unmask_irq,
peripherals::{ peripherals::{
gpio::{read_gpio_event_detect_status, reset_gpio_event_detect_status}, gpio::{read_gpio_event_detect_status, reset_gpio_event_detect_status},
uart::clear_uart_interrupt_state, uart::clear_uart_interrupt_state,
@@ -68,18 +71,18 @@ impl From<u32> for EsrElX {
#[no_mangle] #[no_mangle]
unsafe extern "C" fn rust_irq_handler() { unsafe extern "C" fn rust_irq_handler() {
daif::mask_all(); mask_all();
let pending_irqs = get_irq_pending_sources(); let pending_irqs = get_irq_pending_sources();
if pending_irqs & GPIO_PENDING_BIT_OFFSET != 0 { if pending_irqs & GPIO_PENDING_BIT_OFFSET != 0 {
handle_gpio_interrupt(); handle_gpio_interrupt();
let source_el = get_exception_return_exception_level() >> 2; let source_el = read_exception_source_el() >> 2;
println!("Source EL: {}", source_el); println!("Source EL: {}", source_el);
println!("Current EL: {}", get_current_el()); println!("Current EL: {}", get_current_el());
println!("Return register address: {:#x}", get_elr_el1()); println!("Return register address: {:#x}", read_esr_el1());
} }
if let Some(handler_vec) = unsafe { INTERRUPT_HANDLERS.as_ref() } { if let Some(handler_vec) = unsafe { &*core::ptr::addr_of_mut!(INTERRUPT_HANDLERS) } {
for handler in handler_vec { for handler in handler_vec {
if (pending_irqs & (1 << (handler.source.clone() as u32))) != 0 { if (pending_irqs & (1 << (handler.source.clone() as u32))) != 0 {
(handler.function)(); (handler.function)();
@@ -91,14 +94,14 @@ unsafe extern "C" fn rust_irq_handler() {
#[no_mangle] #[no_mangle]
unsafe extern "C" fn rust_synchronous_interrupt_no_el_change() { unsafe extern "C" fn rust_synchronous_interrupt_no_el_change() {
daif::mask_all(); mask_all();
let source_el = get_exception_return_exception_level() >> 2; let source_el = read_exception_source_el() >> 2;
println!("--------Sync Exception in EL{}--------", source_el); println!("--------Sync Exception in EL{}--------", source_el);
println!("No EL change"); println!("No EL change");
println!("Current EL: {}", get_current_el()); println!("Current EL: {}", get_current_el());
println!("{:?}", EsrElX::from(get_esr_el1())); println!("{:?}", EsrElX::from(read_esr_el1()));
println!("Return register address: {:#x}", get_elr_el1()); println!("Return register address: {:#x}", read_esr_el1());
println!("-------------------------------------"); println!("-------------------------------------");
} }
@@ -109,21 +112,23 @@ unsafe extern "C" fn rust_synchronous_interrupt_no_el_change() {
/// AArch64. /// AArch64.
#[no_mangle] #[no_mangle]
unsafe extern "C" fn rust_synchronous_interrupt_imm_lower_aarch64() { unsafe extern "C" fn rust_synchronous_interrupt_imm_lower_aarch64() {
daif::mask_all(); mask_all();
let source_el = get_exception_return_exception_level() >> 2; let source_el = read_exception_source_el() >> 2;
println!("--------Sync Exception in EL{}--------", source_el); println!("--------Sync Exception in EL{}--------", source_el);
println!("Exception escalated to EL {}", get_current_el()); println!("Exception escalated to EL {}", get_current_el());
println!("Current EL: {}", get_current_el()); println!("Current EL: {}", get_current_el());
let esr = EsrElX::from(get_esr_el1()); let esr: EsrElX = EsrElX::from(read_esr_el1());
println!("{:?}", EsrElX::from(esr)); println!("{:?}", esr);
println!("Return register address: {:#x}", get_elr_el1()); println!("Return address: {:#x}", read_elr_el1());
match esr.ec { match esr.ec {
0b100100 => { 0b100100 => {
println!("Cause: Data Abort from a lower Exception level"); println!("Cause: Data Abort from a lower Exception level");
} }
_ => {} _ => {
println!("Unknown Error Code: {:b}", esr.ec);
}
} }
println!("-------------------------------------"); println!("-------------------------------------");
@@ -133,7 +138,9 @@ unsafe extern "C" fn rust_synchronous_interrupt_imm_lower_aarch64() {
fn clear_interrupt_for_source(source: IRQSource) { fn clear_interrupt_for_source(source: IRQSource) {
match source { match source {
IRQSource::UartInt => clear_uart_interrupt_state(), IRQSource::UartInt => clear_uart_interrupt_state(),
_ => {} _ => {
todo!()
}
} }
} }
@@ -144,42 +151,6 @@ fn set_return_to_kernel_main() {
} }
} }
fn get_exception_return_exception_level() -> u32 {
let spsr: u32;
unsafe {
asm!("mrs {0:x}, SPSR_EL1", out(reg) spsr);
}
spsr & 0b1111
}
/// Read the syndrome information that caused an exception
///
/// ESR = Exception Syndrome Register
fn get_esr_el1() -> u32 {
let esr: u32;
unsafe {
asm!(
"mrs {esr:x}, ESR_EL1",
esr = out(reg) esr
);
}
esr
}
/// Read the return address
///
/// ELR = Exception Link Registers
fn get_elr_el1() -> u32 {
let elr: u32;
unsafe {
asm!(
"mrs {esr:x}, ELR_EL1",
esr = out(reg) elr
);
}
elr
}
fn handle_gpio_interrupt() { fn handle_gpio_interrupt() {
println!("Interrupt"); println!("Interrupt");
for i in 0..=53u32 { for i in 0..=53u32 {
@@ -245,36 +216,13 @@ pub fn get_irq_pending_sources() -> u64 {
pending pending
} }
pub mod daif { #[inline(always)]
use core::arch::asm;
#[inline(always)]
pub fn mask_all() {
unsafe { asm!("msr DAIFSet, #0xf", options(nomem, nostack)) }
}
#[inline(always)]
pub fn unmask_all() {
unsafe { asm!("msr DAIFClr, #0xf", options(nomem, nostack)) }
}
#[inline(always)]
pub fn mask_irq() {
unsafe { asm!("msr DAIFSet, #0x2", options(nomem, nostack)) }
}
#[inline(always)]
pub fn unmask_irq() {
unsafe { asm!("msr DAIFClr, #0x2", options(nomem, nostack)) }
}
}
pub fn initialize_interrupt_handler() { pub fn initialize_interrupt_handler() {
unsafe { INTERRUPT_HANDLERS = Some(Vec::new()) }; unsafe { INTERRUPT_HANDLERS = Some(Vec::new()) };
} }
pub fn register_interrupt_handler(source: IRQSource, function: fn()) { pub fn register_interrupt_handler(source: IRQSource, function: fn()) {
if let Some(handler_vec) = unsafe { INTERRUPT_HANDLERS.as_mut() } { if let Some(handler_vec) = unsafe { &mut *core::ptr::addr_of_mut!(INTERRUPT_HANDLERS) } {
handler_vec.push(InterruptHandlers { source, function }); handler_vec.push(InterruptHandlers { source, function });
} }
} }

View File

@@ -1,227 +0,0 @@
use core::arch::asm;
use crate::{
get_current_el,
irq_interrupt::daif::unmask_irq,
mmio_read, mmio_write,
peripherals::gpio::{read_gpio_event_detect_status, reset_gpio_event_detect_status},
};
const INTERRUPT_BASE: u32 = 0x3F00_B000;
const IRQ_PENDING_BASE: u32 = INTERRUPT_BASE + 0x204;
const ENABLE_IRQ_BASE: u32 = INTERRUPT_BASE + 0x210;
const DISABLE_IRQ_BASE: u32 = INTERRUPT_BASE + 0x21C;
#[repr(u32)]
pub enum IRQState {
AuxInt = 29,
I2cSpiSlvInt = 44,
Pwa0 = 45,
Pwa1 = 46,
Smi = 48,
GpioInt0 = 49,
GpioInt1 = 50,
GpioInt2 = 51,
GpioInt3 = 52,
I2cInt = 53,
SpiInt = 54,
PcmInt = 55,
UartInt = 57,
}
/// Representation of the ESR_ELx registers
///
/// Reference: D1.10.4
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
struct EsrElX {
ec: u32,
il: u32,
iss: u32,
}
impl From<u32> for EsrElX {
fn from(value: u32) -> Self {
Self {
ec: value >> 26,
il: (value >> 25) & 0b1,
iss: value & 0x1FFFFFF,
}
}
}
#[no_mangle]
unsafe extern "C" fn rust_irq_handler() {
daif::mask_all();
handle_gpio_interrupt();
let source_el = get_exception_return_exception_level() >> 2;
println!("Source EL: {}", source_el);
println!("Current EL: {}", get_current_el());
println!("Return register address: {:#x}", get_elr_el1());
}
#[no_mangle]
unsafe extern "C" fn rust_synchronous_interrupt_no_el_change() {
daif::mask_all();
let source_el = get_exception_return_exception_level() >> 2;
println!("--------Sync Exception in EL{}--------", source_el);
println!("No EL change");
println!("Current EL: {}", get_current_el());
println!("{:?}", EsrElX::from(get_esr_el1()));
println!("Return register address: {:#x}", get_elr_el1());
println!("-------------------------------------");
}
/// Synchronous Exception Handler
///
/// Lower Exception level, where the implemented level
/// immediately lower than the target level is using
/// AArch64.
#[no_mangle]
unsafe extern "C" fn rust_synchronous_interrupt_imm_lower_aarch64() {
daif::mask_all();
let source_el = get_exception_return_exception_level() >> 2;
println!("--------Sync Exception in EL{}--------", source_el);
println!("Exception escalated to EL {}", get_current_el());
println!("Current EL: {}", get_current_el());
let esr = EsrElX::from(get_esr_el1());
println!("{:?}", EsrElX::from(esr));
println!("Return register address: {:#x}", get_elr_el1());
match esr.ec {
0b100100 => {
println!("Cause: Data Abort from a lower Exception level");
}
_ => {}
}
println!("-------------------------------------");
set_return_to_kernel_main();
}
fn set_return_to_kernel_main() {
unsafe {
asm!("ldr x0, =kernel_main", "msr ELR_EL1, x0");
asm!("mov x0, #(0b0101)", "msr SPSR_EL1, x0");
}
}
fn get_exception_return_exception_level() -> u32 {
let spsr: u32;
unsafe {
asm!("mrs {0:x}, SPSR_EL1", out(reg) spsr);
}
spsr & 0b1111
}
/// Read the syndrome information that caused an exception
///
/// ESR = Exception Syndrome Register
fn get_esr_el1() -> u32 {
let esr: u32;
unsafe {
asm!(
"mrs {esr:x}, ESR_EL1",
esr = out(reg) esr
);
}
esr
}
/// Read the return address
///
/// ELR = Exception Link Registers
fn get_elr_el1() -> u32 {
let elr: u32;
unsafe {
asm!(
"mrs {esr:x}, ELR_EL1",
esr = out(reg) elr
);
}
elr
}
fn handle_gpio_interrupt() {
println!("Interrupt");
for i in 0..=53u32 {
let val = read_gpio_event_detect_status(i);
if val {
#[allow(clippy::single_match)]
match i {
26 => {
println!("Button Pressed");
}
_ => {}
}
// Reset GPIO Interrupt handler by writing a 1
reset_gpio_event_detect_status(i);
}
}
unmask_irq();
}
/// Enables IRQ Source
pub fn enable_irq_source(state: IRQState) {
let nr = state as u32;
let register = ENABLE_IRQ_BASE + 4 * (nr / 32);
let register_offset = nr % 32;
let current = mmio_read(register);
let mask = 0b1 << register_offset;
let new_val = current | mask;
mmio_write(register, new_val);
}
/// Disable IRQ Source
///
/// The BCM283x IRQ disable registers are "write 1 to disable": a set
/// bit disables that source, a zero bit leaves it untouched.
pub fn disable_irq_source(state: IRQState) {
    let nr = state as u32;
    let register = DISABLE_IRQ_BASE + 4 * (nr / 32);
    let register_offset = nr % 32;
    // FIX: the previous read-modify-write OR-ed the register's read value
    // back into the write. Since every 1 bit written here *disables* a
    // source, that could disable unrelated IRQs in the same bank. Write
    // only the bit for the requested source.
    mmio_write(register, 0b1 << register_offset);
}
/// Read current IRQ Source status
///
/// Returns the raw enable bit (0 or 1) for the given source.
pub fn read_irq_source_status(state: IRQState) -> u32 {
    let irq_number = state as u32;
    let register_address = ENABLE_IRQ_BASE + 4 * (irq_number / 32);
    let bit = irq_number % 32;
    (mmio_read(register_address) >> bit) & 0b1
}
/// Whether the given IRQ source is currently pending.
pub fn read_irq_pending(state: IRQState) -> bool {
    let irq_number = state as u32;
    let register_address = IRQ_PENDING_BASE + 4 * (irq_number / 32);
    let bit = irq_number % 32;
    mmio_read(register_address) & (0b1 << bit) != 0
}
/// Helpers for the AArch64 DAIF exception-mask bits.
///
/// Immediate bit layout for `DAIFSet`/`DAIFClr`:
/// D (debug) = bit 3, A (SError) = bit 2, I (IRQ) = bit 1, F (FIQ) = bit 0.
/// So `#0xf` touches all four classes and `#0x2` touches IRQs only.
pub mod daif {
    use core::arch::asm;
    /// Mask all four exception classes (D, A, I, F).
    #[inline(always)]
    pub fn mask_all() {
        unsafe { asm!("msr DAIFSet, #0xf", options(nomem, nostack)) }
    }
    /// Unmask all four exception classes (D, A, I, F).
    #[inline(always)]
    pub fn unmask_all() {
        unsafe { asm!("msr DAIFClr, #0xf", options(nomem, nostack)) }
    }
    /// Mask IRQs only (the I bit); other classes are unchanged.
    #[inline(always)]
    pub fn mask_irq() {
        unsafe { asm!("msr DAIFSet, #0x2", options(nomem, nostack)) }
    }
    /// Unmask IRQs only (the I bit); other classes are unchanged.
    #[inline(always)]
    pub fn unmask_irq() {
        unsafe { asm!("msr DAIFClr, #0x2", options(nomem, nostack)) }
    }
}

View File

@@ -14,7 +14,7 @@ use heap::Heap;
use crate::{interrupt_handlers::initialize_interrupt_handler, logger::DefaultLogger}; use crate::{interrupt_handlers::initialize_interrupt_handler, logger::DefaultLogger};
static PERIPHERAL_BASE: u32 = 0x3F00_0000; static PERIPHERAL_BASE: usize = 0x3F00_0000;
unsafe extern "C" { unsafe extern "C" {
unsafe static mut __heap_start: u8; unsafe static mut __heap_start: u8;
@@ -35,20 +35,21 @@ pub unsafe fn init_heap() {
#[panic_handler] #[panic_handler]
fn panic(_panic: &PanicInfo) -> ! { fn panic(_panic: &PanicInfo) -> ! {
loop { loop {
println!("Panic"); println!("Panic: {}", _panic.message());
} }
} }
pub mod peripherals; pub mod peripherals;
pub mod aarch64;
pub mod configuration; pub mod configuration;
pub mod framebuffer; pub mod framebuffer;
pub mod interrupt_handlers; pub mod interrupt_handlers;
pub mod logger; pub mod logger;
pub mod mailbox;
pub mod power_management;
pub mod timer; pub mod timer;
pub mod pi3;
#[inline(always)] #[inline(always)]
pub unsafe fn read_address(address: u32) -> u32 { pub unsafe fn read_address(address: u32) -> u32 {
unsafe { read_volatile(address as *const u32) } unsafe { read_volatile(address as *const u32) }

View File

@@ -31,13 +31,11 @@ macro_rules! log {
} }
pub fn log(args: fmt::Arguments) { pub fn log(args: fmt::Arguments) {
unsafe { if let Some(logger) = unsafe { &mut *core::ptr::addr_of_mut!(LOGGER) } {
if let Some(logger) = LOGGER.as_mut() {
logger.write_str("\n").unwrap(); logger.write_str("\n").unwrap();
logger.write_fmt(args).unwrap(); logger.write_fmt(args).unwrap();
logger.flush(); logger.flush();
} }
}
} }
pub fn set_logger(logger: Box<dyn Logger>) { pub fn set_logger(logger: Box<dyn Logger>) {

View File

@@ -1,6 +1,5 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
#![feature(asm_experimental_arch)]
#![allow(static_mut_refs)] #![allow(static_mut_refs)]
#![allow(clippy::missing_safety_doc)] #![allow(clippy::missing_safety_doc)]
use core::{ use core::{
@@ -10,12 +9,17 @@ use core::{
extern crate alloc; extern crate alloc;
use alloc::boxed::Box;
use nova::{ use nova::{
aarch64::{
mmu::{
allocate_memory_explicit, sim_l3_access, EL0_ACCESSIBLE, NORMAL_MEM, PXN, UXN, WRITABLE,
},
registers::{daif, read_id_aa64mmfr0_el1},
},
configuration::mmu::initialize_mmu_translation_tables,
framebuffer::{FrameBuffer, BLUE, GREEN, RED}, framebuffer::{FrameBuffer, BLUE, GREEN, RED},
get_current_el, init_heap, get_current_el, init_heap,
interrupt_handlers::{daif, enable_irq_source, IRQSource}, interrupt_handlers::{enable_irq_source, IRQSource},
log, mailbox,
peripherals::{ peripherals::{
gpio::{ gpio::{
blink_gpio, gpio_pull_up, set_falling_edge_detect, set_gpio_function, GPIOFunction, blink_gpio, gpio_pull_up, set_falling_edge_detect, set_gpio_function, GPIOFunction,
@@ -23,6 +27,7 @@ use nova::{
}, },
uart::uart_init, uart::uart_init,
}, },
pi3::mailbox,
println, println,
}; };
@@ -31,6 +36,7 @@ global_asm!(include_str!("vector.S"));
extern "C" { extern "C" {
fn el2_to_el1(); fn el2_to_el1();
fn el1_to_el0(); fn el1_to_el0();
fn configure_mmu_el1();
static mut __bss_start: u32; static mut __bss_start: u32;
static mut __bss_end: u32; static mut __bss_end: u32;
} }
@@ -61,7 +67,25 @@ pub extern "C" fn main() -> ! {
println!("Exception level: {}", get_current_el()); println!("Exception level: {}", get_current_el());
unsafe { unsafe {
asm!("mrs x0, SCTLR_EL1",); init_heap();
initialize_mmu_translation_tables();
// Frame Buffer memory range
// TODO: this is just temporary
allocate_memory_explicit(
0x3c100000,
1080 * 1920 * 4,
0x3c100000,
NORMAL_MEM | PXN | UXN | WRITABLE | EL0_ACCESSIBLE,
)
.unwrap();
sim_l3_access(0x3c100000);
configure_mmu_el1();
};
println!("AA64 {:064b}", read_id_aa64mmfr0_el1());
unsafe {
el2_to_el1(); el2_to_el1();
} }
@@ -80,12 +104,14 @@ unsafe fn zero_bss() {
#[no_mangle] #[no_mangle]
pub extern "C" fn kernel_main() -> ! { pub extern "C" fn kernel_main() -> ! {
nova::initialize_kernel(); nova::initialize_kernel();
println!("Kernel Main");
println!("Exception Level: {}", get_current_el()); println!("Exception Level: {}", get_current_el());
daif::unmask_all(); daif::unmask_all();
let fb = FrameBuffer::default();
for i in 0..1080 {
fb.draw_pixel(50, i, RED);
}
unsafe { unsafe {
init_heap();
el1_to_el0(); el1_to_el0();
}; };
@@ -107,22 +133,22 @@ pub extern "C" fn el0() -> ! {
let fb = FrameBuffer::default(); let fb = FrameBuffer::default();
for i in 600..1080 {
fb.draw_pixel(50, i, RED);
}
fb.draw_square(500, 500, 600, 700, RED); fb.draw_square(500, 500, 600, 700, RED);
fb.draw_square_fill(800, 800, 900, 900, GREEN); fb.draw_square_fill(800, 800, 900, 900, GREEN);
fb.draw_square_fill(1000, 800, 1200, 700, BLUE); fb.draw_square_fill(1000, 800, 1200, 700, BLUE);
fb.draw_square_fill(900, 100, 800, 150, RED | BLUE); fb.draw_square_fill(900, 100, 800, 150, RED | BLUE);
fb.draw_string("Hello World! :D\nTest next Line", 500, 5, 3, BLUE); fb.draw_string("Hello World! :D\nTest next Line", 500, 5, 3, BLUE);
fb.draw_function(cos, 100, 101, RED); fb.draw_function(cos, 0, 101, RED);
loop { loop {
let temp = mailbox::read_soc_temp([0]).unwrap(); let temp = mailbox::read_soc_temp([0]).unwrap();
log!("{} °C", temp[1] / 1000); println!("{} °C", temp[1] / 1000);
blink_gpio(SpecificGpio::OnboardLed as u8, 500); blink_gpio(SpecificGpio::OnboardLed as u8, 500);
let b = Box::new([1, 2, 3, 4]);
log!("{:?}", b);
} }
} }

View File

@@ -3,7 +3,7 @@ use core::{
fmt::{self, Write}, fmt::{self, Write},
}; };
use crate::{println, read_address, write_address}; use crate::{read_address, write_address};
const BAUD: u32 = 115200; const BAUD: u32 = 115200;
const UART_CLK: u32 = 48_000_000; const UART_CLK: u32 = 48_000_000;
@@ -118,11 +118,13 @@ fn uart_fifo_enable(enable: bool) {
unsafe { write_address(UART0_LCRH, lcrh) }; unsafe { write_address(UART0_LCRH, lcrh) };
} }
#[inline(always)]
fn uart_enable_rx_interrupt() { fn uart_enable_rx_interrupt() {
unsafe { write_address(UART0_IMSC, UART0_IMSC_RXIM) }; unsafe { write_address(UART0_IMSC, UART0_IMSC_RXIM) };
} }
/// Set UART word length and set FIFO status /// Set UART word length and set FIFO status
#[inline(always)]
fn uart_set_lcrh(wlen: u32, enable_fifo: bool) { fn uart_set_lcrh(wlen: u32, enable_fifo: bool) {
let mut value = (wlen & 0b11) << 5; let mut value = (wlen & 0b11) << 5;
if enable_fifo { if enable_fifo {
@@ -131,10 +133,12 @@ fn uart_set_lcrh(wlen: u32, enable_fifo: bool) {
unsafe { write_address(UART0_LCRH, value) }; unsafe { write_address(UART0_LCRH, value) };
} }
#[inline(always)]
pub fn read_uart_data() -> char { pub fn read_uart_data() -> char {
(unsafe { read_address(UART0_DR) } & 0xFF) as u8 as char (unsafe { read_address(UART0_DR) } & 0xFF) as u8 as char
} }
#[inline(always)]
pub fn clear_uart_interrupt_state() { pub fn clear_uart_interrupt_state() {
unsafe { unsafe {
write_address(UART0_ICR, 1 << 4); write_address(UART0_ICR, 1 << 4);

2
src/pi3/mod.rs Normal file
View File

@@ -0,0 +1,2 @@
pub mod mailbox;
pub mod power_management;

View File

@@ -3,7 +3,7 @@ use core::ptr::{read_volatile, write_volatile};
use crate::PERIPHERAL_BASE; use crate::PERIPHERAL_BASE;
/// Power Management Base /// Power Management Base
static PM_BASE: u32 = PERIPHERAL_BASE + 0x10_0000; static PM_BASE: u32 = PERIPHERAL_BASE as u32 + 0x10_0000;
static PM_RSTC: u32 = PM_BASE + 0x1c; static PM_RSTC: u32 = PM_BASE + 0x1c;
static PM_WDOG: u32 = PM_BASE + 0x24; static PM_WDOG: u32 = PM_BASE + 0x24;
@@ -23,5 +23,6 @@ pub fn reboot_system() {
PM_PASSWORD | (pm_rstc_val & PM_RSTC_WRCFG_CLR) | PM_RSTC_WRCFG_FULL_RESET, PM_PASSWORD | (pm_rstc_val & PM_RSTC_WRCFG_CLR) | PM_RSTC_WRCFG_FULL_RESET,
); );
} }
#[allow(clippy::empty_loop)]
loop {} loop {}
} }

View File

@@ -1,5 +1,5 @@
.global vector_table .global v_table
.extern irq_handler .extern irq_handler
.macro ventry label .macro ventry label
@@ -7,7 +7,7 @@
b \label b \label
.endm .endm
.section .vector_table, "ax" .section .vector_table , "ax"
vector_table: vector_table:
ventry . ventry .
ventry . ventry .
@@ -21,12 +21,18 @@ vector_table:
ventry synchronous_interrupt_imm_lower_aarch64 ventry synchronous_interrupt_imm_lower_aarch64
ventry irq_handler ventry irq_handler
ventry .
ventry .
ventry .
ventry .
ventry .
ventry .
.align 4 .align 4
.global el2_to_el1 .global el2_to_el1
el2_to_el1: el2_to_el1:
mov x0, #(1 << 31) mov x0, #(1 << 31)
msr HCR_EL2, x0 msr HCR_EL2, x0
@@ -46,9 +52,13 @@ el2_to_el1:
adr x0, vector_table adr x0, vector_table
msr VBAR_EL1, x0 msr VBAR_EL1, x0
// Disable MMU isb
ldr x0, =SCTLR_EL1_CONF
msr sctlr_el1, x0 adrp x0, SCTLR_EL1_CONF
ldr x1, [x0, :lo12:SCTLR_EL1_CONF]
msr SCTLR_EL1, x1
isb
// SIMD should not be trapped // SIMD should not be trapped
mrs x0, CPACR_EL1 mrs x0, CPACR_EL1
@@ -56,9 +66,38 @@ el2_to_el1:
orr x0,x0, x1 orr x0,x0, x1
msr CPACR_EL1,x0 msr CPACR_EL1,x0
isb
// Return to EL1 // Return to EL1
eret eret
.align 4
.global configure_mmu_el1
configure_mmu_el1:
// Configure MMU
adrp x0, TCR_EL1_CONF
ldr x1, [x0, :lo12:TCR_EL1_CONF]
msr TCR_EL1, x1
isb
// MAIR0: Normal Mem.
// MAIR1: Device Mem.
mov x0, #0x04FF
msr MAIR_EL1, x0
isb
// Configure translation table
adrp x0, TRANSLATIONTABLE_TTBR0
add x1, x0, :lo12:TRANSLATIONTABLE_TTBR0
msr TTBR0_EL1, x1
msr TTBR1_EL1, x1
tlbi vmalle1
dsb ish
isb
ret
.align 4 .align 4
.global el1_to_el0 .global el1_to_el0
el1_to_el0: el1_to_el0:
@@ -75,6 +114,8 @@ el1_to_el0:
ldr x0, =__stack_end_el0 ldr x0, =__stack_end_el0
msr SP_EL0, x0 msr SP_EL0, x0
isb
// Return to EL0 // Return to EL0
eret eret

View File

@@ -1,3 +1,5 @@
set -e
cargo build --target aarch64-unknown-none --release cargo build --target aarch64-unknown-none --release
cd "$(dirname "$0")" cd "$(dirname "$0")"

View File

@@ -1,3 +1,5 @@
set -e
cargo build --target aarch64-unknown-none cargo build --target aarch64-unknown-none
cd "$(dirname "$0")" cd "$(dirname "$0")"

View File

@@ -100,7 +100,7 @@ fn test_merging_free_sections() {
); );
let root_header = heap.start_address; let root_header = heap.start_address;
let root_header_start_size = unsafe { (*root_header).size }; let _root_header_start_size = unsafe { (*root_header).size };
let malloc1 = heap.malloc(MIN_BLOCK_SIZE).unwrap(); let malloc1 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
let malloc_header_before = unsafe { *Heap::get_header_ref_from_data_pointer(malloc1) }; let malloc_header_before = unsafe { *Heap::get_header_ref_from_data_pointer(malloc1) };
@@ -135,14 +135,13 @@ fn test_first_fit() {
); );
let root_header = heap.start_address; let root_header = heap.start_address;
let root_header_start_size = unsafe { (*root_header).size }; let _root_header_start_size = unsafe { (*root_header).size };
let malloc1 = heap.malloc(MIN_BLOCK_SIZE).unwrap(); let malloc1 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
let malloc2 = heap.malloc(MIN_BLOCK_SIZE).unwrap(); let _malloc2 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
let malloc3 = heap.malloc(MIN_BLOCK_SIZE * 3).unwrap(); let malloc3 = heap.malloc(MIN_BLOCK_SIZE * 3).unwrap();
let malloc4 = heap.malloc(MIN_BLOCK_SIZE).unwrap(); let malloc4 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
unsafe {
assert!(heap.free(malloc1).is_ok()); assert!(heap.free(malloc1).is_ok());
assert!(heap.free(malloc3).is_ok()); assert!(heap.free(malloc3).is_ok());
let malloc5 = heap.malloc(MIN_BLOCK_SIZE * 2).unwrap(); let malloc5 = heap.malloc(MIN_BLOCK_SIZE * 2).unwrap();
@@ -161,5 +160,4 @@ fn test_first_fit() {
// Malloc7 takes slot of Malloc1 // Malloc7 takes slot of Malloc1
let malloc7 = heap.malloc(MIN_BLOCK_SIZE).unwrap(); let malloc7 = heap.malloc(MIN_BLOCK_SIZE).unwrap();
assert_eq!(malloc1, malloc7); assert_eq!(malloc1, malloc7);
}
} }

View File

@@ -8,4 +8,7 @@ pub enum NovaError {
Mailbox, Mailbox,
HeapFull, HeapFull,
EmptyHeapSegmentNotAllowed, EmptyHeapSegmentNotAllowed,
Misalignment,
InvalidGranularity,
Paging,
} }