Compare commits


No commits in common. "master" and "feature/tcp-rsh" have entirely different histories.

1090 changed files with 18386 additions and 178239 deletions


@ -1,2 +1,2 @@
[alias]
xtask = "run --manifest-path ./xtask/Cargo.toml --release --"
xtask = "run --manifest-path ./xtask/Cargo.toml --"

.gitignore vendored

@ -1,8 +1,2 @@
/target
/toolchain
/xtask.toml
/qemu.toml
/etc/boot/yboot.cfg
/disk-*.img
/tmp-*.txt
/*.log

Cargo.lock generated

File diff suppressed because it is too large


@ -6,8 +6,6 @@ exclude = [
"tool/abi-generator",
"toolchain",
"userspace/dynload-program",
"userspace/lib/ygglibc",
"toolchain-c"
]
members = [
"xtask",
@ -16,13 +14,10 @@ members = [
"lib/abi",
"lib/libyalloc",
"lib/runtime",
"lib/qemu",
"lib/abi-serde",
"lib/libutil"
"lib/qemu"
]
[workspace.dependencies]
chrono = { version = "0.4.38", default-features = false, features = ["alloc"] }
log = "0.4.22"
atomic_enum = "0.3.0"
bitflags = "2.6.0"
@ -40,7 +35,6 @@ ahash = { version = "0.8.11", default-features = false, features = ["no-rng"] }
# acpi
acpi = { git = "https://github.com/alnyan/acpi.git", package = "acpi", branch = "acpi-system" }
rsdp = { git = "https://github.com/alnyan/acpi.git", package = "rsdp", branch = "acpi-system" }
aml = { git = "https://github.com/alnyan/acpi.git", branch = "acpi-system" }
acpi-system = { git = "https://github.com/alnyan/acpi-system.git" }
@ -49,14 +43,12 @@ yboot-proto.path = "boot/yboot-proto"
# Local libs
abi-lib.path = "lib/abi-lib"
abi-serde.path = "lib/abi-serde"
yggdrasil-abi.path = "lib/abi"
abi-generator.path = "tool/abi-generator"
# Kernel parts
kernel-arch-interface.path = "kernel/arch/interface"
kernel-arch-aarch64.path = "kernel/arch/aarch64"
kernel-arch-riscv64.path = "kernel/arch/riscv64"
kernel-arch-x86_64.path = "kernel/arch/x86_64"
kernel-arch-i686.path = "kernel/arch/i686"
kernel-arch-x86.path = "kernel/arch/x86"
@ -78,29 +70,8 @@ aarch64-cpu = "10.0.0"
discrete_range_map = { git = "https://git.alnyan.me/yggdrasil/discrete_range_map.git" }
# Test dependencies
tokio = { version = "1.42.0", default-features = false }
libc = "*"
[workspace.dependencies.elf]
version = "0.7.2"
git = "https://git.alnyan.me/yggdrasil/yggdrasil-elf.git"
default-features = false
features = ["no_std_stream"]
[workspace.lints.rust]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }
[workspace.lints.clippy]
derivable_impls = { level = "allow" }
[profile.dev]
opt-level = 1
# split-debuginfo = "packed"
lto = "thin"
panic = "abort"
[profile.test]
split-debuginfo = "none"
[profile.dev.package."*"]
opt-level = 3


@ -1,9 +0,0 @@
MIT License
Copyright (c) 2025 Mark Poliakov <mark@alnyan.me>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the next paragraph) shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -8,38 +8,22 @@ Rust Unix-like operating system.
Main features
-------------
* Architecture support:
* [aarch64](kernel/src/arch/aarch64)
* [x86_64](kernel/src/arch/x86_64)
* [i686](kernel/src/arch/i686) (Pentium Pro and later)
* Core features:
* Kernel/userspace preemptive multithreading
* Kernel-space multitasking with `async`/`await` runtime
* Symmetric Multiprocessing
* Rust-style interfaces for most things, like memory management, devices, etc.
* Filesystem features:
* Unix-like virtual filesystem:
* Architecture support: [aarch64](/kernel/src/arch/aarch64) and [x86_64](/kernel/src/arch/x86_64)
* Kernel/userspace preemptive multithreading
* Kernel-space multitasking with `async`/`await` runtime
* Symmetric Multiprocessing
* Unix-like virtual filesystem:
files, directories, block/char devices, symlinks, mounts
* In-memory read-write filesystem for tar-based initrd
* sysfs
* devfs
* ext2
* Userspace features:
* [Kernel-user ABI](lib/abi-def/yggdrasil.abi) generated from a Rust-like description language
* Sanitized system calls better suited for use in Rust
* Binary formats: ELF + `#!/...` shebangs
* C compatibility through a [custom Rust libc](userspace/lib/ygglibc)
* Userspace multithreading
* Synchronization primitives through futex-like interface
* Unix-like signals and exceptions
* [Dynamic loader](userspace/dyn-loader) for linking with shared libraries
* Hardware features:
* PCI Express devices
* NVMe drive support (read/write, currently x86_64 only, due to lack of MSI-X support on aarch64/i686).
* AHCI SATA drive support (read/write)
* xHCI USB host controller
* VirtIO Network + GPU framebuffer support
* USB HID keyboards
* [Kernel-user ABI](/lib/abi-def/yggdrasil.abi) generated from a Rust-like description language
* In-memory read-write filesystem for tar-based initrd
* sysfs/devfs
* Binary formats: ELF + `#!/...` shebangs
* Rust-style interfaces for most things, like memory management, devices, etc.
* PCI Express devices
* NVMe drive support (read/write, currently x86_64 only, due to lack of MSI-X support on aarch64).
* AHCI SATA drive support (read/write)
* xHCI USB host controller
* USB HID keyboards
aarch64-specific:
@ -48,34 +32,39 @@ aarch64-specific:
* ARM generic timer as system/monotonic timer
* GICv2 IRQ controller
x86-specific:
x86_64-specific:
* Boot options:
* x86_64: UEFI [yboot](https://git.alnyan.me/yggdrasil/yboot)
* i686: multiboot/grub
* UEFI boot through [yboot](https://git.alnyan.me/yggdrasil/yboot)
(no plans for legacy boot)
* I/O and Local APIC IRQ controllers
* PS/2 keyboard
* HPET for x86_64
* i8253-based timer for i686 or as a fallback timer
* i8253-based timer (ran into problems with HPET on
real hardware, had to revert, lol)
* COM ports
* ACPI, [work in progress](https://github.com/rust-osdev/acpi), mostly broken
on real hardware, so currently disabled
on real hardware
* ACPI shutdown
* PCI IRQ pin routing
* Events like power button, etc.
* Fancy framebuffer console
Userspace features:
* Sanitized system calls better suited for Rust
* Userspace threads
* Synchronization primitives through futex-like interface
* Unix-like signals and exceptions
Building the OS
---------------
**NOTE** This project uses an `xtask`-style build system. To see help, use `cargo xtask --help`.
Prerequisites:
* Decent CPU and a sizable amount of RAM
* ~40G of free disk space for a full build
* ~20G of free disk space
* Patience
**NOTE** Full OS build requires you to build the `*-unknown-yggdrasil`
Rust toolchain, which may take quite a while, so be prepared.
Steps:
@ -105,30 +94,21 @@ Steps:
```
2. Run `cargo xtask toolchain` to fetch, build and link the toolchain
**NOTE** if the toolchain fetch fails for some reason, try cloning directly
from `https://git.alnyan.me/yggdrasil/yggdrasil-rust.git` with the appropriate
branch.
3. Run `cargo xtask` to build the OS.
Once the OS has been built, you can run it in QEMU by executing
`cargo xtask qemu`. For more `xtask` commands, see `cargo xtask --help`.
General plans (in no particular order)
--------------------------------------
1. Get it running on more real hardware
2. Get a full LLVM build to work
3. Get rustc to work
4. Get self-hosted
5. Run doom (?)
In addition to eternal code cleanup, at which I've been doing quite a lazy job lately...
* Better unification of architecture code
* `async` for VFS (?)
* Code cleanup, I've been doing quite a lazy job at that lately...
Navigation
----------
* Kernel: [`kernel`](kernel)
* Userspace: [`userspace`](userspace)
* ABI definitions: [`lib/abi-def`](lib/abi-def)
* Kernel: [`/kernel`](/kernel)
* Userspace: [`/userspace`](/userspace)
* ABI definitions: [`/lib/abi-def`](/lib/abi-def)
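
The README above mentions synchronization primitives built on a futex-like interface. As a rough illustration of what that gives userspace, here is a minimal sketch of a mutex on top of such an interface; `futex_wait`/`futex_wake` are hypothetical placeholders, not the actual Yggdrasil ABI entry points.

```rust
use core::sync::atomic::{AtomicU32, Ordering};

// Hypothetical placeholders for the futex-like syscalls; the real
// Yggdrasil entry points and signatures may differ.
fn futex_wait(_word: &AtomicU32, _expected: u32) { /* block until woken */ }
fn futex_wake(_word: &AtomicU32) { /* wake one waiter */ }

/// Minimal futex-backed mutex: 0 = unlocked, 1 = locked.
pub struct Mutex {
    word: AtomicU32,
}

impl Mutex {
    pub const fn new() -> Self {
        Self { word: AtomicU32::new(0) }
    }

    pub fn lock(&self) {
        // Uncontended fast path: a single atomic compare-exchange.
        while self
            .word
            .compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            // Contended: sleep in the kernel until the word may no longer be 1.
            futex_wait(&self.word, 1);
        }
    }

    pub fn unlock(&self) {
        self.word.store(0, Ordering::Release);
        futex_wake(&self.word);
    }
}
```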

Binary file not shown.


@ -4,9 +4,6 @@ use bytemuck::{Pod, Zeroable};
use crate::{AvailableRegion, IterableMemoryMap, LoadProtocolHeader};
pub const PIXEL_R8G8B8A8: u32 = 1;
pub const PIXEL_B8G8R8A8: u32 = 2;
#[derive(Clone, Copy, Pod, Zeroable)]
#[repr(C)]
pub struct LoadProtocol {
@ -15,9 +12,6 @@ pub struct LoadProtocol {
pub memory_map: MemoryMap,
pub cmdline: u64,
pub cmdline_len: u64,
pub rsdp_address: u64,
pub initrd_address: u64,
pub initrd_size: u64,
@ -52,8 +46,6 @@ pub struct FramebufferOption {
pub res_stride: u64,
pub res_address: u64,
pub res_size: u64,
pub res_format: u32,
pub _0: u32,
}
impl AvailableRegion for AvailableMemoryRegion {


@ -1,39 +0,0 @@
use uefi::{
proto::media::file::{Directory, File, FileAttribute, FileMode},
CStr16, Result, Status,
};
#[derive(Debug)]
pub struct Config {
pub cmdline: [u8; 4096],
pub cmdline_len: usize,
}
impl Default for Config {
fn default() -> Self {
Self {
cmdline: [0; 4096],
cmdline_len: 0,
}
}
}
impl Config {
pub fn load(root: &mut Directory, path: &CStr16) -> Result<Self> {
let file = match root.open(path, FileMode::Read, FileAttribute::empty()) {
Ok(file) => file,
Err(error) => {
root.reset_entry_readout().ok();
log::warn!("Couldn't open {path:?}: {error:?}");
return Ok(Self::default());
}
};
root.reset_entry_readout().ok();
let mut this = Self::default();
let mut file = file.into_regular_file().ok_or(Status::INVALID_PARAMETER)?;
this.cmdline_len = file.read(&mut this.cmdline)?;
Ok(this)
}
}


@ -2,7 +2,6 @@
#![no_std]
#![no_main]
pub mod config;
pub mod elf;
pub mod initrd;
pub mod mem;
@ -10,16 +9,13 @@ pub mod protocol_ext;
use core::{arch::asm, mem::size_of, ops::Deref};
use config::Config;
use elf::Object;
use log::{debug, error, info};
use uefi::{
prelude::*,
proto::{
console::gop::{GraphicsOutput, PixelFormat},
device_path::DevicePath,
loaded_image::LoadedImage,
media::{file::Directory, fs::SimpleFileSystem},
console::gop::GraphicsOutput, device_path::DevicePath, loaded_image::LoadedImage,
media::fs::SimpleFileSystem,
},
table::{
boot::{AllocateType, MemoryType, ScopedProtocol},
@ -28,7 +24,7 @@ use uefi::{
Error,
};
use yboot_proto::{
v1::{self, AvailableMemoryRegion, FramebufferOption},
v1::{AvailableMemoryRegion, FramebufferOption},
LoadProtocolV1, LOADER_MAGIC,
};
@ -51,18 +47,11 @@ fn setup_framebuffer(bs: &BootServices, fb: &mut FramebufferOption) -> Result<()
let mut result = gop.frame_buffer();
let format = match mode.info().pixel_format() {
PixelFormat::Bgr => v1::PIXEL_B8G8R8A8,
PixelFormat::Rgb => v1::PIXEL_R8G8B8A8,
_ => 0,
};
fb.res_width = fb.req_width;
fb.res_height = fb.req_height;
fb.res_address = result.as_mut_ptr() as _;
fb.res_stride = mode.info().stride() as u64 * 4;
fb.res_size = result.size() as _;
fb.res_format = format;
info!(
"Framebuffer: {}x{} @ {:#x}",
@ -96,45 +85,27 @@ fn boot_partition(
bs.open_protocol_exclusive::<SimpleFileSystem>(fs_handle)
}
fn open_root(image: Handle, bs: &BootServices) -> Result<Directory, Error> {
let mut boot_partition = boot_partition(image, bs)?;
boot_partition.open_volume()
}
fn load_kernel<'a>(
config: &Config,
root: &mut Directory,
ih: Handle,
st: &SystemTable<Boot>,
) -> Result<(u64, u64, &'a mut LoadProtocolV1), Error> {
let bs = st.boot_services();
let mut kernel_obj = Object::open(root, cstr16!("kernel.elf"))?;
let mut fs = boot_partition(ih, bs)?;
let mut root = fs.open_volume()?;
let mut kernel_obj = Object::open(&mut root, cstr16!("kernel.elf"))?;
let loaded_obj = kernel_obj.load(bs)?;
debug!("Loaded object: {:#x?}", loaded_obj);
// Load initrd
let (initrd_start, initrd_size) = initrd::load_somewhere(bs, root, cstr16!("initrd.img"))?;
let (initrd_start, initrd_size) = initrd::load_somewhere(bs, &mut root, cstr16!("initrd.img"))?;
debug!(
"Loaded initrd: {:#x?}",
initrd_start..initrd_start + initrd_size
);
// Load cmdline
let cmdline = if config.cmdline_len != 0 {
let address = bs.allocate_pages(AllocateType::AnyPages, MemoryType::LOADER_DATA, 1)?;
let dst =
unsafe { core::slice::from_raw_parts_mut(address as *mut u8, config.cmdline_len) };
dst.copy_from_slice(&config.cmdline[..config.cmdline_len]);
debug!("Cmdline at {:#x?}", address);
address
} else {
0
};
// Other versions are not existent yet
assert_eq!(loaded_obj.protocol_version, 1);
let proto_data = unsafe { &mut *(loaded_obj.protocol_struct_paddr as *mut LoadProtocolV1) };
@ -145,9 +116,6 @@ fn load_kernel<'a>(
})?;
info!("RSDP at {:#x}", rsdp);
proto_data.cmdline = cmdline;
proto_data.cmdline_len = config.cmdline_len as _;
proto_data.rsdp_address = rsdp;
proto_data.initrd_address = initrd_start;
proto_data.initrd_size = initrd_size;
@ -213,35 +181,9 @@ unsafe fn map_and_enter_kernel(
#[entry]
fn efi_main(image_handle: Handle, mut system_table: SystemTable<Boot>) -> Status {
if uefi_services::init(&mut system_table).is_err() {
return Status::LOAD_ERROR;
}
uefi_services::init(&mut system_table).unwrap();
let bs = system_table.boot_services();
let mut root = match open_root(image_handle, bs) {
Ok(root) => root,
Err(error) => {
error!("Could not open boot partition root: {error:?}");
return Status::LOAD_ERROR;
}
};
let config = match Config::load(&mut root, cstr16!("yboot.cfg")) {
Ok(config) => config,
Err(error) => {
error!("Malformed yboot.cfg: {error:?}");
return Status::LOAD_ERROR;
}
};
let (entry, mmap_memory, proto_data) = match load_kernel(&config, &mut root, &system_table) {
Ok(e) => e,
Err(error) => {
error!("Failed to load the kernel/initrd: {error:?}");
return Status::LOAD_ERROR;
}
};
let (entry, mmap_memory, proto_data) = load_kernel(image_handle, &system_table).unwrap();
unsafe {
map_and_enter_kernel(system_table, proto_data, mmap_memory, entry);


@ -1,57 +0,0 @@
**NOTE** I haven't yet tested direct boot through the Raspberry Pi's
proprietary bootloader.
Booting Yggdrasil on Raspberry Pi 4B with u-boot:
1. Clone the u-boot sources to some directory and check out a stable
branch. I used v2024.10.
2. Modify cmd/boot.c by replacing the do_go_exec function:
/* Allow ports to override the default behavior */
__attribute__((weak))
unsigned long do_go_exec(ulong (*entry)(int, char * const []), int argc,
char *const argv[])
{
void *entry_ptr = (void *) entry;
ulong fdt_addr_r = 0;
if (argc >= 2) {
fdt_addr_r = hextoul(argv[1], NULL);
}
void (*func)(ulong) = entry_ptr;
func(fdt_addr_r);
return 0;
}
3. make CROSS_COMPILE=aarch64-linux-gnu- ARCH=arm64 rpi_4_defconfig
4. make CROSS_COMPILE=aarch64-linux-gnu- ARCH=arm64 -j
5. Copy u-boot.bin into your Pi SD-card's boot partition.
**NOTE** I assume you have all the bootloader parts in the boot partition already.
If not, clone the Raspberry Pi firmware repo and copy the following files to the boot partition:
* bootcode.bin
* start4.elf
* all the .dtb files (a bcm2711-rpi-4-b.dtb should be enough though)
6. config.txt:
enable_uart=1
arm_64bit=1
kernel=u-boot.bin
7. Compile the OS with `cargo xtask --arch=aarch64 --board=raspi4b --release`
8. Copy the following files into some directory:
* target/aarch64-unknown-raspi4b/release/yggdrasil-kernel
* userspace/target/aarch64-unknown-yggdrasil/release/initrd.tar
9. cd into that directory and start a TFTP server of your choice. I used `uftpd`.
10. Connect an Ethernet cable and a serial console to the Pi and run the following commands in the u-boot shell:
tftpboot 0x04000000 <YOUR IP>:initrd.tar
tftpboot ${loadaddr} <YOUR IP>:yggdrasil-kernel
load mmc 0:1 ${fdt_addr_r} bcm2711-rpi-4-b.dtb
fdt addr ${fdt_addr_r}
fdt resize
fdt memory 0x0 0x3C000000
fdt chosen 0x04000000 <WHATEVER SIZE WAS PRINTED WHEN RUNNING THE FIRST COMMAND>
bootelf -p
go ${kernel_addr_r} ${fdt_addr_r}
11. Yggdrasil OS should start!
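
The patched `do_go_exec` above forwards the device tree address as the kernel's first argument. A sketch of the receiving side is below; the real `__aarch64_entry` referenced by the linker scripts is assembly, so this Rust signature is purely illustrative.

```rust
/// Illustrative only: with the patched do_go_exec, the entry point
/// receives the DTB physical address as its first argument (x0 in the
/// AArch64 calling convention). The real __aarch64_entry is assembly.
#[no_mangle]
pub extern "C" fn __aarch64_entry(_dtb_phys: u64) -> ! {
    // Early init would set up the MMU, switch to the higher-half
    // mapping, then parse the device tree found at `_dtb_phys`.
    loop {}
}
```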


@ -1,30 +0,0 @@
Booting Yggdrasil OS on the StarFive VisionFive 2 RISC-V board:
* TODO: proper format for initrd image
* TODO: 0x70000000 can be replaced with a builtin var?
Prerequisites:
* OpenSBI + u-boot (you can use the regular Debian installation from StarFive)
* yggdrasil-kernel.bin
* initrd.img
Steps:
1. Copy yggdrasil-kernel.bin and initrd.img into some directory and start a TFTP server there
2. Connect to the VF2's serial port and Ethernet, then enter u-boot
3. Run the following commands:
# Get an IP address
dhcp
# [Optional] set some kernel cmdline params
setenv bootargs "debug.serial-level=info"
# Load initrd
tftpboot 0x70000000 <your-ip-address>:initrd.img
# Load kernel
tftpboot ${loadaddr} <your-ip-address>:yggdrasil-kernel.bin
# Load dtb
load mmc 1:3 ${fdt_addr_r} dtbs/...-starfive/starfive/${fdtfile}
fdt resize
# Enter the kernel
booti ${loadaddr} 0x70000000:<initrd-size> ${fdt_addr_r}


@ -1,9 +1,9 @@
{
"is-builtin": false,
"arch": "aarch64",
"os": "none",
"abi": "softfloat",
"llvm-target": "aarch64-unknown-none",
"data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32",
"data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32",
"max-atomic-width": 128,
"target-pointer-width": "64",
"features": "+v8a,+strict-align,-neon,-fp-armv8",


@ -21,13 +21,12 @@ SECTIONS {
. = ALIGN(4K);
.rodata : AT(. - KERNEL_VIRT_OFFSET) {
*(.rodata*)
*(.eh_frame*)
. = ALIGN(16);
PROVIDE(__init_array_start = .);
KEEP(*(.init_array*))
PROVIDE(__init_array_end = .);
PROVIDE(__dt_probes_start = .);
KEEP(*(.dt_probes));
PROVIDE(__dt_probes_end = .);
*(.rodata*)
}
. = ALIGN(4K);
@ -49,7 +48,6 @@ SECTIONS {
}
. = ALIGN(4K);
PROVIDE(__bss_end_phys = . - KERNEL_VIRT_OFFSET);
PROVIDE(__bss_size = __bss_end_phys - __bss_start_phys);
PROVIDE(__kernel_end = .);
};

Binary file not shown.


@ -1,8 +1,8 @@
{
"is-builtin": false,
"arch": "x86",
"cpu": "pentium4",
"os": "none",
"abi": "softfloat",
"llvm-target": "i686-unknown-linux-gnu",
"data-layout": "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128",
"max-atomic-width": 64,


@ -1,55 +0,0 @@
ENTRY(__aarch64_entry);
KERNEL_PHYS_BASE = 0x80000;
KERNEL_VIRT_OFFSET = 0xFFFFFF8000000000;
SECTIONS {
. = KERNEL_PHYS_BASE;
PROVIDE(__kernel_start = . + KERNEL_VIRT_OFFSET);
.text.entry : {
*(.text.entry)
}
. = ALIGN(16);
. = . + KERNEL_VIRT_OFFSET;
.text : AT(. - KERNEL_VIRT_OFFSET) {
KEEP(*(.text.vectors));
*(.text*)
}
. = ALIGN(4K);
.rodata : AT(. - KERNEL_VIRT_OFFSET) {
*(.rodata*)
*(.eh_frame*)
. = ALIGN(16);
PROVIDE(__init_array_start = .);
KEEP(*(.init_array*))
PROVIDE(__init_array_end = .);
}
. = ALIGN(4K);
.data.tables : AT (. - KERNEL_VIRT_OFFSET) {
KEEP(*(.data.tables))
}
. = ALIGN(4K);
.data : AT(. - KERNEL_VIRT_OFFSET) {
*(.data*)
*(.got*)
}
. = ALIGN(4K);
PROVIDE(__bss_start_phys = . - KERNEL_VIRT_OFFSET);
.bss : AT(. - KERNEL_VIRT_OFFSET) {
*(COMMON)
*(.bss*)
}
. = ALIGN(4K);
PROVIDE(__bss_end_phys = . - KERNEL_VIRT_OFFSET);
PROVIDE(__bss_size = __bss_end_phys - __bss_start_phys);
PROVIDE(__kernel_end = .);
};
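
Every loadable section in the script above is linked at a virtual address but placed at `AT(. - KERNEL_VIRT_OFFSET)`, its physical load address. The arithmetic, as a small sketch using the offset and physical base from this script:

```rust
// Higher-half mapping arithmetic used by the linker script above.
const KERNEL_VIRT_OFFSET: usize = 0xFFFF_FF80_0000_0000;

fn virt_to_phys(virt: usize) -> usize {
    virt - KERNEL_VIRT_OFFSET
}

fn phys_to_virt(phys: usize) -> usize {
    phys + KERNEL_VIRT_OFFSET
}

fn main() {
    // KERNEL_PHYS_BASE = 0x80000 in the script: the kernel is linked at
    // KERNEL_VIRT_OFFSET + 0x80000 but loaded at physical 0x80000.
    assert_eq!(phys_to_virt(0x80000), 0xFFFF_FF80_0008_0000);
    assert_eq!(virt_to_phys(0xFFFF_FF80_0008_0000), 0x80000);
}
```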


@ -1,58 +0,0 @@
ENTRY(__rv64_entry);
KERNEL_PHYS_BASE = 0x40200000;
KERNEL_VIRT_OFFSET = 0xFFFFFFF000000000;
SECTIONS {
. = KERNEL_PHYS_BASE;
PROVIDE(__kernel_start = . + KERNEL_VIRT_OFFSET);
.text.entry : {
*(.text.entry)
}
. = ALIGN(16);
. = . + KERNEL_VIRT_OFFSET;
.text : AT(. - KERNEL_VIRT_OFFSET) {
KEEP(*(.text.vectors));
*(.text*)
}
. = ALIGN(4K);
.rodata : AT(. - KERNEL_VIRT_OFFSET) {
*(.rodata*)
*(.eh_frame*)
}
. = ALIGN(4K);
.data.tables : AT (. - KERNEL_VIRT_OFFSET) {
KEEP(*(.data.tables))
}
. = ALIGN(4K);
.data : AT(. - KERNEL_VIRT_OFFSET) {
*(.data*)
. = ALIGN(8);
/* PROVIDE(__global_pointer = . + 0x800 - KERNEL_VIRT_OFFSET); */
. = ALIGN(16);
PROVIDE(__init_array_start = .);
KEEP(*(.init_array*))
PROVIDE(__init_array_end = .);
*(.got*)
}
. = ALIGN(4K);
PROVIDE(__bss_start_phys = . - KERNEL_VIRT_OFFSET);
.bss : AT(. - KERNEL_VIRT_OFFSET) {
*(COMMON)
*(.bss*)
}
. = ALIGN(4K);
PROVIDE(__bss_end_phys = . - KERNEL_VIRT_OFFSET);
PROVIDE(__bss_size = __bss_end_phys - __bss_start_phys);
PROVIDE(__kernel_end = .);
};


@ -1,58 +0,0 @@
ENTRY(__rv64_entry);
KERNEL_PHYS_BASE = 0x80200000;
KERNEL_VIRT_OFFSET = 0xFFFFFFF000000000;
SECTIONS {
. = KERNEL_PHYS_BASE;
PROVIDE(__kernel_start = . + KERNEL_VIRT_OFFSET);
.text.entry : {
*(.text.entry)
}
. = ALIGN(16);
. = . + KERNEL_VIRT_OFFSET;
.text : AT(. - KERNEL_VIRT_OFFSET) {
KEEP(*(.text.vectors));
*(.text*)
}
. = ALIGN(4K);
.rodata : AT(. - KERNEL_VIRT_OFFSET) {
*(.rodata*)
*(.eh_frame*)
}
. = ALIGN(4K);
.data.tables : AT (. - KERNEL_VIRT_OFFSET) {
KEEP(*(.data.tables))
}
. = ALIGN(4K);
.data : AT(. - KERNEL_VIRT_OFFSET) {
*(.data*)
. = ALIGN(8);
/* PROVIDE(__global_pointer = . + 0x800 - KERNEL_VIRT_OFFSET); */
. = ALIGN(16);
PROVIDE(__init_array_start = .);
KEEP(*(.init_array*))
PROVIDE(__init_array_end = .);
*(.got*)
}
. = ALIGN(4K);
PROVIDE(__bss_start_phys = . - KERNEL_VIRT_OFFSET);
.bss : AT(. - KERNEL_VIRT_OFFSET) {
*(COMMON)
*(.bss*)
}
. = ALIGN(4K);
PROVIDE(__bss_end_phys = . - KERNEL_VIRT_OFFSET);
PROVIDE(__bss_size = __bss_end_phys - __bss_start_phys);
PROVIDE(__kernel_end = .);
};


@ -1,26 +0,0 @@
{
"arch": "riscv64",
"os": "none",
"abi": "softfloat",
"cpu": "generic-rv64",
"llvm-target": "riscv64",
"data-layout": "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128",
"max-atomic-width": 64,
"target-pointer-width": "64",
"features": "+m,+a,+c",
"disable-redzone": true,
"executables": true,
"panic-strategy": "abort",
"dynamic-linking": true,
"relocation-model": "pic",
"code-model": "medium",
"eh-frame-header": false,
"crt-objects-fallback": "false",
"emit-debug-gdb-scripts": false,
"llvm-abiname": "lp64",
"linker": "rust-lld",
"linker-flavor": "ld.lld"
}


@ -1,14 +1,13 @@
{
"is-builtin": false,
"arch": "x86_64",
"cpu": "x86-64",
"os": "none",
"abi": "softfloat",
"rustc-abi": "x86-softfloat",
"llvm-target": "x86_64-unknown-linux-gnu",
"data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128",
"max-atomic-width": 64,
"target-pointer-width": "64",
"features": "-avx,-sse,-avx2,+soft-float",
"features": "-avx,-sse,+soft-float",
"disable-redzone": true,
"executables": true,


@ -7,14 +7,13 @@ authors = ["Mark Poliakov <mark@alnyan.me>"]
[dependencies]
abi-lib.workspace = true
abi-serde.workspace = true
yggdrasil-abi.workspace = true
kernel-arch-interface.workspace = true
libk.workspace = true
libk-util.workspace = true
libk-mm.workspace = true
libk-device.workspace = true
elf.workspace = true
chrono.workspace = true
device-api = { workspace = true, features = ["derive"] }
device-api-macros.workspace = true
@ -29,17 +28,13 @@ ygg_driver_usb = { path = "driver/bus/usb" }
ygg_driver_net_core = { path = "driver/net/core" }
ygg_driver_net_loopback = { path = "driver/net/loopback" }
ygg_driver_virtio_net = { path = "driver/virtio/net", features = ["pci"] }
ygg_driver_virtio_gpu = { path = "driver/virtio/gpu", features = ["pci"] }
ygg_driver_virtio_blk = { path = "driver/virtio/blk", features = ["pci"] }
ygg_driver_nvme = { path = "driver/block/nvme" }
ygg_driver_ahci = { path = "driver/block/ahci" }
ygg_driver_usb_xhci = { path = "driver/usb/xhci" }
ygg_driver_input = { path = "driver/input" }
ygg_driver_usb_xhci.path = "driver/usb/xhci"
ygg_driver_net_rtl81xx.path = "driver/net/rtl81xx"
kernel-fs = { path = "driver/fs/kernel-fs" }
memfs = { path = "driver/fs/memfs" }
ext2 = { path = "driver/fs/ext2" }
ygg_driver_fat32.path = "driver/fs/fat32"
log.workspace = true
bitflags.workspace = true
@ -49,7 +44,6 @@ bytemuck.workspace = true
futures-util.workspace = true
crossbeam-queue.workspace = true
async-trait.workspace = true
cfg-if.workspace = true
git-version = "0.3.9"
@ -58,20 +52,16 @@ aarch64-cpu.workspace = true
device-tree.workspace = true
kernel-arch-aarch64.workspace = true
[target.'cfg(target_arch = "riscv64")'.dependencies]
device-tree.workspace = true
kernel-arch-riscv64.workspace = true
ygg_driver_net_stmmac.path = "driver/net/stmmac"
[target.'cfg(target_arch = "x86_64")'.dependencies]
yboot-proto.workspace = true
kernel-arch-x86_64.workspace = true
kernel-arch-x86.workspace = true
ygg_driver_acpi.path = "driver/acpi"
ygg_driver_net_igbe.path = "driver/net/igbe"
ygg_driver_nvme = { path = "driver/block/nvme" }
acpi.workspace = true
aml.workspace = true
acpi-system.workspace = true
[target.'cfg(target_arch = "x86")'.dependencies]
kernel-arch-i686.workspace = true
@ -90,21 +80,10 @@ kernel-arch-x86_64.workspace = true
kernel-arch-i686.workspace = true
kernel-arch-x86.workspace = true
kernel-arch-aarch64.workspace = true
kernel-arch-riscv64.workspace = true
ygg_driver_acpi.path = "driver/acpi"
ygg_driver_net_stmmac.path = "driver/net/stmmac"
[features]
default = ["fb_console"]
fb_console = []
# TODO replace this with a better configuration mechanism
aarch64_board_virt = ["kernel-arch-aarch64/aarch64_board_virt"]
aarch64_board_raspi4b = ["kernel-arch-aarch64/aarch64_board_raspi4b"]
riscv64_board_virt = ["kernel-arch-riscv64/riscv64_board_virt"]
riscv64_board_jh7110 = ["kernel-arch-riscv64/riscv64_board_jh7110"]
[lints]
workspace = true
[lints.rust]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }


@ -3,22 +3,21 @@ name = "kernel-arch"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[target.'cfg(all(target_os = "none", target_arch = "x86_64"))'.dependencies]
kernel-arch-x86_64.path = "x86_64"
kernel-arch-x86_64 = { path = "x86_64" }
[target.'cfg(all(target_os = "none", target_arch = "aarch64"))'.dependencies]
kernel-arch-aarch64.path = "aarch64"
kernel-arch-aarch64 = { path = "aarch64" }
[target.'cfg(all(target_os = "none", target_arch = "x86"))'.dependencies]
kernel-arch-i686.path = "i686"
[target.'cfg(all(target_os = "none", target_arch = "riscv64"))'.dependencies]
kernel-arch-riscv64.path = "riscv64"
kernel-arch-i686 = { path = "i686" }
[target.'cfg(not(target_os = "none"))'.dependencies]
kernel-arch-hosted.path = "hosted"
kernel-arch-hosted = { path = "hosted" }
[dependencies]
kernel-arch-interface.path = "interface"
kernel-arch-interface = { path = "interface" }
cfg-if.workspace = true


@ -17,11 +17,3 @@ tock-registers.workspace = true
[build-dependencies]
cc = "1.0"
[features]
default = []
aarch64_board_virt = []
aarch64_board_raspi4b = []
[lints]
workspace = true


@ -23,8 +23,6 @@
.endm
.macro LOAD_TASK_STATE
dsb ishst
// x19 == tpidr_el0, x20 = ttbr0_el1
ldp x19, x20, [sp, #16 * 6]
msr tpidr_el0, x19
@ -38,12 +36,6 @@
ldp x29, x30, [sp, #16 * 5]
add sp, sp, #{context_size}
isb sy
tlbi vmalle1is
ic iallu
dsb ish
isb sy
.endm
__aarch64_task_enter_kernel:
@ -95,7 +87,7 @@ __aarch64_task_enter_user:
mov lr, xzr
dsb ish
dmb ish
isb sy
eret


@ -60,20 +60,10 @@ impl FpContext {
}
}
/// Stores the FPU context into the `this` pointer.
///
/// # Safety
///
/// It is up to the caller to ensure `this` is a valid pointer to store the FPU context in.
pub unsafe fn store(this: *mut Self) {
__aarch64_fp_store_context(this as _)
}
/// Loads the FPU with the context stored in `this` pointer.
///
/// # Safety
///
/// It is up to the caller to ensure `this` is a valid pointer to load the FPU context from.
pub unsafe fn restore(this: *const Self) {
__aarch64_fp_restore_context(this as _)
}
@ -208,13 +198,11 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
stack.push(mdscr_el1);
stack.push(context.stack_pointer);
let ttbr0 = context.address_space | (context.asid << 48) | 1;
setup_common_context(
&mut stack,
__aarch64_task_enter_user as _,
ttbr0,
context.thread_pointer as _,
context.address_space,
context.tls as _,
);
let sp = stack.build();
@ -255,10 +243,6 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
__aarch64_switch_task_and_drop(self.inner.get(), thread);
}
fn set_thread_pointer(&self, _tp: usize) {
// Do nothing: tp can be set from EL0 by writing to TPIDR_EL0 directly
}
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>> Drop


@ -1,19 +1,15 @@
#![no_std]
#![feature(naked_functions, decl_macro)]
#![allow(clippy::new_without_default)]
#![feature(naked_functions, trait_upcasting)]
extern crate alloc;
use core::sync::atomic::{AtomicUsize, Ordering};
use aarch64_cpu::{
asm::barrier,
registers::{DAIF, MPIDR_EL1, TPIDR_EL1},
};
use alloc::{boxed::Box, sync::Arc, vec::Vec};
use device_api::interrupt::LocalInterruptController;
use aarch64_cpu::registers::{DAIF, MPIDR_EL1, TPIDR_EL1};
use alloc::{boxed::Box, vec::Vec};
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use kernel_arch_interface::{
cpu::{CpuData, CpuImpl, IpiQueue},
cpu::{CpuImpl, IpiQueue},
guard::IrqGuard,
task::Scheduler,
util::OneTimeInit,
@ -32,11 +28,9 @@ pub struct ArchitectureImpl;
pub trait GicInterface: LocalInterruptController {}
pub struct PerCpuData {
pub gic: OneTimeInit<Arc<dyn GicInterface>>,
pub gic: OneTimeInit<&'static dyn GicInterface>,
}
impl CpuData for PerCpuData {}
static IPI_QUEUES: OneTimeInit<Vec<IpiQueue<ArchitectureImpl>>> = OneTimeInit::new();
pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);
@ -48,7 +42,6 @@ extern "C" fn idle_task(_: usize) -> ! {
}
impl ArchitectureImpl {
#[inline]
pub fn local_cpu_data() -> Option<&'static mut PerCpuData> {
unsafe { (Self::local_cpu() as *mut PerCpuData).as_mut() }
}
@ -69,7 +62,6 @@ impl Architecture for ArchitectureImpl {
DAIF.read(DAIF::I) != 0
}
#[inline(never)]
unsafe fn set_interrupt_mask(mask: bool) -> bool {
let old = Self::interrupt_mask();
if mask {
@ -114,10 +106,6 @@ impl Architecture for ArchitectureImpl {
IPI_QUEUES.init(queues);
}
fn ipi_queue(cpu_id: u32) -> Option<&'static IpiQueue<Self>> {
IPI_QUEUES.try_get().and_then(|q| q.get(cpu_id as usize))
}
fn idle_task() -> extern "C" fn(usize) -> ! {
idle_task
}
@ -126,8 +114,13 @@ impl Architecture for ArchitectureImpl {
CPU_COUNT.load(Ordering::Acquire)
}
fn local_interrupt_controller() -> Option<&'static dyn LocalInterruptController> {
None
fn local_interrupt_controller() -> &'static dyn LocalInterruptController {
let local = Self::local_cpu_data().unwrap();
*local.gic.get()
}
fn message_interrupt_controller() -> &'static dyn MessageInterruptController {
todo!()
}
fn cpu_available_features<S: Scheduler>(_cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
@ -137,30 +130,4 @@ impl Architecture for ArchitectureImpl {
fn cpu_enabled_features<S: Scheduler>(_cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
None
}
// Cache/barrier operation
fn load_barrier() {
barrier::dmb(barrier::ISHLD);
}
fn store_barrier() {
barrier::dmb(barrier::ISHST);
}
fn memory_barrier() {
barrier::dsb(barrier::SY);
}
fn flush_virtual_range(range: core::ops::Range<usize>) {
// TODO cache line assumed to be 64 bytes
const CLSIZE: usize = 64;
let start = range.start & !(CLSIZE - 1);
let end = (range.end + (CLSIZE - 1)) & !(CLSIZE - 1);
for line in (start..end).step_by(CLSIZE) {
unsafe {
core::arch::asm!("dc ivac, {address}", address = in(reg) line);
}
}
}
}


@ -1,16 +1,16 @@
use core::{
alloc::Layout,
ops::{Deref, DerefMut},
sync::atomic::{self, AtomicUsize, Ordering},
ptr::addr_of,
sync::atomic::AtomicUsize,
sync::atomic::Ordering,
};
use aarch64_cpu::{
asm::barrier,
registers::{MAIR_EL1, PAR_EL1, SCTLR_EL1, TTBR0_EL1, TTBR1_EL1},
};
use aarch64_cpu::registers::{TTBR0_EL1, TTBR1_EL1};
use kernel_arch_interface::{
mem::{DeviceMemoryAttributes, KernelTableManager, RawDeviceMemoryMapping},
split_spinlock, Architecture, KERNEL_VIRT_OFFSET,
sync::split_spinlock,
KERNEL_VIRT_OFFSET,
};
use libk_mm_interface::{
address::PhysicalAddress,
@ -18,11 +18,9 @@ use libk_mm_interface::{
};
use memtables::aarch64::{FixedTables, KERNEL_L3_COUNT};
use static_assertions::const_assert_eq;
use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
use tock_registers::interfaces::Writeable;
use yggdrasil_abi::error::Error;
use crate::ArchitectureImpl;
use self::table::{PageAttributes, PageEntry, PageTable, L1, L2, L3};
pub mod process;
@ -33,11 +31,7 @@ pub struct KernelTableManagerImpl;
// TODO eliminate this requirement by using precomputed indices
const MAPPING_OFFSET: usize = KERNEL_VIRT_OFFSET;
#[cfg(any(feature = "aarch64_board_virt", rust_analyzer))]
const KERNEL_PHYS_BASE: usize = 0x40080000;
#[cfg(any(feature = "aarch64_board_raspi4b", rust_analyzer))]
const KERNEL_PHYS_BASE: usize = 0x80000;
// Precomputed mappings
const KERNEL_L1_INDEX: usize = page_index::<L1>(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
@ -47,9 +41,6 @@ const KERNEL_END_L2_INDEX: usize = KERNEL_START_L2_INDEX + KERNEL_L3_COUNT;
// Must not be zero, should be at 4MiB
const_assert_eq!(KERNEL_START_L2_INDEX, 0);
// From static mapping
#[cfg(any(feature = "aarch64_board_raspi4b", rust_analyzer))]
const_assert_eq!(KERNEL_L1_INDEX, 0);
#[cfg(any(feature = "aarch64_board_virt", rust_analyzer))]
const_assert_eq!(KERNEL_L1_INDEX, 1);
// Runtime mappings
@ -81,7 +72,7 @@ split_spinlock! {
use libk_mm_interface::KernelImageObject;
#[link_section = ".data.tables"]
static KERNEL_TABLES: KernelImageObject<FixedTables> =
static KERNEL_TABLES<lock: ArchitectureImpl>: KernelImageObject<FixedTables> =
unsafe { KernelImageObject::new(FixedTables::zeroed()) };
}
@ -135,7 +126,7 @@ impl<'a, T: Sized> EarlyMapping<'a, T> {
let layout = Layout::array::<T>(len).unwrap();
let aligned = physical.page_align_down::<L3>();
let offset = physical.page_offset::<L3>();
let page_count = (offset + layout.size()).div_ceil(L3::SIZE);
let page_count = (offset + layout.size() + L3::SIZE - 1) / L3::SIZE;
let virt = map_early_pages(aligned, page_count)?;
let value = core::slice::from_raw_parts_mut((virt + offset) as *mut T, len);
@ -144,7 +135,7 @@ impl<'a, T: Sized> EarlyMapping<'a, T> {
}
}
impl<T: ?Sized> Deref for EarlyMapping<'_, T> {
impl<'a, T: ?Sized> Deref for EarlyMapping<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
@ -152,13 +143,13 @@ impl<T: ?Sized> Deref for EarlyMapping<'_, T> {
}
}
impl<T: ?Sized> DerefMut for EarlyMapping<'_, T> {
impl<'a, T: ?Sized> DerefMut for EarlyMapping<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.value
}
}
impl<T: ?Sized> Drop for EarlyMapping<'_, T> {
impl<'a, T: ?Sized> Drop for EarlyMapping<'a, T> {
fn drop(&mut self) {
let address = (self.value as *mut T).addr() & !(L3::SIZE - 1);
@ -184,7 +175,7 @@ fn ram_block_flags() -> PageAttributes {
// TODO UXN, PXN
PageAttributes::BLOCK
| PageAttributes::ACCESS
| PageAttributes::SH_OUTER
| PageAttributes::SH_INNER
| PageAttributes::PAGE_ATTR_NORMAL
| PageAttributes::PRESENT
}
@ -208,7 +199,6 @@ unsafe fn map_early_pages(physical: PhysicalAddress, count: usize) -> Result<usi
let page = physical.add(i * L3::SIZE);
// TODO NX, NC
EARLY_MAPPING_L3[i + l3i] = PageEntry::normal_page(page, PageAttributes::empty());
tlb_flush_vaae1(EARLY_MAPPING_OFFSET + (l3i + i) * L3::SIZE);
}
return Ok(EARLY_MAPPING_OFFSET + l3i * L3::SIZE);
@ -226,6 +216,8 @@ unsafe fn unmap_early_page(address: usize) {
assert!(EARLY_MAPPING_L3[l3i].is_present());
EARLY_MAPPING_L3[l3i] = PageEntry::INVALID;
// TODO invalidate tlb
}
/// # Safety
@ -233,17 +225,13 @@ unsafe fn unmap_early_page(address: usize) {
/// Only meant to be used by the architecture initialization functions.
pub unsafe fn map_ram_l1(index: usize) {
if index >= RAM_MAPPING_L1_COUNT {
ArchitectureImpl::halt();
todo!()
}
let mut tables = KERNEL_TABLES.lock();
let table_index = index + RAM_MAPPING_START_L1I;
assert_eq!(tables.l1.data[index + RAM_MAPPING_START_L1I], 0);
if tables.l1.data[table_index] != 0 {
ArchitectureImpl::halt();
}
tables.l1.data[table_index] = ((index * L1::SIZE) as u64) | ram_block_flags().bits();
tlb_flush_vaae1(RAM_MAPPING_OFFSET + index * L1::SIZE);
tables.l1.data[index + RAM_MAPPING_START_L1I] =
((index * L1::SIZE) as u64) | ram_block_flags().bits();
}
// Device mappings
@ -270,7 +258,6 @@ unsafe fn map_device_memory_l3(
// TODO NX, NC
DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::device_page(base.add(j * L3::SIZE));
tlb_flush_vaae1(DEVICE_MAPPING_OFFSET + l2i * L2::SIZE + l3i * L3::SIZE);
}
return Ok(DEVICE_MAPPING_OFFSET + i * L3::SIZE);
@ -279,7 +266,6 @@ unsafe fn map_device_memory_l3(
Err(Error::OutOfMemory)
}
#[allow(unused)]
unsafe fn map_device_memory_l2(
base: PhysicalAddress,
count: usize,
@ -294,9 +280,14 @@ unsafe fn map_device_memory_l2(
for j in 0..count {
DEVICE_MAPPING_L2[i + j] = PageEntry::<L2>::device_block(base.add(j * L2::SIZE));
tlb_flush_vaae1(DEVICE_MAPPING_OFFSET + (i + j) * L2::SIZE);
}
// log::debug!(
// "map l2s: base={:#x}, count={} -> {:#x}",
// base,
// count,
// DEVICE_MAPPING_OFFSET + i * L2::SIZE
// );
return Ok(DEVICE_MAPPING_OFFSET + i * L2::SIZE);
}
@ -308,6 +299,7 @@ pub(crate) unsafe fn map_device_memory(
size: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping<KernelTableManagerImpl>, Error> {
// debugln!("Map {}B @ {:#x}", size, base);
let l3_aligned = base.page_align_down::<L3>();
let l3_offset = base.page_offset::<L3>();
let page_count = (l3_offset + size).page_count::<L3>();
@ -322,7 +314,6 @@ pub(crate) unsafe fn map_device_memory(
let address = base_address + l2_offset;
Ok(RawDeviceMemoryMapping::from_raw_parts(
l2_aligned.into_u64(),
address,
base_address,
page_count,
@ -334,7 +325,6 @@ pub(crate) unsafe fn map_device_memory(
let address = base_address + l3_offset;
Ok(RawDeviceMemoryMapping::from_raw_parts(
l3_aligned.into_u64(),
address,
base_address,
page_count,
@ -344,6 +334,11 @@ pub(crate) unsafe fn map_device_memory(
}
pub(crate) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping<KernelTableManagerImpl>) {
// debugln!(
// "Unmap {}B @ {:#x}",
// map.page_count * map.page_size,
// map.base_address
// );
match map.page_size {
L3::SIZE => {
for i in 0..map.page_count {
@ -362,90 +357,10 @@ pub(crate) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping<KernelTabl
}
#[inline]
pub fn tlb_flush_asid(asid: u8) {
barrier::dsb(barrier::ISHST);
let value = (asid as u64) << 48;
pub fn tlb_flush_vaae1(mut page: usize) {
page >>= 12;
unsafe {
core::arch::asm!("tlbi aside1, {value}", value = in(reg) value);
}
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
}
#[inline]
pub fn tlb_flush_all() {
barrier::dsb(barrier::ISHST);
unsafe {
core::arch::asm!("tlbi vmalle1is");
}
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
}
#[inline]
pub fn tlb_flush_vaae1(page: usize) {
barrier::dsb(barrier::ISHST);
let argument = page >> 12;
unsafe {
core::arch::asm!("tlbi vaae1, {argument}", argument = in(reg) argument);
}
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
}
pub fn at_s1e0r(input: usize) -> Option<u64> {
barrier::dsb(barrier::ISHST);
unsafe {
core::arch::asm!("at s1e0r, {address}", address = in(reg) input);
}
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
if PAR_EL1.matches_all(PAR_EL1::F::TranslationSuccessfull) {
Some(PAR_EL1.read(PAR_EL1::PA))
} else {
None
}
}
pub fn at_s1e1r(input: usize) -> Option<u64> {
barrier::dsb(barrier::ISHST);
unsafe {
core::arch::asm!("at s1e1r, {address}", address = in(reg) input);
}
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
if PAR_EL1.matches_all(PAR_EL1::F::TranslationSuccessfull) {
Some(PAR_EL1.read(PAR_EL1::PA))
} else {
None
}
}
pub fn ic_iallu() {
atomic::compiler_fence(Ordering::SeqCst);
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
unsafe {
core::arch::asm!("ic iallu");
}
barrier::isb(barrier::SY);
}
pub fn dc_cvac(input: usize) {
barrier::dsb(barrier::ISHST);
unsafe {
core::arch::asm!("dc cvac, {address}", address = in(reg) input);
}
}
fn auto_address<T>(value: *const T) -> usize {
let addr = value.addr();
if addr < KERNEL_VIRT_OFFSET {
// Called from lower half
addr
} else {
// Called from higher-half
addr - KERNEL_VIRT_OFFSET
core::arch::asm!("tlbi vaae1, {page}", page = in(reg) page);
}
}
@ -455,8 +370,7 @@ fn auto_address<T>(value: *const T) -> usize {
///
/// Unsafe, must only be called by BSP during its early init while still in "lower-half"
pub unsafe fn load_fixed_tables() {
let ttbr0 = auto_address(&raw const KERNEL_TABLES) as u64;
let ttbr0 = KERNEL_TABLES.lock().l1.data.as_ptr().addr() as u64;
TTBR0_EL1.set(ttbr0);
TTBR1_EL1.set(ttbr0);
}
@ -468,9 +382,9 @@ pub unsafe fn load_fixed_tables() {
/// Unsafe, must only be called by BSP during its early init, must already be in "higher-half"
pub unsafe fn init_fixed_tables() {
// TODO this could be built in compile-time too?
let mut tables = KERNEL_TABLES.grab();
let early_mapping_l3_phys = auto_address(&raw const EARLY_MAPPING_L3);
let device_mapping_l2_phys = auto_address(&raw const DEVICE_MAPPING_L2);
let mut tables = KERNEL_TABLES.lock();
let early_mapping_l3_phys = addr_of!(EARLY_MAPPING_L3) as usize - KERNEL_VIRT_OFFSET;
let device_mapping_l2_phys = addr_of!(DEVICE_MAPPING_L2) as usize - KERNEL_VIRT_OFFSET;
for i in 0..DEVICE_MAPPING_L3_COUNT {
let device_mapping_l3_phys = PhysicalAddress::from_usize(
@ -482,70 +396,8 @@ pub unsafe fn init_fixed_tables() {
assert_eq!(tables.l2.data[EARLY_MAPPING_L2I], 0);
tables.l2.data[EARLY_MAPPING_L2I] =
(early_mapping_l3_phys as u64) | kernel_table_flags().bits();
tlb_flush_vaae1(EARLY_MAPPING_OFFSET);
assert_eq!(tables.l1.data[DEVICE_MAPPING_L1I], 0);
tables.l1.data[DEVICE_MAPPING_L1I] =
(device_mapping_l2_phys as u64) | kernel_table_flags().bits();
tlb_flush_all();
}
pub fn setup_memory_attributes() {
// TODO: Figure out why WriteBack_NonTransient_ReadWriteAlloc doesn't work on Pi 4B
MAIR_EL1.write(
//// Attribute 0 -- normal memory
MAIR_EL1::Attr0_Normal_Inner::WriteBack_NonTransient +
MAIR_EL1::Attr0_Normal_Outer::WriteBack_NonTransient +
//// Attribute 1 -- normal non-cacheable memory
MAIR_EL1::Attr0_Normal_Inner::NonCacheable +
MAIR_EL1::Attr0_Normal_Outer::NonCacheable +
//// Attribute 2 -- device memory
MAIR_EL1::Attr1_Device::nonGathering_nonReordering_EarlyWriteAck,
);
}
/// Enables data cache.
///
/// # Safety
///
/// Manipulates low-level machine state, use with care.
pub unsafe fn enable_dcache() {
barrier::dsb(barrier::ISHST);
barrier::isb(barrier::SY);
SCTLR_EL1.modify(SCTLR_EL1::C::Cacheable);
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
}
/// Enables instruction cache.
///
/// # Safety
///
/// Manipulates low-level machine state, use with care.
pub unsafe fn enable_icache() {
barrier::isb(barrier::SY);
SCTLR_EL1.modify(SCTLR_EL1::I::Cacheable);
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
}
/// Disables instruction cache.
///
/// # Safety
///
/// Manipulates low-level machine state, use with care. Might break some instructions.
pub unsafe fn disable_icache() {
barrier::isb(barrier::SY);
ic_iallu();
SCTLR_EL1.modify(SCTLR_EL1::I::NonCacheable);
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
}


@ -7,7 +7,7 @@ use core::{
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::PhysicalRefMut,
process::{PageAttributeUpdate, ProcessAddressSpaceManager},
process::ProcessAddressSpaceManager,
table::{
EntryLevel, EntryLevelDrop, EntryLevelExt, MapAttributes, NextPageTable, TableAllocator,
},
@ -17,9 +17,8 @@ use yggdrasil_abi::error::Error;
use crate::{mem::table::PageEntry, KernelTableManagerImpl};
use super::{
dc_cvac, ic_iallu,
table::{PageAttributes, PageTable, L1, L2, L3},
tlb_flush_asid, tlb_flush_vaae1,
table::{PageTable, L1, L2, L3},
tlb_flush_vaae1,
};
/// AArch64 implementation of a process address space table
@ -50,8 +49,6 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
l1[i] = PageEntry::INVALID;
}
tlb_flush_asid(asid);
Ok(Self {
l1,
asid,
@ -71,29 +68,17 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
) -> Result<(), Error> {
self.write_l3_entry(
address,
PageEntry::normal_page(
physical,
PageAttributes::from(flags) | PageAttributes::NON_GLOBAL,
),
PageEntry::normal_page(physical, flags.into()),
false,
)
}
unsafe fn update_page_attributes(
&mut self,
address: usize,
update: &PageAttributeUpdate,
) -> Result<(), Error> {
self.update_l3_entry(address, |entry| entry.update(update))
}
unsafe fn unmap_page(&mut self, address: usize) -> Result<(PhysicalAddress, bool), Error> {
unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error> {
self.pop_l3_entry(address)
}
fn as_address_with_asid(&self) -> (u64, u64) {
let physical = unsafe { u64::from(self.l1.as_physical_address()) };
(physical, self.asid as u64)
fn as_address_with_asid(&self) -> u64 {
unsafe { u64::from(self.l1.as_physical_address()) | ((self.asid as u64) << 48) }
}
unsafe fn clear(&mut self) {
@ -122,38 +107,12 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
}
l3[l3i] = entry;
dc_cvac((&raw const l3[l3i]).addr());
tlb_flush_vaae1(virt);
Ok(())
}
fn update_l3_entry<F: FnOnce(&mut PageEntry<L3>) -> Result<(), Error>>(
&mut self,
virt: usize,
mapper: F,
) -> Result<(), Error> {
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
let l3i = virt.page_index::<L3>();
let mut l2 = self.l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;
let entry = &mut l3[l3i];
if !entry.is_present() {
return Err(Error::DoesNotExist);
}
mapper(entry)?;
ic_iallu();
dc_cvac((&raw const l3[l3i]).addr());
tlb_flush_vaae1(virt);
Ok(())
}
fn pop_l3_entry(&mut self, virt: usize) -> Result<(PhysicalAddress, bool), Error> {
fn pop_l3_entry(&mut self, virt: usize) -> Result<PhysicalAddress, Error> {
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
let l3i = virt.page_index::<L3>();
@ -162,16 +121,12 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
let mut l2 = self.l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;
let entry = l3[l3i];
let page = entry.as_page().ok_or(Error::DoesNotExist)?;
let dirty = entry.is_dirty();
let page = l3[l3i].as_page().ok_or(Error::DoesNotExist)?;
l3[l3i] = PageEntry::INVALID;
ic_iallu();
dc_cvac((&raw const l3[l3i]).addr());
tlb_flush_vaae1(virt);
Ok((page, dirty))
Ok(page)
}
fn read_l3_entry(&self, virt: usize) -> Option<(PhysicalAddress, MapAttributes)> {
@ -184,7 +139,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
let page = l3[l3i].as_page()?;
Some((page.add(virt & 0xFFF), l3[l3i].attributes().into()))
Some((page, l3[l3i].attributes().into()))
}
}
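
The two variants of `as_address_with_asid` above differ in whether the ASID is packed into the table base (bits [63:48], where TTBR0_EL1 carries the ASID) or returned separately; the aarch64 context code elsewhere in this diff likewise builds `address_space | (asid << 48) | 1`. A small sketch of that packing; the `| 1` is assumed to be the CnP bit, treat that as an assumption:

```rust
/// Sketch: pack a process table base and ASID the way TTBR0_EL1 expects,
/// with the ASID in bits [63:48]. The `| 1` mirrors the diff's context
/// code and is assumed to be the CnP bit.
fn pack_ttbr0(table_phys: u64, asid: u8) -> u64 {
    table_phys | ((asid as u64) << 48) | 1
}

fn unpack_asid(ttbr0: u64) -> u8 {
    (ttbr0 >> 48) as u8
}
```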


@ -1,15 +1,12 @@
use core::{
fmt,
marker::PhantomData,
ops::{Index, IndexMut, Range},
};
use bitflags::bitflags;
use kernel_arch_interface::KERNEL_VIRT_OFFSET;
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::{PhysicalRef, PhysicalRefMut},
process::PageAttributeUpdate,
table::{
EntryLevel, EntryLevelDrop, MapAttributes, NextPageTable, NonTerminalEntryLevel,
TableAllocator,
@ -19,8 +16,6 @@ use yggdrasil_abi::error::Error;
use crate::KernelTableManagerImpl;
use super::dc_cvac;
bitflags! {
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct PageAttributes: u64 {
@ -42,12 +37,10 @@ bitflags! {
const SH_INNER = 3 << 8;
const PAGE_ATTR_NORMAL = 0 << 2;
const PAGE_ATTR_NORMAL_NC = 1 << 2;
const PAGE_ATTR_DEVICE = 2 << 2;
const PAGE_ATTR_DEVICE = 1 << 2;
const NON_GLOBAL = 1 << 11;
const DIRTY = 1 << 51;
const PXN = 1 << 53;
const UXN = 1 << 54;
}
@ -69,13 +62,6 @@ pub struct L2;
#[derive(Clone, Copy)]
pub struct L3;
#[derive(Debug, Clone, Copy)]
pub enum EntryType {
Table(PhysicalAddress),
Page(PhysicalAddress),
Invalid,
}
impl NonTerminalEntryLevel for L1 {
type NextLevel = L2;
}
@ -115,55 +101,11 @@ impl<L: EntryLevel> PageTable<L> {
Ok(table)
}
/// Creates a reference to [PageTable] from a physical address.
///
/// # Safety
///
/// The function takes in a raw physical address.
pub unsafe fn from_physical(
physical: PhysicalAddress,
) -> Option<PhysicalRefMut<'static, Self, KernelTableManagerImpl>> {
if physical.into_usize() >= KERNEL_VIRT_OFFSET {
// Looks fishy
return None;
}
if !physical.is_aligned_for::<L3>() {
return None;
}
let inner = PhysicalRefMut::map(physical);
Some(inner)
}
}
impl<L: EntryLevel> PageEntry<L> {
const ATTR_MASK: u64 = 0xFFF | (0xFFFF << 48);
pub const INVALID: Self = Self(0, PhantomData);
pub fn update(&mut self, update: &PageAttributeUpdate) -> Result<(), Error> {
let mut attrs = PageAttributes::from_bits_retain(self.0);
if let Some(write) = update.user_write {
// Make writeable/non-writeable
if write {
attrs &= !PageAttributes::AP_ACCESS_MASK;
attrs |= PageAttributes::AP_BOTH_READWRITE;
} else {
todo!();
}
}
if let Some(dirty) = update.dirty {
if dirty {
attrs |= PageAttributes::DIRTY;
} else {
attrs &= !PageAttributes::DIRTY;
}
}
self.0 &= !Self::ATTR_MASK;
self.0 |= attrs.bits() & Self::ATTR_MASK;
Ok(())
}
pub const fn is_present(self) -> bool {
self.0 & PageAttributes::PRESENT.bits() != 0
}
@ -173,12 +115,6 @@ impl<L: EntryLevel> PageEntry<L> {
}
}
impl<L: NonTerminalEntryLevel> PageTable<L> {
pub fn walk(&self, index: usize) -> EntryType {
self[index].classify()
}
}
impl<L: NonTerminalEntryLevel + 'static> NextPageTable for PageTable<L> {
type NextLevel = PageTable<L::NextLevel>;
type TableRef = PhysicalRef<'static, PageTable<L::NextLevel>, KernelTableManagerImpl>;
@ -210,7 +146,6 @@ impl<L: NonTerminalEntryLevel + 'static> NextPageTable for PageTable<L> {
unsafe { table.as_physical_address() },
PageAttributes::empty(),
);
dc_cvac((&raw const self[index]).addr());
Ok(table)
}
}
@ -249,7 +184,6 @@ where
}
self[index] = PageEntry::INVALID;
dc_cvac((&raw const self[index]).addr());
}
}
}
@ -268,7 +202,7 @@ impl<L: NonTerminalEntryLevel> PageEntry<L> {
| (PageAttributes::BLOCK
| PageAttributes::PRESENT
| PageAttributes::ACCESS
| PageAttributes::SH_OUTER
| PageAttributes::SH_INNER
| PageAttributes::PAGE_ATTR_NORMAL
| attrs)
.bits(),
@ -297,21 +231,11 @@ impl<L: NonTerminalEntryLevel> PageEntry<L> {
if self.0 & PageAttributes::PRESENT.bits() != 0
&& self.0 & PageAttributes::BLOCK.bits() == 0
{
Some(PhysicalAddress::from_u64(self.0 & !Self::ATTR_MASK))
Some(PhysicalAddress::from_u64(self.0 & !0xFFF))
} else {
None
}
}
pub fn classify(self) -> EntryType {
if !self.is_present() {
EntryType::Invalid
} else if let Some(table) = self.as_table() {
EntryType::Table(table)
} else {
EntryType::Page(PhysicalAddress::from_u64(self.0 & !Self::ATTR_MASK))
}
}
}
impl PageEntry<L3> {
@ -336,20 +260,18 @@ impl PageEntry<L3> {
| PageAttributes::PRESENT
| PageAttributes::ACCESS
| PageAttributes::SH_OUTER
| PageAttributes::PAGE_ATTR_DEVICE)
| PageAttributes::PAGE_ATTR_DEVICE
| PageAttributes::UXN
| PageAttributes::PXN)
.bits(),
PhantomData,
)
}
pub fn is_dirty(&self) -> bool {
self.0 & PageAttributes::DIRTY.bits() != 0
}
pub fn as_page(&self) -> Option<PhysicalAddress> {
let mask = (PageAttributes::PRESENT | PageAttributes::PAGE).bits();
if self.0 & mask == mask {
Some(PhysicalAddress::from_u64(self.0 & !Self::ATTR_MASK))
Some(PhysicalAddress::from_u64(self.0 & !0xFFF))
} else {
None
}
@ -385,10 +307,6 @@ impl From<MapAttributes> for PageAttributes {
out |= PageAttributes::AP_KERNEL_READONLY;
}
if value.contains(MapAttributes::DIRTY) {
out |= PageAttributes::DIRTY;
}
if value.contains(MapAttributes::NON_GLOBAL) {
out |= PageAttributes::NON_GLOBAL;
}
@ -411,10 +329,6 @@ impl From<PageAttributes> for MapAttributes {
_ => unreachable!(),
};
if value.contains(PageAttributes::DIRTY) {
out |= MapAttributes::DIRTY;
}
if value.contains(PageAttributes::NON_GLOBAL) {
out |= MapAttributes::NON_GLOBAL;
}
@ -422,13 +336,3 @@ impl From<PageAttributes> for MapAttributes {
out
}
}
impl fmt::Display for EntryType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Table(address) => write!(f, "table @ {address:#x}"),
Self::Page(address) => write!(f, "page @ {address:#x}"),
Self::Invalid => f.write_str("<invalid>"),
}
}
}
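
A note on the `ATTR_MASK` appearing in this file: one side of the diff extracts page-entry addresses with `self.0 & !0xFFF`, the other with a mask that also clears bits [63:48]. Once attributes live in the high bits too (DIRTY at bit 51, PXN/UXN at 53/54), the low-bits-only mask would leak attribute bits into the "physical address". A minimal demonstration using the constants from the diff:

```rust
// Mirrors the constants in the diff above.
const ATTR_MASK: u64 = 0xFFF | (0xFFFF << 48);
const DIRTY: u64 = 1 << 51;

fn main() {
    let raw = 0x4000_1000u64 | DIRTY | 0x3; // output address | attribute bits
    // The full mask clears both the low and the high attribute bits:
    assert_eq!(raw & !ATTR_MASK, 0x4000_1000);
    // The `& !0xFFF` mask would keep DIRTY set in the result:
    assert_ne!(raw & !0xFFF, 0x4000_1000);
}
```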


@ -7,4 +7,3 @@ edition = "2021"
kernel-arch-interface.workspace = true
yggdrasil-abi.workspace = true
libk-mm-interface.workspace = true
device-api.workspace = true


@ -1,13 +1,11 @@
#![feature(never_type, allocator_api, slice_ptr_get)]
#![feature(never_type)]
use std::{
alloc::{Allocator, Global, Layout},
marker::PhantomData,
sync::atomic::{AtomicBool, Ordering},
};
use device_api::dma::{DmaAllocation, DmaAllocator};
use kernel_arch_interface::{
cpu::{CpuData, IpiQueue},
cpu::IpiQueue,
mem::{
DeviceMemoryAttributes, KernelTableManager, PhysicalMemoryAllocator, RawDeviceMemoryMapping,
},
@ -38,21 +36,14 @@ pub struct TaskContextImpl<K: KernelTableManager, PA: PhysicalMemoryAllocator>(
static DUMMY_INTERRUPT_MASK: AtomicBool = AtomicBool::new(true);
pub struct DummyCpuData;
impl CpuData for DummyCpuData {}
impl Architecture for ArchitectureImpl {
type PerCpuData = DummyCpuData;
type CpuFeatures = ();
type BreakpointType = u8;
const BREAKPOINT_VALUE: Self::BreakpointType = 0x00;
type PerCpuData = ();
fn local_cpu() -> *mut () {
fn local_cpu() -> *mut Self::PerCpuData {
unimplemented!()
}
unsafe fn set_local_cpu(_cpu: *mut ()) {
unsafe fn set_local_cpu(_cpu: *mut Self::PerCpuData) {
unimplemented!()
}
@ -91,39 +82,15 @@ impl Architecture for ArchitectureImpl {
fn halt() -> ! {
unimplemented!()
}
fn cpu_enabled_features<S: Scheduler>(
_cpu: &kernel_arch_interface::cpu::CpuImpl<Self, S>,
) -> Option<&Self::CpuFeatures> {
unimplemented!()
}
fn cpu_available_features<S: Scheduler>(
_cpu: &kernel_arch_interface::cpu::CpuImpl<Self, S>,
) -> Option<&Self::CpuFeatures> {
unimplemented!()
}
fn ipi_queue(_cpu_id: u32) -> Option<&'static IpiQueue<Self>> {
None
}
fn load_barrier() {}
fn store_barrier() {}
fn memory_barrier() {}
fn flush_virtual_range(_range: std::ops::Range<usize>) {}
}
impl KernelTableManager for KernelTableManagerImpl {
fn virtualize(phys: u64) -> usize {
phys.try_into().unwrap()
fn virtualize(_phys: u64) -> usize {
unimplemented!()
}
fn physicalize(virt: usize) -> u64 {
virt.try_into().unwrap()
fn physicalize(_virt: usize) -> u64 {
unimplemented!()
}
unsafe fn map_device_pages(
@ -160,7 +127,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
unimplemented!()
}
unsafe fn unmap_page(&mut self, _address: usize) -> Result<(PhysicalAddress, bool), Error> {
unsafe fn unmap_page(&mut self, _address: usize) -> Result<PhysicalAddress, Error> {
unimplemented!()
}
@ -168,7 +135,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
unimplemented!()
}
fn as_address_with_asid(&self) -> (u64, u64) {
fn as_address_with_asid(&self) -> u64 {
unimplemented!()
}
}
@ -202,29 +169,9 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator> TaskContext<K, PA>
fn kernel_closure<F: FnOnce() -> ! + Send + 'static>(_f: F) -> Result<Self, Error> {
unimplemented!()
}
fn set_thread_pointer(&self, _tp: usize) {
unimplemented!()
}
}
#[no_mangle]
extern "Rust" fn __signal_process_group(_group_id: ProcessGroupId, _signal: Signal) {
unimplemented!()
}
pub struct HostedDmaAllocator;
impl DmaAllocator for HostedDmaAllocator {
fn allocate(&self, layout: Layout) -> Result<DmaAllocation, Error> {
let ptr = Global.allocate(layout.align_to(0x1000).unwrap()).unwrap();
let base = ptr.as_non_null_ptr();
let addr: usize = base.addr().into();
Ok(DmaAllocation {
host_virtual: base.cast(),
host_physical: addr as _,
page_count: layout.size().div_ceil(0x1000),
bus_address: addr as _,
})
}
}
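
A quick usage sketch for the hosted `DmaAllocator` shown above, assuming the trait shape from this diff (`allocate(&self, Layout) -> Result<DmaAllocation, Error>`): allocations are rounded up to whole 4 KiB pages, and since everything is plain host memory, the bus address equals the host physical address.

```rust
use core::alloc::Layout;

// Sketch only: exercises the HostedDmaAllocator from the diff above.
fn check(alloc: &HostedDmaAllocator) {
    let layout = Layout::from_size_align(8192, 64).unwrap();
    let dma = alloc.allocate(layout).unwrap();
    assert_eq!(dma.page_count, 2); // 8 KiB -> two 4 KiB pages
    assert_eq!(dma.bus_address, dma.host_physical); // identity mapping on the host
}
```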


@ -31,8 +31,8 @@ pub struct ExceptionFrame {
pub eip: u32,
pub cs: u32,
pub eflags: u32,
pub esp: u32,
pub ss: u32,
esp: u32,
ss: u32,
}
#[allow(unused)]
@ -74,8 +74,6 @@ pub struct InterruptFrame {
struct Inner {
// 0x00
sp: usize,
gs_base: usize,
}
#[allow(dead_code)]
@ -90,29 +88,11 @@ pub struct TaskContextImpl<
cr3: u32,
tss_esp0: u32,
gs_base: usize,
_pd: PhantomData<(K, PA)>,
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>>
TaskContextImpl<K, PA>
{
unsafe fn store_state(&self) {
if let Some(fpu) = self.fpu_context.as_ref() {
FpuContext::store(fpu.get());
}
}
unsafe fn load_state(&self) {
if let Some(fpu) = self.fpu_context.as_ref() {
FpuContext::restore(fpu.get());
}
gdt::set_gs_base((*self.inner.get()).gs_base);
TSS.esp0 = self.tss_esp0;
CR3.set(self.cr3 as _);
}
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>>
TaskContext<K, PA> for TaskContextImpl<K, PA>
{
@ -143,16 +123,14 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
let fpu_context = FpuContext::new(true);
Ok(Self {
inner: UnsafeCell::new(Inner {
sp,
gs_base: context.thread_pointer,
}),
inner: UnsafeCell::new(Inner { sp }),
fpu_context: Some(UnsafeCell::new(fpu_context)),
stack_base_phys,
stack_size: USER_TASK_PAGES * 0x1000,
tss_esp0: esp0 as _,
cr3: context.address_space.try_into().unwrap(),
gs_base: context.tls,
_pd: PhantomData,
})
@ -188,46 +166,61 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
// TODO stack is leaked
Ok(Self {
inner: UnsafeCell::new(Inner { sp, gs_base: 0 }),
inner: UnsafeCell::new(Inner { sp }),
fpu_context: None,
stack_base_phys,
stack_size: KERNEL_TASK_PAGES * 0x1000,
tss_esp0: 0,
cr3,
gs_base: 0,
_pd: PhantomData,
})
}
unsafe fn switch(&self, from: &Self) {
if core::ptr::addr_eq(self, from) {
return;
}
from.store_state();
self.load_state();
__i686_switch_task(self.inner.get(), from.inner.get());
let dst = self.inner.get();
let src = from.inner.get();
if dst != src {
// Save the old context
if let Some(src_fpu) = from.fpu_context.as_ref() {
FpuContext::store(src_fpu.get());
}
// Load next context
if let Some(dst_fpu) = self.fpu_context.as_ref() {
FpuContext::restore(dst_fpu.get());
}
gdt::set_gs_base(self.gs_base);
TSS.esp0 = self.tss_esp0;
CR3.set(self.cr3 as _);
__i686_switch_task(dst, src);
}
}
unsafe fn enter(&self) -> ! {
self.load_state();
if let Some(dst_fpu) = self.fpu_context.as_ref() {
FpuContext::restore(dst_fpu.get());
}
gdt::set_gs_base(self.gs_base);
TSS.esp0 = self.tss_esp0;
CR3.set(self.cr3 as _);
__i686_enter_task(self.inner.get())
}
unsafe fn switch_and_drop(&self, thread: *const ()) {
self.load_state();
__i686_switch_and_drop(self.inner.get(), thread);
}
fn set_thread_pointer(&self, tp: usize) {
unsafe { (*self.inner.get()).gs_base = tp };
gdt::set_gs_base(tp);
}
fn align_stack_for_entry(sp: usize) -> usize {
(sp & !0xF) - 12
}
unsafe fn switch_and_drop(&self, thread: *const ()) {
if let Some(dst_fpu) = self.fpu_context.as_ref() {
FpuContext::restore(dst_fpu.get());
}
gdt::set_gs_base(self.gs_base);
TSS.esp0 = self.tss_esp0;
CR3.set(self.cr3 as _);
__i686_switch_and_drop(self.inner.get(), thread)
}
}
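The `(sp & !0xF) - 12` in `align_stack_for_entry` above is worth a sanity check: rounding down clears the low four bits, and the 12-byte bias leaves the result 4 bytes past a 16-byte boundary, so one more 4-byte push lands exactly on one. A minimal check of that arithmetic (a sketch, not part of the diff):

fn aligned_entry_sp(sp: usize) -> usize {
    // Round down to 16 bytes, then bias by 12: result % 16 == 4.
    let aligned = (sp & !0xF) - 12;
    debug_assert_eq!(aligned % 16, 4);
    aligned
}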

View File

@ -8,7 +8,7 @@ use core::ptr::null_mut;
use alloc::vec::Vec;
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use kernel_arch_interface::{
cpu::{CpuData, CpuImpl, IpiQueue},
cpu::{CpuImpl, IpiQueue},
task::Scheduler,
Architecture,
};
@ -29,8 +29,6 @@ pub struct PerCpuData {
pub enabled_features: CpuFeatures,
}
impl CpuData for PerCpuData {}
static mut CPU: *mut () = null_mut();
#[naked]
@ -108,16 +106,12 @@ impl Architecture for ArchitectureImpl {
1
}
fn local_interrupt_controller() -> Option<&'static dyn LocalInterruptController> {
None
fn message_interrupt_controller() -> &'static dyn MessageInterruptController {
unimplemented!()
}
fn message_interrupt_controller() -> Option<&'static dyn MessageInterruptController> {
None
}
fn ipi_queue(_cpu_id: u32) -> Option<&'static IpiQueue<Self>> {
None
fn local_interrupt_controller() -> &'static dyn LocalInterruptController {
unimplemented!()
}
fn idle_task() -> extern "C" fn(usize) -> ! {

View File

@ -1,7 +1,8 @@
use fixed::FixedTables;
use kernel_arch_interface::{
mem::{DeviceMemoryAttributes, KernelTableManager, RawDeviceMemoryMapping},
split_spinlock, KERNEL_VIRT_OFFSET,
sync::split_spinlock,
KERNEL_VIRT_OFFSET,
};
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
@ -25,7 +26,7 @@ split_spinlock! {
use crate::ArchitectureImpl;
#[link_section = ".data.tables"]
static KERNEL_TABLES: KernelImageObject<FixedTables> = unsafe {
static KERNEL_TABLES<lock: ArchitectureImpl>: KernelImageObject<FixedTables> = unsafe {
KernelImageObject::new(FixedTables::zeroed())
};
}
@ -37,12 +38,8 @@ impl KernelTableManager for KernelTableManagerImpl {
_attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping<Self>, Error> {
// TODO page align up
let offset = (base & 0xFFF) as usize;
let base = base & !0xFFF;
let end = (base + count as u64 + 0xFFF) & !0xFFF;
// assert_eq!(base & 0xFFF, 0);
let end = base + count as u64;
assert_eq!(base & 0xFFF, 0);
if end < fixed::MAX_FIXED_PHYSICAL.into_u64() {
// 1:1
let address = Self::virtualize(base);
@ -56,16 +53,13 @@ impl KernelTableManager for KernelTableManagerImpl {
let virt = KERNEL_TABLES.lock().map_dynamic_memory(base, page_count)?;
Ok(RawDeviceMemoryMapping::from_raw_parts(
virt + offset,
virt,
page_count,
0,
virt, virt, page_count, 0,
))
}
}
unsafe fn unmap_device_pages(_mapping: &RawDeviceMemoryMapping<Self>) {
// todo!()
todo!()
}
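The master side of this hunk makes `map_device_pages` tolerate a non-page-aligned `base`; worked through with a hypothetical address, the arithmetic looks like this:

// base = 0x1234_5678, count = 0x100:
//   offset = 0x1234_5678 & 0xFFF             -> 0x678
//   base   = 0x1234_5678 & !0xFFF            -> 0x1234_5000
//   end    = (base + 0x100 + 0xFFF) & !0xFFF -> 0x1234_6000
// The returned mapping points at `virt + offset`, so the caller still
// sees the original, unaligned device address.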
fn virtualize(phys: u64) -> usize {

View File

@ -68,8 +68,8 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
self.pop_l3_entry(address)
}
fn as_address_with_asid(&self) -> (u64, u64) {
(unsafe { self.l0.as_physical_address().into_u64() }, 0)
fn as_address_with_asid(&self) -> u64 {
unsafe { self.l0.as_physical_address().into_u64() }
}
}
@ -120,7 +120,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
let l3 = self.l0.get(l0i)?;
let page = l3[l3i].as_page()?;
Some((page.add(virt & 0xFFF), l3[l3i].attributes().into()))
Some((page, l3[l3i].attributes().into()))
}
}

View File

@ -7,5 +7,5 @@ edition = "2021"
yggdrasil-abi.workspace = true
device-api = { workspace = true, features = ["derive"] }
[lints]
workspace = true
[lints.rust]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }

View File

@ -30,18 +30,6 @@ pub struct IpiQueue<A: Architecture> {
data: IrqSafeSpinlock<A, Option<IpiMessage>>,
}
pub trait CpuData {
fn is_bootstrap(&self, id: u32) -> bool {
// On most architectures
id == 0
}
fn queue_index(&self, id: u32) -> usize {
// On most architectures
id as usize
}
}
pub trait CpuFeatureSet {
fn iter(&self) -> impl Iterator<Item = &'static str>;
}
@ -62,14 +50,6 @@ impl<A: Architecture, S: Scheduler + 'static> CpuImpl<A, S> {
unsafe { A::init_ipi_queues(queues) }
}
pub fn is_bootstrap(&self) -> bool {
self.inner.is_bootstrap(self.id)
}
pub fn queue_index(&self) -> usize {
self.inner.queue_index(self.id)
}
pub fn set_current_thread_id(&mut self, id: Option<S::ThreadId>) {
self.current_thread_id = id;
}
@ -112,14 +92,14 @@ impl<A: Architecture, S: Scheduler + 'static> CpuImpl<A, S> {
self.id
}
pub fn push_ipi_queue(cpu_id: u32, msg: IpiMessage) {
if let Some(queue) = A::ipi_queue(cpu_id) {
queue.push(msg);
}
pub fn push_ipi_queue(_cpu_id: u32, _msg: IpiMessage) {
// XXX
todo!()
}
pub fn get_ipi(&self) -> Option<IpiMessage> {
A::ipi_queue(self.id).and_then(|q| q.pop())
// XXX
todo!()
}
pub fn available_features(&self) -> Option<&A::CpuFeatures> {
@ -145,13 +125,13 @@ impl<A: Architecture, S: Scheduler> DerefMut for CpuImpl<A, S> {
}
}
impl<A: Architecture, S: Scheduler + 'static> LocalCpuImpl<'_, A, S> {
impl<'a, A: Architecture, S: Scheduler + 'static> LocalCpuImpl<'a, A, S> {
pub fn into_guard(self) -> IrqGuard<A> {
self.guard
}
}
impl<A: Architecture, S: Scheduler> Deref for LocalCpuImpl<'_, A, S> {
impl<'a, A: Architecture, S: Scheduler> Deref for LocalCpuImpl<'a, A, S> {
type Target = CpuImpl<A, S>;
fn deref(&self) -> &Self::Target {
@ -159,7 +139,7 @@ impl<A: Architecture, S: Scheduler> Deref for LocalCpuImpl<'_, A, S> {
}
}
impl<A: Architecture, S: Scheduler> DerefMut for LocalCpuImpl<'_, A, S> {
impl<'a, A: Architecture, S: Scheduler> DerefMut for LocalCpuImpl<'a, A, S> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.cpu
}

View File

@ -2,18 +2,13 @@
#![feature(step_trait, const_trait_impl, never_type, decl_macro)]
#![allow(clippy::new_without_default)]
use core::ops::Range;
use alloc::vec::Vec;
use cpu::{CpuData, CpuFeatureSet, CpuImpl, IpiQueue};
use device_api::interrupt::LocalInterruptController;
use cpu::{CpuFeatureSet, CpuImpl, IpiQueue};
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use task::Scheduler;
extern crate alloc;
#[macro_use]
pub mod macros;
pub mod cpu;
pub mod guard;
pub mod mem;
@ -21,15 +16,14 @@ pub mod sync;
pub mod task;
pub mod util;
#[cfg(any(target_arch = "x86", rust_analyzer))]
#[cfg(any(target_pointer_width = "32", rust_analyzer))]
pub const KERNEL_VIRT_OFFSET: usize = 0xC0000000;
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64", rust_analyzer))]
pub const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
pub const KERNEL_VIRT_OFFSET: usize = 0xFFFFFFF000000000;
pub trait Architecture: Sized + 'static {
type PerCpuData: CpuData;
#[cfg(any(target_pointer_width = "64", rust_analyzer))]
pub const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
pub trait Architecture: Sized {
type PerCpuData;
type CpuFeatures: CpuFeatureSet;
type BreakpointType;
@ -46,9 +40,6 @@ pub trait Architecture: Sized + 'static {
///
/// Precondition: this function has not yet been called on the local CPU.
unsafe fn init_ipi_queues(queues: Vec<IpiQueue<Self>>);
fn ipi_queue(cpu_id: u32) -> Option<&'static IpiQueue<Self>>;
/// # Safety
///
/// Precondition: this function has not yet been called on the local CPU.
@ -70,8 +61,12 @@ pub trait Architecture: Sized + 'static {
fn halt() -> !;
// Architectural devices
fn local_interrupt_controller() -> Option<&'static dyn LocalInterruptController> {
None
fn local_interrupt_controller() -> &'static dyn LocalInterruptController {
unimplemented!()
}
fn message_interrupt_controller() -> &'static dyn MessageInterruptController {
unimplemented!()
}
#[allow(unused)]
@ -82,16 +77,4 @@ pub trait Architecture: Sized + 'static {
fn cpu_enabled_features<S: Scheduler>(cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
None
}
// Cache/barrier operation
fn load_barrier();
fn store_barrier();
fn memory_barrier() {
Self::store_barrier();
Self::load_barrier();
}
/// Flushes/invalidates a range of virtual memory from the CPU's data cache.
fn flush_virtual_range(range: Range<usize>);
}
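Because `memory_barrier` has a default body built from the other two hooks, an implementor only supplies the load and store halves. A caller-side sketch (generic over any `Architecture`; nothing here is from the diff):

fn flush_then_signal<A: Architecture>(range: core::ops::Range<usize>) {
    // Push the dirtied lines out, then order the flush against any
    // later signalling store on this CPU.
    A::flush_virtual_range(range);
    A::memory_barrier();
}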

View File

@ -1,92 +0,0 @@
/// Helper macro to implement "split" locks. This may be needed when a very specific storage
/// layout for the locked type is required.
// pub macro split_spinlock(
// ) {
#[macro_export]
macro_rules! split_spinlock {
(
$(use $use:path;)*
$(#[$meta:meta])*
static $name:ident: $ty:ty = $init:expr;
) => {
pub use $name::$name;
#[allow(non_snake_case)]
pub mod $name {
$(use $use;)*
use core::cell::UnsafeCell;
use core::marker::PhantomData;
use core::sync::atomic::{AtomicU32, Ordering};
#[repr(transparent)]
pub struct __Wrapper {
inner: UnsafeCell<$ty>
}
$(#[$meta])*
pub static $name: __Wrapper = __Wrapper {
inner: UnsafeCell::new($init)
};
static __LOCK: AtomicU32 = AtomicU32::new(0);
pub struct __Guard($crate::guard::IrqGuard<ArchitectureImpl>);
pub struct __UnsafeGuard($crate::guard::IrqGuard<ArchitectureImpl>);
impl __Wrapper {
#[inline(never)]
pub fn lock(&self) -> __Guard {
let irq = $crate::guard::IrqGuard::acquire();
while __LOCK.compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed).is_err() {
core::hint::spin_loop();
}
__Guard(irq)
}
#[inline(never)]
pub unsafe fn grab(&self) -> __UnsafeGuard {
let irq = $crate::guard::IrqGuard::acquire();
__UnsafeGuard(irq)
}
}
unsafe impl Sync for __Wrapper {}
impl core::ops::Deref for __Guard {
type Target = $ty;
fn deref(&self) -> &Self::Target {
unsafe { &*$name.inner.get() }
}
}
impl core::ops::DerefMut for __Guard {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *$name.inner.get() }
}
}
impl core::ops::Deref for __UnsafeGuard {
type Target = $ty;
fn deref(&self) -> &Self::Target {
unsafe { &*$name.inner.get() }
}
}
impl core::ops::DerefMut for __UnsafeGuard {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *$name.inner.get() }
}
}
impl Drop for __Guard {
fn drop(&mut self) {
__LOCK.store(0, Ordering::Release)
}
}
}
};
}

View File

@ -35,8 +35,6 @@ pub struct DeviceMemoryAttributes {
/// Describes a single device memory mapping
#[derive(Debug)]
pub struct RawDeviceMemoryMapping<A: KernelTableManager> {
/// Physical base address of the object
pub physical_base: u64,
/// Virtual address of the mapped object
pub address: usize,
/// Base address of the mapping start
@ -100,8 +98,7 @@ impl<A: KernelTableManager> RawDeviceMemoryMapping<A> {
address
}
pub fn into_raw_parts(self) -> (u64, usize, usize, usize, usize) {
let physical_base = self.physical_base;
pub fn into_raw_parts(self) -> (usize, usize, usize, usize) {
let address = self.address;
let base_address = self.base_address;
let page_count = self.page_count;
@ -109,7 +106,7 @@ impl<A: KernelTableManager> RawDeviceMemoryMapping<A> {
core::mem::forget(self);
(physical_base, address, base_address, page_count, page_size)
(address, base_address, page_count, page_size)
}
/// # Safety
@ -117,14 +114,12 @@ impl<A: KernelTableManager> RawDeviceMemoryMapping<A> {
/// Preconditions: all the fields must come from a [RawDeviceMemoryMapping::into_raw_parts]
/// call.
pub unsafe fn from_raw_parts(
physical_base: u64,
address: usize,
base_address: usize,
page_count: usize,
page_size: usize,
) -> Self {
Self {
physical_base,
address,
base_address,
page_count,

View File

@ -44,9 +44,9 @@ impl<A: Architecture, T> Spinlock<A, T> {
pub fn lock(&self) -> SpinlockGuard<A, T> {
// Loop until the lock can be acquired
if LOCK_HACK.load(Ordering::Acquire) {
return SpinlockGuard { lock: self };
}
// if LOCK_HACK.load(Ordering::Acquire) {
// return SpinlockInnerGuard { lock: self };
// }
while self
.state
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
@ -59,7 +59,7 @@ impl<A: Architecture, T> Spinlock<A, T> {
}
}
impl<A: Architecture, T> Deref for SpinlockGuard<'_, A, T> {
impl<'a, A: Architecture, T> Deref for SpinlockGuard<'a, A, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
@ -67,25 +67,25 @@ impl<A: Architecture, T> Deref for SpinlockGuard<'_, A, T> {
}
}
impl<A: Architecture, T> DerefMut for SpinlockGuard<'_, A, T> {
impl<'a, A: Architecture, T> DerefMut for SpinlockGuard<'a, A, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.lock.value.get() }
}
}
impl<A: Architecture, T> Drop for SpinlockGuard<'_, A, T> {
impl<'a, A: Architecture, T> Drop for SpinlockGuard<'a, A, T> {
fn drop(&mut self) {
if !LOCK_HACK.load(Ordering::Acquire) {
self.lock
.state
.compare_exchange(true, false, Ordering::Release, Ordering::Relaxed)
.unwrap();
}
// if !LOCK_HACK.load(Ordering::Acquire) {
self.lock
.state
.compare_exchange(true, false, Ordering::Release, Ordering::Relaxed)
.unwrap();
// }
}
}
unsafe impl<A: Architecture, T: Send> Sync for Spinlock<A, T> {}
unsafe impl<A: Architecture, T: Send> Send for Spinlock<A, T> {}
unsafe impl<A: Architecture, T> Sync for Spinlock<A, T> {}
unsafe impl<A: Architecture, T> Send for Spinlock<A, T> {}
// IrqSafeSpinlock impls
impl<A: Architecture, T> IrqSafeSpinlock<A, T> {
@ -140,7 +140,7 @@ impl<A: Architecture, T: Clone> Clone for IrqSafeSpinlock<A, T> {
}
}
impl<A: Architecture, T> Deref for IrqSafeSpinlockGuard<'_, A, T> {
impl<'a, A: Architecture, T> Deref for IrqSafeSpinlockGuard<'a, A, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
@ -148,19 +148,72 @@ impl<A: Architecture, T> Deref for IrqSafeSpinlockGuard<'_, A, T> {
}
}
impl<A: Architecture, T> DerefMut for IrqSafeSpinlockGuard<'_, A, T> {
impl<'a, A: Architecture, T> DerefMut for IrqSafeSpinlockGuard<'a, A, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.inner.deref_mut()
}
}
static LOCK_HACK: AtomicBool = AtomicBool::new(false);
/// "Hacks" all the locks in the kernel to make them function as "NULL"-locks instead of spinlocks.
///
/// # Safety
///
/// Only meant to be called from panic handler when the caller is sure other CPUs are halted.
pub unsafe fn hack_locks() {
LOCK_HACK.store(true, Ordering::Release);
}
/// Helper macro to implement "split" locks. This may be needed when a very specific storage
/// layout for the locked type is required.
pub macro split_spinlock(
$(use $use:path;)*
$(#[$meta:meta])*
static $name:ident<$lock:ident: $arch:ty>: $ty:ty = $init:expr;
) {
pub use $name::$name;
#[allow(non_snake_case)]
pub mod $name {
$(use $use;)*
use core::cell::UnsafeCell;
use core::marker::PhantomData;
use core::sync::atomic::{AtomicBool, Ordering};
$(#[$meta])*
pub static $name: __Wrapper = __Wrapper {
inner: UnsafeCell::new($init)
};
static __LOCK: AtomicBool = AtomicBool::new(false);
#[repr(transparent)]
pub struct __Wrapper {
inner: UnsafeCell<$ty>
}
pub struct __Guard($crate::guard::IrqGuard<$arch>);
impl __Wrapper {
pub fn $lock(&self) -> __Guard {
let irq = $crate::guard::IrqGuard::acquire();
while __LOCK.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
core::hint::spin_loop();
}
__Guard(irq)
}
}
unsafe impl Sync for __Wrapper {}
impl core::ops::Deref for __Guard {
type Target = $ty;
fn deref(&self) -> &Self::Target {
unsafe { &*$name.inner.get() }
}
}
impl core::ops::DerefMut for __Guard {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *$name.inner.get() }
}
}
impl Drop for __Guard {
fn drop(&mut self) {
__LOCK.store(false, Ordering::Release)
}
}
}
}
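The feature-branch macro is what the `KERNEL_TABLES<lock: ArchitectureImpl>` declaration earlier in this diff invokes. A minimal sketch with a hypothetical static shows the generated API; note that the guard-taking method is named after the `lock` identifier:

split_spinlock! {
    use crate::ArchitectureImpl;

    static COUNTER<lock: ArchitectureImpl>: u32 = 0;
}

fn bump() {
    // lock() spins on __LOCK with interrupts disabled via IrqGuard.
    let mut guard = COUNTER.lock();
    *guard += 1;
}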

View File

@ -81,9 +81,8 @@ pub struct UserContextInfo {
pub entry: usize,
pub argument: usize,
pub stack_pointer: usize,
pub thread_pointer: usize,
pub tls: usize,
pub address_space: u64,
pub asid: u64,
pub single_step: bool,
}
@ -122,9 +121,6 @@ pub trait TaskContext<K: KernelTableManager, PA: PhysicalMemoryAllocator>: Sized
/// Only meant to be called from the scheduler code after the `thread` has terminated.
unsafe fn switch_and_drop(&self, thread: *const ());
/// Replaces the current thread pointer with the provided one.
fn set_thread_pointer(&self, tp: usize);
// XXX
/// Constructs a safe wrapper process to execute a kernel-space closure
fn kernel_closure<F: FnOnce() -> ! + Send + 'static>(f: F) -> Result<Self, Error> {
@ -142,10 +138,6 @@ pub trait TaskContext<K: KernelTableManager, PA: PhysicalMemoryAllocator>: Sized
let ptr = Box::into_raw(closure) as usize;
Self::kernel(closure_wrapper::<F>, ptr)
}
fn align_stack_for_entry(sp: usize) -> usize {
sp
}
}
pub struct StackBuilder {

View File

@ -5,8 +5,6 @@ use core::{
sync::atomic::{AtomicUsize, Ordering},
};
use yggdrasil_abi::error::Error;
/// Wrapper struct to ensure a value can only be initialized once and is only used after initialization
#[repr(C)]
pub struct OneTimeInit<T> {
@ -44,81 +42,23 @@ impl<T> OneTimeInit<T> {
self.state.load(Ordering::Acquire) == Self::STATE_INITIALIZED
}
pub fn try_init_with_opt<F: FnOnce() -> Result<T, Error>>(&self, f: F) -> Result<&T, Error> {
if !self.try_begin_init() {
// Already initialized
return Err(Error::AlreadyExists);
}
match f() {
Ok(value) => {
let value = unsafe { (*self.value.get()).write(value) };
self.finish_init();
Ok(value)
}
Err(error) => {
self.fail_init();
Err(error)
}
}
}
pub fn or_init_with<F: FnOnce() -> T>(&self, f: F) -> &T {
if !self.try_begin_init() {
return self.wait_for_init();
}
let value = unsafe { (*self.value.get()).write(f()) };
self.finish_init();
value
}
pub fn or_init_with_opt<F: FnOnce() -> Option<T>>(&self, f: F) -> Option<&T> {
if !self.try_begin_init() {
return Some(self.wait_for_init());
}
match f() {
Some(value) => {
let value = unsafe { (*self.value.get()).write(value) };
self.finish_init();
Some(value)
}
None => {
self.fail_init();
None
}
}
}
pub fn or_try_init_with<F: FnOnce() -> Result<T, Error>>(&self, f: F) -> Result<&T, Error> {
if !self.try_begin_init() {
return Ok(self.wait_for_init());
}
match f() {
Ok(value) => {
let value = unsafe { (*self.value.get()).write(value) };
self.finish_init();
Ok(value)
}
Err(error) => {
// Init failed
self.fail_init();
Err(error)
}
}
}
fn try_begin_init(&self) -> bool {
self.state
.compare_exchange(
Self::STATE_UNINITIALIZED,
Self::STATE_INITIALIZING,
Ordering::Acquire,
Ordering::Relaxed,
)
.is_ok()
}
pub fn try_init_with<F: FnOnce() -> T>(&self, f: F) -> Option<&T> {
if self
.state
.compare_exchange(
Self::STATE_UNINITIALIZED,
Self::STATE_INITIALIZING,
Ordering::Acquire,
Ordering::Relaxed,
)
.is_err()
{
// Already initialized
return None;
}
let value = unsafe { (*self.value.get()).write(f()) };
fn finish_init(&self) {
self.state
.compare_exchange(
Self::STATE_INITIALIZING,
@ -127,38 +67,8 @@ impl<T> OneTimeInit<T> {
Ordering::Relaxed,
)
.unwrap();
}
fn fail_init(&self) {
self.state
.compare_exchange(
Self::STATE_INITIALIZING,
Self::STATE_UNINITIALIZED,
Ordering::Release,
Ordering::Relaxed,
)
.unwrap();
}
fn wait_for_init(&self) -> &T {
while self.state.load(Ordering::Acquire) != Self::STATE_INITIALIZED {
core::hint::spin_loop();
}
unsafe { (*self.value.get()).assume_init_ref() }
}
pub fn try_init_with<F: FnOnce() -> T>(&self, f: F) -> Result<&T, Error> {
if !self.try_begin_init() {
// Already initialized
return Err(Error::AlreadyExists);
}
let value = unsafe { (*self.value.get()).write(f()) };
self.finish_init();
Ok(value)
Some(value)
}
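Taken together, the master-side rework splits initialization into begin/finish/fail phases, so concurrent callers either win the CAS or spin in `wait_for_init` until the `Release` store of `STATE_INITIALIZED` publishes the value. A usage sketch (the `Devices` type is hypothetical):

static DEVICES: OneTimeInit<Devices> = OneTimeInit::new();

fn devices() -> &'static Devices {
    // Exactly one caller runs the closure; late callers spin briefly
    // and then see the fully initialized value.
    DEVICES.or_init_with(Devices::enumerate)
}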
/// Sets the underlying value of the [OneTimeInit]. If already initialized, panics.

View File

@ -1,26 +0,0 @@
[package]
name = "kernel-arch-riscv64"
version = "0.1.0"
edition = "2024"
[dependencies]
yggdrasil-abi.workspace = true
kernel-arch-interface.workspace = true
libk-mm-interface.workspace = true
memtables.workspace = true
device-api = { workspace = true, features = ["derive"] }
tock-registers.workspace = true
bitflags.workspace = true
static_assertions.workspace = true
log.workspace = true
cfg-if.workspace = true
[features]
default = []
riscv64_board_virt = []
riscv64_board_jh7110 = []
[lints]
workspace = true

View File

@ -1,128 +0,0 @@
// vi:ft=asm:
.section .text
.macro SAVE_TASK_STATE
addi sp, sp, -{context_size}
sd ra, 0 * 8(sp)
sd gp, 1 * 8(sp)
sd s11, 2 * 8(sp)
sd s10, 3 * 8(sp)
sd s9, 4 * 8(sp)
sd s8, 5 * 8(sp)
sd s7, 6 * 8(sp)
sd s6, 7 * 8(sp)
sd s5, 8 * 8(sp)
sd s4, 9 * 8(sp)
sd s3, 10 * 8(sp)
sd s2, 11 * 8(sp)
sd s1, 12 * 8(sp)
sd s0, 13 * 8(sp)
.endm
.macro LOAD_TASK_STATE
ld ra, 0 * 8(sp)
ld gp, 1 * 8(sp)
ld s11, 2 * 8(sp)
ld s10, 3 * 8(sp)
ld s9, 4 * 8(sp)
ld s8, 5 * 8(sp)
ld s7, 6 * 8(sp)
ld s6, 7 * 8(sp)
ld s5, 8 * 8(sp)
ld s4, 9 * 8(sp)
ld s3, 10 * 8(sp)
ld s2, 11 * 8(sp)
ld s1, 12 * 8(sp)
ld s0, 13 * 8(sp)
addi sp, sp, {context_size}
.endm
.option push
.option norvc
.global __rv64_task_enter_kernel
.global __rv64_task_enter_user
.global __rv64_switch_task
.global __rv64_switch_task_and_drop
.global __rv64_enter_task
// Context switching
.type __rv64_enter_task, @function
__rv64_enter_task:
// a0 - task ctx
ld sp, (a0)
LOAD_TASK_STATE
ret
.size __rv64_enter_task, . - __rv64_enter_task
.type __rv64_switch_task, @function
__rv64_switch_task:
// a0 - destination task ctx
// a1 - source task ctx
SAVE_TASK_STATE
sd sp, (a1)
ld sp, (a0)
LOAD_TASK_STATE
ret
.size __rv64_switch_task, . - __rv64_switch_task
.type __rv64_switch_task_and_drop, @function
__rv64_switch_task_and_drop:
// a0 - destination task ctx
// a1 - thread struct to drop
ld sp, (a0)
mv a0, a1
call __arch_drop_thread
LOAD_TASK_STATE
ret
.size __rv64_switch_task_and_drop, . - __rv64_switch_task_and_drop
// Entry functions
.type __rv64_task_enter_kernel, @function
__rv64_task_enter_kernel:
ld a0, (sp) // argument
ld ra, 8(sp) // entry
addi sp, sp, 16
// Set SPIE to enable interrupts
// Set SPP = 1 to indicate a return to S-mode
csrr t0, sstatus
ori t0, t0, (1 << 5)
ori t0, t0, (1 << 8)
csrw sstatus, t0
csrw sepc, ra
csrw sscratch, zero
sret
.size __rv64_task_enter_kernel, . - __rv64_task_enter_kernel
.type __rv64_task_enter_user, @function
__rv64_task_enter_user:
csrw sscratch, tp
ld a0, 0 * 8(sp) // argument
ld ra, 1 * 8(sp) // entry
ld tp, 2 * 8(sp) // thread pointer
ld sp, 3 * 8(sp) // user stack
// Set SPIE to enable interrupts
// Set SPP = 0 to indicate a return to U-mode
li t1, (1 << 8)
not t1, t1
csrr t0, sstatus
ori t0, t0, (1 << 5)
and t0, t0, t1
csrw sstatus, t0
csrw sepc, ra
sret
.size __rv64_task_enter_user, . - __rv64_task_enter_user
.option pop

View File

@ -1,222 +0,0 @@
use core::{arch::global_asm, cell::UnsafeCell, marker::PhantomData};
use kernel_arch_interface::{
mem::{KernelTableManager, PhysicalMemoryAllocator},
task::{StackBuilder, TaskContext, UserContextInfo},
Architecture,
};
use libk_mm_interface::address::PhysicalAddress;
use tock_registers::{
interfaces::{Readable, Writeable},
registers::InMemoryRegister,
};
use yggdrasil_abi::error::Error;
use crate::{
mem::{self, KERNEL_VIRT_OFFSET},
registers::SATP,
ArchitectureImpl, PerCpuData,
};
pub const CONTEXT_SIZE: usize = 14 * size_of::<usize>();
#[repr(C, align(0x10))]
struct TaskContextInner {
// 0x00
sp: usize,
satp: InMemoryRegister<u64, SATP::Register>,
}
pub struct TaskContextImpl<
K: KernelTableManager,
PA: PhysicalMemoryAllocator<Address = PhysicalAddress>,
> {
inner: UnsafeCell<TaskContextInner>,
// fp_context: UnsafeCell<FpContext>,
stack_base_phys: PhysicalAddress,
stack_top: usize,
stack_size: usize,
_pd: PhantomData<(K, PA)>,
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>>
TaskContextImpl<K, PA>
{
unsafe fn load_state(&self) {
// TODO load new SATP value
let inner = unsafe { &*self.inner.get() };
let cpu = unsafe { &mut *ArchitectureImpl::local_cpu().cast::<PerCpuData>() };
// Copy new SATP
let satp = inner.satp.get();
let asid = inner.satp.read(SATP::ASID);
if satp != SATP.get() {
mem::tlb_flush_asid(asid as usize);
SATP.set(satp);
}
cpu.smode_sp = self.stack_top;
}
unsafe fn store_state(&self) {}
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>>
TaskContext<K, PA> for TaskContextImpl<K, PA>
{
const USER_STACK_EXTRA_ALIGN: usize = 8;
const SIGNAL_STACK_EXTRA_ALIGN: usize = 0;
fn user(context: UserContextInfo) -> Result<Self, Error> {
const USER_TASK_PAGES: usize = 16;
let stack_base_phys = PA::allocate_contiguous_pages(USER_TASK_PAGES)?;
let stack_base = stack_base_phys.raw_virtualize::<K>();
let mut stack = StackBuilder::new(stack_base, USER_TASK_PAGES * 0x1000);
log::debug!(
"Set up user task: pc={:#x}, sp={:#x}, tp={:#x}",
context.entry,
context.stack_pointer,
context.thread_pointer
);
stack.push(context.stack_pointer);
stack.push(context.thread_pointer);
stack.push(context.entry);
stack.push(context.argument);
setup_common_context(&mut stack, __rv64_task_enter_user as _);
let sp = stack.build();
let satp = InMemoryRegister::new(0);
satp.write(
SATP::MODE::Sv39
+ SATP::ASID.val(context.asid)
+ SATP::PPN.val(context.address_space >> 12),
);
Ok(Self {
inner: UnsafeCell::new(TaskContextInner { sp, satp }),
// fp_context: UnsafeCell::new(FpContext::new()),
stack_base_phys,
stack_top: stack_base + USER_TASK_PAGES * 0x1000,
stack_size: USER_TASK_PAGES * 0x1000,
_pd: PhantomData,
})
}
fn kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Result<Self, Error> {
const KERNEL_TASK_PAGES: usize = 8;
let stack_base_phys = PA::allocate_contiguous_pages(KERNEL_TASK_PAGES)?;
let stack_base = stack_base_phys.raw_virtualize::<K>();
let mut stack = StackBuilder::new(stack_base, KERNEL_TASK_PAGES * 0x1000);
// Entry and argument
stack.push(entry as _);
stack.push(arg);
setup_common_context(&mut stack, __rv64_task_enter_kernel as _);
let sp = stack.build();
// TODO stack is leaked
let satp = InMemoryRegister::new(0);
let kernel_table_phys =
((&raw const mem::KERNEL_TABLES).addr() - KERNEL_VIRT_OFFSET) as u64;
satp.write(SATP::MODE::Sv39 + SATP::ASID.val(0) + SATP::PPN.val(kernel_table_phys >> 12));
Ok(Self {
inner: UnsafeCell::new(TaskContextInner { sp, satp }),
// fp_context: UnsafeCell::new(FpContext::new()),
stack_base_phys,
stack_top: 0,
stack_size: KERNEL_TASK_PAGES * 0x1000,
_pd: PhantomData,
})
}
fn set_thread_pointer(&self, tp: usize) {
let _ = tp;
todo!()
}
fn align_stack_for_entry(sp: usize) -> usize {
sp
}
unsafe fn enter(&self) -> ! {
unsafe {
self.load_state();
__rv64_enter_task(self.inner.get())
}
}
unsafe fn switch(&self, from: &Self) {
if core::ptr::addr_eq(self, from) {
return;
}
unsafe {
from.store_state();
self.load_state();
__rv64_switch_task(self.inner.get(), from.inner.get())
}
}
unsafe fn switch_and_drop(&self, thread: *const ()) {
unsafe {
self.load_state();
__rv64_switch_task_and_drop(self.inner.get(), thread)
}
}
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>> Drop
for TaskContextImpl<K, PA>
{
fn drop(&mut self) {
assert_eq!(self.stack_size % 0x1000, 0);
for offset in (0..self.stack_size).step_by(0x1000) {
unsafe {
PA::free_page(self.stack_base_phys.add(offset));
}
}
}
}
fn setup_common_context(builder: &mut StackBuilder, entry: usize) {
builder.push(0); // x8/s0/fp
builder.push(0); // x9/s1
builder.push(0); // x18/s2
builder.push(0); // x19/s3
builder.push(0); // x20/s4
builder.push(0); // x21/s5
builder.push(0); // x22/s6
builder.push(0); // x23/s7
builder.push(0); // x24/s8
builder.push(0); // x25/s9
builder.push(0); // x26/s10
builder.push(0); // x27/s11
builder.push(0); // x4/gp
builder.push(entry); // x1/ra return address
}
unsafe extern "C" {
fn __rv64_enter_task(to: *mut TaskContextInner) -> !;
fn __rv64_switch_task(to: *mut TaskContextInner, from: *mut TaskContextInner);
fn __rv64_switch_task_and_drop(to: *mut TaskContextInner, thread: *const ()) -> !;
fn __rv64_task_enter_kernel();
fn __rv64_task_enter_user();
// fn __rv64_fp_store_context(to: *mut c_void);
// fn __rv64_fp_restore_context(from: *const c_void);
}
global_asm!(
include_str!("context.S"),
context_size = const CONTEXT_SIZE,
);
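The pushes in `user()` mirror the loads in `__rv64_task_enter_user` from the previous file exactly; spelled out relative to the task `sp` the trampoline starts with:

// stack.push(context.stack_pointer);  -> sp + 0x18 -> user sp
// stack.push(context.thread_pointer); -> sp + 0x10 -> tp
// stack.push(context.entry);          -> sp + 0x08 -> ra, copied into sepc
// stack.push(context.argument);       -> sp + 0x00 -> a0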

View File

@ -1,6 +0,0 @@
#[inline]
pub fn rdtime() -> u64 {
let mut output: u64;
unsafe { core::arch::asm!("rdtime {0}", out(reg) output) };
output
}

View File

@ -1,186 +0,0 @@
#![feature(decl_macro, naked_functions)]
#![no_std]
extern crate alloc;
use core::{
ops::Range,
sync::atomic::{AtomicUsize, Ordering},
};
use alloc::{boxed::Box, collections::btree_map::BTreeMap, vec::Vec};
use device_api::interrupt::LocalInterruptController;
use kernel_arch_interface::{
cpu::{CpuData, CpuImpl, IpiQueue},
sync::IrqSafeSpinlock,
task::Scheduler,
util::OneTimeInit,
Architecture,
};
use tock_registers::interfaces::{ReadWriteable, Readable};
use registers::SSTATUS;
pub mod mem;
pub use mem::{process::ProcessAddressSpaceImpl, KernelTableManagerImpl};
pub mod context;
pub use context::TaskContextImpl;
pub mod intrinsics;
pub mod registers;
pub mod sbi;
pub struct ArchitectureImpl;
#[repr(C)]
pub struct PerCpuData {
// Used in assembly
pub tmp_t0: usize, // 0x00
pub umode_sp: usize, // 0x08
pub smode_sp: usize, // 0x10
// Used elsewhere
pub bootstrap: bool,
pub queue_index: usize,
}
pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);
static IPI_QUEUES: OneTimeInit<Vec<IpiQueue<ArchitectureImpl>>> = OneTimeInit::new();
static HART_TO_QUEUE: IrqSafeSpinlock<ArchitectureImpl, BTreeMap<u32, usize>> =
IrqSafeSpinlock::new(BTreeMap::new());
impl CpuData for PerCpuData {
fn is_bootstrap(&self, id: u32) -> bool {
let _ = id;
self.bootstrap
}
fn queue_index(&self, id: u32) -> usize {
let _ = id;
self.queue_index
}
}
#[naked]
extern "C" fn idle_task(_: usize) -> ! {
unsafe {
core::arch::naked_asm!("1: nop; j 1b");
}
}
impl ArchitectureImpl {
pub fn for_each_hart<F: FnMut(u32, usize, &IpiQueue<ArchitectureImpl>)>(mut f: F) {
let map = HART_TO_QUEUE.lock();
map.iter().for_each(|(&hart_id, &queue_index)| {
let queue = &IPI_QUEUES.get()[queue_index];
f(hart_id, queue_index, queue);
});
}
}
impl Architecture for ArchitectureImpl {
type PerCpuData = PerCpuData;
type CpuFeatures = ();
type BreakpointType = u32;
const BREAKPOINT_VALUE: Self::BreakpointType = 0;
fn halt() -> ! {
loop {
unsafe { Self::set_interrupt_mask(true) };
Self::wait_for_interrupt();
}
}
unsafe fn set_local_cpu(cpu: *mut ()) {
unsafe { core::arch::asm!("mv tp, {0}", in(reg) cpu) };
}
#[inline]
fn local_cpu() -> *mut () {
let value: u64;
unsafe { core::arch::asm!("mv {0}, tp", out(reg) value) };
value as _
}
unsafe fn init_local_cpu<S: Scheduler + 'static>(id: Option<u32>, data: Self::PerCpuData) {
let id = id.expect("riscv64 requires an explicit HART ID in its per-processor struct");
let queue_index = data.queue_index;
HART_TO_QUEUE.lock().insert(id, queue_index);
let cpu = Box::leak(Box::new(CpuImpl::<Self, S>::new(id, data)));
unsafe { cpu.set_local() };
}
unsafe fn init_ipi_queues(queues: Vec<IpiQueue<Self>>) {
IPI_QUEUES.init(queues);
}
fn ipi_queue(cpu_id: u32) -> Option<&'static IpiQueue<Self>> {
let queue_index = *HART_TO_QUEUE.lock().get(&cpu_id)?;
IPI_QUEUES.try_get().and_then(|q| q.get(queue_index))
}
#[inline]
unsafe fn set_interrupt_mask(mask: bool) -> bool {
let old = Self::interrupt_mask();
if mask {
SSTATUS.modify(SSTATUS::SIE::CLEAR);
} else {
SSTATUS.modify(SSTATUS::SIE::SET);
}
old
}
#[inline]
fn interrupt_mask() -> bool {
SSTATUS.matches_all(SSTATUS::SIE::CLEAR)
}
fn wait_for_interrupt() {
unsafe {
core::arch::asm!("wfi");
}
}
fn cpu_count() -> usize {
CPU_COUNT.load(Ordering::Acquire)
}
fn cpu_index<S: Scheduler + 'static>() -> u32 {
CpuImpl::<Self, S>::local().id()
}
fn cpu_enabled_features<S: Scheduler>(cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
let _ = cpu;
todo!()
}
fn cpu_available_features<S: Scheduler>(cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
let _ = cpu;
todo!()
}
fn local_interrupt_controller() -> Option<&'static dyn LocalInterruptController> {
todo!()
}
fn idle_task() -> extern "C" fn(usize) -> ! {
idle_task
}
// Cache/barrier operation
fn load_barrier() {
unsafe { core::arch::asm!("fence r, w") };
}
fn store_barrier() {
unsafe { core::arch::asm!("fence w, r") };
}
fn memory_barrier() {
unsafe { core::arch::asm!("fence rw, rw") };
}
fn flush_virtual_range(_range: Range<usize>) {
// TODO
}
}
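`for_each_hart` plus the SBI IPI wrapper from this crate's `sbi` module is enough to reach every known hart; a hypothetical helper, built only from items shown in this diff:

pub fn kick_all_harts() {
    ArchitectureImpl::for_each_hart(|hart_id, _queue_index, _queue| {
        // One mask bit with `hart_id` as the base targets exactly
        // that hart.
        crate::sbi::sbi_send_ipi(1, hart_id as u64).ok();
    });
}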

View File

@ -1,359 +0,0 @@
use cfg_if::cfg_if;
use kernel_arch_interface::{
mem::{DeviceMemoryAttributes, KernelTableManager, RawDeviceMemoryMapping},
split_spinlock,
};
use libk_mm_interface::{
address::PhysicalAddress,
table::{page_index, EntryLevel, EntryLevelExt},
};
use memtables::riscv64::PageAttributes;
use static_assertions::{const_assert, const_assert_eq};
use table::{PageEntry, PageTable, L1, L2, L3};
use tock_registers::interfaces::Writeable;
use yggdrasil_abi::error::Error;
pub use memtables::riscv64::FixedTables;
use crate::registers::SATP;
pub mod process;
pub mod table;
split_spinlock! {
use crate::ArchitectureImpl;
use crate::mem::FixedTables;
use libk_mm_interface::KernelImageObject;
#[link_section = ".data.tables"]
#[used]
static KERNEL_TABLES: KernelImageObject<FixedTables> =
unsafe { KernelImageObject::new(FixedTables::zeroed()) };
}
cfg_if! {
if #[cfg(feature = "riscv64_board_virt")] {
pub const KERNEL_PHYS_BASE: usize = 0x80200000;
} else if #[cfg(feature = "riscv64_board_jh7110")] {
pub const KERNEL_PHYS_BASE: usize = 0x40200000;
} else if #[cfg(rust_analyzer)] {
pub const KERNEL_PHYS_BASE: usize = 0x80200000;
}
}
pub const KERNEL_VIRT_OFFSET: usize = kernel_arch_interface::KERNEL_VIRT_OFFSET;
pub const SIGN_EXTEND_MASK: usize = 0xFFFFFF80_00000000;
pub const KERNEL_START_L1I: usize = page_index::<L1>(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
pub const KERNEL_L2I: usize = page_index::<L2>(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
const_assert_eq!(KERNEL_L2I, 1);
// Runtime mappings
// 1GiB of device memory space
const DEVICE_MAPPING_L1I: usize = KERNEL_START_L1I + 1;
const DEVICE_MAPPING_L3_COUNT: usize = 4;
// 32GiB of RAM space
const RAM_MAPPING_START_L1I: usize = KERNEL_START_L1I + 2;
const RAM_MAPPING_L1_COUNT: usize = 32;
const_assert!(RAM_MAPPING_START_L1I + RAM_MAPPING_L1_COUNT <= 512);
const_assert!(DEVICE_MAPPING_L1I < 512);
const DEVICE_MAPPING_OFFSET: usize = (DEVICE_MAPPING_L1I << L1::SHIFT) | SIGN_EXTEND_MASK;
const RAM_MAPPING_OFFSET: usize = (RAM_MAPPING_START_L1I << L1::SHIFT) | SIGN_EXTEND_MASK;
// Runtime tables
static mut DEVICE_MAPPING_L2: PageTable<L2> = PageTable::zeroed();
static mut DEVICE_MAPPING_L3S: [PageTable<L3>; DEVICE_MAPPING_L3_COUNT] =
[const { PageTable::zeroed() }; DEVICE_MAPPING_L3_COUNT];
/// Any VAs above this one are sign-extended
pub const USER_BOUNDARY: usize = 0x40_00000000;
#[derive(Debug)]
pub struct KernelTableManagerImpl;
impl KernelTableManager for KernelTableManagerImpl {
fn virtualize(address: u64) -> usize {
let address = address as usize;
if address >= RAM_MAPPING_OFFSET {
panic!("Invalid physical address: {address:#x}");
}
address + RAM_MAPPING_OFFSET
}
fn physicalize(address: usize) -> u64 {
if address < RAM_MAPPING_OFFSET {
panic!("Invalid \"physicalized\" virtual address {address:#x}");
}
(address - RAM_MAPPING_OFFSET) as u64
}
unsafe fn map_device_pages(
base: u64,
count: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping<Self>, Error> {
unsafe { map_device_memory(PhysicalAddress::from_u64(base), count, attrs) }
}
unsafe fn unmap_device_pages(mapping: &RawDeviceMemoryMapping<Self>) {
unsafe { unmap_device_memory(mapping) }
}
}
// Device mappings
unsafe fn map_device_memory_l3(
base: PhysicalAddress,
count: usize,
_attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
// TODO don't map pages if already mapped
'l0: for i in 0..DEVICE_MAPPING_L3_COUNT * 512 {
for j in 0..count {
let l2i = (i + j) / 512;
let l3i = (i + j) % 512;
unsafe {
if DEVICE_MAPPING_L3S[l2i][l3i].is_present() {
continue 'l0;
}
}
}
for j in 0..count {
let l2i = (i + j) / 512;
let l3i = (i + j) % 512;
unsafe {
DEVICE_MAPPING_L3S[l2i][l3i] =
PageEntry::page(base.add(j * L3::SIZE), PageAttributes::W);
}
}
let start = DEVICE_MAPPING_OFFSET + i * L3::SIZE;
tlb_flush_range_va(start, count * L3::SIZE);
return Ok(start);
}
Err(Error::OutOfMemory)
}
#[allow(unused)]
unsafe fn map_device_memory_l2(
base: PhysicalAddress,
count: usize,
_attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
'l0: for i in DEVICE_MAPPING_L3_COUNT..512 {
for j in 0..count {
unsafe {
if DEVICE_MAPPING_L2[i + j].is_present() {
continue 'l0;
}
}
}
unsafe {
for j in 0..count {
DEVICE_MAPPING_L2[i + j] =
PageEntry::<L2>::block(base.add(j * L2::SIZE), PageAttributes::W);
}
}
let start = DEVICE_MAPPING_OFFSET + i * L2::SIZE;
tlb_flush_range_va(start, count * L2::SIZE);
return Ok(start);
}
Err(Error::OutOfMemory)
}
pub(crate) unsafe fn map_device_memory(
base: PhysicalAddress,
size: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping<KernelTableManagerImpl>, Error> {
let l3_aligned = base.page_align_down::<L3>();
let l3_offset = base.page_offset::<L3>();
let page_count = (l3_offset + size).page_count::<L3>();
if page_count > 256 {
// Large mapping, use L2 mapping instead
let l2_aligned = base.page_align_down::<L2>();
let l2_offset = base.page_offset::<L2>();
let page_count = (l2_offset + size).page_count::<L2>();
unsafe {
let base_address = map_device_memory_l2(l2_aligned, page_count, attrs)?;
let address = base_address + l2_offset;
Ok(RawDeviceMemoryMapping::from_raw_parts(
l2_aligned.into_u64(),
address,
base_address,
page_count,
L2::SIZE,
))
}
} else {
// Just map the pages directly
unsafe {
let base_address = map_device_memory_l3(l3_aligned, page_count, attrs)?;
let address = base_address + l3_offset;
Ok(RawDeviceMemoryMapping::from_raw_parts(
l3_aligned.into_u64(),
address,
base_address,
page_count,
L3::SIZE,
))
}
}
}
pub(crate) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping<KernelTableManagerImpl>) {
match map.page_size {
L3::SIZE => {
for i in 0..map.page_count {
let page = map.base_address + i * L3::SIZE;
let l2i = page.page_index::<L2>();
let l3i = page.page_index::<L3>();
unsafe {
assert!(DEVICE_MAPPING_L3S[l2i][l3i].is_present());
DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::INVALID;
}
}
tlb_flush_range_va(map.base_address, map.page_count * L3::SIZE);
}
L2::SIZE => todo!(),
_ => unimplemented!(),
}
}
pub fn auto_address<T>(x: *const T) -> usize {
let x = x.addr();
if x >= KERNEL_VIRT_OFFSET {
x - KERNEL_VIRT_OFFSET
} else {
x
}
}
/// Enables the memory translation.
///
/// # Safety
///
/// Only meant to be called once per each HART during their early init.
pub unsafe fn enable_mmu() {
let l1_phys = auto_address(&raw const KERNEL_TABLES) as u64;
tlb_flush_full();
SATP.write(SATP::PPN.val(l1_phys >> 12) + SATP::MODE::Sv39);
}
/// Removes the lower half translation mappings.
///
/// # Safety
///
/// Needs to be called once after secondary HARTs are initialized.
pub unsafe fn unmap_lower_half() {
let mut tables = KERNEL_TABLES.lock();
let kernel_l1i_lower = page_index::<L1>(KERNEL_PHYS_BASE);
tables.l1.data[kernel_l1i_lower] = 0;
tlb_flush_range_va(0x0, L1::SIZE);
}
/// Sets up run-time kernel translation tables.
///
/// # Safety
///
/// The caller must ensure MMU is already enabled.
pub unsafe fn setup_fixed_tables() {
let mut tables = KERNEL_TABLES.lock();
let device_mapping_l2_phys = auto_address(&raw const DEVICE_MAPPING_L2);
// Set up static runtime mappings
for i in 0..DEVICE_MAPPING_L3_COUNT {
unsafe {
let device_mapping_l3_phys = PhysicalAddress::from_usize(
(&raw const DEVICE_MAPPING_L3S[i]).addr() - KERNEL_VIRT_OFFSET,
);
DEVICE_MAPPING_L2[i] =
PageEntry::table(device_mapping_l3_phys, PageAttributes::empty());
}
}
assert_eq!(tables.l1.data[DEVICE_MAPPING_L1I], 0);
tables.l1.data[DEVICE_MAPPING_L1I] =
((device_mapping_l2_phys as u64) >> 2) | PageAttributes::V.bits();
for l1i in 0..RAM_MAPPING_L1_COUNT {
let physical = (l1i as u64) << L1::SHIFT;
tables.l1.data[l1i + RAM_MAPPING_START_L1I] = (physical >> 2)
| (PageAttributes::R
| PageAttributes::W
| PageAttributes::A
| PageAttributes::D
| PageAttributes::V)
.bits();
}
tlb_flush_full();
}
pub fn tlb_flush_global_full() {
tlb_flush_full();
// TODO send TLB shootdown IPI to other harts
}
pub fn tlb_flush_global_va(va: usize) {
tlb_flush_va(va);
// TODO send TLB shootdown IPI to other harts
}
pub fn tlb_flush_range_va(start: usize, size: usize) {
let end = (start + size).page_align_up::<L3>();
let start = start.page_align_down::<L3>();
for page in (start..end).step_by(L3::SIZE) {
tlb_flush_va(page);
}
}
pub fn tlb_flush_range_va_asid(asid: usize, start: usize, size: usize) {
let end = (start + size).page_align_up::<L3>();
let start = start.page_align_down::<L3>();
for page in (start..end).step_by(L3::SIZE) {
tlb_flush_va_asid(page, asid);
}
}
#[inline]
pub fn tlb_flush_full() {
unsafe { core::arch::asm!("sfence.vma") };
}
#[inline]
pub fn tlb_flush_va(va: usize) {
unsafe { core::arch::asm!("sfence.vma {0}, zero", in(reg) va) };
}
#[inline]
pub fn tlb_flush_asid(asid: usize) {
unsafe { core::arch::asm!("sfence.vma zero, {0}", in(reg) asid) };
}
#[inline]
pub fn tlb_flush_va_asid(va: usize, asid: usize) {
unsafe { core::arch::asm!("sfence.vma {0}, {1}", in(reg) va, in(reg) asid) };
}
pub fn clone_kernel_tables(dst: &mut PageTable<L1>) {
let tables = KERNEL_TABLES.lock();
for l1i in page_index::<L1>(USER_BOUNDARY)..512 {
dst[l1i] = unsafe { PageEntry::from_raw(tables.l1.data[l1i]) };
}
}
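`virtualize` and `physicalize` are a plain linear map over the 32 GiB RAM window; round-tripping a (hypothetical) address shows the invariant the panics guard:

fn linear_map_round_trip() {
    let phys: u64 = 0x8020_0000;
    let virt = KernelTableManagerImpl::virtualize(phys);
    assert_eq!(virt, phys as usize + RAM_MAPPING_OFFSET);
    assert_eq!(KernelTableManagerImpl::physicalize(virt), phys);
}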

View File

@ -1,236 +0,0 @@
use core::{
marker::PhantomData,
sync::atomic::{AtomicU16, Ordering},
};
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::PhysicalRefMut,
process::{PageAttributeUpdate, ProcessAddressSpaceManager},
table::{
EntryLevel, EntryLevelDrop, EntryLevelExt, MapAttributes, NextPageTable, TableAllocator,
},
};
use memtables::riscv64::PageAttributes;
use yggdrasil_abi::error::Error;
use crate::mem::{clone_kernel_tables, table::PageEntry};
use super::{
table::{DroppableRange, PageTable, L1, L2, L3},
KernelTableManagerImpl, USER_BOUNDARY,
};
pub struct ProcessAddressSpaceImpl<TA: TableAllocator> {
l1: PhysicalRefMut<'static, PageTable<L1>, KernelTableManagerImpl>,
asid: u16,
_pd: PhantomData<TA>,
}
impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceImpl<TA> {
const LOWER_LIMIT_PFN: usize = 8;
const UPPER_LIMIT_PFN: usize = (16 << 30) / L3::SIZE;
fn new() -> Result<Self, Error> {
static LAST_ASID: AtomicU16 = AtomicU16::new(1);
let mut l1 = unsafe {
PhysicalRefMut::<'static, PageTable<L1>, KernelTableManagerImpl>::map(
TA::allocate_page_table()?,
)
};
for i in 0..512 {
l1[i] = PageEntry::INVALID;
}
// Copy the kernel mappings
clone_kernel_tables(&mut l1);
let asid = LAST_ASID.fetch_add(1, Ordering::AcqRel);
Ok(Self {
l1,
asid,
_pd: PhantomData,
})
}
unsafe fn map_page(
&mut self,
address: usize,
physical: PhysicalAddress,
flags: MapAttributes,
) -> Result<(), Error> {
self.write_l3_entry(
address,
PageEntry::page(physical, to_page_attributes(flags)),
false,
)
.unwrap();
Ok(())
}
unsafe fn update_page_attributes(
&mut self,
address: usize,
update: &PageAttributeUpdate,
) -> Result<(), Error> {
self.update_l3_entry(address, |entry| entry.update(update))
}
unsafe fn unmap_page(&mut self, address: usize) -> Result<(PhysicalAddress, bool), Error> {
self.pop_l3_entry(address)
}
fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error> {
self.read_l3_entry(address).ok_or(Error::DoesNotExist)
}
fn as_address_with_asid(&self) -> (u64, u64) {
let physical = unsafe { self.l1.as_physical_address() }.into_u64();
(physical, self.asid as u64)
}
unsafe fn clear(&mut self) {
unsafe { self.l1.drop_range::<TA>(L1::DROPPABLE_RANGE) };
}
}
impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
// Write a single 4KiB entry
fn write_l3_entry(
&mut self,
virt: usize,
entry: PageEntry<L3>,
overwrite: bool,
) -> Result<(), Error> {
if virt >= USER_BOUNDARY {
log::warn!("Tried to map a userspace page to a non-userspace virtual region");
return Err(Error::InvalidArgument);
}
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
let l3i = virt.page_index::<L3>();
let mut l2 = self.l1.get_mut_or_alloc::<TA>(l1i)?;
let mut l3 = l2.get_mut_or_alloc::<TA>(l2i)?;
if l3[l3i].is_present() && !overwrite {
todo!();
}
l3[l3i] = entry;
super::tlb_flush_va_asid(virt, self.asid as usize);
Ok(())
}
fn update_l3_entry<F: FnOnce(&mut PageEntry<L3>) -> Result<(), Error>>(
&mut self,
virt: usize,
mapper: F,
) -> Result<(), Error> {
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
let l3i = virt.page_index::<L3>();
// TODO somehow drop tables if they're known to be empty?
let mut l2 = self.l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;
let entry = &mut l3[l3i];
if !entry.is_present() {
return Err(Error::DoesNotExist);
}
mapper(entry)?;
super::tlb_flush_va_asid(virt, self.asid as usize);
Ok(())
}
fn pop_l3_entry(&mut self, virt: usize) -> Result<(PhysicalAddress, bool), Error> {
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
let l3i = virt.page_index::<L3>();
// TODO somehow drop tables if they're known to be empty?
let mut l2 = self.l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;
let entry = l3[l3i];
let page = entry.as_page().ok_or(Error::DoesNotExist)?;
let dirty = entry.is_dirty();
l3[l3i] = PageEntry::INVALID;
super::tlb_flush_va_asid(virt, self.asid as usize);
Ok((page, dirty))
}
fn read_l3_entry(&self, virt: usize) -> Option<(PhysicalAddress, MapAttributes)> {
if virt >= USER_BOUNDARY {
log::warn!("Tried read an userspace page to a non-userspace virtual region");
return None;
}
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
let l3i = virt.page_index::<L3>();
let l2 = self.l1.get(l1i)?;
let l3 = l2.get(l2i)?;
let page = l3[l3i].as_page()?;
Some((
page.add(virt & 0xFFF),
to_map_attributes(l3[l3i].attributes()),
))
}
}
impl<TA: TableAllocator> Drop for ProcessAddressSpaceImpl<TA> {
fn drop(&mut self) {
// SAFETY: with safe usage of the ProcessAddressSpaceImpl, clearing and dropping
// is safe, no one refers to the memory
unsafe {
self.clear();
let l1_phys = self.l1.as_physical_address();
TA::free_page_table(l1_phys);
super::tlb_flush_asid(self.asid as usize);
}
}
}
fn to_page_attributes(src: MapAttributes) -> PageAttributes {
let mut result = PageAttributes::R | PageAttributes::X;
if src.contains(MapAttributes::USER_WRITE) {
result |= PageAttributes::W;
}
if src.intersects(MapAttributes::USER_READ | MapAttributes::USER_WRITE) {
result |= PageAttributes::U;
}
if src.contains(MapAttributes::DIRTY) {
result |= PageAttributes::SW_DIRTY;
}
result
}
fn to_map_attributes(src: PageAttributes) -> MapAttributes {
let mut result = MapAttributes::NON_GLOBAL;
if src.contains(PageAttributes::U) {
result |= MapAttributes::USER_READ;
if src.contains(PageAttributes::W) {
result |= MapAttributes::USER_WRITE;
}
}
if src.contains(PageAttributes::SW_DIRTY) {
result |= MapAttributes::DIRTY;
}
result
}
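The two attribute translators at the bottom are near-inverses of each other; a quick check of one direction (illustrative only):

fn user_rw_page_attributes() {
    let attrs = to_page_attributes(MapAttributes::USER_READ | MapAttributes::USER_WRITE);
    // R and X are always set; USER_WRITE adds W, and either user flag
    // adds U.
    assert!(attrs.contains(
        PageAttributes::R | PageAttributes::W | PageAttributes::X | PageAttributes::U
    ));
}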

View File

@ -1,272 +0,0 @@
use core::{
marker::PhantomData,
ops::{Index, IndexMut, Range},
};
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::{PhysicalRef, PhysicalRefMut},
process::PageAttributeUpdate,
table::{
page_index, EntryLevel, EntryLevelDrop, NextPageTable, NonTerminalEntryLevel,
TableAllocator,
},
};
use yggdrasil_abi::error::Error;
use super::{KernelTableManagerImpl, USER_BOUNDARY};
pub use memtables::riscv64::PageAttributes;
/// L3 - entry is 4KiB
#[derive(Debug, Clone, Copy)]
pub struct L3;
/// L2 - entry is 2MiB
#[derive(Debug, Clone, Copy)]
pub struct L2;
/// L1 - entry is 1GiB
#[derive(Debug, Clone, Copy)]
pub struct L1;
impl EntryLevel for L3 {
const SHIFT: usize = 12;
}
impl EntryLevel for L2 {
const SHIFT: usize = 21;
}
impl EntryLevel for L1 {
const SHIFT: usize = 30;
}
#[repr(C, align(0x1000))]
pub struct PageTable<L: EntryLevel> {
entries: [PageEntry<L>; 512],
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct PageEntry<L: EntryLevel>(pub u64, PhantomData<L>);
pub(super) trait DroppableRange {
const DROPPABLE_RANGE: Range<usize>;
}
impl DroppableRange for L1 {
const DROPPABLE_RANGE: Range<usize> = 0..page_index::<L1>(USER_BOUNDARY);
}
impl DroppableRange for L2 {
const DROPPABLE_RANGE: Range<usize> = 0..512;
}
impl NonTerminalEntryLevel for L1 {
type NextLevel = L2;
}
impl NonTerminalEntryLevel for L2 {
type NextLevel = L3;
}
impl<L: EntryLevel> PageTable<L> {
pub const fn zeroed() -> Self {
Self {
entries: [PageEntry::INVALID; 512],
}
}
pub fn new_zeroed<'a, TA: TableAllocator>(
) -> Result<PhysicalRefMut<'a, PageTable<L>, KernelTableManagerImpl>, Error> {
let physical = TA::allocate_page_table()?;
let mut table =
unsafe { PhysicalRefMut::<'a, Self, KernelTableManagerImpl>::map(physical) };
for i in 0..512 {
table[i] = PageEntry::INVALID;
}
Ok(table)
}
}
impl<L: EntryLevel> PageEntry<L> {
// Upper + lower 10 bits
const ATTR_MASK: u64 = 0xFFC00000000003FF;
pub const INVALID: Self = Self(0, PhantomData);
/// Constructs a [PageEntry] from its raw representation.
///
/// # Safety
///
/// The caller must ensure `value` is actually a "valid" PTE.
pub const unsafe fn from_raw(value: u64) -> Self {
Self(value, PhantomData)
}
pub const fn is_present(&self) -> bool {
self.0 & PageAttributes::V.bits() != 0
}
pub fn update(&mut self, update: &PageAttributeUpdate) -> Result<(), Error> {
let mut attrs = self.attributes();
if let Some(write) = update.user_write {
attrs.set(PageAttributes::W, write);
}
if let Some(dirty) = update.dirty {
attrs.set(PageAttributes::SW_DIRTY, dirty);
}
self.0 &= !Self::ATTR_MASK;
self.0 |= attrs.bits() & Self::ATTR_MASK;
Ok(())
}
pub const fn is_dirty(&self) -> bool {
self.0 & PageAttributes::SW_DIRTY.bits() != 0
}
pub fn attributes(self) -> PageAttributes {
PageAttributes::from_bits_retain(self.0)
}
}
impl<L: NonTerminalEntryLevel + DroppableRange> EntryLevelDrop for PageTable<L>
where
PageTable<L::NextLevel>: EntryLevelDrop,
{
const FULL_RANGE: Range<usize> = L::DROPPABLE_RANGE;
unsafe fn drop_range<TA: TableAllocator>(&mut self, range: Range<usize>) {
for index in range {
let entry = self[index];
if let Some(table) = entry.as_table() {
unsafe {
let mut table_ref: PhysicalRefMut<
PageTable<L::NextLevel>,
KernelTableManagerImpl,
> = PhysicalRefMut::map(table);
table_ref.drop_all::<TA>();
TA::free_page_table(table);
}
} else if entry.is_present() {
// Memory must have been cleared beforehand, so no non-table entries should be present
panic!(
"Expected a table containing only tables, got table[{}] = {:#x?}",
index, entry.0
);
}
self[index] = PageEntry::INVALID;
// dc_cvac((&raw const self[index]).addr());
}
}
}
impl EntryLevelDrop for PageTable<L3> {
const FULL_RANGE: Range<usize> = 0..512;
// Do nothing
unsafe fn drop_range<TA: TableAllocator>(&mut self, _range: Range<usize>) {}
}
impl<L: NonTerminalEntryLevel + 'static> NextPageTable for PageTable<L> {
type NextLevel = PageTable<L::NextLevel>;
type TableRef = PhysicalRef<'static, PageTable<L::NextLevel>, KernelTableManagerImpl>;
type TableRefMut = PhysicalRefMut<'static, PageTable<L::NextLevel>, KernelTableManagerImpl>;
fn get(&self, index: usize) -> Option<Self::TableRef> {
let table = self[index].as_table()?;
Some(unsafe { PhysicalRef::map(table) })
}
fn get_mut(&mut self, index: usize) -> Option<Self::TableRefMut> {
let table = self[index].as_table()?;
Some(unsafe { PhysicalRefMut::map(table) })
}
fn get_mut_or_alloc<TA: TableAllocator>(
&mut self,
index: usize,
) -> Result<Self::TableRefMut, Error> {
if let Some(table) = self[index].as_table() {
Ok(unsafe { PhysicalRefMut::map(table) })
} else {
let table = PageTable::new_zeroed::<TA>()?;
self[index] = PageEntry::<L>::table(
unsafe { table.as_physical_address() },
PageAttributes::empty(),
);
// dc_cvac((&raw const self[index]).addr());
Ok(table)
}
}
}
impl<L: NonTerminalEntryLevel> PageEntry<L> {
pub fn block(address: PhysicalAddress, attrs: PageAttributes) -> Self {
// TODO validate address alignment
Self(
(address.into_u64() >> 2)
| (PageAttributes::R
| PageAttributes::A
| PageAttributes::D
| PageAttributes::V
| attrs)
.bits(),
PhantomData,
)
}
pub fn table(address: PhysicalAddress, mut attrs: PageAttributes) -> Self {
attrs.remove(PageAttributes::R | PageAttributes::W | PageAttributes::X);
Self(
(address.into_u64() >> 2) | (PageAttributes::V | attrs).bits(),
PhantomData,
)
}
pub fn as_table(&self) -> Option<PhysicalAddress> {
(self.0
& (PageAttributes::R | PageAttributes::W | PageAttributes::X | PageAttributes::V)
.bits()
== PageAttributes::V.bits())
.then_some((self.0 & !Self::ATTR_MASK) << 2)
.map(PhysicalAddress::from_u64)
}
}
impl PageEntry<L3> {
pub fn page(address: PhysicalAddress, attrs: PageAttributes) -> Self {
Self(
(address.into_u64() >> 2)
| (PageAttributes::R
| PageAttributes::A
| PageAttributes::D
| PageAttributes::V
| attrs)
.bits(),
PhantomData,
)
}
pub fn as_page(&self) -> Option<PhysicalAddress> {
(self.0 & PageAttributes::V.bits() != 0)
.then_some((self.0 & !Self::ATTR_MASK) << 2)
.map(PhysicalAddress::from_u64)
}
}
impl<L: EntryLevel> Index<usize> for PageTable<L> {
type Output = PageEntry<L>;
fn index(&self, index: usize) -> &Self::Output {
&self.entries[index]
}
}
impl<L: EntryLevel> IndexMut<usize> for PageTable<L> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.entries[index]
}
}
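The `>> 2` in the entry constructors is the Sv39 PPN placement: bits 12 and up of the physical address land at bits 10 and up of the PTE, so the whole address shifts right by two. Worked through for one hypothetical page:

// phys = 0x8020_0000 -> 0x8020_0000 >> 2 = 0x2008_0000, OR'd with flags.
let entry = PageEntry::<L3>::page(
    PhysicalAddress::from_u64(0x8020_0000),
    PageAttributes::W,
);
assert!(entry.is_present());
// as_page() masks the attribute bits back out and shifts left by two.
assert_eq!(
    entry.as_page(),
    Some(PhysicalAddress::from_u64(0x8020_0000))
);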

View File

@ -1,221 +0,0 @@
macro impl_csr_read($struct:ident, $repr:ty, $reg:ident, $register:ty) {
impl tock_registers::interfaces::Readable for $struct {
type T = $repr;
type R = $register;
#[inline]
fn get(&self) -> $repr {
let mut value: $repr;
unsafe {
core::arch::asm!(concat!("csrr {0}, ", stringify!($reg)), out(reg) value);
}
value
}
}
}
macro impl_csr_write($struct:ident, $repr:ty, $reg:ident, $register:ty) {
impl tock_registers::interfaces::Writeable for $struct {
type T = $repr;
type R = $register;
#[inline]
fn set(&self, value: $repr) {
unsafe {
core::arch::asm!(concat!("csrw ", stringify!($reg), ", {0}"), in(reg) value);
}
}
}
}
pub mod satp {
use tock_registers::register_bitfields;
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub SATP [
PPN OFFSET(0) NUMBITS(44) [],
ASID OFFSET(44) NUMBITS(16) [],
MODE OFFSET(60) NUMBITS(4) [
Bare = 0,
Sv39 = 8,
Sv48 = 9,
Sv57 = 10,
Sv64 = 11,
],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, satp, SATP::Register);
impl_csr_write!(Reg, u64, satp, SATP::Register);
pub const SATP: Reg = Reg;
}
pub mod stvec {
use tock_registers::{interfaces::ReadWriteable, register_bitfields};
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub STVEC [
MODE OFFSET(0) NUMBITS(2) [
Direct = 0,
Vectored = 1
],
BASE OFFSET(2) NUMBITS(62) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, stvec, STVEC::Register);
impl_csr_write!(Reg, u64, stvec, STVEC::Register);
impl Reg {
pub fn set_base(&self, base: usize) {
debug_assert_eq!(base & 0xF, 0);
let mask = match base & 63 != 0 {
false => 0,
true => 0x3 << 62,
};
self.modify(STVEC::BASE.val(((base as u64) >> 2) | mask));
}
}
pub const STVEC: Reg = Reg;
}
pub mod scause {
use tock_registers::register_bitfields;
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub SCAUSE [
CODE OFFSET(0) NUMBITS(63) [],
INTERRUPT OFFSET(63) NUMBITS(1) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, scause, SCAUSE::Register);
impl_csr_write!(Reg, u64, scause, SCAUSE::Register);
pub const SCAUSE: Reg = Reg;
}
pub mod stval {
use super::{impl_csr_read, impl_csr_write};
pub struct Reg;
impl_csr_read!(Reg, u64, stval, ());
impl_csr_write!(Reg, u64, stval, ());
pub const STVAL: Reg = Reg;
}
pub mod sepc {
use super::{impl_csr_read, impl_csr_write};
pub struct Reg;
impl_csr_read!(Reg, u64, sepc, ());
impl_csr_write!(Reg, u64, sepc, ());
pub const SEPC: Reg = Reg;
}
pub mod sstatus {
use tock_registers::register_bitfields;
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub SSTATUS [
SUM OFFSET(18) NUMBITS(1) [],
SPP OFFSET(8) NUMBITS(1) [],
SIE OFFSET(1) NUMBITS(1) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, sstatus, SSTATUS::Register);
impl_csr_write!(Reg, u64, sstatus, SSTATUS::Register);
pub const SSTATUS: Reg = Reg;
}
pub mod sscratch {
use super::{impl_csr_read, impl_csr_write};
pub struct Reg;
impl_csr_read!(Reg, u64, sscratch, ());
impl_csr_write!(Reg, u64, sscratch, ());
pub const SSCRATCH: Reg = Reg;
}
pub mod sip {
use tock_registers::register_bitfields;
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub SIP [
SSIP OFFSET(1) NUMBITS(1) [],
STIP OFFSET(5) NUMBITS(1) [],
SEIP OFFSET(9) NUMBITS(1) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, sip, SIP::Register);
impl_csr_write!(Reg, u64, sip, SIP::Register);
pub const SIP: Reg = Reg;
}
pub mod sie {
use tock_registers::register_bitfields;
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub SIE [
SSIE OFFSET(1) NUMBITS(1) [],
STIE OFFSET(5) NUMBITS(1) [],
SEIE OFFSET(9) NUMBITS(1) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, sie, SIE::Register);
impl_csr_write!(Reg, u64, sie, SIE::Register);
pub const SIE: Reg = Reg;
}
pub use satp::SATP;
pub use scause::SCAUSE;
pub use sepc::SEPC;
pub use sie::SIE;
pub use sip::SIP;
pub use sscratch::SSCRATCH;
pub use sstatus::SSTATUS;
pub use stval::STVAL;
pub use stvec::STVEC;
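A short usage sketch for the re-exported handles (hedged: the `SIE` value and the `SIE` bitfield module live in different namespaces, so both resolve; `modify` comes from tock-registers' blanket `ReadWriteable` impl):
use tock_registers::interfaces::ReadWriteable;

fn unmask_supervisor_interrupts() {
    // Unmask S-mode timer and external interrupt sources...
    SIE.modify(SIE::STIE::SET + SIE::SEIE::SET);
    // ...then enable S-mode interrupt delivery globally
    SSTATUS.modify(SSTATUS::SIE::SET);
}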

View File

@ -1,110 +0,0 @@
use yggdrasil_abi::{error::Error, primitive_enum};
const EXT_HSM: u64 = 0x48534D;
const EXT_TIME: u64 = 0x54494D45;
const EXT_DBCN: u64 = 0x4442434E;
const EXT_SPI: u64 = 0x735049;
primitive_enum! {
pub enum Status: i64 {
Failed = -1,
NotSupported = -2,
InvalidParam = -3,
Denied = -4,
InvalidAddress = -5,
AlreadyAvailable = -6,
AlreadyStarted = -7,
AlreadyStopped = -8,
NoShmem = -9,
InvalidState = -10,
BadRange = -11,
Timeout = -12,
Io = -13,
}
}
primitive_enum! {
pub enum HartState: u64 {
Started = 0,
Stopped = 1,
StartPending = 2,
StopPending = 3,
Suspended = 4,
SuspendPending = 5,
ResumePending = 6,
}
}
pub enum SbiError {
Status(Status),
Other(i64),
}
impl From<i64> for SbiError {
#[inline]
fn from(value: i64) -> Self {
match Status::try_from(value) {
Ok(value) => Self::Status(value),
Err(_) => Self::Other(value),
}
}
}
#[allow(clippy::too_many_arguments)]
#[inline(always)]
unsafe fn sbi_do_call(
extension: u64,
function: u64,
mut a0: u64,
mut a1: u64,
a2: u64,
a3: u64,
a4: u64,
a5: u64,
) -> Result<u64, SbiError> {
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0,
inlateout("a1") a1,
in("a2") a2,
in("a3") a3,
in("a4") a4,
in("a5") a5,
in("a6") function,
in("a7") extension,
);
}
let a0 = a0 as i64;
if a0 == 0 {
Ok(a1)
} else {
Err(a0.into())
}
}
pub fn sbi_hart_start(hart_id: u64, start_addr: u64, opaque: u64) -> Result<(), Error> {
match unsafe { sbi_do_call(EXT_HSM, 0x00, hart_id, start_addr, opaque, 0, 0, 0) } {
Ok(_) => Ok(()),
Err(SbiError::Status(Status::AlreadyAvailable)) => Err(Error::AlreadyExists),
Err(SbiError::Status(Status::InvalidParam)) => Err(Error::DoesNotExist),
Err(SbiError::Status(Status::InvalidAddress)) => Err(Error::InvalidArgument),
Err(_) => Err(Error::InvalidOperation),
}
}
pub fn sbi_send_ipi(hart_mask: u64, hart_mask_base: u64) -> Result<(), Error> {
match unsafe { sbi_do_call(EXT_SPI, 0x00, hart_mask, hart_mask_base, 0, 0, 0, 0) } {
Ok(_) => Ok(()),
Err(SbiError::Status(Status::InvalidParam)) => Err(Error::DoesNotExist),
Err(_) => Err(Error::InvalidOperation),
}
}
pub fn sbi_debug_console_write_byte(byte: u8) {
unsafe { sbi_do_call(EXT_DBCN, 0x02, byte as u64, 0, 0, 0, 0, 0) }.ok();
}
pub fn sbi_set_timer(next_event: u64) {
unsafe { sbi_do_call(EXT_TIME, 0x00, next_event, 0, 0, 0, 0, 0) }.ok();
}
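Usage sketch built on the wrappers above: the DBCN extension transfers one byte per `ecall` through `sbi_debug_console_write_byte`, so a string helper is just a loop (fine for early-boot logging, far too slow for anything else):
fn sbi_debug_print(s: &str) {
    for byte in s.bytes() {
        sbi_debug_console_write_byte(byte);
    }
}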

View File

@ -28,8 +28,6 @@ cfg_if! {
extern crate kernel_arch_x86_64 as imp;
} else if #[cfg(target_arch = "x86")] {
extern crate kernel_arch_i686 as imp;
} else if #[cfg(target_arch = "riscv64")] {
extern crate kernel_arch_riscv64 as imp;
} else {
compile_error!("Unsupported architecture");
}

View File

@ -12,5 +12,5 @@ tock-registers.workspace = true
static_assertions.workspace = true
log.workspace = true
[lints]
workspace = true
[lints.rust]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }

View File

@ -95,17 +95,10 @@ cpuid_features! {
]
}
cpuid_features! {
pub ExtEdxFeatures: u32 [
PDPE1GB: 26
]
}
#[derive(Clone, Copy, Debug)]
pub struct CpuFeatures {
pub ecx: EcxFeatures,
pub edx: EdxFeatures,
pub ext_edx: ExtEdxFeatures,
}
impl CpuFeatures {
@ -113,7 +106,6 @@ impl CpuFeatures {
Self {
ecx: EcxFeatures::empty(),
edx: EdxFeatures::empty(),
ext_edx: ExtEdxFeatures::empty(),
}
}
@ -128,7 +120,6 @@ impl CpuFeatures {
Err(Self {
ecx: features.ecx & !self.ecx,
edx: features.edx & !self.edx,
ext_edx: features.ext_edx & !self.ext_edx,
})
}
}
@ -141,7 +132,6 @@ impl BitAnd<CpuFeatures> for CpuFeatures {
Self {
ecx: self.ecx & rhs.ecx,
edx: self.edx & rhs.edx,
ext_edx: self.ext_edx & rhs.ext_edx,
}
}
}
@ -153,7 +143,6 @@ impl BitOr<CpuFeatures> for CpuFeatures {
Self {
ecx: self.ecx | rhs.ecx,
edx: self.edx | rhs.edx,
ext_edx: self.ext_edx | rhs.ext_edx,
}
}
}
@ -162,9 +151,8 @@ impl CpuFeatureSet for CpuFeatures {
fn iter(&self) -> impl Iterator<Item = &'static str> {
let ecx = self.ecx.iter().map(|e| e.as_str());
let edx = self.edx.iter().map(|e| e.as_str());
let ext_edx = self.ext_edx.iter().map(|e| e.as_str());
core::iter::chain(core::iter::chain(ecx, edx), ext_edx)
core::iter::chain(ecx, edx)
}
}
@ -202,26 +190,20 @@ unsafe fn raw_cpuid(eax: u32, result: &mut [u32]) {
);
}
fn cpuid_features() -> (EcxFeatures, EdxFeatures, ExtEdxFeatures) {
fn cpuid_features() -> (EcxFeatures, EdxFeatures) {
let mut raw = [0; 3];
unsafe {
raw_cpuid(0x1, &mut raw);
}
let ecx = EcxFeatures::from_bits_truncate(raw[2]);
let edx = EdxFeatures::from_bits_truncate(raw[1]);
unsafe {
raw_cpuid(0x80000001, &mut raw);
}
let ext_edx = ExtEdxFeatures::from_bits_truncate(raw[1]);
(ecx, edx, ext_edx)
(
EcxFeatures::from_bits_truncate(raw[2]),
EdxFeatures::from_bits_truncate(raw[1]),
)
}
fn enable_features(ecx: EcxFeatures, edx: EdxFeatures, _ext_edx: ExtEdxFeatures) {
fn enable_features(ecx: EcxFeatures, edx: EdxFeatures) {
if ecx.contains(EcxFeatures::XSAVE) {
CR4.modify(CR4::OSXSAVE::SET);
}
@ -248,16 +230,13 @@ fn enable_features(ecx: EcxFeatures, edx: EdxFeatures, _ext_edx: ExtEdxFeatures)
if ecx.contains(EcxFeatures::PCID) {
CR4.modify(CR4::PCIDE::SET);
}
if edx.contains(EdxFeatures::PSE) {
CR4.modify(CR4::PSE::SET);
}
CR0.modify(CR0::TS::CLEAR);
}
fn read_features() -> CpuFeatures {
let (ecx, edx, ext_edx) = cpuid_features();
CpuFeatures { ecx, edx, ext_edx }
let (ecx, edx) = cpuid_features();
CpuFeatures { ecx, edx }
}
pub fn setup_features(
@ -271,7 +250,7 @@ pub fn setup_features(
return (have_features, Err(missing_features));
}
enable_features(will_features.ecx, will_features.edx, will_features.ext_edx);
enable_features(will_features.ecx, will_features.edx);
(have_features, Ok(will_features))
}

View File

@ -263,11 +263,6 @@ mod imp {
);
}
/// Initializes and loads the GDT data structure for the current CPU.
///
/// # Safety
///
/// Intended to be called once per each CPU during their early initialization.
pub unsafe fn init() -> usize {
let (gdt, tss) = create_gdt();
load_gdt(gdt);

View File

@ -6,10 +6,4 @@ extern crate alloc;
pub mod cpuid;
pub mod gdt;
pub mod intrinsics;
pub mod registers;
#[cfg(any(target_arch = "x86_64", rust_analyzer))]
pub const ISA_IRQ_OFFSET: u32 = 1024;
#[cfg(any(target_arch = "x86", rust_analyzer))]
pub const ISA_IRQ_OFFSET: u32 = 0;

View File

@ -146,7 +146,7 @@ mod cr2 {
}
mod cr3 {
use tock_registers::{interfaces::Writeable, register_bitfields};
use tock_registers::{interfaces::ReadWriteable, register_bitfields};
register_bitfields! {
usize,
@ -164,7 +164,7 @@ mod cr3 {
impl Reg {
pub fn set_address(&self, address: usize) {
assert_eq!(address & 0xFFF, 0);
self.write(CR3::ADDR.val(address >> 12))
self.modify(CR3::ADDR.val(address >> 12))
}
}
@ -291,18 +291,6 @@ mod msr {
pub const MSR_IA32_KERNEL_GS_BASE: Reg = Reg;
}
pub mod ia32_fs_base {
const ADDR: u32 = 0xC0000100;
pub struct Reg;
msr_impl_read!(Reg, ADDR);
msr_impl_write!(Reg, ADDR);
/// IA32_FS_BASE model-specific register. Provides the base address for %fs-relative
/// loads/stores.
pub const MSR_IA32_FS_BASE: Reg = Reg;
}
pub mod ia32_apic_base {
use tock_registers::{interfaces::Readable, register_bitfields};
@ -432,7 +420,6 @@ pub use cr3::CR3;
pub use cr4::CR4;
pub use msr::ia32_apic_base::MSR_IA32_APIC_BASE;
pub use msr::ia32_efer::MSR_IA32_EFER;
pub use msr::ia32_fs_base::MSR_IA32_FS_BASE;
pub use msr::ia32_kernel_gs_base::MSR_IA32_KERNEL_GS_BASE;
pub use msr::ia32_lstar::MSR_IA32_LSTAR;
pub use msr::ia32_sfmask::MSR_IA32_SFMASK;
@ -504,12 +491,7 @@ impl FpuContext {
Self { inner }
}
/// Stores the FPU context into the `this` pointer.
///
/// # Safety
///
/// It is up to the caller to ensure `this` is a valid pointer to store the FPU context in.
pub unsafe fn store(this: *mut Self) {
pub fn store(this: *mut Self) {
#[cfg(any(target_arch = "x86", rust_analyzer))]
unsafe {
core::arch::x86::_fxsave(Box::as_mut_ptr(&mut (*this).inner) as _)
@ -520,12 +502,7 @@ impl FpuContext {
}
}
/// Loads the FPU with the context stored in `this` pointer.
///
/// # Safety
///
/// It is up to the caller to ensure `this` is a valid pointer to load the FPU context from.
pub unsafe fn restore(this: *const Self) {
pub fn restore(this: *const Self) {
#[cfg(any(target_arch = "x86", rust_analyzer))]
unsafe {
core::arch::x86::_fxrstor(Box::as_ptr(&(*this).inner) as _)

View File

@ -14,4 +14,3 @@ kernel-arch-x86.workspace = true
bitflags.workspace = true
static_assertions.workspace = true
tock-registers.workspace = true
log.workspace = true

View File

@ -3,21 +3,53 @@
.set MSR_IA32_FS_BASE, 0xC0000100
.macro SAVE_TASK_STATE
push %rbp
push %r15
push %r14
push %r13
push %r12
push %rbx
sub ${context_size}, %rsp
mov %rbx, 0(%rsp)
mov %r12, 8(%rsp)
mov %r13, 16(%rsp)
mov %r14, 24(%rsp)
mov %r15, 32(%rsp)
// Store FS_BASE
mov $MSR_IA32_FS_BASE, %ecx
rdmsr
mov %edx, %ecx
shl $32, %rcx
or %rax, %rcx
mov %rcx, 40(%rsp)
// TODO save %fs
mov %rbp, 48(%rsp)
mov %cr3, %rbx
mov %rbx, 56(%rsp)
.endm
.macro LOAD_TASK_STATE
pop %rbx
pop %r12
pop %r13
pop %r14
pop %r15
pop %rbp
mov 56(%rsp), %rbx
mov %rbx, %cr3
mov 0(%rsp), %rbx
mov 8(%rsp), %r12
mov 16(%rsp), %r13
mov 24(%rsp), %r14
mov 32(%rsp), %r15
// Load FS_BASE
// edx:eax = fs_base
mov 40(%rsp), %rdx
mov %edx, %eax
shr $32, %rdx
mov $MSR_IA32_FS_BASE, %ecx
wrmsr
// mov 40(%rsp), %fs
mov 48(%rsp), %rbp
add ${context_size}, %rsp
.endm
.global __x86_64_task_enter_user
@ -102,27 +134,55 @@ __x86_64_task_enter_kernel:
// %rsi - from struct ptr, %rdi - to struct ptr
__x86_64_switch_task:
// save state to source stack
SAVE_TASK_STATE
mov %rsp, 0(%rsi)
// load destination stack
mov 0(%rdi), %rsp
// TSS.RSP0
mov 8(%rdi), %rax
// Kernel stack
mov 0(%rdi), %rdi
mov %rdi, %rsp
// Load TSS.RSP0
mov %gs:(8), %rdi
mov %rax, 4(%rdi)
LOAD_TASK_STATE
ret
__x86_64_switch_and_drop:
mov 0(%rdi), %rsp
// TSS.RSP0
mov 8(%rdi), %rax
// Kernel stack
mov 0(%rdi), %rdi
mov %rdi, %rsp
// Load TSS.RSP0
mov %gs:(8), %rdi
mov %rax, 4(%rdi)
// Call thread drop before loading the state
mov %rsi, %rdi
call __arch_drop_thread
LOAD_TASK_STATE
ret
// %rdi - to struct ptr
__x86_64_enter_task:
mov 0(%rdi), %rsp
// TSS.RSP0
mov 8(%rdi), %rax
// Kernel stack
mov 0(%rdi), %rdi
mov %rdi, %rsp
// Load TSS.RSP0
mov %gs:(8), %rdi
mov %rax, 4(%rdi)
LOAD_TASK_STATE
ret
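Two things the assembly above hard-codes, sketched in Rust for reference (the real `Inner` struct lives in context.rs; the field order shown is an assumption for the branch side where it carries `tss_rsp0`, checked by the assert): `0(%rdi)` is the saved kernel stack pointer, `8(%rdi)` is the TSS.RSP0 value, and the FS_BASE MSR transfer splits the 64-bit value across EDX:EAX.
#[repr(C)]
struct InnerLayout {
    sp: usize,       // 0x00: loaded straight into %rsp
    tss_rsp0: usize, // 0x08: stored into TSS.RSP0 via the pointer at %gs:(8)
}
const _: () = assert!(core::mem::offset_of!(InnerLayout, tss_rsp0) == 8);

// rdmsr returns EDX:EAX; wrmsr consumes the same split
fn compose_msr(eax: u32, edx: u32) -> u64 {
    ((edx as u64) << 32) | eax as u64
}
fn split_msr(value: u64) -> (u32, u32) {
    (value as u32, (value >> 32) as u32) // (eax, edx)
}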

View File

@ -4,12 +4,11 @@ use kernel_arch_interface::{
mem::{KernelTableManager, PhysicalMemoryAllocator},
task::{ForkFrame, StackBuilder, TaskContext, TaskFrame, UserContextInfo},
};
use kernel_arch_x86::registers::{FpuContext, CR3, MSR_IA32_FS_BASE};
use kernel_arch_x86::registers::FpuContext;
use libk_mm_interface::address::{AsPhysicalAddress, PhysicalAddress};
use tock_registers::interfaces::Writeable;
use yggdrasil_abi::{arch::SavedFrame, error::Error};
use crate::{mem::KERNEL_TABLES, ArchitectureImpl};
use crate::mem::KERNEL_TABLES;
/// Frame saved onto the stack when taking an IRQ
#[derive(Debug)]
@ -94,8 +93,8 @@ pub struct SyscallFrame {
struct Inner {
// 0x00
sp: usize,
fs_base: usize,
// 0x08
tss_rsp0: usize,
}
/// x86-64 implementation of a task context
@ -108,14 +107,14 @@ pub struct TaskContextImpl<
fpu_context: UnsafeCell<FpuContext>,
stack_base_phys: PhysicalAddress,
stack_size: usize,
tss_rsp0: usize,
cr3: usize,
_alloc: PhantomData<PA>,
_table_manager: PhantomData<K>,
}
// 8 registers + return address (which is not included)
const COMMON_CONTEXT_SIZE: usize = 8 * 8;
impl TaskFrame for IrqFrame {
fn store(&self) -> SavedFrame {
SavedFrame {
@ -349,71 +348,52 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
TaskContextImpl<K, PA>
{
/// Constructs a new task context from a "forked" syscall frame
pub(super) unsafe fn from_syscall_frame(
_frame: &SyscallFrame,
_cr3: u64,
) -> Result<Self, Error> {
todo!()
// const USER_TASK_PAGES: usize = 8;
pub(super) unsafe fn from_syscall_frame(frame: &SyscallFrame, cr3: u64) -> Result<Self, Error> {
const USER_TASK_PAGES: usize = 8;
// let stack_base_phys = PA::allocate_contiguous_pages(USER_TASK_PAGES)?;
// let stack_base = stack_base_phys.raw_virtualize::<K>();
let stack_base_phys = PA::allocate_contiguous_pages(USER_TASK_PAGES)?;
let stack_base = stack_base_phys.raw_virtualize::<K>();
// let mut stack = StackBuilder::new(stack_base, USER_TASK_PAGES * 0x1000);
let mut stack = StackBuilder::new(stack_base, USER_TASK_PAGES * 0x1000);
// // iretq frame
// stack.push(0x1B);
// stack.push(frame.user_sp as _);
// stack.push(0x200);
// stack.push(0x23);
// stack.push(frame.user_ip as _);
// iretq frame
stack.push(0x1B);
stack.push(frame.user_sp as _);
stack.push(0x200);
stack.push(0x23);
stack.push(frame.user_ip as _);
// stack.push(frame.args[5] as _); // r9
// stack.push(frame.args[4] as _); // r8
// stack.push(frame.args[3] as _); // r10
// stack.push(frame.args[2] as _); // rdx
// stack.push(frame.args[1] as _); // rsi
// stack.push(frame.args[0] as _); // rdi
stack.push(frame.args[5] as _); // r9
stack.push(frame.args[4] as _); // r8
stack.push(frame.args[3] as _); // r10
stack.push(frame.args[2] as _); // rdx
stack.push(frame.args[1] as _); // rsi
stack.push(frame.args[0] as _); // rdi
// // callee-saved registers
// stack.push(__x86_64_task_enter_from_fork as _);
// callee-saved registers
stack.push(__x86_64_task_enter_from_fork as _);
// stack.push(cr3 as _);
stack.push(cr3 as _);
// stack.push(frame.rbp as _);
// stack.push(0x12345678); // XXX TODO: fs_base from SyscallFrame
// stack.push(frame.r15 as _);
// stack.push(frame.r14 as _);
// stack.push(frame.r13 as _);
// stack.push(frame.r12 as _);
// stack.push(frame.rbx as _);
stack.push(frame.rbp as _);
stack.push(0x12345678); // XXX TODO: fs_base from SyscallFrame
stack.push(frame.r15 as _);
stack.push(frame.r14 as _);
stack.push(frame.r13 as _);
stack.push(frame.r12 as _);
stack.push(frame.rbx as _);
// let sp = stack.build();
// let rsp0 = stack_base + USER_TASK_PAGES * 0x1000;
let sp = stack.build();
let rsp0 = stack_base + USER_TASK_PAGES * 0x1000;
// Ok(Self {
// inner: UnsafeCell::new(Inner { sp, tss_rsp0: rsp0 }),
// fpu_context: UnsafeCell::new(FpuContext::new(true)),
// stack_base_phys,
// stack_size: USER_TASK_PAGES * 0x1000,
// _alloc: PhantomData,
// _table_manager: PhantomData,
// })
        Ok(Self {
            inner: UnsafeCell::new(Inner { sp, tss_rsp0: rsp0 }),
            fpu_context: UnsafeCell::new(FpuContext::new(true)),
            stack_base_phys,
            stack_size: USER_TASK_PAGES * 0x1000,
            _alloc: PhantomData,
            _table_manager: PhantomData,
        })
    }
    unsafe fn store_state(&self) {
        FpuContext::store(self.fpu_context.get());
        // No need to save TSS/%cr3/%fs base back into the TCB, only the kernel
        // can make changes to those
    }
    unsafe fn load_state(&self) {
        FpuContext::restore(self.fpu_context.get());
        // When the task is interrupted from Ring 3, make the CPU load
        // the top of its kernel stack
        ArchitectureImpl::set_local_tss_sp0(self.tss_rsp0);
        MSR_IA32_FS_BASE.set((*self.inner.get()).fs_base as u64);
        CR3.set_address(self.cr3);
    }
}
@ -431,8 +411,6 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
fn kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Result<Self, Error> {
const KERNEL_TASK_PAGES: usize = 32;
let cr3: usize = unsafe { KERNEL_TABLES.lock().as_physical_address() }.into();
let stack_base_phys = PA::allocate_contiguous_pages(KERNEL_TASK_PAGES)?;
let stack_base = stack_base_phys.raw_virtualize::<K>();
@ -442,28 +420,30 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
stack.push(entry as _);
stack.push(arg);
setup_common_context(&mut stack, __x86_64_task_enter_kernel as _);
setup_common_context(
&mut stack,
__x86_64_task_enter_kernel as _,
unsafe { KERNEL_TABLES.lock().as_physical_address() }.into(),
0,
);
let sp = stack.build();
// TODO stack is leaked
Ok(Self {
inner: UnsafeCell::new(Inner { sp, fs_base: 0 }),
inner: UnsafeCell::new(Inner { sp, tss_rsp0: 0 }),
fpu_context: UnsafeCell::new(FpuContext::new(false)),
stack_base_phys,
stack_size: KERNEL_TASK_PAGES * 0x1000,
tss_rsp0: 0,
cr3,
_alloc: PhantomData,
_table_manager: PhantomData,
})
}
fn user(context: UserContextInfo) -> Result<Self, Error> {
const USER_TASK_PAGES: usize = 16;
const USER_TASK_PAGES: usize = 8;
let stack_base_phys = PA::allocate_contiguous_pages(USER_TASK_PAGES)?;
let stack_base = stack_base_phys.raw_virtualize::<K>();
@ -480,55 +460,53 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
stack.push(context.argument);
stack.push(context.stack_pointer);
setup_common_context(&mut stack, __x86_64_task_enter_user as _);
setup_common_context(
&mut stack,
__x86_64_task_enter_user as _,
context.address_space,
context.tls,
);
let sp = stack.build();
let rsp0 = stack_base + USER_TASK_PAGES * 0x1000;
Ok(Self {
inner: UnsafeCell::new(Inner {
sp,
fs_base: context.thread_pointer,
}),
inner: UnsafeCell::new(Inner { sp, tss_rsp0: rsp0 }),
fpu_context: UnsafeCell::new(FpuContext::new(true)),
stack_base_phys,
stack_size: USER_TASK_PAGES * 0x1000,
tss_rsp0: rsp0,
cr3: context.address_space as usize,
_alloc: PhantomData,
_table_manager: PhantomData,
})
}
unsafe fn enter(&self) -> ! {
self.load_state();
FpuContext::restore(self.fpu_context.get());
__x86_64_enter_task(self.inner.get())
}
unsafe fn switch(&self, from: &Self) {
if core::ptr::addr_eq(self, from) {
return;
}
let dst = self.inner.get();
let src = from.inner.get();
from.store_state();
self.load_state();
__x86_64_switch_task(self.inner.get(), from.inner.get())
if dst != src {
// Save the old context
FpuContext::store(from.fpu_context.get());
// Load next context
FpuContext::restore(self.fpu_context.get());
__x86_64_switch_task(dst, src);
}
}
    unsafe fn switch_and_drop(&self, thread: *const ()) {
        self.load_state();
        __x86_64_switch_and_drop(self.inner.get(), thread)
        let dst = self.inner.get();
        FpuContext::restore(self.fpu_context.get());
        __x86_64_switch_and_drop(dst, thread)
    }
    fn set_thread_pointer(&self, tp: usize) {
        unsafe { (*self.inner.get()).fs_base = tp };
        MSR_IA32_FS_BASE.set(tp as _);
    }
    fn align_stack_for_entry(sp: usize) -> usize {
        (sp & !0xF) - 8
    }
}
@ -536,7 +514,6 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
for TaskContextImpl<K, PA>
{
fn drop(&mut self) {
log::trace!("Drop Context {:#p}", self);
assert_eq!(self.stack_size % 0x1000, 0);
for offset in (0..self.stack_size).step_by(0x1000) {
@ -547,10 +524,13 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
}
}
fn setup_common_context(builder: &mut StackBuilder, entry: usize) {
fn setup_common_context(builder: &mut StackBuilder, entry: usize, cr3: u64, fs_base: usize) {
builder.push(entry);
builder.push(cr3 as _);
builder.push(0); // %rbp
builder.push(fs_base); // %fs_base
builder.push(0); // %r15
builder.push(0); // %r14
builder.push(0); // %r13
@ -567,4 +547,8 @@ extern "C" {
fn __x86_64_switch_and_drop(to: *mut Inner, from: *const ());
}
global_asm!(include_str!("context.S"), options(att_syntax));
global_asm!(
include_str!("context.S"),
context_size = const COMMON_CONTEXT_SIZE,
options(att_syntax)
);

View File

@ -1,18 +1,18 @@
#![no_std]
#![allow(clippy::new_without_default)]
#![feature(naked_functions)]
#![feature(naked_functions, trait_upcasting)]
extern crate alloc;
use core::{
ops::{DerefMut, Range},
ops::DerefMut,
sync::atomic::{AtomicUsize, Ordering},
};
use alloc::{sync::Arc, vec::Vec};
use alloc::vec::Vec;
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use kernel_arch_interface::{
cpu::{CpuData, CpuImpl, IpiQueue},
cpu::{CpuImpl, IpiQueue},
task::Scheduler,
util::OneTimeInit,
Architecture,
@ -52,17 +52,14 @@ pub struct PerCpuData {
// 0x10, used in assembly
pub tmp_address: usize,
pub local_apic: Arc<dyn LocalApicInterface>,
pub local_apic: &'static dyn LocalApicInterface,
pub available_features: CpuFeatures,
pub enabled_features: CpuFeatures,
}
impl CpuData for PerCpuData {}
impl PerCpuData {
#[inline]
pub fn local_apic(&self) -> &dyn LocalApicInterface {
self.local_apic.as_ref()
pub fn local_apic(&self) -> &'static dyn LocalApicInterface {
self.local_apic
}
}
@ -87,14 +84,6 @@ impl ArchitectureImpl {
fn local_cpu_data() -> Option<&'static mut PerCpuData> {
unsafe { (Self::local_cpu() as *mut PerCpuData).as_mut() }
}
fn set_local_tss_sp0(sp: usize) {
let local_cpu = Self::local_cpu_data().unwrap();
unsafe {
(core::ptr::with_exposed_provenance_mut::<usize>(local_cpu.tss_address + 4))
.write_unaligned(sp);
}
}
}
impl Architecture for ArchitectureImpl {
@ -179,13 +168,14 @@ impl Architecture for ArchitectureImpl {
}
}
    fn ipi_queue(cpu_id: u32) -> Option<&'static IpiQueue<Self>> {
        IPI_QUEUES.try_get().and_then(|q| q.get(cpu_id as usize))
    }
    fn local_interrupt_controller() -> Option<&'static dyn LocalInterruptController> {
        let cpu = Self::local_cpu_data()?;
        Some(cpu.local_apic.as_ref())
    }
    fn local_interrupt_controller() -> &'static dyn LocalInterruptController {
        let local = Self::local_cpu_data().unwrap();
        local.local_apic
    }
    fn message_interrupt_controller() -> &'static dyn MessageInterruptController {
        let local = Self::local_cpu_data().unwrap();
        local.local_apic
    }
fn cpu_enabled_features<S: Scheduler>(cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
@ -195,30 +185,4 @@ impl Architecture for ArchitectureImpl {
fn cpu_available_features<S: Scheduler>(cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
Some(&cpu.available_features)
}
// Cache/barrier
fn load_barrier() {
unsafe { core::arch::x86_64::_mm_lfence() };
}
fn store_barrier() {
unsafe { core::arch::x86_64::_mm_sfence() };
}
fn memory_barrier() {
unsafe { core::arch::x86_64::_mm_mfence() };
}
fn flush_virtual_range(range: Range<usize>) {
// TODO I assume 64-byte cache line on all CPUs
// TODO clflush instruction may not be available, test for it
const CLSIZE: usize = 64;
let start = range.start & !(CLSIZE - 1);
let end = (range.end + (CLSIZE - 1)) & !(CLSIZE - 1);
for line in (start..end).step_by(CLSIZE) {
unsafe { core::arch::x86_64::_mm_clflush(line as _) };
}
}
}
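Worked example for `flush_virtual_range` above, assuming the 64-byte line size it hard-codes: the range is widened outward to line boundaries before flushing.
fn widen_to_lines(range: core::ops::Range<usize>) -> core::ops::Range<usize> {
    const CLSIZE: usize = 64;
    (range.start & !(CLSIZE - 1))..((range.end + (CLSIZE - 1)) & !(CLSIZE - 1))
}
// e.g. widen_to_lines(0x1005..0x1043) == 0x1000..0x1080,
// so clflush touches exactly two lines: 0x1000 and 0x1040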

View File

@ -1,18 +1,20 @@
use core::{
alloc::Layout,
ops::{Deref, DerefMut},
ptr::addr_of,
sync::atomic::{AtomicUsize, Ordering},
};
use kernel_arch_interface::{
mem::{DeviceMemoryAttributes, KernelTableManager, RawDeviceMemoryMapping},
split_spinlock,
sync::split_spinlock,
};
use kernel_arch_x86::registers::CR3;
use libk_mm_interface::{
address::PhysicalAddress,
table::{page_index, EntryLevel, EntryLevelExt},
};
use memtables::x86_64::FixedTables;
use static_assertions::{const_assert_eq, const_assert_ne};
use yggdrasil_abi::error::Error;
@ -49,12 +51,12 @@ const RAM_MAPPING_L0I: usize = KERNEL_L0_INDEX - 1;
const DEVICE_MAPPING_L3_COUNT: usize = 4;
split_spinlock! {
use libk_mm_interface::KernelImageObject;
use memtables::x86_64::FixedTables;
use crate::ArchitectureImpl;
use crate::mem::FixedTables;
use libk_mm_interface::KernelImageObject;
#[link_section = ".data.tables"]
static KERNEL_TABLES: KernelImageObject<FixedTables> =
static KERNEL_TABLES<lock: ArchitectureImpl>: KernelImageObject<FixedTables> =
unsafe { KernelImageObject::new(FixedTables::zeroed()) };
}
@ -127,7 +129,6 @@ unsafe fn map_early_pages(physical: PhysicalAddress, count: usize) -> Result<usi
// TODO NX, NC
EARLY_MAPPING_L3[i + l3i] =
PageEntry::page(physical.add(i * L3::SIZE), PageAttributes::WRITABLE);
flush_tlb_entry(EARLY_MAPPING_OFFSET + (i + l3i) * L3::SIZE);
}
return Ok(EARLY_MAPPING_OFFSET + l3i * L3::SIZE);
@ -197,6 +198,12 @@ unsafe fn map_device_memory_l2(
PageEntry::<L2>::block(base.add(j * L2::SIZE), PageAttributes::WRITABLE);
}
// debugln!(
// "map l2s: base={:#x}, count={} -> {:#x}",
// base,
// count,
// DEVICE_MAPPING_OFFSET + i * L2::SIZE
// );
return Ok(DEVICE_MAPPING_OFFSET + i * L2::SIZE);
}
@ -208,6 +215,7 @@ unsafe fn map_device_memory(
size: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping<KernelTableManagerImpl>, Error> {
// debugln!("Map {}B @ {:#x}", size, base);
let l3_aligned = base.page_align_down::<L3>();
let l3_offset = base.page_offset::<L3>();
let page_count = (l3_offset + size).page_count::<L3>();
@ -222,7 +230,6 @@ unsafe fn map_device_memory(
let address = base_address + l2_offset;
Ok(RawDeviceMemoryMapping::from_raw_parts(
l2_aligned.into_u64(),
address,
base_address,
page_count,
@ -234,7 +241,6 @@ unsafe fn map_device_memory(
let address = base_address + l3_offset;
Ok(RawDeviceMemoryMapping::from_raw_parts(
l3_aligned.into_u64(),
address,
base_address,
page_count,
@ -244,6 +250,11 @@ unsafe fn map_device_memory(
}
unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping<KernelTableManagerImpl>) {
// debugln!(
// "Unmap {}B @ {:#x}",
// map.page_count * map.page_size,
// map.base_address
// );
match map.page_size {
L3::SIZE => {
for i in 0..map.page_count {
@ -274,7 +285,7 @@ impl<'a, T: Sized> EarlyMapping<'a, T> {
let layout = Layout::new::<T>();
let aligned = physical.page_align_down::<L3>();
let offset = physical.page_offset::<L3>();
let page_count = (offset + layout.size()).div_ceil(L3::SIZE);
let page_count = (offset + layout.size() + L3::SIZE - 1) / L3::SIZE;
let virt = map_early_pages(aligned, page_count)?;
let value = &mut *((virt + offset) as *mut T);
@ -293,7 +304,7 @@ impl<'a, T: Sized> EarlyMapping<'a, T> {
let layout = Layout::array::<T>(len).unwrap();
let aligned = physical.page_align_down::<L3>();
let offset = physical.page_offset::<L3>();
let page_count = (offset + layout.size()).div_ceil(L3::SIZE);
let page_count = (offset + layout.size() + L3::SIZE - 1) / L3::SIZE;
let virt = map_early_pages(aligned, page_count)?;
let value = core::slice::from_raw_parts_mut((virt + offset) as *mut T, len);
@ -302,7 +313,7 @@ impl<'a, T: Sized> EarlyMapping<'a, T> {
}
}
impl<T: ?Sized> Deref for EarlyMapping<'_, T> {
impl<'a, T: ?Sized> Deref for EarlyMapping<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
@ -310,13 +321,13 @@ impl<T: ?Sized> Deref for EarlyMapping<'_, T> {
}
}
impl<T: ?Sized> DerefMut for EarlyMapping<'_, T> {
impl<'a, T: ?Sized> DerefMut for EarlyMapping<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.value
}
}
impl<T: ?Sized> Drop for EarlyMapping<'_, T> {
impl<'a, T: ?Sized> Drop for EarlyMapping<'a, T> {
fn drop(&mut self) {
let address = (self.value as *mut T).addr() & !(L3::SIZE - 1);
@ -338,15 +349,6 @@ pub fn clone_kernel_tables(dst: &mut PageTable<L0>) {
}
}
pub fn auto_address<T>(pointer: *const T) -> usize {
let address = pointer.addr();
if address < KERNEL_VIRT_OFFSET {
address
} else {
address - KERNEL_VIRT_OFFSET
}
}
/// Sets up the following memory map:
/// ...: KERNEL_TABLES.l0:
/// * 0xFFFFFF0000000000 .. 0xFFFFFFFF8000000000 : RAM_MAPPING_L1
@ -365,15 +367,15 @@ pub fn auto_address<T>(pointer: *const T) -> usize {
/// Unsafe, must only be called by BSP during its early init, must already be in "higher-half"
pub unsafe fn init_fixed_tables() {
let mut tables = KERNEL_TABLES.lock();
    // TODO this could be built at compile time too
let early_mapping_l3_phys = auto_address(&raw const EARLY_MAPPING_L3);
let device_mapping_l2_phys = auto_address(&raw const DEVICE_MAPPING_L2);
let ram_mapping_l1_phys = auto_address(&raw const RAM_MAPPING_L1);
let early_mapping_l3_phys = addr_of!(EARLY_MAPPING_L3) as usize - KERNEL_VIRT_OFFSET;
let device_mapping_l2_phys = addr_of!(DEVICE_MAPPING_L2) as usize - KERNEL_VIRT_OFFSET;
let ram_mapping_l1_phys = addr_of!(RAM_MAPPING_L1) as usize - KERNEL_VIRT_OFFSET;
for i in 0..DEVICE_MAPPING_L3_COUNT {
let device_mapping_l3_phys =
PhysicalAddress::from_usize(auto_address(&raw const DEVICE_MAPPING_L3S[i]));
let device_mapping_l3_phys = PhysicalAddress::from_usize(
&DEVICE_MAPPING_L3S[i] as *const _ as usize - KERNEL_VIRT_OFFSET,
);
DEVICE_MAPPING_L2[i] = PageEntry::table(device_mapping_l3_phys, PageAttributes::WRITABLE);
}
@ -390,7 +392,7 @@ pub unsafe fn init_fixed_tables() {
(ram_mapping_l1_phys as u64) | (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();
// TODO ENABLE EFER.NXE
let cr3 = auto_address(&raw const tables.l0);
let cr3 = (&raw const tables.l0).addr() - KERNEL_VIRT_OFFSET;
CR3.set_address(cr3);
}
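Worked example for `auto_address` above: pointers already below the higher-half base pass through unchanged, while higher-half pointers are rebased to their load-time physical address (a sketch; the concrete addresses are illustrative).
fn auto_address_examples() {
    let hi = (KERNEL_VIRT_OFFSET + 0x10_5000) as *const u8;
    let lo = 0x10_5000 as *const u8;
    assert_eq!(auto_address(hi), 0x10_5000); // higher-half: offset subtracted
    assert_eq!(auto_address(lo), 0x10_5000); // already physical: unchanged
}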

View File

@ -4,7 +4,7 @@ use core::marker::PhantomData;
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::PhysicalRefMut,
process::{PageAttributeUpdate, ProcessAddressSpaceManager},
process::ProcessAddressSpaceManager,
table::{
EntryLevel, EntryLevelDrop, EntryLevelExt, MapAttributes, NextPageTable, TableAllocator,
},
@ -61,15 +61,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
self.write_l3_entry(address, PageEntry::page(physical, flags.into()), false)
}
unsafe fn update_page_attributes(
&mut self,
address: usize,
update: &PageAttributeUpdate,
) -> Result<(), Error> {
self.update_l3_entry(address, |entry| entry.update(update))
}
unsafe fn unmap_page(&mut self, address: usize) -> Result<(PhysicalAddress, bool), Error> {
unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error> {
self.pop_l3_entry(address)
}
@ -79,9 +71,9 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
.ok_or(Error::InvalidMemoryOperation)
}
fn as_address_with_asid(&self) -> (u64, u64) {
fn as_address_with_asid(&self) -> u64 {
// TODO x86-64 PCID/ASID?
(unsafe { self.l0.as_physical_address().into_u64() }, 0)
unsafe { self.l0.as_physical_address().into_u64() }
}
unsafe fn clear(&mut self) {
@ -119,33 +111,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
Ok(())
}
fn update_l3_entry<F: FnOnce(&mut PageEntry<L3>) -> Result<(), Error>>(
&mut self,
virt: usize,
mapper: F,
) -> Result<(), Error> {
let l0i = virt.page_index::<L0>();
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
let l3i = virt.page_index::<L3>();
let mut l1 = self.l0.get_mut(l0i).ok_or(Error::DoesNotExist)?;
let mut l2 = l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;
let entry = &mut l3[l3i];
if !entry.is_present() {
return Err(Error::DoesNotExist);
}
mapper(entry)?;
unsafe {
flush_tlb_entry(virt);
}
Ok(())
}
fn pop_l3_entry(&mut self, virt: usize) -> Result<(PhysicalAddress, bool), Error> {
fn pop_l3_entry(&mut self, virt: usize) -> Result<PhysicalAddress, Error> {
let l0i = virt.page_index::<L0>();
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
@ -157,14 +123,13 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;
let page = l3[l3i].as_page().ok_or(Error::DoesNotExist)?;
let dirty = l3[l3i].is_dirty();
l3[l3i] = PageEntry::INVALID;
unsafe {
flush_tlb_entry(virt);
}
Ok((page, dirty))
Ok(page)
}
fn read_l3_entry(&self, virt: usize) -> Option<(PhysicalAddress, MapAttributes)> {
@ -179,7 +144,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
let page = l3[l3i].as_page()?;
Some((page.add(virt & 0xFFF), l3[l3i].attributes().into()))
Some((page, l3[l3i].attributes().into()))
}
}
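The l0i..l3i indices used by the helpers above come from the standard x86-64 four-level split: 9 bits per level above a 12-bit page offset. Equivalent open-coded form, for reference:
fn split_virt(virt: usize) -> (usize, usize, usize, usize) {
    (
        (virt >> 39) & 0x1FF, // l0i: 512 GiB slot (PML4)
        (virt >> 30) & 0x1FF, // l1i: 1 GiB slot (PDPT)
        (virt >> 21) & 0x1FF, // l2i: 2 MiB slot (PD)
        (virt >> 12) & 0x1FF, // l3i: 4 KiB page (PT)
    )
}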

View File

@ -8,7 +8,6 @@ use bitflags::bitflags;
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::{PhysicalRef, PhysicalRefMut},
process::PageAttributeUpdate,
table::{
EntryLevel, EntryLevelDrop, MapAttributes, NextPageTable, NonTerminalEntryLevel,
TableAllocator,
@ -32,8 +31,6 @@ bitflags! {
/// For tables, allows user access to further translation levels, for pages/blocks, allows
/// user access to the region covered by the entry
const USER = 1 << 2;
/// If set, the page has been written to
const DIRTY = 1 << 6;
}
}
@ -101,15 +98,11 @@ impl PageEntry<L3> {
/// not
pub fn as_page(self) -> Option<PhysicalAddress> {
if self.0 & PageAttributes::PRESENT.bits() != 0 {
Some(PhysicalAddress::from_u64(self.0 & !Self::ATTR_MASK))
Some(PhysicalAddress::from_u64(self.0 & !0xFFF))
} else {
None
}
}
pub fn is_dirty(&self) -> bool {
self.0 & PageAttributes::DIRTY.bits() != 0
}
}
impl PageEntry<L2> {
@ -152,7 +145,7 @@ impl<L: NonTerminalEntryLevel> PageEntry<L> {
if self.0 & PageAttributes::PRESENT.bits() != 0
&& self.0 & PageAttributes::BLOCK.bits() == 0
{
Some(PhysicalAddress::from_u64(self.0 & !Self::ATTR_MASK))
Some(PhysicalAddress::from_u64(self.0 & !0xFFF))
} else {
None
}
@ -165,8 +158,6 @@ impl<L: NonTerminalEntryLevel> PageEntry<L> {
}
impl<L: EntryLevel> PageEntry<L> {
const ATTR_MASK: u64 = 0xFFF | (1 << 63);
/// An entry that is not mapped
pub const INVALID: Self = Self(0, PhantomData);
@ -188,21 +179,6 @@ impl<L: EntryLevel> PageEntry<L> {
pub fn is_present(&self) -> bool {
self.0 & PageAttributes::PRESENT.bits() != 0
}
pub fn update(&mut self, update: &PageAttributeUpdate) -> Result<(), Error> {
let mut attrs = PageAttributes::from_bits_retain(self.0);
if let Some(write) = update.user_write {
if write {
attrs |= PageAttributes::WRITABLE;
} else {
attrs &= !PageAttributes::WRITABLE;
}
}
// Dirty is ignored, it's hardware-managed
self.0 &= !Self::ATTR_MASK;
self.0 |= attrs.bits() & Self::ATTR_MASK;
Ok(())
}
}
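Usage sketch for `update()` above, e.g. from a copy-on-write path (assumes `PageAttributeUpdate` from libk-mm-interface implements `Default` and exposes the `user_write` field seen here):
fn make_page_read_only(entry: &mut PageEntry<L3>) -> Result<(), Error> {
    let update = PageAttributeUpdate {
        user_write: Some(false), // drop WRITABLE; DIRTY is left to hardware
        ..Default::default()
    };
    entry.update(&update)
}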
impl<L: EntryLevel> PageTable<L> {

View File

@ -92,7 +92,6 @@ fn main() {
"x86" => (),
"x86_64" => build_x86_64(),
"aarch64" => (),
"riscv64" => (),
_ => panic!("Unknown target arch: {:?}", arch),
}
}

View File

@ -1,18 +0,0 @@
[package]
name = "ygg_driver_acpi"
version = "0.1.0"
edition = "2024"
[dependencies]
libk-util.workspace = true
libk-mm.workspace = true
libk.workspace = true
device-api.workspace = true
kernel-arch-x86.path = "../../arch/x86"
acpi.workspace = true
rsdp.workspace = true
aml.workspace = true
acpi-system.workspace = true
log.workspace = true

View File

@ -1,131 +0,0 @@
use core::time::Duration;
use crate::AcpiHandlerImpl;
impl aml::Handler for AcpiHandlerImpl {
fn read_io_u8(&self, port: u16) -> u8 {
<Self as acpi_system::Handler>::io_read_u8(port)
}
fn read_io_u16(&self, port: u16) -> u16 {
<Self as acpi_system::Handler>::io_read_u16(port)
}
fn read_io_u32(&self, port: u16) -> u32 {
<Self as acpi_system::Handler>::io_read_u32(port)
}
fn write_io_u8(&self, port: u16, value: u8) {
<Self as acpi_system::Handler>::io_write_u8(port, value)
}
fn write_io_u16(&self, port: u16, value: u16) {
<Self as acpi_system::Handler>::io_write_u16(port, value)
}
fn write_io_u32(&self, port: u16, value: u32) {
<Self as acpi_system::Handler>::io_write_u32(port, value)
}
fn read_u8(&self, address: usize) -> u8 {
<Self as acpi_system::Handler>::mem_read_u8(address as u64)
}
fn read_u16(&self, address: usize) -> u16 {
<Self as acpi_system::Handler>::mem_read_u16(address as u64)
}
fn read_u32(&self, address: usize) -> u32 {
<Self as acpi_system::Handler>::mem_read_u32(address as u64)
}
fn read_u64(&self, address: usize) -> u64 {
<Self as acpi_system::Handler>::mem_read_u64(address as u64)
}
fn write_u8(&self, address: usize, value: u8) {
<Self as acpi_system::Handler>::mem_write_u8(address as u64, value)
}
fn write_u16(&self, address: usize, value: u16) {
<Self as acpi_system::Handler>::mem_write_u16(address as u64, value)
}
fn write_u32(&self, address: usize, value: u32) {
<Self as acpi_system::Handler>::mem_write_u32(address as u64, value)
}
fn write_u64(&self, address: usize, value: u64) {
<Self as acpi_system::Handler>::mem_write_u64(address as u64, value)
}
fn read_pci_u8(&self, _segment: u16, _bus: u8, _device: u8, _function: u8, _offset: u16) -> u8 {
0xFF
}
fn read_pci_u16(
&self,
_segment: u16,
_bus: u8,
_device: u8,
_function: u8,
_offset: u16,
) -> u16 {
0xFFFF
}
fn read_pci_u32(
&self,
_segment: u16,
_bus: u8,
_device: u8,
_function: u8,
_offset: u16,
) -> u32 {
0xFFFFFFFF
}
fn write_pci_u8(
&self,
_segment: u16,
_bus: u8,
_device: u8,
_function: u8,
_offset: u16,
_value: u8,
) {
}
fn write_pci_u16(
&self,
_segment: u16,
_bus: u8,
_device: u8,
_function: u8,
_offset: u16,
_value: u16,
) {
}
fn write_pci_u32(
&self,
_segment: u16,
_bus: u8,
_device: u8,
_function: u8,
_offset: u16,
_value: u32,
) {
}
fn read_ec_u8(&self, _address: u64) -> u8 {
0x00
}
fn write_ec_u8(&self, _address: u64, _value: u8) {}
fn sleep(&self, _duration: Duration) {
todo!()
// util::polling_sleep(duration).unwrap();
}
}

View File

@ -1,171 +0,0 @@
use core::{ptr::NonNull, time::Duration};
use acpi::PhysicalMapping;
use acpi_system::AcpiSystemError;
use alloc::sync::Arc;
use device_api::{
device::Device,
interrupt::{InterruptHandler, Irq, IrqVector},
};
use kernel_arch_x86::{intrinsics, ISA_IRQ_OFFSET};
use libk::device::external_interrupt_controller;
use libk_mm::{
address::{PhysicalAddress, Virtualize},
pointer::PhysicalRef,
};
use crate::{
mem::{read_memory, write_memory},
ACPI_SYSTEM,
};
#[derive(Clone, Copy)]
#[doc(hidden)]
pub struct AcpiHandlerImpl;
struct SciHandler;
impl acpi_system::Handler for AcpiHandlerImpl {
type MappedSlice = PhysicalRef<'static, [u8]>;
unsafe fn map_slice(address: u64, length: u64) -> Self::MappedSlice {
unsafe {
PhysicalRef::map_slice(
PhysicalAddress::from_u64(address),
length.try_into().unwrap(),
)
}
}
fn io_read_u8(port: u16) -> u8 {
let value = unsafe { intrinsics::inb(port) };
log::trace!("io_read_u8 {:#x} <- {:#x}", port, value);
value
}
fn io_read_u16(port: u16) -> u16 {
let value = unsafe { intrinsics::inw(port) };
log::trace!("io_read_u16 {:#x} <- {:#x}", port, value);
value
}
fn io_read_u32(port: u16) -> u32 {
let value = unsafe { intrinsics::inl(port) };
log::trace!("io_read_u32 {:#x} <- {:#x}", port, value);
value
}
fn io_write_u8(port: u16, value: u8) {
log::trace!("io_write_u8 {:#x}, {:#x}", port, value);
unsafe { intrinsics::outb(port, value) }
}
fn io_write_u16(port: u16, value: u16) {
log::trace!("io_write_u16 {:#x}, {:#x}", port, value);
unsafe { intrinsics::outw(port, value) }
}
fn io_write_u32(port: u16, value: u32) {
log::trace!("io_write_u32 {:#x}, {:#x}", port, value);
unsafe { intrinsics::outl(port, value) }
}
fn mem_read_u8(address: u64) -> u8 {
let value = unsafe { read_memory(PhysicalAddress::from_u64(address)) };
log::trace!("mem_read_u8 {:#x} -> {:#x}", address, value);
value
}
fn mem_read_u16(address: u64) -> u16 {
let value = unsafe { read_memory(PhysicalAddress::from_u64(address)) };
log::trace!("mem_read_u16 {:#x} -> {:#x}", address, value);
value
}
fn mem_read_u32(address: u64) -> u32 {
let value = unsafe { read_memory(PhysicalAddress::from_u64(address)) };
log::trace!("mem_read_u32 {:#x} -> {:#x}", address, value);
value
}
fn mem_read_u64(address: u64) -> u64 {
let value = unsafe { read_memory(PhysicalAddress::from_u64(address)) };
log::trace!("mem_read_u64 {:#x} -> {:#x}", address, value);
value
}
fn mem_write_u8(address: u64, value: u8) {
log::trace!("mem_write_u8 {:#x}, {:#x}", address, value);
unsafe { write_memory(PhysicalAddress::from_u64(address), value) }
}
fn mem_write_u16(address: u64, value: u16) {
log::trace!("mem_write_u16 {:#x}, {:#x}", address, value);
unsafe { write_memory(PhysicalAddress::from_u64(address), value) }
}
fn mem_write_u32(address: u64, value: u32) {
log::trace!("mem_write_u32 {:#x}, {:#x}", address, value);
unsafe { write_memory(PhysicalAddress::from_u64(address), value) }
}
fn mem_write_u64(address: u64, value: u64) {
log::trace!("mem_write_u64 {:#x}, {:#x}", address, value);
unsafe { write_memory(PhysicalAddress::from_u64(address), value) }
}
fn install_interrupt_handler(irq: u32) -> Result<(), AcpiSystemError> {
log::info!("Installing ACPI SCI handler at IRQ #{}", irq);
let intc = external_interrupt_controller().expect("No external intc");
let handler = Arc::new(SciHandler);
let irq = Irq::External(irq + ISA_IRQ_OFFSET);
intc.register_irq(irq, Default::default(), handler).unwrap();
intc.enable_irq(irq).unwrap();
Ok(())
}
fn stall(_duration: Duration) {
// TODO polling_sleep is not yet implemented properly
todo!()
// util::polling_sleep(duration).ok();
}
}
impl rsdp::handler::AcpiHandler for AcpiHandlerImpl {
unsafe fn map_physical_region<T>(
&self,
physical_address: usize,
size: usize,
) -> PhysicalMapping<Self, T> {
unsafe {
PhysicalMapping::new(
physical_address,
NonNull::new_unchecked(
PhysicalAddress::from_usize(physical_address).virtualize() as *mut T
),
size,
size,
*self,
)
}
}
fn unmap_physical_region<T>(_region: &acpi::PhysicalMapping<Self, T>) {}
}
impl InterruptHandler for SciHandler {
fn handle_irq(self: Arc<Self>, _vector: IrqVector) -> bool {
log::trace!("ACPI SCI received");
ACPI_SYSTEM.get().lock().handle_sci();
true
}
}
impl Device for SciHandler {
fn display_name(&self) -> &str {
"ACPI SCI handler"
}
}

View File

@ -1,89 +0,0 @@
#![feature(allocator_api)]
#![no_std]
use acpi::AcpiTables;
use acpi_system::{AcpiInterruptMethod, AcpiSystem};
use alloc::boxed::Box;
use libk::error::Error;
use libk_util::{sync::IrqSafeSpinlock, OneTimeInit};
extern crate alloc;
pub mod mem;
pub use mem::AcpiAllocator;
pub mod handler;
pub use handler::AcpiHandlerImpl;
pub mod aml_handler;
pub use acpi_system::{
EventAction, FixedEvent, InterruptPolarity, InterruptTrigger, IrqDescriptor, PciPin,
};
static ACPI_SYSTEM: OneTimeInit<IrqSafeSpinlock<AcpiSystem<AcpiHandlerImpl>>> = OneTimeInit::new();
pub fn add_event_handler<F: Fn(&AcpiSystem<AcpiHandlerImpl>) -> EventAction + 'static>(
event: &FixedEvent,
handler: F,
) -> Result<(), Error> {
ACPI_SYSTEM
.get()
.lock()
.enable_fixed_event(event, Box::new(handler))
.map_err(|_| Error::InvalidArgument)
}
pub fn get_pci_route(
aml_path: &str,
device: u16,
function: u16,
pin: PciPin,
) -> Option<IrqDescriptor> {
ACPI_SYSTEM
.get()
.lock()
.pci_route(aml_path, device, function, pin)
.ok()
}
/// Initializes ACPI management
pub fn switch_to_acpi(tables: &'static AcpiTables<AcpiHandlerImpl>) -> Result<(), Error> {
// NOTE mostly broken for real HW
let mut system = AcpiSystem::new(tables, Box::new(AcpiHandlerImpl)).unwrap();
system.initialize(AcpiInterruptMethod::Apic).unwrap();
// system
// .enable_fixed_event(
// &FixedEvent::POWER_BUTTON,
// Box::new(|_| {
// log::info!("Power button was pressed");
// // TODO the correct way would be to
// // 1. Nicely ask all the processes to quit
// // 2. Wait for some time
// // 3. Kill the remaining ones
// // 4. Halt other cores
// // 5. Sync filesystem
// // 6. Do something with the devices
// // 7. Actually enter the S5 state
// unsafe {
// PLATFORM
// .send_ipi(IpiDeliveryTarget::OtherCpus, IpiMessage::Shutdown)
// .unwrap();
// }
// SHUTDOWN_FENCE.signal();
// SHUTDOWN_FENCE.wait_all(CPU_COUNT.load(Ordering::Acquire));
// log::info!("CPUs are parked, can shutdown now");
// EventAction::EnterSleepState(AcpiSleepState::S5)
// }),
// )
// .unwrap();
ACPI_SYSTEM.init(IrqSafeSpinlock::new(system));
Ok(())
}
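Usage sketch for `add_event_handler`, following the commented-out power-button handler above (`AcpiSleepState` comes from acpi-system and is not among this file's re-exports; purely illustrative):
fn hook_power_button() -> Result<(), Error> {
    add_event_handler(&FixedEvent::POWER_BUTTON, |_system| {
        log::info!("Power button pressed, entering S5");
        EventAction::EnterSleepState(AcpiSleepState::S5)
    })
}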

View File

@ -1,64 +0,0 @@
//! ACPI memory IO and management functions
use core::{
alloc::{AllocError, Allocator, GlobalAlloc, Layout},
ptr::NonNull,
};
use libk_mm::{address::PhysicalAddress, device::DeviceMemoryMapping, heap::GLOBAL_HEAP};
#[derive(Clone, Copy)]
#[doc(hidden)]
pub struct AcpiAllocator;
unsafe impl Allocator for AcpiAllocator {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let ptr = unsafe { GLOBAL_HEAP.alloc(layout) };
log::trace!("ACPI alloc: {:?} -> {:p}", layout, ptr);
if ptr.is_null() {
Err(AllocError)
} else {
unsafe {
Ok(NonNull::slice_from_raw_parts(
NonNull::new_unchecked(ptr),
layout.size(),
))
}
}
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
log::trace!("ACPI dealloc: {:?}, {:?}", ptr, layout);
unsafe { GLOBAL_HEAP.dealloc(ptr.as_ptr(), layout) };
}
}
// TODO don't map memory as device if not necessary
pub unsafe fn read_memory<T>(address: PhysicalAddress) -> T {
let io =
unsafe { DeviceMemoryMapping::map(address, size_of::<T>(), Default::default()).unwrap() };
let address = io.address();
unsafe {
if address % align_of::<T>() == 0 {
(address as *const T).read_volatile()
} else {
(address as *const T).read_unaligned()
}
}
}
pub unsafe fn write_memory<T>(address: PhysicalAddress, value: T) {
let io =
unsafe { DeviceMemoryMapping::map(address, size_of::<T>(), Default::default()).unwrap() };
let address = io.address();
unsafe {
if address % align_of::<T>() == 0 {
(address as *mut T).write_volatile(value)
} else {
(address as *mut T).write_unaligned(value)
}
}
}
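Usage sketch for the helpers above; each call creates and drops a transient device mapping, so they only suit slow-path ACPI table and register access (the register and bit meaning are illustrative):
unsafe fn toggle_acpi_flag(reg: PhysicalAddress) {
    let value: u32 = read_memory(reg);
    write_memory(reg, value | 1); // set bit 0, e.g. a hypothetical enable flag
}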

View File

@ -1,10 +1,12 @@
use core::mem::{size_of, MaybeUninit};
use device_api::dma::DmaAllocator;
use libk::dma::{BusAddress, DmaBuffer, DmaSliceMut};
use libk_mm::{
address::{AsPhysicalAddress, PhysicalAddress},
PageBox, PageSlice,
};
use tock_registers::register_structs;
use crate::{data::AtaString, error::AhciError, MAX_PRD_SIZE};
use crate::{data::AtaString, error::AhciError, SECTOR_SIZE};
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[repr(u8)]
@ -20,16 +22,8 @@ pub trait AtaCommand {
fn lba(&self) -> u64;
fn sector_count(&self) -> usize;
fn buffer(&self) -> Option<(BusAddress, usize)>;
fn regions(&self) -> &[(PhysicalAddress, usize)];
unsafe fn into_response(self) -> Self::Response;
fn prd_count(&self) -> usize {
if let Some((_, size)) = self.buffer() {
size.div_ceil(MAX_PRD_SIZE)
} else {
0
}
}
}
register_structs! {
@ -62,41 +56,49 @@ register_structs! {
}
pub struct AtaIdentify {
buffer: DmaBuffer<MaybeUninit<AtaIdentifyResponse>>,
buffer: PageBox<MaybeUninit<AtaIdentifyResponse>>,
regions: [(PhysicalAddress, usize); 1],
}
pub struct AtaReadDmaEx {
lba: u64,
sector_count: usize,
buffer_base: BusAddress,
buffer_size: usize,
regions: [(PhysicalAddress, usize); 1],
}
impl AtaIdentify {
pub fn create(dma: &dyn DmaAllocator) -> Result<Self, AhciError> {
DmaBuffer::new_uninit(dma)
pub fn create() -> Result<Self, AhciError> {
PageBox::new_uninit()
.map(Self::with_data)
.map_err(AhciError::MemoryError)
}
pub fn with_data(buffer: DmaBuffer<MaybeUninit<AtaIdentifyResponse>>) -> Self {
Self { buffer }
pub fn with_data(buffer: PageBox<MaybeUninit<AtaIdentifyResponse>>) -> Self {
Self {
regions: [(
unsafe { buffer.as_physical_address() },
size_of::<AtaIdentifyResponse>(),
)],
buffer,
}
}
}
impl AtaReadDmaEx {
pub fn new(lba: u64, sector_count: usize, buffer: DmaSliceMut<MaybeUninit<u8>>) -> Self {
pub fn new(lba: u64, sector_count: usize, buffer: &PageSlice<MaybeUninit<u8>>) -> Self {
assert_eq!(buffer.len() % SECTOR_SIZE, 0);
assert_ne!(buffer.len(), 0);
Self {
lba,
sector_count,
buffer_base: buffer.bus_address(),
buffer_size: buffer.len(),
regions: [(unsafe { buffer.as_physical_address() }, buffer.len())],
}
}
}
impl AtaCommand for AtaIdentify {
type Response = DmaBuffer<AtaIdentifyResponse>;
type Response = PageBox<AtaIdentifyResponse>;
const COMMAND_ID: AtaCommandId = AtaCommandId::Identify;
@ -108,14 +110,12 @@ impl AtaCommand for AtaIdentify {
0
}
fn buffer(&self) -> Option<(BusAddress, usize)> {
let base = self.buffer.bus_address();
let size = size_of::<AtaIdentifyResponse>();
Some((base, size))
fn regions(&self) -> &[(PhysicalAddress, usize)] {
&self.regions
}
unsafe fn into_response(self) -> Self::Response {
DmaBuffer::assume_init(self.buffer)
self.buffer.assume_init()
}
}
@ -132,8 +132,8 @@ impl AtaCommand for AtaReadDmaEx {
self.sector_count
}
fn buffer(&self) -> Option<(BusAddress, usize)> {
Some((self.buffer_base, self.buffer_size))
fn regions(&self) -> &[(PhysicalAddress, usize)] {
&self.regions
}
unsafe fn into_response(self) -> Self::Response {}
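Worked example for the `prd_count` default above with the master-side MAX_PRD_SIZE of 65536: a 130 KiB (133120-byte) transfer splits into ceil(133120 / 65536) = 3 PRDs, and a command without a buffer reports 0.
fn prd_count_example() {
    assert_eq!(133_120usize.div_ceil(65_536), 3); // 130 KiB -> 3 PRDs of <= 64 KiB each
}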

View File

@ -2,7 +2,7 @@ use core::mem::size_of;
use alloc::string::String;
use bytemuck::{Pod, Zeroable};
use libk::dma::BusAddress;
use libk_mm::address::PhysicalAddress;
use libk_util::{ConstAssert, IsTrue};
use static_assertions::const_assert_eq;
@ -145,28 +145,10 @@ impl CommandTable {
};
}
// Setup PRDs
if let Some((base, size)) = command.buffer() {
let mut remaining = size;
let mut prd = 0;
while remaining != 0 {
let rem = remaining.min(MAX_PRD_SIZE);
                let last = remaining <= MAX_PRD_SIZE; // final PRD once at most one chunk remains
log::trace!(
target: "io",
"prd[{prd}] base={:#x}, size={rem}",
base.add(prd * MAX_PRD_SIZE)
);
self.prdt[prd] =
PhysicalRegionDescriptor::new(base.add(prd * MAX_PRD_SIZE), rem, last)?;
prd += 1;
remaining -= rem;
}
assert_eq!(prd, command.prd_count());
self.prdt[prd..].fill_with(PhysicalRegionDescriptor::zeroed);
let regions = command.regions();
for (i, &(base, size)) in regions.iter().enumerate() {
let last = i == regions.len() - 1;
self.prdt[i] = PhysicalRegionDescriptor::new(base, size, last)?;
}
Ok(())
@ -174,7 +156,7 @@ impl CommandTable {
}
impl CommandListEntry {
pub fn new(command_table_entry: BusAddress, prd_count: usize) -> Result<Self, AhciError> {
pub fn new(command_table_entry: PhysicalAddress, prd_count: usize) -> Result<Self, AhciError> {
if prd_count > 0xFFFF {
todo!()
}
@ -183,7 +165,7 @@ impl CommandListEntry {
attr: (size_of::<RegisterHostToDeviceFis>() / size_of::<u32>()) as _,
prdtl: prd_count as _,
prdbc: 0,
ctba: command_table_entry.into_u64(),
ctba: command_table_entry.into(),
_0: [0; 4],
})
}
@ -201,14 +183,18 @@ unsafe impl Zeroable for CommandTable {
}
impl PhysicalRegionDescriptor {
pub fn new(address: BusAddress, byte_count: usize, is_last: bool) -> Result<Self, AhciError> {
if byte_count > MAX_PRD_SIZE {
pub fn new(
address: PhysicalAddress,
byte_count: usize,
is_last: bool,
) -> Result<Self, AhciError> {
if byte_count >= MAX_PRD_SIZE {
return Err(AhciError::RegionTooLarge);
}
let dbc_mask = (is_last as u32) << 31;
Ok(Self {
buffer_address: address.into_u64(),
buffer_address: address.into(),
_0: 0,
dbc: ((byte_count as u32 - 1) << 1) | 1 | dbc_mask,
})

View File

@ -3,7 +3,6 @@ use yggdrasil_abi::error::Error;
#[derive(Debug)]
pub enum AhciError {
MemoryError(#[allow(dead_code)] Error),
InvalidBufferSize(usize),
RegionTooLarge,
DeviceError,
FeatureNotImplemented,
@ -13,7 +12,6 @@ impl From<AhciError> for Error {
fn from(value: AhciError) -> Self {
match value {
// TODO: Error::DeviceError
AhciError::InvalidBufferSize(_) => Error::InvalidArgument,
AhciError::DeviceError => Error::InvalidArgument,
AhciError::RegionTooLarge => Error::InvalidArgument,
AhciError::MemoryError(err) => err,

View File

@ -4,27 +4,29 @@
extern crate alloc;
use alloc::{format, sync::Arc, vec::Vec};
use alloc::{boxed::Box, format, vec, vec::Vec};
use bytemuck::Zeroable;
use data::ReceivedFis;
use device_api::{
device::{Device, DeviceInitContext},
dma::DmaAllocator,
interrupt::{InterruptAffinity, InterruptHandler, IrqVector},
interrupt::{InterruptAffinity, InterruptHandler},
Device,
};
use error::AhciError;
use libk::{device::manager::probe_partitions, dma::DmaBuffer, fs::devfs, task::runtime};
use libk_mm::device::DeviceMemoryIo;
use kernel_fs::devfs;
use libk::{
task::runtime,
vfs::block::{probe_partitions, NgBlockDeviceWrapper},
};
use libk_mm::{address::AsPhysicalAddress, device::DeviceMemoryIo, PageBox};
use libk_util::{sync::IrqSafeSpinlock, OneTimeInit};
use port::AhciPort;
use regs::{PortRegs, Regs};
use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
use ygg_driver_pci::{
device::{PciDeviceInfo, PreferredInterruptMode},
macros::pci_driver,
PciCommandRegister, PciConfigurationSpace,
};
use yggdrasil_abi::{error::Error, io::FileMode};
use yggdrasil_abi::error::Error;
use crate::regs::{Version, CAP, GHC, SSTS};
@ -34,16 +36,15 @@ mod error;
mod port;
mod regs;
const MAX_PRD_SIZE: usize = 65536;
const MAX_PRD_SIZE: usize = 8192;
const MAX_COMMANDS: usize = u32::BITS as usize;
const SECTOR_SIZE: usize = 512;
const MAX_DRIVES: usize = (b'z' - b'a') as usize;
pub struct AhciController {
regs: IrqSafeSpinlock<DeviceMemoryIo<'static, Regs>>,
dma: Arc<dyn DmaAllocator>,
ports: OneTimeInit<Vec<Arc<AhciPort>>>,
received_fis_buffers: OneTimeInit<[Option<DmaBuffer<ReceivedFis>>; 16]>,
ports: OneTimeInit<Vec<&'static AhciPort>>,
received_fis_buffers: OneTimeInit<[Option<PageBox<ReceivedFis>>; 16]>,
version: Version,
max_port_count: usize,
@ -52,7 +53,7 @@ pub struct AhciController {
}
impl AhciController {
async fn late_init(self: Arc<Self>) -> Result<(), AhciError> {
async fn late_init(&'static self) -> Result<(), AhciError> {
log::info!("Initializing AHCI SATA Controller {:?}", self.version);
let regs = self.regs.lock();
@ -69,7 +70,7 @@ impl AhciController {
let pi = regs.PI.get();
let mut ports = Vec::new();
let mut ports = vec![];
drop(regs);
@ -83,9 +84,8 @@ impl AhciController {
let regs = self.regs.lock();
let port = &regs.PORTS[i];
let buffer = DmaBuffer::new(&*self.dma, ReceivedFis::zeroed())
.map_err(AhciError::MemoryError)?;
port.set_received_fis_address_64(buffer.bus_address());
let buffer = PageBox::new(ReceivedFis::zeroed()).map_err(AhciError::MemoryError)?;
port.set_received_fis_address_64(unsafe { buffer.as_physical_address() });
*fis_buffer_slot = Some(buffer);
}
@ -117,7 +117,7 @@ impl AhciController {
drop(regs);
let port = match AhciPort::create(port, self.clone(), i) {
let port = match AhciPort::create(port, self, i) {
Ok(port) => port,
Err(error) => {
log::warn!("Port {} init error: {:?}", i, error);
@ -134,13 +134,13 @@ impl AhciController {
self.regs.lock().GHC.modify(GHC::IE::SET);
// Setup the detected ports
for (i, port) in ports.iter().enumerate() {
for (i, &port) in ports.iter().enumerate() {
log::info!("Init port {}", i);
port.init_inner().await?;
port.init().await?;
}
// Dump info about the drives
for (i, port) in ports.iter().enumerate() {
for (i, &port) in ports.iter().enumerate() {
let info = port.info().unwrap();
log::info!(
"Port {}: model={:?}, serial={:?}, lba_count={}",
@ -151,8 +151,25 @@ impl AhciController {
);
}
for port in ports.iter() {
register_sata_drive(port.clone(), true);
{
let mut lock = SATA_DRIVES.lock();
for &port in ports.iter() {
let n = lock.len();
if n >= MAX_DRIVES {
todo!("Too many drives, ran out of letters");
}
let n = n as u8;
lock.push(port);
let name = format!("sd{}", (n + b'a') as char);
let blk = NgBlockDeviceWrapper::new(port);
devfs::add_named_block_device(blk, name.clone()).ok();
probe_partitions(blk, move |index, partition| {
devfs::add_block_device_partition(name.clone(), index, partition)
})
.ok();
}
}
log::debug!("All ports initialized");
@ -162,7 +179,7 @@ impl AhciController {
}
impl InterruptHandler for AhciController {
fn handle_irq(self: Arc<Self>, _vector: IrqVector) -> bool {
fn handle_irq(&self, _vector: Option<usize>) -> bool {
let regs = self.regs.lock();
let is = regs.IS.get();
@ -171,7 +188,7 @@ impl InterruptHandler for AhciController {
// Clear global interrupt status
regs.IS.set(u32::MAX);
for port in ports {
for &port in ports {
if is & (1 << port.index) != 0 {
port.handle_pending_interrupts();
}
@ -184,106 +201,58 @@ impl InterruptHandler for AhciController {
}
impl Device for AhciController {
unsafe fn init(self: Arc<Self>, _cx: DeviceInitContext) -> Result<(), Error> {
unsafe fn init(&'static self) -> Result<(), Error> {
// Do the init in background
runtime::spawn(self.late_init())?;
Ok(())
}
fn display_name(&self) -> &str {
"AHCI Controller"
fn display_name(&self) -> &'static str {
"AHCI SATA Controller"
}
}
pub fn register_sata_drive(drive: Arc<AhciPort>, probe: bool) {
    let index = {
        let mut drives = SATA_DRIVES.lock();
        let index = drives.len();
        if index >= MAX_DRIVES {
            log::error!("Cannot add a SATA drive: too many of them");
            return;
        }
        drives.push(drive.clone());
        index
    };
    let letter = (index as u8 + b'a') as char;
    let name = format!("sd{letter}");
    log::info!("Register SATA drive: {name}");
    devfs::add_named_block_device(drive.clone(), name.clone(), FileMode::new(0o600)).ok();
    if probe {
        runtime::spawn(async move {
            let name = name;
            log::info!("Probing partitions for {name}");
            probe_partitions(drive, |index, partition| {
                let partition_name = format!("{name}{}", index + 1);
                devfs::add_named_block_device(
                    Arc::new(partition),
                    partition_name,
                    FileMode::new(0o600),
                )
                .ok();
            })
            .await
            .ok();
        })
        .ok();
    }
}
static SATA_DRIVES: IrqSafeSpinlock<Vec<Arc<AhciPort>>> = IrqSafeSpinlock::new(Vec::new());
pci_driver! {
    matches: [class (0x01:0x06:0x01)],
    driver: {
        fn driver_name(&self) -> &str {
            "ahci"
        }
        fn probe(&self, info: &PciDeviceInfo, dma: &Arc<dyn DmaAllocator>) -> Result<Arc<dyn Device>, Error> {
            let bar5 = info.config_space.bar(5).ok_or(Error::InvalidOperation)?;
            let bar5 = bar5.as_memory().ok_or(Error::InvalidOperation)?;
            let mut cmd = PciCommandRegister::from_bits_retain(info.config_space.command());
            cmd &= !(PciCommandRegister::DISABLE_INTERRUPTS | PciCommandRegister::ENABLE_IO);
            cmd |= PciCommandRegister::ENABLE_MEMORY | PciCommandRegister::BUS_MASTER;
            info.config_space.set_command(cmd.bits());
            info.init_interrupts(PreferredInterruptMode::Msi(true))?;
            // // TODO support regular PCI interrupts (ACPI dependency)
            // let Some(mut msi) = info.config_space.capability::<MsiCapability>() else {
            //     log::warn!("Ignoring AHCI: does not support MSI (and the OS doesn't yet support PCI IRQ)");
            //     return Err(Error::InvalidOperation);
            // };
            // Map the registers
            let regs = unsafe { DeviceMemoryIo::<Regs>::map(bar5, Default::default()) }?;
            let version = Version::try_from(regs.VS.get())?;
            let ahci_only = regs.CAP.matches_all(CAP::SAM::SET);
            let max_port_count = regs.CAP.read(CAP::NP) as usize;
            let has_64_bit = regs.CAP.matches_all(CAP::S64A::SET);
            // TODO extract Number of Command Slots
            let ahci = Arc::new(AhciController {
                regs: IrqSafeSpinlock::new(regs),
                dma: dma.clone(),
                ports: OneTimeInit::new(),
                received_fis_buffers: OneTimeInit::new(),
                version,
                max_port_count,
                ahci_only,
                has_64_bit,
            });
            // TODO use multiple vectors if capable
            info.map_interrupt(InterruptAffinity::Any, ahci.clone())?;
            Ok(ahci)
        }
    }
static SATA_DRIVES: IrqSafeSpinlock<Vec<&'static AhciPort>> = IrqSafeSpinlock::new(Vec::new());
pub fn probe(info: &PciDeviceInfo) -> Result<&'static dyn Device, Error> {
    let bar5 = info.config_space.bar(5).ok_or(Error::InvalidOperation)?;
    let bar5 = bar5.as_memory().ok_or(Error::InvalidOperation)?;
    let mut cmd = PciCommandRegister::from_bits_retain(info.config_space.command());
    cmd &= !(PciCommandRegister::DISABLE_INTERRUPTS | PciCommandRegister::ENABLE_IO);
    cmd |= PciCommandRegister::ENABLE_MEMORY | PciCommandRegister::BUS_MASTER;
    info.config_space.set_command(cmd.bits());
    info.init_interrupts(PreferredInterruptMode::Msi)?;
    // // TODO support regular PCI interrupts (ACPI dependency)
    // let Some(mut msi) = info.config_space.capability::<MsiCapability>() else {
    //     log::warn!("Ignoring AHCI: does not support MSI (and the OS doesn't yet support PCI IRQ)");
    //     return Err(Error::InvalidOperation);
    // };
    // Map the registers
    let regs = unsafe { DeviceMemoryIo::<Regs>::map(bar5, Default::default()) }?;
    let version = Version::try_from(regs.VS.get())?;
    let ahci_only = regs.CAP.matches_all(CAP::SAM::SET);
    let max_port_count = regs.CAP.read(CAP::NP) as usize;
    let has_64_bit = regs.CAP.matches_all(CAP::S64A::SET);
    // TODO extract Number of Command Slots
    let ahci = Box::leak(Box::new(AhciController {
        regs: IrqSafeSpinlock::new(regs),
        ports: OneTimeInit::new(),
        received_fis_buffers: OneTimeInit::new(),
        version,
        max_port_count,
        ahci_only,
        has_64_bit,
    }));
    // TODO use multiple vectors if capable
    info.map_interrupt(InterruptAffinity::Any, ahci)?;
    Ok(ahci)
}

View File

@ -5,20 +5,12 @@ use core::{
task::{Context, Poll},
};
use alloc::{boxed::Box, string::String, sync::Arc};
use alloc::{boxed::Box, string::String};
use async_trait::async_trait;
use bytemuck::Zeroable;
use device_api::{device::Device, dma::DmaAllocator};
use futures_util::task::AtomicWaker;
use libk::{
device::block::BlockDevice,
dma::{DmaBuffer, DmaSlice, DmaSliceMut},
error::Error,
};
use libk_mm::{
address::PhysicalAddress, device::DeviceMemoryIo, table::MapAttributes, OnDemandPage,
PageProvider, VirtualPage,
};
use libk::vfs::block::NgBlockDevice;
use libk_mm::{address::AsPhysicalAddress, device::DeviceMemoryIo, PageBox, PageSlice};
use libk_util::{sync::IrqSafeSpinlock, waker::QueueWaker, OneTimeInit};
use tock_registers::interfaces::{Readable, Writeable};
@ -27,7 +19,7 @@ use crate::{
data::{CommandListEntry, CommandTable, ReceivedFis, COMMAND_LIST_LENGTH},
error::AhciError,
regs::{PortRegs, CMD_PENDING, CMD_READY, IE, TFD},
AhciController, MAX_COMMANDS, MAX_PRD_SIZE, SECTOR_SIZE,
AhciController, MAX_COMMANDS, SECTOR_SIZE,
};
#[derive(Clone, Copy, PartialEq, Debug)]
@ -39,8 +31,8 @@ struct PortInner {
regs: DeviceMemoryIo<'static, PortRegs>,
#[allow(unused)]
received_fis: DmaBuffer<ReceivedFis>,
command_list: DmaBuffer<[CommandListEntry]>,
received_fis: PageBox<ReceivedFis>,
command_list: PageBox<[CommandListEntry]>,
}
pub struct PortInfo {
@ -52,7 +44,7 @@ pub struct PortInfo {
#[allow(unused)]
pub struct AhciPort {
inner: IrqSafeSpinlock<PortInner>,
ahci: Arc<AhciController>,
ahci: &'static AhciController,
ty: PortType,
pub(crate) index: usize,
info: OneTimeInit<PortInfo>,
@ -68,7 +60,7 @@ struct SubmittedCommand<'a> {
index: usize,
}
impl SubmittedCommand<'_> {
impl<'a> SubmittedCommand<'a> {
pub async fn wait_for_completion(self) -> Result<(), AhciError> {
let result = poll_fn(|cx| self.port.poll_slot(cx, self.index)).await;
@ -80,7 +72,7 @@ impl SubmittedCommand<'_> {
}
}
impl Drop for SubmittedCommand<'_> {
impl<'a> Drop for SubmittedCommand<'a> {
fn drop(&mut self) {
panic!(
"Cannot drop command in flight: port{}, slot{}",
@ -92,16 +84,18 @@ impl Drop for SubmittedCommand<'_> {
impl PortInner {
fn submit_command<C: AtaCommand>(
&mut self,
dma: &dyn DmaAllocator,
index: usize,
command: &C,
) -> Result<(), AhciError> {
let list_entry = &mut self.command_list[index];
let mut table_entry =
DmaBuffer::new(dma, CommandTable::zeroed()).map_err(AhciError::MemoryError)?;
PageBox::new(CommandTable::zeroed()).map_err(AhciError::MemoryError)?;
table_entry.setup_command(command)?;
*list_entry = CommandListEntry::new(table_entry.bus_address(), command.prd_count())?;
*list_entry = CommandListEntry::new(
unsafe { table_entry.as_physical_address() },
command.regions().len(),
)?;
// Sync before send
// XXX do this properly
@ -126,25 +120,22 @@ impl PortInner {
impl AhciPort {
pub fn create(
regs: DeviceMemoryIo<'static, PortRegs>,
ahci: Arc<AhciController>,
ahci: &'static AhciController,
index: usize,
) -> Result<Arc<Self>, AhciError> {
) -> Result<&'static Self, AhciError> {
log::debug!("Initialize port {}", index);
regs.stop()?;
if !ahci.has_64_bit {
log::error!("Handle controllers incapable of 64 bit");
return Err(AhciError::DeviceError);
todo!("Handle controllers incapable of 64 bit");
}
let received_fis =
DmaBuffer::new(&*ahci.dma, ReceivedFis::zeroed()).map_err(AhciError::MemoryError)?;
let command_list =
DmaBuffer::new_slice(&*ahci.dma, CommandListEntry::zeroed(), COMMAND_LIST_LENGTH)
.map_err(AhciError::MemoryError)?;
let received_fis = PageBox::new(ReceivedFis::zeroed()).map_err(AhciError::MemoryError)?;
let command_list = PageBox::new_slice(CommandListEntry::zeroed(), COMMAND_LIST_LENGTH)
.map_err(AhciError::MemoryError)?;
regs.set_received_fis_address_64(received_fis.bus_address());
regs.set_command_list_address_64(command_list.bus_address());
regs.set_received_fis_address_64(unsafe { received_fis.as_physical_address() });
regs.set_command_list_address_64(unsafe { command_list.as_physical_address() });
regs.IE.write(
IE::DPE::SET
@ -168,7 +159,7 @@ impl AhciPort {
let command_available = QueueWaker::new();
let command_allocation = IrqSafeSpinlock::new(0);
let port = Arc::new(Self {
Ok(Box::leak(Box::new(Self {
inner: IrqSafeSpinlock::new(inner),
ty: PortType::Sata,
info: OneTimeInit::new(),
@ -178,16 +169,11 @@ impl AhciPort {
command_completion,
command_allocation,
command_available,
});
Ok(port)
})))
}
pub async fn init_inner(&self) -> Result<(), AhciError> {
let identify = self
.perform_command(AtaIdentify::create(&*self.ahci.dma)?)
.await?;
pub async fn init(&'static self) -> Result<(), AhciError> {
let identify = self.perform_command(AtaIdentify::create()?).await?;
let model = identify.model_number.to_string();
let serial = identify.serial_number.to_string();
let lba_count = identify.logical_sector_count();
@ -236,16 +222,8 @@ impl AhciPort {
}
async fn submit<C: AtaCommand>(&self, command: &C) -> Result<SubmittedCommand, AhciError> {
if command.prd_count() > 2 {
log::warn!("TODO: AHCI doesn't like 3+ PRD transfers");
return Err(AhciError::RegionTooLarge);
}
let index = self.allocate_command().await;
if let Err(error) = self
.inner
.lock()
.submit_command(&*self.ahci.dma, index, command)
{
if let Err(error) = self.inner.lock().submit_command(index, command) {
self.free_command(index);
return Err(error);
}
@ -299,7 +277,7 @@ impl AhciPort {
if ci & (1 << i) == 0
&& self.command_completion[i].1.swap(status, Ordering::Release) == CMD_PENDING
{
log::trace!(target: "io", "port{}: completion on slot {}", self.index, i);
log::info!("port{}: completion on slot {}", self.index, i);
self.command_completion[i].0.wake();
}
}
@ -309,86 +287,33 @@ impl AhciPort {
}
#[async_trait]
impl BlockDevice for AhciPort {
fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error> {
DmaBuffer::new_uninit_slice(&*self.ahci.dma, size)
}
impl NgBlockDevice for AhciPort {
type Error = AhciError;
async fn read_aligned(
async fn read(
&self,
position: u64,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<(), Error> {
if buffer.len() % SECTOR_SIZE != 0 {
log::warn!("ahci: misaligned buffer size: {}", buffer.len());
return Err(Error::InvalidOperation);
}
if position % SECTOR_SIZE as u64 != 0 {
log::warn!("ahci: misaligned read");
return Err(Error::InvalidOperation);
}
let lba = position / SECTOR_SIZE as u64;
let lba_count = buffer.len() / SECTOR_SIZE;
if lba + lba_count as u64 > self.block_count() {
log::warn!("ahci: read crosses medium end");
return Err(Error::InvalidOperation);
}
let command = AtaReadDmaEx::new(lba, lba_count, buffer);
self.submit(&command).await?.wait_for_completion().await?;
Ok(())
lba: u64,
buffer: &mut PageSlice<MaybeUninit<u8>>,
) -> Result<(), AhciError> {
let command = AtaReadDmaEx::new(lba, buffer.len() / SECTOR_SIZE, buffer);
self.submit(&command).await?.wait_for_completion().await
}
async fn write_aligned(&self, _position: u64, _buffer: DmaSlice<'_, u8>) -> Result<(), Error> {
// TODO AtaWriteDmaEx
Err(Error::NotImplemented)
async fn write(&self, _lba: u64, _buffer: &PageSlice<u8>) -> Result<(), AhciError> {
// TODO AtaDmaWriteEx
Err(AhciError::FeatureNotImplemented)
}
fn block_size(&self) -> usize {
SECTOR_SIZE
}
fn block_count(&self) -> u64 {
fn block_count(&self) -> usize {
self.info().as_ref().map(|i| i.lba_count).unwrap() as _
}
fn max_blocks_per_request(&self) -> usize {
(MAX_PRD_SIZE * 2) / SECTOR_SIZE
}
}
impl Device for AhciPort {
fn display_name(&self) -> &str {
"AHCI SATA Drive"
}
}
impl PageProvider for AhciPort {
fn ondemand_fetch(&self, _opaque: u64) -> Result<OnDemandPage, Error> {
unimplemented!()
}
fn get_page(&self, _offset: u64) -> Result<VirtualPage, Error> {
unimplemented!()
}
fn release_page(
&self,
_offset: u64,
_phys: PhysicalAddress,
_dirty: bool,
) -> Result<(), Error> {
unimplemented!()
}
fn clone_page(
&self,
_offset: u64,
_src_phys: PhysicalAddress,
_src_attrs: MapAttributes,
) -> Result<PhysicalAddress, Error> {
unimplemented!()
// TODO
1
}
}

View File

@ -1,4 +1,4 @@
use libk::dma::BusAddress;
use libk_mm::address::PhysicalAddress;
use tock_registers::{
interfaces::{ReadWriteable, Readable, Writeable},
register_bitfields, register_structs,
@ -141,14 +141,14 @@ impl PortRegs {
Ok(())
}
pub fn set_received_fis_address_64(&self, address: BusAddress) {
let address: u64 = address.into_u64();
pub fn set_received_fis_address_64(&self, address: PhysicalAddress) {
let address: u64 = address.into();
self.FB.set(address as u32);
self.FBU.set((address >> 32) as u32);
}
pub fn set_command_list_address_64(&self, address: BusAddress) {
let address: u64 = address.into_u64();
pub fn set_command_list_address_64(&self, address: PhysicalAddress) {
let address: u64 = address.into();
self.CLB.set(address as u32);
self.CLBU.set((address >> 32) as u32);
}

View File

@ -10,7 +10,6 @@ libk-util.workspace = true
libk-mm.workspace = true
libk.workspace = true
device-api = { workspace = true, features = ["derive"] }
kernel-arch.workspace = true
ygg_driver_pci = { path = "../../bus/pci" }
kernel-fs = { path = "../../fs/kernel-fs" }

View File

@ -2,7 +2,7 @@
use core::fmt::{self, Write};
use libk::dma::BusAddress;
use libk_mm::address::PhysicalAddress;
use tock_registers::{interfaces::Readable, register_structs, registers::ReadOnly, UIntLike};
use crate::queue::PhysicalRegionPage;
@ -74,7 +74,7 @@ pub struct CreateIoCompletionQueue {
pub id: u32,
pub size: usize,
pub vector: u32,
pub data: BusAddress,
pub data: PhysicalAddress,
}
#[derive(Clone, Copy, Debug)]
@ -82,7 +82,7 @@ pub struct CreateIoSubmissionQueue {
pub id: u32,
pub cq_id: u32,
pub size: usize,
pub data: BusAddress,
pub data: PhysicalAddress,
}
// Replies
@ -247,12 +247,11 @@ impl<const N: usize> fmt::Debug for String<N> {
impl Command for IoRead {
fn fill_sqe(&self, sqe: &mut SubmissionQueueEntry) {
assert!(self.count < 65536);
assert_ne!(self.count, 0);
sqe.command.set_opcode(0x02);
sqe.command_specific[0] = self.lba as u32;
sqe.command_specific[1] = (self.lba >> 32) as u32;
sqe.command_specific[2] = self.count - 1;
sqe.command_specific[2] = self.count;
sqe.nsid = self.nsid;
}
}
@ -260,12 +259,11 @@ impl Command for IoRead {
impl Command for IoWrite {
fn fill_sqe(&self, sqe: &mut SubmissionQueueEntry) {
assert!(self.count < 65536);
assert_ne!(self.count, 0);
sqe.command.set_opcode(0x01);
sqe.command_specific[0] = self.lba as u32;
sqe.command_specific[1] = (self.lba >> 32) as u32;
sqe.command_specific[2] = self.count - 1;
sqe.command_specific[2] = self.count;
sqe.nsid = self.nsid;
}
}

View File

@ -1,179 +1,108 @@
use core::mem::MaybeUninit;
use alloc::{boxed::Box, sync::Arc};
use alloc::{boxed::Box, format};
use async_trait::async_trait;
use device_api::device::Device;
use libk::{
device::block::BlockDevice,
dma::{DmaBuffer, DmaSlice, DmaSliceMut},
error::Error,
};
use libk_mm::{
address::{AsPhysicalAddress, PhysicalAddress},
table::MapAttributes,
OnDemandPage, PageProvider, PageSlice, VirtualPage,
};
use kernel_fs::devfs;
use libk::vfs::block::{probe_partitions, NgBlockDevice, NgBlockDeviceWrapper};
use libk_mm::{address::AsPhysicalAddress, PageSlice};
use crate::{command::IdentifyNamespaceRequest, register_nvme_namespace, IoDirection};
use crate::{command::IdentifyNamespaceRequest, IoDirection};
use super::{error::NvmeError, NvmeController};
#[allow(unused)]
pub struct NvmeNamespace {
controller: Arc<NvmeController>,
pub struct NvmeDrive {
controller: &'static NvmeController,
nsid: u32,
total_lba_count: u64,
lba_size: u64,
max_lba_per_request: usize,
}
impl NvmeNamespace {
impl NvmeDrive {
pub async fn create(
controller: Arc<NvmeController>,
controller: &'static NvmeController,
nsid: u32,
max_transfer_size: usize,
) -> Result<Arc<NvmeNamespace>, NvmeError> {
) -> Result<&'static NvmeDrive, NvmeError> {
let admin_q = controller.admin_q.get();
let identify = admin_q
.request(&*controller.dma, IdentifyNamespaceRequest { nsid })
.await?;
let identify = admin_q.request(IdentifyNamespaceRequest { nsid }).await?;
let current_lba_format_idx = identify.current_lba_fmt_idx();
let current_lba_format = identify.lba_fmt(current_lba_format_idx).unwrap();
let lba_size = current_lba_format.lba_data_size().unwrap();
let total_lba_count = identify.total_lba_count();
let max_lba_per_request = (max_transfer_size / lba_size as usize).min(512);
log::debug!(
"ns = {}, lba = {}B, size = {}M, max lba/req = {}",
"ns = {}, lba = {}B, size = {}M",
nsid,
lba_size,
(total_lba_count * lba_size) / (1024 * 1024),
max_lba_per_request,
(total_lba_count * lba_size) / (1024 * 1024)
);
let dev = NvmeNamespace {
let dev = Box::leak(Box::new(NvmeDrive {
controller,
nsid,
total_lba_count,
lba_size,
max_lba_per_request,
};
let dev = Arc::new(dev);
}));
register_nvme_namespace(dev.clone(), true);
let node_name = format!("nvme{}n{}", controller.controller_id.get(), nsid);
let blk = NgBlockDeviceWrapper::new(dev);
devfs::add_named_block_device(blk, node_name.clone()).ok();
probe_partitions(blk, move |index, partition| {
devfs::add_block_device_partition(format!("{}p", node_name), index, partition)
})
.ok();
Ok(dev)
}
pub fn controller_id(&self) -> u32 {
*self.controller.controller_id.get()
}
pub fn id(&self) -> u32 {
self.nsid
}
}
impl Device for NvmeNamespace {
fn display_name(&self) -> &str {
"NVMe Namespace"
}
}
#[async_trait]
impl BlockDevice for NvmeNamespace {
fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error> {
DmaBuffer::new_uninit_slice(&*self.controller.dma, size)
}
impl NgBlockDevice for NvmeDrive {
type Error = NvmeError;
// TODO read directly to cache
async fn read_aligned(
async fn read(
&self,
position: u64,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<(), Error> {
if position % self.block_size() as u64 != 0 {
return Err(Error::InvalidOperation);
}
if buffer.len() % self.block_size() != 0 || buffer.is_empty() {
return Err(Error::InvalidOperation);
}
let lba = position / self.block_size() as u64;
let lba_count = buffer.len().div_ceil(self.block_size());
if lba + lba_count as u64 > self.block_count() {
return Err(Error::InvalidOperation);
}
lba: u64,
buffer: &mut PageSlice<MaybeUninit<u8>>,
) -> Result<(), NvmeError> {
debug_assert_eq!(buffer.len() % self.block_size(), 0);
let lba_count = buffer.len() / self.block_size();
let result = self
.controller
.perform_read(self.nsid, lba, lba_count, buffer)
.await;
log::trace!(target: "io", "read #{lba}, {lba_count} blocks -> {result:?}");
result.map_err(NvmeError::into)
self.controller
.perform_io(
self.nsid,
lba,
lba_count,
unsafe { buffer.as_physical_address() },
IoDirection::Read,
)
.await
}
async fn write(&self, lba: u64, buffer: &PageSlice<u8>) -> Result<(), NvmeError> {
debug_assert_eq!(buffer.len() % self.block_size(), 0);
let lba_count = buffer.len() / self.block_size();
async fn write_aligned(&self, position: u64, buffer: DmaSlice<'_, u8>) -> Result<(), Error> {
if position % self.block_size() as u64 != 0 {
return Err(Error::InvalidOperation);
}
if buffer.len() % self.block_size() != 0 || buffer.is_empty() {
return Err(Error::InvalidOperation);
}
let lba = position / self.block_size() as u64;
let lba_count = buffer.len().div_ceil(self.block_size());
if lba + lba_count as u64 > self.block_count() {
return Err(Error::InvalidOperation);
}
let result = self
.controller
.perform_write(self.nsid, lba, lba_count, buffer)
.await;
log::trace!(target: "io", "write -> #{lba}, {lba_count} blocks -> {result:?}");
result.map_err(NvmeError::into)
self.controller
.perform_io(
self.nsid,
lba,
lba_count,
unsafe { buffer.as_physical_address() },
IoDirection::Write,
)
.await
}
fn block_size(&self) -> usize {
self.lba_size as _
}
fn block_count(&self) -> u64 {
self.total_lba_count
fn block_count(&self) -> usize {
self.total_lba_count as _
}
fn max_blocks_per_request(&self) -> usize {
self.max_lba_per_request
}
}
impl PageProvider for NvmeNamespace {
fn ondemand_fetch(&self, _opaque: u64) -> Result<OnDemandPage, Error> {
unimplemented!()
}
fn get_page(&self, _offset: u64) -> Result<VirtualPage, Error> {
unimplemented!()
}
fn release_page(
&self,
_offset: u64,
_phys: PhysicalAddress,
_dirty: bool,
) -> Result<(), Error> {
unimplemented!()
}
fn clone_page(
&self,
_offset: u64,
_src_phys: PhysicalAddress,
_src_attrs: MapAttributes,
) -> Result<PhysicalAddress, Error> {
unimplemented!()
// TODO get from device
8
}
}

View File

@ -1,13 +1,9 @@
use libk_mm::address::PhysicalAddress;
use yggdrasil_abi::error::Error;
use super::queue::CommandError;
#[derive(Debug)]
pub enum NvmeError {
InitError(Error),
InvalidBuffer(PhysicalAddress, usize),
RequestTooLarge(usize),
MemoryError(Error),
CommandError(CommandError),
}
@ -21,9 +17,6 @@ impl From<CommandError> for NvmeError {
impl From<NvmeError> for Error {
fn from(value: NvmeError) -> Self {
match value {
NvmeError::InitError(error) => error,
NvmeError::RequestTooLarge(_) => Error::InvalidArgument,
NvmeError::InvalidBuffer(_, _) => Error::InvalidArgument,
NvmeError::MemoryError(err) => err,
// TODO Error::DeviceError
NvmeError::CommandError(_err) => Error::InvalidArgument,

View File

@ -1,39 +1,28 @@
#![feature(const_trait_impl, let_chains, if_let_guard, maybe_uninit_slice)]
#![feature(const_trait_impl, let_chains, if_let_guard)]
#![allow(missing_docs)]
#![no_std]
// TODO
#![allow(unused)]
extern crate alloc;
use core::{
mem::{size_of, MaybeUninit},
mem::size_of,
sync::atomic::{AtomicUsize, Ordering},
time::Duration,
};
use alloc::{collections::BTreeMap, format, sync::Arc, vec::Vec};
use alloc::{boxed::Box, collections::BTreeMap, vec::Vec};
use command::{IdentifyActiveNamespaceIdListRequest, IdentifyControllerRequest};
use device_api::{
device::{Device, DeviceInitContext},
dma::DmaAllocator,
interrupt::{InterruptAffinity, InterruptHandler, IrqVector},
interrupt::{InterruptAffinity, InterruptHandler},
Device,
};
use drive::NvmeNamespace;
use kernel_arch::{Architecture, ArchitectureImpl};
use libk::{
device::manager::probe_partitions,
dma::{BusAddress, DmaSlice, DmaSliceMut},
fs::devfs,
task::{cpu_count, cpu_index, runtime},
};
use libk_mm::{address::PhysicalAddress, device::DeviceMemoryIo, L3_PAGE_SIZE};
use drive::NvmeDrive;
use libk::task::{cpu_count, cpu_index, runtime};
use libk_mm::{address::PhysicalAddress, device::DeviceMemoryIo};
use libk_util::{
sync::{IrqGuard, IrqSafeSpinlock},
OneTimeInit,
};
use queue::PrpList;
use regs::{CAP, CC};
use tock_registers::{
interfaces::{ReadWriteable, Readable, Writeable},
register_bitfields, register_structs,
@ -41,10 +30,9 @@ use tock_registers::{
};
use ygg_driver_pci::{
device::{PciDeviceInfo, PreferredInterruptMode},
macros::pci_driver,
PciCommandRegister, PciConfigurationSpace,
};
use yggdrasil_abi::{error::Error, io::FileMode};
use yggdrasil_abi::error::Error;
use crate::{
command::{IoRead, IoWrite},
@ -55,32 +43,90 @@ use self::{
command::{CreateIoCompletionQueue, CreateIoSubmissionQueue, SetFeatureRequest},
error::NvmeError,
queue::QueuePair,
regs::Regs,
};
mod command;
mod drive;
mod error;
mod queue;
mod regs;
pub const MAX_PAGES_PER_REQUEST: usize = 256;
// Use the host page size
pub const PAGE_SIZE: usize = L3_PAGE_SIZE;
register_bitfields! {
u32,
CC [
IOCQES OFFSET(20) NUMBITS(4) [],
IOSQES OFFSET(16) NUMBITS(4) [],
AMS OFFSET(11) NUMBITS(3) [],
MPS OFFSET(7) NUMBITS(4) [],
CSS OFFSET(4) NUMBITS(3) [
NvmCommandSet = 0
],
ENABLE OFFSET(0) NUMBITS(1) [],
],
CSTS [
CFS OFFSET(1) NUMBITS(1) [],
RDY OFFSET(0) NUMBITS(1) [],
],
AQA [
/// Admin Completion Queue Size in entries - 1
ACQS OFFSET(16) NUMBITS(12) [],
/// Admin Submission Queue Size in entries - 1
ASQS OFFSET(0) NUMBITS(12) [],
]
}
register_bitfields! {
u64,
CAP [
/// Maximum Queue Entries Supported, minus 1: a value of 0 means a maximum queue length of 1, 1 means 2, and so on.
MQES OFFSET(0) NUMBITS(16) [],
/// Timeout. Represents the worst-case time the host software should wait for CSTS.RDY to
/// change its state.
TO OFFSET(24) NUMBITS(8) [],
/// Doorbell stride. Stride in bytes = pow(2, 2 + DSTRD).
DSTRD OFFSET(32) NUMBITS(4) [],
/// NVM Subsystem Reset Supported (see NVMe BS Section 3.7.1)
NSSRS OFFSET(36) NUMBITS(1) [],
/// Controller supports one or more I/O command sets
CSS_IO_COMMANDS OFFSET(43) NUMBITS(1) [],
/// Controller only supports admin commands and no I/O commands
CSS_ADMIN_ONLY OFFSET(44) NUMBITS(1) [],
/// Memory page size minimum (bytes = pow(2, 12 + MPSMIN))
MPSMIN OFFSET(48) NUMBITS(4) [],
/// Memory page size maximum (bytes = pow(2, 12 + MPSMAX))
MPSMAX OFFSET(52) NUMBITS(4) [],
]
}
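
To make the CAP-derived formulas in the comments above concrete, here is a minimal sketch of the decode (standalone helpers under the stated formulas; `qid` and the doorbell base are illustrative):

// Sketch: CAP field decoding.
// Doorbell stride in bytes = 2^(2 + DSTRD); minimum page size = 2^(12 + MPSMIN).
fn doorbell_stride(dstrd: u64) -> usize {
    1usize << (2 + dstrd)
}
fn min_page_size(mpsmin: u64) -> usize {
    1usize << (12 + mpsmin)
}
// Per the NVMe spec, queue doorbells start at register offset 0x1000:
// the SQ `qid` tail doorbell sits at 0x1000 + (2 * qid) * stride and the
// CQ `qid` head doorbell at 0x1000 + (2 * qid + 1) * stride.
fn sq_tail_doorbell_offset(qid: usize, dstrd: u64) -> usize {
    0x1000 + 2 * qid * doorbell_stride(dstrd)
}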
register_structs! {
#[allow(non_snake_case)]
Regs {
(0x00 => CAP: ReadOnly<u64, CAP::Register>),
(0x08 => VS: ReadOnly<u32>),
(0x0C => INTMS: WriteOnly<u32>),
(0x10 => INTMC: WriteOnly<u32>),
(0x14 => CC: ReadWrite<u32, CC::Register>),
(0x18 => _0),
(0x1C => CSTS: ReadOnly<u32, CSTS::Register>),
(0x20 => _1),
(0x24 => AQA: ReadWrite<u32, AQA::Register>),
(0x28 => ASQ: ReadWrite<u64>),
(0x30 => ACQ: ReadWrite<u64>),
(0x38 => _2),
(0x2000 => @END),
}
}
pub struct NvmeController {
regs: IrqSafeSpinlock<DeviceMemoryIo<'static, Regs>>,
admin_q: OneTimeInit<QueuePair>,
ioqs: OneTimeInit<Vec<QueuePair>>,
io_queue_count: AtomicUsize,
drive_table: IrqSafeSpinlock<BTreeMap<u32, Arc<NvmeNamespace>>>,
controller_id: OneTimeInit<u32>,
drive_table: IrqSafeSpinlock<BTreeMap<u32, &'static NvmeDrive>>,
controller_id: OneTimeInit<usize>,
pci: PciDeviceInfo,
dma: Arc<dyn DmaAllocator>,
doorbell_shift: usize,
min_page_size: usize,
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
@ -101,7 +147,7 @@ impl NvmeController {
const ADMIN_QUEUE_SIZE: usize = 32;
const IO_QUEUE_SIZE: usize = 32;
async fn create_queues(&self) -> Result<(), NvmeError> {
async fn create_queues(&'static self) -> Result<(), NvmeError> {
let admin_q = self.admin_q.get();
let io_queue_count = self.io_queue_count.load(Ordering::Acquire);
@ -124,22 +170,15 @@ impl NvmeController {
let id = i as u32;
let (sq_doorbell, cq_doorbell) = unsafe { self.doorbell_pair(i) };
let queue = QueuePair::new(
&*self.dma,
id,
i,
Self::IO_QUEUE_SIZE,
sq_doorbell,
cq_doorbell,
)
.map_err(NvmeError::MemoryError)?;
let queue = QueuePair::new(id, i, Self::IO_QUEUE_SIZE, sq_doorbell, cq_doorbell)
.map_err(NvmeError::MemoryError)?;
admin_q
.request_no_data(CreateIoCompletionQueue {
id,
vector: id,
size: Self::IO_QUEUE_SIZE,
data: queue.cq_bus_pointer(),
data: queue.cq_physical_pointer(),
})
.await?;
@ -148,7 +187,7 @@ impl NvmeController {
id,
cq_id: id,
size: Self::IO_QUEUE_SIZE,
data: queue.sq_bus_pointer(),
data: queue.sq_physical_pointer(),
})
.await?;
@ -160,17 +199,15 @@ impl NvmeController {
Ok(())
}
async fn late_init(self: Arc<Self>) -> Result<(), NvmeError> {
register_nvme_controller(self.clone());
async fn late_init(&'static self) -> Result<(), NvmeError> {
let io_queue_count = cpu_count();
self.io_queue_count.store(io_queue_count, Ordering::Release);
{
let range = self
.pci
.map_interrupt_multiple(0..io_queue_count + 1, InterruptAffinity::Any, self.clone())
.map_err(NvmeError::InitError)?;
.map_interrupt_multiple(0..io_queue_count + 1, InterruptAffinity::Any, self)
.unwrap();
// TODO handle different MSI range allocations
for (i, msi) in range.iter().enumerate() {
@ -178,46 +215,35 @@ impl NvmeController {
}
}
register_nvme_controller(self);
let admin_q = self.admin_q.get();
// Identify the controller
let identify = admin_q
.request(&*self.dma, IdentifyControllerRequest)
.await?;
let _identify = admin_q.request(IdentifyControllerRequest).await?;
let max_transfer_size = if identify.mdts == 0 {
// Pick some sane default value
256 * self.min_page_size
} else {
(1 << identify.mdts) * self.min_page_size
};
// TODO do something with identify_controller
self.create_queues().await?;
// Identify namespaces
self.enumerate_namespaces(max_transfer_size).await?;
self.enumerate_namespaces().await?;
Ok(())
}
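
MDTS in the identify data above is the controller's Maximum Data Transfer Size, expressed as a power-of-two multiple of the minimum page size, with 0 meaning no reported limit; the code falls back to 256 pages in that case. A worked sketch of the computation:

// Sketch: MDTS -> maximum transfer size in bytes, mirroring the logic above.
fn max_transfer_size(mdts: u8, min_page_size: usize) -> usize {
    if mdts == 0 {
        256 * min_page_size // no limit reported; pick a sane default
    } else {
        (1usize << mdts) * min_page_size
    }
}
// max_transfer_size(5, 4096) == 131072 (128 KiB)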
async fn enumerate_namespaces(
self: &Arc<Self>,
max_transfer_size: usize,
) -> Result<(), NvmeError> {
async fn enumerate_namespaces(&'static self) -> Result<(), NvmeError> {
let admin_q = self.admin_q.get();
let namespaces = admin_q
.request(
&*self.dma,
IdentifyActiveNamespaceIdListRequest { start_id: 0 },
)
.request(IdentifyActiveNamespaceIdListRequest { start_id: 0 })
.await?;
let count = namespaces.entries.iter().position(|&x| x == 0).unwrap();
let list = &namespaces.entries[..count];
for &nsid in list {
match NvmeNamespace::create(self.clone(), nsid, max_transfer_size).await {
match NvmeDrive::create(self, nsid).await {
Ok(drive) => {
self.drive_table.lock().insert(nsid, drive);
}
@ -230,53 +256,47 @@ impl NvmeController {
Ok(())
}
pub async fn perform_read(
&self,
pub async fn perform_io(
&'static self,
nsid: u32,
lba: u64,
lba_count: usize,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
buffer_address: PhysicalAddress,
direction: IoDirection,
) -> Result<(), NvmeError> {
let prp_list = PrpList::from_buffer(&*self.dma, buffer.bus_address(), buffer.len())?;
let _guard = IrqGuard::acquire();
let cpu_index = cpu_index();
let ioq = &self.ioqs.get()[cpu_index as usize];
let cmd_id = ioq.submit(
IoRead {
nsid,
lba,
count: lba_count as _,
},
&prp_list,
true,
)?;
ioq.wait_for_completion(cmd_id, ()).await?;
buffer.cache_flush_all(false);
// log::debug!(
// "{:?} ioq #{}, nsid={}, lba={:#x}",
// direction,
// cpu_index,
// nsid,
// lba
// );
Ok(())
}
let cmd_id = match direction {
IoDirection::Read => ioq.submit(
IoRead {
nsid,
lba,
count: lba_count as _,
},
&[buffer_address],
true,
),
IoDirection::Write => ioq.submit(
IoWrite {
nsid,
lba,
count: lba_count as _,
},
&[buffer_address],
true,
),
};
pub async fn perform_write(
&self,
nsid: u32,
lba: u64,
lba_count: usize,
buffer: DmaSlice<'_, u8>,
) -> Result<(), NvmeError> {
buffer.cache_flush_all(true);
let prp_list = PrpList::from_buffer(&*self.dma, buffer.bus_address(), buffer.len())?;
let cpu_index = cpu_index();
let ioq = &self.ioqs.get()[cpu_index as usize];
let cmd_id = ioq.submit(
IoWrite {
nsid,
lba,
count: lba_count as _,
},
&prp_list,
true,
)?;
ioq.wait_for_completion(cmd_id, ()).await?;
Ok(())
@ -291,10 +311,8 @@ impl NvmeController {
}
impl InterruptHandler for NvmeController {
fn handle_irq(self: Arc<Self>, vector: IrqVector) -> bool {
let IrqVector::Msi(vector) = vector else {
unreachable!("Only MSI-x interrupts are supported for NVMe");
};
fn handle_irq(&self, vector: Option<usize>) -> bool {
let vector = vector.expect("Only MSI-X interrupts are supported");
if vector == 0 {
self.admin_q.get().process_completions() != 0
@ -309,137 +327,128 @@ impl InterruptHandler for NvmeController {
}
impl Device for NvmeController {
unsafe fn init(self: Arc<Self>, _cx: DeviceInitContext) -> Result<(), Error> {
unsafe fn init(&'static self) -> Result<(), Error> {
let regs = self.regs.lock();
let min_page_size = 1usize << (12 + regs.CAP.read(CAP::MPSMIN));
if min_page_size > 4096 {
panic!();
}
let timeout = Duration::from_millis(regs.CAP.read(CAP::TO) * 500);
log::debug!("Worst-case timeout: {:?}", timeout);
while regs.CSTS.matches_all(CSTS::RDY::SET) {
core::hint::spin_loop();
}
if Self::ADMIN_QUEUE_SIZE as u64 > regs.CAP.read(CAP::MQES) + 1 {
todo!(
"queue_slots too big, max = {}",
regs.CAP.read(CAP::MQES) + 1
);
}
// Setup the admin queue (index 0)
let admin_sq_doorbell = unsafe { regs.doorbell_ptr(self.doorbell_shift, false, 0) };
let admin_cq_doorbell = unsafe { regs.doorbell_ptr(self.doorbell_shift, true, 0) };
log::debug!("sq_doorbell for adminq = {:p}", admin_sq_doorbell);
let admin_q = QueuePair::new(
&*self.dma,
0,
0,
Self::ADMIN_QUEUE_SIZE,
admin_sq_doorbell,
admin_cq_doorbell,
)?;
)
.unwrap();
regs.configure_admin_queue(
admin_q.sq_bus_pointer(),
admin_q.cq_bus_pointer(),
Self::ADMIN_QUEUE_SIZE,
Self::ADMIN_QUEUE_SIZE,
)?;
regs.AQA.modify(
AQA::ASQS.val(Self::ADMIN_QUEUE_SIZE as u32 - 1)
+ AQA::ACQS.val(Self::ADMIN_QUEUE_SIZE as u32 - 1),
);
regs.ASQ.set(admin_q.sq_physical_pointer().into());
regs.ACQ.set(admin_q.cq_physical_pointer().into());
// Configure the controller
regs.configure_controller();
regs.enable_controller(10000000)?;
const IOSQES: u32 = size_of::<SubmissionQueueEntry>().ilog2();
const IOCQES: u32 = size_of::<CompletionQueueEntry>().ilog2();
regs.CC.modify(
CC::IOCQES.val(IOCQES)
+ CC::IOSQES.val(IOSQES)
+ CC::MPS.val(0)
+ CC::CSS::NvmCommandSet,
);
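// With the standard NVMe entry sizes (64-byte submission queue entries,
// 16-byte completion queue entries), the values written above come out to
// IOSQES = log2(64) = 6 and IOCQES = log2(16) = 4.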
// Enable the controller
regs.CC.modify(CC::ENABLE::SET);
log::debug!("Reset the controller");
while !regs.CSTS.matches_any(&[CSTS::RDY::SET, CSTS::CFS::SET]) {
core::hint::spin_loop();
}
if regs.CSTS.matches_all(CSTS::CFS::SET) {
todo!("CFS set after reset!");
}
self.admin_q.init(admin_q);
// Schedule late_init task
runtime::spawn(self.clone().late_init())?;
runtime::spawn(self.late_init())?;
Ok(())
}
fn display_name(&self) -> &str {
fn display_name(&self) -> &'static str {
"NVM Express Controller"
}
}
// TODO
unsafe impl Sync for NvmeController {}
pub fn register_nvme_controller(controller: Arc<NvmeController>) {
let mut list = NVME_CONTROLLERS.lock();
let id = list.len();
list.push(controller.clone());
controller.controller_id.init(id as u32);
}
pub fn register_nvme_namespace(namespace: Arc<NvmeNamespace>, probe: bool) {
let name = format!("nvme{}n{}", namespace.controller_id(), namespace.id());
log::info!("Register NVMe namespace: {name}");
devfs::add_named_block_device(namespace.clone(), name.clone(), FileMode::new(0o600)).ok();
if probe {
runtime::spawn(async move {
let name = name;
log::info!("Probing partitions for {name}");
probe_partitions(namespace, |index, partition| {
let partition_name = format!("{name}p{}", index + 1);
devfs::add_named_block_device(
Arc::new(partition),
partition_name,
FileMode::new(0o600),
)
.ok();
})
.await
.inspect_err(|error| log::error!("{name}: partition probe failed: {error:?}"))
})
.ok();
}
}
static NVME_CONTROLLERS: IrqSafeSpinlock<Vec<Arc<NvmeController>>> =
static NVME_CONTROLLERS: IrqSafeSpinlock<Vec<&'static NvmeController>> =
IrqSafeSpinlock::new(Vec::new());
pci_driver! {
matches: [class (0x01:0x08:0x02)],
driver: {
fn driver_name(&self) -> &str {
"nvme"
}
pub fn probe(info: &PciDeviceInfo) -> Result<&'static dyn Device, Error> {
let bar0 = info
.config_space
.bar(0)
.unwrap()
.as_memory()
.expect("Expected a memory BAR0");
fn probe(&self, info: &PciDeviceInfo, dma: &Arc<dyn DmaAllocator>) -> Result<Arc<dyn Device>, Error> {
let bar0 = info
.config_space
.bar(0)
.unwrap()
.as_memory()
.expect("Expected a memory BAR0");
info.init_interrupts(PreferredInterruptMode::Msi)?;
info.init_interrupts(PreferredInterruptMode::Msi(true))?;
let mut cmd = PciCommandRegister::from_bits_retain(info.config_space.command());
cmd &= !(PciCommandRegister::DISABLE_INTERRUPTS | PciCommandRegister::ENABLE_IO);
cmd |= PciCommandRegister::ENABLE_MEMORY | PciCommandRegister::BUS_MASTER;
info.config_space.set_command(cmd.bits());
let mut cmd = PciCommandRegister::from_bits_retain(info.config_space.command());
cmd &= !(PciCommandRegister::DISABLE_INTERRUPTS | PciCommandRegister::ENABLE_IO);
cmd |= PciCommandRegister::ENABLE_MEMORY | PciCommandRegister::BUS_MASTER;
info.config_space.set_command(cmd.bits());
let regs = unsafe { DeviceMemoryIo::<Regs>::map(bar0, Default::default()) }?;
let regs = unsafe { DeviceMemoryIo::<Regs>::map(bar0, Default::default()) }?;
// Disable the controller
regs.CC.modify(CC::ENABLE::CLEAR);
// Disable the controller
regs.disable_controller(10000000)?;
let doorbell_shift = regs.CAP.read(CAP::DSTRD) as usize + 1;
let doorbell_shift = regs.CAP.read(CAP::DSTRD) as usize + 1;
let min_page_size = 1 << (regs.CAP.read(CAP::MPSMIN) + 12);
Ok(Box::leak(Box::new(NvmeController {
regs: IrqSafeSpinlock::new(regs),
admin_q: OneTimeInit::new(),
ioqs: OneTimeInit::new(),
drive_table: IrqSafeSpinlock::new(BTreeMap::new()),
controller_id: OneTimeInit::new(),
if min_page_size > PAGE_SIZE {
log::error!("Cannot support NVMe HC: min page size ({min_page_size}) > host page size ({PAGE_SIZE})");
return Err(Error::InvalidArgument);
}
pci: info.clone(),
let device = NvmeController {
regs: IrqSafeSpinlock::new(regs),
admin_q: OneTimeInit::new(),
ioqs: OneTimeInit::new(),
drive_table: IrqSafeSpinlock::new(BTreeMap::new()),
controller_id: OneTimeInit::new(),
pci: info.clone(),
dma: dma.clone(),
io_queue_count: AtomicUsize::new(1),
doorbell_shift,
min_page_size,
};
Ok(Arc::new(device))
}
}
io_queue_count: AtomicUsize::new(1),
doorbell_shift,
})))
}
pub fn register_nvme_controller(ctrl: &'static NvmeController) {
let mut list = NVME_CONTROLLERS.lock();
let id = list.len();
list.push(ctrl);
ctrl.controller_id.init(id);
}

View File

@ -1,11 +1,20 @@
use core::{future::poll_fn, mem::size_of, ptr::null_mut, task::Poll};
use core::{
mem::size_of,
pin::Pin,
ptr::null_mut,
task::{Context, Poll},
};
use alloc::collections::{BTreeMap, BTreeSet};
use alloc::{
collections::{BTreeMap, BTreeSet},
vec::Vec,
};
use bytemuck::{Pod, Zeroable};
use device_api::dma::DmaAllocator;
use kernel_arch::{Architecture, ArchitectureImpl};
use libk::dma::{BusAddress, DmaBuffer};
use libk_mm::address::AsPhysicalAddress;
use futures_util::Future;
use libk_mm::{
address::{AsPhysicalAddress, PhysicalAddress},
PageBox,
};
use libk_util::{sync::IrqSafeSpinlock, waker::QueueWaker};
use static_assertions::const_assert;
use yggdrasil_abi::error::Error;
@ -58,7 +67,7 @@ pub struct CompletionQueueEntry {
}
pub struct Queue<T> {
data: DmaBuffer<[T]>,
data: PageBox<[T]>,
mask: usize,
head: usize,
tail: usize,
@ -82,68 +91,14 @@ pub struct QueuePair {
#[allow(unused)]
vector: usize,
sq_base: BusAddress,
cq_base: BusAddress,
sq_base: PhysicalAddress,
cq_base: PhysicalAddress,
pub completion_notify: QueueWaker,
inner: IrqSafeSpinlock<Inner>,
}
pub struct PrpList {
prp1: PhysicalRegionPage,
prp2: PhysicalRegionPage,
#[allow(unused)]
list: Option<DmaBuffer<[BusAddress]>>,
}
impl PrpList {
pub const fn empty() -> Self {
Self {
prp1: PhysicalRegionPage::null(),
prp2: PhysicalRegionPage::null(),
list: None,
}
}
pub fn from_buffer(
dma: &dyn DmaAllocator,
base: BusAddress,
size: usize,
) -> Result<Self, NvmeError> {
// TODO hardcoded page size
if base.into_u64() % 0x1000 != 0 {
todo!();
}
match size {
0 => Ok(Self::empty()),
_ if size <= 0x1000 => Ok(Self {
prp1: PhysicalRegionPage::with_addr(base),
prp2: PhysicalRegionPage::null(),
list: None,
}),
_ if size <= 0x2000 => Ok(Self {
prp1: PhysicalRegionPage::with_addr(base),
prp2: PhysicalRegionPage::with_addr(base.add(0x1000)),
list: None,
}),
_ => {
let count = (size + 0xFFF) / 0x1000;
let list =
DmaBuffer::new_slice_with(dma, |i| base.add((i + 1) * 0x1000), count - 1)
.map_err(NvmeError::MemoryError)?;
Ok(Self {
prp1: PhysicalRegionPage::with_addr(base),
prp2: PhysicalRegionPage::with_addr(list.bus_address()),
list: Some(list),
})
}
}
}
}
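
As a worked illustration of the PRP scheme implemented above (4 KiB pages, as hardcoded): PRP1 always points at the first page; a second page goes directly in PRP2; for anything larger, PRP2 instead points at a list holding every page address after the first. A hedged sketch of the resulting list length:

// Sketch: number of PRP *list* entries a contiguous `size`-byte buffer needs.
// One or two pages fit entirely in PRP1/PRP2 and need no list.
fn prp_list_entries(size: usize) -> usize {
    if size <= 0x2000 {
        0
    } else {
        (size + 0xFFF) / 0x1000 - 1 // every page after the first goes in the list
    }
}
// prp_list_entries(0x1000) == 0, prp_list_entries(0x3000) == 2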
const_assert!(size_of::<CompletionQueueEntry>().is_power_of_two());
impl PhysicalRegionPage {
@ -151,7 +106,7 @@ impl PhysicalRegionPage {
Self(0)
}
pub const fn with_addr(address: BusAddress) -> Self {
pub const fn with_addr(address: PhysicalAddress) -> Self {
Self(address.into_u64())
}
}
@ -202,7 +157,7 @@ impl CompletionQueueEntry {
impl<T> Queue<T> {
pub fn new(
data: DmaBuffer<[T]>,
data: PageBox<[T]>,
head_doorbell: *mut u32,
tail_doorbell: *mut u32,
phase: bool,
@ -255,12 +210,10 @@ impl<T> Queue<T> {
self.tail = new_tail;
if !self.tail_doorbell.is_null() {
self.data.cache_flush_element(self.tail, true);
unsafe {
self.tail_doorbell
.write_volatile(self.tail.try_into().unwrap());
}
ArchitectureImpl::memory_barrier();
}
wrapped
@ -284,18 +237,17 @@ impl<T> Queue<T> {
impl QueuePair {
pub fn new(
dma: &dyn DmaAllocator,
id: u32,
vector: usize,
capacity: usize,
sq_doorbell: *mut u32,
cq_doorbell: *mut u32,
) -> Result<Self, Error> {
let sq_data = DmaBuffer::new_slice(dma, SubmissionQueueEntry::zeroed(), capacity)?;
let cq_data = DmaBuffer::new_slice(dma, CompletionQueueEntry::zeroed(), capacity)?;
let sq_data = PageBox::new_slice(SubmissionQueueEntry::zeroed(), capacity)?;
let cq_data = PageBox::new_slice(CompletionQueueEntry::zeroed(), capacity)?;
let sq_base = sq_data.bus_address();
let cq_base = cq_data.bus_address();
let sq_base = unsafe { sq_data.as_physical_address() };
let cq_base = unsafe { cq_data.as_physical_address() };
log::debug!("Allocated queue pair: sq={:p}, cq={:p}", sq_data, cq_data);
@ -321,53 +273,81 @@ impl QueuePair {
}
#[inline]
pub fn sq_bus_pointer(&self) -> BusAddress {
pub fn sq_physical_pointer(&self) -> PhysicalAddress {
self.sq_base
}
#[inline]
pub fn cq_bus_pointer(&self) -> BusAddress {
pub fn cq_physical_pointer(&self) -> PhysicalAddress {
self.cq_base
}
pub async fn wait_for_completion<T: Unpin>(
&self,
// pub fn poll_completion(&self, command_id: u32) -> Poll<Result<(), Error>> {
// let mut inner = self.inner.lock();
// match inner.completed.remove(&command_id) {
// Some(result) if let Some(_error) = result.error() => todo!(),
// Some(_) => Poll::Ready(Ok(())),
// None => Poll::Pending,
// }
// }
pub fn wait_for_completion<'r, T: Unpin + 'r>(
&'r self,
command_id: u32,
result: T,
) -> Result<T, CommandError> {
let mut response = Some(result);
poll_fn(|cx| {
let mut inner = self.inner.lock();
) -> impl Future<Output = Result<T, CommandError>> + 'r {
struct Fut<'r, R: Unpin + 'r> {
this: &'r QueuePair,
response: Option<R>,
command_id: u32,
}
if let Some(entry) = inner.completed.remove(&command_id) {
self.completion_notify.remove(cx.waker());
impl<'r, R: Unpin + 'r> Future for Fut<'r, R> {
type Output = Result<R, CommandError>;
let result = if let Some(error) = entry.error() {
Err(error)
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.this.completion_notify.register(cx.waker());
let mut inner = self.this.inner.lock();
if let Some(entry) = inner.completed.remove(&self.command_id) {
self.this.completion_notify.remove(cx.waker());
let result = if let Some(error) = entry.error() {
Err(error)
} else {
Ok(self.response.take().unwrap())
};
Poll::Ready(result)
} else {
Ok(response.take().unwrap())
};
Poll::Ready(result)
} else {
self.completion_notify.register(cx.waker());
Poll::Pending
Poll::Pending
}
}
})
.await
}
Fut {
this: self,
response: Some(result),
command_id,
}
}
pub fn submit<C: Command>(
&self,
cmd: C,
ranges: &PrpList,
set_pending: bool,
) -> Result<u32, NvmeError> {
pub fn submit<C: Command>(&self, cmd: C, ranges: &[PhysicalAddress], set_pending: bool) -> u32 {
let mut inner = self.inner.lock();
let mut sqe = SubmissionQueueEntry::zeroed();
sqe.data_pointer[0] = ranges.prp1;
sqe.data_pointer[1] = ranges.prp2;
match ranges.len() {
1 => {
sqe.data_pointer[0] = PhysicalRegionPage::with_addr(ranges[0]);
sqe.data_pointer[1] = PhysicalRegionPage::null();
}
0 => {
sqe.data_pointer[0] = PhysicalRegionPage::null();
sqe.data_pointer[1] = PhysicalRegionPage::null();
}
_ => todo!(),
}
cmd.fill_sqe(&mut sqe);
@ -380,36 +360,36 @@ impl QueuePair {
inner.sq.enqueue(sqe);
Ok(command_id)
command_id
}
pub async fn request_no_data<C: Command>(&self, req: C) -> Result<(), NvmeError> {
let list = PrpList::empty();
let command_id = self.submit(req, &list, true)?;
pub fn request_no_data<C: Command>(
&self,
req: C,
) -> impl Future<Output = Result<(), CommandError>> + '_ {
let command_id = self.submit(req, &[], true);
self.wait_for_completion(command_id, ())
.await
.map_err(NvmeError::CommandError)
}
pub async fn request<'r, R: Request>(
&'r self,
dma: &dyn DmaAllocator,
req: R,
) -> Result<DmaBuffer<R::Response>, NvmeError>
) -> Result<PageBox<R::Response>, NvmeError>
where
R::Response: 'r,
{
let response = DmaBuffer::new_uninit(dma).map_err(NvmeError::MemoryError)?;
let list = PrpList::from_buffer(dma, response.bus_address(), size_of::<R>())?;
let command_id = self.submit(req, &list, true)?;
let response = PageBox::new_uninit().map_err(NvmeError::MemoryError)?;
let command_id = self.submit(req, &[unsafe { response.as_physical_address() }], true);
let result = self.wait_for_completion(command_id, response).await?;
Ok(unsafe { DmaBuffer::assume_init(result) })
Ok(unsafe { result.assume_init() })
}
pub fn process_completions(&self) -> usize {
let mut inner = self.inner.lock();
let mut n = 0;
let mut completion_list = Vec::new();
loop {
let (cmp, expected_phase) = inner.cq.at_head(n);
let cmp_phase = cmp.phase();
@ -420,24 +400,29 @@ impl QueuePair {
n += 1;
let command_id = cmp.command_id();
let sub_queue_id = cmp.sub_queue_id();
// TODO allow several SQs to receive completions through one CQ?
debug_assert_eq!(sub_queue_id, self.id);
assert_eq!(sub_queue_id, self.id);
let sub_queue_head = cmp.sub_queue_head();
let cmp = *cmp;
inner.sq.take_until(sub_queue_head);
if inner.pending.remove(&command_id) {
inner.completed.insert(command_id, cmp);
}
completion_list.push(cmp);
}
if n != 0 {
inner.cq.take(n);
}
for cmp in completion_list {
let command_id = cmp.command_id();
if inner.pending.remove(&command_id) {
inner.completed.insert(command_id, cmp);
}
}
if n != 0 {
self.completion_notify.wake_all();
}
@ -445,7 +430,3 @@ impl QueuePair {
n
}
}
// TODO
unsafe impl Sync for QueuePair {}
unsafe impl Send for QueuePair {}
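
The completion loop above depends on the NVMe phase tag: the controller toggles a bit in each completion entry on every wrap of the queue, so an entry is new exactly when its tag matches the phase the host expects for the current pass. A minimal sketch of that test (hypothetical status word; per the spec, the phase tag is bit 0 of the status/phase word):

// Sketch: phase-tag freshness check for an NVMe completion queue entry.
// `expected_phase` starts true and flips each time the head pointer wraps.
fn entry_is_new(status_word: u16, expected_phase: bool) -> bool {
    (status_word & 1 != 0) == expected_phase
}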

View File

@ -1,150 +0,0 @@
use libk::{dma::BusAddress, error::Error};
use tock_registers::{
interfaces::{ReadWriteable, Readable, Writeable},
register_bitfields, register_structs,
registers::{ReadOnly, ReadWrite, WriteOnly},
};
use crate::queue::{CompletionQueueEntry, SubmissionQueueEntry};
register_bitfields! {
u32,
pub CC [
IOCQES OFFSET(20) NUMBITS(4) [],
IOSQES OFFSET(16) NUMBITS(4) [],
AMS OFFSET(11) NUMBITS(3) [],
MPS OFFSET(7) NUMBITS(4) [],
CSS OFFSET(4) NUMBITS(3) [
NvmCommandSet = 0
],
ENABLE OFFSET(0) NUMBITS(1) [],
],
pub CSTS [
CFS OFFSET(1) NUMBITS(1) [],
RDY OFFSET(0) NUMBITS(1) [],
],
pub AQA [
/// Admin Completion Queue Size in entries - 1
ACQS OFFSET(16) NUMBITS(12) [],
/// Admin Submission Queue Size in entries - 1
ASQS OFFSET(0) NUMBITS(12) [],
]
}
register_bitfields! {
u64,
pub CAP [
/// Maximum Queue Entries Supported, minus 1: a value of 0 means a maximum queue length of 1, 1 means 2, and so on.
MQES OFFSET(0) NUMBITS(16) [],
/// Timeout. Represents the worst-case time the host software should wait for CSTS.RDY to
/// change its state.
TO OFFSET(24) NUMBITS(8) [],
/// Doorbell stride. Stride in bytes = pow(2, 2 + DSTRD).
DSTRD OFFSET(32) NUMBITS(4) [],
/// NVM Subsystem Reset Supported (see NVMe BS Section 3.7.1)
NSSRS OFFSET(36) NUMBITS(1) [],
/// Controller supports one or more I/O command sets
CSS_IO_COMMANDS OFFSET(43) NUMBITS(1) [],
/// Controller only supports admin commands and no I/O commands
CSS_ADMIN_ONLY OFFSET(44) NUMBITS(1) [],
/// Memory page size minimum (bytes = pow(2, 12 + MPSMIN))
MPSMIN OFFSET(48) NUMBITS(4) [],
/// Memory page size maximum (bytes = pow(2, 12 + MPSMAX))
MPSMAX OFFSET(52) NUMBITS(4) [],
]
}
register_structs! {
#[allow(non_snake_case)]
pub Regs {
(0x00 => pub CAP: ReadOnly<u64, CAP::Register>),
(0x08 => pub VS: ReadOnly<u32>),
(0x0C => pub INTMS: WriteOnly<u32>),
(0x10 => pub INTMC: WriteOnly<u32>),
(0x14 => pub CC: ReadWrite<u32, CC::Register>),
(0x18 => _0),
(0x1C => pub CSTS: ReadOnly<u32, CSTS::Register>),
(0x20 => _1),
(0x24 => AQA: ReadWrite<u32, AQA::Register>),
(0x28 => ASQ: ReadWrite<u64>),
(0x30 => ACQ: ReadWrite<u64>),
(0x38 => _2),
(0x2000 => @END),
}
}
impl Regs {
pub fn configure_admin_queue(
&self,
submission_queue_pointer: BusAddress,
completion_queue_pointer: BusAddress,
submission_queue_size: usize,
completion_queue_size: usize,
) -> Result<(), Error> {
let max_queue_size = self.CAP.read(CAP::MQES) + 1;
if submission_queue_size as u64 > max_queue_size {
log::error!("admin submission queue too large");
return Err(Error::InvalidArgument);
}
if completion_queue_size as u64 > max_queue_size {
log::error!("admin completion queue too large");
return Err(Error::InvalidArgument);
}
self.AQA.write(
AQA::ASQS.val(submission_queue_size as u32 - 1)
+ AQA::ACQS.val(completion_queue_size as u32 - 1),
);
self.ASQ.set(submission_queue_pointer.into_u64());
self.ACQ.set(completion_queue_pointer.into_u64());
Ok(())
}
pub fn configure_controller(&self) {
const IOSQES: u32 = size_of::<SubmissionQueueEntry>().ilog2();
const IOCQES: u32 = size_of::<CompletionQueueEntry>().ilog2();
self.CC.modify(
CC::IOCQES.val(IOCQES)
+ CC::IOSQES.val(IOSQES)
+ CC::MPS.val(0)
+ CC::CSS::NvmCommandSet,
);
}
pub fn enable_controller(&self, mut timeout_cycles: u64) -> Result<(), Error> {
self.CC.modify(CC::ENABLE::SET);
while timeout_cycles > 0 && !self.CSTS.matches_any(&[CSTS::RDY::SET, CSTS::CFS::SET]) {
timeout_cycles -= 1;
core::hint::spin_loop();
}
if timeout_cycles == 0 {
return Err(Error::TimedOut);
}
if self.CSTS.matches_all(CSTS::CFS::SET) {
log::error!("nvme: controller fatal status after enable");
return Err(Error::InvalidArgument);
}
Ok(())
}
pub fn disable_controller(&self, mut timeout_cycles: u64) -> Result<(), Error> {
self.CC.modify(CC::ENABLE::CLEAR);
while timeout_cycles > 0 && self.CSTS.matches_all(CSTS::RDY::SET) {
timeout_cycles -= 1;
core::hint::spin_loop();
}
if timeout_cycles > 0 {
Ok(())
} else {
Err(Error::TimedOut)
}
}
}

View File

@ -1,14 +0,0 @@
[package]
name = "ygg_driver_scsi"
version = "0.1.0"
edition = "2024"
[dependencies]
yggdrasil-abi.workspace = true
device-api.workspace = true
libk-util.workspace = true
libk-mm.workspace = true
libk.workspace = true
async-trait.workspace = true
log.workspace = true

View File

@ -1,102 +0,0 @@
use libk::error::Error;
use crate::device::ScsiDeviceType;
pub trait ScsiCommand {
type Response;
const REQUEST_LEN: usize;
const RESPONSE_LEN: usize;
fn into_bytes(self) -> [u8; Self::REQUEST_LEN];
fn parse_response(bytes: &[u8]) -> Result<Self::Response, Error>;
}
// Add more info when needed
pub struct ScsiInquiry;
#[derive(Debug)]
pub struct ScsiInquiryResponse {
pub device_type: ScsiDeviceType,
}
impl ScsiCommand for ScsiInquiry {
type Response = ScsiInquiryResponse;
const REQUEST_LEN: usize = 6;
const RESPONSE_LEN: usize = 36;
fn into_bytes(self) -> [u8; Self::REQUEST_LEN] {
[0x12, 0x00, 0x00, 0x00, 0x00, 0x00]
}
fn parse_response(bytes: &[u8]) -> Result<Self::Response, Error> {
if bytes.len() != 36 {
return Err(Error::InvalidArgument);
}
let device_type = ScsiDeviceType::try_from(bytes[0] & 0x1F).unwrap_or_default();
Ok(ScsiInquiryResponse { device_type })
}
}
pub struct ScsiTestUnitReady;
#[derive(Debug)]
pub struct ScsiTestUnitReadyResponse;
impl ScsiCommand for ScsiTestUnitReady {
type Response = ScsiTestUnitReadyResponse;
const RESPONSE_LEN: usize = 0;
const REQUEST_LEN: usize = 6;
fn into_bytes(self) -> [u8; Self::REQUEST_LEN] {
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
}
fn parse_response(_bytes: &[u8]) -> Result<Self::Response, Error> {
Ok(ScsiTestUnitReadyResponse)
}
}
pub struct ScsiRequestSense;
#[derive(Debug)]
pub struct ScsiRequestSenseResponse;
impl ScsiCommand for ScsiRequestSense {
type Response = ScsiRequestSenseResponse;
const RESPONSE_LEN: usize = 0;
const REQUEST_LEN: usize = 6;
fn into_bytes(self) -> [u8; Self::REQUEST_LEN] {
[0x03, 0x00, 0x00, 0x00, 0x00, 0x00]
}
fn parse_response(_bytes: &[u8]) -> Result<Self::Response, Error> {
Ok(ScsiRequestSenseResponse)
}
}
pub struct ScsiReadCapacity;
#[derive(Debug)]
pub struct ScsiReadCapacityResponse {
pub block_size: u32,
pub block_count: u32,
}
impl ScsiCommand for ScsiReadCapacity {
type Response = ScsiReadCapacityResponse;
const REQUEST_LEN: usize = 10;
const RESPONSE_LEN: usize = 8;
fn into_bytes(self) -> [u8; Self::REQUEST_LEN] {
[0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
}
fn parse_response(bytes: &[u8]) -> Result<Self::Response, Error> {
if bytes.len() != 8 {
return Err(Error::InvalidArgument);
}
let block_count = u32::from_be_bytes(bytes[0..4].try_into().unwrap());
let block_size = u32::from_be_bytes(bytes[4..8].try_into().unwrap());
Ok(ScsiReadCapacityResponse {
block_size,
block_count,
})
}
}
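
A quick worked example of the big-endian decode above, with hypothetical response bytes:

// Sketch: parsing an 8-byte READ CAPACITY (10) payload.
fn demo_parse_read_capacity() {
    let bytes = [0x00u8, 0x00, 0x08, 0x00, 0x00, 0x00, 0x02, 0x00];
    let block_count = u32::from_be_bytes(bytes[0..4].try_into().unwrap());
    let block_size = u32::from_be_bytes(bytes[4..8].try_into().unwrap());
    // 2048 blocks of 512 bytes each = 1 MiB of addressable capacity
    assert_eq!(block_count as u64 * block_size as u64, 1024 * 1024);
}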

View File

@ -1,24 +0,0 @@
use yggdrasil_abi::primitive_enum;
primitive_enum! {
#[derive(Default)]
pub enum ScsiDeviceType: u8 {
DirectAccessBlock = 0x00,
SequentialAccess = 0x01,
Printer = 0x02,
Processor = 0x03,
WriteOnce = 0x04,
CdDvd = 0x05,
OpticalMemory = 0x07,
MediumChanger = 0x08,
StorageArrayController = 0x0C,
EnclosureServices = 0x0D,
SimplifiedDirectAccess = 0x0E,
OpticalCard = 0x0F,
BridgeController = 0x10,
ObjectBasedStorage = 0x11,
AutomationDriveInterface = 0x12,
#[default]
Other = 0x1F,
}
}

View File

@ -1,381 +0,0 @@
#![feature(generic_const_exprs, maybe_uninit_slice)]
#![allow(incomplete_features)]
#![no_std]
use core::{
mem::MaybeUninit,
sync::atomic::{AtomicBool, Ordering},
time::Duration,
};
use alloc::{
boxed::Box, collections::btree_map::BTreeMap, format, string::String, sync::Arc, vec::Vec,
};
use async_trait::async_trait;
use command::{ScsiReadCapacity, ScsiRequestSense, ScsiTestUnitReady};
use device_api::device::Device;
use libk::{
block,
device::{block::BlockDevice, manager::probe_partitions},
dma::{DmaBuffer, DmaSlice, DmaSliceMut},
error::Error,
fs::devfs,
task::{runtime, sync::AsyncMutex},
};
use libk_mm::{
address::PhysicalAddress, table::MapAttributes, OnDemandPage, PageProvider, VirtualPage,
};
use libk_util::{
sync::{spin_rwlock::IrqSafeRwLock, IrqSafeSpinlock},
OneTimeInit,
};
use transport::{ScsiTransport, ScsiTransportWrapper};
use yggdrasil_abi::io::FileMode;
extern crate alloc;
pub mod command;
pub mod device;
pub mod transport;
pub struct ScsiEnclosure {
transport: AsyncMutex<ScsiTransportWrapper>,
units: Vec<IrqSafeRwLock<Option<Arc<ScsiUnit>>>>,
index: OneTimeInit<u32>,
shutdown: AtomicBool,
}
pub struct ScsiUnit {
enclosure: Arc<ScsiEnclosure>,
lun: u8,
lba_count: u64,
lba_size: usize,
max_lba_per_request: usize,
names: IrqSafeRwLock<Vec<String>>,
}
impl ScsiEnclosure {
pub async fn setup(
transport: Box<dyn ScsiTransport>,
lun_count: usize,
) -> Result<Arc<Self>, Error> {
let transport = AsyncMutex::new(ScsiTransportWrapper::new(transport));
let units = (0..lun_count).map(|_| IrqSafeRwLock::new(None)).collect();
let this = Arc::new(Self {
transport,
units,
index: OneTimeInit::new(),
shutdown: AtomicBool::new(false),
});
register_enclosure(this.clone())?;
// Probe LUNs
for i in 0..lun_count {
if this.probe_lun(i as u8).await {
if let Ok(unit) = ScsiUnit::setup(this.clone(), i as u8).await {
*this.units[i].write() = Some(unit);
}
}
}
// Start enclosure poll task
let enclosure = this.clone();
runtime::spawn(async move {
while !enclosure.shutdown.load(Ordering::Acquire) {
enclosure.poll().await;
runtime::sleep(Duration::from_millis(100)).await;
}
})
.ok();
Ok(this)
}
async fn probe_lun(self: &Arc<Self>, lun: u8) -> bool {
let mut attempts = 3;
let mut timeout = 10;
// TODO inspect the returned statuses to tell a real error from an absent LUN
while attempts > 0 {
let mut transport = self.transport.lock().await;
// TEST UNIT READY (6)
if transport
.perform_command(lun, ScsiTestUnitReady)
.await
.is_ok()
{
break;
}
// If not, send a REQUEST SENSE (6)
transport.perform_command(lun, ScsiRequestSense).await.ok();
drop(transport);
runtime::sleep(Duration::from_millis(timeout)).await;
timeout *= 2;
attempts -= 1;
}
attempts != 0
}
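
The loop above is a retry with exponential backoff: up to three TEST UNIT READY attempts, sleeping 10 ms, 20 ms, then 40 ms between them, with a REQUEST SENSE issued after each failure to clear pending sense data. A sketch of the delay schedule (assumed helper, not driver code):

// Sketch: delays produced by `timeout *= 2` across the attempts.
fn backoff_delays_ms(attempts: u32, initial_ms: u64) -> Vec<u64> {
    (0..attempts).map(|i| initial_ms << i).collect()
}
// backoff_delays_ms(3, 10) == [10, 20, 40]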
async fn poll(self: &Arc<Self>) {
let index = *self.index.get();
for lun in 0..self.units.len() {
let mut slot = self.units[lun].write();
let present = self.probe_lun(lun as u8).await;
if let Some(unit) = slot.as_ref() {
if !present {
log::warn!("scsi{index}u{lun} lost");
unit.detach();
*slot = None;
}
} else if present {
if let Ok(unit) = ScsiUnit::setup(self.clone(), lun as u8).await {
log::info!("scsi{index}u{lun} attached");
*slot = Some(unit);
} else {
log::warn!("scsi{index}u{lun} attached, but could not setup");
}
}
}
}
pub fn detach(&self) {
self.shutdown.store(true, Ordering::Release);
let index = self.index.try_get().copied();
for unit in self.units.iter() {
if let Some(unit) = unit.write().take() {
unit.detach();
}
}
// Deregister the enclosure
if let Some(index) = index {
remove_enclosure(index);
}
}
}
impl ScsiUnit {
pub async fn setup(enclosure: Arc<ScsiEnclosure>, lun: u8) -> Result<Arc<Self>, Error> {
let enclosure_index = *enclosure.index.get();
let mut transport = enclosure.transport.lock().await;
// TODO INQUIRY fails for real USB flash drives
// transport.perform_command(0, ScsiInquiry).await?;
let capacity_info = transport.perform_command(lun, ScsiReadCapacity).await?;
let max_lba_per_request =
transport.max_bytes_per_request() / capacity_info.block_size as usize;
log::info!(
"scsi{enclosure_index}u{lun}: lba_size={}, lba_count={}, max_lba_per_request={}",
capacity_info.block_size,
capacity_info.block_count,
max_lba_per_request
);
drop(transport);
let unit = Arc::new(Self {
enclosure,
lun,
lba_count: capacity_info.block_count.into(),
lba_size: capacity_info.block_size as usize,
max_lba_per_request,
names: IrqSafeRwLock::new(Vec::new()),
});
register_unit(enclosure_index, lun, unit.clone());
Ok(unit)
}
fn detach(&self) {
let id = *self.enclosure.index.get();
log::info!("scsi{id}u{} detached", self.lun);
for name in self.names.read().iter() {
devfs::remove_node(name).ok();
}
}
}
#[async_trait]
impl BlockDevice for ScsiUnit {
fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error> {
block!(self.enclosure.transport.lock().await.allocate_buffer(size))?
}
async fn read_aligned(
&self,
position: u64,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<(), Error> {
if position % self.lba_size as u64 != 0 {
log::warn!("scsi: misaligned read");
return Err(Error::InvalidArgument);
}
if buffer.len() % self.lba_size != 0 {
log::warn!("scsi: misaligned buffer size");
return Err(Error::InvalidArgument);
}
let lba_start = position / self.lba_size as u64;
let lba_count = buffer.len() / self.lba_size;
if lba_start.saturating_add(lba_count as u64) > self.lba_count {
log::warn!("scsi: read beyond medium end");
return Err(Error::InvalidArgument);
}
let lba_end = lba_start + lba_count as u64;
let mut transport = self.enclosure.transport.lock().await;
// TODO DmaSliceMut subslicing
let (buffer, range) = buffer.into_parts();
let mut offset = range.start;
for i in (0..lba_count).step_by(self.max_lba_per_request) {
let lba = lba_start + i as u64;
let end = (lba + self.max_lba_per_request as u64).min(lba_end);
let count = (end - lba) as usize;
let amount = count * self.lba_size;
let dst_slice = buffer.slice_mut(offset..offset + amount);
let len = transport
.read(self.lun, lba, count as u16, dst_slice)
.await?;
if len != amount {
return Err(Error::InvalidArgument);
}
offset += amount;
}
Ok(())
}
async fn write_aligned(&self, _position: u64, _buffer: DmaSlice<'_, u8>) -> Result<(), Error> {
Err(Error::NotImplemented)
}
fn block_size(&self) -> usize {
self.lba_size
}
fn block_count(&self) -> u64 {
self.lba_count
}
fn max_blocks_per_request(&self) -> usize {
self.max_lba_per_request
}
}
impl PageProvider for ScsiUnit {
fn ondemand_fetch(&self, _opaque: u64) -> Result<OnDemandPage, Error> {
unimplemented!()
}
fn get_page(&self, _offset: u64) -> Result<VirtualPage, Error> {
unimplemented!()
}
fn release_page(
&self,
_offset: u64,
_phys: PhysicalAddress,
_dirty: bool,
) -> Result<(), Error> {
unimplemented!()
}
fn clone_page(
&self,
_offset: u64,
_src_phys: PhysicalAddress,
_src_attrs: MapAttributes,
) -> Result<PhysicalAddress, Error> {
unimplemented!()
}
}
impl Device for ScsiUnit {
fn display_name(&self) -> &str {
"SCSI Unit"
}
}
impl Drop for ScsiUnit {
fn drop(&mut self) {
if let Some(index) = self.enclosure.index.try_get() {
log::info!("scsi{index}u{} dropped", self.lun);
}
}
}
// TODO this is crap
static SCSI_ENCLOSURES: IrqSafeSpinlock<BTreeMap<u32, Arc<ScsiEnclosure>>> =
IrqSafeSpinlock::new(BTreeMap::new());
static SCSI_BITMAP: IrqSafeSpinlock<u32> = IrqSafeSpinlock::new(0);
fn register_enclosure(enclosure: Arc<ScsiEnclosure>) -> Result<(), Error> {
let index = {
let mut bitmap = SCSI_BITMAP.lock();
let index = (0..8)
.position(|p| *bitmap & (1 << p) == 0)
.ok_or(Error::InvalidOperation)
.inspect_err(|_| log::warn!("Cannot attach SCSI enclosure: too many of them"))?
as u32;
let mut devices = SCSI_ENCLOSURES.lock();
*bitmap |= 1 << index;
assert!(!devices.contains_key(&index));
devices.insert(index, enclosure.clone());
index
};
enclosure.index.init(index);
Ok(())
}
fn register_unit(enclosure_index: u32, lun: u8, unit: Arc<ScsiUnit>) {
let name = format!("scsi{enclosure_index}u{lun}");
unit.names.write().push(name.clone());
devfs::add_named_block_device(unit.clone(), name.clone(), FileMode::new(0o600)).ok();
// TODO this code is repeated everywhere
runtime::spawn(async move {
let name = name;
probe_partitions(unit.clone(), |index, partition| {
let partition_name = format!("{name}p{}", index + 1);
log::info!("{name}: partition {partition_name}");
unit.names.write().push(partition_name.clone());
devfs::add_named_block_device(
Arc::new(partition),
partition_name,
FileMode::new(0o600),
)
.ok();
})
.await
.ok();
})
.ok();
}
fn remove_enclosure(index: u32) {
let mut devices = SCSI_ENCLOSURES.lock();
let mut bitmap = SCSI_BITMAP.lock();
*bitmap &= !(1 << index);
devices.remove(&index);
log::info!("scsi: enclosure {index} detached");
}
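For reference, the index allocation in register_enclosure above reduces to a first-fit scan over a u32 bitmap. A minimal standalone sketch of the same logic (the 8-slot limit mirrors the (0..8) range in the original):
fn allocate_enclosure_index(bitmap: &mut u32) -> Option<u32> {
    // Find the first clear bit and claim it; None means all 8 slots are taken
    let index = (0..8).position(|bit| *bitmap & (1 << bit) == 0)? as u32;
    *bitmap |= 1 << index;
    Some(index)
}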

View File

@ -1,99 +0,0 @@
use core::{mem::MaybeUninit, ops::Deref};
use alloc::boxed::Box;
use async_trait::async_trait;
use libk::{
dma::{DmaBuffer, DmaSliceMut},
error::Error,
};
use crate::command::ScsiCommand;
#[async_trait]
pub trait ScsiTransport: Send + Sync {
fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error>;
/// Performs a raw SCSI request, reading the reply into `response_buffer` and returning the
/// number of response bytes
async fn perform_request_raw(
&mut self,
lun: u8,
request_data: &[u8],
response_buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<usize, Error>;
fn max_bytes_per_request(&self) -> usize;
}
pub struct ScsiTransportWrapper {
inner: Box<dyn ScsiTransport>,
}
impl ScsiTransportWrapper {
pub fn new(inner: Box<dyn ScsiTransport>) -> Self {
Self { inner }
}
pub async fn read(
&mut self,
lun: u8,
lba: u64,
lba_count: u16,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<usize, Error> {
if lba >= u32::MAX as u64 {
return Err(Error::InvalidArgument);
}
let lba_bytes = (lba as u32).to_be_bytes();
let lba_count = lba_count.to_be_bytes();
// Issue a READ (10) command
let request_buffer = [
0x28,
0x00,
lba_bytes[0],
lba_bytes[1],
lba_bytes[2],
lba_bytes[3],
0x00,
lba_count[0],
lba_count[1],
0x00,
];
self.inner
.perform_request_raw(lun, &request_buffer, buffer)
.await
}
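// For reference, the 10-byte CDB built above follows the standard SCSI
// READ (10) layout:
//   byte 0      - opcode 0x28 (READ (10))
//   byte 1      - flags (zero here)
//   bytes 2..=5 - logical block address, big-endian
//   byte 6      - group number (zero here)
//   bytes 7..=8 - transfer length in blocks, big-endian
//   byte 9      - control (zero here)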
pub async fn perform_command<R: ScsiCommand>(
&mut self,
lun: u8,
request: R,
) -> Result<R::Response, Error>
where
[u8; R::RESPONSE_LEN]: Sized,
[u8; R::REQUEST_LEN]: Sized,
{
let mut response_buffer = self.allocate_buffer(R::RESPONSE_LEN)?;
let request_buffer = request.into_bytes();
let response_len = self
.inner
.perform_request_raw(
lun,
&request_buffer,
response_buffer.slice_mut(0..R::RESPONSE_LEN),
)
.await?;
let response_bytes = unsafe { response_buffer[..response_len].assume_init_ref() };
R::parse_response(response_bytes)
}
}
impl Deref for ScsiTransportWrapper {
type Target = dyn ScsiTransport;
fn deref(&self) -> &Self::Target {
self.inner.as_ref()
}
}
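The `ScsiCommand` bound used by `perform_command` lives in `crate::command` and is not shown in this diff. Judging only from the usage above, a command carries its CDB and response lengths plus a response parser. A hedged sketch under that inferred shape (the trait below is an assumption, and `ScsiReadCapacitySketch` is a hypothetical stand-in, not the driver's `ScsiReadCapacity`):
// Assumed trait shape, inferred from the `perform_command` bounds above
pub trait ScsiCommandSketch {
    const REQUEST_LEN: usize;
    const RESPONSE_LEN: usize;
    type Response;
    fn into_bytes(self) -> [u8; Self::REQUEST_LEN];
    fn parse_response(bytes: &[u8]) -> Result<Self::Response, Error>;
}
// READ CAPACITY (10): opcode 0x25, 10-byte CDB, 8-byte big-endian response
// holding the last LBA and the block size (per the SCSI spec)
pub struct ScsiReadCapacitySketch;
pub struct CapacitySketch {
    pub block_count: u32,
    pub block_size: u32,
}
impl ScsiCommandSketch for ScsiReadCapacitySketch {
    const REQUEST_LEN: usize = 10;
    const RESPONSE_LEN: usize = 8;
    type Response = CapacitySketch;
    fn into_bytes(self) -> [u8; Self::REQUEST_LEN] {
        let mut cdb = [0; Self::REQUEST_LEN];
        cdb[0] = 0x25; // READ CAPACITY (10) opcode
        cdb
    }
    fn parse_response(bytes: &[u8]) -> Result<CapacitySketch, Error> {
        if bytes.len() < Self::RESPONSE_LEN {
            return Err(Error::InvalidArgument);
        }
        let last_lba = u32::from_be_bytes(bytes[0..4].try_into().unwrap());
        let block_size = u32::from_be_bytes(bytes[4..8].try_into().unwrap());
        // The device reports the *last* addressable LBA, hence the + 1
        Ok(CapacitySketch { block_count: last_lba + 1, block_size })
    }
}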

View File

@ -8,17 +8,15 @@ authors = ["Mark Poliakov <mark@alnyan.me>"]
yggdrasil-abi.workspace = true
device-api = { workspace = true, features = ["derive"] }
libk-mm.workspace = true
libk-device.workspace = true
libk-util.workspace = true
libk.workspace = true
log.workspace = true
bitflags.workspace = true
tock-registers.workspace = true
[target.'cfg(target_arch = "x86_64")'.dependencies]
ygg_driver_acpi.path = "../../acpi"
acpi.workspace = true
kernel-arch-x86.workspace = true
[lints]
workspace = true
[lints.rust]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }

View File

@ -1,7 +1,6 @@
//! PCI capability structures and queries
use alloc::{sync::Arc, vec, vec::Vec};
use bitflags::bitflags;
use alloc::{vec, vec::Vec};
use device_api::interrupt::{
InterruptAffinity, InterruptHandler, MessageInterruptController, MsiInfo,
};
@ -12,25 +11,8 @@ use tock_registers::{
};
use yggdrasil_abi::error::Error;
use crate::PciBaseAddress;
use super::{PciCapability, PciCapabilityId, PciConfigurationSpace};
bitflags! {
pub struct PcieLinkControl: u16 {
const ASPM_DISABLE = 0 << 0;
// Active state power management control
const ASPM_MASK = 0x3 << 0;
// Enable clock power management
const ECPM = 1 << 8;
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
use core::mem::offset_of;
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
use kernel_arch_x86::intrinsics;
pub trait VirtioCapabilityData<'s, S: PciConfigurationSpace + ?Sized + 's>: Sized {
fn from_space_offset(space: &'s S, offset: usize) -> Self;
@ -59,14 +41,11 @@ pub trait VirtioCapability {
type Output<'a, S: PciConfigurationSpace + ?Sized + 'a>: VirtioCapabilityData<'a, S>;
}
/// Power management capability entry
pub struct PowerManagementCapability;
/// MSI-X capability query
pub struct MsiXCapability;
/// MSI capability query
pub struct MsiCapability;
/// PCIe capability
pub struct PciExpressCapability;
// VirtIO-over-PCI capabilities
/// VirtIO PCI configuration access
@ -78,15 +57,6 @@ pub struct VirtioNotifyConfigCapability;
/// VirtIO interrupt status
pub struct VirtioInterruptStatusCapability;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DevicePowerState {
D0,
D1,
D2,
D3Cold,
D3Hot,
}
/// Represents an entry in MSI-X vector table
#[repr(C)]
pub struct MsiXEntry {
@ -98,21 +68,8 @@ pub struct MsiXEntry {
pub control: ReadWrite<u32>,
}
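// For reference (PCI spec, not from this diff): each MSI-X table entry is
// 16 bytes, holding the Message Address (64 bits), Message Data (32 bits)
// and Vector Control (32 bits), with bit 0 of Vector Control masking the
// vector.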
enum MsiXVectorTableAccess<'a> {
Memory(DeviceMemoryIoMut<'a, [MsiXEntry]>),
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
Io(u16),
}
pub struct MsiXVectorTable<'a> {
access: MsiXVectorTableAccess<'a>,
len: usize,
}
/// PCI Power Management capability data structure
pub struct PowerManagementData<'s, S: PciConfigurationSpace + ?Sized + 's> {
    space: &'s S,
    offset: usize,
}
pub struct MsiXVectorTable<'a> {
    vectors: DeviceMemoryIoMut<'a, [MsiXEntry]>,
}
/// MSI-X capability data structure
@ -127,12 +84,6 @@ pub struct MsiData<'s, S: PciConfigurationSpace + ?Sized + 's> {
offset: usize,
}
/// PCI Express capability data structure
pub struct PcieData<'s, S: PciConfigurationSpace + ?Sized + 's> {
space: &'s S,
offset: usize,
}
pub struct VirtioDeviceConfigData<'s, S: PciConfigurationSpace + ?Sized + 's> {
space: &'s S,
offset: usize,
@ -171,19 +122,6 @@ impl<T: VirtioCapability> PciCapability for T {
}
}
impl PciCapability for PowerManagementCapability {
const ID: PciCapabilityId = PciCapabilityId::PowerManagement;
type CapabilityData<'a, S: PciConfigurationSpace + ?Sized + 'a> = PowerManagementData<'a, S>;
fn data<'s, S: PciConfigurationSpace + ?Sized + 's>(
space: &'s S,
offset: usize,
_len: usize,
) -> Self::CapabilityData<'s, S> {
PowerManagementData { space, offset }
}
}
impl PciCapability for MsiXCapability {
const ID: PciCapabilityId = PciCapabilityId::MsiX;
type CapabilityData<'a, S: PciConfigurationSpace + ?Sized + 'a> = MsiXData<'a, S>;
@ -210,19 +148,6 @@ impl PciCapability for MsiCapability {
}
}
impl PciCapability for PciExpressCapability {
const ID: PciCapabilityId = PciCapabilityId::PciExpress;
type CapabilityData<'a, S: PciConfigurationSpace + ?Sized + 'a> = PcieData<'a, S>;
fn data<'s, S: PciConfigurationSpace + ?Sized + 's>(
space: &'s S,
offset: usize,
_len: usize,
) -> Self::CapabilityData<'s, S> {
PcieData { space, offset }
}
}
impl VirtioCapability for VirtioDeviceConfigCapability {
const CFG_TYPE: u8 = 0x04;
type Output<'a, S: PciConfigurationSpace + ?Sized + 'a> = VirtioDeviceConfigData<'a, S>;
@ -321,56 +246,6 @@ impl<'s, S: PciConfigurationSpace + ?Sized + 's> VirtioCapabilityData<'s, S>
}
}
impl<'s, S: PciConfigurationSpace + ?Sized + 's> PowerManagementData<'s, S> {
pub fn set_device_power_state(&self, state: DevicePowerState) {
let pmcsr = self.space.read_u16(self.offset + 4) & !0x3;
let current = self.get_device_power_state();
if state == current {
return;
}
log::info!("Set device power state: {state:?}");
match state {
DevicePowerState::D0 => {
// power = 0b00 | PME_EN
self.space.write_u16(self.offset + 4, pmcsr);
}
_ => {
log::warn!("TODO: {state:?} power state");
}
}
}
pub fn set_pme_en(&self, state: bool) {
let pmcsr = self.space.read_u16(self.offset + 4);
let new = if state {
pmcsr | (1 << 8)
} else {
pmcsr & !(1 << 8)
};
if pmcsr == new {
return;
}
log::info!("Set PMCSR.PME_En = {state}");
self.space.write_u16(self.offset + 4, new);
}
pub fn get_device_power_state(&self) -> DevicePowerState {
let pmcsr = self.space.read_u16(self.offset + 4);
match pmcsr & 0x3 {
0b00 => DevicePowerState::D0,
0b01 => DevicePowerState::D1,
0b10 => DevicePowerState::D2,
0b11 => DevicePowerState::D3Hot,
_ => unreachable!(),
}
}
}
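// PMCSR layout relied on by PowerManagementData above (PCI PM capability,
// register at offset + 4, per the PCI spec): bits 1:0 select the power
// state (0b00 = D0 ... 0b11 = D3hot) and bit 8 is PME_En.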
impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiXData<'s, S> {
// TODO use pending bits as well
/// Maps and returns the vector table associated with the device's MSI-X capability
@ -385,30 +260,13 @@ impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiXData<'s, S> {
let Some(base) = self.space.bar(bir) else {
return Err(Error::DoesNotExist);
};
let Some(base) = base.as_memory() else {
return Err(Error::InvalidOperation);
};
match base {
PciBaseAddress::Memory32(mem32) => unsafe {
log::info!("MSI-X table address: {:#x}", mem32 + table_offset as u32);
MsiXVectorTable::memory_from_raw_parts(
PhysicalAddress::from_u32(mem32).add(table_offset),
table_size,
)
},
PciBaseAddress::Memory64(mem64) => unsafe {
log::info!("MSI-X table address: {:#x}", mem64 + table_offset as u64);
MsiXVectorTable::memory_from_raw_parts(
PhysicalAddress::from_u64(mem64).add(table_offset),
table_size,
)
},
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
PciBaseAddress::Io(io) => unsafe {
log::info!("MSI-X table I/O: {:#x}", io + table_offset as u16);
MsiXVectorTable::io_from_raw_parts(io + table_offset as u16, table_size)
},
#[cfg(any(not(any(target_arch = "x86", target_arch = "x86_64")), rust_analyzer))]
PciBaseAddress::Io(_) => Err(Error::DoesNotExist),
}
log::debug!("MSI-X table address: {:#x}", base.add(table_offset));
unsafe { MsiXVectorTable::from_raw_parts(base.add(table_offset), table_size) }
}
/// Changes the global enable status for the device's MSI-X capability. If set, regular IRQs
@ -434,98 +292,25 @@ impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiXData<'s, S> {
}
}
impl MsiXVectorTableAccess<'_> {
fn set_vector_masked(&mut self, vector: usize, masked: bool) {
let old = self.read_control(vector);
let new = if masked { old | 1 } else { old & !1 };
if old != new {
self.write_control(vector, new);
}
}
fn read_control(&mut self, vector: usize) -> u32 {
match self {
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
&mut Self::Io(base) => unsafe {
let a = base
+ (vector * size_of::<MsiXEntry>() + offset_of!(MsiXEntry, control)) as u16;
intrinsics::inl(a)
},
Self::Memory(vectors) => vectors[vector].control.get(),
}
}
fn write_address(&mut self, vector: usize, value: u64) {
match self {
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
&mut Self::Io(base) => unsafe {
let a = base + (vector * size_of::<MsiXEntry>()) as u16;
intrinsics::outl(a, value as u32);
intrinsics::outl(a + 4, (value >> 32) as u32);
},
Self::Memory(vectors) => vectors[vector].address.set(value),
}
}
fn write_data(&mut self, vector: usize, value: u32) {
match self {
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
&mut Self::Io(base) => unsafe {
let a =
base + (vector * size_of::<MsiXEntry>() + offset_of!(MsiXEntry, data)) as u16;
intrinsics::outl(a, value)
},
Self::Memory(vectors) => vectors[vector].data.set(value),
}
}
fn write_control(&mut self, vector: usize, value: u32) {
match self {
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
&mut Self::Io(base) => unsafe {
let a = base
+ (vector * size_of::<MsiXEntry>() + offset_of!(MsiXEntry, control)) as u16;
intrinsics::outl(a, value)
},
Self::Memory(vectors) => vectors[vector].control.set(value),
}
}
}
impl MsiXVectorTable<'_> {
    unsafe fn memory_from_raw_parts(base: PhysicalAddress, len: usize) -> Result<Self, Error> {
        let vectors = DeviceMemoryIoMut::map_slice(base, len, Default::default())?;
        Ok(Self {
            access: MsiXVectorTableAccess::Memory(vectors),
            len,
        })
    }
    #[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
    unsafe fn io_from_raw_parts(base: u16, len: usize) -> Result<Self, Error> {
        Ok(Self {
            access: MsiXVectorTableAccess::Io(base),
            len,
        })
    }
    unsafe fn from_raw_parts(base: PhysicalAddress, len: usize) -> Result<Self, Error> {
        let vectors = DeviceMemoryIoMut::map_slice(base, len, Default::default())?;
        Ok(Self { vectors })
    }
    pub fn mask_all(&mut self) {
        for i in 0..self.len {
            self.access.set_vector_masked(i, true);
        }
    }
    pub fn mask_all(&mut self) {
        for vector in self.vectors.iter_mut() {
            vector.set_masked(true);
        }
    }
pub fn register_range(
pub fn register_range<C: MessageInterruptController + ?Sized>(
&mut self,
start: usize,
end: usize,
ic: &Arc<dyn MessageInterruptController>,
ic: &C,
affinity: InterruptAffinity,
handler: Arc<dyn InterruptHandler>,
handler: &'static dyn InterruptHandler,
) -> Result<Vec<MsiInfo>, Error> {
assert!(end > start);
let mut range = vec![
@ -535,37 +320,38 @@ impl MsiXVectorTable<'_> {
};
end - start
];
ic.clone().register_msi_range(&mut range, handler)?;
ic.register_msi_range(&mut range, handler)?;
for (i, info) in range.iter().enumerate() {
let index = i + start;
self.access.write_address(index, info.address as _);
self.access.write_data(index, info.value);
self.access.set_vector_masked(index, false);
self.vectors[index].address.set(info.address as _);
self.vectors[index].data.set(info.value);
self.vectors[index].set_masked(false);
}
Ok(range)
}
}
impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiData<'s, S> {
    pub fn set_enabled(&mut self, enabled: bool) {
        let mut w0 = self.space.read_u16(self.offset + 2);
        if enabled {
            w0 |= 1 << 0;
        } else {
            w0 &= !(1 << 0);
        }
        self.space.write_u16(self.offset + 2, w0);
    }
    pub fn register(
impl MsiXEntry {
    /// If set, prevents the MSI-X interrupt from being delivered
    fn set_masked(&mut self, masked: bool) {
        if masked {
            self.control.set(self.control.get() | 1);
        } else {
            self.control.set(self.control.get() & !1);
        }
    }
}
impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiData<'s, S> {
    pub fn register<C: MessageInterruptController + ?Sized>(
&mut self,
ic: &Arc<dyn MessageInterruptController>,
ic: &C,
affinity: InterruptAffinity,
handler: Arc<dyn InterruptHandler>,
handler: &'static dyn InterruptHandler,
) -> Result<MsiInfo, Error> {
let info = ic.clone().register_msi(affinity, handler)?;
let info = ic.register_msi(affinity, handler)?;
let mut w0 = self.space.read_u16(self.offset + 2);
// Enable the vector first
@ -601,13 +387,3 @@ impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiData<'s, S> {
Ok(info)
}
}
impl<'s, S: PciConfigurationSpace + ?Sized + 's> PcieData<'s, S> {
pub fn link_control(&self) -> PcieLinkControl {
PcieLinkControl::from_bits_retain(self.space.read_u16(self.offset + 0x10))
}
pub fn set_link_control(&mut self, value: PcieLinkControl) {
self.space.write_u16(self.offset + 0x10, value.bits());
}
}
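A hedged usage sketch of the PcieData accessors above, e.g. disabling ASPM via the Link Control register; obtaining `pcie` through `config_space.capability::<PciExpressCapability>()` is assumed:
fn disable_aspm<S: PciConfigurationSpace + ?Sized>(pcie: &mut PcieData<'_, S>) {
    let mut lc = pcie.link_control();
    // Clearing the ASPM control field (bits 1:0) disables ASPM
    lc.remove(PcieLinkControl::ASPM_MASK);
    pcie.set_link_control(lc);
}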

View File

@ -2,20 +2,16 @@ use core::ops::Range;
use alloc::{sync::Arc, vec::Vec};
use device_api::{
device::Device,
interrupt::{
ExternalInterruptController, InterruptAffinity, InterruptHandler, Irq, IrqOptions,
MessageInterruptController, MsiInfo,
},
interrupt::{InterruptAffinity, InterruptHandler, IrqOptions, MsiInfo},
Device,
};
use libk::device::external_interrupt_controller;
use libk_device::{message_interrupt_controller, register_global_interrupt};
use libk_util::{sync::spin_rwlock::IrqSafeRwLock, OneTimeInit};
use yggdrasil_abi::error::Error;
use crate::{
capability::{MsiCapability, MsiXCapability, MsiXVectorTable},
driver::PciDriver,
PciAddress, PciCommandRegister, PciConfigSpace, PciConfigurationSpace, PciSegmentInfo,
PciAddress, PciConfigSpace, PciConfigurationSpace, PciSegmentInfo,
};
/// Describes a PCI device
@ -23,16 +19,6 @@ use crate::{
pub struct PciDeviceInfo {
/// Address of the device
pub address: PciAddress,
/// Class field of the configuration space
pub class: u8,
/// Subclass field of the configuration space
pub subclass: u8,
/// Prog IF field of the configuration space
pub prog_if: u8,
/// Vendor ID field of the configuration space
pub vendor_id: u16,
/// Device ID field of the configuration space
pub device_id: u16,
/// Configuration space access method
pub config_space: PciConfigSpace,
/// Describes the PCI segment this device is a part of
@ -57,19 +43,16 @@ pub enum PciInterruptPin {
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum PreferredInterruptMode {
Msi(bool),
Msi,
Legacy,
}
enum ConfiguredInterruptMode {
MsiX(
Arc<dyn MessageInterruptController>,
MsiXVectorTable<'static>,
),
Msi(Arc<dyn MessageInterruptController>),
LegacyPin(Arc<dyn ExternalInterruptController>, PciInterruptPin),
MsiX(MsiXVectorTable<'static>),
Msi,
LegacyPin(PciInterruptPin),
#[cfg_attr(not(target_arch = "x86"), allow(unused))]
LegacyLine(Arc<dyn ExternalInterruptController>, u8),
LegacyLine(u8),
None,
}
@ -85,133 +68,69 @@ pub struct PciInterruptRoute {
pub options: IrqOptions,
}
#[derive(Clone)]
pub struct PciMsiRoute {
// TODO `msi-base`
pub controller: Arc<dyn MessageInterruptController>,
}
pub enum PciMatch {
Generic(fn(&PciDeviceInfo) -> bool),
Vendor(u16, u16),
Class(u8, Option<u8>, Option<u8>),
}
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum PciDeviceState {
None,
Probed,
Initialized,
Failed,
}
pub struct PciDriver {
pub(crate) name: &'static str,
pub(crate) check: PciMatch,
pub(crate) probe: fn(&PciDeviceInfo) -> Result<&'static dyn Device, Error>,
}
/// Used to store PCI bus devices which were enumerated by the kernel
pub struct PciBusDevice {
pub(crate) info: PciDeviceInfo,
pub(crate) device: Option<Arc<dyn Device>>,
pub(crate) driver: Option<&'static dyn PciDriver>,
pub(crate) state: PciDeviceState,
pub(crate) driver: Option<&'static dyn Device>,
}
impl PciDeviceInfo {
pub fn set_command(
&self,
enable_irq: bool,
enable_mem: bool,
enable_io: bool,
enable_bus_master: bool,
) {
let command = PciCommandRegister::from_bits_retain(self.config_space.command());
let mut new = command;
if enable_irq {
new &= !PciCommandRegister::DISABLE_INTERRUPTS;
} else {
new |= PciCommandRegister::DISABLE_INTERRUPTS;
}
if enable_mem {
new |= PciCommandRegister::ENABLE_MEMORY;
} else {
new &= !PciCommandRegister::ENABLE_MEMORY;
}
if enable_io {
new |= PciCommandRegister::ENABLE_IO;
} else {
new &= !PciCommandRegister::ENABLE_IO;
}
if enable_bus_master {
new |= PciCommandRegister::BUS_MASTER;
} else {
new &= !PciCommandRegister::BUS_MASTER;
}
if new != command {
self.config_space.set_command(new.bits());
}
}
pub fn init_interrupts(&self, preferred_mode: PreferredInterruptMode) -> Result<(), Error> {
self.interrupt_config
.try_init_with(|| {
let configured_mode = if let PreferredInterruptMode::Msi(want_msix) = preferred_mode
&& let Some(msi_route) = self.segment.msi_translation_map.map_msi(self.address)
{
// Try to setup MSI (or MSI-x, if requested)
let mut result = None;
if want_msix
&& let Some(mut msix) = self.config_space.capability::<MsiXCapability>()
{
if let Ok(mut vt) = msix.vector_table() {
if let Some(mut msi) = self.config_space.capability::<MsiCapability>() {
msi.set_enabled(false);
}
let configured_mode =
if self.segment.has_msi && preferred_mode == PreferredInterruptMode::Msi {
if let Some(mut msix) = self.config_space.capability::<MsiXCapability>() {
let mut vt = msix.vector_table().unwrap();
vt.mask_all();
msix.set_function_mask(false);
msix.set_enabled(true);
result = Some(ConfiguredInterruptMode::MsiX(
msi_route.controller.clone(),
vt,
));
ConfiguredInterruptMode::MsiX(vt)
} else if self.config_space.capability::<MsiCapability>().is_some() {
ConfiguredInterruptMode::Msi
} else {
self.legacy_interrupt_mode()
}
}
// Fall back to MSI if MSI-x is not available or not requested
if result.is_none() && self.config_space.capability::<MsiCapability>().is_some()
{
result = Some(ConfiguredInterruptMode::Msi(msi_route.controller));
}
// Fall back to legacy IRQ if nothing else works
if let Some(result) = result {
result
} else {
// Ignore preferred_mode; legacy is the only supported mode here
self.legacy_interrupt_mode()
}
} else {
// MSI not requested or segment does not have MSI functionality
self.legacy_interrupt_mode()
};
};
IrqSafeRwLock::new(InterruptConfig {
preferred_mode,
configured_mode,
})
})
.expect("Possible bug: double-initialization of PCI(e) interrupt config");
.expect("Attempted to double-configure interrupts for a PCI device");
Ok(())
}
fn legacy_interrupt_mode(&self) -> ConfiguredInterruptMode {
let Ok(intc) = external_interrupt_controller() else {
return ConfiguredInterruptMode::None;
};
// TODO this should be retrieved from interrupt map
#[cfg(any(target_arch = "x86", rust_analyzer))]
{
if let Some(irq) = self.config_space.interrupt_line() {
return ConfiguredInterruptMode::LegacyLine(intc.clone(), irq);
return ConfiguredInterruptMode::LegacyLine(irq);
}
}
match self.config_space.interrupt_pin() {
Some(pin) => ConfiguredInterruptMode::LegacyPin(intc.clone(), pin),
Some(pin) => ConfiguredInterruptMode::LegacyPin(pin),
None => ConfiguredInterruptMode::None,
}
}
@ -219,31 +138,32 @@ impl PciDeviceInfo {
pub fn map_interrupt(
&self,
affinity: InterruptAffinity,
handler: Arc<dyn InterruptHandler>,
handler: &'static dyn InterruptHandler,
) -> Result<Option<MsiInfo>, Error> {
let mut irq = self.interrupt_config.get().write();
match &mut irq.configured_mode {
ConfiguredInterruptMode::Msi(controller) => {
ConfiguredInterruptMode::MsiX(msix) => {
let info =
msix.register_range(0, 1, message_interrupt_controller(), affinity, handler)?;
Ok(Some(info[0]))
}
ConfiguredInterruptMode::Msi => {
let mut msi = self
.config_space
.capability::<MsiCapability>()
.ok_or(Error::InvalidOperation)?;
let info = msi.register(controller, affinity, handler)?;
let info = msi.register(message_interrupt_controller(), affinity, handler)?;
Ok(Some(info))
}
ConfiguredInterruptMode::MsiX(controller, msix) => {
let info = msix.register_range(0, 1, controller, affinity, handler)?;
Ok(Some(info[0]))
}
ConfiguredInterruptMode::LegacyPin(intc, pin) => {
self.try_map_legacy(intc.as_ref(), *pin, handler)?;
ConfiguredInterruptMode::LegacyPin(pin) => {
self.try_map_legacy(*pin, handler)?;
Ok(None)
}
ConfiguredInterruptMode::LegacyLine(intc, irq) => {
self.try_map_legacy_line(intc.as_ref(), *irq, handler)?;
ConfiguredInterruptMode::LegacyLine(irq) => {
self.try_map_legacy_line(*irq, handler)?;
Ok(None)
}
ConfiguredInterruptMode::None => Err(Error::InvalidOperation),
@ -254,25 +174,28 @@ impl PciDeviceInfo {
&self,
vector_range: Range<usize>,
affinity: InterruptAffinity,
handler: Arc<dyn InterruptHandler>,
handler: &'static dyn InterruptHandler,
) -> Result<Vec<MsiInfo>, Error> {
let mut irq = self.interrupt_config.get().write();
let start = vector_range.start;
let end = vector_range.end;
match &mut irq.configured_mode {
ConfiguredInterruptMode::MsiX(controller, msix) => {
msix.register_range(start, end, controller, affinity, handler)
}
ConfiguredInterruptMode::MsiX(msix) => msix.register_range(
start,
end,
message_interrupt_controller(),
affinity,
handler,
),
_ => Err(Error::InvalidOperation),
}
}
fn try_map_legacy(
&self,
intc: &dyn ExternalInterruptController,
pin: PciInterruptPin,
handler: Arc<dyn InterruptHandler>,
handler: &'static dyn InterruptHandler,
) -> Result<(), Error> {
let src = PciInterrupt {
address: self.address,
@ -281,8 +204,8 @@ impl PciDeviceInfo {
let route = self
.segment
.irq_translation_map
.map_interrupt(&src)
.inspect_err(|e| log::warn!("Could not map PCI IRQ {pin:?}: {e:?}"))?;
.get(&src)
.ok_or(Error::InvalidOperation)?;
log::debug!(
"PCI {} pin {:?} -> system IRQ #{}",
@ -291,22 +214,17 @@ impl PciDeviceInfo {
route.number
);
let irq = Irq::External(route.number);
intc.register_irq(irq, route.options, handler)?;
intc.enable_irq(irq)
register_global_interrupt(route.number, route.options, handler)
}
fn try_map_legacy_line(
&self,
intc: &dyn ExternalInterruptController,
line: u8,
handler: Arc<dyn InterruptHandler>,
handler: &'static dyn InterruptHandler,
) -> Result<(), Error> {
log::debug!("PCI {} -> IRQ#{}", self.address, line);
let irq = Irq::External(line as u32);
intc.register_irq(irq, Default::default(), handler)?;
intc.enable_irq(irq)
register_global_interrupt(line as _, Default::default(), handler)
}
}
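Taken together, a driver's interrupt bring-up under the scheme above could look roughly like this (a hedged sketch; the `AnyCpu` affinity variant and the handler argument are assumptions for illustration, not taken from this diff):
fn bring_up_irq(
    info: &PciDeviceInfo,
    handler: Arc<dyn InterruptHandler>,
) -> Result<(), Error> {
    // Prefer MSI, asking for MSI-X over plain MSI where available; the
    // call internally falls back to legacy pin/line routing
    info.init_interrupts(PreferredInterruptMode::Msi(true))?;
    // Returns Some(MsiInfo) for message-signaled modes, None for legacy IRQs
    let _msi = info.map_interrupt(InterruptAffinity::AnyCpu, handler)?;
    Ok(())
}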

View File

@ -1,63 +0,0 @@
use alloc::{sync::Arc, vec::Vec};
use device_api::{device::Device, dma::DmaAllocator};
use libk::error::Error;
use libk_util::sync::spin_rwlock::IrqSafeRwLock;
use crate::device::PciDeviceInfo;
pub enum PciMatch {
Generic(fn(&PciDeviceInfo) -> bool),
Vendor(u16, u16),
Class(u8, Option<u8>, Option<u8>),
}
pub struct PciDriverMatch {
pub driver: &'static dyn PciDriver,
pub check: PciMatch,
}
pub trait PciDriver: Sync {
fn probe(
&self,
info: &PciDeviceInfo,
dma: &Arc<dyn DmaAllocator>,
) -> Result<Arc<dyn Device>, Error>;
fn driver_name(&self) -> &str;
}
impl PciMatch {
pub fn check_device(&self, info: &PciDeviceInfo) -> bool {
match self {
Self::Generic(f) => f(info),
&Self::Vendor(vendor_, device_) => {
info.vendor_id == vendor_ && info.device_id == device_
}
&Self::Class(class_, Some(subclass_), Some(prog_if_)) => {
class_ == info.class && subclass_ == info.subclass && prog_if_ == info.prog_if
}
&Self::Class(class_, Some(subclass_), _) => {
class_ == info.class && subclass_ == info.subclass
}
&Self::Class(class_, _, _) => class_ == info.class,
}
}
}
pub fn register_match(pmatch: PciMatch, driver: &'static dyn PciDriver) {
DRIVERS.write().push(PciDriverMatch {
check: pmatch,
driver,
});
}
pub fn lookup_driver(info: &PciDeviceInfo) -> Option<&'static dyn PciDriver> {
DRIVERS.read().iter().find_map(|pmatch| {
if pmatch.check.check_device(info) {
Some(pmatch.driver)
} else {
None
}
})
}
static DRIVERS: IrqSafeRwLock<Vec<PciDriverMatch>> = IrqSafeRwLock::new(Vec::new());

View File

@ -1,141 +0,0 @@
use core::fmt;
use alloc::{collections::btree_map::BTreeMap, sync::Arc, vec::Vec};
use device_api::interrupt::MessageInterruptController;
use libk::error::Error;
use crate::{
device::{PciInterrupt, PciInterruptRoute, PciMsiRoute},
PciAddress,
};
#[derive(Debug)]
pub enum PciInterruptMap {
Fixed(BTreeMap<PciInterrupt, PciInterruptRoute>),
#[cfg(any(target_arch = "x86_64", rust_analyzer))]
Acpi(alloc::string::String),
Legacy,
}
// TODO device-tree also provides a "msi-base" value, which is ignored and assumed to be zero for
// now
pub struct PciFixedMsiMapping {
pub start_address: PciAddress,
pub end_address: PciAddress,
pub controller: Arc<dyn MessageInterruptController>,
}
pub struct PciFixedMsiMap {
pub entries: Vec<PciFixedMsiMapping>,
}
pub enum PciMsiMap {
Fixed(PciFixedMsiMap),
Identity(Arc<dyn MessageInterruptController>),
Legacy,
}
impl PciInterruptMap {
pub fn map_interrupt(&self, interrupt: &PciInterrupt) -> Result<PciInterruptRoute, Error> {
match self {
Self::Fixed(map) => map.get(interrupt).cloned().ok_or(Error::DoesNotExist),
#[cfg(any(target_arch = "x86_64", rust_analyzer))]
Self::Acpi(aml_object_name) => {
use device_api::interrupt::{IrqLevel, IrqOptions, IrqTrigger};
use crate::device::PciInterruptPin;
let aml_pin = match interrupt.pin {
PciInterruptPin::A => ygg_driver_acpi::PciPin::IntA,
PciInterruptPin::B => ygg_driver_acpi::PciPin::IntB,
PciInterruptPin::C => ygg_driver_acpi::PciPin::IntC,
PciInterruptPin::D => ygg_driver_acpi::PciPin::IntD,
};
let aml_route = ygg_driver_acpi::get_pci_route(
aml_object_name.as_str(),
interrupt.address.device as u16,
interrupt.address.function as u16,
aml_pin,
)
.or_else(|| {
ygg_driver_acpi::get_pci_route(
aml_object_name.as_str(),
interrupt.address.device as u16,
0xFFFF,
aml_pin,
)
})
.ok_or(Error::DoesNotExist)?;
let trigger = match aml_route.trigger {
ygg_driver_acpi::InterruptTrigger::Edge => IrqTrigger::Edge,
ygg_driver_acpi::InterruptTrigger::Level => IrqTrigger::Level,
};
let level = match aml_route.polarity {
ygg_driver_acpi::InterruptPolarity::ActiveLow => IrqLevel::ActiveLow,
ygg_driver_acpi::InterruptPolarity::ActiveHigh => IrqLevel::ActiveHigh,
};
Ok(PciInterruptRoute {
options: IrqOptions { trigger, level },
number: aml_route.irq,
})
}
Self::Legacy => todo!(),
}
}
}
impl PciMsiMap {
pub fn map_msi(&self, address: PciAddress) -> Option<PciMsiRoute> {
match self {
Self::Fixed(map) => map.map_msi(address),
Self::Identity(controller) => Some(PciMsiRoute {
controller: controller.clone(),
}),
Self::Legacy => None,
}
}
}
impl fmt::Debug for PciMsiMap {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Legacy => f.debug_struct("Legacy").finish(),
Self::Fixed(map) => f
.debug_struct("Fixed")
.field("entries", &map.entries)
.finish(),
Self::Identity(_) => f.debug_struct("Identity").finish(),
}
}
}
impl PciFixedMsiMap {
pub fn map_msi(&self, address: PciAddress) -> Option<PciMsiRoute> {
for entry in self.entries.iter() {
if entry.contains(address) {
let route = PciMsiRoute {
controller: entry.controller.clone(),
};
return Some(route);
}
}
None
}
}
impl PciFixedMsiMapping {
pub fn contains(&self, address: PciAddress) -> bool {
self.start_address <= address && self.end_address > address
}
}
impl fmt::Debug for PciFixedMsiMapping {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PciFixedMsiMapping")
.field("start_address", &self.start_address)
.field("end_address", &self.end_address)
.finish()
}
}

View File

@ -1,38 +1,24 @@
//! PCI/PCIe bus interfaces
#![no_std]
#![feature(let_chains, decl_macro)]
#![allow(clippy::missing_transmute_annotations)]
extern crate alloc;
use core::fmt;
#[cfg(any(target_arch = "x86_64", rust_analyzer))]
#[cfg(target_arch = "x86_64")]
use acpi::mcfg::McfgEntry;
use alloc::{format, sync::Arc, vec::Vec};
use alloc::{collections::BTreeMap, sync::Arc, vec::Vec};
use bitflags::bitflags;
use device::{PciBusDevice, PciDeviceInfo, PciDeviceState};
use device_api::{device::DeviceInitContext, dma::DmaAllocator};
use interrupt::{PciInterruptMap, PciMsiMap};
use libk::{
dma::DummyDmaAllocator,
fs::sysfs::{self, object::KObject},
};
use device::{PciBusDevice, PciDeviceInfo, PciDriver, PciInterrupt, PciInterruptRoute, PciMatch};
use device_api::Device;
use libk_mm::address::PhysicalAddress;
use libk_util::{sync::IrqSafeSpinlock, OneTimeInit};
use space::legacy;
use yggdrasil_abi::{error::Error, primitive_enum};
#[cfg(target_arch = "x86_64")]
use device_api::interrupt::MessageInterruptController;
use yggdrasil_abi::error::Error;
pub mod capability;
pub mod device;
pub mod driver;
pub mod interrupt;
pub mod macros;
mod nodes;
mod space;
pub use space::{
@ -43,7 +29,6 @@ pub use space::{
bitflags! {
/// Command register of the PCI configuration space
#[derive(PartialEq, Clone, Copy)]
pub struct PciCommandRegister: u16 {
/// If set, I/O access to the device is enabled
const ENABLE_IO = 1 << 0;
@ -88,14 +73,19 @@ pub enum PciBaseAddress {
Io(u16),
}
primitive_enum! {
pub enum PciCapabilityId: u8 {
PowerManagement = 0x01,
Msi = 0x05,
VendorSpecific = 0x09,
PciExpress = 0x10,
MsiX = 0x11,
    }
}
/// Unique ID assigned to PCI capability structures
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[non_exhaustive]
#[repr(u8)]
pub enum PciCapabilityId {
/// MSI (32-bit or 64-bit)
Msi = 0x05,
/// Vendor-specific capability
VendorSpecific = 0x09,
/// MSI-X
MsiX = 0x11,
/// Unknown capability missing from this list
Unknown,
}
/// Interface used for querying PCI capabilities
@ -132,49 +122,7 @@ struct BusAddressAllocator {
offset_32: u32,
}
#[derive(Debug)]
pub struct PciSegmentInfo {
pub segment_number: u8,
pub bus_number_start: u8,
pub bus_number_end: u8,
pub ecam_phys_base: Option<PhysicalAddress>,
pub irq_translation_map: PciInterruptMap,
pub msi_translation_map: PciMsiMap,
}
/// Represents a single PCIe bus segment
pub struct PciBusSegment {
allocator: Option<BusAddressAllocator>,
info: Arc<PciSegmentInfo>,
devices: Vec<Arc<KObject<IrqSafeSpinlock<PciBusDevice>>>>,
}
#[derive(Debug)]
pub enum PciRangeType {
Configuration,
Io,
Memory32,
Memory64,
}
pub struct PciAddressRange {
pub ty: PciRangeType,
pub bus_number: u8,
pub pci_base: u64,
pub host_base: PhysicalAddress,
pub size: usize,
}
/// Manager struct to store and control all PCI devices in the system
pub struct PciBusManager {
segments: Vec<PciBusSegment>,
}
#[cfg_attr(
any(target_arch = "x86_64", target_arch = "x86", target_arch = "riscv64"),
allow(dead_code)
)]
#[cfg_attr(any(target_arch = "x86_64", target_arch = "x86"), allow(dead_code))]
impl BusAddressAllocator {
pub fn from_ranges(ranges: &[PciAddressRange]) -> Self {
let mut range_32 = None;
@ -242,6 +190,44 @@ impl BusAddressAllocator {
}
}
#[derive(Debug)]
pub struct PciSegmentInfo {
pub segment_number: u8,
pub bus_number_start: u8,
pub bus_number_end: u8,
pub ecam_phys_base: Option<PhysicalAddress>,
pub irq_translation_map: BTreeMap<PciInterrupt, PciInterruptRoute>,
pub has_msi: bool,
}
/// Represents a single PCIe bus segment
pub struct PciBusSegment {
allocator: Option<BusAddressAllocator>,
info: Arc<PciSegmentInfo>,
devices: Vec<PciBusDevice>,
}
pub enum PciRangeType {
Configuration,
Io,
Memory32,
Memory64,
}
pub struct PciAddressRange {
pub ty: PciRangeType,
pub bus_number: u8,
pub pci_base: u64,
pub host_base: PhysicalAddress,
pub size: usize,
}
/// Manager struct to store and control all PCI devices in the system
pub struct PciBusManager {
segments: Vec<PciBusSegment>,
}
impl PciBaseAddress {
pub fn as_memory(self) -> Option<PhysicalAddress> {
match self {
@ -250,14 +236,6 @@ impl PciBaseAddress {
_ => None,
}
}
pub fn is_zero(&self) -> bool {
match *self {
Self::Memory32(base) => base == 0,
Self::Memory64(base) => base == 0,
Self::Io(base) => base == 0,
}
}
}
impl PciBusSegment {
@ -322,9 +300,7 @@ impl PciBusSegment {
for i in 0..6 {
if (1 << i) & bar_mask != 0 {
let Some(orig_value) = config.bar(i) else {
continue;
};
let orig_value = config.bar(i).unwrap();
let size = unsafe { config.bar_size(i) };
if size != 0 {
@ -368,40 +344,13 @@ impl PciBusSegment {
}
}
let vendor_id = config.vendor_id();
let device_id = config.device_id();
let class = config.class_code();
let subclass = config.subclass();
let prog_if = config.prog_if();
let info = PciDeviceInfo {
address,
vendor_id,
device_id,
class,
subclass,
prog_if,
segment: self.info.clone(),
config_space: config,
interrupt_config: Arc::new(OneTimeInit::new()),
};
let object = nodes::make_sysfs_object(PciBusDevice {
info,
driver: None,
device: None,
state: PciDeviceState::None,
});
let pci_object = PCI_SYSFS_NODE.or_init_with(|| {
let bus_object = sysfs::bus().unwrap();
let pci_object = KObject::new(());
bus_object.add_object("pci", pci_object.clone()).ok();
pci_object
});
let name = format!("{address}");
pci_object.add_object(name, object.clone()).ok();
self.devices.push(object);
self.devices.push(PciBusDevice { info, driver: None });
Ok(())
}
@ -427,12 +376,6 @@ impl PciBusSegment {
}
}
impl PciSegmentInfo {
pub fn has_msi(&self) -> bool {
!matches!(self.msi_translation_map, PciMsiMap::Legacy)
}
}
impl PciBusManager {
const fn new() -> Self {
Self {
@ -442,16 +385,11 @@ impl PciBusManager {
/// Walks the bus device list and calls init/init_irq functions on any devices with associated
/// drivers
pub fn probe_bus_devices() -> Result<(), Error> {
    Self::walk_bus_devices(|device| {
        probe_bus_device(device, false)?;
        Ok(true)
    })
}
pub fn setup_bus_devices(rescan: bool) -> Result<(), Error> {
    Self::walk_bus_devices(|device| {
        setup_bus_device(device, rescan)?;
        Ok(true)
    })
}
pub fn setup_bus_devices() -> Result<(), Error> {
    log::info!("Setting up bus devices");
    Self::walk_bus_devices(|device| {
        log::info!("Set up {}", device.info.address);
        setup_bus_device(device)?;
        Ok(true)
    })
}
@ -465,8 +403,7 @@ impl PciBusManager {
for segment in this.segments.iter_mut() {
for device in segment.devices.iter_mut() {
let mut device = device.lock();
if !f(&mut *device)? {
if !f(device)? {
return Ok(());
}
}
@ -484,8 +421,8 @@ impl PciBusManager {
bus_number_start: 0,
bus_number_end: 255,
ecam_phys_base: None,
irq_translation_map: PciInterruptMap::Legacy,
msi_translation_map: PciMsiMap::Legacy,
irq_translation_map: BTreeMap::new(),
has_msi: false,
}),
allocator: None,
devices: Vec::new(),
@ -500,12 +437,7 @@ impl PciBusManager {
/// Enumerates a bus segment provided by ACPI MCFG table entry
#[cfg(target_arch = "x86_64")]
pub fn add_segment_from_mcfg(
entry: &McfgEntry,
msi_controller: Arc<dyn MessageInterruptController>,
) -> Result<(), Error> {
let msi_translation_map = PciMsiMap::Identity(msi_controller);
pub fn add_segment_from_mcfg(entry: &McfgEntry) -> Result<(), Error> {
let mut bus_segment = PciBusSegment {
info: Arc::new(PciSegmentInfo {
segment_number: entry.pci_segment_group as u8,
@ -513,9 +445,9 @@ impl PciBusManager {
bus_number_end: entry.bus_number_end,
ecam_phys_base: Some(PhysicalAddress::from_u64(entry.base_address)),
// TODO get the segment's PCI root bridge AML name
irq_translation_map: PciInterruptMap::Acpi("\\_SB.PCI0._PRT".into()),
msi_translation_map,
// TODO obtain this from ACPI SSDT
irq_translation_map: BTreeMap::new(),
has_msi: true,
}),
// Firmware already did this for us
allocator: None,
@ -530,13 +462,12 @@ impl PciBusManager {
Ok(())
}
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64", rust_analyzer))]
#[cfg(target_arch = "aarch64")]
pub fn add_segment_from_device_tree(
cfg_base: PhysicalAddress,
bus_range: core::ops::Range<u8>,
ranges: Vec<PciAddressRange>,
irq_translation_map: PciInterruptMap,
msi_translation_map: PciMsiMap,
interrupt_map: BTreeMap<PciInterrupt, PciInterruptRoute>,
) -> Result<(), Error> {
let mut bus_segment = PciBusSegment {
info: Arc::new(PciSegmentInfo {
@ -545,8 +476,8 @@ impl PciBusManager {
bus_number_end: bus_range.end,
ecam_phys_base: Some(cfg_base),
irq_translation_map,
msi_translation_map,
irq_translation_map: interrupt_map,
has_msi: false,
}),
allocator: Some(BusAddressAllocator::from_ranges(&ranges)),
@ -619,66 +550,102 @@ impl PciConfigurationSpace for PciConfigSpace {
}
}
fn probe_bus_device(device: &mut PciBusDevice, _rescan: bool) -> Result<(), Error> {
    // Already has a driver/device set up
    if device.device.is_some() || device.state != PciDeviceState::None {
        return Ok(());
    }
fn setup_bus_device(device: &mut PciBusDevice) -> Result<(), Error> {
    if device.driver.is_some() {
        return Ok(());
    }
if let Some(driver) = driver::lookup_driver(&device.info) {
let dma: Arc<dyn DmaAllocator> = Arc::new(DummyDmaAllocator);
let config = &device.info.config_space;
match driver.probe(&device.info, &dma) {
Ok(instance) => {
log::info!("{} -> {}", device.info.address, driver.driver_name());
device.device.replace(instance);
device.driver.replace(driver);
device.state = PciDeviceState::Probed;
}
Err(error) => {
log::error!(
"{} ({}) probe error: {error:?}",
device.info.address,
driver.driver_name()
);
log::debug!(
"{}: {:04x}:{:04x}",
device.info.address,
config.vendor_id(),
config.device_id()
);
let class = config.class_code();
let subclass = config.subclass();
let prog_if = config.prog_if();
let drivers = PCI_DRIVERS.lock();
for driver in drivers.iter() {
if driver
.check
.check_device(&device.info, class, subclass, prog_if)
{
// TODO add the device to the bus
log::debug!(" -> {:?}", driver.name);
let instance = (driver.probe)(&device.info)?;
unsafe {
instance.init()?;
}
device.driver.replace(instance);
break;
} else {
log::debug!(" -> No driver");
}
}
Ok(())
}
fn setup_bus_device(device: &mut PciBusDevice, _rescan: bool) -> Result<(), Error> {
// No driver yet (TODO probe if rescan is asked)
let (Some(dev), Some(driver)) = (device.device.as_ref(), device.driver) else {
return Ok(());
};
// Already initialized/failed
if device.state != PciDeviceState::Probed {
return Ok(());
}
let dma: Arc<dyn DmaAllocator> = Arc::new(DummyDmaAllocator);
let cx = DeviceInitContext {
dma_allocator: dma.clone(),
};
match unsafe { dev.clone().init(cx) } {
Ok(()) => {
device.state = PciDeviceState::Initialized;
}
Err(error) => {
log::error!(
"{} ({}) setup error: {error:?}",
device.info.address,
driver.driver_name()
);
device.state = PciDeviceState::Failed;
        }
    }
    Ok(())
}
impl PciMatch {
pub fn check_device(&self, info: &PciDeviceInfo, class: u8, subclass: u8, prog_if: u8) -> bool {
match self {
Self::Generic(f) => f(info),
&Self::Vendor(vendor_, device_) => {
info.config_space.vendor_id() == vendor_ && info.config_space.device_id() == device_
}
&Self::Class(class_, Some(subclass_), Some(prog_if_)) => {
class_ == class && subclass_ == subclass && prog_if_ == prog_if
}
&Self::Class(class_, Some(subclass_), _) => class_ == class && subclass_ == subclass,
&Self::Class(class_, _, _) => class_ == class,
}
}
}
pub fn register_class_driver(
name: &'static str,
class: u8,
subclass: Option<u8>,
prog_if: Option<u8>,
probe: fn(&PciDeviceInfo) -> Result<&'static dyn Device, Error>,
) {
PCI_DRIVERS.lock().push(PciDriver {
name,
check: PciMatch::Class(class, subclass, prog_if),
probe,
});
}
pub fn register_vendor_driver(
name: &'static str,
vendor_id: u16,
device_id: u16,
probe: fn(&PciDeviceInfo) -> Result<&'static dyn Device, Error>,
) {
PCI_DRIVERS.lock().push(PciDriver {
name,
check: PciMatch::Vendor(vendor_id, device_id),
probe,
});
}
pub fn register_generic_driver(
name: &'static str,
check: fn(&PciDeviceInfo) -> bool,
probe: fn(&PciDeviceInfo) -> Result<&'static dyn Device, Error>,
) {
PCI_DRIVERS.lock().push(PciDriver {
name,
check: PciMatch::Generic(check),
probe,
});
}
static PCI_DRIVERS: IrqSafeSpinlock<Vec<PciDriver>> = IrqSafeSpinlock::new(Vec::new());
static PCI_MANAGER: IrqSafeSpinlock<PciBusManager> = IrqSafeSpinlock::new(PciBusManager::new());
static PCI_SYSFS_NODE: OneTimeInit<Arc<KObject<()>>> = OneTimeInit::new();
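A hedged usage sketch of the registration helpers above (the VirtIO network ID pair 0x1af4:0x1000 is a well-known example; the probe body is stubbed):
fn register_example_driver() {
    register_vendor_driver("virtio-net-example", 0x1af4, 0x1000, |_info| {
        // A real probe would construct and return the device instance here
        Err(Error::NotImplemented)
    });
}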

View File

@ -1,35 +0,0 @@
pub macro pci_driver_match {
(class ($class:literal:$subclass:literal:$prog_if:literal)) => {
$crate::driver::PciMatch::Class($class, Some($subclass), Some($prog_if))
},
(class ($class:literal:$subclass:literal)) => {
$crate::driver::PciMatch::Class($class, Some($subclass), None)
},
(class $class:literal) => {
$crate::driver::PciMatch::Class($class, None, None)
},
(device ($vendor:literal:$device:literal)) => {
$crate::driver::PciMatch::Vendor($vendor, $device)
}
}
pub macro pci_driver(
matches: [$($kind:ident $match:tt),+ $(,)?],
driver: $driver:tt
) {
#[link_section = ".init_array"]
#[used]
static __REGISTER_FN: extern "C" fn() = __register_fn;
extern "C" fn __register_fn() {
struct Driver;
impl $crate::driver::PciDriver for Driver $driver
static DRIVER: Driver = Driver;
log::info!("register pci driver: {:?}", $crate::driver::PciDriver::driver_name(&Driver));
$(
let pmatch = $crate::macros::pci_driver_match!($kind $match);
$crate::driver::register_match(pmatch, &DRIVER);
)+
}
}
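Finally, a hedged sketch of how the `pci_driver!` macro above could be used (the AHCI class triple 01:06:01 is a standard example, and the probe body is stubbed):
pci_driver! {
    matches: [class (0x01:0x06:0x01)], // mass storage / SATA / AHCI
    driver: {
        fn driver_name(&self) -> &str {
            "ahci-example"
        }
        fn probe(
            &self,
            _info: &PciDeviceInfo,
            _dma: &Arc<dyn DmaAllocator>,
        ) -> Result<Arc<dyn Device>, Error> {
            // A real driver would map BARs and construct its device here
            Err(Error::NotImplemented)
        }
    }
}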

Some files were not shown because too many files have changed in this diff.