Compare commits

...

213 Commits

Author SHA1 Message Date
608912804f shell: rework shell parsing, if/while/for/&&/|| 2025-03-12 22:11:54 +02:00
4798240473 libc: fix missing bits 2025-03-09 21:45:32 +02:00
c5994dd390 libc: implement program_invocation_[short_]name 2025-03-09 14:42:38 +02:00
d963b3bac9 libc: borrow relibc's wctype.h 2025-03-09 14:16:42 +02:00
7a9a0ce59e libc: extend libc I/O coverage 2025-03-09 11:59:38 +02:00
69649f1cea ports: add gnu grep 2025-03-09 01:20:42 +02:00
56640a4fc2 ports: add gnu diffutils, patch, gzip 2025-03-09 01:02:15 +02:00
fc9018585b libc: extend GNU library/tool compatibility 2025-03-08 22:31:50 +02:00
87ae150dc1 ports: add lua-5.4.7 port 2025-03-08 02:39:09 +02:00
cac16c1df9 libc: add L_tmpnam to stdio.h 2025-03-08 02:20:20 +02:00
1c07b74e6d ports: add GNU make port 2025-03-08 01:58:43 +02:00
8ffc223a2b fs/libc: implement some libc functions, fix file times 2025-03-08 01:22:19 +02:00
fd0e2cc229 ports: fix rv64 port 2025-03-07 16:00:55 +02:00
cd6b6ac7f5 maint: update gitignore 2025-03-07 12:11:53 +02:00
60bd925122 dyn-loader: add config, better option parsing 2025-03-07 12:10:11 +02:00
9f2ad4f2c9 ports: nicer build system for C/C++ ports 2025-03-06 20:41:16 +02:00
f30cafb3bd user: add a basic NTP client 2025-03-05 17:21:33 +02:00
fb25e70714 user/proc: fix bug in env passing, more shell env 2025-03-05 15:14:21 +02:00
be3e72b80e shell: fix script discovery 2025-03-05 14:14:04 +02:00
c35a61fb7f vfs/user: implement chmod utility 2025-03-05 13:10:51 +02:00
e330db1e55 user: add md2txt 2025-03-05 11:30:04 +02:00
8deeb3ac9e user: reorganize userspace directories 2025-03-05 00:36:51 +02:00
3567b79e1d term: more attribute support 2025-03-03 17:53:19 +02:00
7485476caa shell/term: ^L to clear screen 2025-03-03 15:24:50 +02:00
91d05d352f colors: add window management events 2025-03-03 13:47:06 +02:00
8493573721 colors: add workspaces 2025-03-03 13:05:38 +02:00
c4e3128528 shell: accept unicode input 2025-03-03 00:17:13 +02:00
771c553571 term/sysutils: alternate mode, cursor hide/show, top-like utility 2025-03-02 17:27:26 +02:00
59b34fb269 sysutils: add thread display to ps 2025-03-02 14:04:29 +02:00
6f8fce3388 sysutils: basic ps utility 2025-03-02 13:00:38 +02:00
1b2b41406a rv64: fix incorrect relocations produced for rv64 entry code 2025-03-02 12:45:40 +02:00
31fa51e64c maint/proc: add /sys/proc + migrate to rustc 1.87.0-nightly 2025-03-02 02:17:27 +02:00
a45c54faf8 xtask: split run into two functions in cargo.rs 2025-03-02 00:03:23 +02:00
de98ae1082 sysutils: grep for stdin 2025-03-01 23:52:38 +02:00
aefa7a93fa x86: fix ps/2 E0-keys not working on some devices 2025-03-01 23:26:22 +02:00
3291df4eeb strace: display mutex() calls as well 2025-03-01 21:09:01 +02:00
c069982ed9 sysutils: ls colors 2025-03-01 18:40:24 +02:00
dfa74e5c87 term: simple utf8 decoder 2025-03-01 15:23:33 +02:00
770021df6a sysutils: basic grep-like utility 2025-03-01 01:37:31 +02:00
4a7aa8d831 doc: add MIT license and font attributions 2025-03-01 01:35:16 +02:00
c4c8b8acc6 term: switch to truetype fonts 2025-03-01 01:20:51 +02:00
8c4bdcbe64 pty: make pty buffer blocking 2025-02-28 12:40:14 +02:00
99644d335d pty: increase output buffer size 2025-02-28 12:09:06 +02:00
03242a0635 proc: implement process tracing 2025-02-27 18:49:20 +02:00
bbdcfd947a c: add rv64 cmake toolchain file 2025-02-26 19:17:55 +02:00
e3916868d2 dyn-loader: relocs and entry for rv64 2025-02-26 18:30:43 +02:00
3a5a693691 rv64: static libc for riscv64 2025-02-26 17:52:52 +02:00
72633eb339 maint: sync up other architectures with mmap(file) 2025-02-26 16:21:56 +02:00
43acdb9e13 libc/colors: non-blocking local socket + fixed string bugs in libc 2025-02-26 11:53:08 +02:00
7fdc57fd9f proc: implement basic mmap with files, real_program in arg 2025-02-25 17:41:14 +02:00
d910e8c1a0 user: replace third-party humansize 2025-02-24 14:53:09 +02:00
6abea7ef22 libc: implement posix_spawn()/truncate() 2025-02-24 11:05:23 +02:00
5d5379ac8a dyn-loader: implement basic dladdr() 2025-02-24 11:00:56 +02:00
8e45e48362 mm: bump physical memory limit 2025-02-24 11:00:10 +02:00
f1a6033f5b vfs: implement file truncation, bump open file limit 2025-02-24 10:59:48 +02:00
a1ccdf7e76 ext2: support reading from files with holes 2025-02-24 10:58:44 +02:00
dcc5d56750 user: rework userspace tracing 2025-02-19 22:06:29 +02:00
0105be8fea libc: partially sync ygglibc with socket changes 2025-02-18 23:51:07 +02:00
c2cf314dcd colors: basic bar program 2025-02-18 19:44:17 +02:00
f605b0a80c colors: rewrite colors, hosted testing support 2025-02-18 11:27:54 +02:00
82175f342e debug: avoid debugging deadlocks 2025-02-15 20:18:30 +02:00
f716c50988 virtio: add virtio-blk, rework virtio queues 2025-02-15 16:41:47 +02:00
99f5ad0987 jh7110: syscrg/aoncrg map memory in probe 2025-02-14 18:47:51 +02:00
41944890b6 jh7110: get second ethernet kinda working 2025-02-14 16:52:45 +02:00
70eb6cfaed jh7110: better syscrg clock structs 2025-02-14 12:10:12 +02:00
5c1c980ccd stmmac: link status reporting 2025-02-14 01:08:27 +02:00
99a099efad conf: fix "emulated" DNS nameserver in 10-resolver rc script 2025-02-14 00:26:42 +02:00
6253ab282e stmmac: move to softirq approach 2025-02-14 00:26:41 +02:00
57d46ed070 stmmac: print Tx error statuses 2025-02-14 00:26:16 +02:00
f2cfc9136a stmmac: implement a basic stmmac driver 2025-02-14 00:26:14 +02:00
24f1f41217 tools: write riscv image size to header in gentables 2025-02-13 22:53:49 +02:00
5d406feb07 rv64/jh7110: implement syscrg stub + uart clock/reset 2025-02-13 22:53:47 +02:00
975df985ac x86: fix hpet on ThinkPad T430 by enabling FSB delivery 2025-02-13 22:18:33 +02:00
a0cdc39f30 shell: set created pipeline pgid 2025-02-13 17:01:36 +02:00
de16799908 term: add scrollback 2025-02-13 13:10:06 +02:00
250d70a958 ps2: add more keys 2025-02-13 11:41:15 +02:00
b836cf7fc7 igbe: cleanup igbe code 2025-02-11 18:36:41 +02:00
90edc4c8ed rtl81xx: fix rtl8168h (rtl8168gu still broken) 2025-02-11 17:12:29 +02:00
2867597c8e apic/net: better MSI allocation, better PHY setup 2025-02-10 18:07:43 +02:00
3f62374431 net: raw packet tx capture 2025-02-10 15:14:14 +02:00
b8078561bf net: report link state to userspace 2025-02-10 11:42:09 +02:00
8db05f304e netutils: fix missing parameter list in DHCPDISCOVER 2025-02-10 09:52:28 +02:00
0a46e6f57c WIP: implement igbe driver 2025-02-10 09:15:25 +02:00
f1becafaaf xhci: some quirks for Intel chipsets 2025-02-09 19:04:27 +02:00
3e5f3fc6cd x86: don't panic when RTC returns invalid values 2025-02-09 18:21:16 +02:00
ca7564a70b fat32: fix zero-sized root directory 2025-02-09 17:04:49 +02:00
40574c60f0 nvme: dma cache flush, better performance (played with timer) 2025-02-09 16:52:36 +02:00
a5e479007f nvme: prettify register operation 2025-02-09 15:31:00 +02:00
5edb26a757 debug: better kernel ring logger 2025-02-09 14:30:39 +02:00
a08fe6ab1b fat32: implement FAT32 (read) driver 2025-02-07 23:18:03 +02:00
80e5e72bb7 scsi: support multiple units per single enclosure 2025-02-07 11:34:57 +02:00
7358852f67 block: make block subsystem use DmaBuffer/DmaSlice 2025-02-07 01:34:36 +02:00
e812453a97 dev: use DmaBuffer instead of PageBox where needed 2025-02-06 21:05:53 +02:00
8cbde8389f dev: pass DeviceInitContext into Device::init() 2025-02-06 12:24:03 +02:00
7348232aa9 pci/dt: rework pci interrupts, add gicv2m and dt msi controllers 2025-02-05 21:45:48 +02:00
01dbac2132 dt: fix pci interrupt-map parsing 2025-02-05 12:35:38 +02:00
d83b82ef45 pci: move to .init_array-based driver registration 2025-02-04 22:57:18 +02:00
87c7614fd8 xhci: rework xhci driver, now works on real hw 2025-02-04 22:55:04 +02:00
d687051c48 usb: implement usb-msc-bbb class driver, basic scsi commands 2025-02-04 22:51:33 +02:00
791a936b7f debug: change logging format 2025-02-04 22:51:27 +02:00
2f46a36255 net: implement rtl8168 driver 2025-01-31 23:48:14 +02:00
50a760985b net: implement rtl8139 driver 2025-01-31 23:46:01 +02:00
b567995466 pci: implement acpi irq route lookup 2025-01-31 23:46:00 +02:00
6e7a42c2cb acpi: move ACPI to its own driver 2025-01-31 23:43:56 +02:00
9e48530e62 pci: add lspci-like utility 2025-01-31 18:03:08 +02:00
abdf53368b xhci: add a warning when a device is detached during init 2025-01-27 16:59:48 +02:00
1bc99bc05f WIP: x86: allow non-64bit counters in HPET 2025-01-27 16:57:20 +02:00
5b1b69e467 rv64: remove commented out M-mode CSRs 2025-01-21 19:53:33 +02:00
e0bb98e92a doc: add notes on VisionFive 2 boot 2025-01-21 19:39:48 +02:00
822d4f891c rv64: implement address space dropping 2025-01-21 18:50:26 +02:00
ca82e25cf6 rv64: relax TLB flushes 2025-01-21 18:37:30 +02:00
6aa3e7f6be rv64: implement signal handling 2025-01-21 18:13:05 +02:00
8ff0f03989 xtask: increase default mem for riscv64 2025-01-21 17:58:16 +02:00
5d27bd6033 mm: PageBox::into_physical_raw/from_physical_raw 2025-01-21 17:05:38 +02:00
cfc11c402a rv64: fix smp init in asymmetric systems 2025-01-21 16:53:57 +02:00
909980f4eb rv64: add jh7110/starfive visionfive2 support 2025-01-21 16:34:03 +02:00
16f580e7af rv64: smp secondary hart startup 2025-01-20 17:04:17 +02:00
65b8c0ee67 rv64: fix timer clock scale 2025-01-20 14:15:45 +02:00
2f942e1721 maint: sync up other archs 2025-01-20 13:59:06 +02:00
86509e39c1 rv64: riscv plic driver, time accouting 2025-01-20 13:22:24 +02:00
8ba37c9762 rv64: boot into usermode 2025-01-20 00:54:26 +02:00
20fa34c945 rv64: platform init, task switching 2025-01-19 15:16:26 +02:00
f46f3ddc31 rv64: implement M-/S-mode trap stubs 2025-01-17 03:28:00 +02:00
86eb2d3252 rv64: boot into upper half 2025-01-17 02:25:49 +02:00
07458f33e4 abi: clean up unused stuff in ABI 2025-01-16 18:28:52 +02:00
009f545cb3 net: remove MessageChannel, replace with local sockets 2025-01-16 18:06:00 +02:00
0889e99049 shell: reimplement shell 2025-01-15 16:13:49 +02:00
6859e70651 shell: rework parsing (again) 2025-01-12 20:09:56 +02:00
8454fec183 libc: clean up socket option handling 2025-01-12 15:03:19 +02:00
945c490fa7 libc: sync with abi changes 2025-01-12 14:54:37 +02:00
9fa940f011 abi: change set/get_thread/process_option() 2025-01-12 14:42:52 +02:00
26d8b9b3bd abi: rework device_request/filesystem_control 2025-01-11 14:08:10 +02:00
2e3af98822 abi: file options, remove NonBlocking socket option 2025-01-10 15:40:58 +02:00
ed9bbea189 abi: update abi def syntax 2025-01-09 22:27:33 +02:00
6ad8024ca0 maint: fix broken tests 2025-01-09 20:34:13 +02:00
ab71cac6fa abi: rework socket option ABI 2025-01-09 19:35:58 +02:00
dcf3658bd1 libc: implement socket functionality 2025-01-08 19:50:33 +02:00
a4e441d236 net: move to berkeley-style sockets 2025-01-08 18:02:22 +02:00
f1256e262b abi: merge remove() and remove_directory() 2025-01-06 20:19:32 +02:00
f978a6b287 rt: move mem intrinsics 2025-01-06 18:41:25 +02:00
86f4a3f214 abi: update abi docs 2025-01-06 18:33:35 +02:00
34a5f4f0d8 abi: rework process options 2025-01-06 15:28:53 +02:00
e5b287e090 abi: add fsync()/truncate() 2025-01-06 10:50:32 +02:00
dfa6f7c296 rt: add stubs for current_exe()/home_directory()/make_temp_directory() 2025-01-05 14:45:09 +02:00
97e11c3bee abi: fix bug in checked_sub_duration(), add tests 2025-01-05 14:23:29 +02:00
dc76c5b7a8 abi: implement time functions 2025-01-05 12:25:22 +02:00
89f4965460 shell: better readline, rsh: allow builtin pubkey 2025-01-03 19:06:16 +02:00
f36436ee07 shell: better parser, kernel: better fd inheritance in spawn 2025-01-03 15:28:05 +02:00
3aec9ce556 vfs: improve _find() path handling 2025-01-03 11:37:43 +02:00
a126118589 ext2: properly handle max file size 2025-01-02 21:48:00 +02:00
d8f035dc69 ext2: don't try freeing fastlink "blocks" 2025-01-02 21:34:42 +02:00
f1f7b753d8 vfs: replace assertion with a check 2025-01-02 21:30:16 +02:00
595504b371 vfs: check mountpoint before instantiating a new filesystem 2025-01-02 21:29:02 +02:00
df0a48ca42 vfs: symlink creation 2025-01-02 20:05:22 +02:00
f13f756c20 ext2: dedup block/inode free code 2025-01-02 17:20:24 +02:00
b0aab12bf3 refactor: fix ext2 warnings 2025-01-02 16:21:31 +02:00
11f731bf0f ext2: free deleted inodes/blocks 2025-01-02 16:18:35 +02:00
17e2fba8b7 ext2: check for inode deletion before pulling from cache 2025-01-02 13:47:14 +02:00
68d0568af3 ext2: remove useless code from inode cache 2025-01-02 11:46:47 +02:00
d597197ca2 vfs: implement filename validation 2025-01-01 21:17:08 +02:00
77b6403c68 ext2: better directory handling 2024-12-31 15:17:06 +02:00
3aea206cad ext2: cache sb as a regular block, avoid incoherency 2024-12-31 12:10:30 +02:00
fd9ea77adb ext2: use the same access method for icache as for bcache 2024-12-31 11:23:33 +02:00
5f2c99f5c7 ext2: non-empty directory check when unlinking 2024-12-29 23:51:30 +02:00
69c672bfca ext2: metadata update 2024-12-29 19:43:08 +02:00
ce8600a782 ext2: add a simple unlink() 2024-12-29 17:53:01 +02:00
8c96a009ad fs: implement rename() 2024-12-29 15:34:59 +02:00
1d58b77241 tty: fix ring getting stuck with stale wakers registered 2024-12-29 13:04:51 +02:00
7844116411 ext2: fix panic when encountering invalid UTF-8 2024-12-29 12:51:34 +02:00
baff0cd5bd ext2: add all required/readonly features 2024-12-28 14:33:22 +02:00
37f8182eae x86_64: enable PDPE1GB support 2024-12-28 12:52:24 +02:00
46854c0f81 x86_64: fix L2 RAM mapping issue + incorrect CR3 write 2024-12-28 12:32:14 +02:00
76f1872764 vfs: add ctime/mtime/atime 2024-12-22 15:22:40 +02:00
4acb148d0e vfs: add is_terminal() 2024-12-21 00:28:28 +02:00
8dbbc07ff6 vfs: open() for pty halves 2024-12-21 00:04:31 +02:00
9b07dd7c6b vfs: implement hard links 2024-12-21 00:00:00 +02:00
ba00c97c66 vfs: force all symlinks to be path-based, chdir/getcwd 2024-12-20 23:00:43 +02:00
55e6dae194 abi: proper handling for empty slice arguments 2024-12-20 13:41:31 +02:00
dfba752ddf dev: rename dt-pci to be consistent with device tree 2024-12-18 19:57:13 +02:00
e309fdab81 dt: add /model and /compatible to sysfs 2024-12-17 19:45:44 +02:00
49b918e2ac cfg: allow passing cmdline options to the kernel 2024-12-17 19:12:39 +02:00
cb5814a5ce fs: rework sysfs 2024-12-17 16:42:21 +02:00
047746d134 dt: add clocks, bcm2835-aux driver 2024-12-16 22:06:40 +02:00
ac2394a2c3 maint: upgrade to rustc 1.85.0-nightly, use softfloat abi 2024-12-16 20:15:06 +02:00
bc1c8c41f5 libc: fix missing definitions in limits.h 2024-12-16 20:08:10 +02:00
b28b12b069 doc: document the device-tree crate 2024-12-16 19:18:35 +02:00
b090fb9fb6 xtask: configurable UEFI code path 2024-12-16 16:38:20 +02:00
300646e8a7 xtask: print environment command 2024-12-16 16:36:38 +02:00
c4b18c3379 xtask: LLVM clone, clone progress 2024-12-16 16:20:29 +02:00
afdc86ef3a xtask: remove hardcoded paths, better clean cmd 2024-12-16 14:48:44 +02:00
ac0432100d aarch64: reenable L2 device pages 2024-12-16 14:25:48 +02:00
699816d61d aarch64: reenable psci, add spin-table smp init 2024-12-16 14:24:46 +02:00
dfae656833 dt: refactor device-tree lib 2024-12-16 12:56:05 +02:00
a9340ea089 dt: rework device-tree, proper interrupt handling 2024-12-16 00:23:23 +02:00
ccb5a6a7eb aarch64: prettify cache init 2024-12-15 15:20:09 +02:00
a2adff85a7 x86: fix i686/x86_64 build 2024-12-14 13:10:46 +02:00
60164fedca aarch64: add raspberry pi 4b target 2024-12-14 02:34:22 +02:00
8635914ba1 doc: fix readme links 2024-12-10 20:59:03 +02:00
9b71caa103 doc: update README.md 2024-12-10 20:57:23 +02:00
f3249c500d refactor: get rid of clippy warnings 2024-12-10 20:42:47 +02:00
6a7891838e aarch64: fix timer warnings 2024-12-10 20:39:12 +02:00
718aad8a7a timer: rework timers, implement hpet for x86_64 2024-12-10 20:37:47 +02:00
433094837d x86: unify x86_64/i686 platform init 2024-12-10 16:55:27 +02:00
0b2822cea1 x86: make com-port usable as a serial console 2024-12-10 13:54:26 +02:00
1ad90ce181 x86: add puts() impl to com-port 2024-12-10 13:02:36 +02:00
b60cc4df52 sysutils: fix dd divide by zero 2024-12-10 12:51:52 +02:00
c0d34d2c56 i686: remove irrelevant comment 2024-12-10 12:18:15 +02:00
8db9c08224 x86: re-enable RTC 2024-12-10 12:17:52 +02:00
56fbcefa80 dev: rework device management (&'static -> Arc) 2024-12-10 11:52:26 +02:00
18bfeaf917 i686: add graphical mode 2024-12-09 11:12:33 +02:00
3968c3a4cd ipc: flexbuffers -> postcard 2024-12-09 10:21:15 +02:00
8c09e046e9 video: x86_64 switch-over from boot fb to virtio-gpu if present 2024-12-08 14:49:29 +02:00
6bd3d387bf display: basic virtio-gpu support, better display API 2024-12-06 18:03:18 +02:00
938 changed files with 157930 additions and 19366 deletions

View File

@ -1,2 +1,2 @@
[alias]
xtask = "run --manifest-path ./xtask/Cargo.toml --"
xtask = "run --manifest-path ./xtask/Cargo.toml --release --"

5
.gitignore vendored
View File

@ -1,3 +1,8 @@
/target
/toolchain
/xtask.toml
/qemu.toml
/etc/boot/yboot.cfg
/disk-*.img
/tmp-*.txt
/*.log

1284
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -6,7 +6,8 @@ exclude = [
"tool/abi-generator",
"toolchain",
"userspace/dynload-program",
"userspace/lib/ygglibc"
"userspace/lib/ygglibc",
"toolchain-c"
]
members = [
"xtask",
@ -15,7 +16,9 @@ members = [
"lib/abi",
"lib/libyalloc",
"lib/runtime",
"lib/qemu"
"lib/qemu",
"lib/abi-serde",
"lib/libutil"
]
[workspace.dependencies]
@ -37,6 +40,7 @@ ahash = { version = "0.8.11", default-features = false, features = ["no-rng"] }
# acpi
acpi = { git = "https://github.com/alnyan/acpi.git", package = "acpi", branch = "acpi-system" }
rsdp = { git = "https://github.com/alnyan/acpi.git", package = "rsdp", branch = "acpi-system" }
aml = { git = "https://github.com/alnyan/acpi.git", branch = "acpi-system" }
acpi-system = { git = "https://github.com/alnyan/acpi-system.git" }
@ -45,12 +49,14 @@ yboot-proto.path = "boot/yboot-proto"
# Local libs
abi-lib.path = "lib/abi-lib"
abi-serde.path = "lib/abi-serde"
yggdrasil-abi.path = "lib/abi"
abi-generator.path = "tool/abi-generator"
# Kernel parts
kernel-arch-interface.path = "kernel/arch/interface"
kernel-arch-aarch64.path = "kernel/arch/aarch64"
kernel-arch-riscv64.path = "kernel/arch/riscv64"
kernel-arch-x86_64.path = "kernel/arch/x86_64"
kernel-arch-i686.path = "kernel/arch/i686"
kernel-arch-x86.path = "kernel/arch/x86"
@ -86,3 +92,15 @@ features = ["no_std_stream"]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }
[workspace.lints.clippy]
derivable_impls = { level = "allow" }
[profile.dev]
opt-level = 1
# split-debuginfo = "packed"
lto = "thin"
panic = "abort"
[profile.test]
split-debuginfo = "none"
[profile.dev.package."*"]
opt-level = 3

9
LICENSE.txt Normal file
View File

@ -0,0 +1,9 @@
MIT License
Copyright (c) 2025 Mark Poliakov <mark@alnyan.me>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the next paragraph) shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -8,22 +8,38 @@ Rust Unix-like operating system.
Main features
-------------
* Architecture support: [aarch64](/kernel/src/arch/aarch64) and [x86_64](/kernel/src/arch/x86_64)
* Kernel/userspace preemptive multithreading
* Kernel-space multitasking with `async`/`await` runtime
* Symmetric Multiprocessing
* Unix-like virtual filesystem:
* Architecture support:
* [aarch64](kernel/src/arch/aarch64)
* [x86_64](kernel/src/arch/x86_64)
* [i686](kernel/src/arch/i686) (Pentium Pro and later)
* Core features:
* Kernel/userspace preemptive multithreading
* Kernel-space multitasking with `async`/`await` runtime
* Symmetric Multiprocessing
* Rust-style interfaces for most of the stuff like memory management, devices etc.
* Filesystem features:
* Unix-like virtual filesystem:
files, directories, block/char devices, symlinks, mounts
* [Kernel-user ABI](/lib/abi-def/yggdrasil.abi) generated from a rust-like description language
* In-memory read-write filesystem for tar-based initrd
* sysfs/devfs
* Binary formats: ELF + `#!/...` shebangs
* Rust-style interfaces for most of the stuff like memory management, devices etc.
* PCI Express devices
* NVMe drive support (read/write, currently x86_64 only, due to lack of MSI-X support on aarch64).
* AHCI SATA drive support (read/write)
* xHCI USB host controller
* USB HID keyboards
* In-memory read-write filesystem for tar-based initrd
* sysfs
* devfs
* ext2
* Userspace features:
* [Kernel-user ABI](lib/abi-def/yggdrasil.abi) generated from a rust-like description language
* Sanitized system calls better suited for use in Rust
* Binary formats: ELF + `#!/...` shebangs
* C compatibility through a [custom Rust libc](userspace/lib/ygglibc)
* Userspace multithreading
* Synchronization primitives through futex-like interface
* Unix-like signals and exceptions
* [Dynamic loader](userspace/dyn-loader) for linking with shared libraries
* Hardware features:
* PCI Express devices
* NVMe drive support (read/write, currently x86_64 only, due to lack of MSI-X support on aarch64/i686).
* AHCI SATA drive support (read/write)
* xHCI USB host controller
* VirtIO Network + GPU framebuffer support
* USB HID keyboards
aarch64-specific:
@ -32,39 +48,34 @@ aarch64-specific:
* ARM generic timer as system/monotonic timer
* GICv2 IRQ controller
x86_64-specific:
x86-specific:
* UEFI boot through [yboot](https://git.alnyan.me/yggdrasil/yboot)
(no plans for legacy boot)
* Boot options:
* x86_64: UEFI [yboot](https://git.alnyan.me/yggdrasil/yboot)
* i686: multiboot/grub
* I/O and Local APIC IRQ controllers
* PS/2 keyboard
* i8253-based timer (got some problems with HPET on
real hw, had to revert, lol)
* HPET for x86_64
* i8253-based timer for i686 or as a fallback timer
* COM ports
* ACPI, [work in progress](https://github.com/rust-osdev/acpi), mostly broken
on real hardware
on real hardware, so currently disabled
* ACPI shutdown
* PCI IRQ pin routing
* Events like power button, etc.
* Fancy framebuffer console
Userspace features:
* Sanitized system calls better suited for Rust
* Userspace threads
* Synchronization primitives through futex-like interface
* Unix-like signals and exceptions
Building the OS
---------------
**NOTE** This project uses `xtask`-style build system. To see help, use `cargo xtask --help`.
Prerequisites:
* Decent CPU and a sizable amount of RAM
* ~20G of free disk space
* ~40G of free disk space for a full build
* Patience
**NOTE** Full OS build requires you to build the `*-unknown-yggdrasil`
**NOTE** Full OS build requires you to build the `*-unknown-yggdrasil`
Rust toolchain, which may take quite a while, so be prepared.
Steps:
@ -94,21 +105,30 @@ Steps:
```
2. Run `cargo xtask toolchain` to fetch, build and link the toolchain
**NOTE** if toolchain fetch fails for some reason, try cloning directly
from `https://git.alnyan.me/yggdrasil/yggdrasil-rust.git` with appropriate
branch.
3. Run `cargo xtask` to build the OS.
Once the OS has been built, you can run it in QEMU by executing
Once the OS has been built, you can run it in QEMU by executing
`cargo xtask qemu`. For more `xtask` commands, see `cargo xtask --help`.
General plans (in no particular order)
--------------------------------------
* Better unification of architecture code
* `async` for VFS (?)
* Code cleanup, I've been doing quite a lazy job at that lately...
1. Get it running on more real hardware
2. Get a full LLVM build to work
3. Get rustc to work
4. Get self-hosted
5. Run doom (?)
In addition, there's the eternal code cleanup — I've been doing quite a lazy job at that lately...
Navigation
----------
* Kernel: [`/kernel`](/kernel)
* Userspace: [`/userspace`](/userspace)
* ABI definitions: [`/lib/abi-def`](/lib/abi-def)
* Kernel: [`kernel`](kernel)
* Userspace: [`userspace`](userspace)
* ABI definitions: [`lib/abi-def`](lib/abi-def)

BIN
boot/riscv/fw_jump.bin Normal file

Binary file not shown.

View File

@ -4,6 +4,9 @@ use bytemuck::{Pod, Zeroable};
use crate::{AvailableRegion, IterableMemoryMap, LoadProtocolHeader};
pub const PIXEL_R8G8B8A8: u32 = 1;
pub const PIXEL_B8G8R8A8: u32 = 2;
#[derive(Clone, Copy, Pod, Zeroable)]
#[repr(C)]
pub struct LoadProtocol {
@ -12,6 +15,9 @@ pub struct LoadProtocol {
pub memory_map: MemoryMap,
pub cmdline: u64,
pub cmdline_len: u64,
pub rsdp_address: u64,
pub initrd_address: u64,
pub initrd_size: u64,
@ -46,6 +52,8 @@ pub struct FramebufferOption {
pub res_stride: u64,
pub res_address: u64,
pub res_size: u64,
pub res_format: u32,
pub _0: u32,
}
impl AvailableRegion for AvailableMemoryRegion {

39
boot/yboot/src/config.rs Normal file
View File

@ -0,0 +1,39 @@
use uefi::{
proto::media::file::{Directory, File, FileAttribute, FileMode},
CStr16, Result, Status,
};
#[derive(Debug)]
pub struct Config {
pub cmdline: [u8; 4096],
pub cmdline_len: usize,
}
impl Default for Config {
fn default() -> Self {
Self {
cmdline: [0; 4096],
cmdline_len: 0,
}
}
}
impl Config {
pub fn load(root: &mut Directory, path: &CStr16) -> Result<Self> {
let file = match root.open(path, FileMode::Read, FileAttribute::empty()) {
Ok(file) => file,
Err(error) => {
root.reset_entry_readout().ok();
log::warn!("Couldn't open {path:?}: {error:?}");
return Ok(Self::default());
}
};
root.reset_entry_readout().ok();
let mut this = Self::default();
let mut file = file.into_regular_file().ok_or(Status::INVALID_PARAMETER)?;
this.cmdline_len = file.read(&mut this.cmdline)?;
Ok(this)
}
}

View File

@ -2,6 +2,7 @@
#![no_std]
#![no_main]
pub mod config;
pub mod elf;
pub mod initrd;
pub mod mem;
@ -9,13 +10,16 @@ pub mod protocol_ext;
use core::{arch::asm, mem::size_of, ops::Deref};
use config::Config;
use elf::Object;
use log::{debug, error, info};
use uefi::{
prelude::*,
proto::{
console::gop::GraphicsOutput, device_path::DevicePath, loaded_image::LoadedImage,
media::fs::SimpleFileSystem,
console::gop::{GraphicsOutput, PixelFormat},
device_path::DevicePath,
loaded_image::LoadedImage,
media::{file::Directory, fs::SimpleFileSystem},
},
table::{
boot::{AllocateType, MemoryType, ScopedProtocol},
@ -24,7 +28,7 @@ use uefi::{
Error,
};
use yboot_proto::{
v1::{AvailableMemoryRegion, FramebufferOption},
v1::{self, AvailableMemoryRegion, FramebufferOption},
LoadProtocolV1, LOADER_MAGIC,
};
@ -47,11 +51,18 @@ fn setup_framebuffer(bs: &BootServices, fb: &mut FramebufferOption) -> Result<()
let mut result = gop.frame_buffer();
let format = match mode.info().pixel_format() {
PixelFormat::Bgr => v1::PIXEL_B8G8R8A8,
PixelFormat::Rgb => v1::PIXEL_R8G8B8A8,
_ => 0,
};
fb.res_width = fb.req_width;
fb.res_height = fb.req_height;
fb.res_address = result.as_mut_ptr() as _;
fb.res_stride = mode.info().stride() as u64 * 4;
fb.res_size = result.size() as _;
fb.res_format = format;
info!(
"Framebuffer: {}x{} @ {:#x}",
@ -85,27 +96,45 @@ fn boot_partition(
bs.open_protocol_exclusive::<SimpleFileSystem>(fs_handle)
}
fn open_root(image: Handle, bs: &BootServices) -> Result<Directory, Error> {
let mut boot_partition = boot_partition(image, bs)?;
boot_partition.open_volume()
}
fn load_kernel<'a>(
ih: Handle,
config: &Config,
root: &mut Directory,
st: &SystemTable<Boot>,
) -> Result<(u64, u64, &'a mut LoadProtocolV1), Error> {
let bs = st.boot_services();
let mut fs = boot_partition(ih, bs)?;
let mut root = fs.open_volume()?;
let mut kernel_obj = Object::open(&mut root, cstr16!("kernel.elf"))?;
let mut kernel_obj = Object::open(root, cstr16!("kernel.elf"))?;
let loaded_obj = kernel_obj.load(bs)?;
debug!("Loaded object: {:#x?}", loaded_obj);
// Load initrd
let (initrd_start, initrd_size) = initrd::load_somewhere(bs, &mut root, cstr16!("initrd.img"))?;
let (initrd_start, initrd_size) = initrd::load_somewhere(bs, root, cstr16!("initrd.img"))?;
debug!(
"Loaded initrd: {:#x?}",
initrd_start..initrd_start + initrd_size
);
// Load cmdline
let cmdline = if config.cmdline_len != 0 {
let address = bs.allocate_pages(AllocateType::AnyPages, MemoryType::LOADER_DATA, 1)?;
let dst =
unsafe { core::slice::from_raw_parts_mut(address as *mut u8, config.cmdline_len) };
dst.copy_from_slice(&config.cmdline[..config.cmdline_len]);
debug!("Cmdline at {:#x?}", address);
address
} else {
0
};
// Other versions are not existent yet
assert_eq!(loaded_obj.protocol_version, 1);
let proto_data = unsafe { &mut *(loaded_obj.protocol_struct_paddr as *mut LoadProtocolV1) };
@ -116,6 +145,9 @@ fn load_kernel<'a>(
})?;
info!("RSDP at {:#x}", rsdp);
proto_data.cmdline = cmdline;
proto_data.cmdline_len = config.cmdline_len as _;
proto_data.rsdp_address = rsdp;
proto_data.initrd_address = initrd_start;
proto_data.initrd_size = initrd_size;
@ -181,9 +213,35 @@ unsafe fn map_and_enter_kernel(
#[entry]
fn efi_main(image_handle: Handle, mut system_table: SystemTable<Boot>) -> Status {
uefi_services::init(&mut system_table).unwrap();
if uefi_services::init(&mut system_table).is_err() {
return Status::LOAD_ERROR;
}
let (entry, mmap_memory, proto_data) = load_kernel(image_handle, &system_table).unwrap();
let bs = system_table.boot_services();
let mut root = match open_root(image_handle, bs) {
Ok(root) => root,
Err(error) => {
error!("Could not open boot partition root: {error:?}");
return Status::LOAD_ERROR;
}
};
let config = match Config::load(&mut root, cstr16!("yboot.cfg")) {
Ok(config) => config,
Err(error) => {
error!("Malformed yboot.cfg: {error:?}");
return Status::LOAD_ERROR;
}
};
let (entry, mmap_memory, proto_data) = match load_kernel(&config, &mut root, &system_table) {
Ok(e) => e,
Err(error) => {
error!("Failed to load the kernel/initrd: {error:?}");
return Status::LOAD_ERROR;
}
};
unsafe {
map_and_enter_kernel(system_table, proto_data, mmap_memory, entry);

57
doc/raspi4b.txt Normal file
View File

@ -0,0 +1,57 @@
**NOTE** I haven't yet tested direct boot through Raspberry's
proprietary bootloader.
Booting Yggdrasil on Raspberry Pi 4B with u-boot:
1. Clone u-boot sources to some directory and checkout some
stable branch. I've used v2024.10.
2. Modify cmd/boot.c by replacing the do_go_exec function:
/* Allow ports to override the default behavior */
__attribute__((weak))
unsigned long do_go_exec(ulong (*entry)(int, char * const []), int argc,
char *const argv[])
{
void *entry_ptr = (void *) entry;
ulong fdt_addr_r = 0;
if (argc >= 2) {
fdt_addr_r = hextoul(argv[1], NULL);
}
void (*func)(ulong) = entry_ptr;
func(fdt_addr_r);
return 0;
}
3. make CROSS_COMPILE=aarch64-linux-gnu- ARCH=arm64 rpi_4_defconfig
4. make CROSS_COMPILE=aarch64-linux-gnu- ARCH=arm64 -j
5. Copy u-boot.bin into your Pi SD-card's boot partition.
**NOTE** I assume you have all the bootloader parts in the boot partition already.
If not, clone raspberry fw repo and copy the following files to the boot partition:
* bootcode.bin
* start4.elf
* all the .dtb files (a bcm2711-rpi-4-b.dtb should be enough though)
6. config.txt:
enable_uart=1
arm_64bit=1
kernel=u-boot.bin
7. Compile the OS with `cargo xtask --arch=aarch64 --board=raspi4b --release`
8. Copy the following files into some directory:
* target/aarch64-unknown-raspi4b/release/yggdrasil-kernel
* userspace/target/aarch64-unknown-yggdrasil/release/initrd.tar
9. cd into that directory and start a TFTP server of your choice. I used `uftpd`.
10. Connect an ethernet and serial to the Pi and run the following commands in u-boot shell:
tftpboot 0x04000000 <YOUR IP>:initrd.tar
tftpboot ${loadaddr} <YOUR IP>:yggdrasil-kernel
load mmc 0:1 ${fdt_addr_r} bcm2711-rpi-4-b.dtb
fdt addr ${fdt_addr_r}
fdt resize
fdt memory 0x0 0x3C000000
fdt chosen 0x04000000 <WHATEVER SIZE WAS PRINTED WHEN RUNNING THE FIRST COMMAND>
bootelf -p
go ${kernel_addr_r} ${fdt_addr_r}
11. Yggdrasil OS should start!

30
doc/visionfive2.txt Normal file
View File

@ -0,0 +1,30 @@
Booting Yggdrasil OS on Starfive VisionFive 2 RISC-V board:
* TODO: proper format for initrd image
* TODO: 0x70000000 can be replaced with a builtin var?
Prerequisites:
* OpenSBI + u-boot (you can use the regular debian installation from Starfive)
* yggdrasil-kernel.bin
* initrd.img
Steps:
1. Copy yggdrasil-kernel.bin and initrd.img into some directory and start a TFTP server there
2. Connect to VF2's serial port, ethernet and enter u-boot
3. Run the following commands:
# Get an IP address
dhcp
# [Optional] set some kernel cmdline params
setenv bootargs "debug.serial-level=info"
# Load initrd
tftpboot 0x70000000 <your-ip-address>:initrd.img
# Load kernel
tftpboot ${loadaddr} <your-ip-address>:yggdrasil-kernel.bin
# Load dtb
load mmc 1:3 ${fdt_addr_r} dtbs/...-starfive/starfive/${fdtfile}
fdt resize
# Enter the kernel
booti ${loadaddr} 0x70000000:<initrd-size> ${fdt_addr_r}

View File

@ -1,9 +1,9 @@
{
"is-builtin": false,
"arch": "aarch64",
"os": "none",
"abi": "softfloat",
"llvm-target": "aarch64-unknown-none",
"data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32",
"data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32",
"max-atomic-width": 128,
"target-pointer-width": "64",
"features": "+v8a,+strict-align,-neon,-fp-armv8",

BIN
etc/dtb/bcm2711-rpi-4-b.dtb Normal file

Binary file not shown.

View File

@ -1,8 +1,8 @@
{
"is-builtin": false,
"arch": "x86",
"cpu": "pentium4",
"os": "none",
"abi": "softfloat",
"llvm-target": "i686-unknown-linux-gnu",
"data-layout": "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128",
"max-atomic-width": 64,

View File

@ -21,12 +21,13 @@ SECTIONS {
. = ALIGN(4K);
.rodata : AT(. - KERNEL_VIRT_OFFSET) {
*(.eh_frame*)
. = ALIGN(16);
PROVIDE(__dt_probes_start = .);
KEEP(*(.dt_probes));
PROVIDE(__dt_probes_end = .);
*(.rodata*)
*(.eh_frame*)
. = ALIGN(16);
PROVIDE(__init_array_start = .);
KEEP(*(.init_array*))
PROVIDE(__init_array_end = .);
}
. = ALIGN(4K);
@ -48,6 +49,7 @@ SECTIONS {
}
. = ALIGN(4K);
PROVIDE(__bss_end_phys = . - KERNEL_VIRT_OFFSET);
PROVIDE(__bss_size = __bss_end_phys - __bss_start_phys);
PROVIDE(__kernel_end = .);
};

View File

@ -0,0 +1,55 @@
/* Kernel linker script for AArch64 on Raspberry Pi 4B.
   The entry stub is linked at the physical load address; after the
   ". = . + KERNEL_VIRT_OFFSET" bump, virtual addresses are higher-half
   while AT(. - KERNEL_VIRT_OFFSET) keeps each section's load address
   physical. */
ENTRY(__aarch64_entry);
/* Physical address where the Pi firmware / u-boot places the kernel. */
KERNEL_PHYS_BASE = 0x80000;
/* Higher-half base: virt = phys + KERNEL_VIRT_OFFSET. */
KERNEL_VIRT_OFFSET = 0xFFFFFF8000000000;
SECTIONS {
. = KERNEL_PHYS_BASE;
PROVIDE(__kernel_start = . + KERNEL_VIRT_OFFSET);
/* Entry code runs with the MMU off, so it is linked at its physical address. */
.text.entry : {
*(.text.entry)
}
. = ALIGN(16);
/* Switch the location counter to higher-half virtual addresses. */
. = . + KERNEL_VIRT_OFFSET;
.text : AT(. - KERNEL_VIRT_OFFSET) {
KEEP(*(.text.vectors));
*(.text*)
}
. = ALIGN(4K);
.rodata : AT(. - KERNEL_VIRT_OFFSET) {
*(.rodata*)
*(.eh_frame*)
. = ALIGN(16);
/* Bounds for the constructor array walked at kernel init. */
PROVIDE(__init_array_start = .);
KEEP(*(.init_array*))
PROVIDE(__init_array_end = .);
}
. = ALIGN(4K);
/* Statically-allocated page tables; kept page-aligned. */
.data.tables : AT (. - KERNEL_VIRT_OFFSET) {
KEEP(*(.data.tables))
}
. = ALIGN(4K);
.data : AT(. - KERNEL_VIRT_OFFSET) {
*(.data*)
*(.got*)
}
. = ALIGN(4K);
/* Physical bounds of .bss, for the early-boot clearing loop. */
PROVIDE(__bss_start_phys = . - KERNEL_VIRT_OFFSET);
.bss : AT(. - KERNEL_VIRT_OFFSET) {
*(COMMON)
*(.bss*)
}
. = ALIGN(4K);
PROVIDE(__bss_end_phys = . - KERNEL_VIRT_OFFSET);
PROVIDE(__bss_size = __bss_end_phys - __bss_start_phys);
PROVIDE(__kernel_end = .);
};

View File

@ -0,0 +1,58 @@
/* Kernel linker script for RISC-V 64 on the JH7110 (VisionFive 2).
   Same layout scheme as the other boards: entry stub at the physical
   load address, remaining sections linked higher-half with physical
   load addresses via AT(. - KERNEL_VIRT_OFFSET). */
ENTRY(__rv64_entry);
/* Physical address where OpenSBI/u-boot loads the kernel on JH7110. */
KERNEL_PHYS_BASE = 0x40200000;
/* Higher-half base: virt = phys + KERNEL_VIRT_OFFSET. */
KERNEL_VIRT_OFFSET = 0xFFFFFFF000000000;
SECTIONS {
. = KERNEL_PHYS_BASE;
PROVIDE(__kernel_start = . + KERNEL_VIRT_OFFSET);
/* Entry code runs with paging disabled, linked at its physical address. */
.text.entry : {
*(.text.entry)
}
. = ALIGN(16);
/* Switch the location counter to higher-half virtual addresses. */
. = . + KERNEL_VIRT_OFFSET;
.text : AT(. - KERNEL_VIRT_OFFSET) {
KEEP(*(.text.vectors));
*(.text*)
}
. = ALIGN(4K);
.rodata : AT(. - KERNEL_VIRT_OFFSET) {
*(.rodata*)
*(.eh_frame*)
}
. = ALIGN(4K);
/* Statically-allocated page tables; kept page-aligned. */
.data.tables : AT (. - KERNEL_VIRT_OFFSET) {
KEEP(*(.data.tables))
}
. = ALIGN(4K);
.data : AT(. - KERNEL_VIRT_OFFSET) {
*(.data*)
. = ALIGN(8);
/* PROVIDE(__global_pointer = . + 0x800 - KERNEL_VIRT_OFFSET); */
. = ALIGN(16);
/* Bounds for the constructor array walked at kernel init. */
PROVIDE(__init_array_start = .);
KEEP(*(.init_array*))
PROVIDE(__init_array_end = .);
*(.got*)
}
. = ALIGN(4K);
/* Physical bounds of .bss, for the early-boot clearing loop. */
PROVIDE(__bss_start_phys = . - KERNEL_VIRT_OFFSET);
.bss : AT(. - KERNEL_VIRT_OFFSET) {
*(COMMON)
*(.bss*)
}
. = ALIGN(4K);
PROVIDE(__bss_end_phys = . - KERNEL_VIRT_OFFSET);
PROVIDE(__bss_size = __bss_end_phys - __bss_start_phys);
PROVIDE(__kernel_end = .);
};

View File

@ -0,0 +1,58 @@
/* Kernel linker script for RISC-V 64 on the QEMU virt machine.
   Identical layout to the JH7110 script except for the physical load
   base (QEMU virt RAM starts at 0x80000000; OpenSBI occupies the first
   2 MiB, so the kernel loads at 0x80200000). */
ENTRY(__rv64_entry);
KERNEL_PHYS_BASE = 0x80200000;
/* Higher-half base: virt = phys + KERNEL_VIRT_OFFSET. */
KERNEL_VIRT_OFFSET = 0xFFFFFFF000000000;
SECTIONS {
. = KERNEL_PHYS_BASE;
PROVIDE(__kernel_start = . + KERNEL_VIRT_OFFSET);
/* Entry code runs with paging disabled, linked at its physical address. */
.text.entry : {
*(.text.entry)
}
. = ALIGN(16);
/* Switch the location counter to higher-half virtual addresses. */
. = . + KERNEL_VIRT_OFFSET;
.text : AT(. - KERNEL_VIRT_OFFSET) {
KEEP(*(.text.vectors));
*(.text*)
}
. = ALIGN(4K);
.rodata : AT(. - KERNEL_VIRT_OFFSET) {
*(.rodata*)
*(.eh_frame*)
}
. = ALIGN(4K);
/* Statically-allocated page tables; kept page-aligned. */
.data.tables : AT (. - KERNEL_VIRT_OFFSET) {
KEEP(*(.data.tables))
}
. = ALIGN(4K);
.data : AT(. - KERNEL_VIRT_OFFSET) {
*(.data*)
. = ALIGN(8);
/* PROVIDE(__global_pointer = . + 0x800 - KERNEL_VIRT_OFFSET); */
. = ALIGN(16);
/* Bounds for the constructor array walked at kernel init. */
PROVIDE(__init_array_start = .);
KEEP(*(.init_array*))
PROVIDE(__init_array_end = .);
*(.got*)
}
. = ALIGN(4K);
/* Physical bounds of .bss, for the early-boot clearing loop. */
PROVIDE(__bss_start_phys = . - KERNEL_VIRT_OFFSET);
.bss : AT(. - KERNEL_VIRT_OFFSET) {
*(COMMON)
*(.bss*)
}
. = ALIGN(4K);
PROVIDE(__bss_end_phys = . - KERNEL_VIRT_OFFSET);
PROVIDE(__bss_size = __bss_end_phys - __bss_start_phys);
PROVIDE(__kernel_end = .);
};

View File

@ -0,0 +1,26 @@
{
"arch": "riscv64",
"os": "none",
"abi": "softfloat",
"cpu": "generic-rv64",
"llvm-target": "riscv64",
"data-layout": "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128",
"max-atomic-width": 64,
"target-pointer-width": "64",
"features": "+m,+a,+c",
"disable-redzone": true,
"executables": true,
"panic-strategy": "abort",
"dynamic-linking": true,
"relocation-model": "pic",
"code-model": "medium",
"eh-frame-header": false,
"crt-objects-fallback": "false",
"emit-debug-gdb-scripts": false,
"llvm-abiname": "lp64",
"linker": "rust-lld",
"linker-flavor": "ld.lld"
}

View File

@ -1,13 +1,14 @@
{
"is-builtin": false,
"arch": "x86_64",
"cpu": "x86-64",
"os": "none",
"abi": "softfloat",
"rustc-abi": "x86-softfloat",
"llvm-target": "x86_64-unknown-linux-gnu",
"data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128",
"max-atomic-width": 64,
"target-pointer-width": "64",
"features": "-avx,-sse,+soft-float",
"features": "-avx,-sse,-avx2,+soft-float",
"disable-redzone": true,
"executables": true,

View File

@ -7,12 +7,12 @@ authors = ["Mark Poliakov <mark@alnyan.me>"]
[dependencies]
abi-lib.workspace = true
abi-serde.workspace = true
yggdrasil-abi.workspace = true
kernel-arch-interface.workspace = true
libk.workspace = true
libk-util.workspace = true
libk-mm.workspace = true
libk-device.workspace = true
elf.workspace = true
chrono.workspace = true
@ -29,13 +29,17 @@ ygg_driver_usb = { path = "driver/bus/usb" }
ygg_driver_net_core = { path = "driver/net/core" }
ygg_driver_net_loopback = { path = "driver/net/loopback" }
ygg_driver_virtio_net = { path = "driver/virtio/net", features = ["pci"] }
ygg_driver_virtio_gpu = { path = "driver/virtio/gpu", features = ["pci"] }
ygg_driver_virtio_blk = { path = "driver/virtio/blk", features = ["pci"] }
ygg_driver_nvme = { path = "driver/block/nvme" }
ygg_driver_ahci = { path = "driver/block/ahci" }
ygg_driver_usb_xhci = { path = "driver/usb/xhci" }
ygg_driver_input = { path = "driver/input" }
ygg_driver_usb_xhci.path = "driver/usb/xhci"
ygg_driver_net_rtl81xx.path = "driver/net/rtl81xx"
kernel-fs = { path = "driver/fs/kernel-fs" }
memfs = { path = "driver/fs/memfs" }
ext2 = { path = "driver/fs/ext2" }
ygg_driver_fat32.path = "driver/fs/fat32"
log.workspace = true
bitflags.workspace = true
@ -45,6 +49,7 @@ bytemuck.workspace = true
futures-util.workspace = true
crossbeam-queue.workspace = true
async-trait.workspace = true
cfg-if.workspace = true
git-version = "0.3.9"
@ -53,16 +58,20 @@ aarch64-cpu.workspace = true
device-tree.workspace = true
kernel-arch-aarch64.workspace = true
[target.'cfg(target_arch = "riscv64")'.dependencies]
device-tree.workspace = true
kernel-arch-riscv64.workspace = true
ygg_driver_net_stmmac.path = "driver/net/stmmac"
[target.'cfg(target_arch = "x86_64")'.dependencies]
yboot-proto.workspace = true
kernel-arch-x86_64.workspace = true
kernel-arch-x86.workspace = true
ygg_driver_nvme = { path = "driver/block/nvme" }
ygg_driver_acpi.path = "driver/acpi"
ygg_driver_net_igbe.path = "driver/net/igbe"
acpi.workspace = true
aml.workspace = true
acpi-system.workspace = true
[target.'cfg(target_arch = "x86")'.dependencies]
kernel-arch-i686.workspace = true
@ -81,10 +90,21 @@ kernel-arch-x86_64.workspace = true
kernel-arch-i686.workspace = true
kernel-arch-x86.workspace = true
kernel-arch-aarch64.workspace = true
kernel-arch-riscv64.workspace = true
ygg_driver_acpi.path = "driver/acpi"
ygg_driver_net_stmmac.path = "driver/net/stmmac"
[features]
default = ["fb_console"]
fb_console = []
# TODO replace this with a better configuration mechanism
aarch64_board_virt = ["kernel-arch-aarch64/aarch64_board_virt"]
aarch64_board_raspi4b = ["kernel-arch-aarch64/aarch64_board_raspi4b"]
riscv64_board_virt = ["kernel-arch-riscv64/riscv64_board_virt"]
riscv64_board_jh7110 = ["kernel-arch-riscv64/riscv64_board_jh7110"]
[lints]
workspace = true

View File

@ -3,21 +3,22 @@ name = "kernel-arch"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[target.'cfg(all(target_os = "none", target_arch = "x86_64"))'.dependencies]
kernel-arch-x86_64 = { path = "x86_64" }
kernel-arch-x86_64.path = "x86_64"
[target.'cfg(all(target_os = "none", target_arch = "aarch64"))'.dependencies]
kernel-arch-aarch64 = { path = "aarch64" }
kernel-arch-aarch64.path = "aarch64"
[target.'cfg(all(target_os = "none", target_arch = "x86"))'.dependencies]
kernel-arch-i686 = { path = "i686" }
kernel-arch-i686.path = "i686"
[target.'cfg(all(target_os = "none", target_arch = "riscv64"))'.dependencies]
kernel-arch-riscv64.path = "riscv64"
[target.'cfg(not(target_os = "none"))'.dependencies]
kernel-arch-hosted = { path = "hosted" }
kernel-arch-hosted.path = "hosted"
[dependencies]
kernel-arch-interface = { path = "interface" }
kernel-arch-interface.path = "interface"
cfg-if.workspace = true

View File

@ -17,3 +17,11 @@ tock-registers.workspace = true
[build-dependencies]
cc = "1.0"
[features]
default = []
aarch64_board_virt = []
aarch64_board_raspi4b = []
[lints]
workspace = true

View File

@ -23,6 +23,8 @@
.endm
.macro LOAD_TASK_STATE
dsb ishst
// x19 == tpidr_el0, x20 = ttbr0_el1
ldp x19, x20, [sp, #16 * 6]
msr tpidr_el0, x19
@ -36,6 +38,12 @@
ldp x29, x30, [sp, #16 * 5]
add sp, sp, #{context_size}
isb sy
tlbi vmalle1is
ic iallu
dsb ish
isb sy
.endm
__aarch64_task_enter_kernel:
@ -87,7 +95,7 @@ __aarch64_task_enter_user:
mov lr, xzr
dmb ish
dsb ish
isb sy
eret

View File

@ -208,10 +208,12 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
stack.push(mdscr_el1);
stack.push(context.stack_pointer);
let ttbr0 = context.address_space | (context.asid << 48) | 1;
setup_common_context(
&mut stack,
__aarch64_task_enter_user as _,
context.address_space,
ttbr0,
context.thread_pointer as _,
);

View File

@ -1,16 +1,19 @@
#![no_std]
#![feature(naked_functions, trait_upcasting)]
#![feature(naked_functions, decl_macro)]
#![allow(clippy::new_without_default)]
extern crate alloc;
use core::sync::atomic::{AtomicUsize, Ordering};
use aarch64_cpu::registers::{DAIF, MPIDR_EL1, TPIDR_EL1};
use alloc::{boxed::Box, vec::Vec};
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use aarch64_cpu::{
asm::barrier,
registers::{DAIF, MPIDR_EL1, TPIDR_EL1},
};
use alloc::{boxed::Box, sync::Arc, vec::Vec};
use device_api::interrupt::LocalInterruptController;
use kernel_arch_interface::{
cpu::{CpuImpl, IpiQueue},
cpu::{CpuData, CpuImpl, IpiQueue},
guard::IrqGuard,
task::Scheduler,
util::OneTimeInit,
@ -29,9 +32,11 @@ pub struct ArchitectureImpl;
pub trait GicInterface: LocalInterruptController {}
pub struct PerCpuData {
pub gic: OneTimeInit<&'static dyn GicInterface>,
pub gic: OneTimeInit<Arc<dyn GicInterface>>,
}
impl CpuData for PerCpuData {}
static IPI_QUEUES: OneTimeInit<Vec<IpiQueue<ArchitectureImpl>>> = OneTimeInit::new();
pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);
@ -43,6 +48,7 @@ extern "C" fn idle_task(_: usize) -> ! {
}
impl ArchitectureImpl {
#[inline]
pub fn local_cpu_data() -> Option<&'static mut PerCpuData> {
unsafe { (Self::local_cpu() as *mut PerCpuData).as_mut() }
}
@ -63,6 +69,7 @@ impl Architecture for ArchitectureImpl {
DAIF.read(DAIF::I) != 0
}
#[inline(never)]
unsafe fn set_interrupt_mask(mask: bool) -> bool {
let old = Self::interrupt_mask();
if mask {
@ -120,13 +127,7 @@ impl Architecture for ArchitectureImpl {
}
fn local_interrupt_controller() -> Option<&'static dyn LocalInterruptController> {
let local = Self::local_cpu_data()?;
let intc = *local.gic.try_get()?;
Some(intc)
}
fn message_interrupt_controller() -> &'static dyn MessageInterruptController {
todo!()
None
}
fn cpu_available_features<S: Scheduler>(_cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
@ -136,4 +137,30 @@ impl Architecture for ArchitectureImpl {
fn cpu_enabled_features<S: Scheduler>(_cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
None
}
// Cache/barrier operation
/// Orders earlier loads before later memory accesses (`dmb ishld`,
/// Inner Shareable domain).
fn load_barrier() {
barrier::dmb(barrier::ISHLD);
}
/// Orders earlier stores before later memory accesses (`dmb ishst`,
/// Inner Shareable domain).
fn store_barrier() {
barrier::dmb(barrier::ISHST);
}
/// Full data synchronization barrier over the whole system (`dsb sy`).
fn memory_barrier() {
barrier::dsb(barrier::SY);
}
/// Performs `dc ivac` (invalidate data cache by VA to PoC) over every
/// cache line overlapping `range`, with the range widened outward to
/// whole cache lines.
///
/// NOTE(review): `dc ivac` discards dirty data without cleaning it; if
/// callers expect a write-back "flush", `dc civac` may be intended —
/// confirm against the call sites. There is also no trailing `dsb` to
/// guarantee completion before return — verify callers provide one.
fn flush_virtual_range(range: core::ops::Range<usize>) {
// TODO cache line assumed to be 64 bytes
const CLSIZE: usize = 64;
// Round start down and end up to cache-line boundaries.
let start = range.start & !(CLSIZE - 1);
let end = (range.end + (CLSIZE - 1)) & !(CLSIZE - 1);
for line in (start..end).step_by(CLSIZE) {
unsafe {
core::arch::asm!("dc ivac, {address}", address = in(reg) line);
}
}
}
}

View File

@ -1,16 +1,16 @@
use core::{
alloc::Layout,
ops::{Deref, DerefMut},
ptr::addr_of,
sync::atomic::AtomicUsize,
sync::atomic::Ordering,
sync::atomic::{self, AtomicUsize, Ordering},
};
use aarch64_cpu::registers::{TTBR0_EL1, TTBR1_EL1};
use aarch64_cpu::{
asm::barrier,
registers::{MAIR_EL1, PAR_EL1, SCTLR_EL1, TTBR0_EL1, TTBR1_EL1},
};
use kernel_arch_interface::{
mem::{DeviceMemoryAttributes, KernelTableManager, RawDeviceMemoryMapping},
sync::split_spinlock,
KERNEL_VIRT_OFFSET,
split_spinlock, Architecture, KERNEL_VIRT_OFFSET,
};
use libk_mm_interface::{
address::PhysicalAddress,
@ -18,9 +18,11 @@ use libk_mm_interface::{
};
use memtables::aarch64::{FixedTables, KERNEL_L3_COUNT};
use static_assertions::const_assert_eq;
use tock_registers::interfaces::Writeable;
use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
use yggdrasil_abi::error::Error;
use crate::ArchitectureImpl;
use self::table::{PageAttributes, PageEntry, PageTable, L1, L2, L3};
pub mod process;
@ -31,7 +33,11 @@ pub struct KernelTableManagerImpl;
// TODO eliminate this requirement by using precomputed indices
const MAPPING_OFFSET: usize = KERNEL_VIRT_OFFSET;
#[cfg(any(feature = "aarch64_board_virt", rust_analyzer))]
const KERNEL_PHYS_BASE: usize = 0x40080000;
#[cfg(any(feature = "aarch64_board_raspi4b", rust_analyzer))]
const KERNEL_PHYS_BASE: usize = 0x80000;
// Precomputed mappings
const KERNEL_L1_INDEX: usize = page_index::<L1>(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
@ -41,6 +47,9 @@ const KERNEL_END_L2_INDEX: usize = KERNEL_START_L2_INDEX + KERNEL_L3_COUNT;
// Must not be zero, should be at 4MiB
const_assert_eq!(KERNEL_START_L2_INDEX, 0);
// From static mapping
#[cfg(any(feature = "aarch64_board_raspi4b", rust_analyzer))]
const_assert_eq!(KERNEL_L1_INDEX, 0);
#[cfg(any(feature = "aarch64_board_virt", rust_analyzer))]
const_assert_eq!(KERNEL_L1_INDEX, 1);
// Runtime mappings
@ -72,7 +81,7 @@ split_spinlock! {
use libk_mm_interface::KernelImageObject;
#[link_section = ".data.tables"]
static KERNEL_TABLES<lock: ArchitectureImpl>: KernelImageObject<FixedTables> =
static KERNEL_TABLES: KernelImageObject<FixedTables> =
unsafe { KernelImageObject::new(FixedTables::zeroed()) };
}
@ -175,7 +184,7 @@ fn ram_block_flags() -> PageAttributes {
// TODO UXN, PXN
PageAttributes::BLOCK
| PageAttributes::ACCESS
| PageAttributes::SH_INNER
| PageAttributes::SH_OUTER
| PageAttributes::PAGE_ATTR_NORMAL
| PageAttributes::PRESENT
}
@ -199,6 +208,7 @@ unsafe fn map_early_pages(physical: PhysicalAddress, count: usize) -> Result<usi
let page = physical.add(i * L3::SIZE);
// TODO NX, NC
EARLY_MAPPING_L3[i + l3i] = PageEntry::normal_page(page, PageAttributes::empty());
tlb_flush_vaae1(EARLY_MAPPING_OFFSET + (l3i + i) * L3::SIZE);
}
return Ok(EARLY_MAPPING_OFFSET + l3i * L3::SIZE);
@ -216,8 +226,6 @@ unsafe fn unmap_early_page(address: usize) {
assert!(EARLY_MAPPING_L3[l3i].is_present());
EARLY_MAPPING_L3[l3i] = PageEntry::INVALID;
// TODO invalidate tlb
}
/// # Safety
@ -225,13 +233,17 @@ unsafe fn unmap_early_page(address: usize) {
/// Only meant to be used by the architecture initialization functions.
pub unsafe fn map_ram_l1(index: usize) {
if index >= RAM_MAPPING_L1_COUNT {
todo!()
ArchitectureImpl::halt();
}
let mut tables = KERNEL_TABLES.lock();
assert_eq!(tables.l1.data[index + RAM_MAPPING_START_L1I], 0);
let table_index = index + RAM_MAPPING_START_L1I;
tables.l1.data[index + RAM_MAPPING_START_L1I] =
((index * L1::SIZE) as u64) | ram_block_flags().bits();
if tables.l1.data[table_index] != 0 {
ArchitectureImpl::halt();
}
tables.l1.data[table_index] = ((index * L1::SIZE) as u64) | ram_block_flags().bits();
tlb_flush_vaae1(RAM_MAPPING_OFFSET + index * L1::SIZE);
}
// Device mappings
@ -258,6 +270,7 @@ unsafe fn map_device_memory_l3(
// TODO NX, NC
DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::device_page(base.add(j * L3::SIZE));
tlb_flush_vaae1(DEVICE_MAPPING_OFFSET + l2i * L2::SIZE + l3i * L3::SIZE);
}
return Ok(DEVICE_MAPPING_OFFSET + i * L3::SIZE);
@ -266,6 +279,7 @@ unsafe fn map_device_memory_l3(
Err(Error::OutOfMemory)
}
#[allow(unused)]
unsafe fn map_device_memory_l2(
base: PhysicalAddress,
count: usize,
@ -280,14 +294,9 @@ unsafe fn map_device_memory_l2(
for j in 0..count {
DEVICE_MAPPING_L2[i + j] = PageEntry::<L2>::device_block(base.add(j * L2::SIZE));
tlb_flush_vaae1(DEVICE_MAPPING_OFFSET + (i + j) * L2::SIZE);
}
// log::debug!(
// "map l2s: base={:#x}, count={} -> {:#x}",
// base,
// count,
// DEVICE_MAPPING_OFFSET + i * L2::SIZE
// );
return Ok(DEVICE_MAPPING_OFFSET + i * L2::SIZE);
}
@ -313,6 +322,7 @@ pub(crate) unsafe fn map_device_memory(
let address = base_address + l2_offset;
Ok(RawDeviceMemoryMapping::from_raw_parts(
l2_aligned.into_u64(),
address,
base_address,
page_count,
@ -324,6 +334,7 @@ pub(crate) unsafe fn map_device_memory(
let address = base_address + l3_offset;
Ok(RawDeviceMemoryMapping::from_raw_parts(
l3_aligned.into_u64(),
address,
base_address,
page_count,
@ -351,10 +362,90 @@ pub(crate) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping<KernelTabl
}
#[inline]
pub fn tlb_flush_vaae1(mut page: usize) {
page >>= 12;
pub fn tlb_flush_asid(asid: u8) {
barrier::dsb(barrier::ISHST);
let value = (asid as u64) << 48;
unsafe {
core::arch::asm!("tlbi vaae1, {page}", page = in(reg) page);
core::arch::asm!("tlbi aside1, {value}", value = in(reg) value);
}
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
}
/// Invalidates all EL1 TLB entries across the Inner Shareable domain
/// (`tlbi vmalle1is`), fenced so that prior page-table stores are
/// observable before the invalidate and the invalidate completes
/// before this function returns.
#[inline]
pub fn tlb_flush_all() {
// Make preceding table writes visible before the TLBI.
barrier::dsb(barrier::ISHST);
unsafe {
core::arch::asm!("tlbi vmalle1is");
}
// Wait for completion, then resynchronize the instruction stream.
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
}
/// Invalidates the TLB entry for the page containing virtual address
/// `page`, for all ASIDs, via `tlbi vaae1`. The argument register takes
/// the VA shifted right by 12 (page number).
///
/// NOTE(review): `vaae1` is the non-broadcast form — it affects only
/// the executing PE, unlike `vaae1is`; confirm this is intended on SMP.
#[inline]
pub fn tlb_flush_vaae1(page: usize) {
// Make preceding table writes visible before the TLBI.
barrier::dsb(barrier::ISHST);
let argument = page >> 12;
unsafe {
core::arch::asm!("tlbi vaae1, {argument}", argument = in(reg) argument);
}
// Wait for completion, then resynchronize the instruction stream.
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
}
/// Performs a stage-1 EL0 read translation probe of `input` using the
/// `at s1e0r` instruction and reads the outcome from `PAR_EL1`.
///
/// Returns the `PAR_EL1.PA` field on success, or `None` if the
/// translation faulted. (`TranslationSuccessfull` is the spelling used
/// by the `aarch64-cpu` crate's register definition.)
///
/// NOTE(review): `read(PAR_EL1::PA)` yields the raw field value, not a
/// full byte address — confirm callers shift/mask as appropriate.
pub fn at_s1e0r(input: usize) -> Option<u64> {
barrier::dsb(barrier::ISHST);
unsafe {
core::arch::asm!("at s1e0r, {address}", address = in(reg) input);
}
// Ensure PAR_EL1 is updated before it is read.
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
if PAR_EL1.matches_all(PAR_EL1::F::TranslationSuccessfull) {
Some(PAR_EL1.read(PAR_EL1::PA))
} else {
None
}
}
/// Performs a stage-1 EL1 read translation probe of `input` using the
/// `at s1e1r` instruction and reads the outcome from `PAR_EL1`.
///
/// Returns the `PAR_EL1.PA` field on success, or `None` if the
/// translation faulted. See [`at_s1e0r`] for the EL0 variant.
pub fn at_s1e1r(input: usize) -> Option<u64> {
barrier::dsb(barrier::ISHST);
unsafe {
core::arch::asm!("at s1e1r, {address}", address = in(reg) input);
}
// Ensure PAR_EL1 is updated before it is read.
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
if PAR_EL1.matches_all(PAR_EL1::F::TranslationSuccessfull) {
Some(PAR_EL1.read(PAR_EL1::PA))
} else {
None
}
}
/// Invalidates the entire instruction cache to the Point of Unification
/// on this PE (`ic iallu`), with barriers ensuring prior writes complete
/// before the invalidate and the pipeline resynchronizes afterwards.
pub fn ic_iallu() {
// Keep the compiler from reordering memory accesses around the invalidate.
atomic::compiler_fence(Ordering::SeqCst);
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
unsafe {
core::arch::asm!("ic iallu");
}
barrier::isb(barrier::SY);
}
/// Cleans the data-cache line containing address `input` to the Point
/// of Coherency (`dc cvac`).
///
/// NOTE(review): there is no trailing `dsb` here, so completion of the
/// clean is not guaranteed on return — confirm callers fence afterwards.
pub fn dc_cvac(input: usize) {
// Order prior stores before the clean.
barrier::dsb(barrier::ISHST);
unsafe {
core::arch::asm!("dc cvac, {address}", address = in(reg) input);
}
}
/// Returns the physical address of `value`, regardless of whether the
/// caller is still running identity-mapped ("lower half", before the
/// higher-half switch) or already in the higher half, in which case
/// `KERNEL_VIRT_OFFSET` is subtracted.
fn auto_address<T>(value: *const T) -> usize {
let addr = value.addr();
if addr < KERNEL_VIRT_OFFSET {
// Called from lower half
addr
} else {
// Called from higher-half
addr - KERNEL_VIRT_OFFSET
}
}
@ -364,7 +455,8 @@ pub fn tlb_flush_vaae1(mut page: usize) {
///
/// Unsafe, must only be called by BSP during its early init while still in "lower-half"
pub unsafe fn load_fixed_tables() {
let ttbr0 = KERNEL_TABLES.lock().l1.data.as_ptr().addr() as u64;
let ttbr0 = auto_address(&raw const KERNEL_TABLES) as u64;
TTBR0_EL1.set(ttbr0);
TTBR1_EL1.set(ttbr0);
}
@ -376,9 +468,9 @@ pub unsafe fn load_fixed_tables() {
/// Unsafe, must only be called by BSP during its early init, must already be in "higher-half"
pub unsafe fn init_fixed_tables() {
// TODO this could be built in compile-time too?
let mut tables = KERNEL_TABLES.lock();
let early_mapping_l3_phys = addr_of!(EARLY_MAPPING_L3) as usize - KERNEL_VIRT_OFFSET;
let device_mapping_l2_phys = addr_of!(DEVICE_MAPPING_L2) as usize - KERNEL_VIRT_OFFSET;
let mut tables = KERNEL_TABLES.grab();
let early_mapping_l3_phys = auto_address(&raw const EARLY_MAPPING_L3);
let device_mapping_l2_phys = auto_address(&raw const DEVICE_MAPPING_L2);
for i in 0..DEVICE_MAPPING_L3_COUNT {
let device_mapping_l3_phys = PhysicalAddress::from_usize(
@ -390,8 +482,70 @@ pub unsafe fn init_fixed_tables() {
assert_eq!(tables.l2.data[EARLY_MAPPING_L2I], 0);
tables.l2.data[EARLY_MAPPING_L2I] =
(early_mapping_l3_phys as u64) | kernel_table_flags().bits();
tlb_flush_vaae1(EARLY_MAPPING_OFFSET);
assert_eq!(tables.l1.data[DEVICE_MAPPING_L1I], 0);
tables.l1.data[DEVICE_MAPPING_L1I] =
(device_mapping_l2_phys as u64) | kernel_table_flags().bits();
tlb_flush_all();
}
/// Programs MAIR_EL1 with the memory-attribute encodings the page
/// tables refer to by index: normal cacheable, normal non-cacheable,
/// and device memory.
///
/// NOTE(review): the "Attribute 1" section writes `Attr0_Normal_*`
/// fields again (overriding the "Attribute 0" values within the same
/// register write), and "Attribute 2" writes `Attr1_Device` — the
/// comment indices do not match the field names. Confirm the intended
/// MAIR slot assignments against PAGE_ATTR_NORMAL/_NC/_DEVICE (attr
/// indices 0/1/2) in the page-table code.
pub fn setup_memory_attributes() {
// TODO: Figure out why WriteBack_NonTransient_ReadWriteAlloc doesn't work on Pi 4B
MAIR_EL1.write(
//// Attribute 0 -- normal memory
MAIR_EL1::Attr0_Normal_Inner::WriteBack_NonTransient +
MAIR_EL1::Attr0_Normal_Outer::WriteBack_NonTransient +
//// Attribute 1 -- normal non-cacheable memory
MAIR_EL1::Attr0_Normal_Inner::NonCacheable +
MAIR_EL1::Attr0_Normal_Outer::NonCacheable +
//// Attribute 2 -- device memory
MAIR_EL1::Attr1_Device::nonGathering_nonReordering_EarlyWriteAck,
);
}
/// Enables data cache.
///
/// # Safety
///
/// Manipulates low-level machine state, use with care.
pub unsafe fn enable_dcache() {
// Drain pending stores and synchronize before flipping SCTLR_EL1.C.
barrier::dsb(barrier::ISHST);
barrier::isb(barrier::SY);
SCTLR_EL1.modify(SCTLR_EL1::C::Cacheable);
// Make the SCTLR_EL1 change take effect before subsequent accesses.
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
}
/// Enables instruction cache.
///
/// # Safety
///
/// Manipulates low-level machine state, use with care.
pub unsafe fn enable_icache() {
// Synchronize the pipeline before flipping SCTLR_EL1.I.
barrier::isb(barrier::SY);
SCTLR_EL1.modify(SCTLR_EL1::I::Cacheable);
// Make the SCTLR_EL1 change take effect before subsequent fetches.
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
}
/// Disables instruction cache.
///
/// # Safety
///
/// Manipulates low-level machine state, use with care. Might break some instructions.
pub unsafe fn disable_icache() {
barrier::isb(barrier::SY);
// Invalidate the I-cache before turning it off so no stale lines remain.
ic_iallu();
SCTLR_EL1.modify(SCTLR_EL1::I::NonCacheable);
// Make the SCTLR_EL1 change take effect before subsequent fetches.
barrier::dsb(barrier::ISH);
barrier::isb(barrier::SY);
}

View File

@ -7,7 +7,7 @@ use core::{
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::PhysicalRefMut,
process::ProcessAddressSpaceManager,
process::{PageAttributeUpdate, ProcessAddressSpaceManager},
table::{
EntryLevel, EntryLevelDrop, EntryLevelExt, MapAttributes, NextPageTable, TableAllocator,
},
@ -17,8 +17,9 @@ use yggdrasil_abi::error::Error;
use crate::{mem::table::PageEntry, KernelTableManagerImpl};
use super::{
table::{PageTable, L1, L2, L3},
tlb_flush_vaae1,
dc_cvac, ic_iallu,
table::{PageAttributes, PageTable, L1, L2, L3},
tlb_flush_asid, tlb_flush_vaae1,
};
/// AArch64 implementation of a process address space table
@ -49,6 +50,8 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
l1[i] = PageEntry::INVALID;
}
tlb_flush_asid(asid);
Ok(Self {
l1,
asid,
@ -68,17 +71,29 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
) -> Result<(), Error> {
self.write_l3_entry(
address,
PageEntry::normal_page(physical, flags.into()),
PageEntry::normal_page(
physical,
PageAttributes::from(flags) | PageAttributes::NON_GLOBAL,
),
false,
)
}
unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error> {
unsafe fn update_page_attributes(
&mut self,
address: usize,
update: &PageAttributeUpdate,
) -> Result<(), Error> {
self.update_l3_entry(address, |entry| entry.update(update))
}
unsafe fn unmap_page(&mut self, address: usize) -> Result<(PhysicalAddress, bool), Error> {
self.pop_l3_entry(address)
}
fn as_address_with_asid(&self) -> u64 {
unsafe { u64::from(self.l1.as_physical_address()) | ((self.asid as u64) << 48) }
fn as_address_with_asid(&self) -> (u64, u64) {
let physical = unsafe { u64::from(self.l1.as_physical_address()) };
(physical, self.asid as u64)
}
unsafe fn clear(&mut self) {
@ -107,12 +122,38 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
}
l3[l3i] = entry;
dc_cvac((&raw const l3[l3i]).addr());
tlb_flush_vaae1(virt);
Ok(())
}
fn pop_l3_entry(&mut self, virt: usize) -> Result<PhysicalAddress, Error> {
fn update_l3_entry<F: FnOnce(&mut PageEntry<L3>) -> Result<(), Error>>(
&mut self,
virt: usize,
mapper: F,
) -> Result<(), Error> {
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
let l3i = virt.page_index::<L3>();
let mut l2 = self.l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;
let entry = &mut l3[l3i];
if !entry.is_present() {
return Err(Error::DoesNotExist);
}
mapper(entry)?;
ic_iallu();
dc_cvac((&raw const l3[l3i]).addr());
tlb_flush_vaae1(virt);
Ok(())
}
fn pop_l3_entry(&mut self, virt: usize) -> Result<(PhysicalAddress, bool), Error> {
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
let l3i = virt.page_index::<L3>();
@ -121,12 +162,16 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
let mut l2 = self.l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;
let page = l3[l3i].as_page().ok_or(Error::DoesNotExist)?;
let entry = l3[l3i];
let page = entry.as_page().ok_or(Error::DoesNotExist)?;
let dirty = entry.is_dirty();
l3[l3i] = PageEntry::INVALID;
ic_iallu();
dc_cvac((&raw const l3[l3i]).addr());
tlb_flush_vaae1(virt);
Ok(page)
Ok((page, dirty))
}
fn read_l3_entry(&self, virt: usize) -> Option<(PhysicalAddress, MapAttributes)> {

View File

@ -1,12 +1,15 @@
use core::{
fmt,
marker::PhantomData,
ops::{Index, IndexMut, Range},
};
use bitflags::bitflags;
use kernel_arch_interface::KERNEL_VIRT_OFFSET;
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::{PhysicalRef, PhysicalRefMut},
process::PageAttributeUpdate,
table::{
EntryLevel, EntryLevelDrop, MapAttributes, NextPageTable, NonTerminalEntryLevel,
TableAllocator,
@ -16,6 +19,8 @@ use yggdrasil_abi::error::Error;
use crate::KernelTableManagerImpl;
use super::dc_cvac;
bitflags! {
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct PageAttributes: u64 {
@ -37,10 +42,12 @@ bitflags! {
const SH_INNER = 3 << 8;
const PAGE_ATTR_NORMAL = 0 << 2;
const PAGE_ATTR_DEVICE = 1 << 2;
const PAGE_ATTR_NORMAL_NC = 1 << 2;
const PAGE_ATTR_DEVICE = 2 << 2;
const NON_GLOBAL = 1 << 11;
const DIRTY = 1 << 51;
const PXN = 1 << 53;
const UXN = 1 << 54;
}
@ -62,6 +69,13 @@ pub struct L2;
#[derive(Clone, Copy)]
pub struct L3;
#[derive(Debug, Clone, Copy)]
pub enum EntryType {
Table(PhysicalAddress),
Page(PhysicalAddress),
Invalid,
}
impl NonTerminalEntryLevel for L1 {
type NextLevel = L2;
}
@ -101,11 +115,55 @@ impl<L: EntryLevel> PageTable<L> {
Ok(table)
}
/// Creates a reference to [PageTable] from a physical address.
///
/// # Safety
///
/// The function takes in a raw physical address.
pub unsafe fn from_physical(
physical: PhysicalAddress,
) -> Option<PhysicalRefMut<'static, Self, KernelTableManagerImpl>> {
if physical.into_usize() >= KERNEL_VIRT_OFFSET {
// Looks fishy
return None;
}
if !physical.is_aligned_for::<L3>() {
return None;
}
let inner = PhysicalRefMut::map(physical);
Some(inner)
}
}
impl<L: EntryLevel> PageEntry<L> {
const ATTR_MASK: u64 = 0xFFF | (0xFFFF << 48);
pub const INVALID: Self = Self(0, PhantomData);
/// Applies a partial attribute change to this entry, rewriting only
/// the bits covered by `ATTR_MASK` (low 12 bits plus the top 16) and
/// preserving the output-address bits in between.
pub fn update(&mut self, update: &PageAttributeUpdate) -> Result<(), Error> {
let mut attrs = PageAttributes::from_bits_retain(self.0);
if let Some(write) = update.user_write {
// Make writeable/non-writeable
if write {
// Replace the AP field with the user+kernel read-write encoding.
attrs &= !PageAttributes::AP_ACCESS_MASK;
attrs |= PageAttributes::AP_BOTH_READWRITE;
} else {
// NOTE(review): revoking user write access is unimplemented and panics.
todo!();
}
}
if let Some(dirty) = update.dirty {
// Set or clear the DIRTY bit (bit 51).
if dirty {
attrs |= PageAttributes::DIRTY;
} else {
attrs &= !PageAttributes::DIRTY;
}
}
// Splice the updated attribute bits back in, keeping the address field.
self.0 &= !Self::ATTR_MASK;
self.0 |= attrs.bits() & Self::ATTR_MASK;
Ok(())
}
pub const fn is_present(self) -> bool {
self.0 & PageAttributes::PRESENT.bits() != 0
}
@ -115,6 +173,12 @@ impl<L: EntryLevel> PageEntry<L> {
}
}
impl<L: NonTerminalEntryLevel> PageTable<L> {
pub fn walk(&self, index: usize) -> EntryType {
self[index].classify()
}
}
impl<L: NonTerminalEntryLevel + 'static> NextPageTable for PageTable<L> {
type NextLevel = PageTable<L::NextLevel>;
type TableRef = PhysicalRef<'static, PageTable<L::NextLevel>, KernelTableManagerImpl>;
@ -146,6 +210,7 @@ impl<L: NonTerminalEntryLevel + 'static> NextPageTable for PageTable<L> {
unsafe { table.as_physical_address() },
PageAttributes::empty(),
);
dc_cvac((&raw const self[index]).addr());
Ok(table)
}
}
@ -184,6 +249,7 @@ where
}
self[index] = PageEntry::INVALID;
dc_cvac((&raw const self[index]).addr());
}
}
}
@ -202,7 +268,7 @@ impl<L: NonTerminalEntryLevel> PageEntry<L> {
| (PageAttributes::BLOCK
| PageAttributes::PRESENT
| PageAttributes::ACCESS
| PageAttributes::SH_INNER
| PageAttributes::SH_OUTER
| PageAttributes::PAGE_ATTR_NORMAL
| attrs)
.bits(),
@ -231,11 +297,21 @@ impl<L: NonTerminalEntryLevel> PageEntry<L> {
if self.0 & PageAttributes::PRESENT.bits() != 0
&& self.0 & PageAttributes::BLOCK.bits() == 0
{
Some(PhysicalAddress::from_u64(self.0 & !0xFFF))
Some(PhysicalAddress::from_u64(self.0 & !Self::ATTR_MASK))
} else {
None
}
}
/// Classifies this entry as invalid, a pointer to a next-level table,
/// or a terminal page/block mapping (address extracted with the
/// attribute bits masked off).
pub fn classify(self) -> EntryType {
if !self.is_present() {
EntryType::Invalid
} else if let Some(table) = self.as_table() {
EntryType::Table(table)
} else {
EntryType::Page(PhysicalAddress::from_u64(self.0 & !Self::ATTR_MASK))
}
}
}
impl PageEntry<L3> {
@ -260,18 +336,20 @@ impl PageEntry<L3> {
| PageAttributes::PRESENT
| PageAttributes::ACCESS
| PageAttributes::SH_OUTER
| PageAttributes::PAGE_ATTR_DEVICE
| PageAttributes::UXN
| PageAttributes::PXN)
| PageAttributes::PAGE_ATTR_DEVICE)
.bits(),
PhantomData,
)
}
/// Returns `true` if this entry's DIRTY attribute bit (bit 51) is set.
pub fn is_dirty(&self) -> bool {
self.0 & PageAttributes::DIRTY.bits() != 0
}
pub fn as_page(&self) -> Option<PhysicalAddress> {
let mask = (PageAttributes::PRESENT | PageAttributes::PAGE).bits();
if self.0 & mask == mask {
Some(PhysicalAddress::from_u64(self.0 & !0xFFF))
Some(PhysicalAddress::from_u64(self.0 & !Self::ATTR_MASK))
} else {
None
}
@ -307,6 +385,10 @@ impl From<MapAttributes> for PageAttributes {
out |= PageAttributes::AP_KERNEL_READONLY;
}
if value.contains(MapAttributes::DIRTY) {
out |= PageAttributes::DIRTY;
}
if value.contains(MapAttributes::NON_GLOBAL) {
out |= PageAttributes::NON_GLOBAL;
}
@ -329,6 +411,10 @@ impl From<PageAttributes> for MapAttributes {
_ => unreachable!(),
};
if value.contains(PageAttributes::DIRTY) {
out |= MapAttributes::DIRTY;
}
if value.contains(PageAttributes::NON_GLOBAL) {
out |= MapAttributes::NON_GLOBAL;
}
@ -336,3 +422,13 @@ impl From<PageAttributes> for MapAttributes {
out
}
}
impl fmt::Display for EntryType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Table(address) => write!(f, "table @ {address:#x}"),
Self::Page(address) => write!(f, "page @ {address:#x}"),
Self::Invalid => f.write_str("<invalid>"),
}
}
}

View File

@ -7,3 +7,4 @@ edition = "2021"
kernel-arch-interface.workspace = true
yggdrasil-abi.workspace = true
libk-mm-interface.workspace = true
device-api.workspace = true

View File

@ -1,11 +1,13 @@
#![feature(never_type)]
#![feature(never_type, allocator_api, slice_ptr_get)]
use std::{
alloc::{Allocator, Global, Layout},
marker::PhantomData,
sync::atomic::{AtomicBool, Ordering},
};
use device_api::dma::{DmaAllocation, DmaAllocator};
use kernel_arch_interface::{
cpu::IpiQueue,
cpu::{CpuData, IpiQueue},
mem::{
DeviceMemoryAttributes, KernelTableManager, PhysicalMemoryAllocator, RawDeviceMemoryMapping,
},
@ -36,17 +38,21 @@ pub struct TaskContextImpl<K: KernelTableManager, PA: PhysicalMemoryAllocator>(
static DUMMY_INTERRUPT_MASK: AtomicBool = AtomicBool::new(true);
pub struct DummyCpuData;
impl CpuData for DummyCpuData {}
impl Architecture for ArchitectureImpl {
type PerCpuData = ();
type PerCpuData = DummyCpuData;
type CpuFeatures = ();
type BreakpointType = u8;
const BREAKPOINT_VALUE: Self::BreakpointType = 0x00;
fn local_cpu() -> *mut Self::PerCpuData {
fn local_cpu() -> *mut () {
unimplemented!()
}
unsafe fn set_local_cpu(_cpu: *mut Self::PerCpuData) {
unsafe fn set_local_cpu(_cpu: *mut ()) {
unimplemented!()
}
@ -101,6 +107,14 @@ impl Architecture for ArchitectureImpl {
fn ipi_queue(_cpu_id: u32) -> Option<&'static IpiQueue<Self>> {
None
}
fn load_barrier() {}
fn store_barrier() {}
fn memory_barrier() {}
fn flush_virtual_range(_range: std::ops::Range<usize>) {}
}
impl KernelTableManager for KernelTableManagerImpl {
@ -146,7 +160,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
unimplemented!()
}
unsafe fn unmap_page(&mut self, _address: usize) -> Result<PhysicalAddress, Error> {
unsafe fn unmap_page(&mut self, _address: usize) -> Result<(PhysicalAddress, bool), Error> {
unimplemented!()
}
@ -154,7 +168,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
unimplemented!()
}
fn as_address_with_asid(&self) -> u64 {
fn as_address_with_asid(&self) -> (u64, u64) {
unimplemented!()
}
}
@ -198,3 +212,19 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator> TaskContext<K, PA>
extern "Rust" fn __signal_process_group(_group_id: ProcessGroupId, _signal: Signal) {
unimplemented!()
}
pub struct HostedDmaAllocator;
impl DmaAllocator for HostedDmaAllocator {
fn allocate(&self, layout: Layout) -> Result<DmaAllocation, Error> {
let ptr = Global.allocate(layout.align_to(0x1000).unwrap()).unwrap();
let base = ptr.as_non_null_ptr();
let addr: usize = base.addr().into();
Ok(DmaAllocation {
host_virtual: base.cast(),
host_physical: addr as _,
page_count: layout.size().div_ceil(0x1000),
bus_address: addr as _,
})
}
}

View File

@ -8,7 +8,7 @@ use core::ptr::null_mut;
use alloc::vec::Vec;
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use kernel_arch_interface::{
cpu::{CpuImpl, IpiQueue},
cpu::{CpuData, CpuImpl, IpiQueue},
task::Scheduler,
Architecture,
};
@ -29,6 +29,8 @@ pub struct PerCpuData {
pub enabled_features: CpuFeatures,
}
impl CpuData for PerCpuData {}
static mut CPU: *mut () = null_mut();
#[naked]
@ -106,11 +108,11 @@ impl Architecture for ArchitectureImpl {
1
}
fn message_interrupt_controller() -> &'static dyn MessageInterruptController {
unimplemented!()
fn local_interrupt_controller() -> Option<&'static dyn LocalInterruptController> {
None
}
fn local_interrupt_controller() -> Option<&'static dyn LocalInterruptController> {
fn message_interrupt_controller() -> Option<&'static dyn MessageInterruptController> {
None
}

View File

@ -1,8 +1,7 @@
use fixed::FixedTables;
use kernel_arch_interface::{
mem::{DeviceMemoryAttributes, KernelTableManager, RawDeviceMemoryMapping},
sync::split_spinlock,
KERNEL_VIRT_OFFSET,
split_spinlock, KERNEL_VIRT_OFFSET,
};
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
@ -26,7 +25,7 @@ split_spinlock! {
use crate::ArchitectureImpl;
#[link_section = ".data.tables"]
static KERNEL_TABLES<lock: ArchitectureImpl>: KernelImageObject<FixedTables> = unsafe {
static KERNEL_TABLES: KernelImageObject<FixedTables> = unsafe {
KernelImageObject::new(FixedTables::zeroed())
};
}
@ -38,8 +37,12 @@ impl KernelTableManager for KernelTableManagerImpl {
_attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping<Self>, Error> {
// TODO page align up
let end = base + count as u64;
assert_eq!(base & 0xFFF, 0);
let offset = (base & 0xFFF) as usize;
let base = base & !0xFFF;
let end = (base + count as u64 + 0xFFF) & !0xFFF;
// assert_eq!(base & 0xFFF, 0);
if end < fixed::MAX_FIXED_PHYSICAL.into_u64() {
// 1:1
let address = Self::virtualize(base);
@ -53,13 +56,16 @@ impl KernelTableManager for KernelTableManagerImpl {
let virt = KERNEL_TABLES.lock().map_dynamic_memory(base, page_count)?;
Ok(RawDeviceMemoryMapping::from_raw_parts(
virt, virt, page_count, 0,
virt + offset,
virt,
page_count,
0,
))
}
}
unsafe fn unmap_device_pages(_mapping: &RawDeviceMemoryMapping<Self>) {
todo!()
// todo!()
}
fn virtualize(phys: u64) -> usize {

View File

@ -68,8 +68,8 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
self.pop_l3_entry(address)
}
fn as_address_with_asid(&self) -> u64 {
unsafe { self.l0.as_physical_address().into_u64() }
fn as_address_with_asid(&self) -> (u64, u64) {
(unsafe { self.l0.as_physical_address().into_u64() }, 0)
}
}

View File

@ -30,6 +30,18 @@ pub struct IpiQueue<A: Architecture> {
data: IrqSafeSpinlock<A, Option<IpiMessage>>,
}
pub trait CpuData {
fn is_bootstrap(&self, id: u32) -> bool {
// On most architectures
id == 0
}
fn queue_index(&self, id: u32) -> usize {
// On most architectures
id as usize
}
}
pub trait CpuFeatureSet {
fn iter(&self) -> impl Iterator<Item = &'static str>;
}
@ -50,6 +62,14 @@ impl<A: Architecture, S: Scheduler + 'static> CpuImpl<A, S> {
unsafe { A::init_ipi_queues(queues) }
}
pub fn is_bootstrap(&self) -> bool {
self.inner.is_bootstrap(self.id)
}
pub fn queue_index(&self) -> usize {
self.inner.queue_index(self.id)
}
pub fn set_current_thread_id(&mut self, id: Option<S::ThreadId>) {
self.current_thread_id = id;
}

View File

@ -2,13 +2,18 @@
#![feature(step_trait, const_trait_impl, never_type, decl_macro)]
#![allow(clippy::new_without_default)]
use core::ops::Range;
use alloc::vec::Vec;
use cpu::{CpuFeatureSet, CpuImpl, IpiQueue};
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use cpu::{CpuData, CpuFeatureSet, CpuImpl, IpiQueue};
use device_api::interrupt::LocalInterruptController;
use task::Scheduler;
extern crate alloc;
#[macro_use]
pub mod macros;
pub mod cpu;
pub mod guard;
pub mod mem;
@ -16,14 +21,15 @@ pub mod sync;
pub mod task;
pub mod util;
#[cfg(any(target_pointer_width = "32", rust_analyzer))]
#[cfg(any(target_arch = "x86", rust_analyzer))]
pub const KERNEL_VIRT_OFFSET: usize = 0xC0000000;
#[cfg(any(target_pointer_width = "64", rust_analyzer))]
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64", rust_analyzer))]
pub const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
pub const KERNEL_VIRT_OFFSET: usize = 0xFFFFFFF000000000;
pub trait Architecture: Sized + 'static {
type PerCpuData;
type PerCpuData: CpuData;
type CpuFeatures: CpuFeatureSet;
type BreakpointType;
@ -65,11 +71,7 @@ pub trait Architecture: Sized + 'static {
// Architectural devices
fn local_interrupt_controller() -> Option<&'static dyn LocalInterruptController> {
unimplemented!()
}
fn message_interrupt_controller() -> &'static dyn MessageInterruptController {
unimplemented!()
None
}
#[allow(unused)]
@ -80,4 +82,16 @@ pub trait Architecture: Sized + 'static {
fn cpu_enabled_features<S: Scheduler>(cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
None
}
// Cache/barrier operation
fn load_barrier();
fn store_barrier();
fn memory_barrier() {
Self::store_barrier();
Self::load_barrier();
}
/// Flushes/invalidates a range of virtual memory from the CPU's data cache.
fn flush_virtual_range(range: Range<usize>);
}

View File

@ -0,0 +1,92 @@
/// Helper macro to implement "split" locks. This may be needed when a very specific storage
/// layout for the locked type is required.
///
/// Expands to a `pub mod $name` containing:
/// - `__Wrapper`: a `#[repr(transparent)]` wrapper over `UnsafeCell<$ty>`, so the locked
///   value keeps the exact memory layout of `$ty` itself (e.g. for `.data.tables` placement);
/// - the static `$name` itself (re-exported from the module so callers use it directly);
/// - `__Guard`: returned by `lock()`, spins on an atomic word with IRQs masked and releases
///   the word on drop;
/// - `__UnsafeGuard`: returned by `grab()`, takes only the IRQ guard and bypasses the lock
///   word entirely — hence `unsafe`; it does NOT touch `__LOCK` on drop.
///
/// The invoker's `use` items are pasted into the generated module and must bring
/// `ArchitectureImpl` into scope for the IRQ guard types.
#[macro_export]
macro_rules! split_spinlock {
    (
        $(use $use:path;)*

        $(#[$meta:meta])*
        static $name:ident: $ty:ty = $init:expr;
    ) => {
        pub use $name::$name;

        #[allow(non_snake_case)]
        pub mod $name {
            $(use $use;)*

            use core::cell::UnsafeCell;
            use core::sync::atomic::{AtomicU32, Ordering};

            #[repr(transparent)]
            pub struct __Wrapper {
                inner: UnsafeCell<$ty>
            }

            $(#[$meta])*
            pub static $name: __Wrapper = __Wrapper {
                inner: UnsafeCell::new($init)
            };

            // 0 = unlocked, 1 = locked
            static __LOCK: AtomicU32 = AtomicU32::new(0);

            pub struct __Guard($crate::guard::IrqGuard<ArchitectureImpl>);
            pub struct __UnsafeGuard($crate::guard::IrqGuard<ArchitectureImpl>);

            impl __Wrapper {
                /// Acquires the lock, spinning with IRQs masked until it becomes free.
                #[inline(never)]
                pub fn lock(&self) -> __Guard {
                    let irq = $crate::guard::IrqGuard::acquire();
                    while __LOCK.compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed).is_err() {
                        core::hint::spin_loop();
                    }
                    __Guard(irq)
                }

                /// Grants access WITHOUT taking the lock word (IRQs are still masked
                /// for the guard's lifetime).
                ///
                /// # Safety
                ///
                /// The caller must guarantee no concurrent holder of `lock()` can
                /// observe or mutate the value while the returned guard is alive.
                #[inline(never)]
                pub unsafe fn grab(&self) -> __UnsafeGuard {
                    let irq = $crate::guard::IrqGuard::acquire();
                    __UnsafeGuard(irq)
                }
            }

            unsafe impl Sync for __Wrapper {}

            impl core::ops::Deref for __Guard {
                type Target = $ty;

                fn deref(&self) -> &Self::Target {
                    unsafe { &*$name.inner.get() }
                }
            }

            impl core::ops::DerefMut for __Guard {
                fn deref_mut(&mut self) -> &mut Self::Target {
                    unsafe { &mut *$name.inner.get() }
                }
            }

            impl core::ops::Deref for __UnsafeGuard {
                type Target = $ty;

                fn deref(&self) -> &Self::Target {
                    unsafe { &*$name.inner.get() }
                }
            }

            impl core::ops::DerefMut for __UnsafeGuard {
                fn deref_mut(&mut self) -> &mut Self::Target {
                    unsafe { &mut *$name.inner.get() }
                }
            }

            impl Drop for __Guard {
                fn drop(&mut self) {
                    __LOCK.store(0, Ordering::Release)
                }
            }
        }
    };
}

View File

@ -35,6 +35,8 @@ pub struct DeviceMemoryAttributes {
/// Describes a single device memory mapping
#[derive(Debug)]
pub struct RawDeviceMemoryMapping<A: KernelTableManager> {
/// Physical base address of the object
pub physical_base: u64,
/// Virtual address of the mapped object
pub address: usize,
/// Base address of the mapping start
@ -98,7 +100,8 @@ impl<A: KernelTableManager> RawDeviceMemoryMapping<A> {
address
}
pub fn into_raw_parts(self) -> (usize, usize, usize, usize) {
pub fn into_raw_parts(self) -> (u64, usize, usize, usize, usize) {
let physical_base = self.physical_base;
let address = self.address;
let base_address = self.base_address;
let page_count = self.page_count;
@ -106,7 +109,7 @@ impl<A: KernelTableManager> RawDeviceMemoryMapping<A> {
core::mem::forget(self);
(address, base_address, page_count, page_size)
(physical_base, address, base_address, page_count, page_size)
}
/// # Safety
@ -114,12 +117,14 @@ impl<A: KernelTableManager> RawDeviceMemoryMapping<A> {
/// Preconditions: all the fields must come from a [RawDeviceMemoryMapping::into_raw_parts]
/// call.
pub unsafe fn from_raw_parts(
physical_base: u64,
address: usize,
base_address: usize,
page_count: usize,
page_size: usize,
) -> Self {
Self {
physical_base,
address,
base_address,
page_count,

View File

@ -154,67 +154,6 @@ impl<A: Architecture, T> DerefMut for IrqSafeSpinlockGuard<'_, A, T> {
}
}
/// Helper macro to implement "split" locks. This may be needed when a very specific storage
/// layout for the locked type is required.
pub macro split_spinlock(
$(use $use:path;)*
$(#[$meta:meta])*
static $name:ident<$lock:ident: $arch:ty>: $ty:ty = $init:expr;
) {
pub use $name::$name;
#[allow(non_snake_case)]
pub mod $name {
$(use $use;)*
use core::cell::UnsafeCell;
use core::marker::PhantomData;
use core::sync::atomic::{AtomicBool, Ordering};
#[repr(transparent)]
pub struct __Wrapper(UnsafeCell<$ty>);
$(#[$meta])*
pub static $name: __Wrapper = __Wrapper(UnsafeCell::new($init));
static __LOCK: AtomicBool = AtomicBool::new(false);
pub struct __Guard($crate::guard::IrqGuard<$arch>);
impl __Wrapper {
pub fn $lock(&self) -> __Guard {
let irq = $crate::guard::IrqGuard::acquire();
while __LOCK.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
core::hint::spin_loop();
}
__Guard(irq)
}
}
unsafe impl Sync for __Wrapper {}
impl core::ops::Deref for __Guard {
type Target = $ty;
fn deref(&self) -> &Self::Target {
unsafe { &*$name.0.get() }
}
}
impl core::ops::DerefMut for __Guard {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *$name.0.get() }
}
}
impl Drop for __Guard {
fn drop(&mut self) {
__LOCK.store(false, Ordering::Release)
}
}
}
}
static LOCK_HACK: AtomicBool = AtomicBool::new(false);
/// "Hacks" all the locks in the kernel to make them function as "NULL"-locks instead of spinlocks.

View File

@ -83,6 +83,7 @@ pub struct UserContextInfo {
pub stack_pointer: usize,
pub thread_pointer: usize,
pub address_space: u64,
pub asid: u64,
pub single_step: bool,
}

View File

@ -45,31 +45,80 @@ impl<T> OneTimeInit<T> {
}
pub fn try_init_with_opt<F: FnOnce() -> Result<T, Error>>(&self, f: F) -> Result<&T, Error> {
if self
.state
.compare_exchange(
Self::STATE_UNINITIALIZED,
Self::STATE_INITIALIZING,
Ordering::Release,
Ordering::Relaxed,
)
.is_err()
{
if !self.try_begin_init() {
// Already initialized
return Err(Error::AlreadyExists);
}
let value = match f() {
Ok(val) => val,
Err(err) => {
self.state
.store(Self::STATE_UNINITIALIZED, Ordering::Release);
return Err(err);
match f() {
Ok(value) => {
let value = unsafe { (*self.value.get()).write(value) };
self.finish_init();
Ok(value)
}
};
Err(error) => {
self.fail_init();
Err(error)
}
}
}
let value = unsafe { (*self.value.get()).write(value) };
pub fn or_init_with<F: FnOnce() -> T>(&self, f: F) -> &T {
if !self.try_begin_init() {
return self.wait_for_init();
}
let value = unsafe { (*self.value.get()).write(f()) };
self.finish_init();
value
}
pub fn or_init_with_opt<F: FnOnce() -> Option<T>>(&self, f: F) -> Option<&T> {
if !self.try_begin_init() {
return Some(self.wait_for_init());
}
match f() {
Some(value) => {
let value = unsafe { (*self.value.get()).write(value) };
self.finish_init();
Some(value)
}
None => {
self.fail_init();
None
}
}
}
pub fn or_try_init_with<F: FnOnce() -> Result<T, Error>>(&self, f: F) -> Result<&T, Error> {
if !self.try_begin_init() {
return Ok(self.wait_for_init());
}
match f() {
Ok(value) => {
let value = unsafe { (*self.value.get()).write(value) };
self.finish_init();
Ok(value)
}
Err(error) => {
// Init failed
self.fail_init();
Err(error)
}
}
}
fn try_begin_init(&self) -> bool {
self.state
.compare_exchange(
Self::STATE_UNINITIALIZED,
Self::STATE_INITIALIZING,
Ordering::Acquire,
Ordering::Relaxed,
)
.is_ok()
}
fn finish_init(&self) {
self.state
.compare_exchange(
Self::STATE_INITIALIZING,
@ -78,37 +127,38 @@ impl<T> OneTimeInit<T> {
Ordering::Relaxed,
)
.unwrap();
Ok(value)
}
pub fn try_init_with<F: FnOnce() -> T>(&self, f: F) -> Option<&T> {
if self
.state
fn fail_init(&self) {
self.state
.compare_exchange(
Self::STATE_UNINITIALIZED,
Self::STATE_INITIALIZING,
Self::STATE_UNINITIALIZED,
Ordering::Release,
Ordering::Relaxed,
)
.is_err()
{
.unwrap();
}
fn wait_for_init(&self) -> &T {
while self.state.load(Ordering::Acquire) != Self::STATE_INITIALIZED {
core::hint::spin_loop();
}
unsafe { (*self.value.get()).assume_init_ref() }
}
pub fn try_init_with<F: FnOnce() -> T>(&self, f: F) -> Result<&T, Error> {
if !self.try_begin_init() {
// Already initialized
return None;
return Err(Error::AlreadyExists);
}
let value = unsafe { (*self.value.get()).write(f()) };
self.state
.compare_exchange(
Self::STATE_INITIALIZING,
Self::STATE_INITIALIZED,
Ordering::Release,
Ordering::Relaxed,
)
.unwrap();
self.finish_init();
Some(value)
Ok(value)
}
/// Sets the underlying value of the [OneTimeInit]. If already initialized, panics.

View File

@ -0,0 +1,26 @@
[package]
name = "kernel-arch-riscv64"
version = "0.1.0"
edition = "2024"
[dependencies]
yggdrasil-abi.workspace = true
kernel-arch-interface.workspace = true
libk-mm-interface.workspace = true
memtables.workspace = true
device-api = { workspace = true, features = ["derive"] }
tock-registers.workspace = true
bitflags.workspace = true
static_assertions.workspace = true
log.workspace = true
cfg-if.workspace = true
[features]
default = []
riscv64_board_virt = []
riscv64_board_jh7110 = []
[lints]
workspace = true

View File

@ -0,0 +1,128 @@
// vi:ft=asm:
// riscv64 task context switch primitives. The stack frame layout here must
// stay in sync with CONTEXT_SIZE and setup_common_context() in context.rs
// (14 * 8 bytes: ra, gp, s0..s11).
.section .text

// Pushes the callee-saved integer state onto the current stack.
// Only ra, gp and s0..s11 are saved: caller-saved registers are already
// preserved by the Rust caller per the RISC-V calling convention.
.macro SAVE_TASK_STATE
    addi sp, sp, -{context_size}

    sd ra, 0 * 8(sp)
    sd gp, 1 * 8(sp)
    sd s11, 2 * 8(sp)
    sd s10, 3 * 8(sp)
    sd s9, 4 * 8(sp)
    sd s8, 5 * 8(sp)
    sd s7, 6 * 8(sp)
    sd s6, 7 * 8(sp)
    sd s5, 8 * 8(sp)
    sd s4, 9 * 8(sp)
    sd s3, 10 * 8(sp)
    sd s2, 11 * 8(sp)
    sd s1, 12 * 8(sp)
    sd s0, 13 * 8(sp)
.endm

// Restores the state saved by SAVE_TASK_STATE and pops the context frame.
.macro LOAD_TASK_STATE
    ld ra, 0 * 8(sp)
    ld gp, 1 * 8(sp)
    ld s11, 2 * 8(sp)
    ld s10, 3 * 8(sp)
    ld s9, 4 * 8(sp)
    ld s8, 5 * 8(sp)
    ld s7, 6 * 8(sp)
    ld s6, 7 * 8(sp)
    ld s5, 8 * 8(sp)
    ld s4, 9 * 8(sp)
    ld s3, 10 * 8(sp)
    ld s2, 11 * 8(sp)
    ld s1, 12 * 8(sp)
    ld s0, 13 * 8(sp)

    addi sp, sp, {context_size}
.endm

// Use uncompressed (4-byte) encodings for the functions below.
.option push
.option norvc

.global __rv64_task_enter_kernel
.global __rv64_task_enter_user

.global __rv64_switch_task
.global __rv64_switch_task_and_drop
.global __rv64_enter_task

// Context switching

// First entry into a task: load its saved sp and pop its context frame.
.type __rv64_enter_task, @function
__rv64_enter_task:
    // a0 - task ctx
    ld sp, (a0)
    LOAD_TASK_STATE
    ret
.size __rv64_enter_task, . - __rv64_enter_task

// Save the outgoing task's state, stash its sp, then resume the incoming one.
.type __rv64_switch_task, @function
__rv64_switch_task:
    // a0 - destination task ctx
    // a1 - source task ctx
    SAVE_TASK_STATE
    sd sp, (a1)

    ld sp, (a0)
    LOAD_TASK_STATE

    ret
.size __rv64_switch_task, . - __rv64_switch_task

// Switch to the destination task without saving the outgoing state; the
// outgoing thread struct is handed to __arch_drop_thread for destruction.
.type __rv64_switch_task_and_drop, @function
__rv64_switch_task_and_drop:
    // a0 - destination task ctx
    // a1 - thread struct to drop
    ld sp, (a0)

    mv a0, a1
    call __arch_drop_thread

    LOAD_TASK_STATE
    ret
.size __rv64_switch_task_and_drop, . - __rv64_switch_task_and_drop

// Entry functions

// Trampoline for kernel tasks: pops (argument, entry) pushed by
// TaskContextImpl::kernel() and sret-s into S-mode at the entry point.
.type __rv64_task_enter_kernel, @function
__rv64_task_enter_kernel:
    ld a0, (sp)             // argument
    ld ra, 8(sp)            // entry
    addi sp, sp, 16

    // Set SPIE to enable interrupts
    // Set SPP = 1 to indicate a return to S-mode
    csrr t0, sstatus
    ori t0, t0, (1 << 5)    // sstatus.SPIE
    ori t0, t0, (1 << 8)    // sstatus.SPP = 1 (S-mode)
    csrw sstatus, t0

    csrw sepc, ra
    csrw sscratch, zero

    sret
.size __rv64_task_enter_kernel, . - __rv64_task_enter_kernel

// Trampoline for user tasks: pops (argument, entry, thread pointer, user sp)
// pushed by TaskContextImpl::user(), saves the kernel tp into sscratch and
// sret-s into U-mode.
.type __rv64_task_enter_user, @function
__rv64_task_enter_user:
    csrw sscratch, tp

    ld a0, 0 * 8(sp)        // argument
    ld ra, 1 * 8(sp)        // entry
    ld tp, 2 * 8(sp)        // thread pointer
    ld sp, 3 * 8(sp)        // user stack

    // Set SPIE to enable interrupts
    // Set SPP = 0 to indicate a return to U-mode
    li t1, (1 << 8)
    not t1, t1
    csrr t0, sstatus
    ori t0, t0, (1 << 5)    // sstatus.SPIE
    and t0, t0, t1          // sstatus.SPP = 0 (U-mode)
    csrw sstatus, t0

    csrw sepc, ra
    sret
.size __rv64_task_enter_user, . - __rv64_task_enter_user

.option pop

View File

@ -0,0 +1,222 @@
use core::{arch::global_asm, cell::UnsafeCell, marker::PhantomData};
use kernel_arch_interface::{
mem::{KernelTableManager, PhysicalMemoryAllocator},
task::{StackBuilder, TaskContext, UserContextInfo},
Architecture,
};
use libk_mm_interface::address::PhysicalAddress;
use tock_registers::{
interfaces::{Readable, Writeable},
registers::InMemoryRegister,
};
use yggdrasil_abi::error::Error;
use crate::{
mem::{self, KERNEL_VIRT_OFFSET},
registers::SATP,
ArchitectureImpl, PerCpuData,
};
pub const CONTEXT_SIZE: usize = 14 * size_of::<usize>();
/// In-memory portion of a task context accessed from both Rust and the
/// context-switch assembly (`context.S`). `repr(C)` pins the field order;
/// the saved stack pointer must stay at offset 0x00, since the assembly
/// loads/stores it via `(a0)`/`(a1)`.
#[repr(C, align(0x10))]
struct TaskContextInner {
    // 0x00 — saved kernel stack pointer, read/written by context.S
    sp: usize,

    // SATP image (mode + ASID + root table PPN) installed on switch-in;
    // see TaskContextImpl::load_state().
    satp: InMemoryRegister<u64, SATP::Register>,
}
/// riscv64 implementation of a schedulable task context: the saved register
/// frame (via [TaskContextInner]) plus the kernel stack backing it.
pub struct TaskContextImpl<
    K: KernelTableManager,
    PA: PhysicalMemoryAllocator<Address = PhysicalAddress>,
> {
    // Shared with the context-switch assembly; UnsafeCell because the asm
    // writes through a raw pointer obtained from inner.get().
    inner: UnsafeCell<TaskContextInner>,
    // fp_context: UnsafeCell<FpContext>,
    // Physical base of the contiguous stack allocation, freed in Drop.
    stack_base_phys: PhysicalAddress,
    // Virtual top of the kernel stack; published to the per-CPU area on
    // switch-in (0 for kernel-only tasks, see kernel()).
    stack_top: usize,
    // Total stack size in bytes; always a whole number of 4 KiB pages.
    stack_size: usize,

    _pd: PhantomData<(K, PA)>,
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>>
    TaskContextImpl<K, PA>
{
    /// Installs this task's address space and kernel stack on the current CPU.
    ///
    /// If the task's SATP image differs from the currently loaded one, the
    /// TLB entries for the task's ASID are flushed before SATP is rewritten.
    /// Finally the task's kernel stack top is published to the per-CPU area
    /// (`smode_sp`) for use by the trap entry path.
    ///
    /// # Safety
    ///
    /// Must only be called on the CPU that is about to run this task, with
    /// a valid per-CPU structure installed in `tp`.
    unsafe fn load_state(&self) {
        let inner = unsafe { &*self.inner.get() };
        let cpu = unsafe { &mut *ArchitectureImpl::local_cpu().cast::<PerCpuData>() };

        // Copy new SATP, flushing this ASID's stale translations first
        let satp = inner.satp.get();
        let asid = inner.satp.read(SATP::ASID);
        if satp != SATP.get() {
            mem::tlb_flush_asid(asid as usize);
            SATP.set(satp);
        }

        cpu.smode_sp = self.stack_top;
    }

    /// Saves CPU state owned by the outgoing task. Currently a no-op: the
    /// integer registers are saved by the assembly switch path, and no FP
    /// state is maintained yet (see the commented-out `fp_context` field).
    unsafe fn store_state(&self) {}
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>>
    TaskContext<K, PA> for TaskContextImpl<K, PA>
{
    const USER_STACK_EXTRA_ALIGN: usize = 8;
    const SIGNAL_STACK_EXTRA_ALIGN: usize = 0;

    /// Builds a context for a userspace thread.
    ///
    /// Allocates a 16-page kernel stack and seeds it with the frame expected
    /// by `__rv64_task_enter_user` (user sp, tp, entry, argument — popped in
    /// reverse push order) followed by the common callee-saved frame. The
    /// SATP image is assembled from the caller-provided ASID and root table
    /// address (`address_space` is a physical address; `>> 12` yields the PPN).
    fn user(context: UserContextInfo) -> Result<Self, Error> {
        const USER_TASK_PAGES: usize = 16;

        let stack_base_phys = PA::allocate_contiguous_pages(USER_TASK_PAGES)?;
        let stack_base = stack_base_phys.raw_virtualize::<K>();
        let mut stack = StackBuilder::new(stack_base, USER_TASK_PAGES * 0x1000);

        log::debug!(
            "Set up user task: pc={:#x}, sp={:#x}, tp={:#x}",
            context.entry,
            context.stack_pointer,
            context.thread_pointer
        );

        // Push order mirrors the pop order in __rv64_task_enter_user
        stack.push(context.stack_pointer);
        stack.push(context.thread_pointer);
        stack.push(context.entry);
        stack.push(context.argument);

        setup_common_context(&mut stack, __rv64_task_enter_user as _);

        let sp = stack.build();
        let satp = InMemoryRegister::new(0);
        satp.write(
            SATP::MODE::Sv39
                + SATP::ASID.val(context.asid)
                + SATP::PPN.val(context.address_space >> 12),
        );

        Ok(Self {
            inner: UnsafeCell::new(TaskContextInner { sp, satp }),
            // fp_context: UnsafeCell::new(FpContext::new()),
            stack_base_phys,
            stack_top: stack_base + USER_TASK_PAGES * 0x1000,
            stack_size: USER_TASK_PAGES * 0x1000,
            _pd: PhantomData,
        })
    }

    /// Builds a context for a kernel-mode task running `entry(arg)`.
    ///
    /// Uses the shared kernel page tables (ASID 0) and an 8-page stack; the
    /// trampoline frame matches what `__rv64_task_enter_kernel` pops.
    /// `stack_top` is left 0 — kernel tasks never re-enter via the user trap
    /// path, so no per-CPU S-mode stack pointer is needed.
    fn kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Result<Self, Error> {
        const KERNEL_TASK_PAGES: usize = 8;

        let stack_base_phys = PA::allocate_contiguous_pages(KERNEL_TASK_PAGES)?;
        let stack_base = stack_base_phys.raw_virtualize::<K>();

        let mut stack = StackBuilder::new(stack_base, KERNEL_TASK_PAGES * 0x1000);

        // Entry and argument
        stack.push(entry as _);
        stack.push(arg);

        setup_common_context(&mut stack, __rv64_task_enter_kernel as _);

        let sp = stack.build();
        // TODO stack is leaked
        let satp = InMemoryRegister::new(0);
        // KERNEL_TABLES is linked at a virtual address; subtract the kernel
        // offset to recover its physical location for SATP.PPN.
        let kernel_table_phys =
            ((&raw const mem::KERNEL_TABLES).addr() - KERNEL_VIRT_OFFSET) as u64;
        satp.write(SATP::MODE::Sv39 + SATP::ASID.val(0) + SATP::PPN.val(kernel_table_phys >> 12));

        Ok(Self {
            inner: UnsafeCell::new(TaskContextInner { sp, satp }),
            // fp_context: UnsafeCell::new(FpContext::new()),
            stack_base_phys,
            stack_top: 0,
            stack_size: KERNEL_TASK_PAGES * 0x1000,
            _pd: PhantomData,
        })
    }

    /// Not implemented yet on riscv64.
    fn set_thread_pointer(&self, tp: usize) {
        let _ = tp;
        todo!()
    }

    /// No extra alignment adjustment is needed beyond USER_STACK_EXTRA_ALIGN.
    fn align_stack_for_entry(sp: usize) -> usize {
        sp
    }

    /// Enters this context for the first time on the current CPU; never returns.
    unsafe fn enter(&self) -> ! {
        unsafe {
            self.load_state();
            __rv64_enter_task(self.inner.get())
        }
    }

    /// Switches from `from` to `self`, saving the outgoing callee-saved state.
    /// A self-switch is a no-op.
    unsafe fn switch(&self, from: &Self) {
        if core::ptr::addr_eq(self, from) {
            return;
        }
        unsafe {
            from.store_state();
            self.load_state();
            __rv64_switch_task(self.inner.get(), from.inner.get())
        }
    }

    /// Switches to `self` and hands `thread` to `__arch_drop_thread` for
    /// destruction; the outgoing context is not saved.
    unsafe fn switch_and_drop(&self, thread: *const ()) {
        unsafe {
            self.load_state();
            __rv64_switch_task_and_drop(self.inner.get(), thread)
        }
    }
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>> Drop
    for TaskContextImpl<K, PA>
{
    /// Returns the kernel stack pages backing this context to the allocator.
    fn drop(&mut self) {
        assert_eq!(self.stack_size % 0x1000, 0);
        let page_count = self.stack_size / 0x1000;
        for page in 0..page_count {
            // SAFETY: the stack was allocated as `page_count` contiguous
            // pages from PA in user()/kernel() and the task no longer runs.
            unsafe { PA::free_page(self.stack_base_phys.add(page * 0x1000)) };
        }
    }
}
/// Pushes the callee-saved register frame consumed by `LOAD_TASK_STATE` in
/// context.S onto `builder`, with `entry` as the return address.
///
/// Frame contents (in pop order): ra = `entry`, then gp and s11..s0, all
/// zero-initialized. Must stay in sync with CONTEXT_SIZE (14 * 8 bytes).
fn setup_common_context(builder: &mut StackBuilder, entry: usize) {
    // s0..s11 (12 registers) followed by gp — all start out as zero.
    for _ in 0..13 {
        builder.push(0);
    }
    builder.push(entry); // x1/ra — return address: the entry trampoline
}
unsafe extern "C" {
fn __rv64_enter_task(to: *mut TaskContextInner) -> !;
fn __rv64_switch_task(to: *mut TaskContextInner, from: *mut TaskContextInner);
fn __rv64_switch_task_and_drop(to: *mut TaskContextInner, thread: *const ()) -> !;
fn __rv64_task_enter_kernel();
fn __rv64_task_enter_user();
// fn __rv64_fp_store_context(to: *mut c_void);
// fn __rv64_fp_restore_context(from: *const c_void);
}
global_asm!(
include_str!("context.S"),
context_size = const CONTEXT_SIZE,
);

View File

@ -0,0 +1,6 @@
/// Reads the RISC-V `time` CSR via the `rdtime` pseudo-instruction.
///
/// Returns the current value of the monotonic hardware time counter; the
/// counter's tick frequency is platform-defined and must be obtained
/// separately (e.g. from the devicetree).
#[inline]
pub fn rdtime() -> u64 {
    // Deferred initialization: the asm `out` operand performs the single
    // write, so no `mut` is needed (same style as the `tp` read in lib.rs).
    let output: u64;
    // SAFETY: `rdtime` only reads the time CSR and has no side effects on
    // memory or other architectural state.
    unsafe { core::arch::asm!("rdtime {0}", out(reg) output) };
    output
}

View File

@ -0,0 +1,186 @@
#![feature(decl_macro, naked_functions)]
#![no_std]
extern crate alloc;
use core::{
ops::Range,
sync::atomic::{AtomicUsize, Ordering},
};
use alloc::{boxed::Box, collections::btree_map::BTreeMap, vec::Vec};
use device_api::interrupt::LocalInterruptController;
use kernel_arch_interface::{
cpu::{CpuData, CpuImpl, IpiQueue},
sync::IrqSafeSpinlock,
task::Scheduler,
util::OneTimeInit,
Architecture,
};
use tock_registers::interfaces::{ReadWriteable, Readable};
use registers::SSTATUS;
pub mod mem;
pub use mem::{process::ProcessAddressSpaceImpl, KernelTableManagerImpl};
pub mod context;
pub use context::TaskContextImpl;
pub mod intrinsics;
pub mod registers;
pub mod sbi;
pub struct ArchitectureImpl;
/// Per-HART data addressed through `tp`. `repr(C)` fixes the layout because
/// the first three fields are accessed by fixed offset from assembly.
#[repr(C)]
pub struct PerCpuData {
    // Used in assembly
    pub tmp_t0: usize,   // 0x00 — scratch slot for t0 in the trap path
    pub umode_sp: usize, // 0x08 — saved U-mode stack pointer
    pub smode_sp: usize, // 0x10 — S-mode stack top, set by load_state()
    // Used elsewhere
    pub bootstrap: bool,   // whether this HART is the boot processor
    pub queue_index: usize, // index into IPI_QUEUES for this HART
}
pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);
static IPI_QUEUES: OneTimeInit<Vec<IpiQueue<ArchitectureImpl>>> = OneTimeInit::new();
static HART_TO_QUEUE: IrqSafeSpinlock<ArchitectureImpl, BTreeMap<u32, usize>> =
IrqSafeSpinlock::new(BTreeMap::new());
impl CpuData for PerCpuData {
    /// The boot HART is recorded explicitly at bring-up; the HART ID is
    /// ignored since RISC-V HART IDs need not start at zero.
    fn is_bootstrap(&self, _id: u32) -> bool {
        self.bootstrap
    }

    /// The IPI queue index is assigned at bring-up rather than derived
    /// from the (possibly sparse) HART ID.
    fn queue_index(&self, _id: u32) -> usize {
        self.queue_index
    }
}
/// Minimal idle loop run by a CPU with no other work.
///
/// Naked so that no prologue is emitted (it is entered with a context-switch
/// frame, not a normal call); it never returns — the scheduler switches away
/// from it via the usual context-switch path.
#[naked]
extern "C" fn idle_task(_: usize) -> ! {
    unsafe {
        core::arch::naked_asm!("1: nop; j 1b");
    }
}
impl ArchitectureImpl {
    /// Invokes `f` once per registered HART with its ID, its IPI queue index
    /// and a reference to the queue itself.
    pub fn for_each_hart<F: FnMut(u32, usize, &IpiQueue<ArchitectureImpl>)>(mut f: F) {
        let map = HART_TO_QUEUE.lock();
        // IPI_QUEUES.get() returns a 'static reference, so hoist it out of
        // the loop instead of re-fetching per HART.
        let queues = IPI_QUEUES.get();
        for (&hart_id, &queue_index) in map.iter() {
            f(hart_id, queue_index, &queues[queue_index]);
        }
    }
}
impl Architecture for ArchitectureImpl {
    type PerCpuData = PerCpuData;
    type CpuFeatures = ();
    type BreakpointType = u32;

    const BREAKPOINT_VALUE: Self::BreakpointType = 0;

    /// Parks the CPU forever: interrupts masked, `wfi` in a loop.
    fn halt() -> ! {
        loop {
            unsafe { Self::set_interrupt_mask(true) };
            Self::wait_for_interrupt();
        }
    }

    /// Stores the per-CPU pointer in `tp`, from which all per-HART state
    /// (including the assembly-visible PerCpuData fields) is reached.
    unsafe fn set_local_cpu(cpu: *mut ()) {
        unsafe { core::arch::asm!("mv tp, {0}", in(reg) cpu) };
    }

    /// Reads the per-CPU pointer back out of `tp`.
    #[inline]
    fn local_cpu() -> *mut () {
        let value: u64;
        unsafe { core::arch::asm!("mv {0}, tp", out(reg) value) };

        value as _
    }

    /// Registers this HART's IPI queue mapping and leaks a CpuImpl for it,
    /// installing the pointer into `tp`. The leak is intentional: per-CPU
    /// structures live for the lifetime of the kernel.
    unsafe fn init_local_cpu<S: Scheduler + 'static>(id: Option<u32>, data: Self::PerCpuData) {
        let id = id.expect("riscv64 requires an explicit HART ID in its per-processor struct");
        let queue_index = data.queue_index;
        HART_TO_QUEUE.lock().insert(id, queue_index);
        let cpu = Box::leak(Box::new(CpuImpl::<Self, S>::new(id, data)));
        unsafe { cpu.set_local() };
    }

    /// One-time installation of the global IPI queue set.
    unsafe fn init_ipi_queues(queues: Vec<IpiQueue<Self>>) {
        IPI_QUEUES.init(queues);
    }

    /// Looks up a HART's IPI queue; None if the HART is unknown or queues
    /// have not been initialized yet.
    fn ipi_queue(cpu_id: u32) -> Option<&'static IpiQueue<Self>> {
        let queue_index = *HART_TO_QUEUE.lock().get(&cpu_id)?;
        IPI_QUEUES.try_get().and_then(|q| q.get(queue_index))
    }

    /// Sets (`mask == true`) or clears the S-mode interrupt enable bit,
    /// returning the previous mask state.
    #[inline]
    unsafe fn set_interrupt_mask(mask: bool) -> bool {
        let old = Self::interrupt_mask();
        if mask {
            SSTATUS.modify(SSTATUS::SIE::CLEAR);
        } else {
            SSTATUS.modify(SSTATUS::SIE::SET);
        }
        old
    }

    /// true when S-mode interrupts are currently masked (SIE clear).
    #[inline]
    fn interrupt_mask() -> bool {
        SSTATUS.matches_all(SSTATUS::SIE::CLEAR)
    }

    fn wait_for_interrupt() {
        unsafe {
            core::arch::asm!("wfi");
        }
    }

    fn cpu_count() -> usize {
        CPU_COUNT.load(Ordering::Acquire)
    }

    fn cpu_index<S: Scheduler + 'static>() -> u32 {
        CpuImpl::<Self, S>::local().id()
    }

    fn cpu_enabled_features<S: Scheduler>(cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
        let _ = cpu;
        todo!()
    }

    fn cpu_available_features<S: Scheduler>(cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
        let _ = cpu;
        todo!()
    }

    fn local_interrupt_controller() -> Option<&'static dyn LocalInterruptController> {
        todo!()
    }

    fn idle_task() -> extern "C" fn(usize) -> ! {
        idle_task
    }

    // Cache/barrier operation
    // NOTE(review): the predecessor/successor sets below look asymmetric:
    // `fence r, w` orders prior loads before later *stores* only, and
    // `fence w, r` orders prior stores before later *loads* only. A
    // conventional load barrier is `fence r, rw` and a store barrier
    // `fence w, w` (or `fence rw, w` for release) — confirm the intended
    // semantics against the Architecture trait contract.
    fn load_barrier() {
        unsafe { core::arch::asm!("fence r, w") };
    }

    fn store_barrier() {
        unsafe { core::arch::asm!("fence w, r") };
    }

    fn memory_barrier() {
        unsafe { core::arch::asm!("fence rw, rw") };
    }

    /// Cache flush for a virtual range — no-op for now on riscv64.
    fn flush_virtual_range(_range: Range<usize>) {
        // TODO
    }
}

View File

@ -0,0 +1,359 @@
use cfg_if::cfg_if;
use kernel_arch_interface::{
mem::{DeviceMemoryAttributes, KernelTableManager, RawDeviceMemoryMapping},
split_spinlock,
};
use libk_mm_interface::{
address::PhysicalAddress,
table::{page_index, EntryLevel, EntryLevelExt},
};
use memtables::riscv64::PageAttributes;
use static_assertions::{const_assert, const_assert_eq};
use table::{PageEntry, PageTable, L1, L2, L3};
use tock_registers::interfaces::Writeable;
use yggdrasil_abi::error::Error;
pub use memtables::riscv64::FixedTables;
use crate::registers::SATP;
pub mod process;
pub mod table;
split_spinlock! {
use crate::ArchitectureImpl;
use crate::mem::FixedTables;
use libk_mm_interface::KernelImageObject;
#[link_section = ".data.tables"]
#[used]
static KERNEL_TABLES: KernelImageObject<FixedTables> =
unsafe { KernelImageObject::new(FixedTables::zeroed()) };
}
cfg_if! {
if #[cfg(feature = "riscv64_board_virt")] {
pub const KERNEL_PHYS_BASE: usize = 0x80200000;
} else if #[cfg(feature = "riscv64_board_jh7110")] {
pub const KERNEL_PHYS_BASE: usize = 0x40200000;
} else if #[cfg(rust_analyzer)] {
pub const KERNEL_PHYS_BASE: usize = 0x80200000;
}
}
pub const KERNEL_VIRT_OFFSET: usize = kernel_arch_interface::KERNEL_VIRT_OFFSET;
pub const SIGN_EXTEND_MASK: usize = 0xFFFFFF80_00000000;
pub const KERNEL_START_L1I: usize = page_index::<L1>(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
pub const KERNEL_L2I: usize = page_index::<L2>(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
const_assert_eq!(KERNEL_L2I, 1);
// Runtime mappings
// 1GiB of device memory space
const DEVICE_MAPPING_L1I: usize = KERNEL_START_L1I + 1;
const DEVICE_MAPPING_L3_COUNT: usize = 4;
// 32GiB of RAM space
const RAM_MAPPING_START_L1I: usize = KERNEL_START_L1I + 2;
const RAM_MAPPING_L1_COUNT: usize = 32;
const_assert!(RAM_MAPPING_START_L1I + RAM_MAPPING_L1_COUNT <= 512);
const_assert!(DEVICE_MAPPING_L1I < 512);
const DEVICE_MAPPING_OFFSET: usize = (DEVICE_MAPPING_L1I << L1::SHIFT) | SIGN_EXTEND_MASK;
const RAM_MAPPING_OFFSET: usize = (RAM_MAPPING_START_L1I << L1::SHIFT) | SIGN_EXTEND_MASK;
// Runtime tables
static mut DEVICE_MAPPING_L2: PageTable<L2> = PageTable::zeroed();
static mut DEVICE_MAPPING_L3S: [PageTable<L3>; DEVICE_MAPPING_L3_COUNT] =
[const { PageTable::zeroed() }; DEVICE_MAPPING_L3_COUNT];
/// Any VAs above this one are sign-extended
pub const USER_BOUNDARY: usize = 0x40_00000000;
#[derive(Debug)]
pub struct KernelTableManagerImpl;
impl KernelTableManager for KernelTableManagerImpl {
    /// Translates a physical address into its linear-map virtual address.
    ///
    /// Panics if the physical address is so large that adding the linear-map
    /// offset would wrap past the address space (i.e. it cannot lie inside
    /// the RAM mapping window).
    fn virtualize(address: u64) -> usize {
        let address = address as usize;
        if address >= RAM_MAPPING_OFFSET {
            panic!("Invalid physical address: {address:#x}");
        }

        address + RAM_MAPPING_OFFSET
    }

    /// Inverse of [Self::virtualize]: recovers the physical address from a
    /// linear-map virtual address. Panics if `address` is below the linear
    /// map and therefore was not produced by `virtualize`.
    fn physicalize(address: usize) -> u64 {
        if address < RAM_MAPPING_OFFSET {
            panic!("Invalid \"physicalized\" virtual address {address:#x}");
        }

        (address - RAM_MAPPING_OFFSET) as u64
    }

    /// Maps `count` bytes of device memory starting at physical `base` into
    /// the dedicated device-mapping window; see [map_device_memory].
    unsafe fn map_device_pages(
        base: u64,
        count: usize,
        attrs: DeviceMemoryAttributes,
    ) -> Result<RawDeviceMemoryMapping<Self>, Error> {
        unsafe { map_device_memory(PhysicalAddress::from_u64(base), count, attrs) }
    }

    unsafe fn unmap_device_pages(mapping: &RawDeviceMemoryMapping<Self>) {
        unsafe { unmap_device_memory(mapping) }
    }
}
// Device mappings
// Device mappings

/// Finds a run of `count` free L3 (4 KiB) slots in the device-mapping window
/// and maps `base..base + count * L3::SIZE` there as writable pages.
///
/// First-fit scan: for each candidate start index, the whole run is checked
/// for free slots before anything is written, so a partially-occupied run is
/// skipped without side effects. Returns the virtual address of the mapping
/// start, or OutOfMemory when the window is exhausted.
///
/// # Safety
///
/// Mutates the `static mut` DEVICE_MAPPING_L3S tables; callers must ensure
/// no concurrent access to the device-mapping window.
unsafe fn map_device_memory_l3(
    base: PhysicalAddress,
    count: usize,
    _attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
    // TODO don't map pages if already mapped
    'l0: for i in 0..DEVICE_MAPPING_L3_COUNT * 512 {
        for j in 0..count {
            let l2i = (i + j) / 512;
            let l3i = (i + j) % 512;

            unsafe {
                if DEVICE_MAPPING_L3S[l2i][l3i].is_present() {
                    continue 'l0;
                }
            }
        }

        for j in 0..count {
            let l2i = (i + j) / 512;
            let l3i = (i + j) % 512;

            unsafe {
                DEVICE_MAPPING_L3S[l2i][l3i] =
                    PageEntry::page(base.add(j * L3::SIZE), PageAttributes::W);
            }
        }

        let start = DEVICE_MAPPING_OFFSET + i * L3::SIZE;
        tlb_flush_range_va(start, count * L3::SIZE);

        return Ok(start);
    }

    Err(Error::OutOfMemory)
}
#[allow(unused)]
unsafe fn map_device_memory_l2(
base: PhysicalAddress,
count: usize,
_attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
'l0: for i in DEVICE_MAPPING_L3_COUNT..512 {
for j in 0..count {
unsafe {
if DEVICE_MAPPING_L2[i + j].is_present() {
continue 'l0;
}
}
}
unsafe {
for j in 0..count {
DEVICE_MAPPING_L2[i + j] =
PageEntry::<L2>::block(base.add(j * L2::SIZE), PageAttributes::W);
}
}
let start = DEVICE_MAPPING_OFFSET + i * L2::SIZE;
tlb_flush_range_va(start, count * L2::SIZE);
return Ok(start);
}
Err(Error::OutOfMemory)
}
/// Maps a physically contiguous device memory region into the kernel device
/// window and returns a handle describing the mapping.
///
/// Regions needing more than 256 4KiB pages are mapped with 2MiB L2 blocks,
/// smaller ones with individual L3 pages.
///
/// # Safety
///
/// `base`/`size` must describe actual device memory; the global device
/// mapping tables are mutated.
pub(crate) unsafe fn map_device_memory(
    base: PhysicalAddress,
    size: usize,
    attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping<KernelTableManagerImpl>, Error> {
    let l3_aligned = base.page_align_down::<L3>();
    let l3_offset = base.page_offset::<L3>();
    // Page count includes the partial page introduced by the misalignment
    let page_count = (l3_offset + size).page_count::<L3>();
    if page_count > 256 {
        // Large mapping, use L2 mapping instead
        let l2_aligned = base.page_align_down::<L2>();
        let l2_offset = base.page_offset::<L2>();
        let page_count = (l2_offset + size).page_count::<L2>();
        unsafe {
            let base_address = map_device_memory_l2(l2_aligned, page_count, attrs)?;
            // Re-apply the intra-block offset of the original base
            let address = base_address + l2_offset;
            Ok(RawDeviceMemoryMapping::from_raw_parts(
                l2_aligned.into_u64(),
                address,
                base_address,
                page_count,
                L2::SIZE,
            ))
        }
    } else {
        // Just map the pages directly
        unsafe {
            let base_address = map_device_memory_l3(l3_aligned, page_count, attrs)?;
            let address = base_address + l3_offset;
            Ok(RawDeviceMemoryMapping::from_raw_parts(
                l3_aligned.into_u64(),
                address,
                base_address,
                page_count,
                L3::SIZE,
            ))
        }
    }
}
/// Releases a mapping created by [map_device_memory]: invalidates its PTEs
/// and flushes the covered TLB range.
///
/// # Safety
///
/// The mapped region must no longer be accessed after this call.
pub(crate) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping<KernelTableManagerImpl>) {
    match map.page_size {
        L3::SIZE => {
            for i in 0..map.page_count {
                let page = map.base_address + i * L3::SIZE;
                let l2i = page.page_index::<L2>();
                let l3i = page.page_index::<L3>();
                unsafe {
                    // The mapping handle must refer to live entries
                    assert!(DEVICE_MAPPING_L3S[l2i][l3i].is_present());
                    DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::INVALID;
                }
            }
            tlb_flush_range_va(map.base_address, map.page_count * L3::SIZE);
        }
        // 2MiB block unmapping is not implemented yet
        L2::SIZE => todo!(),
        _ => unimplemented!(),
    }
}
pub fn auto_address<T>(x: *const T) -> usize {
let x = x.addr();
if x >= KERNEL_VIRT_OFFSET {
x - KERNEL_VIRT_OFFSET
} else {
x
}
}
/// Enables the memory translation.
///
/// # Safety
///
/// Only meant to be called once per each HART during their early init.
pub unsafe fn enable_mmu() {
    // Physical address of the root (L1) table; auto_address() works both
    // before and after the higher-half switch
    let l1_phys = auto_address(&raw const KERNEL_TABLES) as u64;
    tlb_flush_full();
    // SATP.PPN holds the root table's physical page number; enable Sv39
    SATP.write(SATP::PPN.val(l1_phys >> 12) + SATP::MODE::Sv39);
}
/// Removes the lower half translation mappings.
///
/// # Safety
///
/// Needs to be called once after secondary HARTs are initialized.
pub unsafe fn unmap_lower_half() {
let mut tables = KERNEL_TABLES.lock();
let kernel_l1i_lower = page_index::<L1>(KERNEL_PHYS_BASE);
tables.l1.data[kernel_l1i_lower] = 0;
tlb_flush_range_va(0x0, L1::SIZE);
}
/// Sets up run-time kernel translation tables.
///
/// # Safety
///
/// The caller must ensure MMU is already enabled.
pub unsafe fn setup_fixed_tables() {
    let mut tables = KERNEL_TABLES.lock();
    let device_mapping_l2_phys = auto_address(&raw const DEVICE_MAPPING_L2);
    // Set up static runtime mappings
    for i in 0..DEVICE_MAPPING_L3_COUNT {
        unsafe {
            // Physical address of the statically allocated L3 table
            let device_mapping_l3_phys = PhysicalAddress::from_usize(
                (&raw const DEVICE_MAPPING_L3S[i]).addr() - KERNEL_VIRT_OFFSET,
            );
            DEVICE_MAPPING_L2[i] =
                PageEntry::table(device_mapping_l3_phys, PageAttributes::empty());
        }
    }
    // The device-mapping L1 slot must not have been populated yet
    assert_eq!(tables.l1.data[DEVICE_MAPPING_L1I], 0);
    // Raw Sv39 PTE: physical address stored shifted right by 2 (PPN at bit 10)
    tables.l1.data[DEVICE_MAPPING_L1I] =
        ((device_mapping_l2_phys as u64) >> 2) | PageAttributes::V.bits();
    // Linear map: RAM_MAPPING_L1_COUNT 1GiB read/write blocks starting at
    // physical address 0
    for l1i in 0..RAM_MAPPING_L1_COUNT {
        let physical = (l1i as u64) << L1::SHIFT;
        tables.l1.data[l1i + RAM_MAPPING_START_L1I] = (physical >> 2)
            | (PageAttributes::R
                | PageAttributes::W
                | PageAttributes::A
                | PageAttributes::D
                | PageAttributes::V)
            .bits();
    }
    tlb_flush_full();
}
/// Flushes the whole TLB; intended to reach all HARTs (currently local only).
pub fn tlb_flush_global_full() {
    tlb_flush_full();
    // TODO send TLB shootdown IPI to other harts
}
/// Flushes TLB entries for `va`; intended to reach all HARTs (currently local only).
pub fn tlb_flush_global_va(va: usize) {
    tlb_flush_va(va);
    // TODO send TLB shootdown IPI to other harts
}
/// Flushes, on the current HART, every 4KiB TLB entry covering the virtual
/// range `[start, start + size)` (bounds rounded out to page boundaries).
pub fn tlb_flush_range_va(start: usize, size: usize) {
    let end = (start + size).page_align_up::<L3>();
    let mut page = start.page_align_down::<L3>();
    while page < end {
        tlb_flush_va(page);
        page += L3::SIZE;
    }
}
/// Same as [tlb_flush_range_va], but restricted to entries tagged with `asid`.
pub fn tlb_flush_range_va_asid(asid: usize, start: usize, size: usize) {
    let end = (start + size).page_align_up::<L3>();
    let mut page = start.page_align_down::<L3>();
    while page < end {
        tlb_flush_va_asid(page, asid);
        page += L3::SIZE;
    }
}
/// Flushes the entire local TLB (`sfence.vma` with no operands).
#[inline]
pub fn tlb_flush_full() {
    unsafe { core::arch::asm!("sfence.vma") };
}
/// Flushes local TLB entries translating `va`, for all ASIDs.
#[inline]
pub fn tlb_flush_va(va: usize) {
    unsafe { core::arch::asm!("sfence.vma {0}, zero", in(reg) va) };
}
/// Flushes all local TLB entries tagged with `asid`.
#[inline]
pub fn tlb_flush_asid(asid: usize) {
    unsafe { core::arch::asm!("sfence.vma zero, {0}", in(reg) asid) };
}
/// Flushes local TLB entries translating `va` that are tagged with `asid`.
#[inline]
pub fn tlb_flush_va_asid(va: usize, asid: usize) {
    unsafe { core::arch::asm!("sfence.vma {0}, {1}", in(reg) va, in(reg) asid) };
}
/// Copies the kernel-half L1 entries (indices at and above USER_BOUNDARY)
/// into `dst`, so a new address space shares the global kernel mappings.
pub fn clone_kernel_tables(dst: &mut PageTable<L1>) {
    let tables = KERNEL_TABLES.lock();
    for l1i in page_index::<L1>(USER_BOUNDARY)..512 {
        // SAFETY: the kernel tables hold valid PTEs by construction
        dst[l1i] = unsafe { PageEntry::from_raw(tables.l1.data[l1i]) };
    }
}

View File

@ -0,0 +1,236 @@
use core::{
marker::PhantomData,
sync::atomic::{AtomicU16, Ordering},
};
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::PhysicalRefMut,
process::{PageAttributeUpdate, ProcessAddressSpaceManager},
table::{
EntryLevel, EntryLevelDrop, EntryLevelExt, MapAttributes, NextPageTable, TableAllocator,
},
};
use memtables::riscv64::PageAttributes;
use yggdrasil_abi::error::Error;
use crate::mem::{clone_kernel_tables, table::PageEntry};
use super::{
table::{DroppableRange, PageTable, L1, L2, L3},
KernelTableManagerImpl, USER_BOUNDARY,
};
/// Sv39 process address space: owns the root (L1) translation table and the
/// ASID used to tag this space's TLB entries.
pub struct ProcessAddressSpaceImpl<TA: TableAllocator> {
    // Root page table, accessed through its kernel-side physical-ref alias
    l1: PhysicalRefMut<'static, PageTable<L1>, KernelTableManagerImpl>,
    // Address space ID, reported alongside the table address for SATP
    asid: u16,
    _pd: PhantomData<TA>,
}
impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceImpl<TA> {
    /// Lowest user-mappable PFN (keeps the first pages unmapped)
    const LOWER_LIMIT_PFN: usize = 8;
    /// Upper user-mappable PFN bound: 16GiB of address space
    const UPPER_LIMIT_PFN: usize = (16 << 30) / L3::SIZE;
    /// Allocates a root table, clears it and copies in the shared kernel
    /// mappings, then assigns a fresh ASID.
    fn new() -> Result<Self, Error> {
        // NOTE(review): the ASID counter wraps after 65536 allocations and
        // IDs are never recycled, so two live address spaces could end up
        // sharing an ASID — confirm whether that is acceptable.
        static LAST_ASID: AtomicU16 = AtomicU16::new(1);
        let mut l1 = unsafe {
            PhysicalRefMut::<'static, PageTable<L1>, KernelTableManagerImpl>::map(
                TA::allocate_page_table()?,
            )
        };
        // Start from a fully invalid table
        for i in 0..512 {
            l1[i] = PageEntry::INVALID;
        }
        // Copy the kernel mappings
        clone_kernel_tables(&mut l1);
        let asid = LAST_ASID.fetch_add(1, Ordering::AcqRel);
        Ok(Self {
            l1,
            asid,
            _pd: PhantomData,
        })
    }
    /// Maps a single 4KiB user page at `address` to `physical`.
    unsafe fn map_page(
        &mut self,
        address: usize,
        physical: PhysicalAddress,
        flags: MapAttributes,
    ) -> Result<(), Error> {
        // Propagate failures (e.g. InvalidArgument for addresses outside the
        // user region) instead of unwrapping, which previously panicked the
        // kernel on a recoverable error.
        self.write_l3_entry(
            address,
            PageEntry::page(physical, to_page_attributes(flags)),
            false,
        )
    }
    /// Applies a partial attribute update to an existing mapping.
    unsafe fn update_page_attributes(
        &mut self,
        address: usize,
        update: &PageAttributeUpdate,
    ) -> Result<(), Error> {
        self.update_l3_entry(address, |entry| entry.update(update))
    }
    /// Removes a mapping, returning the physical page and its dirty state.
    unsafe fn unmap_page(&mut self, address: usize) -> Result<(PhysicalAddress, bool), Error> {
        self.pop_l3_entry(address)
    }
    /// Translates a user virtual address without modifying the tables.
    fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error> {
        self.read_l3_entry(address).ok_or(Error::DoesNotExist)
    }
    /// Returns the (root table physical address, ASID) pair for SATP.
    fn as_address_with_asid(&self) -> (u64, u64) {
        let physical = unsafe { self.l1.as_physical_address() }.into_u64();
        (physical, self.asid as u64)
    }
    /// Drops all user-half tables; kernel mappings are shared and kept.
    unsafe fn clear(&mut self) {
        unsafe { self.l1.drop_range::<TA>(L1::DROPPABLE_RANGE) };
    }
}
impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
    // Write a single 4KiB entry, allocating intermediate tables as needed.
    // `overwrite` controls whether an already-present entry may be replaced
    // (replacement is currently unimplemented).
    fn write_l3_entry(
        &mut self,
        virt: usize,
        entry: PageEntry<L3>,
        overwrite: bool,
    ) -> Result<(), Error> {
        if virt >= USER_BOUNDARY {
            log::warn!("Tried to map a userspace page to a non-userspace virtual region");
            return Err(Error::InvalidArgument);
        }
        let l1i = virt.page_index::<L1>();
        let l2i = virt.page_index::<L2>();
        let l3i = virt.page_index::<L3>();
        let mut l2 = self.l1.get_mut_or_alloc::<TA>(l1i)?;
        let mut l3 = l2.get_mut_or_alloc::<TA>(l2i)?;
        if l3[l3i].is_present() && !overwrite {
            todo!();
        }
        l3[l3i] = entry;
        // Only this address space's TLB entries need invalidation
        super::tlb_flush_va_asid(virt, self.asid as usize);
        Ok(())
    }
    // Applies `mapper` to an existing L3 entry; fails if any level of the
    // walk is missing or the entry is not present.
    fn update_l3_entry<F: FnOnce(&mut PageEntry<L3>) -> Result<(), Error>>(
        &mut self,
        virt: usize,
        mapper: F,
    ) -> Result<(), Error> {
        let l1i = virt.page_index::<L1>();
        let l2i = virt.page_index::<L2>();
        let l3i = virt.page_index::<L3>();
        // TODO somehow drop tables if they're known to be empty?
        let mut l2 = self.l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
        let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;
        let entry = &mut l3[l3i];
        if !entry.is_present() {
            return Err(Error::DoesNotExist);
        }
        mapper(entry)?;
        super::tlb_flush_va_asid(virt, self.asid as usize);
        Ok(())
    }
    // Removes an L3 entry and returns the physical page it mapped together
    // with its (software) dirty state.
    fn pop_l3_entry(&mut self, virt: usize) -> Result<(PhysicalAddress, bool), Error> {
        let l1i = virt.page_index::<L1>();
        let l2i = virt.page_index::<L2>();
        let l3i = virt.page_index::<L3>();
        // TODO somehow drop tables if they're known to be empty?
        let mut l2 = self.l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
        let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;
        let entry = l3[l3i];
        let page = entry.as_page().ok_or(Error::DoesNotExist)?;
        let dirty = entry.is_dirty();
        l3[l3i] = PageEntry::INVALID;
        super::tlb_flush_va_asid(virt, self.asid as usize);
        Ok((page, dirty))
    }
    // Read-only translation of a user VA to (physical address, attributes).
    fn read_l3_entry(&self, virt: usize) -> Option<(PhysicalAddress, MapAttributes)> {
        if virt >= USER_BOUNDARY {
            log::warn!("Tried read an userspace page to a non-userspace virtual region");
            return None;
        }
        let l1i = virt.page_index::<L1>();
        let l2i = virt.page_index::<L2>();
        let l3i = virt.page_index::<L3>();
        let l2 = self.l1.get(l1i)?;
        let l3 = l2.get(l2i)?;
        let page = l3[l3i].as_page()?;
        Some((
            // Preserve the in-page offset of the queried address
            page.add(virt & 0xFFF),
            to_map_attributes(l3[l3i].attributes()),
        ))
    }
}
impl<TA: TableAllocator> Drop for ProcessAddressSpaceImpl<TA> {
    fn drop(&mut self) {
        // SAFETY: with safe usage of the ProcessAddressSpaceImpl, clearing and dropping
        // is safe, no one refers to the memory
        unsafe {
            self.clear();
            let l1_phys = self.l1.as_physical_address();
            TA::free_page_table(l1_phys);
            // Invalidate any remaining TLB entries tagged with this ASID
            super::tlb_flush_asid(self.asid as usize);
        }
    }
}
/// Translates generic mapping attributes into riscv64 PTE attribute bits.
/// Pages are always readable and executable; W, U and the software dirty bit
/// are derived from `src`.
fn to_page_attributes(src: MapAttributes) -> PageAttributes {
    let mut out = PageAttributes::R | PageAttributes::X;
    out.set(PageAttributes::W, src.contains(MapAttributes::USER_WRITE));
    out.set(
        PageAttributes::U,
        src.intersects(MapAttributes::USER_READ | MapAttributes::USER_WRITE),
    );
    out.set(PageAttributes::SW_DIRTY, src.contains(MapAttributes::DIRTY));
    out
}
/// Translates riscv64 PTE attribute bits back into generic mapping
/// attributes. USER_WRITE is only reported for user-accessible pages.
fn to_map_attributes(src: PageAttributes) -> MapAttributes {
    let user = src.contains(PageAttributes::U);
    let mut out = MapAttributes::NON_GLOBAL;
    if user {
        out |= MapAttributes::USER_READ;
    }
    if user && src.contains(PageAttributes::W) {
        out |= MapAttributes::USER_WRITE;
    }
    if src.contains(PageAttributes::SW_DIRTY) {
        out |= MapAttributes::DIRTY;
    }
    out
}

View File

@ -0,0 +1,272 @@
use core::{
marker::PhantomData,
ops::{Index, IndexMut, Range},
};
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::{PhysicalRef, PhysicalRefMut},
process::PageAttributeUpdate,
table::{
page_index, EntryLevel, EntryLevelDrop, NextPageTable, NonTerminalEntryLevel,
TableAllocator,
},
};
use yggdrasil_abi::error::Error;
use super::{KernelTableManagerImpl, USER_BOUNDARY};
pub use memtables::riscv64::PageAttributes;
/// L3 - entry is 4KiB
#[derive(Debug, Clone, Copy)]
pub struct L3;
/// L2 - entry is 2MiB
#[derive(Debug, Clone, Copy)]
pub struct L2;
/// L1 - entry is 1GiB
#[derive(Debug, Clone, Copy)]
pub struct L1;
// Shift of each level's page size: 4KiB, 2MiB, 1GiB (Sv39)
impl EntryLevel for L3 {
    const SHIFT: usize = 12;
}
impl EntryLevel for L2 {
    const SHIFT: usize = 21;
}
impl EntryLevel for L1 {
    const SHIFT: usize = 30;
}
/// One level of translation: 512 eight-byte entries, page-aligned.
#[repr(C, align(0x1000))]
pub struct PageTable<L: EntryLevel> {
    entries: [PageEntry<L>; 512],
}
/// Raw Sv39 page table entry, tagged with its translation level.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct PageEntry<L: EntryLevel>(pub u64, PhantomData<L>);
/// Range of entry indices that may be freed when tearing a table down.
pub(super) trait DroppableRange {
    const DROPPABLE_RANGE: Range<usize>;
}
impl DroppableRange for L1 {
    // Only user-half entries are droppable; kernel entries are shared
    const DROPPABLE_RANGE: Range<usize> = 0..page_index::<L1>(USER_BOUNDARY);
}
impl DroppableRange for L2 {
    const DROPPABLE_RANGE: Range<usize> = 0..512;
}
// L1 entries point to L2 tables, L2 entries to L3 tables
impl NonTerminalEntryLevel for L1 {
    type NextLevel = L2;
}
impl NonTerminalEntryLevel for L2 {
    type NextLevel = L3;
}
impl<L: EntryLevel> PageTable<L> {
    /// Constructs a table with all entries invalid (usable in statics).
    pub const fn zeroed() -> Self {
        Self {
            entries: [PageEntry::INVALID; 512],
        }
    }
    /// Allocates a fresh table via `TA`, clears it, and returns a mutable
    /// reference to it through the kernel mapping.
    pub fn new_zeroed<'a, TA: TableAllocator>(
    ) -> Result<PhysicalRefMut<'a, PageTable<L>, KernelTableManagerImpl>, Error> {
        let physical = TA::allocate_page_table()?;
        let mut table =
            unsafe { PhysicalRefMut::<'a, Self, KernelTableManagerImpl>::map(physical) };
        for i in 0..512 {
            table[i] = PageEntry::INVALID;
        }
        Ok(table)
    }
}
impl<L: EntryLevel> PageEntry<L> {
    // Upper + lower 10 bits: flag/extension fields; everything between is
    // the address (PPN) field
    const ATTR_MASK: u64 = 0xFFC00000000003FF;
    /// All-zero, non-present entry
    pub const INVALID: Self = Self(0, PhantomData);
    /// Constructs a [PageEntry] from its raw representation.
    ///
    /// # Safety
    ///
    /// The caller must ensure `value` is actually a "valid" PTE.
    pub const unsafe fn from_raw(value: u64) -> Self {
        Self(value, PhantomData)
    }
    /// Returns `true` if the V (valid) bit is set.
    pub const fn is_present(&self) -> bool {
        self.0 & PageAttributes::V.bits() != 0
    }
    /// Applies a partial attribute update (write access, software dirty bit)
    /// while preserving the address field.
    pub fn update(&mut self, update: &PageAttributeUpdate) -> Result<(), Error> {
        let mut attrs = self.attributes();
        if let Some(write) = update.user_write {
            attrs.set(PageAttributes::W, write);
        }
        if let Some(dirty) = update.dirty {
            attrs.set(PageAttributes::SW_DIRTY, dirty);
        }
        // Replace only the attribute bits, keep the PPN intact
        self.0 &= !Self::ATTR_MASK;
        self.0 |= attrs.bits() & Self::ATTR_MASK;
        Ok(())
    }
    /// Returns the software-managed dirty flag.
    pub const fn is_dirty(&self) -> bool {
        self.0 & PageAttributes::SW_DIRTY.bits() != 0
    }
    /// Returns the raw bits reinterpreted as attribute flags.
    pub fn attributes(self) -> PageAttributes {
        PageAttributes::from_bits_retain(self.0)
    }
}
impl<L: NonTerminalEntryLevel + DroppableRange> EntryLevelDrop for PageTable<L>
where
    PageTable<L::NextLevel>: EntryLevelDrop,
{
    const FULL_RANGE: Range<usize> = L::DROPPABLE_RANGE;
    /// Recursively frees all child tables referenced by entries in `range`,
    /// invalidating each entry; panics on a present non-table entry (leaf
    /// pages must have been unmapped beforehand).
    unsafe fn drop_range<TA: TableAllocator>(&mut self, range: Range<usize>) {
        for index in range {
            let entry = self[index];
            if let Some(table) = entry.as_table() {
                unsafe {
                    let mut table_ref: PhysicalRefMut<
                        PageTable<L::NextLevel>,
                        KernelTableManagerImpl,
                    > = PhysicalRefMut::map(table);
                    table_ref.drop_all::<TA>();
                    TA::free_page_table(table);
                }
            } else if entry.is_present() {
                // Memory must've been cleared beforehand, so no non-table entries must be present
                panic!(
                    "Expected a table containing only tables, got table[{}] = {:#x?}",
                    index, entry.0
                );
            }
            self[index] = PageEntry::INVALID;
            // dc_cvac((&raw const self[index]).addr());
        }
    }
}
impl EntryLevelDrop for PageTable<L3> {
    const FULL_RANGE: Range<usize> = 0..512;
    // Do nothing: L3 entries are leaves, there are no child tables to free
    unsafe fn drop_range<TA: TableAllocator>(&mut self, _range: Range<usize>) {}
}
impl<L: NonTerminalEntryLevel + 'static> NextPageTable for PageTable<L> {
    type NextLevel = PageTable<L::NextLevel>;
    type TableRef = PhysicalRef<'static, PageTable<L::NextLevel>, KernelTableManagerImpl>;
    type TableRefMut = PhysicalRefMut<'static, PageTable<L::NextLevel>, KernelTableManagerImpl>;
    /// Returns the child table at `index`, if the entry is a table pointer.
    fn get(&self, index: usize) -> Option<Self::TableRef> {
        let table = self[index].as_table()?;
        Some(unsafe { PhysicalRef::map(table) })
    }
    /// Mutable variant of [Self::get].
    fn get_mut(&mut self, index: usize) -> Option<Self::TableRefMut> {
        let table = self[index].as_table()?;
        Some(unsafe { PhysicalRefMut::map(table) })
    }
    /// Returns the child table at `index`, allocating and linking a zeroed
    /// one if the slot is empty.
    fn get_mut_or_alloc<TA: TableAllocator>(
        &mut self,
        index: usize,
    ) -> Result<Self::TableRefMut, Error> {
        if let Some(table) = self[index].as_table() {
            Ok(unsafe { PhysicalRefMut::map(table) })
        } else {
            let table = PageTable::new_zeroed::<TA>()?;
            self[index] = PageEntry::<L>::table(
                unsafe { table.as_physical_address() },
                PageAttributes::empty(),
            );
            // dc_cvac((&raw const self[index]).addr());
            Ok(table)
        }
    }
}
impl<L: NonTerminalEntryLevel> PageEntry<L> {
    /// Constructs a leaf "block" entry (R/A/D/V plus `attrs`) mapping a large
    /// page at `address`. The address is stored shifted right by 2, which
    /// places the PPN at bit 10 as the PTE layout requires.
    pub fn block(address: PhysicalAddress, attrs: PageAttributes) -> Self {
        // TODO validate address alignment
        Self(
            (address.into_u64() >> 2)
                | (PageAttributes::R
                    | PageAttributes::A
                    | PageAttributes::D
                    | PageAttributes::V
                    | attrs)
                .bits(),
            PhantomData,
        )
    }
    /// Constructs a non-leaf entry pointing at a next-level table. R/W/X are
    /// stripped: a valid entry with all three clear is a table pointer.
    pub fn table(address: PhysicalAddress, mut attrs: PageAttributes) -> Self {
        attrs.remove(PageAttributes::R | PageAttributes::W | PageAttributes::X);
        Self(
            (address.into_u64() >> 2) | (PageAttributes::V | attrs).bits(),
            PhantomData,
        )
    }
    /// Returns the child table's physical address if this is a valid table
    /// pointer (V set, R/W/X all clear).
    pub fn as_table(&self) -> Option<PhysicalAddress> {
        (self.0
            & (PageAttributes::R | PageAttributes::W | PageAttributes::X | PageAttributes::V)
                .bits()
            == PageAttributes::V.bits())
        .then_some((self.0 & !Self::ATTR_MASK) << 2)
        .map(PhysicalAddress::from_u64)
    }
}
impl PageEntry<L3> {
    /// Constructs a 4KiB leaf entry (R/A/D/V plus `attrs`) for `address`.
    pub fn page(address: PhysicalAddress, attrs: PageAttributes) -> Self {
        Self(
            (address.into_u64() >> 2)
                | (PageAttributes::R
                    | PageAttributes::A
                    | PageAttributes::D
                    | PageAttributes::V
                    | attrs)
                .bits(),
            PhantomData,
        )
    }
    /// Returns the mapped physical page if the entry is present.
    pub fn as_page(&self) -> Option<PhysicalAddress> {
        (self.0 & PageAttributes::V.bits() != 0)
            .then_some((self.0 & !Self::ATTR_MASK) << 2)
            .map(PhysicalAddress::from_u64)
    }
}
impl<L: EntryLevel> Index<usize> for PageTable<L> {
    type Output = PageEntry<L>;
    // Panics on out-of-range indices (>= 512), like any array access
    fn index(&self, index: usize) -> &Self::Output {
        &self.entries[index]
    }
}
impl<L: EntryLevel> IndexMut<usize> for PageTable<L> {
    // Mutable counterpart of Index; same out-of-range panic behavior
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.entries[index]
    }
}

View File

@ -0,0 +1,221 @@
/// Implements `tock_registers::interfaces::Readable` for a CSR wrapper type
/// by emitting a `csrr` read of the named register.
macro impl_csr_read($struct:ident, $repr:ty, $reg:ident, $register:ty) {
    impl tock_registers::interfaces::Readable for $struct {
        type T = $repr;
        type R = $register;
        #[inline]
        fn get(&self) -> $repr {
            let mut value: $repr;
            unsafe {
                core::arch::asm!(concat!("csrr {0}, ", stringify!($reg)), out(reg) value);
            }
            value
        }
    }
}
/// Implements `tock_registers::interfaces::Writeable` for a CSR wrapper type
/// by emitting a `csrw` write of the named register.
macro impl_csr_write($struct:ident, $repr:ty, $reg:ident, $register:ty) {
    impl tock_registers::interfaces::Writeable for $struct {
        type T = $repr;
        type R = $register;
        #[inline]
        fn set(&self, value: $repr) {
            unsafe {
                core::arch::asm!(concat!("csrw ", stringify!($reg), ", {0}"), in(reg) value);
            }
        }
    }
}
/// `satp` — Supervisor Address Translation and Protection register
/// (root table PPN, ASID, translation mode).
pub mod satp {
    use tock_registers::register_bitfields;
    use super::{impl_csr_read, impl_csr_write};
    register_bitfields!(
        u64,
        pub SATP [
            PPN OFFSET(0) NUMBITS(44) [],
            ASID OFFSET(44) NUMBITS(16) [],
            MODE OFFSET(60) NUMBITS(4) [
                Bare = 0,
                Sv39 = 8,
                Sv48 = 9,
                Sv57 = 10,
                Sv64 = 11,
            ],
        ]
    );
    pub struct Reg;
    impl_csr_read!(Reg, u64, satp, SATP::Register);
    impl_csr_write!(Reg, u64, satp, SATP::Register);
    pub const SATP: Reg = Reg;
}
/// `stvec` — Supervisor Trap Vector base address register.
pub mod stvec {
    use tock_registers::{interfaces::ReadWriteable, register_bitfields};
    use super::{impl_csr_read, impl_csr_write};
    register_bitfields!(
        u64,
        pub STVEC [
            MODE OFFSET(0) NUMBITS(2) [
                Direct = 0,
                Vectored = 1
            ],
            BASE OFFSET(2) NUMBITS(62) [],
        ]
    );
    pub struct Reg;
    impl_csr_read!(Reg, u64, stvec, STVEC::Register);
    impl_csr_write!(Reg, u64, stvec, STVEC::Register);
    impl Reg {
        // Sets the trap vector base; `base` must be at least 16-byte aligned.
        pub fn set_base(&self, base: usize) {
            debug_assert_eq!(base & 0xF, 0);
            // NOTE(review): when `base` is not 64-byte aligned this ORs
            // 0x3 << 62 into the value written to the 62-bit BASE field —
            // the intent is unclear (MODE lives in bits 1:0); verify this
            // against the STVEC layout.
            let mask = match base & 63 != 0 {
                false => 0,
                true => 0x3 << 62,
            };
            self.modify(STVEC::BASE.val(((base as u64) >> 2) | mask));
        }
    }
    pub const STVEC: Reg = Reg;
}
/// `scause` — Supervisor trap Cause register (interrupt flag + cause code).
pub mod scause {
    use tock_registers::register_bitfields;
    use super::{impl_csr_read, impl_csr_write};
    register_bitfields!(
        u64,
        pub SCAUSE [
            CODE OFFSET(0) NUMBITS(63) [],
            INTERRUPT OFFSET(63) NUMBITS(1) [],
        ]
    );
    pub struct Reg;
    impl_csr_read!(Reg, u64, scause, SCAUSE::Register);
    impl_csr_write!(Reg, u64, scause, SCAUSE::Register);
    pub const SCAUSE: Reg = Reg;
}
/// `stval` — Supervisor Trap Value register (plain 64-bit read/write).
pub mod stval {
    use super::{impl_csr_read, impl_csr_write};
    pub struct Reg;
    impl_csr_read!(Reg, u64, stval, ());
    impl_csr_write!(Reg, u64, stval, ());
    pub const STVAL: Reg = Reg;
}
/// `sepc` — Supervisor Exception Program Counter (plain 64-bit read/write).
pub mod sepc {
    use super::{impl_csr_read, impl_csr_write};
    pub struct Reg;
    impl_csr_read!(Reg, u64, sepc, ());
    impl_csr_write!(Reg, u64, sepc, ());
    pub const SEPC: Reg = Reg;
}
/// `sstatus` — Supervisor Status register (SUM, previous privilege,
/// interrupt-enable bits).
pub mod sstatus {
    use tock_registers::register_bitfields;
    use super::{impl_csr_read, impl_csr_write};
    register_bitfields!(
        u64,
        pub SSTATUS [
            SUM OFFSET(18) NUMBITS(1) [],
            SPP OFFSET(8) NUMBITS(1) [],
            SIE OFFSET(1) NUMBITS(1) [],
        ]
    );
    pub struct Reg;
    impl_csr_read!(Reg, u64, sstatus, SSTATUS::Register);
    impl_csr_write!(Reg, u64, sstatus, SSTATUS::Register);
    pub const SSTATUS: Reg = Reg;
}
/// `sscratch` — Supervisor scratch register (plain 64-bit read/write).
pub mod sscratch {
    use super::{impl_csr_read, impl_csr_write};
    pub struct Reg;
    impl_csr_read!(Reg, u64, sscratch, ());
    impl_csr_write!(Reg, u64, sscratch, ());
    pub const SSCRATCH: Reg = Reg;
}
/// `sip` — Supervisor Interrupt Pending register (software/timer/external).
pub mod sip {
    use tock_registers::register_bitfields;
    use super::{impl_csr_read, impl_csr_write};
    register_bitfields!(
        u64,
        pub SIP [
            SSIP OFFSET(1) NUMBITS(1) [],
            STIP OFFSET(5) NUMBITS(1) [],
            SEIP OFFSET(9) NUMBITS(1) [],
        ]
    );
    pub struct Reg;
    impl_csr_read!(Reg, u64, sip, SIP::Register);
    impl_csr_write!(Reg, u64, sip, SIP::Register);
    pub const SIP: Reg = Reg;
}
/// `sie` — Supervisor Interrupt Enable register (software/timer/external).
pub mod sie {
    use tock_registers::register_bitfields;
    use super::{impl_csr_read, impl_csr_write};
    register_bitfields!(
        u64,
        pub SIE [
            SSIE OFFSET(1) NUMBITS(1) [],
            STIE OFFSET(5) NUMBITS(1) [],
            SEIE OFFSET(9) NUMBITS(1) [],
        ]
    );
    pub struct Reg;
    impl_csr_read!(Reg, u64, sie, SIE::Register);
    impl_csr_write!(Reg, u64, sie, SIE::Register);
    pub const SIE: Reg = Reg;
}
pub use satp::SATP;
pub use scause::SCAUSE;
pub use sepc::SEPC;
pub use sie::SIE;
pub use sip::SIP;
pub use sscratch::SSCRATCH;
pub use sstatus::SSTATUS;
pub use stval::STVAL;
pub use stvec::STVEC;

View File

@ -0,0 +1,110 @@
use yggdrasil_abi::{error::Error, primitive_enum};
// SBI extension IDs (ASCII tags per the RISC-V SBI specification):
// Hart State Management ("HSM")
const EXT_HSM: u64 = 0x48534D;
// Timer extension ("TIME")
const EXT_TIME: u64 = 0x54494D45;
// Debug Console extension ("DBCN")
const EXT_DBCN: u64 = 0x4442434E;
// IPI extension ("sPI")
const EXT_SPI: u64 = 0x735049;
// Standard SBI status codes, as returned (negated semantics) in a0
primitive_enum! {
    pub enum Status: i64 {
        Failed = -1,
        NotSupported = -2,
        InvalidParam = -3,
        Denied = -4,
        InvalidAddress = -5,
        AlreadyAvailable = -6,
        AlreadyStarted = -7,
        AlreadyStopped = -8,
        NoShmem = -9,
        InvalidState = -10,
        BadRange = -11,
        Timeout = -12,
        Io = -13,
    }
}
// HART states as reported by the SBI HSM extension
primitive_enum! {
    pub enum HartState: u64 {
        Started = 0,
        Stopped = 1,
        StartPending = 2,
        StopPending = 3,
        Suspended = 4,
        SuspendPending = 5,
        ResumePending = 6,
    }
}
/// Error result of an SBI call: a recognized [Status] code, or the raw
/// value when it is not a known status.
pub enum SbiError {
    Status(Status),
    Other(i64),
}
impl From<i64> for SbiError {
    /// Classifies a raw a0 error value: known codes become [SbiError::Status],
    /// anything else is preserved as [SbiError::Other].
    #[inline]
    fn from(value: i64) -> Self {
        Status::try_from(value).map_or(Self::Other(value), Self::Status)
    }
}
/// Performs a raw SBI `ecall`.
///
/// Per the SBI calling convention: extension ID in a7, function ID in a6,
/// arguments in a0..a5; on return a0 holds the error code (0 = success)
/// and a1 the return value.
///
/// # Safety
///
/// The effect of the call depends entirely on `extension`/`function`; the
/// caller must ensure the requested SBI operation is safe to perform.
#[allow(clippy::too_many_arguments)]
#[inline(always)]
unsafe fn sbi_do_call(
    extension: u64,
    function: u64,
    mut a0: u64,
    mut a1: u64,
    a2: u64,
    a3: u64,
    a4: u64,
    a5: u64,
) -> Result<u64, SbiError> {
    unsafe {
        core::arch::asm!(
            "ecall",
            inlateout("a0") a0,
            inlateout("a1") a1,
            in("a2") a2,
            in("a3") a3,
            in("a4") a4,
            in("a5") a5,
            in("a6") function,
            in("a7") extension,
        );
    }
    // a0 is the SBI error code; 0 means success and a1 carries the value
    let a0 = a0 as i64;
    if a0 == 0 {
        Ok(a1)
    } else {
        Err(a0.into())
    }
}
/// HSM `hart_start`: asks the SEE to start `hart_id` executing at
/// `start_addr` with `opaque` as its argument, mapping SBI status codes to
/// kernel errors.
pub fn sbi_hart_start(hart_id: u64, start_addr: u64, opaque: u64) -> Result<(), Error> {
    match unsafe { sbi_do_call(EXT_HSM, 0x00, hart_id, start_addr, opaque, 0, 0, 0) } {
        Ok(_) => Ok(()),
        Err(SbiError::Status(Status::AlreadyAvailable)) => Err(Error::AlreadyExists),
        Err(SbiError::Status(Status::InvalidParam)) => Err(Error::DoesNotExist),
        Err(SbiError::Status(Status::InvalidAddress)) => Err(Error::InvalidArgument),
        Err(_) => Err(Error::InvalidOperation),
    }
}
/// IPI extension `send_ipi`: raises a supervisor software interrupt on the
/// HARTs selected by `hart_mask` (relative to `hart_mask_base`).
pub fn sbi_send_ipi(hart_mask: u64, hart_mask_base: u64) -> Result<(), Error> {
    match unsafe { sbi_do_call(EXT_SPI, 0x00, hart_mask, hart_mask_base, 0, 0, 0, 0) } {
        Ok(_) => Ok(()),
        Err(SbiError::Status(Status::InvalidParam)) => Err(Error::DoesNotExist),
        Err(_) => Err(Error::InvalidOperation),
    }
}
/// Debug Console extension: writes one byte, ignoring any SBI error.
pub fn sbi_debug_console_write_byte(byte: u8) {
    unsafe { sbi_do_call(EXT_DBCN, 0x02, byte as u64, 0, 0, 0, 0, 0) }.ok();
}
/// Timer extension `set_timer`: programs the next timer event, ignoring
/// any SBI error.
pub fn sbi_set_timer(next_event: u64) {
    unsafe { sbi_do_call(EXT_TIME, 0x00, next_event, 0, 0, 0, 0, 0) }.ok();
}

View File

@ -28,6 +28,8 @@ cfg_if! {
extern crate kernel_arch_x86_64 as imp;
} else if #[cfg(target_arch = "x86")] {
extern crate kernel_arch_i686 as imp;
} else if #[cfg(target_arch = "riscv64")] {
extern crate kernel_arch_riscv64 as imp;
} else {
compile_error!("Unsupported architecture");
}

View File

@ -95,10 +95,17 @@ cpuid_features! {
]
}
cpuid_features! {
pub ExtEdxFeatures: u32 [
PDPE1GB: 26
]
}
#[derive(Clone, Copy, Debug)]
pub struct CpuFeatures {
pub ecx: EcxFeatures,
pub edx: EdxFeatures,
pub ext_edx: ExtEdxFeatures,
}
impl CpuFeatures {
@ -106,6 +113,7 @@ impl CpuFeatures {
Self {
ecx: EcxFeatures::empty(),
edx: EdxFeatures::empty(),
ext_edx: ExtEdxFeatures::empty(),
}
}
@ -120,6 +128,7 @@ impl CpuFeatures {
Err(Self {
ecx: features.ecx & !self.ecx,
edx: features.edx & !self.edx,
ext_edx: features.ext_edx & !self.ext_edx,
})
}
}
@ -132,6 +141,7 @@ impl BitAnd<CpuFeatures> for CpuFeatures {
Self {
ecx: self.ecx & rhs.ecx,
edx: self.edx & rhs.edx,
ext_edx: self.ext_edx & rhs.ext_edx,
}
}
}
@ -143,6 +153,7 @@ impl BitOr<CpuFeatures> for CpuFeatures {
Self {
ecx: self.ecx | rhs.ecx,
edx: self.edx | rhs.edx,
ext_edx: self.ext_edx | rhs.ext_edx,
}
}
}
@ -151,8 +162,9 @@ impl CpuFeatureSet for CpuFeatures {
fn iter(&self) -> impl Iterator<Item = &'static str> {
let ecx = self.ecx.iter().map(|e| e.as_str());
let edx = self.edx.iter().map(|e| e.as_str());
let ext_edx = self.ext_edx.iter().map(|e| e.as_str());
core::iter::chain(ecx, edx)
core::iter::chain(core::iter::chain(ecx, edx), ext_edx)
}
}
@ -190,20 +202,26 @@ unsafe fn raw_cpuid(eax: u32, result: &mut [u32]) {
);
}
fn cpuid_features() -> (EcxFeatures, EdxFeatures) {
fn cpuid_features() -> (EcxFeatures, EdxFeatures, ExtEdxFeatures) {
let mut raw = [0; 3];
unsafe {
raw_cpuid(0x1, &mut raw);
}
(
EcxFeatures::from_bits_truncate(raw[2]),
EdxFeatures::from_bits_truncate(raw[1]),
)
let ecx = EcxFeatures::from_bits_truncate(raw[2]);
let edx = EdxFeatures::from_bits_truncate(raw[1]);
unsafe {
raw_cpuid(0x80000001, &mut raw);
}
let ext_edx = ExtEdxFeatures::from_bits_truncate(raw[1]);
(ecx, edx, ext_edx)
}
fn enable_features(ecx: EcxFeatures, edx: EdxFeatures) {
fn enable_features(ecx: EcxFeatures, edx: EdxFeatures, _ext_edx: ExtEdxFeatures) {
if ecx.contains(EcxFeatures::XSAVE) {
CR4.modify(CR4::OSXSAVE::SET);
}
@ -230,13 +248,16 @@ fn enable_features(ecx: EcxFeatures, edx: EdxFeatures) {
if ecx.contains(EcxFeatures::PCID) {
CR4.modify(CR4::PCIDE::SET);
}
if edx.contains(EdxFeatures::PSE) {
CR4.modify(CR4::PSE::SET);
}
CR0.modify(CR0::TS::CLEAR);
}
fn read_features() -> CpuFeatures {
let (ecx, edx) = cpuid_features();
CpuFeatures { ecx, edx }
let (ecx, edx, ext_edx) = cpuid_features();
CpuFeatures { ecx, edx, ext_edx }
}
pub fn setup_features(
@ -250,7 +271,7 @@ pub fn setup_features(
return (have_features, Err(missing_features));
}
enable_features(will_features.ecx, will_features.edx);
enable_features(will_features.ecx, will_features.edx, will_features.ext_edx);
(have_features, Ok(will_features))
}

View File

@ -16,6 +16,10 @@ pub trait IoPortAccess<T> {
fn read(&self) -> T;
/// Writes a value to the port
fn write(&self, value: T);
fn modify<F: FnOnce(T) -> T>(&self, f: F) {
self.write(f(self.read()));
}
}
impl<T> IoPort<T> {

View File

@ -6,4 +6,10 @@ extern crate alloc;
pub mod cpuid;
pub mod gdt;
pub mod intrinsics;
pub mod registers;
#[cfg(any(target_arch = "x86_64", rust_analyzer))]
pub const ISA_IRQ_OFFSET: u32 = 1024;
#[cfg(any(target_arch = "x86", rust_analyzer))]
pub const ISA_IRQ_OFFSET: u32 = 0;

View File

@ -146,7 +146,7 @@ mod cr2 {
}
mod cr3 {
use tock_registers::{interfaces::ReadWriteable, register_bitfields};
use tock_registers::{interfaces::Writeable, register_bitfields};
register_bitfields! {
usize,
@ -164,7 +164,7 @@ mod cr3 {
impl Reg {
pub fn set_address(&self, address: usize) {
assert_eq!(address & 0xFFF, 0);
self.modify(CR3::ADDR.val(address >> 12))
self.write(CR3::ADDR.val(address >> 12))
}
}

View File

@ -536,7 +536,7 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
for TaskContextImpl<K, PA>
{
fn drop(&mut self) {
log::info!("Drop Context {:#p}", self);
log::trace!("Drop Context {:#p}", self);
assert_eq!(self.stack_size % 0x1000, 0);
for offset in (0..self.stack_size).step_by(0x1000) {

View File

@ -1,18 +1,18 @@
#![no_std]
#![allow(clippy::new_without_default)]
#![feature(naked_functions, trait_upcasting)]
#![feature(naked_functions)]
extern crate alloc;
use core::{
ops::DerefMut,
ops::{DerefMut, Range},
sync::atomic::{AtomicUsize, Ordering},
};
use alloc::vec::Vec;
use alloc::{sync::Arc, vec::Vec};
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use kernel_arch_interface::{
cpu::{CpuImpl, IpiQueue},
cpu::{CpuData, CpuImpl, IpiQueue},
task::Scheduler,
util::OneTimeInit,
Architecture,
@ -52,14 +52,17 @@ pub struct PerCpuData {
// 0x10, used in assembly
pub tmp_address: usize,
pub local_apic: &'static dyn LocalApicInterface,
pub local_apic: Arc<dyn LocalApicInterface>,
pub available_features: CpuFeatures,
pub enabled_features: CpuFeatures,
}
impl CpuData for PerCpuData {}
impl PerCpuData {
pub fn local_apic(&self) -> &'static dyn LocalApicInterface {
self.local_apic
#[inline]
pub fn local_apic(&self) -> &dyn LocalApicInterface {
self.local_apic.as_ref()
}
}
@ -182,12 +185,7 @@ impl Architecture for ArchitectureImpl {
fn local_interrupt_controller() -> Option<&'static dyn LocalInterruptController> {
let cpu = Self::local_cpu_data()?;
Some(cpu.local_apic)
}
fn message_interrupt_controller() -> &'static dyn MessageInterruptController {
let local = Self::local_cpu_data().unwrap();
local.local_apic
Some(cpu.local_apic.as_ref())
}
fn cpu_enabled_features<S: Scheduler>(cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
@ -197,4 +195,30 @@ impl Architecture for ArchitectureImpl {
fn cpu_available_features<S: Scheduler>(cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
Some(&cpu.available_features)
}
// Cache/barrier
fn load_barrier() {
unsafe { core::arch::x86_64::_mm_lfence() };
}
fn store_barrier() {
unsafe { core::arch::x86_64::_mm_sfence() };
}
fn memory_barrier() {
unsafe { core::arch::x86_64::_mm_mfence() };
}
fn flush_virtual_range(range: Range<usize>) {
// TODO I assume 64-byte cache line on all CPUs
// TODO clflush instruction may not be available, test for it
const CLSIZE: usize = 64;
let start = range.start & !(CLSIZE - 1);
let end = (range.end + (CLSIZE - 1)) & !(CLSIZE - 1);
for line in (start..end).step_by(CLSIZE) {
unsafe { core::arch::x86_64::_mm_clflush(line as _) };
}
}
}

View File

@ -1,20 +1,18 @@
use core::{
alloc::Layout,
ops::{Deref, DerefMut},
ptr::addr_of,
sync::atomic::{AtomicUsize, Ordering},
};
use kernel_arch_interface::{
mem::{DeviceMemoryAttributes, KernelTableManager, RawDeviceMemoryMapping},
sync::split_spinlock,
split_spinlock,
};
use kernel_arch_x86::registers::CR3;
use libk_mm_interface::{
address::PhysicalAddress,
table::{page_index, EntryLevel, EntryLevelExt},
};
use memtables::x86_64::FixedTables;
use static_assertions::{const_assert_eq, const_assert_ne};
use yggdrasil_abi::error::Error;
@ -51,12 +49,12 @@ const RAM_MAPPING_L0I: usize = KERNEL_L0_INDEX - 1;
const DEVICE_MAPPING_L3_COUNT: usize = 4;
split_spinlock! {
use crate::ArchitectureImpl;
use crate::mem::FixedTables;
use libk_mm_interface::KernelImageObject;
use memtables::x86_64::FixedTables;
use crate::ArchitectureImpl;
#[link_section = ".data.tables"]
static KERNEL_TABLES<lock: ArchitectureImpl>: KernelImageObject<FixedTables> =
static KERNEL_TABLES: KernelImageObject<FixedTables> =
unsafe { KernelImageObject::new(FixedTables::zeroed()) };
}
@ -129,6 +127,7 @@ unsafe fn map_early_pages(physical: PhysicalAddress, count: usize) -> Result<usi
// TODO NX, NC
EARLY_MAPPING_L3[i + l3i] =
PageEntry::page(physical.add(i * L3::SIZE), PageAttributes::WRITABLE);
flush_tlb_entry(EARLY_MAPPING_OFFSET + (i + l3i) * L3::SIZE);
}
return Ok(EARLY_MAPPING_OFFSET + l3i * L3::SIZE);
@ -223,6 +222,7 @@ unsafe fn map_device_memory(
let address = base_address + l2_offset;
Ok(RawDeviceMemoryMapping::from_raw_parts(
l2_aligned.into_u64(),
address,
base_address,
page_count,
@ -234,6 +234,7 @@ unsafe fn map_device_memory(
let address = base_address + l3_offset;
Ok(RawDeviceMemoryMapping::from_raw_parts(
l3_aligned.into_u64(),
address,
base_address,
page_count,
@ -337,6 +338,15 @@ pub fn clone_kernel_tables(dst: &mut PageTable<L0>) {
}
}
pub fn auto_address<T>(pointer: *const T) -> usize {
let address = pointer.addr();
if address < KERNEL_VIRT_OFFSET {
address
} else {
address - KERNEL_VIRT_OFFSET
}
}
/// Sets up the following memory map:
/// ...: KERNEL_TABLES.l0:
/// * 0xFFFFFF0000000000 .. 0xFFFFFFFF8000000000 : RAM_MAPPING_L1
@ -355,15 +365,15 @@ pub fn clone_kernel_tables(dst: &mut PageTable<L0>) {
/// Unsafe, must only be called by BSP during its early init, must already be in "higher-half"
pub unsafe fn init_fixed_tables() {
let mut tables = KERNEL_TABLES.lock();
// TODO this could be built in compile-time too?
let early_mapping_l3_phys = addr_of!(EARLY_MAPPING_L3) as usize - KERNEL_VIRT_OFFSET;
let device_mapping_l2_phys = addr_of!(DEVICE_MAPPING_L2) as usize - KERNEL_VIRT_OFFSET;
let ram_mapping_l1_phys = addr_of!(RAM_MAPPING_L1) as usize - KERNEL_VIRT_OFFSET;
let early_mapping_l3_phys = auto_address(&raw const EARLY_MAPPING_L3);
let device_mapping_l2_phys = auto_address(&raw const DEVICE_MAPPING_L2);
let ram_mapping_l1_phys = auto_address(&raw const RAM_MAPPING_L1);
for i in 0..DEVICE_MAPPING_L3_COUNT {
let device_mapping_l3_phys = PhysicalAddress::from_usize(
&DEVICE_MAPPING_L3S[i] as *const _ as usize - KERNEL_VIRT_OFFSET,
);
let device_mapping_l3_phys =
PhysicalAddress::from_usize(auto_address(&raw const DEVICE_MAPPING_L3S[i]));
DEVICE_MAPPING_L2[i] = PageEntry::table(device_mapping_l3_phys, PageAttributes::WRITABLE);
}
@ -380,7 +390,7 @@ pub unsafe fn init_fixed_tables() {
(ram_mapping_l1_phys as u64) | (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();
// TODO ENABLE EFER.NXE
let cr3 = (&raw const tables.l0).addr() - KERNEL_VIRT_OFFSET;
let cr3 = auto_address(&raw const tables.l0);
CR3.set_address(cr3);
}

View File

@ -4,7 +4,7 @@ use core::marker::PhantomData;
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::PhysicalRefMut,
process::ProcessAddressSpaceManager,
process::{PageAttributeUpdate, ProcessAddressSpaceManager},
table::{
EntryLevel, EntryLevelDrop, EntryLevelExt, MapAttributes, NextPageTable, TableAllocator,
},
@ -61,7 +61,15 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
self.write_l3_entry(address, PageEntry::page(physical, flags.into()), false)
}
unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error> {
unsafe fn update_page_attributes(
&mut self,
address: usize,
update: &PageAttributeUpdate,
) -> Result<(), Error> {
self.update_l3_entry(address, |entry| entry.update(update))
}
unsafe fn unmap_page(&mut self, address: usize) -> Result<(PhysicalAddress, bool), Error> {
self.pop_l3_entry(address)
}
@ -71,9 +79,9 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
.ok_or(Error::InvalidMemoryOperation)
}
fn as_address_with_asid(&self) -> u64 {
fn as_address_with_asid(&self) -> (u64, u64) {
// TODO x86-64 PCID/ASID?
unsafe { self.l0.as_physical_address().into_u64() }
(unsafe { self.l0.as_physical_address().into_u64() }, 0)
}
unsafe fn clear(&mut self) {
@ -111,7 +119,33 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
Ok(())
}
fn pop_l3_entry(&mut self, virt: usize) -> Result<PhysicalAddress, Error> {
/// Applies `mapper` to the L3 (terminal, 4KiB-page) entry translating `virt`,
/// then flushes the TLB entry for that address.
///
/// Returns `Error::DoesNotExist` if any intermediate table is missing or the
/// final entry is not present.
fn update_l3_entry<F: FnOnce(&mut PageEntry<L3>) -> Result<(), Error>>(
&mut self,
virt: usize,
mapper: F,
) -> Result<(), Error> {
// Table indices for each level of the 4-level walk
let l0i = virt.page_index::<L0>();
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
let l3i = virt.page_index::<L3>();
let mut l1 = self.l0.get_mut(l0i).ok_or(Error::DoesNotExist)?;
let mut l2 = l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;
let entry = &mut l3[l3i];
if !entry.is_present() {
return Err(Error::DoesNotExist);
}
mapper(entry)?;
// The entry may have changed — invalidate the stale cached translation
unsafe {
flush_tlb_entry(virt);
}
Ok(())
}
fn pop_l3_entry(&mut self, virt: usize) -> Result<(PhysicalAddress, bool), Error> {
let l0i = virt.page_index::<L0>();
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
@ -123,13 +157,14 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;
let page = l3[l3i].as_page().ok_or(Error::DoesNotExist)?;
let dirty = l3[l3i].is_dirty();
l3[l3i] = PageEntry::INVALID;
unsafe {
flush_tlb_entry(virt);
}
Ok(page)
Ok((page, dirty))
}
fn read_l3_entry(&self, virt: usize) -> Option<(PhysicalAddress, MapAttributes)> {

View File

@ -8,6 +8,7 @@ use bitflags::bitflags;
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::{PhysicalRef, PhysicalRefMut},
process::PageAttributeUpdate,
table::{
EntryLevel, EntryLevelDrop, MapAttributes, NextPageTable, NonTerminalEntryLevel,
TableAllocator,
@ -31,6 +32,8 @@ bitflags! {
/// For tables, allows user access to further translation levels, for pages/blocks, allows
/// user access to the region covered by the entry
const USER = 1 << 2;
/// If set, the page has been written to
const DIRTY = 1 << 6;
}
}
@ -98,11 +101,15 @@ impl PageEntry<L3> {
/// not
pub fn as_page(self) -> Option<PhysicalAddress> {
if self.0 & PageAttributes::PRESENT.bits() != 0 {
Some(PhysicalAddress::from_u64(self.0 & !0xFFF))
Some(PhysicalAddress::from_u64(self.0 & !Self::ATTR_MASK))
} else {
None
}
}
/// Returns `true` if the DIRTY bit is set, i.e. the page covered by this
/// entry has been written to (the bit is set by hardware on write).
pub fn is_dirty(&self) -> bool {
self.0 & PageAttributes::DIRTY.bits() != 0
}
}
impl PageEntry<L2> {
@ -145,7 +152,7 @@ impl<L: NonTerminalEntryLevel> PageEntry<L> {
if self.0 & PageAttributes::PRESENT.bits() != 0
&& self.0 & PageAttributes::BLOCK.bits() == 0
{
Some(PhysicalAddress::from_u64(self.0 & !0xFFF))
Some(PhysicalAddress::from_u64(self.0 & !Self::ATTR_MASK))
} else {
None
}
@ -158,6 +165,8 @@ impl<L: NonTerminalEntryLevel> PageEntry<L> {
}
impl<L: EntryLevel> PageEntry<L> {
const ATTR_MASK: u64 = 0xFFF | (1 << 63);
/// An entry that is not mapped
pub const INVALID: Self = Self(0, PhantomData);
@ -179,6 +188,21 @@ impl<L: EntryLevel> PageEntry<L> {
pub fn is_present(&self) -> bool {
self.0 & PageAttributes::PRESENT.bits() != 0
}
/// Applies a `PageAttributeUpdate` to this entry, rewriting only the bits
/// covered by `ATTR_MASK` and leaving the address bits untouched.
pub fn update(&mut self, update: &PageAttributeUpdate) -> Result<(), Error> {
let mut attrs = PageAttributes::from_bits_retain(self.0);
// Only user_write is honored here; toggle the WRITABLE bit accordingly
if let Some(write) = update.user_write {
if write {
attrs |= PageAttributes::WRITABLE;
} else {
attrs &= !PageAttributes::WRITABLE;
}
}
// Dirty is ignored, it's hardware-managed
// Clear the old attribute bits, then merge in the updated set
self.0 &= !Self::ATTR_MASK;
self.0 |= attrs.bits() & Self::ATTR_MASK;
Ok(())
}
}
impl<L: EntryLevel> PageTable<L> {

View File

@ -92,6 +92,7 @@ fn main() {
"x86" => (),
"x86_64" => build_x86_64(),
"aarch64" => (),
"riscv64" => (),
_ => panic!("Unknown target arch: {:?}", arch),
}
}

View File

@ -0,0 +1,18 @@
[package]
name = "ygg_driver_acpi"
version = "0.1.0"
edition = "2024"
[dependencies]
libk-util.workspace = true
libk-mm.workspace = true
libk.workspace = true
device-api.workspace = true
kernel-arch-x86.path = "../../arch/x86"
acpi.workspace = true
rsdp.workspace = true
aml.workspace = true
acpi-system.workspace = true
log.workspace = true

View File

@ -0,0 +1,131 @@
use core::time::Duration;
use crate::AcpiHandlerImpl;
/// AML interpreter backend: forwards port/memory I/O to the
/// `acpi_system::Handler` implementation on the same type; PCI config
/// space and EC access are stubbed out (reads return all-ones/zero,
/// writes are dropped).
impl aml::Handler for AcpiHandlerImpl {
// --- Port I/O: delegate to the acpi_system handler ---
fn read_io_u8(&self, port: u16) -> u8 {
<Self as acpi_system::Handler>::io_read_u8(port)
}
fn read_io_u16(&self, port: u16) -> u16 {
<Self as acpi_system::Handler>::io_read_u16(port)
}
fn read_io_u32(&self, port: u16) -> u32 {
<Self as acpi_system::Handler>::io_read_u32(port)
}
fn write_io_u8(&self, port: u16, value: u8) {
<Self as acpi_system::Handler>::io_write_u8(port, value)
}
fn write_io_u16(&self, port: u16, value: u16) {
<Self as acpi_system::Handler>::io_write_u16(port, value)
}
fn write_io_u32(&self, port: u16, value: u32) {
<Self as acpi_system::Handler>::io_write_u32(port, value)
}
// --- Physical memory access: delegate, widening the address to u64 ---
fn read_u8(&self, address: usize) -> u8 {
<Self as acpi_system::Handler>::mem_read_u8(address as u64)
}
fn read_u16(&self, address: usize) -> u16 {
<Self as acpi_system::Handler>::mem_read_u16(address as u64)
}
fn read_u32(&self, address: usize) -> u32 {
<Self as acpi_system::Handler>::mem_read_u32(address as u64)
}
fn read_u64(&self, address: usize) -> u64 {
<Self as acpi_system::Handler>::mem_read_u64(address as u64)
}
fn write_u8(&self, address: usize, value: u8) {
<Self as acpi_system::Handler>::mem_write_u8(address as u64, value)
}
fn write_u16(&self, address: usize, value: u16) {
<Self as acpi_system::Handler>::mem_write_u16(address as u64, value)
}
fn write_u32(&self, address: usize, value: u32) {
<Self as acpi_system::Handler>::mem_write_u32(address as u64, value)
}
fn write_u64(&self, address: usize, value: u64) {
<Self as acpi_system::Handler>::mem_write_u64(address as u64, value)
}
// --- PCI config space: not implemented; reads return all-ones (the
// conventional "no device" pattern), writes are silently discarded ---
fn read_pci_u8(&self, _segment: u16, _bus: u8, _device: u8, _function: u8, _offset: u16) -> u8 {
0xFF
}
fn read_pci_u16(
&self,
_segment: u16,
_bus: u8,
_device: u8,
_function: u8,
_offset: u16,
) -> u16 {
0xFFFF
}
fn read_pci_u32(
&self,
_segment: u16,
_bus: u8,
_device: u8,
_function: u8,
_offset: u16,
) -> u32 {
0xFFFFFFFF
}
fn write_pci_u8(
&self,
_segment: u16,
_bus: u8,
_device: u8,
_function: u8,
_offset: u16,
_value: u8,
) {
}
fn write_pci_u16(
&self,
_segment: u16,
_bus: u8,
_device: u8,
_function: u8,
_offset: u16,
_value: u16,
) {
}
fn write_pci_u32(
&self,
_segment: u16,
_bus: u8,
_device: u8,
_function: u8,
_offset: u16,
_value: u32,
) {
}
// --- Embedded controller: not implemented ---
fn read_ec_u8(&self, _address: u64) -> u8 {
0x00
}
fn write_ec_u8(&self, _address: u64, _value: u8) {}
// Not implemented yet: panics if AML executes a Sleep opcode
fn sleep(&self, _duration: Duration) {
todo!()
// util::polling_sleep(duration).unwrap();
}
}

View File

@ -0,0 +1,171 @@
use core::{ptr::NonNull, time::Duration};
use acpi::PhysicalMapping;
use acpi_system::AcpiSystemError;
use alloc::sync::Arc;
use device_api::{
device::Device,
interrupt::{InterruptHandler, Irq, IrqVector},
};
use kernel_arch_x86::{intrinsics, ISA_IRQ_OFFSET};
use libk::device::external_interrupt_controller;
use libk_mm::{
address::{PhysicalAddress, Virtualize},
pointer::PhysicalRef,
};
use crate::{
mem::{read_memory, write_memory},
ACPI_SYSTEM,
};
#[derive(Clone, Copy)]
#[doc(hidden)]
pub struct AcpiHandlerImpl;
struct SciHandler;
/// Platform backend for `acpi_system`: raw port I/O via x86 `in`/`out`
/// intrinsics, physical memory access via temporary device mappings, and
/// SCI interrupt installation through the external interrupt controller.
/// All accesses are trace-logged.
impl acpi_system::Handler for AcpiHandlerImpl {
type MappedSlice = PhysicalRef<'static, [u8]>;
unsafe fn map_slice(address: u64, length: u64) -> Self::MappedSlice {
unsafe {
PhysicalRef::map_slice(
PhysicalAddress::from_u64(address),
length.try_into().unwrap(),
)
}
}
// --- Port I/O, logged at trace level ---
fn io_read_u8(port: u16) -> u8 {
let value = unsafe { intrinsics::inb(port) };
log::trace!("io_read_u8 {:#x} <- {:#x}", port, value);
value
}
fn io_read_u16(port: u16) -> u16 {
let value = unsafe { intrinsics::inw(port) };
log::trace!("io_read_u16 {:#x} <- {:#x}", port, value);
value
}
fn io_read_u32(port: u16) -> u32 {
let value = unsafe { intrinsics::inl(port) };
log::trace!("io_read_u32 {:#x} <- {:#x}", port, value);
value
}
fn io_write_u8(port: u16, value: u8) {
log::trace!("io_write_u8 {:#x}, {:#x}", port, value);
unsafe { intrinsics::outb(port, value) }
}
fn io_write_u16(port: u16, value: u16) {
log::trace!("io_write_u16 {:#x}, {:#x}", port, value);
unsafe { intrinsics::outw(port, value) }
}
fn io_write_u32(port: u16, value: u32) {
log::trace!("io_write_u32 {:#x}, {:#x}", port, value);
unsafe { intrinsics::outl(port, value) }
}
// --- Physical memory access through crate::mem helpers ---
fn mem_read_u8(address: u64) -> u8 {
let value = unsafe { read_memory(PhysicalAddress::from_u64(address)) };
log::trace!("mem_read_u8 {:#x} -> {:#x}", address, value);
value
}
fn mem_read_u16(address: u64) -> u16 {
let value = unsafe { read_memory(PhysicalAddress::from_u64(address)) };
log::trace!("mem_read_u16 {:#x} -> {:#x}", address, value);
value
}
fn mem_read_u32(address: u64) -> u32 {
let value = unsafe { read_memory(PhysicalAddress::from_u64(address)) };
log::trace!("mem_read_u32 {:#x} -> {:#x}", address, value);
value
}
fn mem_read_u64(address: u64) -> u64 {
let value = unsafe { read_memory(PhysicalAddress::from_u64(address)) };
log::trace!("mem_read_u64 {:#x} -> {:#x}", address, value);
value
}
fn mem_write_u8(address: u64, value: u8) {
log::trace!("mem_write_u8 {:#x}, {:#x}", address, value);
unsafe { write_memory(PhysicalAddress::from_u64(address), value) }
}
fn mem_write_u16(address: u64, value: u16) {
log::trace!("mem_write_u16 {:#x}, {:#x}", address, value);
unsafe { write_memory(PhysicalAddress::from_u64(address), value) }
}
fn mem_write_u32(address: u64, value: u32) {
log::trace!("mem_write_u32 {:#x}, {:#x}", address, value);
unsafe { write_memory(PhysicalAddress::from_u64(address), value) }
}
fn mem_write_u64(address: u64, value: u64) {
log::trace!("mem_write_u64 {:#x}, {:#x}", address, value);
unsafe { write_memory(PhysicalAddress::from_u64(address), value) }
}
// Registers SciHandler for the SCI line; the raw ACPI IRQ number is an
// ISA IRQ, hence the ISA_IRQ_OFFSET rebase before registration.
fn install_interrupt_handler(irq: u32) -> Result<(), AcpiSystemError> {
log::info!("Installing ACPI SCI handler at IRQ #{}", irq);
let intc = external_interrupt_controller().expect("No external intc");
let handler = Arc::new(SciHandler);
let irq = Irq::External(irq + ISA_IRQ_OFFSET);
intc.register_irq(irq, Default::default(), handler).unwrap();
intc.enable_irq(irq).unwrap();
Ok(())
}
fn stall(_duration: Duration) {
// TODO polling_sleep is not yet implemented properly
todo!()
// util::polling_sleep(duration).ok();
}
}
/// Table-mapping backend for the `rsdp`/`acpi` crates: physical table
/// addresses are translated through the kernel's physical-memory window
/// (`virtualize()`), so no per-mapping resources are created and unmap is
/// a no-op.
impl rsdp::handler::AcpiHandler for AcpiHandlerImpl {
unsafe fn map_physical_region<T>(
&self,
physical_address: usize,
size: usize,
) -> PhysicalMapping<Self, T> {
unsafe {
PhysicalMapping::new(
physical_address,
NonNull::new_unchecked(
PhysicalAddress::from_usize(physical_address).virtualize() as *mut T
),
// mapped length == requested length: the whole window is mapped
size,
size,
*self,
)
}
}
// Nothing to release — see map_physical_region above
fn unmap_physical_region<T>(_region: &acpi::PhysicalMapping<Self, T>) {}
}
/// IRQ entry point for the ACPI SCI: forwards the event to the global
/// ACPI system and reports the interrupt as handled.
impl InterruptHandler for SciHandler {
fn handle_irq(self: Arc<Self>, _vector: IrqVector) -> bool {
log::trace!("ACPI SCI received");
ACPI_SYSTEM.get().lock().handle_sci();
// Always claim the IRQ — SCI is level-based and owned by ACPI
true
}
}
/// Minimal `Device` identity so the SCI handler can be registered with the
/// interrupt controller like any other device.
impl Device for SciHandler {
fn display_name(&self) -> &str {
"ACPI SCI handler"
}
}

View File

@ -0,0 +1,89 @@
#![feature(allocator_api)]
#![no_std]
use acpi::AcpiTables;
use acpi_system::{AcpiInterruptMethod, AcpiSystem};
use alloc::boxed::Box;
use libk::error::Error;
use libk_util::{sync::IrqSafeSpinlock, OneTimeInit};
extern crate alloc;
pub mod mem;
pub use mem::AcpiAllocator;
pub mod handler;
pub use handler::AcpiHandlerImpl;
pub mod aml_handler;
pub use acpi_system::{
EventAction, FixedEvent, InterruptPolarity, InterruptTrigger, IrqDescriptor, PciPin,
};
static ACPI_SYSTEM: OneTimeInit<IrqSafeSpinlock<AcpiSystem<AcpiHandlerImpl>>> = OneTimeInit::new();
pub fn add_event_handler<F: Fn(&AcpiSystem<AcpiHandlerImpl>) -> EventAction + 'static>(
event: &FixedEvent,
handler: F,
) -> Result<(), Error> {
ACPI_SYSTEM
.get()
.lock()
.enable_fixed_event(event, Box::new(handler))
.map_err(|_| Error::InvalidArgument)
}
/// Resolves the interrupt routing for a PCI device pin via the ACPI `_PRT`
/// object at `aml_path`. Returns `None` if the lookup fails.
///
/// Requires the global ACPI system to be initialized first.
pub fn get_pci_route(
    aml_path: &str,
    device: u16,
    function: u16,
    pin: PciPin,
) -> Option<IrqDescriptor> {
    let mut acpi = ACPI_SYSTEM.get().lock();
    acpi.pci_route(aml_path, device, function, pin).ok()
}
/// Initializes ACPI management
pub fn switch_to_acpi(tables: &'static AcpiTables<AcpiHandlerImpl>) -> Result<(), Error> {
// NOTE mostly broken for real HW
let mut system = AcpiSystem::new(tables, Box::new(AcpiHandlerImpl)).unwrap();
system.initialize(AcpiInterruptMethod::Apic).unwrap();
// system
// .enable_fixed_event(
// &FixedEvent::POWER_BUTTON,
// Box::new(|_| {
// log::info!("Power button was pressed");
// // TODO the correct way would be to
// // 1. Nicely ask all the processes to quit
// // 2. Wait for some time
// // 3. Kill the remaining ones
// // 4. Halt other cores
// // 5. Sync filesystem
// // 6. Do something with the devices
// // 7. Actually enter the S5 state
// unsafe {
// PLATFORM
// .send_ipi(IpiDeliveryTarget::OtherCpus, IpiMessage::Shutdown)
// .unwrap();
// }
// SHUTDOWN_FENCE.signal();
// SHUTDOWN_FENCE.wait_all(CPU_COUNT.load(Ordering::Acquire));
// log::info!("CPUs are parked, can shutdown now");
// EventAction::EnterSleepState(AcpiSleepState::S5)
// }),
// )
// .unwrap();
ACPI_SYSTEM.init(IrqSafeSpinlock::new(system));
Ok(())
}

View File

@ -0,0 +1,64 @@
//! ACPI memory IO and management functions
use core::{
alloc::{AllocError, Allocator, GlobalAlloc, Layout},
ptr::NonNull,
};
use libk_mm::{address::PhysicalAddress, device::DeviceMemoryMapping, heap::GLOBAL_HEAP};
#[derive(Clone, Copy)]
#[doc(hidden)]
pub struct AcpiAllocator;
/// `Allocator` adapter over the kernel's global heap so the ACPI crates can
/// allocate through the `allocator_api`. Every allocation and deallocation
/// is trace-logged.
unsafe impl Allocator for AcpiAllocator {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let ptr = unsafe { GLOBAL_HEAP.alloc(layout) };
log::trace!("ACPI alloc: {:?} -> {:p}", layout, ptr);
// Null from the heap is mapped to AllocError rather than a panic
if ptr.is_null() {
Err(AllocError)
} else {
// SAFETY: ptr was just checked to be non-null
unsafe {
Ok(NonNull::slice_from_raw_parts(
NonNull::new_unchecked(ptr),
layout.size(),
))
}
}
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
log::trace!("ACPI dealloc: {:?}, {:?}", ptr, layout);
// SAFETY: caller guarantees ptr came from allocate() with this layout
unsafe { GLOBAL_HEAP.dealloc(ptr.as_ptr(), layout) };
}
}
// TODO don't map memory as device if not necessary
/// Reads a `T` from physical memory through a temporary device mapping.
///
/// Aligned addresses use a volatile read; unaligned ones fall back to
/// `read_unaligned` (which is not volatile — NOTE(review): confirm this is
/// acceptable for MMIO at unaligned addresses).
///
/// # Safety
///
/// `address` must point to memory that is valid to read as a `T`.
pub unsafe fn read_memory<T>(address: PhysicalAddress) -> T {
let io =
unsafe { DeviceMemoryMapping::map(address, size_of::<T>(), Default::default()).unwrap() };
// Virtual address of the freshly created mapping (shadows the physical one)
let address = io.address();
unsafe {
if address % align_of::<T>() == 0 {
(address as *const T).read_volatile()
} else {
(address as *const T).read_unaligned()
}
}
}
/// Writes `value` to physical memory through a temporary device mapping.
///
/// Aligned addresses use a volatile write; unaligned ones fall back to
/// `write_unaligned` (not volatile — NOTE(review): confirm acceptability
/// for MMIO, mirroring `read_memory`).
///
/// # Safety
///
/// `address` must point to memory that is valid to write as a `T`.
pub unsafe fn write_memory<T>(address: PhysicalAddress, value: T) {
let io =
unsafe { DeviceMemoryMapping::map(address, size_of::<T>(), Default::default()).unwrap() };
// Virtual address of the freshly created mapping (shadows the physical one)
let address = io.address();
unsafe {
if address % align_of::<T>() == 0 {
(address as *mut T).write_volatile(value)
} else {
(address as *mut T).write_unaligned(value)
}
}
}

View File

@ -1,12 +1,10 @@
use core::mem::{size_of, MaybeUninit};
use libk_mm::{
address::{AsPhysicalAddress, PhysicalAddress},
PageBox, PageSlice,
};
use device_api::dma::DmaAllocator;
use libk::dma::{BusAddress, DmaBuffer, DmaSliceMut};
use tock_registers::register_structs;
use crate::{data::AtaString, error::AhciError, MAX_PRD_SIZE, SECTOR_SIZE};
use crate::{data::AtaString, error::AhciError, MAX_PRD_SIZE};
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[repr(u8)]
@ -22,7 +20,7 @@ pub trait AtaCommand {
fn lba(&self) -> u64;
fn sector_count(&self) -> usize;
fn buffer(&self) -> Option<(PhysicalAddress, usize)>;
fn buffer(&self) -> Option<(BusAddress, usize)>;
unsafe fn into_response(self) -> Self::Response;
fn prd_count(&self) -> usize {
@ -64,44 +62,41 @@ register_structs! {
}
pub struct AtaIdentify {
buffer: PageBox<MaybeUninit<AtaIdentifyResponse>>,
buffer: DmaBuffer<MaybeUninit<AtaIdentifyResponse>>,
}
pub struct AtaReadDmaEx {
lba: u64,
sector_count: usize,
buffer_base: PhysicalAddress,
buffer_base: BusAddress,
buffer_size: usize,
}
impl AtaIdentify {
pub fn create() -> Result<Self, AhciError> {
PageBox::new_uninit()
pub fn create(dma: &dyn DmaAllocator) -> Result<Self, AhciError> {
DmaBuffer::new_uninit(dma)
.map(Self::with_data)
.map_err(AhciError::MemoryError)
}
pub fn with_data(buffer: PageBox<MaybeUninit<AtaIdentifyResponse>>) -> Self {
pub fn with_data(buffer: DmaBuffer<MaybeUninit<AtaIdentifyResponse>>) -> Self {
Self { buffer }
}
}
impl AtaReadDmaEx {
pub fn new(lba: u64, sector_count: usize, buffer: &PageSlice<MaybeUninit<u8>>) -> Self {
assert_eq!(buffer.len() % SECTOR_SIZE, 0);
assert_ne!(buffer.len(), 0);
pub fn new(lba: u64, sector_count: usize, buffer: DmaSliceMut<MaybeUninit<u8>>) -> Self {
Self {
lba,
sector_count,
buffer_base: unsafe { buffer.as_physical_address() },
buffer_base: buffer.bus_address(),
buffer_size: buffer.len(),
}
}
}
impl AtaCommand for AtaIdentify {
type Response = PageBox<AtaIdentifyResponse>;
type Response = DmaBuffer<AtaIdentifyResponse>;
const COMMAND_ID: AtaCommandId = AtaCommandId::Identify;
@ -113,14 +108,14 @@ impl AtaCommand for AtaIdentify {
0
}
fn buffer(&self) -> Option<(PhysicalAddress, usize)> {
let base = unsafe { self.buffer.as_physical_address() };
fn buffer(&self) -> Option<(BusAddress, usize)> {
let base = self.buffer.bus_address();
let size = size_of::<AtaIdentifyResponse>();
Some((base, size))
}
unsafe fn into_response(self) -> Self::Response {
self.buffer.assume_init()
DmaBuffer::assume_init(self.buffer)
}
}
@ -137,7 +132,7 @@ impl AtaCommand for AtaReadDmaEx {
self.sector_count
}
fn buffer(&self) -> Option<(PhysicalAddress, usize)> {
fn buffer(&self) -> Option<(BusAddress, usize)> {
Some((self.buffer_base, self.buffer_size))
}

View File

@ -2,7 +2,7 @@ use core::mem::size_of;
use alloc::string::String;
use bytemuck::{Pod, Zeroable};
use libk_mm::address::PhysicalAddress;
use libk::dma::BusAddress;
use libk_util::{ConstAssert, IsTrue};
use static_assertions::const_assert_eq;
@ -166,7 +166,7 @@ impl CommandTable {
}
assert_eq!(prd, command.prd_count());
self.prdt[prd..].fill_with(|| PhysicalRegionDescriptor::zeroed());
self.prdt[prd..].fill_with(PhysicalRegionDescriptor::zeroed);
}
Ok(())
@ -174,7 +174,7 @@ impl CommandTable {
}
impl CommandListEntry {
pub fn new(command_table_entry: PhysicalAddress, prd_count: usize) -> Result<Self, AhciError> {
pub fn new(command_table_entry: BusAddress, prd_count: usize) -> Result<Self, AhciError> {
if prd_count > 0xFFFF {
todo!()
}
@ -183,7 +183,7 @@ impl CommandListEntry {
attr: (size_of::<RegisterHostToDeviceFis>() / size_of::<u32>()) as _,
prdtl: prd_count as _,
prdbc: 0,
ctba: command_table_entry.into(),
ctba: command_table_entry.into_u64(),
_0: [0; 4],
})
}
@ -201,18 +201,14 @@ unsafe impl Zeroable for CommandTable {
}
impl PhysicalRegionDescriptor {
pub fn new(
address: PhysicalAddress,
byte_count: usize,
is_last: bool,
) -> Result<Self, AhciError> {
pub fn new(address: BusAddress, byte_count: usize, is_last: bool) -> Result<Self, AhciError> {
if byte_count > MAX_PRD_SIZE {
return Err(AhciError::RegionTooLarge);
}
let dbc_mask = (is_last as u32) << 31;
Ok(Self {
buffer_address: address.into(),
buffer_address: address.into_u64(),
_0: 0,
dbc: ((byte_count as u32 - 1) << 1) | 1 | dbc_mask,
})

View File

@ -4,29 +4,27 @@
extern crate alloc;
use alloc::{boxed::Box, format, vec, vec::Vec};
use alloc::{format, sync::Arc, vec::Vec};
use bytemuck::Zeroable;
use data::ReceivedFis;
use device_api::{
interrupt::{InterruptAffinity, InterruptHandler},
Device,
device::{Device, DeviceInitContext},
dma::DmaAllocator,
interrupt::{InterruptAffinity, InterruptHandler, IrqVector},
};
use error::AhciError;
use kernel_fs::devfs;
use libk::{
task::runtime,
vfs::block::{probe_partitions, NgBlockDeviceWrapper},
};
use libk_mm::{address::AsPhysicalAddress, device::DeviceMemoryIo, PageBox};
use libk::{device::manager::probe_partitions, dma::DmaBuffer, fs::devfs, task::runtime};
use libk_mm::device::DeviceMemoryIo;
use libk_util::{sync::IrqSafeSpinlock, OneTimeInit};
use port::AhciPort;
use regs::{PortRegs, Regs};
use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
use ygg_driver_pci::{
device::{PciDeviceInfo, PreferredInterruptMode},
macros::pci_driver,
PciCommandRegister, PciConfigurationSpace,
};
use yggdrasil_abi::error::Error;
use yggdrasil_abi::{error::Error, io::FileMode};
use crate::regs::{Version, CAP, GHC, SSTS};
@ -43,8 +41,9 @@ const MAX_DRIVES: usize = (b'z' - b'a') as usize;
pub struct AhciController {
regs: IrqSafeSpinlock<DeviceMemoryIo<'static, Regs>>,
ports: OneTimeInit<Vec<&'static AhciPort>>,
received_fis_buffers: OneTimeInit<[Option<PageBox<ReceivedFis>>; 16]>,
dma: Arc<dyn DmaAllocator>,
ports: OneTimeInit<Vec<Arc<AhciPort>>>,
received_fis_buffers: OneTimeInit<[Option<DmaBuffer<ReceivedFis>>; 16]>,
version: Version,
max_port_count: usize,
@ -53,7 +52,7 @@ pub struct AhciController {
}
impl AhciController {
async fn late_init(&'static self) -> Result<(), AhciError> {
async fn late_init(self: Arc<Self>) -> Result<(), AhciError> {
log::info!("Initializing AHCI SATA Controller {:?}", self.version);
let regs = self.regs.lock();
@ -70,7 +69,7 @@ impl AhciController {
let pi = regs.PI.get();
let mut ports = vec![];
let mut ports = Vec::new();
drop(regs);
@ -84,8 +83,9 @@ impl AhciController {
let regs = self.regs.lock();
let port = &regs.PORTS[i];
let buffer = PageBox::new(ReceivedFis::zeroed()).map_err(AhciError::MemoryError)?;
port.set_received_fis_address_64(unsafe { buffer.as_physical_address() });
let buffer = DmaBuffer::new(&*self.dma, ReceivedFis::zeroed())
.map_err(AhciError::MemoryError)?;
port.set_received_fis_address_64(buffer.bus_address());
*fis_buffer_slot = Some(buffer);
}
@ -117,7 +117,7 @@ impl AhciController {
drop(regs);
let port = match AhciPort::create(port, self, i) {
let port = match AhciPort::create(port, self.clone(), i) {
Ok(port) => port,
Err(error) => {
log::warn!("Port {} init error: {:?}", i, error);
@ -134,13 +134,13 @@ impl AhciController {
self.regs.lock().GHC.modify(GHC::IE::SET);
// Setup the detected ports
for (i, &port) in ports.iter().enumerate() {
for (i, port) in ports.iter().enumerate() {
log::info!("Init port {}", i);
port.init().await?;
port.init_inner().await?;
}
// Dump info about the drives
for (i, &port) in ports.iter().enumerate() {
for (i, port) in ports.iter().enumerate() {
let info = port.info().unwrap();
log::info!(
"Port {}: model={:?}, serial={:?}, lba_count={}",
@ -151,25 +151,8 @@ impl AhciController {
);
}
{
let mut lock = SATA_DRIVES.lock();
for &port in ports.iter() {
let n = lock.len();
if n >= MAX_DRIVES {
todo!("Too many drives, ran out of letters");
}
let n = n as u8;
lock.push(port);
let name = format!("sd{}", (n + b'a') as char);
let blk = NgBlockDeviceWrapper::new(port);
devfs::add_named_block_device(blk, name.clone()).ok();
probe_partitions(blk, move |index, partition| {
devfs::add_block_device_partition(name.clone(), index, partition)
})
.ok();
}
for port in ports.iter() {
register_sata_drive(port.clone(), true);
}
log::debug!("All ports initialized");
@ -179,7 +162,7 @@ impl AhciController {
}
impl InterruptHandler for AhciController {
fn handle_irq(&self, _vector: Option<usize>) -> bool {
fn handle_irq(self: Arc<Self>, _vector: IrqVector) -> bool {
let regs = self.regs.lock();
let is = regs.IS.get();
@ -188,7 +171,7 @@ impl InterruptHandler for AhciController {
// Clear global interrupt status
regs.IS.set(u32::MAX);
for &port in ports {
for port in ports {
if is & (1 << port.index) != 0 {
port.handle_pending_interrupts();
}
@ -201,58 +184,106 @@ impl InterruptHandler for AhciController {
}
impl Device for AhciController {
unsafe fn init(&'static self) -> Result<(), Error> {
unsafe fn init(self: Arc<Self>, _cx: DeviceInitContext) -> Result<(), Error> {
// Do the init in background
runtime::spawn(self.late_init())?;
Ok(())
}
fn display_name(&self) -> &'static str {
"AHCI SATA Controller"
fn display_name(&self) -> &str {
"AHCI Controller"
}
}
static SATA_DRIVES: IrqSafeSpinlock<Vec<&'static AhciPort>> = IrqSafeSpinlock::new(Vec::new());
pub fn register_sata_drive(drive: Arc<AhciPort>, probe: bool) {
let index = {
let mut drives = SATA_DRIVES.lock();
let index = drives.len();
if index >= MAX_DRIVES {
log::error!("Cannot add a SATA drive: too many of them");
return;
}
drives.push(drive.clone());
index
};
let letter = (index as u8 + b'a') as char;
pub fn probe(info: &PciDeviceInfo) -> Result<&'static dyn Device, Error> {
let bar5 = info.config_space.bar(5).ok_or(Error::InvalidOperation)?;
let bar5 = bar5.as_memory().ok_or(Error::InvalidOperation)?;
let name = format!("sd{letter}");
log::info!("Register SATA drive: {name}");
let mut cmd = PciCommandRegister::from_bits_retain(info.config_space.command());
cmd &= !(PciCommandRegister::DISABLE_INTERRUPTS | PciCommandRegister::ENABLE_IO);
cmd |= PciCommandRegister::ENABLE_MEMORY | PciCommandRegister::BUS_MASTER;
info.config_space.set_command(cmd.bits());
devfs::add_named_block_device(drive.clone(), name.clone(), FileMode::new(0o600)).ok();
info.init_interrupts(PreferredInterruptMode::Msi)?;
// // TODO support regular PCI interrupts (ACPI dependency)
// let Some(mut msi) = info.config_space.capability::<MsiCapability>() else {
// log::warn!("Ignoring AHCI: does not support MSI (and the OS doesn't yet support PCI IRQ)");
// return Err(Error::InvalidOperation);
// };
// Map the registers
let regs = unsafe { DeviceMemoryIo::<Regs>::map(bar5, Default::default()) }?;
let version = Version::try_from(regs.VS.get())?;
let ahci_only = regs.CAP.matches_all(CAP::SAM::SET);
let max_port_count = regs.CAP.read(CAP::NP) as usize;
let has_64_bit = regs.CAP.matches_all(CAP::S64A::SET);
// TODO extract Number of Command Slots
let ahci = Box::leak(Box::new(AhciController {
regs: IrqSafeSpinlock::new(regs),
ports: OneTimeInit::new(),
received_fis_buffers: OneTimeInit::new(),
version,
max_port_count,
ahci_only,
has_64_bit,
}));
// TODO use multiple vectors if capable
info.map_interrupt(InterruptAffinity::Any, ahci)?;
Ok(ahci)
if probe {
runtime::spawn(async move {
let name = name;
log::info!("Probing partitions for {name}");
probe_partitions(drive, |index, partition| {
let partition_name = format!("{name}{}", index + 1);
devfs::add_named_block_device(
Arc::new(partition),
partition_name,
FileMode::new(0o600),
)
.ok();
})
.await
.ok();
})
.ok();
}
}
static SATA_DRIVES: IrqSafeSpinlock<Vec<Arc<AhciPort>>> = IrqSafeSpinlock::new(Vec::new());
pci_driver! {
matches: [class (0x01:0x06:0x01)],
driver: {
fn driver_name(&self) -> &str {
"ahci"
}
fn probe(&self, info: &PciDeviceInfo, dma: &Arc<dyn DmaAllocator>) -> Result<Arc<dyn Device>, Error> {
let bar5 = info.config_space.bar(5).ok_or(Error::InvalidOperation)?;
let bar5 = bar5.as_memory().ok_or(Error::InvalidOperation)?;
let mut cmd = PciCommandRegister::from_bits_retain(info.config_space.command());
cmd &= !(PciCommandRegister::DISABLE_INTERRUPTS | PciCommandRegister::ENABLE_IO);
cmd |= PciCommandRegister::ENABLE_MEMORY | PciCommandRegister::BUS_MASTER;
info.config_space.set_command(cmd.bits());
info.init_interrupts(PreferredInterruptMode::Msi(true))?;
// // TODO support regular PCI interrupts (ACPI dependency)
// let Some(mut msi) = info.config_space.capability::<MsiCapability>() else {
// log::warn!("Ignoring AHCI: does not support MSI (and the OS doesn't yet support PCI IRQ)");
// return Err(Error::InvalidOperation);
// };
// Map the registers
let regs = unsafe { DeviceMemoryIo::<Regs>::map(bar5, Default::default()) }?;
let version = Version::try_from(regs.VS.get())?;
let ahci_only = regs.CAP.matches_all(CAP::SAM::SET);
let max_port_count = regs.CAP.read(CAP::NP) as usize;
let has_64_bit = regs.CAP.matches_all(CAP::S64A::SET);
// TODO extract Number of Command Slots
let ahci = Arc::new(AhciController {
regs: IrqSafeSpinlock::new(regs),
dma: dma.clone(),
ports: OneTimeInit::new(),
received_fis_buffers: OneTimeInit::new(),
version,
max_port_count,
ahci_only,
has_64_bit,
});
// TODO use multiple vectors if capable
info.map_interrupt(InterruptAffinity::Any, ahci.clone())?;
Ok(ahci)
}
}
}

View File

@ -5,12 +5,20 @@ use core::{
task::{Context, Poll},
};
use alloc::{boxed::Box, string::String};
use alloc::{boxed::Box, string::String, sync::Arc};
use async_trait::async_trait;
use bytemuck::Zeroable;
use device_api::{device::Device, dma::DmaAllocator};
use futures_util::task::AtomicWaker;
use libk::vfs::block::NgBlockDevice;
use libk_mm::{address::AsPhysicalAddress, device::DeviceMemoryIo, PageBox, PageSlice};
use libk::{
device::block::BlockDevice,
dma::{DmaBuffer, DmaSlice, DmaSliceMut},
error::Error,
};
use libk_mm::{
address::PhysicalAddress, device::DeviceMemoryIo, table::MapAttributes, OnDemandPage,
PageProvider, VirtualPage,
};
use libk_util::{sync::IrqSafeSpinlock, waker::QueueWaker, OneTimeInit};
use tock_registers::interfaces::{Readable, Writeable};
@ -31,8 +39,8 @@ struct PortInner {
regs: DeviceMemoryIo<'static, PortRegs>,
#[allow(unused)]
received_fis: PageBox<ReceivedFis>,
command_list: PageBox<[CommandListEntry]>,
received_fis: DmaBuffer<ReceivedFis>,
command_list: DmaBuffer<[CommandListEntry]>,
}
pub struct PortInfo {
@ -44,7 +52,7 @@ pub struct PortInfo {
#[allow(unused)]
pub struct AhciPort {
inner: IrqSafeSpinlock<PortInner>,
ahci: &'static AhciController,
ahci: Arc<AhciController>,
ty: PortType,
pub(crate) index: usize,
info: OneTimeInit<PortInfo>,
@ -84,18 +92,16 @@ impl Drop for SubmittedCommand<'_> {
impl PortInner {
fn submit_command<C: AtaCommand>(
&mut self,
dma: &dyn DmaAllocator,
index: usize,
command: &C,
) -> Result<(), AhciError> {
let list_entry = &mut self.command_list[index];
let mut table_entry =
PageBox::new(CommandTable::zeroed()).map_err(AhciError::MemoryError)?;
DmaBuffer::new(dma, CommandTable::zeroed()).map_err(AhciError::MemoryError)?;
table_entry.setup_command(command)?;
*list_entry = CommandListEntry::new(
unsafe { table_entry.as_physical_address() },
command.prd_count(),
)?;
*list_entry = CommandListEntry::new(table_entry.bus_address(), command.prd_count())?;
// Sync before send
// XXX do this properly
@ -120,9 +126,9 @@ impl PortInner {
impl AhciPort {
pub fn create(
regs: DeviceMemoryIo<'static, PortRegs>,
ahci: &'static AhciController,
ahci: Arc<AhciController>,
index: usize,
) -> Result<&'static Self, AhciError> {
) -> Result<Arc<Self>, AhciError> {
log::debug!("Initialize port {}", index);
regs.stop()?;
@ -131,12 +137,14 @@ impl AhciPort {
return Err(AhciError::DeviceError);
}
let received_fis = PageBox::new(ReceivedFis::zeroed()).map_err(AhciError::MemoryError)?;
let command_list = PageBox::new_slice(CommandListEntry::zeroed(), COMMAND_LIST_LENGTH)
.map_err(AhciError::MemoryError)?;
let received_fis =
DmaBuffer::new(&*ahci.dma, ReceivedFis::zeroed()).map_err(AhciError::MemoryError)?;
let command_list =
DmaBuffer::new_slice(&*ahci.dma, CommandListEntry::zeroed(), COMMAND_LIST_LENGTH)
.map_err(AhciError::MemoryError)?;
regs.set_received_fis_address_64(unsafe { received_fis.as_physical_address() });
regs.set_command_list_address_64(unsafe { command_list.as_physical_address() });
regs.set_received_fis_address_64(received_fis.bus_address());
regs.set_command_list_address_64(command_list.bus_address());
regs.IE.write(
IE::DPE::SET
@ -160,7 +168,7 @@ impl AhciPort {
let command_available = QueueWaker::new();
let command_allocation = IrqSafeSpinlock::new(0);
Ok(Box::leak(Box::new(Self {
let port = Arc::new(Self {
inner: IrqSafeSpinlock::new(inner),
ty: PortType::Sata,
info: OneTimeInit::new(),
@ -170,11 +178,15 @@ impl AhciPort {
command_completion,
command_allocation,
command_available,
})))
});
Ok(port)
}
pub async fn init(&'static self) -> Result<(), AhciError> {
let identify = self.perform_command(AtaIdentify::create()?).await?;
pub async fn init_inner(&self) -> Result<(), AhciError> {
let identify = self
.perform_command(AtaIdentify::create(&*self.ahci.dma)?)
.await?;
let model = identify.model_number.to_string();
let serial = identify.serial_number.to_string();
@ -229,7 +241,11 @@ impl AhciPort {
return Err(AhciError::RegionTooLarge);
}
let index = self.allocate_command().await;
if let Err(error) = self.inner.lock().submit_command(index, command) {
if let Err(error) = self
.inner
.lock()
.submit_command(&*self.ahci.dma, index, command)
{
self.free_command(index);
return Err(error);
}
@ -293,32 +309,48 @@ impl AhciPort {
}
#[async_trait]
impl NgBlockDevice for AhciPort {
type Error = AhciError;
async fn read(
&self,
lba: u64,
buffer: &mut PageSlice<MaybeUninit<u8>>,
) -> Result<(), AhciError> {
if buffer.len() % SECTOR_SIZE != 0 {
return Err(AhciError::InvalidBufferSize(buffer.len()));
}
let command = AtaReadDmaEx::new(lba, buffer.len() / SECTOR_SIZE, buffer);
self.submit(&command).await?.wait_for_completion().await
impl BlockDevice for AhciPort {
fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error> {
DmaBuffer::new_uninit_slice(&*self.ahci.dma, size)
}
async fn write(&self, _lba: u64, _buffer: &PageSlice<u8>) -> Result<(), AhciError> {
// TODO AtaDmaWriteEx
Err(AhciError::FeatureNotImplemented)
async fn read_aligned(
&self,
position: u64,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<(), Error> {
if buffer.len() % SECTOR_SIZE != 0 {
log::warn!("ahci: misaligned buffer size: {}", buffer.len());
return Err(Error::InvalidOperation);
}
if position % SECTOR_SIZE as u64 != 0 {
log::warn!("ahci: misaligned read");
return Err(Error::InvalidOperation);
}
let lba = position / SECTOR_SIZE as u64;
let lba_count = buffer.len() / SECTOR_SIZE;
if lba + lba_count as u64 >= self.block_count() {
log::warn!("ahci: read crosses medium end");
return Err(Error::InvalidOperation);
}
let command = AtaReadDmaEx::new(lba, lba_count, buffer);
self.submit(&command).await?.wait_for_completion().await?;
Ok(())
}
async fn write_aligned(&self, _position: u64, _buffer: DmaSlice<'_, u8>) -> Result<(), Error> {
// TODO AtaWriteDmaEx
Err(Error::NotImplemented)
}
fn block_size(&self) -> usize {
SECTOR_SIZE
}
fn block_count(&self) -> usize {
fn block_count(&self) -> u64 {
self.info().as_ref().map(|i| i.lba_count).unwrap() as _
}
@ -326,3 +358,37 @@ impl NgBlockDevice for AhciPort {
(MAX_PRD_SIZE * 2) / SECTOR_SIZE
}
}
impl Device for AhciPort {
fn display_name(&self) -> &str {
"AHCI SATA Drive"
}
}
impl PageProvider for AhciPort {
fn ondemand_fetch(&self, _opaque: u64) -> Result<OnDemandPage, Error> {
unimplemented!()
}
fn get_page(&self, _offset: u64) -> Result<VirtualPage, Error> {
unimplemented!()
}
fn release_page(
&self,
_offset: u64,
_phys: PhysicalAddress,
_dirty: bool,
) -> Result<(), Error> {
unimplemented!()
}
fn clone_page(
&self,
_offset: u64,
_src_phys: PhysicalAddress,
_src_attrs: MapAttributes,
) -> Result<PhysicalAddress, Error> {
unimplemented!()
}
}

View File

@ -1,4 +1,4 @@
use libk_mm::address::PhysicalAddress;
use libk::dma::BusAddress;
use tock_registers::{
interfaces::{ReadWriteable, Readable, Writeable},
register_bitfields, register_structs,
@ -141,14 +141,14 @@ impl PortRegs {
Ok(())
}
pub fn set_received_fis_address_64(&self, address: PhysicalAddress) {
let address: u64 = address.into();
pub fn set_received_fis_address_64(&self, address: BusAddress) {
let address: u64 = address.into_u64();
self.FB.set(address as u32);
self.FBU.set((address >> 32) as u32);
}
pub fn set_command_list_address_64(&self, address: PhysicalAddress) {
let address: u64 = address.into();
pub fn set_command_list_address_64(&self, address: BusAddress) {
let address: u64 = address.into_u64();
self.CLB.set(address as u32);
self.CLBU.set((address >> 32) as u32);
}

View File

@ -10,6 +10,7 @@ libk-util.workspace = true
libk-mm.workspace = true
libk.workspace = true
device-api = { workspace = true, features = ["derive"] }
kernel-arch.workspace = true
ygg_driver_pci = { path = "../../bus/pci" }
kernel-fs = { path = "../../fs/kernel-fs" }

View File

@ -2,7 +2,7 @@
use core::fmt::{self, Write};
use libk_mm::address::PhysicalAddress;
use libk::dma::BusAddress;
use tock_registers::{interfaces::Readable, register_structs, registers::ReadOnly, UIntLike};
use crate::queue::PhysicalRegionPage;
@ -74,7 +74,7 @@ pub struct CreateIoCompletionQueue {
pub id: u32,
pub size: usize,
pub vector: u32,
pub data: PhysicalAddress,
pub data: BusAddress,
}
#[derive(Clone, Copy, Debug)]
@ -82,7 +82,7 @@ pub struct CreateIoSubmissionQueue {
pub id: u32,
pub cq_id: u32,
pub size: usize,
pub data: PhysicalAddress,
pub data: BusAddress,
}
// Replies

View File

@ -1,32 +1,42 @@
use core::mem::MaybeUninit;
use alloc::{boxed::Box, format};
use alloc::{boxed::Box, sync::Arc};
use async_trait::async_trait;
use kernel_fs::devfs;
use libk::vfs::block::{probe_partitions, NgBlockDevice, NgBlockDeviceWrapper};
use libk_mm::{address::AsPhysicalAddress, PageSlice};
use device_api::device::Device;
use libk::{
device::block::BlockDevice,
dma::{DmaBuffer, DmaSlice, DmaSliceMut},
error::Error,
};
use libk_mm::{
address::{AsPhysicalAddress, PhysicalAddress},
table::MapAttributes,
OnDemandPage, PageProvider, PageSlice, VirtualPage,
};
use crate::{command::IdentifyNamespaceRequest, IoDirection};
use crate::{command::IdentifyNamespaceRequest, register_nvme_namespace, IoDirection};
use super::{error::NvmeError, NvmeController};
#[allow(unused)]
pub struct NvmeDrive {
controller: &'static NvmeController,
pub struct NvmeNamespace {
controller: Arc<NvmeController>,
nsid: u32,
total_lba_count: u64,
lba_size: u64,
max_lba_per_request: usize,
}
impl NvmeDrive {
impl NvmeNamespace {
pub async fn create(
controller: &'static NvmeController,
controller: Arc<NvmeController>,
nsid: u32,
max_transfer_size: usize,
) -> Result<&'static NvmeDrive, NvmeError> {
) -> Result<Arc<NvmeNamespace>, NvmeError> {
let admin_q = controller.admin_q.get();
let identify = admin_q.request(IdentifyNamespaceRequest { nsid }).await?;
let identify = admin_q
.request(&*controller.dma, IdentifyNamespaceRequest { nsid })
.await?;
let current_lba_format_idx = identify.current_lba_fmt_idx();
let current_lba_format = identify.lba_fmt(current_lba_format_idx).unwrap();
@ -41,88 +51,129 @@ impl NvmeDrive {
(total_lba_count * lba_size) / (1024 * 1024),
max_lba_per_request,
);
let dev = Box::leak(Box::new(NvmeDrive {
let dev = NvmeNamespace {
controller,
nsid,
total_lba_count,
lba_size,
max_lba_per_request,
}));
};
let dev = Arc::new(dev);
let node_name = format!("nvme{}n{}", controller.controller_id.get(), nsid);
let blk = NgBlockDeviceWrapper::new(dev);
devfs::add_named_block_device(blk, node_name.clone()).ok();
probe_partitions(blk, move |index, partition| {
devfs::add_block_device_partition(format!("{}p", node_name), index, partition)
})
.ok();
register_nvme_namespace(dev.clone(), true);
Ok(dev)
}
pub fn controller_id(&self) -> u32 {
*self.controller.controller_id.get()
}
pub fn id(&self) -> u32 {
self.nsid
}
}
impl Device for NvmeNamespace {
fn display_name(&self) -> &str {
"NVMe Namespace"
}
}
#[async_trait]
impl NgBlockDevice for NvmeDrive {
type Error = NvmeError;
async fn read(
&self,
lba: u64,
buffer: &mut PageSlice<MaybeUninit<u8>>,
) -> Result<(), NvmeError> {
debug_assert_eq!(buffer.len() % self.block_size(), 0);
let buffer_address = unsafe { buffer.as_physical_address() };
debug_assert_eq!(buffer_address.into_u64() % self.block_size() as u64, 0);
let lba_count = buffer.len() / self.block_size();
let result = self
.controller
.perform_io(
self.nsid,
lba,
lba_count,
buffer_address,
buffer.len(),
IoDirection::Read,
)
.await;
log::info!(target: "io", "read #{lba}, {lba_count} blocks -> {result:?} @ {buffer_address:#x}");
result
impl BlockDevice for NvmeNamespace {
fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error> {
DmaBuffer::new_uninit_slice(&*self.controller.dma, size)
}
async fn write(&self, lba: u64, buffer: &PageSlice<u8>) -> Result<(), NvmeError> {
debug_assert_eq!(buffer.len() % self.block_size(), 0);
let buffer_address = unsafe { buffer.as_physical_address() };
debug_assert_eq!(buffer_address.into_u64() % self.block_size() as u64, 0);
let lba_count = buffer.len() / self.block_size();
// TODO read directly to cache
async fn read_aligned(
&self,
position: u64,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<(), Error> {
if position % self.block_size() as u64 != 0 {
return Err(Error::InvalidOperation);
}
if buffer.len() % self.block_size() != 0 || buffer.is_empty() {
return Err(Error::InvalidOperation);
}
let lba = position / self.block_size() as u64;
let lba_count = buffer.len().div_ceil(self.block_size());
if lba + lba_count as u64 > self.block_count() {
return Err(Error::InvalidOperation);
}
let result = self
.controller
.perform_io(
self.nsid,
lba,
lba_count,
buffer_address,
buffer.len(),
IoDirection::Write,
)
.perform_read(self.nsid, lba, lba_count, buffer)
.await;
log::info!(target: "io", "write -> #{lba}, {lba_count} blocks -> {result:?} @ {buffer_address:#x}");
log::trace!(target: "io", "read #{lba}, {lba_count} blocks -> {result:?}");
result
result.map_err(NvmeError::into)
}
async fn write_aligned(&self, position: u64, buffer: DmaSlice<'_, u8>) -> Result<(), Error> {
if position % self.block_size() as u64 != 0 {
return Err(Error::InvalidOperation);
}
if buffer.len() % self.block_size() != 0 || buffer.is_empty() {
return Err(Error::InvalidOperation);
}
let lba = position / self.block_size() as u64;
let lba_count = buffer.len().div_ceil(self.block_size());
if lba + lba_count as u64 > self.block_count() {
return Err(Error::InvalidOperation);
}
let result = self
.controller
.perform_write(self.nsid, lba, lba_count, buffer)
.await;
log::trace!(target: "io", "write -> #{lba}, {lba_count} blocks -> {result:?}");
result.map_err(NvmeError::into)
}
fn block_size(&self) -> usize {
self.lba_size as _
}
fn block_count(&self) -> usize {
self.total_lba_count as _
fn block_count(&self) -> u64 {
self.total_lba_count
}
fn max_blocks_per_request(&self) -> usize {
self.max_lba_per_request
}
}
impl PageProvider for NvmeNamespace {
fn ondemand_fetch(&self, _opaque: u64) -> Result<OnDemandPage, Error> {
unimplemented!()
}
fn get_page(&self, _offset: u64) -> Result<VirtualPage, Error> {
unimplemented!()
}
fn release_page(
&self,
_offset: u64,
_phys: PhysicalAddress,
_dirty: bool,
) -> Result<(), Error> {
unimplemented!()
}
fn clone_page(
&self,
_offset: u64,
_src_phys: PhysicalAddress,
_src_attrs: MapAttributes,
) -> Result<PhysicalAddress, Error> {
unimplemented!()
}
}

View File

@ -5,6 +5,7 @@ use super::queue::CommandError;
#[derive(Debug)]
pub enum NvmeError {
InitError(Error),
InvalidBuffer(PhysicalAddress, usize),
RequestTooLarge(usize),
MemoryError(Error),
@ -20,6 +21,7 @@ impl From<CommandError> for NvmeError {
impl From<NvmeError> for Error {
fn from(value: NvmeError) -> Self {
match value {
NvmeError::InitError(error) => error,
NvmeError::RequestTooLarge(_) => Error::InvalidArgument,
NvmeError::InvalidBuffer(_, _) => Error::InvalidArgument,
NvmeError::MemoryError(err) => err,

View File

@ -1,29 +1,39 @@
#![feature(const_trait_impl, let_chains, if_let_guard, maybe_uninit_slice)]
#![allow(missing_docs)]
#![no_std]
// TODO
#![allow(unused)]
extern crate alloc;
use core::{
mem::size_of,
mem::{size_of, MaybeUninit},
sync::atomic::{AtomicUsize, Ordering},
time::Duration,
};
use alloc::{boxed::Box, collections::BTreeMap, vec::Vec};
use alloc::{collections::BTreeMap, format, sync::Arc, vec::Vec};
use command::{IdentifyActiveNamespaceIdListRequest, IdentifyControllerRequest};
use device_api::{
interrupt::{InterruptAffinity, InterruptHandler},
Device,
device::{Device, DeviceInitContext},
dma::DmaAllocator,
interrupt::{InterruptAffinity, InterruptHandler, IrqVector},
};
use drive::NvmeNamespace;
use kernel_arch::{Architecture, ArchitectureImpl};
use libk::{
device::manager::probe_partitions,
dma::{BusAddress, DmaSlice, DmaSliceMut},
fs::devfs,
task::{cpu_count, cpu_index, runtime},
};
use drive::NvmeDrive;
use libk::task::{cpu_count, cpu_index, runtime};
use libk_mm::{address::PhysicalAddress, device::DeviceMemoryIo, L3_PAGE_SIZE};
use libk_util::{
sync::{IrqGuard, IrqSafeSpinlock},
OneTimeInit,
};
use queue::PrpList;
use regs::{CAP, CC};
use tock_registers::{
interfaces::{ReadWriteable, Readable, Writeable},
register_bitfields, register_structs,
@ -31,9 +41,10 @@ use tock_registers::{
};
use ygg_driver_pci::{
device::{PciDeviceInfo, PreferredInterruptMode},
macros::pci_driver,
PciCommandRegister, PciConfigurationSpace,
};
use yggdrasil_abi::error::Error;
use yggdrasil_abi::{error::Error, io::FileMode};
use crate::{
command::{IoRead, IoWrite},
@ -44,92 +55,29 @@ use self::{
command::{CreateIoCompletionQueue, CreateIoSubmissionQueue, SetFeatureRequest},
error::NvmeError,
queue::QueuePair,
regs::Regs,
};
mod command;
mod drive;
mod error;
mod queue;
mod regs;
pub const MAX_PAGES_PER_REQUEST: usize = 256;
// Use host page
pub const PAGE_SIZE: usize = L3_PAGE_SIZE;
register_bitfields! {
u32,
CC [
IOCQES OFFSET(20) NUMBITS(4) [],
IOSQES OFFSET(16) NUMBITS(4) [],
AMS OFFSET(11) NUMBITS(3) [],
MPS OFFSET(7) NUMBITS(4) [],
CSS OFFSET(4) NUMBITS(3) [
NvmCommandSet = 0
],
ENABLE OFFSET(0) NUMBITS(1) [],
],
CSTS [
CFS OFFSET(1) NUMBITS(1) [],
RDY OFFSET(0) NUMBITS(1) [],
],
AQA [
/// Admin Completion Queue Size in entries - 1
ACQS OFFSET(16) NUMBITS(12) [],
/// Admin Submission Queue Size in entries - 1
ASQS OFFSET(0) NUMBITS(12) [],
]
}
register_bitfields! {
u64,
CAP [
/// Maximum Queue Entries Supported - 1. i.e., 0 means maximum queue len of 1, 1 = 2 etc.
MQES OFFSET(0) NUMBITS(16) [],
/// Timeout. Represents the worst-case time the host software should wait for CSTS.RDY to
/// change its state.
TO OFFSET(24) NUMBITS(8) [],
/// Doorbell stride. Stride in bytes = pow(2, 2 + DSTRD).
DSTRD OFFSET(32) NUMBITS(4) [],
/// NVM Subsystem Reset Supported (see NVMe BS Section 3.7.1)
NSSRS OFFSET(36) NUMBITS(1) [],
/// Controller supports one or more I/O command sets
CSS_IO_COMMANDS OFFSET(43) NUMBITS(1) [],
/// Controller only supports admin commands and no I/O commands
CSS_ADMIN_ONLY OFFSET(44) NUMBITS(1) [],
/// Memory page size minimum (bytes = pow(2, 12 + MPSMIN))
MPSMIN OFFSET(48) NUMBITS(4) [],
/// Memory page size maximum -|-
MPSMAX OFFSET(52) NUMBITS(4) [],
]
}
register_structs! {
#[allow(non_snake_case)]
Regs {
(0x00 => CAP: ReadOnly<u64, CAP::Register>),
(0x08 => VS: ReadOnly<u32>),
(0x0C => INTMS: WriteOnly<u32>),
(0x10 => INTMC: WriteOnly<u32>),
(0x14 => CC: ReadWrite<u32, CC::Register>),
(0x18 => _0),
(0x1C => CSTS: ReadOnly<u32, CSTS::Register>),
(0x20 => _1),
(0x24 => AQA: ReadWrite<u32, AQA::Register>),
(0x28 => ASQ: ReadWrite<u64>),
(0x30 => ACQ: ReadWrite<u64>),
(0x38 => _2),
(0x2000 => @END),
}
}
pub struct NvmeController {
regs: IrqSafeSpinlock<DeviceMemoryIo<'static, Regs>>,
admin_q: OneTimeInit<QueuePair>,
ioqs: OneTimeInit<Vec<QueuePair>>,
io_queue_count: AtomicUsize,
drive_table: IrqSafeSpinlock<BTreeMap<u32, &'static NvmeDrive>>,
controller_id: OneTimeInit<usize>,
drive_table: IrqSafeSpinlock<BTreeMap<u32, Arc<NvmeNamespace>>>,
controller_id: OneTimeInit<u32>,
pci: PciDeviceInfo,
dma: Arc<dyn DmaAllocator>,
doorbell_shift: usize,
min_page_size: usize,
@ -153,7 +101,7 @@ impl NvmeController {
const ADMIN_QUEUE_SIZE: usize = 32;
const IO_QUEUE_SIZE: usize = 32;
async fn create_queues(&'static self) -> Result<(), NvmeError> {
async fn create_queues(&self) -> Result<(), NvmeError> {
let admin_q = self.admin_q.get();
let io_queue_count = self.io_queue_count.load(Ordering::Acquire);
@ -176,15 +124,22 @@ impl NvmeController {
let id = i as u32;
let (sq_doorbell, cq_doorbell) = unsafe { self.doorbell_pair(i) };
let queue = QueuePair::new(id, i, Self::IO_QUEUE_SIZE, sq_doorbell, cq_doorbell)
.map_err(NvmeError::MemoryError)?;
let queue = QueuePair::new(
&*self.dma,
id,
i,
Self::IO_QUEUE_SIZE,
sq_doorbell,
cq_doorbell,
)
.map_err(NvmeError::MemoryError)?;
admin_q
.request_no_data(CreateIoCompletionQueue {
id,
vector: id,
size: Self::IO_QUEUE_SIZE,
data: queue.cq_physical_pointer(),
data: queue.cq_bus_pointer(),
})
.await?;
@ -193,7 +148,7 @@ impl NvmeController {
id,
cq_id: id,
size: Self::IO_QUEUE_SIZE,
data: queue.sq_physical_pointer(),
data: queue.sq_bus_pointer(),
})
.await?;
@ -205,15 +160,17 @@ impl NvmeController {
Ok(())
}
async fn late_init(&'static self) -> Result<(), NvmeError> {
async fn late_init(self: Arc<Self>) -> Result<(), NvmeError> {
register_nvme_controller(self.clone());
let io_queue_count = cpu_count();
self.io_queue_count.store(io_queue_count, Ordering::Release);
{
let range = self
.pci
.map_interrupt_multiple(0..io_queue_count + 1, InterruptAffinity::Any, self)
.unwrap();
.map_interrupt_multiple(0..io_queue_count + 1, InterruptAffinity::Any, self.clone())
.map_err(NvmeError::InitError)?;
// TODO handle different MSI range allocations
for (i, msi) in range.iter().enumerate() {
@ -221,12 +178,12 @@ impl NvmeController {
}
}
register_nvme_controller(self);
let admin_q = self.admin_q.get();
// Identify the controller
let identify = admin_q.request(IdentifyControllerRequest).await?;
let identify = admin_q
.request(&*self.dma, IdentifyControllerRequest)
.await?;
let max_transfer_size = if identify.mdts == 0 {
// Pick some sane default value
@ -244,20 +201,23 @@ impl NvmeController {
}
async fn enumerate_namespaces(
&'static self,
self: &Arc<Self>,
max_transfer_size: usize,
) -> Result<(), NvmeError> {
let admin_q = self.admin_q.get();
let namespaces = admin_q
.request(IdentifyActiveNamespaceIdListRequest { start_id: 0 })
.request(
&*self.dma,
IdentifyActiveNamespaceIdListRequest { start_id: 0 },
)
.await?;
let count = namespaces.entries.iter().position(|&x| x == 0).unwrap();
let list = &namespaces.entries[..count];
for &nsid in list {
match NvmeDrive::create(self, nsid, max_transfer_size).await {
match NvmeNamespace::create(self.clone(), nsid, max_transfer_size).await {
Ok(drive) => {
self.drive_table.lock().insert(nsid, drive);
}
@ -270,42 +230,53 @@ impl NvmeController {
Ok(())
}
pub async fn perform_io(
&'static self,
pub async fn perform_read(
&self,
nsid: u32,
lba: u64,
lba_count: usize,
buffer_address: PhysicalAddress,
transfer_size: usize,
direction: IoDirection,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<(), NvmeError> {
let prp_list = PrpList::from_buffer(buffer_address, transfer_size)?;
let _guard = IrqGuard::acquire();
let prp_list = PrpList::from_buffer(&*self.dma, buffer.bus_address(), buffer.len())?;
let cpu_index = cpu_index();
let ioq = &self.ioqs.get()[cpu_index as usize];
let cmd_id = ioq.submit(
IoRead {
nsid,
lba,
count: lba_count as _,
},
&prp_list,
true,
)?;
ioq.wait_for_completion(cmd_id, ()).await?;
let cmd_id = match direction {
IoDirection::Read => ioq.submit(
IoRead {
nsid,
lba,
count: lba_count as _,
},
&prp_list,
true,
)?,
IoDirection::Write => ioq.submit(
IoWrite {
nsid,
lba,
count: lba_count as _,
},
&prp_list,
true,
)?,
};
buffer.cache_flush_all(false);
Ok(())
}
pub async fn perform_write(
&self,
nsid: u32,
lba: u64,
lba_count: usize,
buffer: DmaSlice<'_, u8>,
) -> Result<(), NvmeError> {
buffer.cache_flush_all(true);
let prp_list = PrpList::from_buffer(&*self.dma, buffer.bus_address(), buffer.len())?;
let cpu_index = cpu_index();
let ioq = &self.ioqs.get()[cpu_index as usize];
let cmd_id = ioq.submit(
IoWrite {
nsid,
lba,
count: lba_count as _,
},
&prp_list,
true,
)?;
ioq.wait_for_completion(cmd_id, ()).await?;
Ok(())
@ -320,8 +291,10 @@ impl NvmeController {
}
impl InterruptHandler for NvmeController {
fn handle_irq(&self, vector: Option<usize>) -> bool {
let vector = vector.expect("Only MSI-X interrupts are supported");
fn handle_irq(self: Arc<Self>, vector: IrqVector) -> bool {
let IrqVector::Msi(vector) = vector else {
unreachable!("Only MSI-x interrupts are supported for NVMe");
};
if vector == 0 {
self.admin_q.get().process_completions() != 0
@ -336,76 +309,44 @@ impl InterruptHandler for NvmeController {
}
impl Device for NvmeController {
unsafe fn init(&'static self) -> Result<(), Error> {
unsafe fn init(self: Arc<Self>, _cx: DeviceInitContext) -> Result<(), Error> {
let regs = self.regs.lock();
let timeout = Duration::from_millis(regs.CAP.read(CAP::TO) * 500);
log::debug!("Worst-case timeout: {:?}", timeout);
while regs.CSTS.matches_all(CSTS::RDY::SET) {
core::hint::spin_loop();
}
if Self::ADMIN_QUEUE_SIZE as u64 > regs.CAP.read(CAP::MQES) + 1 {
todo!(
"queue_slots too big, max = {}",
regs.CAP.read(CAP::MQES) + 1
);
}
// Setup the admin queue (index 0)
let admin_sq_doorbell = unsafe { regs.doorbell_ptr(self.doorbell_shift, false, 0) };
let admin_cq_doorbell = unsafe { regs.doorbell_ptr(self.doorbell_shift, true, 0) };
log::debug!("sq_doorbell for adminq = {:p}", admin_sq_doorbell);
let admin_q = QueuePair::new(
&*self.dma,
0,
0,
Self::ADMIN_QUEUE_SIZE,
admin_sq_doorbell,
admin_cq_doorbell,
)
.unwrap();
)?;
regs.AQA.modify(
AQA::ASQS.val(Self::ADMIN_QUEUE_SIZE as u32 - 1)
+ AQA::ACQS.val(Self::ADMIN_QUEUE_SIZE as u32 - 1),
);
regs.ASQ.set(admin_q.sq_physical_pointer().into());
regs.ACQ.set(admin_q.cq_physical_pointer().into());
regs.configure_admin_queue(
admin_q.sq_bus_pointer(),
admin_q.cq_bus_pointer(),
Self::ADMIN_QUEUE_SIZE,
Self::ADMIN_QUEUE_SIZE,
)?;
// Configure the controller
const IOSQES: u32 = size_of::<SubmissionQueueEntry>().ilog2();
const IOCQES: u32 = size_of::<CompletionQueueEntry>().ilog2();
regs.CC.modify(
CC::IOCQES.val(IOCQES)
+ CC::IOSQES.val(IOSQES)
+ CC::MPS.val(0)
+ CC::CSS::NvmCommandSet,
);
// Enable the controller
regs.CC.modify(CC::ENABLE::SET);
log::debug!("Reset the controller");
while !regs.CSTS.matches_any(&[CSTS::RDY::SET, CSTS::CFS::SET]) {
core::hint::spin_loop();
}
if regs.CSTS.matches_all(CSTS::CFS::SET) {
todo!("CFS set after reset!");
}
regs.configure_controller();
regs.enable_controller(10000000)?;
self.admin_q.init(admin_q);
// Schedule late_init task
runtime::spawn(self.late_init())?;
runtime::spawn(self.clone().late_init())?;
Ok(())
}
fn display_name(&self) -> &'static str {
fn display_name(&self) -> &str {
"NVM Express Controller"
}
}
@ -413,55 +354,92 @@ impl Device for NvmeController {
// TODO
unsafe impl Sync for NvmeController {}
static NVME_CONTROLLERS: IrqSafeSpinlock<Vec<&'static NvmeController>> =
IrqSafeSpinlock::new(Vec::new());
pub fn probe(info: &PciDeviceInfo) -> Result<&'static dyn Device, Error> {
let bar0 = info
.config_space
.bar(0)
.unwrap()
.as_memory()
.expect("Expected a memory BAR0");
info.init_interrupts(PreferredInterruptMode::Msi)?;
let mut cmd = PciCommandRegister::from_bits_retain(info.config_space.command());
cmd &= !(PciCommandRegister::DISABLE_INTERRUPTS | PciCommandRegister::ENABLE_IO);
cmd |= PciCommandRegister::ENABLE_MEMORY | PciCommandRegister::BUS_MASTER;
info.config_space.set_command(cmd.bits());
let regs = unsafe { DeviceMemoryIo::<Regs>::map(bar0, Default::default()) }?;
// Disable the controller
regs.CC.modify(CC::ENABLE::CLEAR);
let doorbell_shift = regs.CAP.read(CAP::DSTRD) as usize + 1;
let min_page_size = 1 << (regs.CAP.read(CAP::MPSMIN) + 12);
if min_page_size > PAGE_SIZE {
log::error!("Cannot support NVMe HC: min page size ({min_page_size}) > host page size ({PAGE_SIZE})");
return Err(Error::InvalidArgument);
}
Ok(Box::leak(Box::new(NvmeController {
regs: IrqSafeSpinlock::new(regs),
admin_q: OneTimeInit::new(),
ioqs: OneTimeInit::new(),
drive_table: IrqSafeSpinlock::new(BTreeMap::new()),
controller_id: OneTimeInit::new(),
pci: info.clone(),
io_queue_count: AtomicUsize::new(1),
doorbell_shift,
min_page_size,
})))
}
pub fn register_nvme_controller(ctrl: &'static NvmeController) {
pub fn register_nvme_controller(controller: Arc<NvmeController>) {
let mut list = NVME_CONTROLLERS.lock();
let id = list.len();
list.push(ctrl);
ctrl.controller_id.init(id);
list.push(controller.clone());
controller.controller_id.init(id as u32);
}
pub fn register_nvme_namespace(namespace: Arc<NvmeNamespace>, probe: bool) {
let name = format!("nvme{}n{}", namespace.controller_id(), namespace.id());
log::info!("Register NVMe namespace: {name}");
devfs::add_named_block_device(namespace.clone(), name.clone(), FileMode::new(0o600)).ok();
if probe {
runtime::spawn(async move {
let name = name;
log::info!("Probing partitions for {name}");
probe_partitions(namespace, |index, partition| {
let partition_name = format!("{name}p{}", index + 1);
devfs::add_named_block_device(
Arc::new(partition),
partition_name,
FileMode::new(0o600),
)
.ok();
})
.await
.inspect_err(|error| log::error!("{name}: partition probe failed: {error:?}"))
})
.ok();
}
}
static NVME_CONTROLLERS: IrqSafeSpinlock<Vec<Arc<NvmeController>>> =
IrqSafeSpinlock::new(Vec::new());
pci_driver! {
matches: [class (0x01:0x08:0x02)],
driver: {
fn driver_name(&self) -> &str {
"nvme"
}
fn probe(&self, info: &PciDeviceInfo, dma: &Arc<dyn DmaAllocator>) -> Result<Arc<dyn Device>, Error> {
let bar0 = info
.config_space
.bar(0)
.unwrap()
.as_memory()
.expect("Expected a memory BAR0");
info.init_interrupts(PreferredInterruptMode::Msi(true))?;
let mut cmd = PciCommandRegister::from_bits_retain(info.config_space.command());
cmd &= !(PciCommandRegister::DISABLE_INTERRUPTS | PciCommandRegister::ENABLE_IO);
cmd |= PciCommandRegister::ENABLE_MEMORY | PciCommandRegister::BUS_MASTER;
info.config_space.set_command(cmd.bits());
let regs = unsafe { DeviceMemoryIo::<Regs>::map(bar0, Default::default()) }?;
// Disable the controller
regs.disable_controller(10000000)?;
let doorbell_shift = regs.CAP.read(CAP::DSTRD) as usize + 1;
let min_page_size = 1 << (regs.CAP.read(CAP::MPSMIN) + 12);
if min_page_size > PAGE_SIZE {
log::error!("Cannot support NVMe HC: min page size ({min_page_size}) > host page size ({PAGE_SIZE})");
return Err(Error::InvalidArgument);
}
let device = NvmeController {
regs: IrqSafeSpinlock::new(regs),
admin_q: OneTimeInit::new(),
ioqs: OneTimeInit::new(),
drive_table: IrqSafeSpinlock::new(BTreeMap::new()),
controller_id: OneTimeInit::new(),
pci: info.clone(),
dma: dma.clone(),
io_queue_count: AtomicUsize::new(1),
doorbell_shift,
min_page_size,
};
Ok(Arc::new(device))
}
}
}

View File

@ -1,14 +1,11 @@
use core::{future::poll_fn, mem::size_of, ptr::null_mut, task::Poll};
use alloc::{
collections::{BTreeMap, BTreeSet},
vec::Vec,
};
use alloc::collections::{BTreeMap, BTreeSet};
use bytemuck::{Pod, Zeroable};
use libk_mm::{
address::{AsPhysicalAddress, PhysicalAddress},
PageBox,
};
use device_api::dma::DmaAllocator;
use kernel_arch::{Architecture, ArchitectureImpl};
use libk::dma::{BusAddress, DmaBuffer};
use libk_mm::address::AsPhysicalAddress;
use libk_util::{sync::IrqSafeSpinlock, waker::QueueWaker};
use static_assertions::const_assert;
use yggdrasil_abi::error::Error;
@ -61,7 +58,7 @@ pub struct CompletionQueueEntry {
}
pub struct Queue<T> {
data: PageBox<[T]>,
data: DmaBuffer<[T]>,
mask: usize,
head: usize,
tail: usize,
@ -85,8 +82,8 @@ pub struct QueuePair {
#[allow(unused)]
vector: usize,
sq_base: PhysicalAddress,
cq_base: PhysicalAddress,
sq_base: BusAddress,
cq_base: BusAddress,
pub completion_notify: QueueWaker,
@ -96,7 +93,8 @@ pub struct QueuePair {
pub struct PrpList {
prp1: PhysicalRegionPage,
prp2: PhysicalRegionPage,
list: Option<PageBox<[PhysicalAddress]>>,
#[allow(unused)]
list: Option<DmaBuffer<[BusAddress]>>,
}
impl PrpList {
@ -108,7 +106,11 @@ impl PrpList {
}
}
pub fn from_buffer(base: PhysicalAddress, size: usize) -> Result<Self, NvmeError> {
pub fn from_buffer(
dma: &dyn DmaAllocator,
base: BusAddress,
size: usize,
) -> Result<Self, NvmeError> {
// TODO hardcoded page size
if base.into_u64() % 0x1000 != 0 {
todo!();
@ -128,12 +130,13 @@ impl PrpList {
}),
_ => {
let count = (size + 0xFFF) / 0x1000;
let list = PageBox::new_slice_with(|i| base.add((i + 1) * 0x1000), count - 1)
.map_err(NvmeError::MemoryError)?;
let list =
DmaBuffer::new_slice_with(dma, |i| base.add((i + 1) * 0x1000), count - 1)
.map_err(NvmeError::MemoryError)?;
Ok(Self {
prp1: PhysicalRegionPage::with_addr(base),
prp2: PhysicalRegionPage::with_addr(unsafe { list.as_physical_address() }),
prp2: PhysicalRegionPage::with_addr(list.bus_address()),
list: Some(list),
})
}
@ -148,7 +151,7 @@ impl PhysicalRegionPage {
Self(0)
}
pub const fn with_addr(address: PhysicalAddress) -> Self {
pub const fn with_addr(address: BusAddress) -> Self {
Self(address.into_u64())
}
}
@ -199,7 +202,7 @@ impl CompletionQueueEntry {
impl<T> Queue<T> {
pub fn new(
data: PageBox<[T]>,
data: DmaBuffer<[T]>,
head_doorbell: *mut u32,
tail_doorbell: *mut u32,
phase: bool,
@ -252,10 +255,12 @@ impl<T> Queue<T> {
self.tail = new_tail;
if !self.tail_doorbell.is_null() {
self.data.cache_flush_element(self.tail, true);
unsafe {
self.tail_doorbell
.write_volatile(self.tail.try_into().unwrap());
}
ArchitectureImpl::memory_barrier();
}
wrapped
@ -279,17 +284,18 @@ impl<T> Queue<T> {
impl QueuePair {
pub fn new(
dma: &dyn DmaAllocator,
id: u32,
vector: usize,
capacity: usize,
sq_doorbell: *mut u32,
cq_doorbell: *mut u32,
) -> Result<Self, Error> {
let sq_data = PageBox::new_slice(SubmissionQueueEntry::zeroed(), capacity)?;
let cq_data = PageBox::new_slice(CompletionQueueEntry::zeroed(), capacity)?;
let sq_data = DmaBuffer::new_slice(dma, SubmissionQueueEntry::zeroed(), capacity)?;
let cq_data = DmaBuffer::new_slice(dma, CompletionQueueEntry::zeroed(), capacity)?;
let sq_base = unsafe { sq_data.as_physical_address() };
let cq_base = unsafe { cq_data.as_physical_address() };
let sq_base = sq_data.bus_address();
let cq_base = cq_data.bus_address();
log::debug!("Allocated queue pair: sq={:p}, cq={:p}", sq_data, cq_data);
@ -315,12 +321,12 @@ impl QueuePair {
}
#[inline]
pub fn sq_physical_pointer(&self) -> PhysicalAddress {
pub fn sq_bus_pointer(&self) -> BusAddress {
self.sq_base
}
#[inline]
pub fn cq_physical_pointer(&self) -> PhysicalAddress {
pub fn cq_bus_pointer(&self) -> BusAddress {
self.cq_base
}
@ -387,16 +393,17 @@ impl QueuePair {
pub async fn request<'r, R: Request>(
&'r self,
dma: &dyn DmaAllocator,
req: R,
) -> Result<PageBox<R::Response>, NvmeError>
) -> Result<DmaBuffer<R::Response>, NvmeError>
where
R::Response: 'r,
{
let response = PageBox::new_uninit().map_err(NvmeError::MemoryError)?;
let list = PrpList::from_buffer(unsafe { response.as_physical_address() }, size_of::<R>())?;
let response = DmaBuffer::new_uninit(dma).map_err(NvmeError::MemoryError)?;
let list = PrpList::from_buffer(dma, response.bus_address(), size_of::<R>())?;
let command_id = self.submit(req, &list, true)?;
let result = self.wait_for_completion(command_id, response).await?;
Ok(unsafe { result.assume_init() })
Ok(unsafe { DmaBuffer::assume_init(result) })
}
pub fn process_completions(&self) -> usize {

View File

@ -0,0 +1,150 @@
use libk::{dma::BusAddress, error::Error};
use tock_registers::{
interfaces::{ReadWriteable, Readable, Writeable},
register_bitfields, register_structs,
registers::{ReadOnly, ReadWrite, WriteOnly},
};
use crate::queue::{CompletionQueueEntry, SubmissionQueueEntry};
register_bitfields! {
    u32,
    // Controller Configuration register (CC)
    pub CC [
        // I/O Completion Queue Entry Size (log2 of entry size in bytes,
        // see configure_controller())
        IOCQES OFFSET(20) NUMBITS(4) [],
        // I/O Submission Queue Entry Size (log2 of entry size in bytes)
        IOSQES OFFSET(16) NUMBITS(4) [],
        AMS OFFSET(11) NUMBITS(3) [],
        // Memory Page Size; presumably bytes = pow(2, 12 + MPS), matching the
        // CAP.MPSMIN/MPSMAX encoding — TODO confirm against the NVMe spec
        MPS OFFSET(7) NUMBITS(4) [],
        // I/O Command Set Selected
        CSS OFFSET(4) NUMBITS(3) [
            NvmCommandSet = 0
        ],
        // Controller Enable, toggled by enable_controller()/disable_controller()
        ENABLE OFFSET(0) NUMBITS(1) [],
    ],
    // Controller Status register (CSTS)
    pub CSTS [
        // Controller Fatal Status
        CFS OFFSET(1) NUMBITS(1) [],
        // Controller Ready
        RDY OFFSET(0) NUMBITS(1) [],
    ],
    // Admin Queue Attributes register (AQA)
    pub AQA [
        /// Admin Completion Queue Size in entries - 1
        ACQS OFFSET(16) NUMBITS(12) [],
        /// Admin Submission Queue Size in entries - 1
        ASQS OFFSET(0) NUMBITS(12) [],
    ]
}
register_bitfields! {
    u64,
    // Controller Capabilities register (CAP), read-only
    pub CAP [
        /// Maximum Queue Entries Supported - 1. i.e., 0 means maximum queue len of 1, 1 = 2 etc.
        MQES OFFSET(0) NUMBITS(16) [],
        /// Timeout. Represents the worst-case time the host software should wait for CSTS.RDY to
        /// change its state.
        TO OFFSET(24) NUMBITS(8) [],
        /// Doorbell stride. Stride in bytes = pow(2, 2 + DSTRD).
        DSTRD OFFSET(32) NUMBITS(4) [],
        /// NVM Subsystem Reset Supported (see NVMe BS Section 3.7.1)
        NSSRS OFFSET(36) NUMBITS(1) [],
        /// Controller supports one or more I/O command sets
        CSS_IO_COMMANDS OFFSET(43) NUMBITS(1) [],
        /// Controller only supports admin commands and no I/O commands
        CSS_ADMIN_ONLY OFFSET(44) NUMBITS(1) [],
        /// Memory page size minimum (bytes = pow(2, 12 + MPSMIN))
        MPSMIN OFFSET(48) NUMBITS(4) [],
        /// Memory page size maximum -|-
        MPSMAX OFFSET(52) NUMBITS(4) [],
    ]
}
register_structs! {
    // NVMe controller register file, mapped at the start of the controller's
    // MMIO region.
    #[allow(non_snake_case)]
    pub Regs {
        // Controller Capabilities
        (0x00 => pub CAP: ReadOnly<u64, CAP::Register>),
        (0x08 => pub VS: ReadOnly<u32>),
        (0x0C => pub INTMS: WriteOnly<u32>),
        (0x10 => pub INTMC: WriteOnly<u32>),
        // Controller Configuration
        (0x14 => pub CC: ReadWrite<u32, CC::Register>),
        (0x18 => _0),
        // Controller Status
        (0x1C => pub CSTS: ReadOnly<u32, CSTS::Register>),
        (0x20 => _1),
        // Admin queue sizes, written by configure_admin_queue()
        (0x24 => AQA: ReadWrite<u32, AQA::Register>),
        // Admin Submission Queue base address
        (0x28 => ASQ: ReadWrite<u64>),
        // Admin Completion Queue base address
        (0x30 => ACQ: ReadWrite<u64>),
        (0x38 => _2),
        (0x2000 => @END),
    }
}
impl Regs {
    /// Programs the admin queue registers (AQA/ASQ/ACQ) with the given queue
    /// base addresses and entry counts.
    ///
    /// Returns `Error::InvalidArgument` if either queue exceeds the maximum
    /// queue size the controller reports in CAP.MQES.
    pub fn configure_admin_queue(
        &self,
        submission_queue_pointer: BusAddress,
        completion_queue_pointer: BusAddress,
        submission_queue_size: usize,
        completion_queue_size: usize,
    ) -> Result<(), Error> {
        // CAP.MQES is zero-based: 0 means a maximum queue length of 1
        let max_queue_size = self.CAP.read(CAP::MQES) + 1;

        if submission_queue_size as u64 > max_queue_size {
            log::error!("admin submission queue too large");
            return Err(Error::InvalidArgument);
        }
        if completion_queue_size as u64 > max_queue_size {
            log::error!("admin completion queue too large");
            return Err(Error::InvalidArgument);
        }

        // AQA holds both sizes, also encoded zero-based
        self.AQA.write(
            AQA::ASQS.val(submission_queue_size as u32 - 1)
                + AQA::ACQS.val(completion_queue_size as u32 - 1),
        );
        self.ASQ.set(submission_queue_pointer.into_u64());
        self.ACQ.set(completion_queue_pointer.into_u64());

        Ok(())
    }

    /// Sets up queue entry sizes, memory page size and command set in CC.
    /// Does not enable the controller — see [Self::enable_controller].
    pub fn configure_controller(&self) {
        // Entry sizes are encoded as log2 of the entry size in bytes
        const IOSQES: u32 = size_of::<SubmissionQueueEntry>().ilog2();
        const IOCQES: u32 = size_of::<CompletionQueueEntry>().ilog2();

        self.CC.modify(
            CC::IOCQES.val(IOCQES)
                + CC::IOSQES.val(IOSQES)
                // MPS = 0 selects the minimum supported page size
                + CC::MPS.val(0)
                + CC::CSS::NvmCommandSet,
        );
    }

    /// Sets CC.EN and busy-waits until the controller reports either ready
    /// (CSTS.RDY) or a fatal error (CSTS.CFS), polling at most
    /// `timeout_cycles` times.
    pub fn enable_controller(&self, mut timeout_cycles: u64) -> Result<(), Error> {
        self.CC.modify(CC::ENABLE::SET);
        while timeout_cycles > 0 && !self.CSTS.matches_any(&[CSTS::RDY::SET, CSTS::CFS::SET]) {
            timeout_cycles -= 1;
            core::hint::spin_loop();
        }
        // Re-check the status rather than the loop counter: the controller
        // may have become ready exactly on the last polled cycle, in which
        // case the counter is zero but the enable still succeeded.
        if !self.CSTS.matches_any(&[CSTS::RDY::SET, CSTS::CFS::SET]) {
            return Err(Error::TimedOut);
        }
        if self.CSTS.matches_all(CSTS::CFS::SET) {
            log::error!("nvme: controller fatal status after enable");
            return Err(Error::InvalidArgument);
        }
        Ok(())
    }

    /// Clears CC.EN and busy-waits until CSTS.RDY is deasserted, polling at
    /// most `timeout_cycles` times.
    pub fn disable_controller(&self, mut timeout_cycles: u64) -> Result<(), Error> {
        self.CC.modify(CC::ENABLE::CLEAR);
        while timeout_cycles > 0 && self.CSTS.matches_all(CSTS::RDY::SET) {
            timeout_cycles -= 1;
            core::hint::spin_loop();
        }
        // Same edge case as enable_controller: trust the status bit, not the
        // remaining cycle count.
        if self.CSTS.matches_all(CSTS::RDY::SET) {
            Err(Error::TimedOut)
        } else {
            Ok(())
        }
    }
}

View File

@ -0,0 +1,14 @@
[package]
name = "ygg_driver_scsi"
version = "0.1.0"
edition = "2024"
[dependencies]
yggdrasil-abi.workspace = true
device-api.workspace = true
libk-util.workspace = true
libk-mm.workspace = true
libk.workspace = true
async-trait.workspace = true
log.workspace = true

View File

@ -0,0 +1,102 @@
use libk::error::Error;
use crate::device::ScsiDeviceType;
/// A SCSI command that can be serialized into a raw request block and whose
/// raw response bytes can be parsed into a typed value.
pub trait ScsiCommand {
    /// Parsed response type produced by [Self::parse_response]
    type Response;

    /// Length of the serialized request block, in bytes
    const REQUEST_LEN: usize;
    /// Expected length of the raw response, in bytes (0 for no-data commands)
    const RESPONSE_LEN: usize;

    /// Serializes the command into its wire representation.
    fn into_bytes(self) -> [u8; Self::REQUEST_LEN];
    /// Parses the raw response buffer into [Self::Response].
    fn parse_response(bytes: &[u8]) -> Result<Self::Response, Error>;
}
// Add more info when needed
/// INQUIRY (6) command — queries basic device information.
pub struct ScsiInquiry;

/// Parsed INQUIRY response.
#[derive(Debug)]
pub struct ScsiInquiryResponse {
    /// Peripheral device type, taken from bits 4..0 of byte 0
    pub device_type: ScsiDeviceType,
}

impl ScsiCommand for ScsiInquiry {
    type Response = ScsiInquiryResponse;
    const REQUEST_LEN: usize = 6;
    const RESPONSE_LEN: usize = 36;

    fn into_bytes(self) -> [u8; Self::REQUEST_LEN] {
        // Opcode 0x12 = INQUIRY (6). Byte 4 is the allocation length: it must
        // advertise how many response bytes the host will accept — leaving it
        // zero makes conforming devices return no data at all, while this
        // driver expects RESPONSE_LEN bytes back.
        [0x12, 0x00, 0x00, 0x00, Self::RESPONSE_LEN as u8, 0x00]
    }

    fn parse_response(bytes: &[u8]) -> Result<Self::Response, Error> {
        // Compare against the declared response length instead of a magic
        // number so the check cannot drift from RESPONSE_LEN
        if bytes.len() != Self::RESPONSE_LEN {
            return Err(Error::InvalidArgument);
        }
        // Unknown device type codes fall back to ScsiDeviceType's default
        let device_type = ScsiDeviceType::try_from(bytes[0] & 0x1F).unwrap_or_default();
        Ok(ScsiInquiryResponse { device_type })
    }
}
/// TEST UNIT READY (6) command — checks whether the unit can accept I/O.
pub struct ScsiTestUnitReady;

/// TEST UNIT READY transfers no data; command success itself is the result.
#[derive(Debug)]
pub struct ScsiTestUnitReadyResponse;

impl ScsiCommand for ScsiTestUnitReady {
    type Response = ScsiTestUnitReadyResponse;
    const REQUEST_LEN: usize = 6;
    const RESPONSE_LEN: usize = 0;

    fn into_bytes(self) -> [u8; Self::REQUEST_LEN] {
        // Opcode 0x00, remaining CDB bytes all zero
        [0x00; 6]
    }

    fn parse_response(_bytes: &[u8]) -> Result<Self::Response, Error> {
        Ok(ScsiTestUnitReadyResponse)
    }
}
/// REQUEST SENSE (6) command. The sense data itself is currently discarded —
/// the command is issued only to clear the device's pending sense state.
pub struct ScsiRequestSense;

/// Empty response marker for [ScsiRequestSense].
#[derive(Debug)]
pub struct ScsiRequestSenseResponse;

impl ScsiCommand for ScsiRequestSense {
    type Response = ScsiRequestSenseResponse;
    const REQUEST_LEN: usize = 6;
    const RESPONSE_LEN: usize = 0;

    fn into_bytes(self) -> [u8; Self::REQUEST_LEN] {
        // Opcode 0x03, no allocation length requested (sense data ignored)
        [0x03, 0x00, 0x00, 0x00, 0x00, 0x00]
    }

    fn parse_response(_bytes: &[u8]) -> Result<Self::Response, Error> {
        Ok(ScsiRequestSenseResponse)
    }
}
/// READ CAPACITY (10) command — queries medium geometry.
pub struct ScsiReadCapacity;

/// Parsed READ CAPACITY (10) response.
#[derive(Debug)]
pub struct ScsiReadCapacityResponse {
    /// Logical block size in bytes
    pub block_size: u32,
    // NOTE(review): READ CAPACITY (10) reports the LBA of the *last* block,
    // not the number of blocks — confirm whether callers need a +1 here.
    pub block_count: u32,
}

impl ScsiCommand for ScsiReadCapacity {
    type Response = ScsiReadCapacityResponse;
    const REQUEST_LEN: usize = 10;
    const RESPONSE_LEN: usize = 8;

    fn into_bytes(self) -> [u8; Self::REQUEST_LEN] {
        // Opcode 0x25 = READ CAPACITY (10), all other fields zero
        [0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
    }

    fn parse_response(bytes: &[u8]) -> Result<Self::Response, Error> {
        // Compare against the declared response length instead of a magic
        // number so the check cannot drift from RESPONSE_LEN
        if bytes.len() != Self::RESPONSE_LEN {
            return Err(Error::InvalidArgument);
        }
        // Both fields are big-endian 32-bit values
        let block_count = u32::from_be_bytes(bytes[0..4].try_into().unwrap());
        let block_size = u32::from_be_bytes(bytes[4..8].try_into().unwrap());
        Ok(ScsiReadCapacityResponse {
            block_size,
            block_count,
        })
    }
}

View File

@ -0,0 +1,24 @@
use yggdrasil_abi::primitive_enum;
// SCSI peripheral device type codes, decoded from bits 4..0 of byte 0 of the
// INQUIRY response (see command.rs). Values with no named variant fall back
// to `Other` through the `Default` derive.
primitive_enum! {
    #[derive(Default)]
    pub enum ScsiDeviceType: u8 {
        DirectAccessBlock = 0x00,
        SequentialAccess = 0x01,
        Printer = 0x02,
        Processor = 0x03,
        WriteOnce = 0x04,
        CdDvd = 0x05,
        OpticalMemory = 0x07,
        MediumChanger = 0x08,
        StorageArrayController = 0x0C,
        EnclosureServices = 0x0D,
        SimplifiedDirectAccess = 0x0E,
        OpticalCard = 0x0F,
        BridgeController = 0x10,
        ObjectBasedStorage = 0x11,
        AutomationDriveInterface = 0x12,
        #[default]
        Other = 0x1F,
    }
}

View File

@ -0,0 +1,381 @@
#![feature(generic_const_exprs, maybe_uninit_slice)]
#![allow(incomplete_features)]
#![no_std]
use core::{
mem::MaybeUninit,
sync::atomic::{AtomicBool, Ordering},
time::Duration,
};
use alloc::{
boxed::Box, collections::btree_map::BTreeMap, format, string::String, sync::Arc, vec::Vec,
};
use async_trait::async_trait;
use command::{ScsiReadCapacity, ScsiRequestSense, ScsiTestUnitReady};
use device_api::device::Device;
use libk::{
block,
device::{block::BlockDevice, manager::probe_partitions},
dma::{DmaBuffer, DmaSlice, DmaSliceMut},
error::Error,
fs::devfs,
task::{runtime, sync::AsyncMutex},
};
use libk_mm::{
address::PhysicalAddress, table::MapAttributes, OnDemandPage, PageProvider, VirtualPage,
};
use libk_util::{
sync::{spin_rwlock::IrqSafeRwLock, IrqSafeSpinlock},
OneTimeInit,
};
use transport::{ScsiTransport, ScsiTransportWrapper};
use yggdrasil_abi::io::FileMode;
extern crate alloc;
pub mod command;
pub mod device;
pub mod transport;
/// A SCSI enclosure: one transport plus a fixed set of logical units (LUNs).
pub struct ScsiEnclosure {
    // Serialized access to the underlying command transport
    transport: AsyncMutex<ScsiTransportWrapper>,
    // One slot per LUN index; None while that unit is absent or not set up
    units: Vec<IrqSafeRwLock<Option<Arc<ScsiUnit>>>>,
    // Global enclosure index, assigned once by register_enclosure()
    index: OneTimeInit<u32>,
    // Set by detach() to stop the background poll task
    shutdown: AtomicBool,
}
/// A single logical unit of a [ScsiEnclosure], exposed as a block device.
pub struct ScsiUnit {
    // Owning enclosure (provides the transport and the enclosure index)
    enclosure: Arc<ScsiEnclosure>,
    // LUN index within the enclosure
    lun: u8,
    // Total number of logical blocks, from READ CAPACITY
    lba_count: u64,
    // Logical block size in bytes, from READ CAPACITY
    lba_size: usize,
    // Per-request block limit derived from the transport's byte limit
    max_lba_per_request: usize,
    // devfs node names created for this unit (unit + partitions), removed on detach
    names: IrqSafeRwLock<Vec<String>>,
}
impl ScsiEnclosure {
    /// Creates and registers an enclosure over `transport`, probes all
    /// `lun_count` LUNs, and starts a background task which re-polls the
    /// LUNs for hot-plug/unplug every 100ms until [Self::detach] is called.
    pub async fn setup(
        transport: Box<dyn ScsiTransport>,
        lun_count: usize,
    ) -> Result<Arc<Self>, Error> {
        let transport = AsyncMutex::new(ScsiTransportWrapper::new(transport));
        let units = (0..lun_count).map(|_| IrqSafeRwLock::new(None)).collect();
        let this = Arc::new(Self {
            transport,
            units,
            index: OneTimeInit::new(),
            shutdown: AtomicBool::new(false),
        });

        // Assigns this.index, which unit setup/logging below depends on
        register_enclosure(this.clone())?;

        // Probe LUNs
        for i in 0..lun_count {
            if this.probe_lun(i as u8).await {
                if let Ok(unit) = ScsiUnit::setup(this.clone(), i as u8).await {
                    *this.units[i].write() = Some(unit);
                }
            }
        }

        // Start enclosure poll task
        let enclosure = this.clone();
        runtime::spawn(async move {
            while !enclosure.shutdown.load(Ordering::Acquire) {
                enclosure.poll().await;
                runtime::sleep(Duration::from_millis(100)).await;
            }
        })
        .ok();

        Ok(this)
    }

    /// Returns `true` if the LUN answers TEST UNIT READY within a few
    /// attempts, backing off exponentially between retries.
    async fn probe_lun(self: &Arc<Self>, lun: u8) -> bool {
        let mut attempts = 3;
        let mut timeout = 10;
        // TODO get statuses to better see if there's a real error or the LUN is not present
        while attempts > 0 {
            let mut transport = self.transport.lock().await;
            // TEST UNIT READY (6)
            if transport
                .perform_command(lun, ScsiTestUnitReady)
                .await
                .is_ok()
            {
                break;
            }
            // If not, send a REQUEST SENSE (6) to clear pending sense state,
            // then release the transport and back off before retrying
            transport.perform_command(lun, ScsiRequestSense).await.ok();
            drop(transport);
            runtime::sleep(Duration::from_millis(timeout)).await;
            timeout *= 2;
            attempts -= 1;
        }
        // All attempts exhausted means the LUN is considered absent
        // (was `if attempts == 0 { false } else { true }`)
        attempts > 0
    }

    /// Re-probes every LUN once, attaching newly-present units and detaching
    /// lost ones.
    async fn poll(self: &Arc<Self>) {
        let index = *self.index.get();
        for lun in 0..self.units.len() {
            // NOTE(review): the slot write guard is held across the probe and
            // setup awaits below — confirm IrqSafeRwLock is safe to hold over
            // suspension points here.
            let mut slot = self.units[lun].write();
            let present = self.probe_lun(lun as u8).await;

            if let Some(unit) = slot.as_ref() {
                if !present {
                    log::warn!("scsi{index}u{lun} lost");
                    unit.detach();
                    *slot = None;
                }
            } else if present {
                if let Ok(unit) = ScsiUnit::setup(self.clone(), lun as u8).await {
                    log::info!("scsi{index}u{lun} attached");
                    *slot = Some(unit);
                } else {
                    log::warn!("scsi{index}u{lun} attached, but could not setup");
                }
            }
        }
    }

    /// Stops the poll task, detaches all units and deregisters the enclosure.
    pub fn detach(&self) {
        self.shutdown.store(true, Ordering::Release);

        let index = self.index.try_get().copied();

        for unit in self.units.iter() {
            if let Some(unit) = unit.write().take() {
                unit.detach();
            }
        }

        // Deregister the enclosure
        if let Some(index) = index {
            remove_enclosure(index);
        }
    }
}
impl ScsiUnit {
    /// Queries the unit's capacity and the transport's transfer limit, then
    /// registers the unit (and, asynchronously, its partitions) as block
    /// devices. The enclosure's index must already be initialized.
    pub async fn setup(enclosure: Arc<ScsiEnclosure>, lun: u8) -> Result<Arc<Self>, Error> {
        let enclosure_index = *enclosure.index.get();
        let mut transport = enclosure.transport.lock().await;

        // TODO INQUIRY fails for real USB flash drives
        // transport.perform_command(0, ScsiInquiry).await?;

        let capacity_info = transport.perform_command(lun, ScsiReadCapacity).await?;
        // Cap blocks-per-request by the transport's byte limit
        let max_lba_per_request =
            transport.max_bytes_per_request() / capacity_info.block_size as usize;
        log::info!(
            "scsi{enclosure_index}u{lun}: lba_size={}, lba_count={}, max_lba_per_request={}",
            capacity_info.block_size,
            capacity_info.block_count,
            max_lba_per_request
        );

        // Release the transport before register_unit: the partition-probe
        // task it spawns reads from the unit through the same transport
        drop(transport);

        let unit = Arc::new(Self {
            enclosure,
            lun,
            lba_count: capacity_info.block_count.into(),
            lba_size: capacity_info.block_size as usize,
            max_lba_per_request,
            names: IrqSafeRwLock::new(Vec::new()),
        });

        register_unit(enclosure_index, lun, unit.clone());

        Ok(unit)
    }

    /// Removes all devfs nodes created for this unit (the unit node plus any
    /// partition nodes recorded in `names`).
    fn detach(&self) {
        let id = *self.enclosure.index.get();
        log::info!("scsi{id}u{} detached", self.lun);
        for name in self.names.read().iter() {
            devfs::remove_node(name).ok();
        }
    }
}
#[async_trait]
impl BlockDevice for ScsiUnit {
    /// Allocates a DMA buffer suitable for transfers on this unit's transport.
    fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error> {
        block!(self.enclosure.transport.lock().await.allocate_buffer(size))?
    }

    /// Reads a whole number of blocks at block-aligned byte `position` into
    /// `buffer`, splitting the transfer according to the transport's
    /// per-request limit.
    async fn read_aligned(
        &self,
        position: u64,
        buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
    ) -> Result<(), Error> {
        if position % self.lba_size as u64 != 0 {
            log::warn!("scsi: misaligned read");
            return Err(Error::InvalidArgument);
        }
        if buffer.len() % self.lba_size != 0 {
            log::warn!("scsi: misaligned buffer size");
            return Err(Error::InvalidArgument);
        }

        let lba_start = position / self.lba_size as u64;
        let lba_count = buffer.len() / self.lba_size;
        // The read covers [lba_start, lba_start + lba_count); it is valid as
        // long as it ends no later than the medium end. Reject only when the
        // end *exceeds* the block count (the previous `>=` wrongly rejected
        // reads ending exactly at the last block).
        if lba_start.saturating_add(lba_count as u64) > self.lba_count {
            log::warn!("scsi: read beyond medium end");
            return Err(Error::InvalidArgument);
        }
        let lba_end = lba_start + lba_count as u64;

        let mut transport = self.enclosure.transport.lock().await;

        // TODO DmaSliceMut subslicing
        let (buffer, range) = buffer.into_parts();
        let mut offset = range.start;

        // Issue one transport read per chunk of at most max_lba_per_request
        for i in (0..lba_count).step_by(self.max_lba_per_request) {
            let lba = lba_start + i as u64;
            let end = (lba + self.max_lba_per_request as u64).min(lba_end);
            let count = (end - lba) as usize;
            let amount = count * self.lba_size;

            let dst_slice = buffer.slice_mut(offset..offset + amount);
            let len = transport
                .read(self.lun, lba, count as u16, dst_slice)
                .await?;

            // Treat a short read as an error
            if len != amount {
                return Err(Error::InvalidArgument);
            }

            offset += amount;
        }

        Ok(())
    }

    async fn write_aligned(&self, _position: u64, _buffer: DmaSlice<'_, u8>) -> Result<(), Error> {
        // Write support is not implemented yet
        Err(Error::NotImplemented)
    }

    fn block_size(&self) -> usize {
        self.lba_size
    }

    fn block_count(&self) -> u64 {
        self.lba_count
    }

    fn max_blocks_per_request(&self) -> usize {
        self.max_lba_per_request
    }
}
impl PageProvider for ScsiUnit {
    // NOTE(review): none of these page-mapping entry points are implemented —
    // presumably a SCSI unit is never memory-mapped directly and these paths
    // are unreachable; confirm against the BlockDevice/PageProvider callers.
    fn ondemand_fetch(&self, _opaque: u64) -> Result<OnDemandPage, Error> {
        unimplemented!()
    }
    fn get_page(&self, _offset: u64) -> Result<VirtualPage, Error> {
        unimplemented!()
    }
    fn release_page(
        &self,
        _offset: u64,
        _phys: PhysicalAddress,
        _dirty: bool,
    ) -> Result<(), Error> {
        unimplemented!()
    }
    fn clone_page(
        &self,
        _offset: u64,
        _src_phys: PhysicalAddress,
        _src_attrs: MapAttributes,
    ) -> Result<PhysicalAddress, Error> {
        unimplemented!()
    }
}
impl Device for ScsiUnit {
    // Human-readable name reported to the device framework
    fn display_name(&self) -> &str {
        "SCSI Unit"
    }
}
impl Drop for ScsiUnit {
    /// Purely diagnostic: logs when the last reference to the unit is
    /// released. Skips logging if the enclosure never got an index.
    fn drop(&mut self) {
        let Some(index) = self.enclosure.index.try_get() else {
            return;
        };
        log::info!("scsi{index}u{} dropped", self.lun);
    }
}
// TODO this is crap
// Global registry of attached enclosures keyed by index, plus a bitmap of
// index values currently in use (bit N set <=> index N taken).
static SCSI_ENCLOSURES: IrqSafeSpinlock<BTreeMap<u32, Arc<ScsiEnclosure>>> =
    IrqSafeSpinlock::new(BTreeMap::new());
static SCSI_BITMAP: IrqSafeSpinlock<u32> = IrqSafeSpinlock::new(0);
/// Assigns the enclosure the lowest free global index, stores it in the
/// enclosure registry and initializes `enclosure.index`.
///
/// Fails with `Error::InvalidOperation` when all index bits are taken.
/// Lock order: SCSI_BITMAP, then SCSI_ENCLOSURES (remove_enclosure must
/// match).
fn register_enclosure(enclosure: Arc<ScsiEnclosure>) -> Result<(), Error> {
    let index = {
        let mut bitmap = SCSI_BITMAP.lock();
        // Scan the whole u32 bitmap; the previous hard limit of 8 was
        // narrower than the bitmap itself
        let index = (0..u32::BITS)
            .position(|p| *bitmap & (1 << p) == 0)
            .ok_or(Error::InvalidOperation)
            .inspect_err(|_| log::warn!("Cannot attach SCSI enclosure: too many of them"))?
            as u32;
        let mut devices = SCSI_ENCLOSURES.lock();
        *bitmap |= 1 << index;
        assert!(!devices.contains_key(&index));
        devices.insert(index, enclosure.clone());
        index
    };
    enclosure.index.init(index);
    Ok(())
}
/// Exposes `unit` as the devfs block device `scsi{enclosure_index}u{lun}` and
/// spawns a task that probes its partition table, registering each partition
/// as `{name}p{N}`. Every created node name is recorded in `unit.names` so
/// detach() can remove them later.
fn register_unit(enclosure_index: u32, lun: u8, unit: Arc<ScsiUnit>) {
    let name = format!("scsi{enclosure_index}u{lun}");
    unit.names.write().push(name.clone());
    devfs::add_named_block_device(unit.clone(), name.clone(), FileMode::new(0o600)).ok();

    // TODO this code is repeated everywhere
    // Partition probing does I/O, so it runs asynchronously; failures are
    // ignored (the bare unit node remains usable)
    runtime::spawn(async move {
        let name = name;
        probe_partitions(unit.clone(), |index, partition| {
            // Partition numbering is 1-based in the node name
            let partition_name = format!("{name}p{}", index + 1);
            log::info!("{name}: partition {partition_name}");
            unit.names.write().push(partition_name.clone());
            devfs::add_named_block_device(
                Arc::new(partition),
                partition_name,
                FileMode::new(0o600),
            )
            .ok();
        })
        .await
        .ok();
    })
    .ok();
}
/// Removes the enclosure from the registry and frees its index bit.
fn remove_enclosure(index: u32) {
    // Acquire the locks in the same order as register_enclosure (bitmap
    // first, then the device map) — the previous order was inverted, which
    // could deadlock against a concurrent register_enclosure.
    let mut bitmap = SCSI_BITMAP.lock();
    let mut devices = SCSI_ENCLOSURES.lock();
    *bitmap &= !(1 << index);
    devices.remove(&index);
    log::info!("scsi: enclosure {index} detached");
}

View File

@ -0,0 +1,99 @@
use core::{mem::MaybeUninit, ops::Deref};
use alloc::boxed::Box;
use async_trait::async_trait;
use libk::{
dma::{DmaBuffer, DmaSliceMut},
error::Error,
};
use crate::command::ScsiCommand;
/// Low-level SCSI request transport implemented by the underlying bus driver.
#[async_trait]
pub trait ScsiTransport: Send + Sync {
    /// Allocates a DMA buffer usable for transfers on this transport.
    fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error>;

    /// Sends a raw request block and reads the device's response into
    /// `response_buffer`, returning the number of bytes actually received.
    /// Commands that transfer no data pass a zero-length buffer slice.
    async fn perform_request_raw(
        &mut self,
        lun: u8,
        request_data: &[u8],
        response_buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
    ) -> Result<usize, Error>;

    /// Maximum number of bytes a single request may transfer.
    fn max_bytes_per_request(&self) -> usize;
}
/// Wraps a [ScsiTransport], layering typed command submission and READ
/// helpers on top of the raw request interface.
pub struct ScsiTransportWrapper {
    inner: Box<dyn ScsiTransport>,
}
impl ScsiTransportWrapper {
    pub fn new(inner: Box<dyn ScsiTransport>) -> Self {
        Self { inner }
    }

    /// Reads `lba_count` blocks starting at `lba` with a READ (10) command,
    /// returning the number of bytes transferred.
    ///
    /// READ (10) carries a 32-bit LBA, so addresses that do not fit are
    /// rejected with `Error::InvalidArgument`. (0xFFFF_FFFF itself is
    /// representable and is now accepted; the previous `>=` check rejected
    /// it.)
    pub async fn read(
        &mut self,
        lun: u8,
        lba: u64,
        lba_count: u16,
        buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
    ) -> Result<usize, Error> {
        if lba > u32::MAX as u64 {
            return Err(Error::InvalidArgument);
        }

        let lba_bytes = (lba as u32).to_be_bytes();
        // `lba_count` is already u16 — no cast needed before serializing
        let count_bytes = lba_count.to_be_bytes();

        // Issue a READ (10) command:
        // opcode, flags, LBA (BE32), group, transfer length (BE16), control
        let request_buffer = [
            0x28,
            0x00,
            lba_bytes[0],
            lba_bytes[1],
            lba_bytes[2],
            lba_bytes[3],
            0x00,
            count_bytes[0],
            count_bytes[1],
            0x00,
        ];

        self.inner
            .perform_request_raw(lun, &request_buffer, buffer)
            .await
    }

    /// Sends a typed command and parses its response: allocates an
    /// `R::RESPONSE_LEN`-byte DMA buffer, performs the raw request and hands
    /// however many bytes arrived to `R::parse_response`.
    pub async fn perform_command<R: ScsiCommand>(
        &mut self,
        lun: u8,
        request: R,
    ) -> Result<R::Response, Error>
    where
        [u8; R::RESPONSE_LEN]: Sized,
        [u8; R::REQUEST_LEN]: Sized,
    {
        let mut response_buffer = self.allocate_buffer(R::RESPONSE_LEN)?;
        let request_buffer = request.into_bytes();
        let response_len = self
            .inner
            .perform_request_raw(
                lun,
                &request_buffer,
                response_buffer.slice_mut(0..R::RESPONSE_LEN),
            )
            .await?;
        // SAFETY: the transport reports that `response_len` bytes were
        // received, i.e. written (initialized) by the transfer.
        let response_bytes = unsafe { response_buffer[..response_len].assume_init_ref() };

        R::parse_response(response_bytes)
    }
}
impl Deref for ScsiTransportWrapper {
    type Target = dyn ScsiTransport;

    /// Gives callers direct read access to the wrapped transport.
    fn deref(&self) -> &Self::Target {
        &*self.inner
    }
}

View File

@ -8,15 +8,17 @@ authors = ["Mark Poliakov <mark@alnyan.me>"]
yggdrasil-abi.workspace = true
device-api = { workspace = true, features = ["derive"] }
libk-mm.workspace = true
libk-device.workspace = true
libk-util.workspace = true
libk.workspace = true
log.workspace = true
bitflags.workspace = true
tock-registers.workspace = true
[target.'cfg(target_arch = "x86_64")'.dependencies]
ygg_driver_acpi.path = "../../acpi"
acpi.workspace = true
kernel-arch-x86.workspace = true
[lints]
workspace = true

View File

@ -1,6 +1,7 @@
//! PCI capability structures and queries
use alloc::{vec, vec::Vec};
use alloc::{sync::Arc, vec, vec::Vec};
use bitflags::bitflags;
use device_api::interrupt::{
InterruptAffinity, InterruptHandler, MessageInterruptController, MsiInfo,
};
@ -11,8 +12,25 @@ use tock_registers::{
};
use yggdrasil_abi::error::Error;
use crate::PciBaseAddress;
use super::{PciCapability, PciCapabilityId, PciConfigurationSpace};
bitflags! {
pub struct PcieLinkControl: u16 {
const ASPM_DISABLE = 0 << 0;
// Active state power management control
const ASPM_MASK = 0x3 << 0;
// Enable clock power management
const ECPM = 1 << 8;
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
use core::mem::offset_of;
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
use kernel_arch_x86::intrinsics;
pub trait VirtioCapabilityData<'s, S: PciConfigurationSpace + ?Sized + 's>: Sized {
fn from_space_offset(space: &'s S, offset: usize) -> Self;
@ -41,11 +59,14 @@ pub trait VirtioCapability {
type Output<'a, S: PciConfigurationSpace + ?Sized + 'a>: VirtioCapabilityData<'a, S>;
}
/// Power management capability entry
pub struct PowerManagementCapability;
/// MSI-X capability query
pub struct MsiXCapability;
/// MSI capability query
pub struct MsiCapability;
/// PCIe capability
pub struct PciExpressCapability;
// VirtIO-over-PCI capabilities
/// VirtIO PCI configuration access
@ -57,6 +78,15 @@ pub struct VirtioNotifyConfigCapability;
/// VirtIO interrupt status
pub struct VirtioInterruptStatusCapability;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DevicePowerState {
D0,
D1,
D2,
D3Cold,
D3Hot,
}
/// Represents an entry in MSI-X vector table
#[repr(C)]
pub struct MsiXEntry {
@ -68,8 +98,21 @@ pub struct MsiXEntry {
pub control: ReadWrite<u32>,
}
enum MsiXVectorTableAccess<'a> {
Memory(DeviceMemoryIoMut<'a, [MsiXEntry]>),
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
Io(u16),
}
pub struct MsiXVectorTable<'a> {
vectors: DeviceMemoryIoMut<'a, [MsiXEntry]>,
access: MsiXVectorTableAccess<'a>,
len: usize,
}
/// PCI Power Management capability data structure
pub struct PowerManagementData<'s, S: PciConfigurationSpace + ?Sized + 's> {
space: &'s S,
offset: usize,
}
/// MSI-X capability data structure
@ -84,6 +127,12 @@ pub struct MsiData<'s, S: PciConfigurationSpace + ?Sized + 's> {
offset: usize,
}
/// PCI Express capability data structure
pub struct PcieData<'s, S: PciConfigurationSpace + ?Sized + 's> {
space: &'s S,
offset: usize,
}
pub struct VirtioDeviceConfigData<'s, S: PciConfigurationSpace + ?Sized + 's> {
space: &'s S,
offset: usize,
@ -122,6 +171,19 @@ impl<T: VirtioCapability> PciCapability for T {
}
}
impl PciCapability for PowerManagementCapability {
const ID: PciCapabilityId = PciCapabilityId::PowerManagement;
type CapabilityData<'a, S: PciConfigurationSpace + ?Sized + 'a> = PowerManagementData<'a, S>;
fn data<'s, S: PciConfigurationSpace + ?Sized + 's>(
space: &'s S,
offset: usize,
_len: usize,
) -> Self::CapabilityData<'s, S> {
PowerManagementData { space, offset }
}
}
impl PciCapability for MsiXCapability {
const ID: PciCapabilityId = PciCapabilityId::MsiX;
type CapabilityData<'a, S: PciConfigurationSpace + ?Sized + 'a> = MsiXData<'a, S>;
@ -148,6 +210,19 @@ impl PciCapability for MsiCapability {
}
}
impl PciCapability for PciExpressCapability {
const ID: PciCapabilityId = PciCapabilityId::PciExpress;
type CapabilityData<'a, S: PciConfigurationSpace + ?Sized + 'a> = PcieData<'a, S>;
fn data<'s, S: PciConfigurationSpace + ?Sized + 's>(
space: &'s S,
offset: usize,
_len: usize,
) -> Self::CapabilityData<'s, S> {
PcieData { space, offset }
}
}
impl VirtioCapability for VirtioDeviceConfigCapability {
const CFG_TYPE: u8 = 0x04;
type Output<'a, S: PciConfigurationSpace + ?Sized + 'a> = VirtioDeviceConfigData<'a, S>;
@ -246,6 +321,56 @@ impl<'s, S: PciConfigurationSpace + ?Sized + 's> VirtioCapabilityData<'s, S>
}
}
impl<'s, S: PciConfigurationSpace + ?Sized + 's> PowerManagementData<'s, S> {
    /// Requests a transition to `state` through the PMCSR register
    /// (capability offset + 4). Only the D0 transition is implemented;
    /// other states just log a TODO.
    pub fn set_device_power_state(&self, state: DevicePowerState) {
        // PMCSR with the power-state field (bits 1:0) cleared
        let pmcsr = self.space.read_u16(self.offset + 4) & !0x3;
        let current = self.get_device_power_state();
        if state == current {
            return;
        }
        log::info!("Set device power state: {state:?}");
        match state {
            DevicePowerState::D0 => {
                // power = 0b00 | PME_EN
                self.space.write_u16(self.offset + 4, pmcsr);
            }
            _ => {
                log::warn!("TODO: {state:?} power state");
            }
        }
    }

    /// Sets or clears the PME_En bit (bit 8) of PMCSR, skipping the config
    /// write when the bit already holds the requested value.
    pub fn set_pme_en(&self, state: bool) {
        let pmcsr = self.space.read_u16(self.offset + 4);
        let new = if state {
            pmcsr | (1 << 8)
        } else {
            pmcsr & !(1 << 8)
        };
        if pmcsr == new {
            return;
        }
        log::info!("Set PMCSR.PME_En = {state}");
        self.space.write_u16(self.offset + 4, new);
    }

    /// Reads the current power state from PMCSR bits 1:0.
    // NOTE(review): D3Cold has no PMCSR encoding and is never returned here;
    // 0b11 is decoded as D3Hot.
    pub fn get_device_power_state(&self) -> DevicePowerState {
        let pmcsr = self.space.read_u16(self.offset + 4);
        match pmcsr & 0x3 {
            0b00 => DevicePowerState::D0,
            0b01 => DevicePowerState::D1,
            0b10 => DevicePowerState::D2,
            0b11 => DevicePowerState::D3Hot,
            _ => unreachable!(),
        }
    }
}
impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiXData<'s, S> {
// TODO use pending bits as well
/// Maps and returns the vector table associated with the device's MSI-X capability
@ -260,13 +385,30 @@ impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiXData<'s, S> {
let Some(base) = self.space.bar(bir) else {
return Err(Error::DoesNotExist);
};
let Some(base) = base.as_memory() else {
return Err(Error::InvalidOperation);
};
log::debug!("MSI-X table address: {:#x}", base.add(table_offset));
unsafe { MsiXVectorTable::from_raw_parts(base.add(table_offset), table_size) }
match base {
PciBaseAddress::Memory32(mem32) => unsafe {
log::info!("MSI-X table address: {:#x}", mem32 + table_offset as u32);
MsiXVectorTable::memory_from_raw_parts(
PhysicalAddress::from_u32(mem32).add(table_offset),
table_size,
)
},
PciBaseAddress::Memory64(mem64) => unsafe {
log::info!("MSI-X table address: {:#x}", mem64 + table_offset as u64);
MsiXVectorTable::memory_from_raw_parts(
PhysicalAddress::from_u64(mem64).add(table_offset),
table_size,
)
},
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
PciBaseAddress::Io(io) => unsafe {
log::info!("MSI-X table I/O: {:#x}", io + table_offset as u16);
MsiXVectorTable::io_from_raw_parts(io + table_offset as u16, table_size)
},
#[cfg(any(not(any(target_arch = "x86", target_arch = "x86_64")), rust_analyzer))]
PciBaseAddress::Io(_) => Err(Error::DoesNotExist),
}
}
/// Changes the global enable status for the device's MSI-X capability. If set, regular IRQs
@ -292,25 +434,98 @@ impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiXData<'s, S> {
}
}
impl MsiXVectorTable<'_> {
unsafe fn from_raw_parts(base: PhysicalAddress, len: usize) -> Result<Self, Error> {
let vectors = DeviceMemoryIoMut::map_slice(base, len, Default::default())?;
Ok(Self { vectors })
}
pub fn mask_all(&mut self) {
for vector in self.vectors.iter_mut() {
vector.set_masked(true);
impl MsiXVectorTableAccess<'_> {
fn set_vector_masked(&mut self, vector: usize, masked: bool) {
let old = self.read_control(vector);
let new = if masked { old | 1 } else { old & !1 };
if old != new {
self.write_control(vector, new);
}
}
pub fn register_range<C: MessageInterruptController + ?Sized>(
fn read_control(&mut self, vector: usize) -> u32 {
match self {
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
&mut Self::Io(base) => unsafe {
let a = base
+ (vector * size_of::<MsiXEntry>() + offset_of!(MsiXEntry, control)) as u16;
intrinsics::inl(a)
},
Self::Memory(vectors) => vectors[vector].control.get(),
}
}
fn write_address(&mut self, vector: usize, value: u64) {
match self {
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
&mut Self::Io(base) => unsafe {
let a = base + (vector * size_of::<MsiXEntry>()) as u16;
intrinsics::outl(a, value as u32);
intrinsics::outl(a + 4, (value >> 32) as u32);
},
Self::Memory(vectors) => vectors[vector].address.set(value),
}
}
fn write_data(&mut self, vector: usize, value: u32) {
match self {
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
&mut Self::Io(base) => unsafe {
let a =
base + (vector * size_of::<MsiXEntry>() + offset_of!(MsiXEntry, data)) as u16;
intrinsics::outl(a, value)
},
Self::Memory(vectors) => vectors[vector].data.set(value),
}
}
fn write_control(&mut self, vector: usize, value: u32) {
match self {
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
&mut Self::Io(base) => unsafe {
let a = base
+ (vector * size_of::<MsiXEntry>() + offset_of!(MsiXEntry, control)) as u16;
intrinsics::outl(a, value)
},
Self::Memory(vectors) => vectors[vector].control.set(value),
}
}
}
impl MsiXVectorTable<'_> {
unsafe fn memory_from_raw_parts(base: PhysicalAddress, len: usize) -> Result<Self, Error> {
let vectors = DeviceMemoryIoMut::map_slice(base, len, Default::default())?;
Ok(Self {
access: MsiXVectorTableAccess::Memory(vectors),
len,
})
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
unsafe fn io_from_raw_parts(base: u16, len: usize) -> Result<Self, Error> {
Ok(Self {
access: MsiXVectorTableAccess::Io(base),
len,
})
}
pub fn mask_all(&mut self) {
for i in 0..self.len {
self.access.set_vector_masked(i, true);
}
}
pub fn register_range(
&mut self,
start: usize,
end: usize,
ic: &C,
ic: &Arc<dyn MessageInterruptController>,
affinity: InterruptAffinity,
handler: &'static dyn InterruptHandler,
handler: Arc<dyn InterruptHandler>,
) -> Result<Vec<MsiInfo>, Error> {
assert!(end > start);
let mut range = vec![
@ -320,38 +535,37 @@ impl MsiXVectorTable<'_> {
};
end - start
];
ic.register_msi_range(&mut range, handler)?;
ic.clone().register_msi_range(&mut range, handler)?;
for (i, info) in range.iter().enumerate() {
let index = i + start;
self.vectors[index].address.set(info.address as _);
self.vectors[index].data.set(info.value);
self.vectors[index].set_masked(false);
self.access.write_address(index, info.address as _);
self.access.write_data(index, info.value);
self.access.set_vector_masked(index, false);
}
Ok(range)
}
}
impl MsiXEntry {
/// If set, prevents the MSI-X interrupt from being delivered
fn set_masked(&mut self, masked: bool) {
if masked {
self.control.set(self.control.get() | 1);
} else {
self.control.set(self.control.get() & !1);
}
}
}
impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiData<'s, S> {
pub fn register<C: MessageInterruptController + ?Sized>(
pub fn set_enabled(&mut self, enabled: bool) {
let mut w0 = self.space.read_u16(self.offset + 2);
if enabled {
w0 |= 1 << 0;
} else {
w0 &= !(1 << 0);
}
self.space.write_u16(self.offset + 2, w0);
}
pub fn register(
&mut self,
ic: &C,
ic: &Arc<dyn MessageInterruptController>,
affinity: InterruptAffinity,
handler: &'static dyn InterruptHandler,
handler: Arc<dyn InterruptHandler>,
) -> Result<MsiInfo, Error> {
let info = ic.register_msi(affinity, handler)?;
let info = ic.clone().register_msi(affinity, handler)?;
let mut w0 = self.space.read_u16(self.offset + 2);
// Enable the vector first
@ -387,3 +601,13 @@ impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiData<'s, S> {
Ok(info)
}
}
impl<'s, S: PciConfigurationSpace + ?Sized + 's> PcieData<'s, S> {
    /// Reads the 16-bit register at capability offset 0x10 as
    /// [PcieLinkControl] flags (presumably the PCIe Link Control register —
    /// confirm offset against the PCIe capability layout).
    pub fn link_control(&self) -> PcieLinkControl {
        PcieLinkControl::from_bits_retain(self.space.read_u16(self.offset + 0x10))
    }

    /// Writes `value` back to the same register.
    pub fn set_link_control(&mut self, value: PcieLinkControl) {
        self.space.write_u16(self.offset + 0x10, value.bits());
    }
}

View File

@ -2,16 +2,20 @@ use core::ops::Range;
use alloc::{sync::Arc, vec::Vec};
use device_api::{
interrupt::{InterruptAffinity, InterruptHandler, IrqOptions, MsiInfo},
Device,
device::Device,
interrupt::{
ExternalInterruptController, InterruptAffinity, InterruptHandler, Irq, IrqOptions,
MessageInterruptController, MsiInfo,
},
};
use libk_device::{message_interrupt_controller, register_global_interrupt};
use libk::device::external_interrupt_controller;
use libk_util::{sync::spin_rwlock::IrqSafeRwLock, OneTimeInit};
use yggdrasil_abi::error::Error;
use crate::{
capability::{MsiCapability, MsiXCapability, MsiXVectorTable},
PciAddress, PciConfigSpace, PciConfigurationSpace, PciSegmentInfo,
driver::PciDriver,
PciAddress, PciCommandRegister, PciConfigSpace, PciConfigurationSpace, PciSegmentInfo,
};
/// Describes a PCI device
@ -19,6 +23,16 @@ use crate::{
pub struct PciDeviceInfo {
/// Address of the device
pub address: PciAddress,
/// Class field of the configuration space
pub class: u8,
/// Subclass field of the configuration space
pub subclass: u8,
/// Prog IF field of the configuration space
pub prog_if: u8,
/// Vendor ID field of the configuration space
pub vendor_id: u16,
/// Device ID field of the configuration space
pub device_id: u16,
/// Configuration space access method
pub config_space: PciConfigSpace,
/// Describes the PCI segment this device is a part of
@ -43,16 +57,19 @@ pub enum PciInterruptPin {
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum PreferredInterruptMode {
Msi,
Msi(bool),
Legacy,
}
enum ConfiguredInterruptMode {
MsiX(MsiXVectorTable<'static>),
Msi,
LegacyPin(PciInterruptPin),
MsiX(
Arc<dyn MessageInterruptController>,
MsiXVectorTable<'static>,
),
Msi(Arc<dyn MessageInterruptController>),
LegacyPin(Arc<dyn ExternalInterruptController>, PciInterruptPin),
#[cfg_attr(not(target_arch = "x86"), allow(unused))]
LegacyLine(u8),
LegacyLine(Arc<dyn ExternalInterruptController>, u8),
None,
}
@ -68,69 +85,133 @@ pub struct PciInterruptRoute {
pub options: IrqOptions,
}
pub enum PciMatch {
Generic(fn(&PciDeviceInfo) -> bool),
Vendor(u16, u16),
Class(u8, Option<u8>, Option<u8>),
#[derive(Clone)]
pub struct PciMsiRoute {
// TODO `msi-base`
pub controller: Arc<dyn MessageInterruptController>,
}
pub struct PciDriver {
pub(crate) name: &'static str,
pub(crate) check: PciMatch,
pub(crate) probe: fn(&PciDeviceInfo) -> Result<&'static dyn Device, Error>,
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum PciDeviceState {
None,
Probed,
Initialized,
Failed,
}
/// Used to store PCI bus devices which were enumerated by the kernel
pub struct PciBusDevice {
pub(crate) info: PciDeviceInfo,
pub(crate) driver: Option<&'static dyn Device>,
pub(crate) device: Option<Arc<dyn Device>>,
pub(crate) driver: Option<&'static dyn PciDriver>,
pub(crate) state: PciDeviceState,
}
impl PciDeviceInfo {
pub fn set_command(
&self,
enable_irq: bool,
enable_mem: bool,
enable_io: bool,
enable_bus_master: bool,
) {
let command = PciCommandRegister::from_bits_retain(self.config_space.command());
let mut new = command;
if enable_irq {
new &= !PciCommandRegister::DISABLE_INTERRUPTS;
} else {
new |= PciCommandRegister::DISABLE_INTERRUPTS;
}
if enable_mem {
new |= PciCommandRegister::ENABLE_MEMORY;
} else {
new &= !PciCommandRegister::ENABLE_MEMORY;
}
if enable_io {
new |= PciCommandRegister::ENABLE_IO;
} else {
new &= !PciCommandRegister::ENABLE_IO;
}
if enable_bus_master {
new |= PciCommandRegister::BUS_MASTER;
} else {
new &= !PciCommandRegister::BUS_MASTER;
}
if new != command {
self.config_space.set_command(new.bits());
}
}
pub fn init_interrupts(&self, preferred_mode: PreferredInterruptMode) -> Result<(), Error> {
self.interrupt_config
.try_init_with(|| {
let configured_mode =
if self.segment.has_msi && preferred_mode == PreferredInterruptMode::Msi {
if let Some(mut msix) = self.config_space.capability::<MsiXCapability>() {
let mut vt = msix.vector_table().unwrap();
let configured_mode = if let PreferredInterruptMode::Msi(want_msix) = preferred_mode
&& let Some(msi_route) = self.segment.msi_translation_map.map_msi(self.address)
{
// Try to setup MSI (or MSI-x, if requested)
let mut result = None;
if want_msix
&& let Some(mut msix) = self.config_space.capability::<MsiXCapability>()
{
if let Ok(mut vt) = msix.vector_table() {
if let Some(mut msi) = self.config_space.capability::<MsiCapability>() {
msi.set_enabled(false);
}
vt.mask_all();
msix.set_function_mask(false);
msix.set_enabled(true);
ConfiguredInterruptMode::MsiX(vt)
} else if self.config_space.capability::<MsiCapability>().is_some() {
ConfiguredInterruptMode::Msi
} else {
self.legacy_interrupt_mode()
result = Some(ConfiguredInterruptMode::MsiX(
msi_route.controller.clone(),
vt,
));
}
}
// Fall back to MSI if MSI-x is not available or not requested
if result.is_none() && self.config_space.capability::<MsiCapability>().is_some()
{
result = Some(ConfiguredInterruptMode::Msi(msi_route.controller));
}
// Fall back to legacy IRQ if nothing else works
if let Some(result) = result {
result
} else {
// Ignore preferred_mode, the only supported is Legacy
self.legacy_interrupt_mode()
};
}
} else {
// MSI not requested or segment does not have MSI functionality
self.legacy_interrupt_mode()
};
IrqSafeRwLock::new(InterruptConfig {
preferred_mode,
configured_mode,
})
})
.expect("Attempted to double-configure interrupts for a PCI device");
.expect("Possible bug: double-initialization of PCI(e) interrupt config");
Ok(())
}
fn legacy_interrupt_mode(&self) -> ConfiguredInterruptMode {
let Ok(intc) = external_interrupt_controller() else {
return ConfiguredInterruptMode::None;
};
// TODO this should be retrieved from interrupt map
#[cfg(any(target_arch = "x86", rust_analyzer))]
{
if let Some(irq) = self.config_space.interrupt_line() {
return ConfiguredInterruptMode::LegacyLine(irq);
return ConfiguredInterruptMode::LegacyLine(intc.clone(), irq);
}
}
match self.config_space.interrupt_pin() {
Some(pin) => ConfiguredInterruptMode::LegacyPin(pin),
Some(pin) => ConfiguredInterruptMode::LegacyPin(intc.clone(), pin),
None => ConfiguredInterruptMode::None,
}
}
@ -138,32 +219,31 @@ impl PciDeviceInfo {
pub fn map_interrupt(
&self,
affinity: InterruptAffinity,
handler: &'static dyn InterruptHandler,
handler: Arc<dyn InterruptHandler>,
) -> Result<Option<MsiInfo>, Error> {
let mut irq = self.interrupt_config.get().write();
match &mut irq.configured_mode {
ConfiguredInterruptMode::MsiX(msix) => {
let info =
msix.register_range(0, 1, message_interrupt_controller(), affinity, handler)?;
Ok(Some(info[0]))
}
ConfiguredInterruptMode::Msi => {
ConfiguredInterruptMode::Msi(controller) => {
let mut msi = self
.config_space
.capability::<MsiCapability>()
.ok_or(Error::InvalidOperation)?;
let info = msi.register(message_interrupt_controller(), affinity, handler)?;
let info = msi.register(controller, affinity, handler)?;
Ok(Some(info))
}
ConfiguredInterruptMode::LegacyPin(pin) => {
self.try_map_legacy(*pin, handler)?;
ConfiguredInterruptMode::MsiX(controller, msix) => {
let info = msix.register_range(0, 1, controller, affinity, handler)?;
Ok(Some(info[0]))
}
ConfiguredInterruptMode::LegacyPin(intc, pin) => {
self.try_map_legacy(intc.as_ref(), *pin, handler)?;
Ok(None)
}
ConfiguredInterruptMode::LegacyLine(irq) => {
self.try_map_legacy_line(*irq, handler)?;
ConfiguredInterruptMode::LegacyLine(intc, irq) => {
self.try_map_legacy_line(intc.as_ref(), *irq, handler)?;
Ok(None)
}
ConfiguredInterruptMode::None => Err(Error::InvalidOperation),
@ -174,28 +254,25 @@ impl PciDeviceInfo {
&self,
vector_range: Range<usize>,
affinity: InterruptAffinity,
handler: &'static dyn InterruptHandler,
handler: Arc<dyn InterruptHandler>,
) -> Result<Vec<MsiInfo>, Error> {
let mut irq = self.interrupt_config.get().write();
let start = vector_range.start;
let end = vector_range.end;
match &mut irq.configured_mode {
ConfiguredInterruptMode::MsiX(msix) => msix.register_range(
start,
end,
message_interrupt_controller(),
affinity,
handler,
),
ConfiguredInterruptMode::MsiX(controller, msix) => {
msix.register_range(start, end, controller, affinity, handler)
}
_ => Err(Error::InvalidOperation),
}
}
fn try_map_legacy(
&self,
intc: &dyn ExternalInterruptController,
pin: PciInterruptPin,
handler: &'static dyn InterruptHandler,
handler: Arc<dyn InterruptHandler>,
) -> Result<(), Error> {
let src = PciInterrupt {
address: self.address,
@ -204,8 +281,8 @@ impl PciDeviceInfo {
let route = self
.segment
.irq_translation_map
.get(&src)
.ok_or(Error::InvalidOperation)?;
.map_interrupt(&src)
.inspect_err(|e| log::warn!("Could not map PCI IRQ {pin:?}: {e:?}"))?;
log::debug!(
"PCI {} pin {:?} -> system IRQ #{}",
@ -214,17 +291,22 @@ impl PciDeviceInfo {
route.number
);
register_global_interrupt(route.number, route.options, handler)
let irq = Irq::External(route.number);
intc.register_irq(irq, route.options, handler)?;
intc.enable_irq(irq)
}
fn try_map_legacy_line(
&self,
intc: &dyn ExternalInterruptController,
line: u8,
handler: &'static dyn InterruptHandler,
handler: Arc<dyn InterruptHandler>,
) -> Result<(), Error> {
log::debug!("PCI {} -> IRQ#{}", self.address, line);
register_global_interrupt(line as _, Default::default(), handler)
let irq = Irq::External(line as u32);
intc.register_irq(irq, Default::default(), handler)?;
intc.enable_irq(irq)
}
}

View File

@ -0,0 +1,63 @@
use alloc::{sync::Arc, vec::Vec};
use device_api::{device::Device, dma::DmaAllocator};
use libk::error::Error;
use libk_util::sync::spin_rwlock::IrqSafeRwLock;
use crate::device::PciDeviceInfo;
pub enum PciMatch {
Generic(fn(&PciDeviceInfo) -> bool),
Vendor(u16, u16),
Class(u8, Option<u8>, Option<u8>),
}
pub struct PciDriverMatch {
pub driver: &'static dyn PciDriver,
pub check: PciMatch,
}
pub trait PciDriver: Sync {
fn probe(
&self,
info: &PciDeviceInfo,
dma: &Arc<dyn DmaAllocator>,
) -> Result<Arc<dyn Device>, Error>;
fn driver_name(&self) -> &str;
}
impl PciMatch {
pub fn check_device(&self, info: &PciDeviceInfo) -> bool {
match self {
Self::Generic(f) => f(info),
&Self::Vendor(vendor_, device_) => {
info.vendor_id == vendor_ && info.device_id == device_
}
&Self::Class(class_, Some(subclass_), Some(prog_if_)) => {
class_ == info.class && subclass_ == info.subclass && prog_if_ == info.prog_if
}
&Self::Class(class_, Some(subclass_), _) => {
class_ == info.class && subclass_ == info.subclass
}
&Self::Class(class_, _, _) => class_ == info.class,
}
}
}
pub fn register_match(pmatch: PciMatch, driver: &'static dyn PciDriver) {
DRIVERS.write().push(PciDriverMatch {
check: pmatch,
driver,
});
}
pub fn lookup_driver(info: &PciDeviceInfo) -> Option<&'static dyn PciDriver> {
DRIVERS.read().iter().find_map(|pmatch| {
if pmatch.check.check_device(info) {
Some(pmatch.driver)
} else {
None
}
})
}
static DRIVERS: IrqSafeRwLock<Vec<PciDriverMatch>> = IrqSafeRwLock::new(Vec::new());

View File

@ -0,0 +1,141 @@
use core::fmt;
use alloc::{collections::btree_map::BTreeMap, sync::Arc, vec::Vec};
use device_api::interrupt::MessageInterruptController;
use libk::error::Error;
use crate::{
device::{PciInterrupt, PciInterruptRoute, PciMsiRoute},
PciAddress,
};
#[derive(Debug)]
pub enum PciInterruptMap {
Fixed(BTreeMap<PciInterrupt, PciInterruptRoute>),
#[cfg(any(target_arch = "x86_64", rust_analyzer))]
Acpi(alloc::string::String),
Legacy,
}
// TODO device-tree also provides a "msi-base" value, which is ignored and assumed to be zero for
// now
pub struct PciFixedMsiMapping {
pub start_address: PciAddress,
pub end_address: PciAddress,
pub controller: Arc<dyn MessageInterruptController>,
}
pub struct PciFixedMsiMap {
pub entries: Vec<PciFixedMsiMapping>,
}
pub enum PciMsiMap {
Fixed(PciFixedMsiMap),
Identity(Arc<dyn MessageInterruptController>),
Legacy,
}
impl PciInterruptMap {
pub fn map_interrupt(&self, interrupt: &PciInterrupt) -> Result<PciInterruptRoute, Error> {
match self {
Self::Fixed(map) => map.get(interrupt).cloned().ok_or(Error::DoesNotExist),
#[cfg(any(target_arch = "x86_64", rust_analyzer))]
Self::Acpi(aml_object_name) => {
use device_api::interrupt::{IrqLevel, IrqOptions, IrqTrigger};
use crate::device::PciInterruptPin;
let aml_pin = match interrupt.pin {
PciInterruptPin::A => ygg_driver_acpi::PciPin::IntA,
PciInterruptPin::B => ygg_driver_acpi::PciPin::IntB,
PciInterruptPin::C => ygg_driver_acpi::PciPin::IntC,
PciInterruptPin::D => ygg_driver_acpi::PciPin::IntD,
};
let aml_route = ygg_driver_acpi::get_pci_route(
aml_object_name.as_str(),
interrupt.address.device as u16,
interrupt.address.function as u16,
aml_pin,
)
.or_else(|| {
ygg_driver_acpi::get_pci_route(
aml_object_name.as_str(),
interrupt.address.device as u16,
0xFFFF,
aml_pin,
)
})
.ok_or(Error::DoesNotExist)?;
let trigger = match aml_route.trigger {
ygg_driver_acpi::InterruptTrigger::Edge => IrqTrigger::Edge,
ygg_driver_acpi::InterruptTrigger::Level => IrqTrigger::Level,
};
let level = match aml_route.polarity {
ygg_driver_acpi::InterruptPolarity::ActiveLow => IrqLevel::ActiveLow,
ygg_driver_acpi::InterruptPolarity::ActiveHigh => IrqLevel::ActiveHigh,
};
Ok(PciInterruptRoute {
options: IrqOptions { trigger, level },
number: aml_route.irq,
})
}
Self::Legacy => todo!(),
}
}
}
impl PciMsiMap {
pub fn map_msi(&self, address: PciAddress) -> Option<PciMsiRoute> {
match self {
Self::Fixed(map) => map.map_msi(address),
Self::Identity(controller) => Some(PciMsiRoute {
controller: controller.clone(),
}),
Self::Legacy => None,
}
}
}
impl fmt::Debug for PciMsiMap {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Legacy => f.debug_struct("Legacy").finish(),
Self::Fixed(map) => f
.debug_struct("Fixed")
.field("entries", &map.entries)
.finish(),
Self::Identity(_) => f.debug_struct("Identity").finish(),
}
}
}
impl PciFixedMsiMap {
pub fn map_msi(&self, address: PciAddress) -> Option<PciMsiRoute> {
for entry in self.entries.iter() {
if entry.contains(address) {
let route = PciMsiRoute {
controller: entry.controller.clone(),
};
return Some(route);
}
}
None
}
}
impl PciFixedMsiMapping {
pub fn contains(&self, address: PciAddress) -> bool {
self.start_address <= address && self.end_address > address
}
}
impl fmt::Debug for PciFixedMsiMapping {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PciFixedMsiMapping")
.field("start_address", &self.start_address)
.field("end_address", &self.end_address)
.finish()
}
}

View File

@ -1,24 +1,38 @@
//! PCI/PCIe bus interfaces
#![no_std]
#![feature(let_chains, decl_macro)]
#![allow(clippy::missing_transmute_annotations)]
extern crate alloc;
use core::fmt;
#[cfg(target_arch = "x86_64")]
#[cfg(any(target_arch = "x86_64", rust_analyzer))]
use acpi::mcfg::McfgEntry;
use alloc::{collections::BTreeMap, sync::Arc, vec::Vec};
use alloc::{format, sync::Arc, vec::Vec};
use bitflags::bitflags;
use device::{PciBusDevice, PciDeviceInfo, PciDriver, PciInterrupt, PciInterruptRoute, PciMatch};
use device_api::Device;
use device::{PciBusDevice, PciDeviceInfo, PciDeviceState};
use device_api::{device::DeviceInitContext, dma::DmaAllocator};
use interrupt::{PciInterruptMap, PciMsiMap};
use libk::{
dma::DummyDmaAllocator,
fs::sysfs::{self, object::KObject},
};
use libk_mm::address::PhysicalAddress;
use libk_util::{sync::IrqSafeSpinlock, OneTimeInit};
use space::legacy;
use yggdrasil_abi::error::Error;
use yggdrasil_abi::{error::Error, primitive_enum};
#[cfg(target_arch = "x86_64")]
use device_api::interrupt::MessageInterruptController;
pub mod capability;
pub mod device;
pub mod driver;
pub mod interrupt;
pub mod macros;
mod nodes;
mod space;
pub use space::{
@ -29,6 +43,7 @@ pub use space::{
bitflags! {
/// Command register of the PCI configuration space
#[derive(PartialEq, Clone, Copy)]
pub struct PciCommandRegister: u16 {
/// If set, I/O access to the device is enabled
const ENABLE_IO = 1 << 0;
@ -73,19 +88,14 @@ pub enum PciBaseAddress {
Io(u16),
}
/// Unique ID assigned to PCI capability structures
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[non_exhaustive]
#[repr(u8)]
pub enum PciCapabilityId {
/// MSI (32-bit or 64-bit)
Msi = 0x05,
/// Vendor-specific capability
VendorSpecific = 0x09,
/// MSI-X
MsiX = 0x11,
/// Unknown capability missing from this list
Unknown,
primitive_enum! {
pub enum PciCapabilityId: u8 {
PowerManagement = 0x01,
Msi = 0x05,
VendorSpecific = 0x09,
PciExpress = 0x10,
MsiX = 0x11,
}
}
/// Interface used for querying PCI capabilities
@ -122,7 +132,49 @@ struct BusAddressAllocator {
offset_32: u32,
}
#[cfg_attr(any(target_arch = "x86_64", target_arch = "x86"), allow(dead_code))]
#[derive(Debug)]
pub struct PciSegmentInfo {
pub segment_number: u8,
pub bus_number_start: u8,
pub bus_number_end: u8,
pub ecam_phys_base: Option<PhysicalAddress>,
pub irq_translation_map: PciInterruptMap,
pub msi_translation_map: PciMsiMap,
}
/// Represents a single PCIe bus segment
pub struct PciBusSegment {
allocator: Option<BusAddressAllocator>,
info: Arc<PciSegmentInfo>,
devices: Vec<Arc<KObject<IrqSafeSpinlock<PciBusDevice>>>>,
}
#[derive(Debug)]
pub enum PciRangeType {
Configuration,
Io,
Memory32,
Memory64,
}
pub struct PciAddressRange {
pub ty: PciRangeType,
pub bus_number: u8,
pub pci_base: u64,
pub host_base: PhysicalAddress,
pub size: usize,
}
/// Manager struct to store and control all PCI devices in the system
pub struct PciBusManager {
segments: Vec<PciBusSegment>,
}
#[cfg_attr(
any(target_arch = "x86_64", target_arch = "x86", target_arch = "riscv64"),
allow(dead_code)
)]
impl BusAddressAllocator {
pub fn from_ranges(ranges: &[PciAddressRange]) -> Self {
let mut range_32 = None;
@ -190,44 +242,6 @@ impl BusAddressAllocator {
}
}
#[derive(Debug)]
pub struct PciSegmentInfo {
pub segment_number: u8,
pub bus_number_start: u8,
pub bus_number_end: u8,
pub ecam_phys_base: Option<PhysicalAddress>,
pub irq_translation_map: BTreeMap<PciInterrupt, PciInterruptRoute>,
pub has_msi: bool,
}
/// Represents a single PCIe bus segment
pub struct PciBusSegment {
allocator: Option<BusAddressAllocator>,
info: Arc<PciSegmentInfo>,
devices: Vec<PciBusDevice>,
}
pub enum PciRangeType {
Configuration,
Io,
Memory32,
Memory64,
}
pub struct PciAddressRange {
pub ty: PciRangeType,
pub bus_number: u8,
pub pci_base: u64,
pub host_base: PhysicalAddress,
pub size: usize,
}
/// Manager struct to store and control all PCI devices in the system
pub struct PciBusManager {
segments: Vec<PciBusSegment>,
}
impl PciBaseAddress {
pub fn as_memory(self) -> Option<PhysicalAddress> {
match self {
@ -236,6 +250,14 @@ impl PciBaseAddress {
_ => None,
}
}
pub fn is_zero(&self) -> bool {
match *self {
Self::Memory32(base) => base == 0,
Self::Memory64(base) => base == 0,
Self::Io(base) => base == 0,
}
}
}
impl PciBusSegment {
@ -300,7 +322,9 @@ impl PciBusSegment {
for i in 0..6 {
if (1 << i) & bar_mask != 0 {
let orig_value = config.bar(i).unwrap();
let Some(orig_value) = config.bar(i) else {
continue;
};
let size = unsafe { config.bar_size(i) };
if size != 0 {
@ -344,13 +368,40 @@ impl PciBusSegment {
}
}
let vendor_id = config.vendor_id();
let device_id = config.device_id();
let class = config.class_code();
let subclass = config.subclass();
let prog_if = config.prog_if();
let info = PciDeviceInfo {
address,
vendor_id,
device_id,
class,
subclass,
prog_if,
segment: self.info.clone(),
config_space: config,
interrupt_config: Arc::new(OneTimeInit::new()),
};
self.devices.push(PciBusDevice { info, driver: None });
let object = nodes::make_sysfs_object(PciBusDevice {
info,
driver: None,
device: None,
state: PciDeviceState::None,
});
let pci_object = PCI_SYSFS_NODE.or_init_with(|| {
let bus_object = sysfs::bus().unwrap();
let pci_object = KObject::new(());
bus_object.add_object("pci", pci_object.clone()).ok();
pci_object
});
let name = format!("{address}");
pci_object.add_object(name, object.clone()).ok();
self.devices.push(object);
Ok(())
}
@ -376,6 +427,12 @@ impl PciBusSegment {
}
}
impl PciSegmentInfo {
pub fn has_msi(&self) -> bool {
!matches!(self.msi_translation_map, PciMsiMap::Legacy)
}
}
impl PciBusManager {
const fn new() -> Self {
Self {
@ -385,11 +442,16 @@ impl PciBusManager {
/// Walks the bus device list and calls init/init_irq functions on any devices with associated
/// drivers
pub fn setup_bus_devices() -> Result<(), Error> {
log::info!("Setting up bus devices");
pub fn probe_bus_devices() -> Result<(), Error> {
Self::walk_bus_devices(|device| {
log::info!("Set up {}", device.info.address);
setup_bus_device(device)?;
probe_bus_device(device, false)?;
Ok(true)
})
}
pub fn setup_bus_devices(rescan: bool) -> Result<(), Error> {
Self::walk_bus_devices(|device| {
setup_bus_device(device, rescan)?;
Ok(true)
})
}
@ -403,7 +465,8 @@ impl PciBusManager {
for segment in this.segments.iter_mut() {
for device in segment.devices.iter_mut() {
if !f(device)? {
let mut device = device.lock();
if !f(&mut *device)? {
return Ok(());
}
}
@ -421,8 +484,8 @@ impl PciBusManager {
bus_number_start: 0,
bus_number_end: 255,
ecam_phys_base: None,
irq_translation_map: BTreeMap::new(),
has_msi: false,
irq_translation_map: PciInterruptMap::Legacy,
msi_translation_map: PciMsiMap::Legacy,
}),
allocator: None,
devices: Vec::new(),
@ -437,7 +500,12 @@ impl PciBusManager {
/// Enumerates a bus segment provided by ACPI MCFG table entry
#[cfg(target_arch = "x86_64")]
pub fn add_segment_from_mcfg(entry: &McfgEntry) -> Result<(), Error> {
pub fn add_segment_from_mcfg(
entry: &McfgEntry,
msi_controller: Arc<dyn MessageInterruptController>,
) -> Result<(), Error> {
let msi_translation_map = PciMsiMap::Identity(msi_controller);
let mut bus_segment = PciBusSegment {
info: Arc::new(PciSegmentInfo {
segment_number: entry.pci_segment_group as u8,
@ -445,9 +513,9 @@ impl PciBusManager {
bus_number_end: entry.bus_number_end,
ecam_phys_base: Some(PhysicalAddress::from_u64(entry.base_address)),
// TODO obtain this from ACPI SSDT
irq_translation_map: BTreeMap::new(),
has_msi: true,
// TODO get the segment's PCI root bridge AML name
irq_translation_map: PciInterruptMap::Acpi("\\_SB.PCI0._PRT".into()),
msi_translation_map,
}),
// Firmware done this for us
allocator: None,
@ -462,12 +530,13 @@ impl PciBusManager {
Ok(())
}
#[cfg(target_arch = "aarch64")]
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64", rust_analyzer))]
pub fn add_segment_from_device_tree(
cfg_base: PhysicalAddress,
bus_range: core::ops::Range<u8>,
ranges: Vec<PciAddressRange>,
interrupt_map: BTreeMap<PciInterrupt, PciInterruptRoute>,
irq_translation_map: PciInterruptMap,
msi_translation_map: PciMsiMap,
) -> Result<(), Error> {
let mut bus_segment = PciBusSegment {
info: Arc::new(PciSegmentInfo {
@ -476,8 +545,8 @@ impl PciBusManager {
bus_number_end: bus_range.end,
ecam_phys_base: Some(cfg_base),
irq_translation_map: interrupt_map,
has_msi: false,
irq_translation_map,
msi_translation_map,
}),
allocator: Some(BusAddressAllocator::from_ranges(&ranges)),
@ -550,102 +619,66 @@ impl PciConfigurationSpace for PciConfigSpace {
}
}
fn setup_bus_device(device: &mut PciBusDevice) -> Result<(), Error> {
if device.driver.is_some() {
fn probe_bus_device(device: &mut PciBusDevice, _rescan: bool) -> Result<(), Error> {
// Already has a driver/device set up
if device.device.is_some() || device.state != PciDeviceState::None {
return Ok(());
}
let config = &device.info.config_space;
if let Some(driver) = driver::lookup_driver(&device.info) {
let dma: Arc<dyn DmaAllocator> = Arc::new(DummyDmaAllocator);
log::debug!(
"{}: {:04x}:{:04x}",
device.info.address,
config.vendor_id(),
config.device_id()
);
let class = config.class_code();
let subclass = config.subclass();
let prog_if = config.prog_if();
let drivers = PCI_DRIVERS.lock();
for driver in drivers.iter() {
if driver
.check
.check_device(&device.info, class, subclass, prog_if)
{
// TODO add the device to the bus
log::debug!(" -> {:?}", driver.name);
let instance = (driver.probe)(&device.info)?;
unsafe {
instance.init()?;
match driver.probe(&device.info, &dma) {
Ok(instance) => {
log::info!("{} -> {}", device.info.address, driver.driver_name());
device.device.replace(instance);
device.driver.replace(driver);
device.state = PciDeviceState::Probed;
}
Err(error) => {
log::error!(
"{} ({}) probe error: {error:?}",
device.info.address,
driver.driver_name()
);
}
device.driver.replace(instance);
break;
} else {
log::debug!(" -> No driver");
}
}
Ok(())
}
impl PciMatch {
pub fn check_device(&self, info: &PciDeviceInfo, class: u8, subclass: u8, prog_if: u8) -> bool {
match self {
Self::Generic(f) => f(info),
&Self::Vendor(vendor_, device_) => {
info.config_space.vendor_id() == vendor_ && info.config_space.device_id() == device_
}
&Self::Class(class_, Some(subclass_), Some(prog_if_)) => {
class_ == class && subclass_ == subclass && prog_if_ == prog_if
}
&Self::Class(class_, Some(subclass_), _) => class_ == class && subclass_ == subclass,
&Self::Class(class_, _, _) => class_ == class,
fn setup_bus_device(device: &mut PciBusDevice, _rescan: bool) -> Result<(), Error> {
// No driver yet (TODO probe if rescan is asked)
let (Some(dev), Some(driver)) = (device.device.as_ref(), device.driver) else {
return Ok(());
};
// Already initialized/failed
if device.state != PciDeviceState::Probed {
return Ok(());
}
let dma: Arc<dyn DmaAllocator> = Arc::new(DummyDmaAllocator);
let cx = DeviceInitContext {
dma_allocator: dma.clone(),
};
match unsafe { dev.clone().init(cx) } {
Ok(()) => {
device.state = PciDeviceState::Initialized;
}
Err(error) => {
log::error!(
"{} ({}) setup error: {error:?}",
device.info.address,
driver.driver_name()
);
device.state = PciDeviceState::Failed;
}
}
Ok(())
}
pub fn register_class_driver(
name: &'static str,
class: u8,
subclass: Option<u8>,
prog_if: Option<u8>,
probe: fn(&PciDeviceInfo) -> Result<&'static dyn Device, Error>,
) {
PCI_DRIVERS.lock().push(PciDriver {
name,
check: PciMatch::Class(class, subclass, prog_if),
probe,
});
}
pub fn register_vendor_driver(
name: &'static str,
vendor_id: u16,
device_id: u16,
probe: fn(&PciDeviceInfo) -> Result<&'static dyn Device, Error>,
) {
PCI_DRIVERS.lock().push(PciDriver {
name,
check: PciMatch::Vendor(vendor_id, device_id),
probe,
});
}
pub fn register_generic_driver(
name: &'static str,
check: fn(&PciDeviceInfo) -> bool,
probe: fn(&PciDeviceInfo) -> Result<&'static dyn Device, Error>,
) {
PCI_DRIVERS.lock().push(PciDriver {
name,
check: PciMatch::Generic(check),
probe,
});
}
static PCI_DRIVERS: IrqSafeSpinlock<Vec<PciDriver>> = IrqSafeSpinlock::new(Vec::new());
static PCI_MANAGER: IrqSafeSpinlock<PciBusManager> = IrqSafeSpinlock::new(PciBusManager::new());
static PCI_SYSFS_NODE: OneTimeInit<Arc<KObject<()>>> = OneTimeInit::new();

View File

@ -0,0 +1,35 @@
pub macro pci_driver_match {
(class ($class:literal:$subclass:literal:$prog_if:literal)) => {
$crate::driver::PciMatch::Class($class, Some($subclass), Some($prog_if))
},
(class ($class:literal:$subclass:literal)) => {
$crate::driver::PciMatch::Class($class, Some($subclass), None)
},
(class $class:literal) => {
$crate::driver::PciMatch::Class($class, None, None)
},
(device ($vendor:literal:$device:literal)) => {
$crate::driver::PciMatch::Vendor($vendor, $device)
}
}
pub macro pci_driver(
matches: [$($kind:ident $match:tt),+ $(,)?],
driver: $driver:tt
) {
#[link_section = ".init_array"]
#[used]
static __REGISTER_FN: extern "C" fn() = __register_fn;
extern "C" fn __register_fn() {
struct Driver;
impl $crate::driver::PciDriver for Driver $driver
static DRIVER: Driver = Driver;
log::info!("register pci driver: {:?}", $crate::driver::PciDriver::driver_name(&Driver));
$(
let pmatch = $crate::macros::pci_driver_match!($kind $match);
$crate::driver::register_match(pmatch, &DRIVER);
)+
}
}

View File

@ -0,0 +1,139 @@
use alloc::{format, string::String, sync::Arc};
use libk::{
error::Error,
fs::sysfs::{
attribute::{StringAttribute, StringAttributeOps},
object::KObject,
},
};
use libk_util::sync::IrqSafeSpinlock;
use crate::{device::PciBusDevice, PciBaseAddress, PciCapabilityId, PciConfigurationSpace};
pub(crate) fn make_sysfs_object(
device: PciBusDevice,
) -> Arc<KObject<IrqSafeSpinlock<PciBusDevice>>> {
struct Resources;
struct Capabilities;
struct Driver;
struct Class;
struct Id;
impl StringAttributeOps for Driver {
type Data = IrqSafeSpinlock<PciBusDevice>;
const NAME: &'static str = "driver";
fn read(state: &Self::Data) -> Result<String, Error> {
let state = state.lock();
if let Some(driver) = state.driver.map(|driver| driver.driver_name()) {
Ok(driver.into())
} else {
Ok("".into())
}
}
}
impl StringAttributeOps for Id {
type Data = IrqSafeSpinlock<PciBusDevice>;
const NAME: &'static str = "id";
fn read(state: &Self::Data) -> Result<String, Error> {
let state = state.lock();
Ok(format!(
"{:04x}:{:04x}",
state.info.vendor_id, state.info.device_id
))
}
}
impl StringAttributeOps for Class {
type Data = IrqSafeSpinlock<PciBusDevice>;
const NAME: &'static str = "class";
fn read(state: &Self::Data) -> Result<String, Error> {
let state = state.lock();
Ok(format!(
"{:02x}:{:02x}:{:02x}",
state.info.class, state.info.subclass, state.info.prog_if
))
}
}
impl StringAttributeOps for Resources {
type Data = IrqSafeSpinlock<PciBusDevice>;
const NAME: &'static str = "resources";
const NEWLINE: bool = false;
fn read(state: &Self::Data) -> Result<String, Error> {
use core::fmt::Write;
let state = state.lock();
let mut output = String::new();
for i in 0..6 {
if let Some(bar) = state.info.config_space.bar(i) {
if bar.is_zero() {
continue;
}
match bar {
PciBaseAddress::Io(base) => {
writeln!(output, "{i}:pio:{base:#06x}").ok();
}
PciBaseAddress::Memory32(base) => {
writeln!(output, "{i}:m32:{base:#010x}").ok();
}
PciBaseAddress::Memory64(base) => {
writeln!(output, "{i}:m64:{base:#018x}").ok();
}
}
}
}
if output.is_empty() {
output.push('\n');
}
Ok(output)
}
}
impl StringAttributeOps for Capabilities {
type Data = IrqSafeSpinlock<PciBusDevice>;
const NAME: &'static str = "capabilities";
const NEWLINE: bool = false;
fn read(state: &Self::Data) -> Result<String, Error> {
use core::fmt::Write;
let state = state.lock();
let mut output = String::new();
for (capability, offset, _) in state.info.config_space.capability_iter() {
write!(output, "{offset:04x}:").ok();
match capability {
Some(PciCapabilityId::Msi) => write!(output, "msi").ok(),
Some(PciCapabilityId::MsiX) => write!(output, "msix").ok(),
Some(PciCapabilityId::VendorSpecific) => write!(output, "vendor-specific").ok(),
Some(PciCapabilityId::PciExpress) => write!(output, "pcie").ok(),
Some(PciCapabilityId::PowerManagement) => {
write!(output, "power-management").ok()
}
None => write!(output, "unknown").ok(),
};
writeln!(output).ok();
}
if output.is_empty() {
output.push('\n');
}
Ok(output)
}
}
let object = KObject::new(IrqSafeSpinlock::new(device));
object
.add_attribute(StringAttribute::from(Capabilities))
.ok();
object.add_attribute(StringAttribute::from(Resources)).ok();
object.add_attribute(StringAttribute::from(Driver)).ok();
object.add_attribute(StringAttribute::from(Class)).ok();
object.add_attribute(StringAttribute::from(Id)).ok();
object
}

View File

@ -75,12 +75,12 @@ pub struct CapabilityIterator<'s, S: PciConfigurationSpace + ?Sized> {
}
impl<S: PciConfigurationSpace + ?Sized> Iterator for CapabilityIterator<'_, S> {
type Item = (PciCapabilityId, usize, usize);
type Item = (Option<PciCapabilityId>, usize, usize);
fn next(&mut self) -> Option<Self::Item> {
let offset = self.current? & !0x3;
let id = unsafe { core::mem::transmute(self.space.read_u8(offset)) };
let id = PciCapabilityId::try_from(self.space.read_u8(offset)).ok();
let len = self.space.read_u8(offset + 2);
let next_pointer = self.space.read_u8(offset + 1);
@ -339,7 +339,7 @@ pub trait PciConfigurationSpace {
Some(PciBaseAddress::Memory32(w0 & !0xF))
}
// TODO can 64-bit BARs not be on a 64-bit boundary?
2 => todo!(),
2 => None,
_ => unimplemented!(),
},
1 => todo!(),
@ -374,7 +374,7 @@ pub trait PciConfigurationSpace {
/// Locates a capability within this configuration space
fn capability<C: PciCapability>(&self) -> Option<C::CapabilityData<'_, Self>> {
self.capability_iter().find_map(|(id, offset, len)| {
if id == C::ID && C::check(self, offset, len) {
if id.map_or(false, |id| id == C::ID) && C::check(self, offset, len) {
Some(C::data(self, offset, len))
} else {
None

View File

@ -12,7 +12,10 @@ libk-mm.workspace = true
libk.workspace = true
ygg_driver_input = { path = "../../input" }
# For mass storage
ygg_driver_scsi = { path = "../../block/scsi" }
log.workspace = true
bytemuck.workspace = true
futures-util.workspace = true
async-trait.workspace = true

View File

@ -10,20 +10,21 @@ use crate::{
};
pub struct UsbBusManager {
busses: IrqSafeRwLock<BTreeMap<u16, &'static dyn UsbHostController>>,
busses: IrqSafeRwLock<BTreeMap<u16, Arc<dyn UsbHostController>>>,
devices: IrqSafeRwLock<BTreeMap<UsbBusAddress, Arc<UsbDeviceAccess>>>,
last_bus_address: AtomicU16,
}
impl UsbBusManager {
pub fn register_bus(hc: &'static dyn UsbHostController) -> u16 {
pub fn register_bus(hc: Arc<dyn UsbHostController>) -> u16 {
let i = BUS_MANAGER.last_bus_address.fetch_add(1, Ordering::AcqRel);
BUS_MANAGER.busses.write().insert(i, hc);
i
}
pub fn register_device(device: Arc<UsbDeviceAccess>) {
log::info!("usb: register device {}", device.bus_address());
BUS_MANAGER
.devices
.write()

View File

@ -0,0 +1,176 @@
use core::mem::MaybeUninit;
use alloc::{boxed::Box, sync::Arc};
use async_trait::async_trait;
use yggdrasil_abi::io::{KeyboardKey, KeyboardKeyEvent};
use crate::{device::UsbDeviceAccess, error::UsbError, info::UsbDeviceClass};
use super::{UsbClassInfo, UsbDriver};
/// USB HID keyboard driver consuming boot-protocol input reports.
pub struct UsbHidKeyboardDriver;

/// Maps modifier-byte bit positions (bit 0 = first entry) to keys.
/// The `Unknown` entries at bits 3 and 7 are presumably the GUI/meta
/// keys, which are not mapped — TODO confirm against the HID usage table.
const MODIFIER_MAP: &[KeyboardKey] = &[
    KeyboardKey::LControl,
    KeyboardKey::LShift,
    KeyboardKey::LAlt,
    KeyboardKey::Unknown,
    KeyboardKey::RControl,
    KeyboardKey::RShift,
    KeyboardKey::RAlt,
    KeyboardKey::Unknown,
];
/// Tracks which keys and modifiers are currently held so that press/release
/// events can be derived by diffing consecutive keyboard reports.
#[derive(Default)]
struct KeyboardState {
    // One bit per key code (4 x 64 = 256 bits); set = key currently down
    state: [u64; 4],
    // Modifier byte from the last report (bit order matches MODIFIER_MAP)
    mods: u8,
}
impl KeyboardState {
    /// Creates a state with no keys and no modifiers pressed.
    pub fn new() -> Self {
        Self::default()
    }

    /// Converts a boot-protocol key code into a [KeyboardKey].
    ///
    /// Codes without a mapping are logged and reported as `Unknown`.
    pub fn translate_key(k: u8) -> KeyboardKey {
        match k {
            // Letters and digits
            4..=29 => KeyboardKey::Char(k - 4 + b'a'),
            30..=38 => KeyboardKey::Char(k - 30 + b'1'),
            39 => KeyboardKey::Char(b'0'),
            // Control keys
            40 => KeyboardKey::Enter,
            41 => KeyboardKey::Escape,
            42 => KeyboardKey::Backspace,
            43 => KeyboardKey::Tab,
            // Printable punctuation
            44 => KeyboardKey::Char(b' '),
            45 => KeyboardKey::Char(b'-'),
            46 => KeyboardKey::Char(b'='),
            47 => KeyboardKey::Char(b'['),
            48 => KeyboardKey::Char(b']'),
            49 => KeyboardKey::Char(b'\\'),
            51 => KeyboardKey::Char(b';'),
            52 => KeyboardKey::Char(b'\''),
            53 => KeyboardKey::Char(b'`'),
            54 => KeyboardKey::Char(b','),
            55 => KeyboardKey::Char(b'.'),
            56 => KeyboardKey::Char(b'/'),
            // Function keys. NOTE(review): zero-based here (58 -> F(0)) —
            // confirm that KeyboardKey::F numbering expects this
            58..=69 => KeyboardKey::F(k - 58),
            _ => {
                log::debug!("Unknown key: {}", k);
                KeyboardKey::Unknown
            }
        }
    }

    /// Emits `Released` events for modifiers present in the tracked state but
    /// cleared in the new modifier byte `m`, and drops them from the state.
    /// Returns the number of events written into `events`.
    pub fn retain_modifiers(
        &mut self,
        m: u8,
        events: &mut [MaybeUninit<KeyboardKeyEvent>],
    ) -> usize {
        let mut produced = 0;
        // Set in the previous report, absent from the new one
        let dropped = self.mods & !m;
        for (bit, key) in MODIFIER_MAP.iter().enumerate().take(8) {
            if dropped & (1 << bit) != 0 {
                events[produced].write(KeyboardKeyEvent::Released(*key));
                produced += 1;
            }
        }
        self.mods &= m;
        produced
    }

    /// Emits `Pressed` events for modifiers newly set in `m` and records `m`
    /// as the current modifier state. Returns the number of events written.
    pub fn press_modifiers(
        &mut self,
        m: u8,
        events: &mut [MaybeUninit<KeyboardKeyEvent>],
    ) -> usize {
        let mut produced = 0;
        // Present in the new report, not previously held
        let added = m & !self.mods;
        for (bit, key) in MODIFIER_MAP.iter().enumerate().take(8) {
            if added & (1 << bit) != 0 {
                events[produced].write(KeyboardKeyEvent::Pressed(*key));
                produced += 1;
            }
        }
        self.mods = m;
        produced
    }

    /// Emits `Released` events for tracked keys no longer listed in `keys`
    /// and clears their bits. Returns the number of events written.
    pub fn retain(&mut self, keys: &[u8], events: &mut [MaybeUninit<KeyboardKeyEvent>]) -> usize {
        let mut produced = 0;
        // Code 0 is skipped: it is the "no key" filler in reports
        for code in 1..256 {
            let word = code / 64;
            let mask = 1u64 << (code % 64);
            if self.state[word] & mask != 0 && !keys.contains(&(code as u8)) {
                events[produced].write(KeyboardKeyEvent::Released(Self::translate_key(code as u8)));
                self.state[word] &= !mask;
                produced += 1;
            }
        }
        produced
    }

    /// Emits `Pressed` events for keys in `keys` that were not already held
    /// and sets their bits. Returns the number of events written.
    pub fn press(&mut self, keys: &[u8], events: &mut [MaybeUninit<KeyboardKeyEvent>]) -> usize {
        let mut produced = 0;
        for &code in keys {
            let word = (code as usize) / 64;
            let mask = 1u64 << (code % 64);
            if self.state[word] & mask == 0 {
                self.state[word] |= mask;
                events[produced].write(KeyboardKeyEvent::Pressed(Self::translate_key(code)));
                produced += 1;
            }
        }
        produced
    }
}
#[async_trait]
impl UsbDriver for UsbHidKeyboardDriver {
async fn run(self: Arc<Self>, device: Arc<UsbDeviceAccess>) -> Result<(), UsbError> {
// TODO not sure whether to use boot protocol (easy) or GetReport
let config = device.select_configuration(|_| true).await?.unwrap();
log::info!("Setup HID keyboard");
let pipe = device
.open_interrupt_in_pipe(1, config.endpoints[0].max_packet_size as u16)
.await?;
let mut buffer = [0; 8];
let mut state = KeyboardState::new();
let mut events = [MaybeUninit::uninit(); 16];
loop {
let mut event_count = 0;
let len = pipe.read(&mut buffer).await?;
if len < 8 {
continue;
}
event_count += state.retain_modifiers(buffer[0], &mut events);
event_count += state.press_modifiers(buffer[0], &mut events[event_count..]);
event_count += state.retain(&buffer[2..], &mut events[event_count..]);
event_count += state.press(&buffer[2..], &mut events[event_count..]);
let events = unsafe { events[..event_count].assume_init_ref() };
for &event in events {
log::trace!("Generic Keyboard: {:?}", event);
ygg_driver_input::send_event(event);
}
}
}
fn name(&self) -> &'static str {
"USB HID Keyboard"
}
fn probe(&self, class: &UsbClassInfo, _device: &UsbDeviceAccess) -> bool {
log::info!(
"class = {:?}, subclass = {:02x}",
class.class,
class.subclass
);
class.class == UsbDeviceClass::Hid && (class.subclass == 0x00 || class.subclass == 0x01)
}
}

View File

@ -0,0 +1,273 @@
use core::mem::MaybeUninit;
use alloc::{boxed::Box, sync::Arc};
use async_trait::async_trait;
use bytemuck::{Pod, Zeroable};
use libk::{
dma::{DmaBuffer, DmaSliceMut},
error::Error,
};
use ygg_driver_scsi::{transport::ScsiTransport, ScsiEnclosure};
use crate::{
communication::UsbDirection,
device::{UsbDeviceAccess, UsbDeviceDetachHandler},
error::UsbError,
info::{UsbDeviceClass, UsbEndpointType},
pipe::{
control::{ControlTransferSetup, UsbClassSpecificRequest},
normal::{UsbBulkInPipeAccess, UsbBulkOutPipeAccess},
},
};
use super::{UsbClassInfo, UsbDriver};
/// Driver for USB Mass Storage class devices using the Bulk-Only ("BBB")
/// transport protocol.
pub struct UsbMassStorageDriverBulkOnly;

/// Command Block Wrapper — header sent over the bulk-out pipe before each
/// command. Only the first 31 bytes are transmitted (see `send_cbw`).
#[derive(Debug, Clone, Copy, Zeroable, Pod)]
#[repr(C)]
struct Cbw {
    signature: u32,       // 0x00
    tag: u32,             // 0x04
    transfer_length: u32, // 0x08
    flags: u8,            // 0x0C
    lun: u8,              // 0x0D
    cb_length: u8,        // 0x0E
    cb_data: [u8; 16],    // 0x0F
    // Not sent — pads the struct to 32 bytes (multiple of u32 alignment)
    _0: u8,
}

/// Command Status Wrapper — 13-byte status reply read from the bulk-in pipe
/// after a command; the trailing pad brings the struct to 16 bytes.
#[derive(Debug, Clone, Copy, Zeroable, Pod)]
#[repr(C)]
struct Csw {
    signature: u32,
    tag: u32,
    data_residue: u32,
    status: u8,
    _0: [u8; 3],
}
/// Bulk-Only transport state: the device handle, its bulk IN/OUT pipes,
/// and the running tag used to match CBWs with their CSWs.
struct Bbb {
    #[allow(unused)]
    device: Arc<UsbDeviceAccess>,
    in_pipe: UsbBulkInPipeAccess,
    out_pipe: UsbBulkOutPipeAccess,
    // Last CBW tag issued; incremented (wrapping) per command
    last_tag: u32,
}

/// Detach hook that forwards USB device removal to the SCSI enclosure.
struct DetachHandler(Arc<ScsiEnclosure>);
impl Bbb {
    /// Wraps a device and its bulk IN/OUT pipes into a Bulk-Only transport.
    ///
    /// Currently infallible; the `Result` return leaves room for future
    /// validation without changing the signature.
    pub fn new(
        device: Arc<UsbDeviceAccess>,
        in_pipe: UsbBulkInPipeAccess,
        out_pipe: UsbBulkOutPipeAccess,
    ) -> Result<Self, UsbError> {
        let transport = Self {
            device,
            in_pipe,
            out_pipe,
            last_tag: 0,
        };
        Ok(transport)
    }
}
impl Bbb {
async fn send_cbw(
&mut self,
lun: u8,
host_to_dev: bool,
command: &[u8],
response_len: usize,
) -> Result<u32, Error> {
self.last_tag = self.last_tag.wrapping_add(1);
let flags = if !host_to_dev { 1 << 7 } else { 0 };
let tag = self.last_tag;
let mut cbw_bytes = [0; 32];
let cbw = bytemuck::from_bytes_mut::<Cbw>(&mut cbw_bytes);
cbw.signature = 0x43425355;
cbw.transfer_length = response_len as u32;
cbw.flags = flags;
cbw.tag = tag;
cbw.lun = lun;
cbw.cb_length = command.len() as u8;
cbw.cb_data[..command.len()].copy_from_slice(command);
self.out_pipe
.write(&cbw_bytes[..31])
.await
.inspect_err(|error| log::error!("msc: CBW send error: {error:?}"))?;
Ok(tag)
}
async fn read_csw(&mut self, tag: u32) -> Result<(), Error> {
let mut csw_bytes = [0; 16];
self.in_pipe
.read_exact(&mut csw_bytes[..13])
.await
.inspect_err(|error| log::error!("msc: CSW receive error: {error:?}"))?;
let csw = bytemuck::from_bytes::<Csw>(&csw_bytes);
if csw.signature != 0x53425355 {
log::warn!("msc: invalid csw signature");
return Err(Error::InvalidArgument);
}
if csw.tag != tag {
let csw_tag = csw.tag;
log::warn!("msc: invalid csw tag (got {}, expected {tag})", csw_tag);
return Err(Error::InvalidArgument);
}
if csw.status != 0x00 {
return Err(Error::InvalidArgument);
}
Ok(())
}
async fn read_response_data(
&mut self,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<usize, Error> {
if buffer.len() == 0 {
return Ok(0);
}
let len = self
.in_pipe
.read_dma(buffer)
.await
.inspect_err(|error| log::error!("msc: DMA read error: {error:?}"))?;
Ok(len)
}
}
#[async_trait]
impl ScsiTransport for Bbb {
    /// Allocates a DMA buffer suitable for the bulk-in pipe.
    fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error> {
        let buffer = self.in_pipe.allocate_dma_buffer(size)?;
        Ok(buffer)
    }

    /// Runs one full BBB command cycle: CBW out, data in, CSW in.
    /// Returns the number of data-phase bytes received.
    async fn perform_request_raw(
        &mut self,
        lun: u8,
        request_data: &[u8],
        response_buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
    ) -> Result<usize, Error> {
        // A CBW carries at most 16 command bytes; also enforce the transfer cap
        let limit = self.max_bytes_per_request();
        if request_data.len() > 16 || response_buffer.len() > limit {
            return Err(Error::InvalidArgument);
        }

        let expected = response_buffer.len();
        let tag = self.send_cbw(lun, false, request_data, expected).await?;
        let received = self.read_response_data(response_buffer).await?;
        self.read_csw(tag).await?;
        Ok(received)
    }

    fn max_bytes_per_request(&self) -> usize {
        32768
    }
}
impl UsbDeviceDetachHandler for DetachHandler {
fn handle_device_detach(&self) {
log::info!("Mass storage detached");
self.0.detach();
}
}
/// Bulk-Only Mass Storage Reset class-specific request (bRequest = 0xFF).
#[derive(Debug, Pod, Zeroable, Clone, Copy)]
#[repr(C)]
pub struct BulkOnlyMassStorageReset;

/// Get Max LUN class-specific request (bRequest = 0xFE); the device replies
/// with the highest LUN index it supports.
#[derive(Debug, Pod, Zeroable, Clone, Copy)]
#[repr(C)]
pub struct GetMaxLun;

// bmRequestType 0b00100001: host-to-device, class request, interface recipient
impl UsbClassSpecificRequest for BulkOnlyMassStorageReset {
    const BM_REQUEST_TYPE: u8 = 0b00100001;
    const B_REQUEST: u8 = 0b11111111;
}

// bmRequestType 0b10100001: device-to-host, class request, interface recipient
impl UsbClassSpecificRequest for GetMaxLun {
    const BM_REQUEST_TYPE: u8 = 0b10100001;
    const B_REQUEST: u8 = 0b11111110;
}
#[async_trait]
impl UsbDriver for UsbMassStorageDriverBulkOnly {
    /// Binds to a Bulk-Only-Transport device: resets the transport, queries
    /// the LUN count, and hands the device to the SCSI layer as an enclosure.
    async fn run(self: Arc<Self>, device: Arc<UsbDeviceAccess>) -> Result<(), UsbError> {
        // TODO filter to only accept BBB config
        let config = device
            .select_configuration(|_| true)
            .await?
            // Previously .unwrap(): fail cleanly if no configuration matched
            .ok_or(UsbError::InvalidConfiguration)?;
        // Bulk-in, bulk-out. Endpoint descriptors are device-provided data:
        // return an error instead of panicking (previously assert_eq!) when
        // the layout is unexpected.
        if config.endpoints.len() != 2 {
            return Err(UsbError::InvalidConfiguration);
        }
        let control_pipe = device.control_pipe();

        let (in_index, in_info) = config
            .find_endpoint(|ep| ep.is(UsbEndpointType::Bulk, UsbDirection::In))
            .ok_or(UsbError::InvalidConfiguration)?;
        let (out_index, out_info) = config
            .find_endpoint(|ep| ep.is(UsbEndpointType::Bulk, UsbDirection::Out))
            .ok_or(UsbError::InvalidConfiguration)?;

        let in_pipe = device
            .open_bulk_in_pipe(in_index, in_info.max_packet_size as u16)
            .await?;
        let out_pipe = device
            .open_bulk_out_pipe(out_index, out_info.max_packet_size as u16)
            .await?;

        // Perform a Bulk-Only Mass Storage Reset
        // TODO interface id?
        control_pipe
            .control_transfer(ControlTransferSetup {
                bm_request_type: BulkOnlyMassStorageReset::BM_REQUEST_TYPE,
                b_request: BulkOnlyMassStorageReset::B_REQUEST,
                w_value: 0,
                w_index: 0,
                w_length: 0,
            })
            .await?;

        // Get max LUN
        // TODO on devices which do not support multiple LUNs, this command may STALL
        let mut buffer = [MaybeUninit::uninit()];
        let len = control_pipe
            .control_transfer_in(
                ControlTransferSetup {
                    bm_request_type: GetMaxLun::BM_REQUEST_TYPE,
                    b_request: GetMaxLun::B_REQUEST,
                    w_value: 0,
                    w_index: 0,
                    w_length: 1,
                },
                &mut buffer,
            )
            .await?;
        // No reply byte: treat as a single LUN (max LUN index 0)
        let max_lun = if len < 1 {
            0
        } else {
            // SAFETY: the transfer reported at least one byte received
            unsafe { buffer[0].assume_init() }
        };

        let bbb = Bbb::new(device.clone(), in_pipe, out_pipe)?;
        let scsi = ScsiEnclosure::setup(Box::new(bbb), max_lun as usize + 1)
            .await
            .inspect_err(|error| log::error!("msc: scsi error {error:?}"))
            .map_err(|_| UsbError::DriverError)?;

        // Tear the enclosure down when the USB device goes away
        let detach = DetachHandler(scsi.clone());
        device.set_detach_handler(Arc::new(detach));

        Ok(())
    }

    fn name(&self) -> &'static str {
        "USB Mass Storage"
    }

    /// Accepts mass-storage interfaces speaking protocol 0x50 (Bulk-Only).
    fn probe(&self, class: &UsbClassInfo, _device: &UsbDeviceAccess) -> bool {
        // TODO support other protocols
        class.class == UsbDeviceClass::MassStorage && class.interface_protocol_number == 0x50
    }
}

Some files were not shown because too many files have changed in this diff Show More