bus/pci: basic PCI support for AArch64

This commit is contained in:
Mark Poliakov 2024-02-03 20:44:04 +02:00
parent 26479eea84
commit f03b390933
42 changed files with 1328 additions and 305 deletions

View File

@ -19,9 +19,11 @@ vmalloc = { path = "lib/vmalloc" }
device-api-macros = { path = "lib/device-api/macros" }
# Drivers
ygg_driver_pci = { path = "driver/bus/pci" }
ygg_driver_block = { path = "driver/block/core" }
ygg_driver_net_core = { path = "driver/net/core" }
ygg_driver_net_loopback = { path = "driver/net/loopback" }
ygg_driver_virtio_net = { path = "driver/virtio/net", features = ["pci"] }
kernel-fs = { path = "driver/fs/kernel-fs" }
memfs = { path = "driver/fs/memfs" }
@ -55,10 +57,8 @@ acpi_lib = { git = "https://github.com/alnyan/acpi.git", package = "acpi", branc
acpi-system = { git = "https://github.com/alnyan/acpi-system.git" }
# TODO currently only supported here
xhci_lib = { git = "https://github.com/rust-osdev/xhci.git", package = "xhci" }
ygg_driver_pci = { path = "driver/bus/pci" }
ygg_driver_nvme = { path = "driver/block/nvme" }
ygg_driver_ahci = { path = "driver/block/ahci" }
ygg_driver_virtio_net = { path = "driver/virtio/net", features = ["pci"] }
[features]
default = ["fb_console"]

View File

@ -6,27 +6,19 @@ extern crate alloc;
use alloc::{boxed::Box, format, vec, vec::Vec};
use device_api::{
interrupt::{InterruptAffinity, MsiHandler},
interrupt::{InterruptAffinity, InterruptHandler},
Device,
};
use error::AhciError;
use kernel_fs::devfs;
use kernel_util::{
mem::{
address::{FromRaw, PhysicalAddress},
device::DeviceMemoryIo,
},
message_interrupt_controller, runtime,
sync::IrqSafeSpinlock,
util::OneTimeInit,
};
use kernel_util::{mem::device::DeviceMemoryIo, runtime, sync::IrqSafeSpinlock, util::OneTimeInit};
use port::AhciPort;
use regs::{PortRegs, Regs};
use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
use ygg_driver_block::{probe_partitions, NgBlockDeviceWrapper};
use ygg_driver_pci::{
capability::MsiCapability, PciBaseAddress, PciCommandRegister, PciConfigurationSpace,
PciDeviceInfo,
device::{PciDeviceInfo, PreferredInterruptMode},
PciCommandRegister, PciConfigurationSpace,
};
use yggdrasil_abi::error::Error;
@ -162,8 +154,8 @@ impl AhciController {
}
}
impl MsiHandler for AhciController {
fn handle_msi(&self, _vector: usize) -> bool {
impl InterruptHandler for AhciController {
fn handle_irq(&self, _vector: Option<usize>) -> bool {
let regs = self.regs.lock();
let is = regs.IS.get();
@ -198,18 +190,18 @@ static SATA_DRIVES: IrqSafeSpinlock<Vec<&'static AhciPort>> = IrqSafeSpinlock::n
pub fn probe(info: &PciDeviceInfo) -> Result<&'static dyn Device, Error> {
let bar5 = info.config_space.bar(5).ok_or(Error::InvalidOperation)?;
let PciBaseAddress::Memory(bar5) = bar5 else {
return Err(Error::InvalidOperation);
};
let bar5 = bar5.as_memory().ok_or(Error::InvalidOperation)?;
// TODO support regular PCI interrupts (ACPI dependency)
let Some(mut msi) = info.config_space.capability::<MsiCapability>() else {
log::warn!("Ignoring AHCI: does not support MSI (and the OS doesn't yet support PCI IRQ)");
return Err(Error::InvalidOperation);
};
info.init_interrupts(PreferredInterruptMode::Msi)?;
// // TODO support regular PCI interrupts (ACPI dependency)
// let Some(mut msi) = info.config_space.capability::<MsiCapability>() else {
// log::warn!("Ignoring AHCI: does not support MSI (and the OS doesn't yet support PCI IRQ)");
// return Err(Error::InvalidOperation);
// };
// Map the registers
let regs = unsafe { DeviceMemoryIo::<Regs>::map(PhysicalAddress::from_raw(bar5)) }?;
let regs = unsafe { DeviceMemoryIo::<Regs>::map(bar5, Default::default()) }?;
let version = Version::try_from(regs.VS.get())?;
let ahci_only = regs.CAP.matches_all(CAP::SAM::SET);
let max_port_count = regs.CAP.read(CAP::NP) as usize;
@ -232,7 +224,7 @@ pub fn probe(info: &PciDeviceInfo) -> Result<&'static dyn Device, Error> {
}));
// TODO use multiple vectors if capable
msi.register(message_interrupt_controller(), InterruptAffinity::Any, ahci)?;
info.map_interrupt(InterruptAffinity::Any, ahci)?;
Ok(ahci)
}

View File

@ -13,17 +13,17 @@ use core::{
use alloc::{boxed::Box, collections::BTreeMap, vec::Vec};
use command::{IdentifyActiveNamespaceIdListRequest, IdentifyControllerRequest};
use device_api::{
interrupt::{InterruptAffinity, MsiHandler},
interrupt::{InterruptAffinity, InterruptHandler},
Device,
};
use drive::NvmeDrive;
use kernel_util::{
cpu_count, cpu_index,
mem::{
address::{FromRaw, IntoRaw, PhysicalAddress},
address::{IntoRaw, PhysicalAddress},
device::DeviceMemoryIo,
},
message_interrupt_controller, runtime,
runtime,
sync::{IrqGuard, IrqSafeSpinlock},
util::OneTimeInit,
};
@ -33,8 +33,8 @@ use tock_registers::{
registers::{ReadOnly, ReadWrite, WriteOnly},
};
use ygg_driver_pci::{
capability::{MsiXCapability, MsiXVectorTable},
PciBaseAddress, PciCommandRegister, PciConfigurationSpace, PciDeviceInfo,
device::{PciDeviceInfo, PreferredInterruptMode},
PciCommandRegister, PciConfigurationSpace,
};
use yggdrasil_abi::error::Error;
@ -128,7 +128,7 @@ pub struct NvmeController {
drive_table: IrqSafeSpinlock<BTreeMap<u32, &'static NvmeDrive>>,
controller_id: OneTimeInit<usize>,
vector_table: IrqSafeSpinlock<MsiXVectorTable<'static>>,
pci: PciDeviceInfo,
doorbell_shift: usize,
}
@ -208,18 +208,9 @@ impl NvmeController {
self.io_queue_count.store(io_queue_count, Ordering::Release);
{
// Register io_queue_count + 1 vectors
// TODO register vectors on different CPUs
let mut vt = self.vector_table.lock();
let range = vt
.register_range(
0,
io_queue_count + 1,
message_interrupt_controller(),
InterruptAffinity::Any,
self,
)
let range = self
.pci
.map_interrupt_multiple(0..io_queue_count + 1, InterruptAffinity::Any, self)
.unwrap();
// TODO handle different MSI range allocations
@ -322,8 +313,10 @@ impl NvmeController {
}
}
impl MsiHandler for NvmeController {
fn handle_msi(&self, vector: usize) -> bool {
impl InterruptHandler for NvmeController {
fn handle_irq(&self, vector: Option<usize>) -> bool {
let vector = vector.expect("Only MSI-X interrupts are supported");
if vector == 0 {
self.admin_q.get().process_completions() != 0
} else if vector <= self.io_queue_count.load(Ordering::Acquire)
@ -421,26 +414,21 @@ static NVME_CONTROLLERS: IrqSafeSpinlock<Vec<&'static NvmeController>> =
IrqSafeSpinlock::new(Vec::new());
pub fn probe(info: &PciDeviceInfo) -> Result<&'static dyn Device, Error> {
let PciBaseAddress::Memory(bar0) = info.config_space.bar(0).unwrap() else {
panic!();
};
let bar0 = info
.config_space
.bar(0)
.unwrap()
.as_memory()
.expect("Expected a memory BAR0");
// TODO also support MSI
let mut msix = info.config_space.capability::<MsiXCapability>().unwrap();
let mut vt = msix.vector_table()?;
// TODO is this really needed? PCI spec says this is masked on reset, though I'm not sure if
// firmware puts it back in masked state after loading the kernel
vt.mask_all();
msix.set_function_mask(false);
msix.set_enabled(true);
info.init_interrupts(PreferredInterruptMode::Msi)?;
let mut cmd = PciCommandRegister::from_bits_retain(info.config_space.command());
cmd &= !(PciCommandRegister::DISABLE_INTERRUPTS | PciCommandRegister::ENABLE_IO);
cmd |= PciCommandRegister::ENABLE_MEMORY | PciCommandRegister::BUS_MASTER;
info.config_space.set_command(cmd.bits());
let regs = unsafe { DeviceMemoryIo::<Regs>::map(PhysicalAddress::from_raw(bar0)) }?;
let regs = unsafe { DeviceMemoryIo::<Regs>::map(bar0, Default::default()) }?;
// Disable the controller
regs.CC.modify(CC::ENABLE::CLEAR);
@ -454,7 +442,7 @@ pub fn probe(info: &PciDeviceInfo) -> Result<&'static dyn Device, Error> {
drive_table: IrqSafeSpinlock::new(BTreeMap::new()),
controller_id: OneTimeInit::new(),
vector_table: IrqSafeSpinlock::new(vt),
pci: info.clone(),
io_queue_count: AtomicUsize::new(1),
doorbell_shift,

View File

@ -15,4 +15,5 @@ log = "0.4.20"
bitflags = "2.3.3"
tock-registers = "0.8.1"
[target.'cfg(target_arch = "x86_64")'.dependencies]
acpi = { git = "https://github.com/alnyan/acpi.git", package = "acpi", branch = "acpi-system" }

View File

@ -1,11 +1,10 @@
//! PCI capability structures and queries
use alloc::{vec, vec::Vec};
use device_api::interrupt::{InterruptAffinity, MessageInterruptController, MsiHandler, MsiInfo};
use kernel_util::mem::{
address::{FromRaw, PhysicalAddress},
device::DeviceMemoryIoMut,
use device_api::interrupt::{
InterruptAffinity, InterruptHandler, MessageInterruptController, MsiInfo,
};
use kernel_util::mem::{address::PhysicalAddress, device::DeviceMemoryIoMut};
use tock_registers::{
interfaces::{Readable, Writeable},
registers::{ReadWrite, WriteOnly},
@ -13,7 +12,6 @@ use tock_registers::{
use yggdrasil_abi::error::Error;
use super::{PciCapability, PciCapabilityId, PciConfigurationSpace};
use crate::PciBaseAddress;
pub trait VirtioCapabilityData<'s, S: PciConfigurationSpace + ?Sized + 's>: Sized {
fn from_space_offset(space: &'s S, offset: usize) -> Self;
@ -56,6 +54,8 @@ pub struct VirtioDeviceConfigCapability;
pub struct VirtioCommonConfigCapability;
/// VirtIO notify configuration
pub struct VirtioNotifyConfigCapability;
/// VirtIO interrupt status
pub struct VirtioInterruptStatusCapability;
/// Represents an entry in MSI-X vector table
#[repr(C)]
@ -99,6 +99,11 @@ pub struct VirtioNotifyConfigData<'s, S: PciConfigurationSpace + ?Sized + 's> {
offset: usize,
}
/// Accessor for the VirtIO interrupt status (ISR) capability region:
/// remembers the configuration space it was found in and the capability's
/// offset within it.
pub struct VirtioInterruptStatusData<'s, S: PciConfigurationSpace + ?Sized + 's> {
    // Configuration space the capability was discovered in
    space: &'s S,
    // Byte offset of the capability header within `space`
    offset: usize,
}
impl<T: VirtioCapability> PciCapability for T {
const ID: PciCapabilityId = PciCapabilityId::VendorSpecific;
type CapabilityData<'a, S: PciConfigurationSpace + ?Sized + 'a> = T::Output<'a, S>;
@ -213,6 +218,34 @@ impl<'s, S: PciConfigurationSpace + ?Sized + 's> VirtioCapabilityData<'s, S>
}
}
// Ties the "interrupt status" marker type into the generic VirtIO
// vendor-specific capability lookup machinery.
impl VirtioCapability for VirtioInterruptStatusCapability {
    // cfg_type value identifying the ISR configuration structure
    const CFG_TYPE: u8 = 0x03;
    // The ISR status region is at least one byte long
    const MIN_LEN: usize = 1;
    type Output<'a, S: PciConfigurationSpace + ?Sized + 'a> = VirtioInterruptStatusData<'a, S>;
}
impl<'s, S: PciConfigurationSpace + ?Sized + 's> VirtioInterruptStatusData<'s, S> {
    /// Reads the ISR status flags.
    ///
    /// Presumably meant to return `(queue_interrupt, config_change_interrupt)`
    /// (bits 0 and 1 of the ISR byte), mirroring
    /// `Transport::read_interrupt_status` — TODO confirm once implemented.
    ///
    /// # Panics
    ///
    /// Always panics: not yet implemented.
    pub fn read_status(&self) -> (bool, bool) {
        todo!()
    }
}
// Boilerplate plumbing: lets the generic VirtIO capability scanner construct
// an interrupt-status accessor and query back where it points.
impl<'s, S: PciConfigurationSpace + ?Sized + 's> VirtioCapabilityData<'s, S>
    for VirtioInterruptStatusData<'s, S>
{
    fn space(&self) -> &'s S {
        self.space
    }

    fn offset(&self) -> usize {
        self.offset
    }

    fn from_space_offset(space: &'s S, offset: usize) -> Self {
        Self { space, offset }
    }
}
impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiXData<'s, S> {
// TODO use pending bits as well
/// Maps and returns the vector table associated with the device's MSI-X capability
@ -227,18 +260,13 @@ impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiXData<'s, S> {
let Some(base) = self.space.bar(bir) else {
return Err(Error::DoesNotExist);
};
let PciBaseAddress::Memory(base) = base else {
let Some(base) = base.as_memory() else {
return Err(Error::InvalidOperation);
};
log::debug!("MSI-X table address: {:#x}", base + table_offset);
log::debug!("MSI-X table address: {:#x}", base.add(table_offset));
unsafe {
MsiXVectorTable::from_raw_parts(
PhysicalAddress::from_raw(base + table_offset),
table_size,
)
}
unsafe { MsiXVectorTable::from_raw_parts(base.add(table_offset), table_size) }
}
/// Changes the global enable status for the device's MSI-X capability. If set, regular IRQs
@ -266,7 +294,7 @@ impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiXData<'s, S> {
impl MsiXVectorTable<'_> {
unsafe fn from_raw_parts(base: PhysicalAddress, len: usize) -> Result<Self, Error> {
let vectors = DeviceMemoryIoMut::map_slice(base, len)?;
let vectors = DeviceMemoryIoMut::map_slice(base, len, Default::default())?;
Ok(Self { vectors })
}
@ -282,7 +310,7 @@ impl MsiXVectorTable<'_> {
end: usize,
ic: &C,
affinity: InterruptAffinity,
handler: &'static dyn MsiHandler,
handler: &'static dyn InterruptHandler,
) -> Result<Vec<MsiInfo>, Error> {
assert!(end > start);
let mut range = vec![
@ -321,7 +349,7 @@ impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiData<'s, S> {
&mut self,
ic: &C,
affinity: InterruptAffinity,
handler: &'static dyn MsiHandler,
handler: &'static dyn InterruptHandler,
) -> Result<MsiInfo, Error> {
let info = ic.register_msi(affinity, handler)?;

View File

@ -0,0 +1,222 @@
use core::ops::Range;
use alloc::{sync::Arc, vec::Vec};
use device_api::{
interrupt::{InterruptAffinity, InterruptHandler, IrqOptions, MsiInfo},
Device,
};
use kernel_util::{
message_interrupt_controller, register_global_interrupt, sync::spin_rwlock::IrqSafeRwLock,
util::OneTimeInit,
};
use yggdrasil_abi::error::Error;
use crate::{
capability::{MsiCapability, MsiXCapability, MsiXVectorTable},
PciAddress, PciConfigSpace, PciConfigurationSpace, PciSegmentInfo,
};
/// Describes a PCI device
#[derive(Clone)]
pub struct PciDeviceInfo {
    /// Address of the device
    pub address: PciAddress,
    /// Configuration space access method
    pub config_space: PciConfigSpace,
    /// Describes the PCI segment this device is a part of
    pub segment: Arc<PciSegmentInfo>,
    // Lazily-initialized interrupt state, shared between clones of this info.
    // Set up exactly once by init_interrupts() and consulted by the
    // map_interrupt* methods.
    pub(crate) interrupt_config: Arc<OneTimeInit<IrqSafeRwLock<InterruptConfig>>>,
}
/// Interrupt configuration resolved for a single PCI device by
/// [PciDeviceInfo::init_interrupts].
pub struct InterruptConfig {
    // Mode the driver asked for; currently only recorded, not re-read
    #[allow(unused)]
    preferred_mode: PreferredInterruptMode,
    // Mode actually selected from the device/segment capabilities
    configured_mode: ConfiguredInterruptMode,
}
/// Legacy PCI interrupt pin. Register values 1..=4 map to pins A..=D
/// (see the `TryFrom<u32>` impl); a value of 0 means "no pin".
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum PciInterruptPin {
    A,
    B,
    C,
    D,
}
/// Interrupt delivery mode a driver would prefer for its device.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum PreferredInterruptMode {
    /// Message-signaled interrupts (MSI/MSI-X), honored only when the segment
    /// supports them
    Msi,
    /// Legacy pin-based interrupt routing
    Legacy,
}
/// Interrupt mode actually configured for a device, resolved from the
/// preferred mode and the capabilities the device/segment really exposes.
enum ConfiguredInterruptMode {
    /// MSI-X, carrying the device's mapped vector table
    MsiX(MsiXVectorTable<'static>),
    /// Plain MSI
    Msi,
    /// Legacy routing through the given interrupt pin
    Legacy(PciInterruptPin),
    /// Device exposes no usable interrupt mechanism
    None,
}
/// Identifies a legacy interrupt source: a device address plus the pin it
/// raises. Used as the key of the segment's IRQ translation map.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct PciInterrupt {
    /// Address of the interrupting device
    pub address: PciAddress,
    /// Which of the four legacy pins the device asserts
    pub pin: PciInterruptPin,
}
/// Describes where a legacy PCI interrupt pin is routed on the system's
/// interrupt controller.
#[derive(Clone, Copy, Debug)]
pub struct PciInterruptRoute {
    /// Global system IRQ number the pin is routed to
    pub number: u32,
    /// Options passed through when registering the IRQ handler
    pub options: IrqOptions,
}
/// Predicate a PCI driver uses to claim the devices it can handle.
pub enum PciMatch {
    /// Arbitrary check over the probed device info
    Generic(fn(&PciDeviceInfo) -> bool),
    /// Match by ID pair — presumably (vendor ID, device ID); the matching
    /// code is elsewhere, confirm ordering there
    Vendor(u16, u16),
    /// Match by class code, with optional subclass and prog-if refinements
    Class(u8, Option<u8>, Option<u8>),
}
/// A registered PCI device driver: a match rule plus a probe entry point.
pub struct PciDriver {
    /// Human-readable driver name
    pub(crate) name: &'static str,
    /// Rule deciding whether this driver services a given device
    pub(crate) check: PciMatch,
    /// Instantiates the driver for a matched device
    pub(crate) probe: fn(&PciDeviceInfo) -> Result<&'static dyn Device, Error>,
}
/// Used to store PCI bus devices which were enumerated by the kernel
pub struct PciBusDevice {
    /// Device identity and configuration-space access
    pub(crate) info: PciDeviceInfo,
    /// Driver instance bound to the device, if probing succeeded
    pub(crate) driver: Option<&'static dyn Device>,
}
impl PciDeviceInfo {
    /// Resolves and stores the interrupt mode for this device.
    ///
    /// Preference order when the segment supports MSI and the driver asked
    /// for it: MSI-X (masking all vectors and enabling the function first),
    /// then plain MSI, then the legacy interrupt pin. Otherwise the legacy
    /// pin is used regardless of `preferred_mode`.
    ///
    /// Must be called exactly once per device, before any `map_interrupt*`
    /// call.
    ///
    /// # Panics
    ///
    /// Panics if interrupts were already configured for this device
    /// (NOTE(review): returns `Result` but never `Err` — consider reporting
    /// double-init as an error instead).
    pub fn init_interrupts(&self, preferred_mode: PreferredInterruptMode) -> Result<(), Error> {
        self.interrupt_config
            .try_init_with(|| {
                let configured_mode =
                    if self.segment.has_msi && preferred_mode == PreferredInterruptMode::Msi {
                        if let Some(mut msix) = self.config_space.capability::<MsiXCapability>() {
                            // Mask every vector until a handler is attached,
                            // then enable MSI-X on the function
                            let mut vt = msix.vector_table().unwrap();
                            vt.mask_all();
                            msix.set_function_mask(false);
                            msix.set_enabled(true);
                            ConfiguredInterruptMode::MsiX(vt)
                        } else if self.config_space.capability::<MsiCapability>().is_some() {
                            ConfiguredInterruptMode::Msi
                        } else {
                            self.interrupt_mode_from_pin()
                        }
                    } else {
                        // Ignore preferred_mode here: Legacy is the only mode
                        // available on this segment
                        self.interrupt_mode_from_pin()
                    };

                IrqSafeRwLock::new(InterruptConfig {
                    preferred_mode,
                    configured_mode,
                })
            })
            .expect("Attempted to double-configure interrupts for a PCI device");

        Ok(())
    }

    // Falls back to the device's legacy interrupt pin, or None if the device
    // reports no pin at all.
    fn interrupt_mode_from_pin(&self) -> ConfiguredInterruptMode {
        match self.config_space.interrupt_pin() {
            Some(pin) => ConfiguredInterruptMode::Legacy(pin),
            None => ConfiguredInterruptMode::None,
        }
    }

    /// Binds `handler` to this device's single interrupt.
    ///
    /// Returns `Some(MsiInfo)` for MSI/MSI-X (vector 0 in the MSI-X case),
    /// `None` for a successfully routed legacy pin, and
    /// `Error::InvalidOperation` when no interrupt mechanism is available.
    ///
    /// `init_interrupts` must have been called first.
    pub fn map_interrupt(
        &self,
        affinity: InterruptAffinity,
        handler: &'static dyn InterruptHandler,
    ) -> Result<Option<MsiInfo>, Error> {
        let mut irq = self.interrupt_config.get().write();

        match &mut irq.configured_mode {
            ConfiguredInterruptMode::MsiX(msix) => {
                let info =
                    msix.register_range(0, 1, message_interrupt_controller(), affinity, handler)?;
                Ok(Some(info[0]))
            }
            ConfiguredInterruptMode::Msi => {
                let mut msi = self
                    .config_space
                    .capability::<MsiCapability>()
                    .ok_or(Error::InvalidOperation)?;
                let info = msi.register(message_interrupt_controller(), affinity, handler)?;
                Ok(Some(info))
            }
            ConfiguredInterruptMode::Legacy(pin) => {
                self.try_map_legacy(*pin, handler)?;
                Ok(None)
            }
            ConfiguredInterruptMode::None => Err(Error::InvalidOperation),
        }
    }

    /// Binds `handler` to a range of interrupt vectors.
    ///
    /// Only supported in MSI-X mode; any other configured mode yields
    /// `Error::InvalidOperation`.
    pub fn map_interrupt_multiple(
        &self,
        vector_range: Range<usize>,
        affinity: InterruptAffinity,
        handler: &'static dyn InterruptHandler,
    ) -> Result<Vec<MsiInfo>, Error> {
        let mut irq = self.interrupt_config.get().write();
        let start = vector_range.start;
        let end = vector_range.end;

        match &mut irq.configured_mode {
            ConfiguredInterruptMode::MsiX(msix) => msix.register_range(
                start,
                end,
                message_interrupt_controller(),
                affinity,
                handler,
            ),
            _ => Err(Error::InvalidOperation),
        }
    }

    // Looks up (device address, pin) in the segment's IRQ translation map and
    // registers `handler` on the resulting global system IRQ.
    fn try_map_legacy(
        &self,
        pin: PciInterruptPin,
        handler: &'static dyn InterruptHandler,
    ) -> Result<(), Error> {
        let src = PciInterrupt {
            address: self.address,
            pin,
        };
        let route = self
            .segment
            .irq_translation_map
            .get(&src)
            .ok_or(Error::InvalidOperation)?;

        log::debug!(
            "PCI {} pin {:?} -> system IRQ #{}",
            src.address,
            src.pin,
            route.number
        );

        register_global_interrupt(route.number, route.options, handler)
    }
}
impl TryFrom<u32> for PciInterruptPin {
type Error = ();
fn try_from(value: u32) -> Result<Self, Self::Error> {
match value {
1 => Ok(Self::A),
2 => Ok(Self::B),
3 => Ok(Self::C),
4 => Ok(Self::D),
_ => Err(()),
}
}
}

View File

@ -5,17 +5,21 @@ extern crate alloc;
use core::fmt;
#[cfg(target_arch = "x86_64")]
use acpi::mcfg::McfgEntry;
use alloc::vec::Vec;
use alloc::{collections::BTreeMap, sync::Arc, vec::Vec};
use bitflags::bitflags;
use device::{PciBusDevice, PciDeviceInfo, PciDriver, PciInterrupt, PciInterruptRoute, PciMatch};
use device_api::Device;
use kernel_util::{
mem::address::{FromRaw, PhysicalAddress},
sync::IrqSafeSpinlock,
util::OneTimeInit,
};
use yggdrasil_abi::error::Error;
pub mod capability;
pub mod device;
mod space;
pub use space::{
@ -45,7 +49,7 @@ bitflags! {
}
/// Represents the address of a single object on a bus (or the bus itself)
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct PciAddress {
/// PCIe segment group, ignored (?) with PCI
pub segment: u8,
@ -58,10 +62,12 @@ pub struct PciAddress {
}
/// Address provided by PCI configuration space Base Address Register
#[derive(Debug)]
#[derive(Debug, Clone, Copy)]
pub enum PciBaseAddress {
/// 32/64-bit memory address
Memory(usize),
/// 32-bit memory address
Memory32(u32),
/// 64-bit memory address
Memory64(u64),
/// I/O space address
Io(u16),
}
@ -101,62 +107,141 @@ pub trait PciCapability {
) -> Self::CapabilityData<'s, S>;
}
/// Describes a PCI device
/// Bump allocator handing out PCI bus addresses for device BARs from the
/// 32-bit and 64-bit memory windows described by the host bridge's address
/// ranges. I/O window support is stubbed out (commented) for now.
struct BusAddressAllocator {
    // Base of the 64-bit memory window as seen from the PCI bus
    pci_base_64: u64,
    // Base of the 32-bit memory window as seen from the PCI bus
    pci_base_32: u32,
    // pci_base_io: u16,
    // Host physical addresses backing each window
    host_base_64: PhysicalAddress,
    host_base_32: PhysicalAddress,
    // host_base_io: PhysicalAddress,
    // Total window sizes, in bytes
    size_64: usize,
    size_32: usize,
    // size_io: usize,
    // Current bump offsets into each window
    offset_64: u64,
    offset_32: u32,
}
#[cfg_attr(target_arch = "x86_64", allow(dead_code))]
impl BusAddressAllocator {
    /// Builds an allocator from the PCI host bridge's address ranges.
    ///
    /// The first 32-bit and the first 64-bit memory range win; subsequent
    /// ranges of the same kind are ignored. I/O ranges are currently
    /// unsupported (commented out).
    ///
    /// # Panics
    ///
    /// Panics if no 32-bit or no 64-bit memory range is present, or if the
    /// 32-bit range's PCI base does not fit into a `u32`.
    pub fn from_ranges(ranges: &[PciAddressRange]) -> Self {
        let mut range_32 = None;
        let mut range_64 = None;
        // let mut range_io = None;

        for range in ranges {
            let range_val = (range.pci_base, range.host_base, range.size);

            match range.ty {
                // PciRangeType::Io if range_io.is_none() => {
                //     range_io.replace(range_val);
                // }
                PciRangeType::Memory32 if range_32.is_none() => {
                    range_32.replace(range_val);
                }
                PciRangeType::Memory64 if range_64.is_none() => {
                    range_64.replace(range_val);
                }
                _ => (),
            }
        }

        let (pci_base_32, host_base_32, size_32) = range_32.unwrap();
        let (pci_base_64, host_base_64, size_64) = range_64.unwrap();
        // let (pci_base_io, host_base_io, size_io) = range_io.unwrap();

        Self {
            pci_base_64,
            pci_base_32: pci_base_32.try_into().unwrap(),
            // pci_base_io: pci_base_io.try_into().unwrap(),
            host_base_64,
            host_base_32,
            // host_base_io,
            size_64,
            size_32,
            // size_io,
            offset_64: 0,
            offset_32: 0,
        }
    }

    /// Bump-allocates `size` bytes from the window selected by `ty`,
    /// returning the PCI bus address (to be programmed into a BAR) together
    /// with the host physical address backing it.
    ///
    /// # Panics
    ///
    /// `todo!()`s when the selected window is exhausted; I/O and
    /// Configuration range types are not implemented.
    pub fn allocate(&mut self, ty: PciRangeType, size: usize) -> (PciBaseAddress, PhysicalAddress) {
        match ty {
            PciRangeType::Io => todo!(),
            PciRangeType::Memory32 => {
                // `>` rather than `>=`: an allocation that ends exactly at the
                // window boundary still fits (fixes an off-by-one that
                // rejected exact-fit allocations)
                if self.offset_32 as usize + size > self.size_32 {
                    todo!();
                }
                let bar = PciBaseAddress::Memory32(self.pci_base_32 + self.offset_32);
                let host = self.host_base_32.add(self.offset_32 as usize);
                self.offset_32 += size as u32;
                (bar, host)
            }
            PciRangeType::Memory64 => {
                if self.offset_64 as usize + size > self.size_64 {
                    todo!();
                }
                let bar = PciBaseAddress::Memory64(self.pci_base_64 + self.offset_64);
                let host = self.host_base_64.add(self.offset_64 as usize);
                self.offset_64 += size as u64;
                (bar, host)
            }
            PciRangeType::Configuration => unimplemented!(),
        }
    }
}
#[derive(Debug)]
pub struct PciDeviceInfo {
/// Address of the device
pub address: PciAddress,
/// Configuration space access method
pub config_space: PciConfigSpace,
}
pub struct PciSegmentInfo {
pub segment_number: u8,
pub bus_number_start: u8,
pub bus_number_end: u8,
pub ecam_phys_base: Option<PhysicalAddress>,
pub enum PciMatch {
Generic(fn(&PciDeviceInfo) -> bool),
Vendor(u16, u16),
Class(u8, Option<u8>, Option<u8>),
}
pub struct PciDriver {
name: &'static str,
check: PciMatch,
probe: fn(&PciDeviceInfo) -> Result<&'static dyn Device, Error>,
}
/// Used to store PCI bus devices which were enumerated by the kernel
pub struct PciBusDevice {
info: PciDeviceInfo,
driver: Option<&'static dyn Device>,
pub irq_translation_map: BTreeMap<PciInterrupt, PciInterruptRoute>,
pub has_msi: bool,
}
/// Represents a single PCIe bus segment
pub struct PciBusSegment {
segment_number: u8,
bus_number_start: u8,
bus_number_end: u8,
ecam_phys_base: Option<PhysicalAddress>,
allocator: Option<BusAddressAllocator>,
info: Arc<PciSegmentInfo>,
devices: Vec<PciBusDevice>,
}
/// Address-space kind of a PCI host bridge address range.
pub enum PciRangeType {
    /// Configuration space
    Configuration,
    /// I/O port space
    Io,
    /// 32-bit memory space
    Memory32,
    /// 64-bit memory space
    Memory64,
}
/// One bus-address range of a PCI host bridge: a window of PCI bus addresses
/// and the host physical addresses backing it.
pub struct PciAddressRange {
    /// Which address space the range belongs to
    pub ty: PciRangeType,
    /// Bus number the range applies to
    pub bus_number: u8,
    /// Base of the window as seen from the PCI bus
    pub pci_base: u64,
    /// Host physical base backing the window
    pub host_base: PhysicalAddress,
    /// Length of the window, in bytes
    pub size: usize,
}
/// Manager struct to store and control all PCI devices in the system
pub struct PciBusManager {
    // All enumerated PCIe bus segments
    segments: Vec<PciBusSegment>,
}
impl PciBaseAddress {
pub fn as_memory(self) -> usize {
pub fn as_memory(self) -> Option<PhysicalAddress> {
match self {
Self::Memory(address) => address,
_ => panic!("Not a memory BAR"),
Self::Memory32(address) => Some(PhysicalAddress::from_raw(address as u64)),
Self::Memory64(address) => Some(PhysicalAddress::from_raw(address)),
_ => None,
}
}
}
impl PciBusSegment {
fn probe_config_space(&self, address: PciAddress) -> Result<Option<PciConfigSpace>, Error> {
match self.ecam_phys_base {
match self.info.ecam_phys_base {
Some(ecam_phys_base) => Ok(unsafe {
PciEcam::probe_raw_parts(ecam_phys_base, self.bus_number_start, address)?
PciEcam::probe_raw_parts(ecam_phys_base, self.info.bus_number_start, address)?
}
.map(PciConfigSpace::Ecam)),
None => todo!(),
@ -183,9 +268,86 @@ impl PciBusSegment {
// // TODO
// }
if let Some(allocator) = self.allocator.as_mut() {
log::debug!("Remapping BARs for {}", address);
// Find valid BARs
let mut i = 0;
let mut bar_mask = 0;
while i < 6 {
let w0 = config.read_u32(0x10 + i * 4);
let bar_width = match w0 & 1 == 0 {
// Memory BAR
true => match (w0 >> 1) & 3 {
// 32-bit BAR
0 => 1,
// Reserved
1 => unimplemented!(),
// 64-bit BAR
2 => 2,
// Unknown
_ => unreachable!(),
},
false => 1,
};
bar_mask |= 1 << i;
i += bar_width;
}
for i in 0..6 {
if (1 << i) & bar_mask != 0 {
let orig_value = config.bar(i).unwrap();
let size = unsafe { config.bar_size(i) };
if size != 0 {
log::debug!("BAR{}: size={:#x}", i, size);
match orig_value {
PciBaseAddress::Io(_) => (),
PciBaseAddress::Memory64(_) => {
let (bar, host) = allocator.allocate(PciRangeType::Memory64, size);
let bar_address = bar.as_memory().unwrap();
unsafe {
config.set_bar(i, bar);
}
log::debug!(
"Mapped BAR{} -> pci {:#x} host {:#x}",
i,
bar_address,
host
);
// TODO Don't yet differentiate between Host/PCI addresses, lol
assert_eq!(bar_address, host);
}
PciBaseAddress::Memory32(_) => {
let (bar, host) = allocator.allocate(PciRangeType::Memory32, size);
let bar_address = bar.as_memory().unwrap();
unsafe {
config.set_bar(i, bar);
}
log::debug!(
"Mapped BAR{} -> pci {:#x} host {:#x}",
i,
bar_address,
host
);
// TODO Don't yet differentiate between Host/PCI addresses, lol
assert_eq!(bar_address, host);
}
}
}
}
}
}
let info = PciDeviceInfo {
address,
segment: self.info.clone(),
config_space: config,
interrupt_config: Arc::new(OneTimeInit::new()),
};
self.devices.push(PciBusDevice { info, driver: None });
@ -193,7 +355,7 @@ impl PciBusSegment {
}
fn enumerate_bus(&mut self, bus: u8) -> Result<(), Error> {
let address = PciAddress::for_bus(self.segment_number, bus);
let address = PciAddress::for_bus(self.info.segment_number, bus);
for i in 0..32 {
let device_address = address.with_device(i);
@ -206,7 +368,7 @@ impl PciBusSegment {
/// Enumerates the bus segment, placing found devices into the manager
pub fn enumerate(&mut self) -> Result<(), Error> {
for bus in self.bus_number_start..self.bus_number_end {
for bus in self.info.bus_number_start..self.info.bus_number_end {
self.enumerate_bus(bus)?;
}
Ok(())
@ -250,12 +412,50 @@ impl PciBusManager {
}
/// Enumerates a bus segment provided by ACPI MCFG table entry
#[cfg(target_arch = "x86_64")]
pub fn add_segment_from_mcfg(entry: &McfgEntry) -> Result<(), Error> {
let mut bus_segment = PciBusSegment {
segment_number: entry.pci_segment_group as u8,
bus_number_start: entry.bus_number_start,
bus_number_end: entry.bus_number_end,
ecam_phys_base: Some(PhysicalAddress::from_raw(entry.base_address)),
info: Arc::new(PciSegmentInfo {
segment_number: entry.pci_segment_group as u8,
bus_number_start: entry.bus_number_start,
bus_number_end: entry.bus_number_end,
ecam_phys_base: Some(PhysicalAddress::from_raw(entry.base_address)),
// TODO obtain this from ACPI SSDT
irq_translation_map: BTreeMap::new(),
has_msi: true,
}),
// Firmware done this for us
allocator: None,
devices: Vec::new(),
};
let mut this = PCI_MANAGER.lock();
bus_segment.enumerate()?;
this.segments.push(bus_segment);
Ok(())
}
#[cfg(target_arch = "aarch64")]
pub fn add_segment_from_device_tree(
cfg_base: PhysicalAddress,
bus_range: core::ops::Range<u8>,
ranges: Vec<PciAddressRange>,
interrupt_map: BTreeMap<PciInterrupt, PciInterruptRoute>,
) -> Result<(), Error> {
let mut bus_segment = PciBusSegment {
info: Arc::new(PciSegmentInfo {
segment_number: 0,
bus_number_start: bus_range.start,
bus_number_end: bus_range.end,
ecam_phys_base: Some(cfg_base),
irq_translation_map: interrupt_map,
has_msi: false,
}),
allocator: Some(BusAddressAllocator::from_ranges(&ranges)),
devices: Vec::new(),
};
@ -285,6 +485,16 @@ impl PciAddress {
}
}
/// Constructs a [PciAddress] representing a specific function of a specific
/// device on the given segment and bus.
pub const fn for_function(segment: u8, bus: u8, device: u8, function: u8) -> Self {
    Self {
        segment,
        bus,
        device,
        function,
    }
}
/// Constructs a [PciAddress] representing a device on a given bus
pub const fn with_device(self, device: u8) -> Self {
Self {

View File

@ -5,7 +5,7 @@ use yggdrasil_abi::error::Error;
use super::{PciAddress, PciConfigurationSpace};
/// PCI Express Enhanced Configuration Access Mechanism
#[derive(Debug)]
#[derive(Debug, Clone)]
#[repr(transparent)]
pub struct PciEcam {
mapping: DeviceMemoryMapping,
@ -32,7 +32,7 @@ impl PciEcam {
/// regions. The address must be aligned to a 4KiB boundary and be valid for accesses within a
/// 4KiB-sized range.
pub unsafe fn map(phys_addr: PhysicalAddress) -> Result<Self, Error> {
let mapping = DeviceMemoryMapping::map(phys_addr, 0x1000)?;
let mapping = DeviceMemoryMapping::map(phys_addr, 0x1000, Default::default())?;
Ok(Self { mapping })
}

View File

@ -1,5 +1,7 @@
use alloc::sync::Arc;
use super::{PciAddress, PciBaseAddress, PciCapability, PciCapabilityId, PciEcam};
use crate::PciStatusRegister;
use crate::{device::PciInterruptPin, PciCommandRegister, PciStatusRegister};
pub(super) mod ecam;
@ -60,7 +62,7 @@ pub struct PciLegacyConfigurationSpace {
}
/// Describes a configuration space access method for a PCI device
#[derive(Debug)]
#[derive(Debug, Clone)]
pub enum PciConfigSpace {
/// Legacy configuration space.
///
@ -220,6 +222,63 @@ pub trait PciConfigurationSpace {
capability_pointer
);
/// Decodes the legacy Interrupt Pin register (config space offset 0x3D).
/// Returns `None` when the register holds a value outside 1..=4, i.e. the
/// device drives no legacy pin.
fn interrupt_pin(&self) -> Option<PciInterruptPin> {
    let raw = u32::from(self.read_u8(0x3D));
    PciInterruptPin::try_from(raw).ok()
}
/// Determines the size of the region mapped by the Base Address Register
/// with the given `index`, using the standard write-all-ones probe.
///
/// Returns 0 if the probed BAR reads back as zero (unimplemented BAR).
///
/// # Safety
///
/// Temporarily disables the device's I/O and memory decoding and rewrites
/// the BAR; the caller must ensure the device is not concurrently accessed.
unsafe fn bar_size(&self, index: usize) -> usize {
    let cmd = self.command();

    // Disable I/O and memory
    self.set_command(
        cmd & !(PciCommandRegister::ENABLE_IO | PciCommandRegister::ENABLE_MEMORY).bits(),
    );

    let orig_value = self.bar(index).unwrap();
    // TODO preserve prefetch bit
    // Write all-ones into the address bits; the device keeps the bits it
    // cannot decode at zero, so reading back encodes the region size
    let mask_value = match orig_value {
        PciBaseAddress::Io(_) => PciBaseAddress::Io(0xFFFC),
        PciBaseAddress::Memory32(_) => PciBaseAddress::Memory32(0xFFFFFFF0),
        PciBaseAddress::Memory64(_) => PciBaseAddress::Memory64(0xFFFFFFFFFFFFFFF0),
    };
    self.set_bar(index, mask_value);
    let new_value = self.bar(index).unwrap();
    // Size is the two's complement of the read-back address bits
    let size = match new_value {
        PciBaseAddress::Io(address) if address != 0 => ((!address) + 1) as usize,
        PciBaseAddress::Memory32(address) if address != 0 => ((!address) + 1) as usize,
        PciBaseAddress::Memory64(address) if address != 0 => ((!address) + 1) as usize,
        _ => 0,
    };

    // Restore the original BAR and command register values
    self.set_bar(index, orig_value);
    self.set_command(cmd);

    size
}
/// Updates the value of the Base Address Register with given index.
///
/// # Note
///
/// The function is only valid for devices with `header_type() == 0`
///
/// The `index` corresponds to the actual configuration space BAR index.
unsafe fn set_bar(&self, index: usize, value: PciBaseAddress) {
    assert!(index < 6);

    match value {
        PciBaseAddress::Io(value) => {
            // Bit 0 set marks an I/O BAR; address bits 1:0 are reserved
            self.write_u32(0x10 + index * 4, ((value as u32) & !0x3) | 1)
        }
        // Memory BAR: address bits 3:0 carry type/prefetch flags
        PciBaseAddress::Memory32(address) => self.write_u32(0x10 + index * 4, address & !0xF),
        PciBaseAddress::Memory64(address) => {
            // Type field 0b10 in bits 2:1 marks a 64-bit BAR; the high half
            // of the address occupies the following BAR slot
            self.write_u32(0x10 + index * 4, ((address as u32) & !0xF) | (2 << 1));
            self.write_u32(0x10 + (index + 1) * 4, (address >> 32) as u32);
        }
    }
}
/// Returns the value of the Base Address Register with given index.
///
/// # Note
@ -238,18 +297,18 @@ pub trait PciConfigurationSpace {
0 => match (w0 >> 1) & 3 {
0 => {
// 32-bit memory BAR
Some(PciBaseAddress::Memory((w0 as usize) & !0xF))
Some(PciBaseAddress::Memory32(w0 & !0xF))
}
2 => {
// 64-bit memory BAR
let w1 = self.read_u32(0x10 + (index + 1) * 4);
Some(PciBaseAddress::Memory(
((w1 as usize) << 32) | ((w0 as usize) & !0xF),
Some(PciBaseAddress::Memory64(
((w1 as u64) << 32) | ((w0 as u64) & !0xF),
))
}
_ => unimplemented!(),
},
1 => todo!(),
1 => Some(PciBaseAddress::Io((w0 as u16) & !0x3)),
_ => unreachable!(),
}
} else {
@ -265,7 +324,7 @@ pub trait PciConfigurationSpace {
0 => match (w0 >> 1) & 3 {
0 => {
// 32-bit memory BAR
Some(PciBaseAddress::Memory((w0 as usize) & !0xF))
Some(PciBaseAddress::Memory32(w0 & !0xF))
}
// TODO can 64-bit BARs not be on a 64-bit boundary?
2 => todo!(),
@ -311,3 +370,13 @@ pub trait PciConfigurationSpace {
})
}
}
// Forward configuration-space accesses through an Arc, so shared handles can
// be used anywhere a PciConfigurationSpace is expected.
impl<T: PciConfigurationSpace> PciConfigurationSpace for Arc<T> {
    fn read_u32(&self, offset: usize) -> u32 {
        (**self).read_u32(offset)
    }

    fn write_u32(&self, offset: usize, value: u32) {
        (**self).write_u32(offset, value)
    }
}

View File

@ -5,6 +5,7 @@ pub enum Error {
NoCommonConfigCapability,
NoNotifyConfigCapability,
NoDeviceConfigCapability,
NoInterruptStatusCapability,
QueueTooLarge,
InvalidQueueSize,
EmptyTransaction,

View File

@ -66,9 +66,13 @@ pub struct VirtQueue {
}
impl AvailableRing {
pub fn with_capacity(capacity: usize) -> Result<Self, Error> {
pub fn with_capacity(no_irq: bool, capacity: usize) -> Result<Self, Error> {
let mut data = PageBox::new_uninit_slice(capacity + 3)?;
if no_irq {
data[0].write(1);
}
data[1].write(0);
Ok(Self { data })
@ -112,6 +116,7 @@ impl VirtQueue {
index: u16,
capacity: usize,
msix_vector: Option<u16>,
no_avail_irq: bool,
) -> Result<Self, Error> {
// TODO check if queue is already set up
@ -126,7 +131,7 @@ impl VirtQueue {
}
let descriptor_table = PageBox::new_uninit_slice(capacity)?;
let available = AvailableRing::with_capacity(capacity)?;
let available = AvailableRing::with_capacity(no_avail_irq, capacity)?;
let used = UsedRing::with_capacity(capacity)?;
transport.set_queue(
@ -164,11 +169,12 @@ impl VirtQueue {
index: u16,
capacity: usize,
msix_vector: Option<u16>,
no_avail_irq: bool,
) -> Result<Self, Error> {
let max_capacity = transport.max_queue_size(index);
let capacity = capacity.min(max_capacity as usize);
Self::with_capacity(transport, index, capacity, msix_vector)
Self::with_capacity(transport, index, capacity, msix_vector, no_avail_irq)
}
pub unsafe fn add<'a, 'b>(

View File

@ -19,6 +19,7 @@ pub trait Transport {
fn notify_off_mul(&self) -> usize;
fn supports_msix(&self) -> bool;
fn device_cfg(&self) -> Option<&DeviceMemoryIo<[u8]>>;
fn read_interrupt_status(&self) -> (bool, bool);
fn read_device_features(&mut self) -> u64 {
let cfg = self.common_cfg();
@ -71,6 +72,8 @@ pub trait Transport {
cfg.queue_device.set(used_ring_phys.into_raw());
if self.supports_msix() {
cfg.queue_msix_vector.set(msix_vector.unwrap_or(0xFFFF));
} else {
cfg.queue_msix_vector.set(0xFFFF);
}
cfg.queue_enable.set(1);
}

View File

@ -1,12 +1,12 @@
use kernel_util::mem::{
address::{FromRaw, PhysicalAddress},
device::DeviceMemoryIo,
use kernel_util::mem::device::DeviceMemoryIo;
use tock_registers::{
interfaces::Readable,
registers::{ReadOnly, WriteOnly},
};
use tock_registers::registers::WriteOnly;
use ygg_driver_pci::{
capability::{
VirtioCapabilityData, VirtioCommonConfigCapability, VirtioDeviceConfigCapability,
VirtioNotifyConfigCapability,
VirtioInterruptStatusCapability, VirtioNotifyConfigCapability,
},
PciCommandRegister, PciConfigurationSpace,
};
@ -19,6 +19,7 @@ pub struct PciTransport {
common_cfg: DeviceMemoryIo<'static, CommonConfiguration>,
device_cfg: DeviceMemoryIo<'static, [u8]>,
notify_cfg: DeviceMemoryIo<'static, [WriteOnly<u16>]>,
isr: DeviceMemoryIo<'static, ReadOnly<u32>>,
notify_cfg_mul: usize,
}
@ -42,6 +43,11 @@ impl Transport for PciTransport {
fn device_cfg(&self) -> Option<&DeviceMemoryIo<[u8]>> {
Some(&self.device_cfg)
}
/// Reads the ISR status register and decodes it into a
/// `(queue_interrupt, config_change_interrupt)` pair.
// NOTE(review): per virtio-over-PCI convention, reading the ISR register
// likely acknowledges/clears it as a side effect — confirm against the spec.
fn read_interrupt_status(&self) -> (bool, bool) {
    let status = self.isr.get();
    let queue_interrupt = status & 0x1 != 0;
    let config_interrupt = status & 0x2 != 0;
    (queue_interrupt, config_interrupt)
}
}
impl PciTransport {
@ -73,6 +79,9 @@ impl PciTransport {
let notify_cfg_cap = space
.capability::<VirtioNotifyConfigCapability>()
.ok_or(Error::NoNotifyConfigCapability)?;
let isr_cap = space
.capability::<VirtioInterruptStatusCapability>()
.ok_or(Error::NoInterruptStatusCapability)?;
// TODO MSI/MSI-X
@ -82,38 +91,52 @@ impl PciTransport {
.bar(common_cfg_cap.bar_index().unwrap())
.unwrap()
.as_memory()
+ common_cfg_cap.bar_offset();
.unwrap()
.add(common_cfg_cap.bar_offset());
let device_cfg_base = space
.bar(device_cfg_cap.bar_index().unwrap())
.unwrap()
.as_memory()
+ device_cfg_cap.bar_offset();
.unwrap()
.add(device_cfg_cap.bar_offset());
let device_cfg_len = device_cfg_cap.length();
let notify_cfg_base = space
.bar(notify_cfg_cap.bar_index().unwrap())
.unwrap()
.as_memory()
+ notify_cfg_cap.bar_offset();
.unwrap()
.add(notify_cfg_cap.bar_offset());
let notify_cfg_len = notify_cfg_cap.length();
let notify_cfg_mul = notify_cfg_cap.offset_multiplier();
let isr_base = space
.bar(isr_cap.bar_index().unwrap())
.unwrap()
.as_memory()
.unwrap()
.add(isr_cap.bar_offset());
let isr_len = isr_cap.length();
let common_cfg_base = PhysicalAddress::from_raw(common_cfg_base);
let device_cfg_base = PhysicalAddress::from_raw(device_cfg_base);
let notify_cfg_base = PhysicalAddress::from_raw(notify_cfg_base);
assert!(isr_len >= 4);
assert_eq!(notify_cfg_len % 2, 0);
let common_cfg = unsafe { DeviceMemoryIo::map(common_cfg_base) }.unwrap();
let device_cfg =
unsafe { DeviceMemoryIo::map_slice(device_cfg_base, device_cfg_len) }.unwrap();
let notify_cfg =
unsafe { DeviceMemoryIo::map_slice(notify_cfg_base, notify_cfg_len / 2) }.unwrap();
let common_cfg =
unsafe { DeviceMemoryIo::map(common_cfg_base, Default::default()) }.unwrap();
let device_cfg = unsafe {
DeviceMemoryIo::map_slice(device_cfg_base, device_cfg_len, Default::default())
}
.unwrap();
let notify_cfg = unsafe {
DeviceMemoryIo::map_slice(notify_cfg_base, notify_cfg_len / 2, Default::default())
}
.unwrap();
let isr = unsafe { DeviceMemoryIo::map(isr_base, Default::default()) }.unwrap();
Ok(Self {
common_cfg,
device_cfg,
notify_cfg,
notify_cfg_mul,
isr,
})
}
}

View File

@ -9,12 +9,11 @@ use core::mem::size_of;
use alloc::{boxed::Box, collections::BTreeMap};
use bytemuck::{Pod, Zeroable};
use device_api::{
interrupt::{InterruptAffinity, MsiHandler},
interrupt::{InterruptAffinity, InterruptHandler},
Device,
};
use kernel_util::{
mem::PageBox,
message_interrupt_controller,
sync::{spin_rwlock::IrqSafeRwLock, IrqSafeSpinlock, IrqSafeSpinlockGuard},
util::OneTimeInit,
};
@ -22,10 +21,7 @@ use ygg_driver_net_core::{
interface::{NetworkDevice, NetworkInterfaceType},
Packet,
};
use ygg_driver_pci::{
capability::{MsiXCapability, MsiXVectorTable},
PciConfigurationSpace, PciDeviceInfo,
};
use ygg_driver_pci::device::{PciDeviceInfo, PreferredInterruptMode};
use ygg_driver_virtio_core::{
queue::VirtQueue,
transport::{pci::PciTransport, Transport},
@ -36,10 +32,9 @@ use yggdrasil_abi::{error::Error, net::MacAddress};
struct Queues {
receive: IrqSafeSpinlock<VirtQueue>,
transmit: IrqSafeSpinlock<VirtQueue>,
#[allow(unused)]
configuration_vector: usize,
receive_vector: usize,
// #[allow(unused)]
// configuration_vector: usize,
receive_vector: Option<u16>,
}
pub struct VirtioNet<T: Transport> {
@ -51,7 +46,7 @@ pub struct VirtioNet<T: Transport> {
pending_packets: IrqSafeRwLock<BTreeMap<u16, PageBox<[u8]>>>,
vector_table: IrqSafeRwLock<MsiXVectorTable<'static>>,
pci_device_info: Option<PciDeviceInfo>,
}
#[derive(Clone, Copy, Debug, Pod, Zeroable)]
@ -77,7 +72,7 @@ impl Queues {
impl<T: Transport> VirtioNet<T> {
const PACKET_SIZE: usize = 4096;
pub fn new(transport: T, vector_table: MsiXVectorTable<'static>) -> Self {
pub fn new(transport: T, pci_device_info: Option<PciDeviceInfo>) -> Self {
// Read MAC from device config
let device_cfg = transport
.device_cfg()
@ -94,7 +89,8 @@ impl<T: Transport> VirtioNet<T> {
mac: IrqSafeRwLock::new(mac),
pending_packets: IrqSafeRwLock::new(BTreeMap::new()),
vector_table: IrqSafeRwLock::new(vector_table),
pci_device_info,
}
}
@ -108,6 +104,9 @@ impl<T: Transport> VirtioNet<T> {
let token = unsafe { queue.add(&[&mut packet], &[]).unwrap() };
packets.insert(token, packet);
}
let mut transport = self.transport.lock();
transport.notify(0);
}
fn handle_receive_interrupt(&self, queue: usize) -> bool {
@ -130,7 +129,7 @@ impl<T: Transport> VirtioNet<T> {
}
if count != 0 {
self.transport.lock().notify(1);
self.transport.lock().notify(0);
}
count != 0
@ -177,35 +176,34 @@ impl<T: Transport> VirtioNet<T> {
receive_count: usize,
transmit_count: usize,
) -> Result<(), Error> {
let receive_vector = if let Some(pci) = self.pci_device_info.as_ref() {
pci.init_interrupts(PreferredInterruptMode::Msi)?;
let info = pci.map_interrupt(InterruptAffinity::Any, self)?;
if let Some(info) = info {
Some(info.vector as u16)
} else {
None
}
} else {
None
};
// TODO multiqueue capability
assert_eq!(receive_count, 1);
assert_eq!(transmit_count, 1);
let mut transport = self.transport.lock();
let mut vt = self.vector_table.write();
let msix_range = vt.register_range(
0,
1 + receive_count,
message_interrupt_controller(),
InterruptAffinity::Any,
self,
)?;
// TODO set the configuration vector in virtio common cfg
let receive_vector: u16 = msix_range[1].vector.try_into().unwrap();
// Setup the virtqs
let rx = VirtQueue::with_max_capacity(&mut *transport, 0, 128, Some(receive_vector))
let rx = VirtQueue::with_max_capacity(&mut *transport, 0, 128, receive_vector, false)
.map_err(cvt_error)?;
let tx = VirtQueue::with_max_capacity(&mut *transport, 1, 128, None).map_err(cvt_error)?;
let tx =
VirtQueue::with_max_capacity(&mut *transport, 1, 128, None, true).map_err(cvt_error)?;
self.queues.init(Queues {
receive: IrqSafeSpinlock::new(rx),
transmit: IrqSafeSpinlock::new(tx),
configuration_vector: msix_range[0].vector,
receive_vector: msix_range[1].vector,
receive_vector,
});
Ok(())
@ -234,20 +232,40 @@ impl<T: Transport + 'static> NetworkDevice for VirtioNet<T> {
}
}
impl<T: Transport + 'static> MsiHandler for VirtioNet<T> {
fn handle_msi(&self, vector: usize) -> bool {
let Some(queues) = self.queues.try_get() else {
return false;
};
impl<T: Transport + 'static> InterruptHandler for VirtioNet<T> {
fn handle_irq(&self, vector: Option<usize>) -> bool {
if let Some(_) = vector {
// MSI/MSI-X
let Some(queues) = self.queues.try_get() else {
return false;
};
if vector == queues.receive_vector {
self.handle_receive_interrupt(0)
if vector == queues.receive_vector.map(Into::into) {
self.handle_receive_interrupt(0)
} else {
false
}
} else {
false
// Legacy IRQ
let (queue_irq, config_irq) = self.transport.lock().read_interrupt_status();
if queue_irq {
log::debug!("Handle IRQ");
self.handle_receive_interrupt(0);
}
queue_irq || config_irq
}
}
}
// impl<T: Transport + 'static> MsiHandler for VirtioNet<T> {
// fn handle_msi(&self, vector: usize) -> bool {
//
// todo!()
// }
// }
impl<T: Transport + 'static> Device for VirtioNet<T> {
fn display_name(&self) -> &'static str {
"VirtIO Network Device"
@ -284,17 +302,8 @@ fn cvt_error(error: ygg_driver_virtio_core::error::Error) -> Error {
pub fn probe(info: &PciDeviceInfo) -> Result<&'static dyn Device, Error> {
let space = &info.config_space;
let mut msix = space.capability::<MsiXCapability>().unwrap();
let mut vt = msix.vector_table()?;
// TODO is this really needed? PCI spec says this is masked on reset, though I'm not sure if
// firmware puts it back in masked state after loading the kernel
vt.mask_all();
msix.set_function_mask(false);
msix.set_enabled(true);
let transport = PciTransport::from_config_space(space).unwrap();
let device = VirtioNet::new(transport, vt);
let device = VirtioNet::new(transport, Some(info.clone()));
let device = Box::leak(Box::new(device));

View File

@ -56,7 +56,7 @@ pub trait MessageInterruptController {
fn register_msi(
&self,
affinity: InterruptAffinity,
handler: &'static dyn MsiHandler,
handler: &'static dyn InterruptHandler,
) -> Result<MsiInfo, Error> {
let mut range = [MsiInfo {
affinity,
@ -70,7 +70,7 @@ pub trait MessageInterruptController {
fn register_msi_range(
&self,
range: &mut [MsiInfo],
handler: &'static dyn MsiHandler,
handler: &'static dyn InterruptHandler,
) -> Result<(), Error> {
Err(Error::NotImplemented)
}
@ -118,11 +118,7 @@ pub trait LocalInterruptController {
}
pub trait InterruptHandler: Device {
fn handle_irq(&self) -> bool;
}
pub trait MsiHandler: Device {
fn handle_msi(&self, vector: usize) -> bool;
fn handle_irq(&self, vector: Option<usize>) -> bool;
}
pub struct FixedInterruptTable<const SIZE: usize> {

View File

@ -1,14 +1,17 @@
use core::time::Duration;
use alloc::{string::String, sync::Arc};
use device_api::interrupt::MessageInterruptController;
use device_api::interrupt::{InterruptHandler, IrqOptions, MessageInterruptController};
use yggdrasil_abi::{
error::Error,
process::{ExitCode, Signal},
};
use crate::{
mem::{address::PhysicalAddress, device::RawDeviceMemoryMapping},
mem::{
address::PhysicalAddress,
device::{DeviceMemoryAttributes, RawDeviceMemoryMapping},
},
thread::{CurrentThread, Thread},
};
@ -30,6 +33,7 @@ extern "Rust" {
pub fn __map_device_pages(
base: PhysicalAddress,
count: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping, Error>;
pub fn __unmap_device_pages(mapping: &RawDeviceMemoryMapping);
@ -49,6 +53,11 @@ extern "Rust" {
pub fn __monotonic_timestamp() -> Result<Duration, Error>;
pub fn __message_interrupt_controller() -> &'static dyn MessageInterruptController;
pub fn __register_global_interrupt(
irq: u32,
options: IrqOptions,
handler: &'static dyn InterruptHandler,
) -> Result<(), Error>;
pub fn __signal_process_group(group_id: u32, signal: Signal);
}

View File

@ -17,7 +17,7 @@
use core::time::Duration;
use device_api::interrupt::MessageInterruptController;
use device_api::interrupt::{InterruptHandler, IrqOptions, MessageInterruptController};
use yggdrasil_abi::{error::Error, process::Signal};
extern crate alloc;
@ -35,6 +35,15 @@ pub fn message_interrupt_controller() -> &'static dyn MessageInterruptController
unsafe { api::__message_interrupt_controller() }
}
/// Registers `handler` for the platform-global (shared/GSI) interrupt line
/// `irq` with the given trigger/level `options`.
///
/// Thin safe wrapper around the kernel-provided
/// `__register_global_interrupt` hook, which also enables the line after
/// binding the handler.
#[inline]
pub fn register_global_interrupt(
    irq: u32,
    options: IrqOptions,
    handler: &'static dyn InterruptHandler,
) -> Result<(), Error> {
    unsafe { api::__register_global_interrupt(irq, options, handler) }
}
#[inline]
pub fn cpu_index() -> usize {
unsafe { api::__cpu_index() }

View File

@ -25,6 +25,18 @@ pub struct RawDeviceMemoryMapping {
pub page_count: usize,
}
/// Caching mode requested for a device memory mapping.
#[derive(Debug, Default, Clone, Copy)]
pub enum DeviceMemoryCaching {
    /// Uncached access — the default, suitable for MMIO registers
    #[default]
    None,
    /// Cacheable access — for memory-like regions where caching is safe
    Cacheable,
}
/// Attributes describing how a device memory region should be mapped by the
/// architecture layer.
#[derive(Default, Debug, Clone, Copy)]
pub struct DeviceMemoryAttributes {
    // Requested caching mode; defaults to uncached (see DeviceMemoryCaching)
    pub caching: DeviceMemoryCaching,
}
/// Describes a single untyped device memory mapping
#[derive(Clone, Debug)]
pub struct DeviceMemoryMapping {
@ -56,8 +68,12 @@ impl RawDeviceMemoryMapping {
///
/// The caller must ensure proper access synchronization, as well as the address' origin.
#[inline]
pub unsafe fn map(base: PhysicalAddress, size: usize) -> Result<Self, Error> {
__map_device_pages(base, size)
pub unsafe fn map(
base: PhysicalAddress,
size: usize,
attrs: DeviceMemoryAttributes,
) -> Result<Self, Error> {
__map_device_pages(base, size, attrs)
}
/// Consumes the device mapping, leaking its address without deallocating the translation
@ -118,8 +134,12 @@ impl DeviceMemoryMapping {
/// # Safety
///
/// The caller must ensure proper access synchronization, as well as the address' origin.
pub unsafe fn map(base: PhysicalAddress, size: usize) -> Result<Self, Error> {
let inner = RawDeviceMemoryMapping::map(base, size)?;
pub unsafe fn map(
base: PhysicalAddress,
size: usize,
attrs: DeviceMemoryAttributes,
) -> Result<Self, Error> {
let inner = RawDeviceMemoryMapping::map(base, size, attrs)?;
let address = inner.address;
Ok(Self {
inner: Arc::new(inner),
@ -160,9 +180,10 @@ impl<'a, T: Sized> DeviceMemoryIo<'a, T> {
pub unsafe fn map_slice(
base: PhysicalAddress,
count: usize,
attrs: DeviceMemoryAttributes,
) -> Result<DeviceMemoryIo<'a, [T]>, Error> {
let layout = Layout::array::<T>(count).unwrap();
let inner = RawDeviceMemoryMapping::map(base, layout.size())?;
let inner = RawDeviceMemoryMapping::map(base, layout.size(), attrs)?;
let value = core::slice::from_raw_parts(inner.address as *mut T, count);
Ok(DeviceMemoryIo {
@ -177,8 +198,11 @@ impl<'a, T: Sized> DeviceMemoryIo<'a, T> {
///
/// The caller must ensure the address actually points to a value of type `T`, as well as
/// proper access synchronization.
pub unsafe fn map(base: PhysicalAddress) -> Result<DeviceMemoryIo<'a, T>, Error> {
let inner = RawDeviceMemoryMapping::map(base, size_of::<T>())?;
pub unsafe fn map(
base: PhysicalAddress,
attrs: DeviceMemoryAttributes,
) -> Result<DeviceMemoryIo<'a, T>, Error> {
let inner = RawDeviceMemoryMapping::map(base, size_of::<T>(), attrs)?;
let value = &*(inner.address as *const T);
Ok(DeviceMemoryIo {
@ -235,9 +259,10 @@ impl<'a, T: Sized> DeviceMemoryIoMut<'a, T> {
pub unsafe fn map_slice(
base: PhysicalAddress,
len: usize,
attrs: DeviceMemoryAttributes,
) -> Result<DeviceMemoryIoMut<'a, [T]>, Error> {
let layout = Layout::array::<T>(len).unwrap();
let inner = RawDeviceMemoryMapping::map(base, layout.size())?;
let inner = RawDeviceMemoryMapping::map(base, layout.size(), attrs)?;
let value = core::slice::from_raw_parts_mut(inner.address as *mut T, len);
Ok(DeviceMemoryIoMut { inner, value })

View File

@ -51,6 +51,35 @@ impl<T> OneTimeInit<T> {
self.state.load(Ordering::Acquire) == Self::STATE_INITIALIZED
}
/// Initializes the value by calling `f` if, and only if, this cell is still
/// uninitialized, returning a reference to the freshly stored value.
///
/// Returns [`None`] without calling `f` when the cell is already initialized
/// *or* currently being initialized by another thread — this method does not
/// wait for a concurrent initializer to finish.
pub fn try_init_with<F: FnOnce() -> T>(&self, f: F) -> Option<&T> {
    if self
        .state
        .compare_exchange(
            Self::STATE_UNINITIALIZED,
            Self::STATE_INITIALIZING,
            Ordering::Release,
            Ordering::Relaxed,
        )
        .is_err()
    {
        // Already initialized, or another thread is initializing right now
        return None;
    }

    // We won the CAS above, so this thread has exclusive write access to
    // `value` until the state is moved to INITIALIZED below.
    let value = unsafe { (*self.value.get()).write(f()) };

    // Publish the value. The Release ordering makes the write above visible
    // to readers that observe STATE_INITIALIZED with Acquire. This CAS
    // cannot fail: only the thread that moved the state to INITIALIZING may
    // move it to INITIALIZED.
    self.state
        .compare_exchange(
            Self::STATE_INITIALIZING,
            Self::STATE_INITIALIZED,
            Ordering::Release,
            Ordering::Relaxed,
        )
        .unwrap();

    Some(value)
}
/// Sets the underlying value of the [OneTimeInit]. If already initialized, panics.
#[track_caller]
pub fn init(&self, value: T) -> &T {

View File

@ -18,7 +18,7 @@ use crate::{
task::{sched::CpuQueue, thread::ThreadId},
};
use super::smp::CPU_COUNT;
use super::{gic::GicPerCpu, smp::CPU_COUNT};
// use super::smp::CPU_COUNT;
@ -29,6 +29,8 @@ pub struct Cpu {
queue: OneTimeInit<&'static CpuQueue>,
thread_id: Option<ThreadId>,
pub(super) gic_per_cpu: GicPerCpu,
}
/// Handle allowing safe access to the local CPU.
@ -83,6 +85,7 @@ impl Cpu {
id: Self::local_id(),
queue: OneTimeInit::new(),
thread_id: None,
gic_per_cpu: GicPerCpu::new(),
});
TPIDR_EL1.set(Box::into_raw(this) as _);
}

View File

@ -1,5 +1,5 @@
//! ARM GICv2 Distributor registers
use device_api::interrupt::IpiDeliveryTarget;
use device_api::interrupt::{IpiDeliveryTarget, IrqLevel, IrqOptions, IrqTrigger};
use kernel_util::mem::device::DeviceMemoryIo;
use spinning_top::Spinlock;
use tock_registers::{
@ -113,6 +113,34 @@ impl Gicd {
self.banked_regs.ITARGETSR[0].read(ITARGETSR::Offset0)
}
/// Configures the trigger mode of `irq` in the distributor's GICD_ICFGRn
/// registers.
///
/// Each interrupt occupies a 2-bit `Int_config` field, 16 fields per
/// register. In GICv2 only bit [1] of the field is meaningful:
/// 0 = level-sensitive, 1 = edge-triggered; bit [0] is reserved. The GICv2
/// distributor has no polarity (active-low/high) configuration, so
/// `options.level` is ignored here.
///
/// # Panics
///
/// Panics (`todo!`) for private/banked interrupts (ICFGR0/ICFGR1), which are
/// not handled yet.
pub fn configure_irq(&self, irq: usize, options: IrqOptions) {
    // 2 bits per IRQ, 16 entries per register
    let reg = irq / 16;
    let shift = (irq % 16) * 2;

    // Int_config[1] selects the trigger: write 0b10 (= 2) for edge-triggered
    // lines, 0b00 for level-sensitive ones. Writing 1 would only touch the
    // reserved bit [0] and leave the line level-sensitive.
    let cfgr_value: u32 = match options.trigger {
        IrqTrigger::Level => 0,
        _ => 2,
    };

    match reg {
        // Private (banked) IRQs — ICFGR0/ICFGR1
        0..=1 => {
            todo!();
        }
        // Shared IRQs — ICFGR2 onwards, offset by the two banked registers
        _ => {
            let regs = self.shared_regs.lock();
            let reg = &regs.ICFGR[reg - 2];
            // Read-modify-write: clear the 2-bit field, then set the new value
            let v = reg.get() & !(0x3 << shift);
            reg.set(v | (cfgr_value << shift));
        }
    }
}
pub fn enable_irq(&self, irq: usize) {
let reg = irq >> 5;
let bit = 1u32 << (irq & 0x1F);

View File

@ -8,7 +8,8 @@ use alloc::{boxed::Box, sync::Arc};
use device_api::{
interrupt::{
ExternalInterruptController, FixedInterruptTable, InterruptHandler, InterruptTable,
IpiDeliveryTarget, IrqOptions, LocalInterruptController,
IpiDeliveryTarget, IrqOptions, LocalInterruptController, MessageInterruptController,
MsiInfo,
},
Device,
};
@ -46,16 +47,23 @@ pub struct Gic {
table: IrqSafeSpinlock<FixedInterruptTable<MAX_IRQ>>,
}
/// Per-CPU GIC information
pub struct GicPerCpu {}
impl Device for Gic {
fn display_name(&self) -> &'static str {
"ARM Generic Interrupt Controller v2"
}
unsafe fn init(&'static self) -> Result<(), Error> {
let gicd_mmio = Arc::new(RawDeviceMemoryMapping::map(self.gicd_base, 0x1000)?);
let gicd_mmio = Arc::new(RawDeviceMemoryMapping::map(
self.gicd_base,
0x1000,
Default::default(),
)?);
let gicd_mmio_shared = DeviceMemoryIo::from_raw(gicd_mmio.clone())?;
let gicd_mmio_banked = DeviceMemoryIo::from_raw(gicd_mmio)?;
let gicc_mmio = DeviceMemoryIo::map(self.gicc_base)?;
let gicc_mmio = DeviceMemoryIo::map(self.gicc_base, Default::default())?;
let gicd = Gicd::new(gicd_mmio_shared, gicd_mmio_banked);
let gicc = Gicc::new(gicc_mmio);
@ -68,6 +76,7 @@ impl Device for Gic {
ARCHITECTURE.register_external_interrupt_controller(self)?;
ARCHITECTURE.register_local_interrupt_controller(self)?;
ARCHITECTURE.register_message_interrupt_controller(self)?;
Ok(())
}
@ -79,17 +88,27 @@ impl ExternalInterruptController for Gic {
fn register_irq(
&self,
irq: IrqNumber,
_options: IrqOptions,
options: IrqOptions,
handler: &'static dyn InterruptHandler,
) -> Result<(), Error> {
let mut table = self.table.lock();
let gicd = self.gicd.get();
let index = match irq {
IrqNumber::Shared(i) => i + 32,
IrqNumber::Private(i) => i + 16,
} as usize;
debugln!("Bound irq{} to {:?}", index, handler.display_name());
debugln!(
"Bound irq{} to {:?} {:?} {:?}",
index,
handler.display_name(),
options.trigger,
options.level
);
if index >= 32 {
gicd.configure_irq(index, options);
}
table.insert(index, handler)?;
Ok(())
@ -125,7 +144,7 @@ impl ExternalInterruptController for Gic {
match table.handler(irq_number) {
Some(handler) => {
drop(table);
handler.handle_irq();
handler.handle_irq(None);
}
None => warnln!("No handler for irq{}", irq_number),
}
@ -133,6 +152,20 @@ impl ExternalInterruptController for Gic {
}
}
// MSI support is stubbed out: both entry points panic via todo!().
// NOTE(review): message-signalled interrupts on GICv2 presumably require a
// GICv2m frame (or an ITS on GICv3) — confirm what the target hardware
// exposes before implementing.
impl MessageInterruptController for Gic {
    fn handle_msi(&self, _vector: usize) {
        todo!()
    }

    fn register_msi_range(
        &self,
        _range: &mut [MsiInfo],
        _handler: &'static dyn InterruptHandler,
    ) -> Result<(), Error> {
        todo!()
    }
}
impl LocalInterruptController for Gic {
type IpiMessage = CpuMessage;
@ -174,7 +207,7 @@ impl Gic {
/// # Safety
///
/// The caller must ensure the addresses actually point to the GIC components.
pub const unsafe fn new(gicd_base: PhysicalAddress, gicc_base: PhysicalAddress) -> Self {
pub unsafe fn new(gicd_base: PhysicalAddress, gicc_base: PhysicalAddress) -> Self {
Self {
gicc: OneTimeInit::new(),
gicd: OneTimeInit::new(),
@ -185,6 +218,13 @@ impl Gic {
}
}
impl GicPerCpu {
/// Constructs per-CPU GIC data structure
pub fn new() -> Self {
Self {}
}
}
device_tree_driver! {
compatible: ["arm,cortex-a15-gic", "arm,gic-400"],
probe(dt) => {

View File

@ -10,7 +10,7 @@ use cfg_if::cfg_if;
use kernel_util::{
mem::{
address::{FromRaw, PhysicalAddress},
device::RawDeviceMemoryMapping,
device::{DeviceMemoryAttributes, RawDeviceMemoryMapping},
table::{EntryLevel, EntryLevelExt},
},
util::OneTimeInit,
@ -208,7 +208,11 @@ pub(super) unsafe fn map_heap_l2(index: usize, page: PhysicalAddress) {
}
// Device mappings
unsafe fn map_device_memory_l3(base: PhysicalAddress, count: usize) -> Result<usize, Error> {
unsafe fn map_device_memory_l3(
base: PhysicalAddress,
count: usize,
_attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
// TODO don't map pages if already mapped
'l0: for i in 0..DEVICE_MAPPING_L3_COUNT * 512 {
@ -235,7 +239,11 @@ unsafe fn map_device_memory_l3(base: PhysicalAddress, count: usize) -> Result<us
Err(Error::OutOfMemory)
}
unsafe fn map_device_memory_l2(base: PhysicalAddress, count: usize) -> Result<usize, Error> {
unsafe fn map_device_memory_l2(
base: PhysicalAddress,
count: usize,
_attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
'l0: for i in DEVICE_MAPPING_L3_COUNT..512 {
for j in 0..count {
if DEVICE_MAPPING_L2[i + j].is_present() {
@ -262,6 +270,7 @@ unsafe fn map_device_memory_l2(base: PhysicalAddress, count: usize) -> Result<us
pub(super) unsafe fn map_device_memory(
base: PhysicalAddress,
size: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping, Error> {
// debugln!("Map {}B @ {:#x}", size, base);
let l3_aligned = base.page_align_down::<L3>();
@ -274,7 +283,7 @@ pub(super) unsafe fn map_device_memory(
let l2_offset = base.page_offset::<L2>();
let page_count = (l2_offset + size).page_count::<L2>();
let base_address = map_device_memory_l2(l2_aligned, page_count)?;
let base_address = map_device_memory_l2(l2_aligned, page_count, attrs)?;
let address = base_address + l2_offset;
Ok(RawDeviceMemoryMapping {
@ -285,7 +294,7 @@ pub(super) unsafe fn map_device_memory(
})
} else {
// Just map the pages directly
let base_address = map_device_memory_l3(l3_aligned, page_count)?;
let base_address = map_device_memory_l3(l3_aligned, page_count, attrs)?;
let address = base_address + l3_offset;
Ok(RawDeviceMemoryMapping {
@ -312,8 +321,7 @@ pub(super) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping) {
assert!(DEVICE_MAPPING_L3S[l2i][l3i].is_present());
DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::INVALID;
todo!();
// intrinsics::flush_tlb_entry(page);
tlb_flush_vaae1(page);
}
}
L2::SIZE => todo!(),
@ -321,6 +329,14 @@ pub(super) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping) {
}
}
/// Invalidates the TLB entry for the virtual address `page` across all ASIDs
/// at EL1 (`TLBI VAAE1`).
#[inline]
fn tlb_flush_vaae1(mut page: usize) {
    // The instruction operand is the VA shifted right by 12 (4KiB page bits)
    page >>= 12;
    unsafe {
        // SAFETY: invalidating a single TLB entry cannot violate memory
        // safety; at worst it forces a translation-table re-walk.
        core::arch::asm!("tlbi vaae1, {page}", page = in(reg) page);
    }
}
/// (BSP-early init) loads precomputed kernel mapping tables for the kernel to jump to "higher-half"
///
/// # Safety

View File

@ -14,7 +14,10 @@ use crate::mem::{
table::{EntryLevelDrop, MapAttributes, NextPageTable},
};
use super::table::{PageEntry, PageTable, L1, L2, L3};
use super::{
table::{PageEntry, PageTable, L1, L2, L3},
tlb_flush_vaae1,
};
/// AArch64 implementation of a process address space table
#[repr(C)]
@ -141,11 +144,3 @@ impl Drop for ProcessAddressSpaceImpl {
}
}
}
/// Invalidates the TLB entry for the virtual address `page` across all ASIDs
/// at EL1 (`TLBI VAAE1`).
#[inline]
fn tlb_flush_vaae1(mut page: usize) {
    // The instruction operand is the VA shifted right by 12 (4KiB page bits)
    page >>= 12;
    unsafe {
        // SAFETY: invalidating a single TLB entry cannot violate memory
        // safety; at worst it forces a translation-table re-walk.
        core::arch::asm!("tlbi vaae1, {page}", page = in(reg) page);
    }
}

View File

@ -7,7 +7,10 @@ use core::sync::atomic::Ordering;
use aarch64_cpu::registers::{CNTP_CTL_EL0, CNTP_TVAL_EL0, DAIF};
use abi::error::Error;
use device_api::{
interrupt::{ExternalInterruptController, IpiDeliveryTarget, LocalInterruptController},
interrupt::{
ExternalInterruptController, IpiDeliveryTarget, LocalInterruptController,
MessageInterruptController,
},
timer::MonotonicTimestampProviderDevice,
ResetDevice,
};
@ -16,13 +19,14 @@ use git_version::git_version;
use kernel_util::{
mem::{
address::{FromRaw, IntoRaw, PhysicalAddress},
device::RawDeviceMemoryMapping,
device::{DeviceMemoryAttributes, RawDeviceMemoryMapping},
pointer::PhysicalRef,
table::{EntryLevel, EntryLevelExt},
},
util::OneTimeInit,
};
use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
use ygg_driver_pci::PciBusManager;
use crate::{
arch::aarch64::{
@ -72,6 +76,7 @@ pub struct AArch64 {
lintc: OneTimeInit<&'static dyn LocalInterruptController<IpiMessage = CpuMessage>>,
xintc: OneTimeInit<&'static dyn ExternalInterruptController<IrqNumber = IrqNumber>>,
msi_intc: OneTimeInit<&'static dyn MessageInterruptController>,
mtimer: OneTimeInit<&'static dyn MonotonicTimestampProviderDevice>,
@ -116,8 +121,9 @@ impl Architecture for AArch64 {
&self,
base: PhysicalAddress,
size: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping, Error> {
mem::map_device_memory(base, size)
mem::map_device_memory(base, size, attrs)
}
unsafe fn unmap_device_memory(&self, map: &RawDeviceMemoryMapping) {
@ -178,6 +184,10 @@ impl Architecture for AArch64 {
*self.xintc.get()
}
fn message_interrupt_controller(&'static self) -> &'static dyn MessageInterruptController {
*self.msi_intc.get()
}
fn register_local_interrupt_controller(
&self,
intc: &'static dyn LocalInterruptController<IpiMessage = super::CpuMessage>,
@ -194,6 +204,14 @@ impl Architecture for AArch64 {
Ok(())
}
fn register_message_interrupt_controller(
&self,
intc: &'static dyn MessageInterruptController,
) -> Result<(), Error> {
self.msi_intc.init(intc);
Ok(())
}
fn monotonic_timer(&'static self) -> &'static dyn MonotonicTimestampProviderDevice {
*self.mtimer.get()
}
@ -334,6 +352,13 @@ impl AArch64 {
Cpu::init_local();
if is_bsp {
ygg_driver_pci::register_vendor_driver(
"Virtio PCI Network Device",
0x1AF4,
0x1000,
ygg_driver_virtio_net::probe,
);
let dt = self.dt.get();
let address_cells = dt.address_cells();
@ -364,6 +389,8 @@ impl AArch64 {
);
infoln!("Initializing aarch64 platform");
dt.dump(crate::debug::LogLevel::Debug);
let nodes = dt.root().children();
if let Err(error) =
devtree::enumerate_dt(address_cells, size_cells, nodes, |_, probe| {
@ -402,6 +429,8 @@ impl AArch64 {
device::manager_lock().devices().for_each(|dev| {
infoln!("* {:?}", dev.display_name());
});
PciBusManager::setup_bus_devices()?;
} else {
// BSP already initialized everything needed
// Setup timer and local interrupt controller
@ -431,6 +460,7 @@ pub static ARCHITECTURE: AArch64 = AArch64 {
lintc: OneTimeInit::new(),
xintc: OneTimeInit::new(),
msi_intc: OneTimeInit::new(),
mtimer: OneTimeInit::new(),
};

View File

@ -25,7 +25,7 @@ pub struct ArmTimer {
pub const TICK_INTERVAL: u64 = 1000000;
impl InterruptHandler for ArmTimer {
fn handle_irq(&self) -> bool {
fn handle_irq(&self, _vector: Option<usize>) -> bool {
CNTP_TVAL_EL0.set(TICK_INTERVAL);
let now = self.monotonic_timestamp().unwrap();

View File

@ -24,14 +24,18 @@ macro_rules! absolute_address {
use cfg_if::cfg_if;
use device_api::{
interrupt::{
ExternalInterruptController, IpiDeliveryTarget, LocalInterruptController,
MessageInterruptController,
ExternalInterruptController, InterruptHandler, IpiDeliveryTarget, IrqOptions,
LocalInterruptController, MessageInterruptController,
},
timer::MonotonicTimestampProviderDevice,
ResetDevice,
};
use kernel_util::{
mem::{address::PhysicalAddress, device::RawDeviceMemoryMapping, table::EntryLevel},
mem::{
address::PhysicalAddress,
device::{DeviceMemoryAttributes, RawDeviceMemoryMapping},
table::EntryLevel,
},
sync::IrqGuard,
};
@ -98,6 +102,7 @@ pub trait Architecture {
&self,
base: PhysicalAddress,
size: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping, Error>;
/// Removes the provided mapping from the kernel's translation tables.
@ -339,8 +344,9 @@ fn __physicalize(addr: usize) -> u64 {
fn __map_device_pages(
base: PhysicalAddress,
count: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping, Error> {
unsafe { ARCHITECTURE.map_device_memory(base, count) }
unsafe { ARCHITECTURE.map_device_memory(base, count, attrs) }
}
#[no_mangle]
fn __unmap_device_pages(mapping: &RawDeviceMemoryMapping) {
@ -356,3 +362,28 @@ fn __monotonic_timestamp() -> Result<Duration, Error> {
fn __message_interrupt_controller() -> &'static dyn MessageInterruptController {
ARCHITECTURE.message_interrupt_controller()
}
/// Kernel-side implementation of the `kernel_util` global interrupt hook:
/// binds `handler` to the platform's external interrupt controller for line
/// `irq` and enables the line.
#[no_mangle]
fn __register_global_interrupt(
    irq: u32,
    options: IrqOptions,
    handler: &'static dyn InterruptHandler,
) -> Result<(), Error> {
    let intc = ARCHITECTURE.external_interrupt_controller();

    // Translate the raw line number into the platform-specific IRQ type:
    // a shared (SPI) number on aarch64, a GSI on x86_64
    let irq = {
        #[cfg(target_arch = "aarch64")]
        {
            aarch64::IrqNumber::Shared(irq)
        }
        #[cfg(target_arch = "x86_64")]
        {
            // NOTE(review): try_into().unwrap() panics if the GSI exceeds the
            // narrower target type — confirm callers never pass such values
            x86_64::IrqNumber::Gsi(irq.try_into().unwrap())
        }
    };

    intc.register_irq(irq, options, handler)?;
    intc.enable_irq(irq)?;

    Ok(())
}

View File

@ -52,7 +52,7 @@ impl Device for SciHandler {
}
impl InterruptHandler for SciHandler {
fn handle_irq(&self) -> bool {
fn handle_irq(&self, _vector: Option<usize>) -> bool {
log::trace!("ACPI SCI received");
ACPI_SYSTEM.get().lock().handle_sci();
true

View File

@ -222,7 +222,7 @@ impl ExternalInterruptController for IoApic {
let table = self.table.lock();
if let Some(handler) = table.handler(gsi) {
handler.handle_irq();
handler.handle_irq(None);
} else {
warnln!("No handler set for GSI #{}", gsi);
}
@ -280,7 +280,10 @@ impl IoApic {
// };
// let mapping = unsafe { DeviceMemoryMapping::map(base, size) };
let regs = unsafe {
DeviceMemoryIo::<'_, Regs>::map(PhysicalAddress::from_raw(ioapic.address as u64))?
DeviceMemoryIo::<'_, Regs>::map(
PhysicalAddress::from_raw(ioapic.address as u64),
Default::default(),
)?
};
let max_gsi = (regs.read(REG_IOAPIC_VERSION) >> 16) & 0xFF;

View File

@ -5,8 +5,8 @@ use abi::error::Error;
use alloc::{vec, vec::Vec};
use device_api::{
interrupt::{
InterruptAffinity, IpiDeliveryTarget, LocalInterruptController, MessageInterruptController,
MsiHandler, MsiInfo,
InterruptAffinity, InterruptHandler, IpiDeliveryTarget, LocalInterruptController,
MessageInterruptController, MsiInfo,
},
Device,
};
@ -147,7 +147,7 @@ register_structs! {
pub struct LocalApic {
regs: DeviceMemoryIo<'static, Regs>,
id: u32,
msi_vectors: Vec<IrqSafeSpinlock<Vec<&'static dyn MsiHandler>>>,
msi_vectors: Vec<IrqSafeSpinlock<Vec<&'static dyn InterruptHandler>>>,
}
unsafe impl Send for LocalApic {}
@ -172,7 +172,7 @@ impl MessageInterruptController for LocalApic {
};
drop(table);
if handler.handle_msi(vector) {
if handler.handle_irq(Some(vector)) {
break;
}
@ -183,7 +183,7 @@ impl MessageInterruptController for LocalApic {
fn register_msi_range(
&self,
range: &mut [MsiInfo],
handler: &'static dyn MsiHandler,
handler: &'static dyn InterruptHandler,
) -> Result<(), Error> {
let _guard = IrqGuard::acquire();
@ -265,7 +265,7 @@ impl LocalApic {
///
/// Only meant to be called once per processor during their init.
pub unsafe fn new() -> Self {
let regs = DeviceMemoryIo::<Regs>::map(Self::base()).unwrap();
let regs = DeviceMemoryIo::<Regs>::map(Self::base(), Default::default()).unwrap();
let id = regs.Id.read(Id::ApicId);

View File

@ -9,7 +9,7 @@ use abi::error::Error;
use kernel_util::{
mem::{
address::{FromRaw, PhysicalAddress},
device::RawDeviceMemoryMapping,
device::{DeviceMemoryAttributes, RawDeviceMemoryMapping},
table::EntryLevelExt,
},
util::OneTimeInit,
@ -114,7 +114,11 @@ unsafe fn unmap_early_page(address: usize) {
}
// Device mappings
unsafe fn map_device_memory_l3(base: PhysicalAddress, count: usize) -> Result<usize, Error> {
unsafe fn map_device_memory_l3(
base: PhysicalAddress,
count: usize,
_attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
// TODO don't map pages if already mapped
'l0: for i in 0..DEVICE_MAPPING_L3_COUNT * 512 {
@ -142,7 +146,11 @@ unsafe fn map_device_memory_l3(base: PhysicalAddress, count: usize) -> Result<us
Err(Error::OutOfMemory)
}
unsafe fn map_device_memory_l2(base: PhysicalAddress, count: usize) -> Result<usize, Error> {
unsafe fn map_device_memory_l2(
base: PhysicalAddress,
count: usize,
_attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
'l0: for i in DEVICE_MAPPING_L3_COUNT..512 {
for j in 0..count {
if DEVICE_MAPPING_L2[i + j].is_present() {
@ -170,6 +178,7 @@ unsafe fn map_device_memory_l2(base: PhysicalAddress, count: usize) -> Result<us
pub(super) unsafe fn map_device_memory(
base: PhysicalAddress,
size: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping, Error> {
// debugln!("Map {}B @ {:#x}", size, base);
let l3_aligned = base.page_align_down::<L3>();
@ -182,7 +191,7 @@ pub(super) unsafe fn map_device_memory(
let l2_offset = base.page_offset::<L2>();
let page_count = (l2_offset + size).page_count::<L2>();
let base_address = map_device_memory_l2(l2_aligned, page_count)?;
let base_address = map_device_memory_l2(l2_aligned, page_count, attrs)?;
let address = base_address + l2_offset;
Ok(RawDeviceMemoryMapping {
@ -193,7 +202,7 @@ pub(super) unsafe fn map_device_memory(
})
} else {
// Just map the pages directly
let base_address = map_device_memory_l3(l3_aligned, page_count)?;
let base_address = map_device_memory_l3(l3_aligned, page_count, attrs)?;
let address = base_address + l3_offset;
Ok(RawDeviceMemoryMapping {
@ -311,23 +320,6 @@ fn clone_kernel_tables(dst: &mut PageTable<L0>) {
}
}
pub(super) fn translate_kernel_address(address: usize) -> Option<PhysicalAddress> {
let l0i = address.page_index::<L0>();
let l1i = address.page_index::<L1>();
// let l2i = address.page_index::<L2>();
// let l3i = address.page_index::<L3>();
match l0i {
KERNEL_L0_INDEX => match l1i {
HEAP_MAPPING_L1I => Some(PhysicalAddress::from_raw(address - HEAP_MAPPING_OFFSET)),
DEVICE_MAPPING_L1I => todo!(),
_ => todo!(),
},
RAM_MAPPING_L0I => todo!(),
_ => None,
}
}
/// Sets up the following memory map:
/// ...: KERNEL_TABLES.l0:
/// * 0xFFFFFF0000000000 .. 0xFFFFFFFF8000000000 : RAM_MAPPING_L1

View File

@ -14,7 +14,7 @@ use kernel_fs::devfs;
use kernel_util::{
mem::{
address::{FromRaw, IntoRaw, PhysicalAddress},
device::RawDeviceMemoryMapping,
device::{DeviceMemoryAttributes, RawDeviceMemoryMapping},
table::EntryLevelExt,
},
sync::SpinFence,
@ -164,8 +164,9 @@ impl Architecture for X86_64 {
&self,
base: PhysicalAddress,
size: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping, Error> {
mem::map_device_memory(base, size)
mem::map_device_memory(base, size, attrs)
}
#[inline]

View File

@ -42,7 +42,7 @@ impl MonotonicTimestampProviderDevice for I8253 {
}
impl InterruptHandler for I8253 {
fn handle_irq(&self) -> bool {
fn handle_irq(&self, _vector: Option<usize>) -> bool {
let mut inner = self.inner.lock();
inner.tick += 1;

View File

@ -77,7 +77,7 @@ impl Inner {
}
impl InterruptHandler for PS2Controller {
fn handle_irq(&self) -> bool {
fn handle_irq(&self, _vector: Option<usize>) -> bool {
let mut count = 0;
let mut inner = self.inner.lock();

189
src/device/bus/dt_pci.rs Normal file
View File

@ -0,0 +1,189 @@
//! PCI bus device wrapper for device tree
use alloc::{collections::BTreeMap, vec::Vec};
use device_api::interrupt::{IrqLevel, IrqOptions, IrqTrigger};
use kernel_util::mem::address::{FromRaw, PhysicalAddress};
use ygg_driver_pci::{
device::{PciInterrupt, PciInterruptPin, PciInterruptRoute},
PciAddress, PciAddressRange, PciBusManager, PciRangeType,
};
use crate::{
device::devtree::{self, DevTreeIndexNodeExt, DevTreeIndexPropExt, DevTreeNodeInfo},
device_tree_driver,
};
/// Parses the `ranges` property of a PCI host-bridge device-tree node into a list of
/// [PciAddressRange]s describing how PCI bus addresses map onto host physical addresses.
///
/// Returns an empty [Vec] if the node has no `ranges` property. Each range entry consists of
/// `dt.address_cells + #address-cells + #size-cells` 32-bit cells, laid out per the devicetree
/// PCI bus binding: (phys.hi, phys.mid, phys.lo), host address, size.
///
/// # Panics
///
/// Panics if the property length is not a whole number of range entries, or if the node uses
/// cell counts other than 3 PCI address cells / 2 host address cells / 2 size cells
/// (the only layouts currently implemented).
fn extract_ranges(dt: &DevTreeNodeInfo) -> Vec<PciAddressRange> {
    let Some(ranges) = devtree::find_prop(&dt.node, "ranges") else {
        return Vec::new();
    };

    let pci_address_cells = dt.node.address_cells();
    let pci_size_cells = dt.node.size_cells();

    let cells_per_range = dt.address_cells + pci_address_cells + pci_size_cells;
    // len() is in bytes and each cell is 4 bytes wide, so validate against the byte size of a
    // whole entry (matches the check in extract_interrupt_map)
    assert_eq!(ranges.len() % (4 * cells_per_range), 0);
    let range_count = ranges.len() / (cells_per_range * 4);

    let mut result = Vec::new();

    for i in 0..range_count {
        // phys.hi: bits [25:24] select the address space, bits [23:16] the bus number
        let ty_bits = ranges.cell1_array_item(i * cells_per_range, 1).unwrap();
        let ty = match (ty_bits >> 24) & 0x3 {
            0 => PciRangeType::Configuration,
            1 => PciRangeType::Io,
            2 => PciRangeType::Memory32,
            3 => PciRangeType::Memory64,
            _ => unreachable!(),
        };
        let bus_number = (ty_bits >> 16) as u8;

        // PCI-side base address: phys.mid/phys.lo form a 64-bit value
        let pci_base = match pci_address_cells {
            3 => {
                let hi = ranges.cell1_array_item(i * cells_per_range + 1, 1).unwrap();
                let lo = ranges.cell1_array_item(i * cells_per_range + 2, 1).unwrap();

                (hi << 32) | lo
            }
            _ => unimplemented!(),
        };

        // Host-side physical base address the PCI range is mapped at
        let host_base = PhysicalAddress::from_raw(match dt.address_cells {
            2 => {
                let hi = ranges
                    .cell1_array_item(i * cells_per_range + pci_address_cells, 1)
                    .unwrap();
                let lo = ranges
                    .cell1_array_item(i * cells_per_range + pci_address_cells + 1, 1)
                    .unwrap();

                (hi << 32) | lo
            }
            _ => unimplemented!(),
        });

        // Size of the range in bytes
        let size = match pci_size_cells {
            2 => {
                let hi = ranges
                    .cell1_array_item(
                        i * cells_per_range + pci_address_cells + dt.address_cells,
                        1,
                    )
                    .unwrap();
                let lo = ranges
                    .cell1_array_item(
                        i * cells_per_range + pci_address_cells + dt.address_cells + 1,
                        1,
                    )
                    .unwrap();

                (hi << 32) | lo
            }
            _ => unimplemented!(),
        } as usize;

        result.push(PciAddressRange {
            ty,
            bus_number,
            host_base,
            pci_base,
            size,
        });
    }

    result
}
fn extract_interrupt_map(dt: &DevTreeNodeInfo) -> BTreeMap<PciInterrupt, PciInterruptRoute> {
// let interrupt_map_mask = devtree::find_prop(&dt.node, "interrupt-map").unwrap();
let interrupt_map = devtree::find_prop(&dt.node, "interrupt-map").unwrap();
let pci_address_cells = dt.node.address_cells();
// TODO replace 3 with interrupt-cells in interrupt-controller
let cells_per_imap = pci_address_cells + /* Pin */ 1 + /* #interrupt-cells in interrupt-controller */ 3 + /* Interrupt Controller Data */ 3;
assert_eq!(interrupt_map.len() % (4 * cells_per_imap), 0);
let mut imap = BTreeMap::new();
for i in 0..interrupt_map.len() / (4 * cells_per_imap) {
let pci_address_0 = interrupt_map
.cell1_array_item(i * cells_per_imap, 1)
.unwrap();
let bus = (pci_address_0 >> 24) as u8;
let device = ((pci_address_0 >> 11) & 0x1F) as u8;
let function = ((pci_address_0 >> 8) & 0x7) as u8;
let address = PciAddress::for_function(0, bus, device, function);
let pin = interrupt_map
.cell1_array_item(i * cells_per_imap + pci_address_cells, 1)
.unwrap() as u32;
let Ok(pin) = PciInterruptPin::try_from(pin) else {
continue;
};
let _interrupt_ty = interrupt_map
.cell1_array_item(i * cells_per_imap + pci_address_cells + 4, 1)
.unwrap();
let interrupt_number = interrupt_map
.cell1_array_item(i * cells_per_imap + pci_address_cells + 5, 1)
.unwrap();
let interrupt_mode = interrupt_map
.cell1_array_item(i * cells_per_imap + pci_address_cells + 6, 1)
.unwrap();
let (trigger, level) = match interrupt_mode {
0x04 => (IrqTrigger::Level, IrqLevel::ActiveHigh),
_ => todo!(),
};
let src = PciInterrupt { address, pin };
let dst = PciInterruptRoute {
number: interrupt_number as _,
options: IrqOptions { trigger, level },
};
// TODO use phandle for interrupt-controller
// TODO interrupt-controller-specific decoding of idata
// TODO don't ignore interrupt_ty, don't assume they're all SPIs
imap.insert(src, dst);
}
imap
}
// Probe entry point for generic ECAM PCI host bridges described in the device tree.
// Extracts the configuration-space base, bus number range, address ranges and legacy
// interrupt routing, then registers the segment with the global PCI bus manager.
device_tree_driver! {
    compatible: ["pci-host-ecam-generic"],
    probe(dt) => {
        let reg = devtree::find_prop(&dt.node, "reg")?;
        let bus_range = devtree::find_prop(&dt.node, "bus-range")?;

        // ECAM configuration space base is the first (and only) "reg" entry
        let (cfg_space_base, _) = reg
            .cell2_array_item(0, dt.address_cells, dt.size_cells)
            .unwrap();
        let cfg_space_base = PhysicalAddress::from_raw(cfg_space_base);

        let bus_start = bus_range.cell1_array_item(0, 1)? as u8;
        let bus_end = bus_range.cell1_array_item(1, 1)? as u8;

        let ranges = extract_ranges(dt);
        let interrupt_map = extract_interrupt_map(dt);

        // A bridge without usable address ranges cannot be driven
        if ranges.is_empty() {
            return None;
        }

        // NOTE(review): DT "bus-range" is inclusive, but bus_start..bus_end excludes the
        // last bus — confirm whether add_segment_from_device_tree expects an exclusive range
        PciBusManager::add_segment_from_device_tree(
            cfg_space_base,
            bus_start..bus_end,
            ranges,
            interrupt_map
        ).ok();

        // The bus manager owns the segment; no per-node device object is returned
        None
    }
}

View File

@ -1,4 +1,6 @@
//! Bus devices
#[cfg(target_arch = "aarch64")]
pub mod dt_pci;
#[cfg(feature = "device-tree")]
pub mod simple_bus;

View File

@ -88,6 +88,9 @@ pub trait DevTreeIndexPropExt {
/// Reads a cell value from the property at given offset
fn read_cell(&self, u32_offset: usize, cell_size: usize) -> Option<u64>;
/// Returns the length in bytes
fn len(&self) -> usize;
}
/// Helper trait to provide extra functionality for [DevTreeIndexNode]
@ -248,6 +251,19 @@ impl<'a, 'i, 'dt> DevTreeIndexNodeExt for DevTreeIndexNode<'a, 'i, 'dt> {
"compatible" | "stdout-path" => {
log_print_raw!(level, "{:?}", prop.str().unwrap_or("<???>"))
}
"#interrupt-cells" | "#size-cells" | "#address-cells" => {
log_print_raw!(level, "{:#x}", prop.u32(0).unwrap_or(0xFFFFFFFF));
}
_ if prop.length() % 4 == 0 => {
log_print_raw!(level, "[");
for i in 0..prop.length() / 4 {
if i != 0 {
log_print_raw!(level, ", ");
}
log_print_raw!(level, "{:#010x}", prop.u32(i).unwrap());
}
log_print_raw!(level, "]");
}
_ => log_print_raw!(level, "{:x?}", prop.raw()),
}
@ -287,6 +303,10 @@ impl<'a, 'i, 'dt> DevTreeIndexPropExt for DevTreeIndexProp<'a, 'i, 'dt> {
let cell1 = self.read_cell(u32_index + cells0, cells1)?;
Some((cell0, cell1))
}
fn len(&self) -> usize {
self.length()
}
}
impl<'a> FdtMemoryRegionIter<'a> {

View File

@ -9,7 +9,10 @@ use abi::{error::Error, io::DeviceRequest};
use device_api::Device;
use kernel_util::{
mem::{
address::PhysicalAddress, device::RawDeviceMemoryMapping, table::EntryLevel, PageProvider,
address::PhysicalAddress,
device::{DeviceMemoryAttributes, DeviceMemoryCaching, RawDeviceMemoryMapping},
table::EntryLevel,
PageProvider,
},
sync::IrqSafeSpinlock,
};
@ -56,7 +59,16 @@ impl LinearFramebuffer {
width: u32,
height: u32,
) -> Result<Self, Error> {
let base = unsafe { RawDeviceMemoryMapping::map(phys_base, size) }?.leak();
let base = unsafe {
RawDeviceMemoryMapping::map(
phys_base,
size,
DeviceMemoryAttributes {
caching: DeviceMemoryCaching::Cacheable,
},
)
}?
.leak();
let inner = Inner {
base,

View File

@ -6,8 +6,6 @@ use kernel_util::sync::{IrqSafeSpinlock, IrqSafeSpinlockGuard};
#[cfg(target_arch = "aarch64")]
pub mod devtree;
// TODO bus device support on aarch64
#[cfg(not(target_arch = "aarch64"))]
pub mod bus;
pub mod display;

View File

@ -176,7 +176,7 @@ impl SerialDevice for Pl011 {
}
impl InterruptHandler for Pl011 {
fn handle_irq(&self) -> bool {
fn handle_irq(&self, _vector: Option<usize>) -> bool {
let inner = self.inner.get().lock();
inner.regs.ICR.write(ICR::ALL::CLEAR);
@ -214,7 +214,7 @@ impl Device for Pl011 {
unsafe fn init(&'static self) -> Result<(), Error> {
let mut inner = Pl011Inner {
regs: DeviceMemoryIo::map(self.base)?,
regs: DeviceMemoryIo::map(self.base, Default::default())?,
};
inner.init();

View File

@ -29,7 +29,7 @@ pub const KERNEL_VIRT_OFFSET: usize = ArchitectureImpl::KERNEL_VIRT_OFFSET;
/// The caller must ensure the correct origin of the address, its alignment and that the access is
/// properly synchronized.
pub unsafe fn read_memory<T>(address: PhysicalAddress) -> T {
let io = DeviceMemoryMapping::map(address, size_of::<T>()).unwrap();
let io = DeviceMemoryMapping::map(address, size_of::<T>(), Default::default()).unwrap();
let address = io.address();
if address % align_of::<T>() == 0 {
@ -46,7 +46,7 @@ pub unsafe fn read_memory<T>(address: PhysicalAddress) -> T {
/// The caller must ensure the correct origin of the address, its alignment and that the access is
/// properly synchronized.
pub unsafe fn write_memory<T>(address: PhysicalAddress, value: T) {
let io = DeviceMemoryMapping::map(address, size_of::<T>()).unwrap();
let io = DeviceMemoryMapping::map(address, size_of::<T>(), Default::default()).unwrap();
let address = io.address();
if address % align_of::<T>() == 0 {

View File

@ -159,7 +159,7 @@ impl Inner {
&mut self,
address: usize,
page_count: usize,
backing: VirtualRangeBacking,
backing: &VirtualRangeBacking,
attributes: MapAttributes,
) -> Result<(), (usize, Error)> {
for i in 0..page_count {
@ -173,6 +173,7 @@ impl Inner {
};
if let Err(err) = unsafe { self.table.map_page(virt, phys, attributes) } {
backing.release_page(offset, phys).unwrap();
return Err((i, err));
}
}
@ -180,6 +181,31 @@ impl Inner {
Ok(())
}
unsafe fn rollback_allocation(
&mut self,
start_pfn: usize,
pages_mapped: usize,
region_size: usize,
) {
let unmap_range = start_pfn..start_pfn + pages_mapped;
self.allocator
.free(start_pfn, region_size, |origin_pfn, pfn_range, backing| {
for pfn in pfn_range {
if unmap_range.contains(&pfn) {
let offset = (pfn - origin_pfn) * ProcessAddressSpaceImpl::PAGE_SIZE;
let virt = pfn * ProcessAddressSpaceImpl::PAGE_SIZE;
let phys = self.table.unmap_page(virt)?;
backing.release_page(offset as u64, phys)?;
}
}
Ok(())
})
.unwrap();
}
fn map_range(
&mut self,
address: usize,
@ -192,9 +218,13 @@ impl Inner {
self.allocator
.insert(start_pfn, page_count, backing.clone())?;
if let Err(_e) = self.try_map_pages(address, page_count, backing, attributes) {
// TODO rollback & remove the range
todo!();
if let Err((mapped, error)) = self.try_map_pages(address, page_count, &backing, attributes)
{
debug_assert!(mapped < page_count);
unsafe {
self.rollback_allocation(start_pfn, mapped, page_count);
}
return Err(error);
};
Ok(())
@ -211,15 +241,24 @@ impl Inner {
let phys = match backing.get_page(0) {
Ok(page) => page,
Err(_err) => {
// TODO rollback
todo!();
Err(err) => {
// Do nothing, as the page has not been allocated to this range yet
self.allocator.free(start_pfn, 1, |_, _, _| Ok(())).unwrap();
return Err(err);
}
};
if let Err(_err) = unsafe { self.table.map_page(address, phys, attributes) } {
// TODO rollback
todo!();
if let Err(err) = unsafe { self.table.map_page(address, phys, attributes) } {
self.allocator
.free(start_pfn, 1, |_, _, _| {
// Deallocate the page, but do not unmap, as the mapping failed
unsafe {
phys::free_page(phys);
}
Ok(())
})
.unwrap();
return Err(err);
}
Ok(phys)
@ -234,9 +273,13 @@ impl Inner {
let start_pfn = self.allocator.allocate(page_count, backing.clone())?;
let address = start_pfn * ProcessAddressSpaceImpl::PAGE_SIZE;
if let Err(_e) = self.try_map_pages(address, page_count, backing, attributes) {
// TODO rollback
todo!("alloc_range({})", page_count);
if let Err((mapped, error)) = self.try_map_pages(address, page_count, &backing, attributes)
{
debug_assert!(mapped < page_count);
unsafe {
self.rollback_allocation(start_pfn, mapped, page_count);
}
return Err(error);
};
Ok(address)