refactor: fix all warnings

parent 49942563ef
commit e5aef02981

Cargo.lock (generated)
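Most of the hunks below are mechanical fixes for two recurring warnings: open-coded rounding-up division (replaced with `div_ceil`) and impl-header lifetime parameters that can be elided. A self-contained sketch of both patterns; the names here (`pages`, `Guard`) are illustrative and not taken from the repository:

```rust
// Open-coded rounding-up division vs. the standard `div_ceil`.
fn pages_manual(bytes: usize, page: usize) -> usize {
    (bytes + page - 1) / page // what the old code did; can overflow near usize::MAX
}

fn pages(bytes: usize, page: usize) -> usize {
    bytes.div_ceil(page) // what the diff switches to
}

// A guard type whose impl blocks used to name a lifetime they never use;
// eliding it with '_ is what the lint asks for.
struct Guard<'a, T>(&'a mut u32, core::marker::PhantomData<T>);

impl<T> Drop for Guard<'_, T> {
    fn drop(&mut self) {
        *self.0 += 1; // release-side bookkeeping, just to give the guard a job
    }
}

fn main() {
    assert_eq!(pages_manual(4097, 4096), pages(4097, 4096));
    let mut value = 0u32;
    let _guard: Guard<'_, ()> = Guard(&mut value, core::marker::PhantomData);
}
```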
@@ -2188,6 +2188,7 @@ dependencies = [
"bitflags 2.6.0",
"device-api",
"libk-mm",
"libk-util",
"log",
"tock-registers 0.9.0",
"ygg_driver_pci",

@@ -76,3 +76,8 @@ version = "0.7.2"
git = "https://git.alnyan.me/yggdrasil/yggdrasil-elf.git"
default-features = false
features = ["no_std_stream"]
[workspace.lints.rust]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }
[workspace.lints.clippy]
derivable_impls = { level = "allow" }

@@ -85,5 +85,5 @@ kernel-arch-aarch64.workspace = true
default = ["fb_console"]
fb_console = []
[lints.rust]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }
[lints]
workspace = true

@@ -60,10 +60,20 @@ impl FpContext {
}
}
/// Stores the FPU context into the `this` pointer.
///
/// # Safety
///
/// It is up to the caller to ensure `this` is a valid pointer to store the FPU context in.
pub unsafe fn store(this: *mut Self) {
__aarch64_fp_store_context(this as _)
}
/// Loads the FPU with the context stored in `this` pointer.
///
/// # Safety
///
/// It is up to the caller to ensure `this` is a valid pointer to load the FPU context from.
pub unsafe fn restore(this: *const Self) {
__aarch64_fp_restore_context(this as _)
}
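The `# Safety` sections added above shift the pointer-validity requirement onto the caller of `store`/`restore`. A hedged, self-contained sketch of upholding such a contract; `Ctx` and `save_fp` are stand-ins, not the kernel's actual types:

```rust
/// Stand-in for an FPU context blob; the real type wraps the SIMD register file.
struct Ctx([u64; 4]);

/// # Safety
/// `this` must be a valid, properly aligned pointer to a `Ctx` the caller owns.
unsafe fn save_fp(this: *mut Ctx) {
    // The real implementation would store FPU registers; write a marker instead.
    (*this).0 = [0xA5; 4];
}

fn main() {
    let mut ctx = Ctx([0; 4]);
    // The caller owns `ctx`, so the pointer is valid and aligned: contract upheld.
    unsafe { save_fp(&mut ctx) };
    assert_eq!(ctx.0, [0xA5; 4]);
}
```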
@@ -1,5 +1,6 @@
#![no_std]
#![feature(naked_functions, trait_upcasting)]
#![allow(clippy::new_without_default)]
extern crate alloc;

@@ -126,7 +126,7 @@ impl<'a, T: Sized> EarlyMapping<'a, T> {
let layout = Layout::array::<T>(len).unwrap();
let aligned = physical.page_align_down::<L3>();
let offset = physical.page_offset::<L3>();
let page_count = (offset + layout.size() + L3::SIZE - 1) / L3::SIZE;
let page_count = (offset + layout.size()).div_ceil(L3::SIZE);
let virt = map_early_pages(aligned, page_count)?;
let value = core::slice::from_raw_parts_mut((virt + offset) as *mut T, len);

@@ -135,7 +135,7 @@ impl<'a, T: Sized> EarlyMapping<'a, T> {
}
}
impl<'a, T: ?Sized> Deref for EarlyMapping<'a, T> {
impl<T: ?Sized> Deref for EarlyMapping<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {

@@ -143,13 +143,13 @@ impl<'a, T: ?Sized> Deref for EarlyMapping<'a, T> {
}
}
impl<'a, T: ?Sized> DerefMut for EarlyMapping<'a, T> {
impl<T: ?Sized> DerefMut for EarlyMapping<'_, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.value
}
}
impl<'a, T: ?Sized> Drop for EarlyMapping<'a, T> {
impl<T: ?Sized> Drop for EarlyMapping<'_, T> {
fn drop(&mut self) {
let address = (self.value as *mut T).addr() & !(L3::SIZE - 1);
@@ -7,5 +7,5 @@ edition = "2021"
yggdrasil-abi.workspace = true
device-api = { workspace = true, features = ["derive"] }
[lints.rust]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }
[lints]
workspace = true

@@ -125,13 +125,13 @@ impl<A: Architecture, S: Scheduler> DerefMut for CpuImpl<A, S> {
}
}
impl<'a, A: Architecture, S: Scheduler + 'static> LocalCpuImpl<'a, A, S> {
impl<A: Architecture, S: Scheduler + 'static> LocalCpuImpl<'_, A, S> {
pub fn into_guard(self) -> IrqGuard<A> {
self.guard
}
}
impl<'a, A: Architecture, S: Scheduler> Deref for LocalCpuImpl<'a, A, S> {
impl<A: Architecture, S: Scheduler> Deref for LocalCpuImpl<'_, A, S> {
type Target = CpuImpl<A, S>;
fn deref(&self) -> &Self::Target {

@@ -139,7 +139,7 @@ impl<'a, A: Architecture, S: Scheduler> Deref for LocalCpuImpl<'a, A, S> {
}
}
impl<'a, A: Architecture, S: Scheduler> DerefMut for LocalCpuImpl<'a, A, S> {
impl<A: Architecture, S: Scheduler> DerefMut for LocalCpuImpl<'_, A, S> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.cpu
}
@@ -59,7 +59,7 @@ impl<A: Architecture, T> Spinlock<A, T> {
}
}
impl<'a, A: Architecture, T> Deref for SpinlockGuard<'a, A, T> {
impl<A: Architecture, T> Deref for SpinlockGuard<'_, A, T> {
type Target = T;
fn deref(&self) -> &Self::Target {

@@ -67,13 +67,13 @@ impl<'a, A: Architecture, T> Deref for SpinlockGuard<'a, A, T> {
}
}
impl<'a, A: Architecture, T> DerefMut for SpinlockGuard<'a, A, T> {
impl<A: Architecture, T> DerefMut for SpinlockGuard<'_, A, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.lock.value.get() }
}
}
impl<'a, A: Architecture, T> Drop for SpinlockGuard<'a, A, T> {
impl<A: Architecture, T> Drop for SpinlockGuard<'_, A, T> {
fn drop(&mut self) {
// if !LOCK_HACK.load(Ordering::Acquire) {
self.lock

@@ -84,8 +84,8 @@ impl<'a, A: Architecture, T> Drop for SpinlockGuard<'a, A, T> {
}
}
unsafe impl<A: Architecture, T> Sync for Spinlock<A, T> {}
unsafe impl<A: Architecture, T> Send for Spinlock<A, T> {}
unsafe impl<A: Architecture, T: Send> Sync for Spinlock<A, T> {}
unsafe impl<A: Architecture, T: Send> Send for Spinlock<A, T> {}
// IrqSafeSpinlock impls
impl<A: Architecture, T> IrqSafeSpinlock<A, T> {

@@ -140,7 +140,7 @@ impl<A: Architecture, T: Clone> Clone for IrqSafeSpinlock<A, T> {
}
}
impl<'a, A: Architecture, T> Deref for IrqSafeSpinlockGuard<'a, A, T> {
impl<A: Architecture, T> Deref for IrqSafeSpinlockGuard<'_, A, T> {
type Target = T;
fn deref(&self) -> &Self::Target {

@@ -148,7 +148,7 @@ impl<'a, A: Architecture, T> Deref for IrqSafeSpinlockGuard<'a, A, T> {
}
}
impl<'a, A: Architecture, T> DerefMut for IrqSafeSpinlockGuard<'a, A, T> {
impl<A: Architecture, T> DerefMut for IrqSafeSpinlockGuard<'_, A, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.inner.deref_mut()
}
@@ -12,5 +12,5 @@ tock-registers.workspace = true
static_assertions.workspace = true
log.workspace = true
[lints.rust]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }
[lints]
workspace = true

@@ -263,6 +263,11 @@ mod imp {
);
}
/// Initializes and loads the GDT data structure for the current CPU.
///
/// # Safety
///
/// Intended to be called once per each CPU during their early initialization.
pub unsafe fn init() -> usize {
let (gdt, tss) = create_gdt();
load_gdt(gdt);

@@ -504,7 +504,12 @@ impl FpuContext {
Self { inner }
}
pub fn store(this: *mut Self) {
/// Stores the FPU context into the `this` pointer.
///
/// # Safety
///
/// It is up to the caller to ensure `this` is a valid pointer to store the FPU context in.
pub unsafe fn store(this: *mut Self) {
#[cfg(any(target_arch = "x86", rust_analyzer))]
unsafe {
core::arch::x86::_fxsave(Box::as_mut_ptr(&mut (*this).inner) as _)

@@ -515,7 +520,12 @@ impl FpuContext {
}
}
pub fn restore(this: *const Self) {
/// Loads the FPU with the context stored in `this` pointer.
///
/// # Safety
///
/// It is up to the caller to ensure `this` is a valid pointer to load the FPU context from.
pub unsafe fn restore(this: *const Self) {
#[cfg(any(target_arch = "x86", rust_analyzer))]
unsafe {
core::arch::x86::_fxrstor(Box::as_ptr(&(*this).inner) as _)
@@ -285,7 +285,7 @@ impl<'a, T: Sized> EarlyMapping<'a, T> {
let layout = Layout::new::<T>();
let aligned = physical.page_align_down::<L3>();
let offset = physical.page_offset::<L3>();
let page_count = (offset + layout.size() + L3::SIZE - 1) / L3::SIZE;
let page_count = (offset + layout.size()).div_ceil(L3::SIZE);
let virt = map_early_pages(aligned, page_count)?;
let value = &mut *((virt + offset) as *mut T);

@@ -304,7 +304,7 @@ impl<'a, T: Sized> EarlyMapping<'a, T> {
let layout = Layout::array::<T>(len).unwrap();
let aligned = physical.page_align_down::<L3>();
let offset = physical.page_offset::<L3>();
let page_count = (offset + layout.size() + L3::SIZE - 1) / L3::SIZE;
let page_count = (offset + layout.size()).div_ceil(L3::SIZE);
let virt = map_early_pages(aligned, page_count)?;
let value = core::slice::from_raw_parts_mut((virt + offset) as *mut T, len);

@@ -313,7 +313,7 @@ impl<'a, T: Sized> EarlyMapping<'a, T> {
}
}
impl<'a, T: ?Sized> Deref for EarlyMapping<'a, T> {
impl<T: ?Sized> Deref for EarlyMapping<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {

@@ -321,13 +321,13 @@ impl<'a, T: ?Sized> Deref for EarlyMapping<'a, T> {
}
}
impl<'a, T: ?Sized> DerefMut for EarlyMapping<'a, T> {
impl<T: ?Sized> DerefMut for EarlyMapping<'_, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.value
}
}
impl<'a, T: ?Sized> Drop for EarlyMapping<'a, T> {
impl<T: ?Sized> Drop for EarlyMapping<'_, T> {
fn drop(&mut self) {
let address = (self.value as *mut T).addr() & !(L3::SIZE - 1);
@@ -60,7 +60,7 @@ struct SubmittedCommand<'a> {
index: usize,
}
impl<'a> SubmittedCommand<'a> {
impl SubmittedCommand<'_> {
pub async fn wait_for_completion(self) -> Result<(), AhciError> {
let result = poll_fn(|cx| self.port.poll_slot(cx, self.index)).await;

@@ -72,7 +72,7 @@ impl<'a> SubmittedCommand<'a> {
}
}
impl<'a> Drop for SubmittedCommand<'a> {
impl Drop for SubmittedCommand<'_> {
fn drop(&mut self) {
panic!(
"Cannot drop command in flight: port{}, slot{}",

@@ -407,6 +407,9 @@ impl Device for NvmeController {
}
}
// TODO
unsafe impl Sync for NvmeController {}
static NVME_CONTROLLERS: IrqSafeSpinlock<Vec<&'static NvmeController>> =
IrqSafeSpinlock::new(Vec::new());

@@ -430,3 +430,7 @@ impl QueuePair {
n
}
}
// TODO
unsafe impl Sync for QueuePair {}
unsafe impl Send for QueuePair {}
@@ -18,5 +18,5 @@ tock-registers.workspace = true
[target.'cfg(target_arch = "x86_64")'.dependencies]
acpi.workspace = true
[lints.rust]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }
[lints]
workspace = true

@@ -74,7 +74,7 @@ pub struct CapabilityIterator<'s, S: PciConfigurationSpace + ?Sized> {
current: Option<usize>,
}
impl<'s, S: PciConfigurationSpace + ?Sized> Iterator for CapabilityIterator<'s, S> {
impl<S: PciConfigurationSpace + ?Sized> Iterator for CapabilityIterator<'_, S> {
type Item = (PciCapabilityId, usize, usize);
fn next(&mut self) -> Option<Self::Item> {

@@ -17,6 +17,6 @@ pub mod class_driver;
pub use communication::{UsbControlTransfer, UsbDirection, UsbTransferStatus, UsbTransferToken};
pub trait UsbEndpoint {}
pub trait UsbEndpoint: Sync {}
pub trait UsbHostController {}
pub trait UsbHostController: Sync {}
@@ -183,7 +183,7 @@ impl DerefMut for ExtendedSuperblock {
impl Inode {
pub fn blocks(&self, fs: &Ext2Fs) -> usize {
(self.size_lower as usize + fs.block_size - 1) / fs.block_size
(self.size_lower as usize).div_ceil(fs.block_size)
}
pub fn user_id(&self) -> UserId {

@@ -76,12 +76,13 @@ impl Ext2Fs {
}
let block_size = 1024usize << superblock.block_size_log2;
let bgdt_block_index = (data::SUPERBLOCK_OFFSET as usize + block_size - 1) / block_size;
let bgdt_block_index = (data::SUPERBLOCK_OFFSET as usize).div_ceil(block_size);
let bgdt_entry_count = ((superblock.total_blocks + superblock.block_group_block_count - 1)
/ superblock.block_group_block_count) as usize;
let bgdt_entry_count = superblock
.total_blocks
.div_ceil(superblock.block_group_block_count) as usize;
let bgdt_block_count =
(bgdt_entry_count * size_of::<BlockGroupDescriptor>() + block_size - 1) / block_size;
(bgdt_entry_count * size_of::<BlockGroupDescriptor>()).div_ceil(block_size);
log::info!(
"ext2 v{}.{}",
@@ -15,3 +15,6 @@ log.workspace = true
[features]
default = []
test-io = []
[lints]
workspace = true

@@ -120,7 +120,7 @@ impl<'a, A: BlockAllocator> BlockRef<'a, A> {
}
}
impl<'a, A: BlockAllocator> Drop for BlockRef<'a, A> {
impl<A: BlockAllocator> Drop for BlockRef<'_, A> {
fn drop(&mut self) {
if self.is_allocated() && !self.is_null() {
unsafe {

@@ -173,7 +173,7 @@ impl<'a, A: BlockAllocator> BlockRaw<'a, A> {
}
// Data block
impl<'a, A: BlockAllocator> BlockData<'a, A> {
impl<A: BlockAllocator> BlockData<'_, A> {
/// Dummy entry representing a missing block
pub const fn null() -> Self {
Self {

@@ -231,7 +231,7 @@ impl<A: BlockAllocator> DerefMut for BlockData<'_, A> {
}
// Indirect block
impl<'a, A: BlockAllocator> BlockIndirect<'a, A> {
impl<A: BlockAllocator> BlockIndirect<'_, A> {
/// Dummy entry representing a missing block
pub const fn null() -> Self {
Self {

@@ -263,13 +263,13 @@ impl<'a, A: BlockAllocator> Deref for BlockIndirect<'a, A> {
}
}
impl<'a, A: BlockAllocator> DerefMut for BlockIndirect<'a, A> {
impl<A: BlockAllocator> DerefMut for BlockIndirect<'_, A> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *(self.inner.inner.as_mut() as *mut _ as *mut _) }
}
}
impl<'a, A: BlockAllocator> Drop for BlockIndirect<'a, A> {
impl<A: BlockAllocator> Drop for BlockIndirect<'_, A> {
fn drop(&mut self) {
if self.is_null() {
return;
@@ -52,7 +52,7 @@ impl<'a, A: BlockAllocator> BVec<'a, A> {
let data_ptr = data.as_ptr() as usize;
assert_eq!(data_ptr & 1, 0);
let blocks = (data.len() + block::SIZE - 1) / block::SIZE;
let blocks = data.len().div_ceil(block::SIZE);
self.resize(blocks)?;
for i in 0..blocks {

@@ -91,10 +91,7 @@ impl<'a, A: BlockAllocator> BVec<'a, A> {
fn caps(cap: usize) -> (usize, usize) {
let l0_cap = core::cmp::min(cap, L0_BLOCKS);
let l1_cap = if cap > L0_BLOCKS {
core::cmp::min(
(cap - L0_BLOCKS + block::ENTRY_COUNT - 1) / block::ENTRY_COUNT,
L1_BLOCKS,
)
core::cmp::min((cap - L0_BLOCKS).div_ceil(block::ENTRY_COUNT), L1_BLOCKS)
} else {
0
};

@@ -147,8 +144,7 @@ impl<'a, A: BlockAllocator> BVec<'a, A> {
fn ensure_write_capacity(&mut self, pos: usize, need_to_write: usize) -> Result<(), Error> {
let current_capacity = self.capacity;
let need_capacity =
(core::cmp::max(pos + need_to_write, self.size) + block::SIZE - 1) / block::SIZE;
let need_capacity = core::cmp::max(pos + need_to_write, self.size).div_ceil(block::SIZE);
if need_capacity > current_capacity {
self.resize(need_capacity)

@@ -221,7 +217,7 @@ impl<'a, A: BlockAllocator> BVec<'a, A> {
/// Resize the block vector to requested size
pub fn truncate(&mut self, new_size: u64) -> Result<(), Error> {
let new_size: usize = new_size.try_into().unwrap();
let requested_capacity = (new_size + block::SIZE - 1) / block::SIZE;
let requested_capacity = new_size.div_ceil(block::SIZE);
self.resize(requested_capacity)?;
// TODO fill with zeros if resizing larger?
self.size = new_size;

@@ -280,7 +276,7 @@ impl<'a, A: BlockAllocator> Index<usize> for BVec<'a, A> {
}
}
impl<'a, A: BlockAllocator> IndexMut<usize> for BVec<'a, A> {
impl<A: BlockAllocator> IndexMut<usize> for BVec<'_, A> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
if index > self.capacity {
panic!(

@@ -293,7 +289,7 @@ impl<'a, A: BlockAllocator> IndexMut<usize> for BVec<'a, A> {
}
}
impl<'a, A: BlockAllocator> TryFrom<&'static [u8]> for BVec<'a, A> {
impl<A: BlockAllocator> TryFrom<&'static [u8]> for BVec<'_, A> {
type Error = Error;
fn try_from(value: &'static [u8]) -> Result<Self, Self::Error> {

@@ -405,7 +401,7 @@ mod bvec_allocation {
}
}
#[cfg(all(test, feature = "test-io"))]
#[cfg(all(test, any(feature = "test-io", rust_analyzer)))]
mod bvec_io {
use crate::{block, bvec::L0_BLOCKS};
@@ -64,7 +64,7 @@ impl TcpSocket {
) -> Result<(SocketAddr, Arc<TcpSocket>), Error> {
let future = Self::connect_async(remote);
match timeout {
Some(timeout) => with_timeout(future, timeout).await?.into(),
Some(timeout) => with_timeout(future, timeout).await?,
None => future.await,
}
}

@@ -5,6 +5,7 @@ edition = "2021"
[dependencies]
yggdrasil-abi.workspace = true
libk-util.workspace = true
libk-mm.workspace = true
device-api = { workspace = true, features = ["derive"] }

@@ -10,7 +10,7 @@ use crate::{CommonConfiguration, DeviceStatus};
pub mod pci;
pub trait Transport {
pub trait Transport: Send {
fn common_cfg(&self) -> &CommonConfiguration;
fn notify_cfg(&self) -> &[WriteOnly<u16>];
fn notify_off_mul(&self) -> usize;

@@ -140,3 +140,6 @@ impl PciTransport {
})
}
}
// TODO sort this out
unsafe impl Sync for PciTransport {}
@@ -4,7 +4,7 @@ use yggdrasil_abi::error::Error;
#[repr(transparent)]
pub struct DeviceId(u64);
pub trait Device: Send + 'static {
pub trait Device: Sync + 'static {
fn display_name(&self) -> &'static str;
/// Initializes the device, making it ready for operation.

@@ -90,7 +90,7 @@ pub struct DevTreeNodeInfo<'a, 'i, 'dt> {
pub node: DevTreeIndexNode<'a, 'i, 'dt>,
}
impl<'a> DeviceTree<'a> {
impl DeviceTree<'_> {
pub const MIN_HEADER_SIZE: usize = DevTree::MIN_HEADER_SIZE;
/// Constructs a device tree wrapper from the DTB virtual address.

@@ -155,7 +155,7 @@ impl<'a> DeviceTree<'a> {
}
}
impl<'a, 'i, 'dt> DevTreeIndexNodeExt for DevTreeIndexNode<'a, 'i, 'dt> {
impl DevTreeIndexNodeExt for DevTreeIndexNode<'_, '_, '_> {
fn get_address_cells(&self) -> Option<usize> {
self.props()
.find(|p| p.name().unwrap_or("") == "#address-cells")

@@ -169,7 +169,7 @@ impl<'a, 'i, 'dt> DevTreeIndexNodeExt for DevTreeIndexNode<'a, 'i, 'dt> {
}
}
impl<'a, 'i, 'dt> DevTreeIndexPropExt for DevTreeIndexProp<'a, 'i, 'dt> {
impl DevTreeIndexPropExt for DevTreeIndexProp<'_, '_, '_> {
fn read_cell(&self, u32_offset: usize, cell_size: usize) -> Option<u64> {
match cell_size {
1 => self.u32(u32_offset).map(|x| x as u64).ok(),

@@ -243,7 +243,7 @@ impl Iterator for FdtMemoryRegionIter<'_> {
}
}
impl<'a, 'i, 'dt> DevTreeIndexNodePropGet<u32> for DevTreeIndexNode<'a, 'i, 'dt> {
impl DevTreeIndexNodePropGet<u32> for DevTreeIndexNode<'_, '_, '_> {
fn prop(&self, name: &str) -> Option<u32> {
self.props().find_map(|prop| {
if prop.name().ok()? == name {

@@ -255,7 +255,7 @@ impl<'a, 'i, 'dt> DevTreeIndexNodePropGet<u32> for DevTreeIndexNode<'a, 'i, 'dt>
}
}
impl<'a, 'i, 'dt> DevTreeIndexNodePropGet<&'a str> for DevTreeIndexNode<'a, 'i, 'dt> {
impl<'a> DevTreeIndexNodePropGet<&'a str> for DevTreeIndexNode<'a, '_, '_> {
fn prop(&self, name: &str) -> Option<&'a str> {
self.props().find_map(|prop| {
if prop.name().ok()? == name {
@@ -15,5 +15,5 @@ discrete_range_map.workspace = true
itertools = "0.11.0"
proptest = "1.2.0"
[lints.rust]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }
[lints]
workspace = true

@@ -34,5 +34,5 @@ elf.workspace = true
uuid = { version = "1.10.0", features = ["bytemuck"], default-features = false }
lru = "0.12.3"
[lints.rust]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }
[lints]
workspace = true

@@ -12,5 +12,5 @@ kernel-arch-interface.workspace = true
bitflags.workspace = true
bytemuck.workspace = true
[lints.rust]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }
[lints]
workspace = true

@@ -5,9 +5,9 @@ use yggdrasil_abi::error::Error;
use super::address::PhysicalAddress;
#[cfg(not(target_arch = "x86"))]
#[cfg(any(not(target_arch = "x86"), rust_analyzer))]
const TABLE_SIZE_MASK: usize = 0x1FF;
#[cfg(target_arch = "x86")]
#[cfg(any(target_arch = "x86", rust_analyzer))]
const TABLE_SIZE_MASK: usize = 0x3FF;
/// Interface for a single level of address translation

@@ -49,7 +49,7 @@ pub const fn page_offset<T: EntryLevel>(address: usize) -> usize {
}
pub const fn page_count<T: EntryLevel>(address: usize) -> usize {
(address + T::SIZE - 1) / T::SIZE
address.div_ceil(T::SIZE)
}
pub const fn page_align_up<T: EntryLevel>(address: usize) -> usize {
@@ -144,7 +144,7 @@ impl<'a, T: ?Sized> DeviceMemoryIo<'a, T> {
}
}
impl<'a, T: ?Sized> Deref for DeviceMemoryIo<'a, T> {
impl<T: ?Sized> Deref for DeviceMemoryIo<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {

@@ -159,6 +159,7 @@ impl<T: ?Sized> AsPhysicalAddress for DeviceMemoryIo<'_, T> {
}
unsafe impl<T: ?Sized> Send for DeviceMemoryIo<'_, T> {}
impl<T: ?Sized> !Sync for DeviceMemoryIo<'_, T> {}
impl<'a, T: Sized> DeviceMemoryIoMut<'a, T> {
/// Maps a physical address as device memory to a slice `[T; len]`

@@ -180,7 +181,7 @@ impl<'a, T: Sized> DeviceMemoryIoMut<'a, T> {
}
}
impl<'a, T: ?Sized> Deref for DeviceMemoryIoMut<'a, T> {
impl<T: ?Sized> Deref for DeviceMemoryIoMut<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {

@@ -188,8 +189,11 @@ impl<'a, T: ?Sized> Deref for DeviceMemoryIoMut<'a, T> {
}
}
impl<'a, T: ?Sized> DerefMut for DeviceMemoryIoMut<'a, T> {
impl<T: ?Sized> DerefMut for DeviceMemoryIoMut<'_, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.value
}
}
impl<T: ?Sized> !Send for DeviceMemoryIoMut<'_, T> {}
impl<T: ?Sized> !Sync for DeviceMemoryIoMut<'_, T> {}

@@ -63,6 +63,8 @@ unsafe impl GlobalAlloc for KernelAllocator {
}
}
unsafe impl Sync for KernelAllocator {}
/// Kernel's global allocator
#[global_allocator]
pub static GLOBAL_HEAP: KernelAllocator = KernelAllocator::empty();
@@ -3,7 +3,8 @@
slice_ptr_get,
step_trait,
const_trait_impl,
maybe_uninit_as_bytes
maybe_uninit_as_bytes,
negative_impls
)]
#![no_std]

@@ -55,7 +56,7 @@ pub const L2_PAGE_SIZE: usize = 1 << 21;
#[cfg(target_arch = "x86")]
pub const L2_PAGE_SIZE: usize = 1 << 22;
pub trait PageProvider {
pub trait PageProvider: Send + Sync {
fn get_page(&self, offset: u64) -> Result<PhysicalAddress, Error>;
fn release_page(&self, offset: u64, phys: PhysicalAddress) -> Result<(), Error>;
fn clone_page(

@@ -80,7 +81,7 @@ impl<T> PageBox<T> {
fn alloc_slice(count: usize, zeroed: bool) -> Result<(PhysicalAddress, usize), Error> {
// TODO hardcoded page sizes
let layout = Layout::array::<T>(count).unwrap();
let page_count = (layout.size() + L3_PAGE_SIZE - 1) / L3_PAGE_SIZE;
let page_count = layout.size().div_ceil(L3_PAGE_SIZE);
let base = phys::alloc_pages_contiguous(page_count)?;
if zeroed {
let ptr = base.virtualize() as *mut u8;

@@ -92,7 +93,7 @@ impl<T> PageBox<T> {
#[inline]
fn alloc() -> Result<(PhysicalAddress, usize), Error> {
let page_count = (size_of::<T>() + L3_PAGE_SIZE - 1) / L3_PAGE_SIZE;
let page_count = size_of::<T>().div_ceil(L3_PAGE_SIZE);
Ok((phys::alloc_pages_contiguous(page_count)?, page_count))
}

@@ -48,7 +48,7 @@ impl PhysicalMemoryManager {
offset: usize,
page_count: usize,
) -> PhysicalMemoryManager {
let bitmap_len = (page_count + (BITMAP_WORD_SIZE - 1)) / BITMAP_WORD_SIZE;
let bitmap_len = page_count.div_ceil(BITMAP_WORD_SIZE);
let mut bitmap = PhysicalRefMut::<'static, _, KernelTableManagerImpl>::map_slice(
bitmap_phys_base,
bitmap_len,

@@ -183,7 +183,7 @@ pub unsafe fn init_from_iter<
let total_count = (phys_end - phys_start) / L3_PAGE_SIZE;
let page_bitmap_size = (total_count + BITMAP_WORD_SIZE - 1) / (BITMAP_WORD_SIZE / 8);
let page_bitmap_page_count = (page_bitmap_size + L3_PAGE_SIZE - 1) / L3_PAGE_SIZE;
let page_bitmap_page_count = page_bitmap_size.div_ceil(L3_PAGE_SIZE);
let page_bitmap_phys_base = find_contiguous_region(it.clone(), page_bitmap_page_count).unwrap();
@@ -28,7 +28,7 @@ pub struct FlushIter<'a, K, V, H: BuildHasher> {
cache: &'a mut LruCache<K, V, H>,
}
impl<'a, K, V, H: BuildHasher> Iterator for FlushIter<'a, K, V, H> {
impl<K, V, H: BuildHasher> Iterator for FlushIter<'_, K, V, H> {
type Item = (K, V);
fn next(&mut self) -> Option<Self::Item> {

@@ -17,7 +17,7 @@ pub type SpinlockGuard<'a, T> = kernel_arch::sync::SpinlockGuard<'a, Architectur
static LOCK_HACK: AtomicBool = AtomicBool::new(false);
pub trait LockMethod<'q>: Sync {
pub trait LockMethod<'q> {
type Guard<'a>
where
'a: 'q,

@@ -142,10 +142,10 @@ impl<T> IrqSafeRwLock<T> {
}
}
unsafe impl<T> Sync for IrqSafeRwLock<T> {}
unsafe impl<T> Send for IrqSafeRwLock<T> {}
unsafe impl<T: Send> Sync for IrqSafeRwLock<T> {}
unsafe impl<T: Send> Send for IrqSafeRwLock<T> {}
impl<'a, T> Deref for IrqSafeRwLockReadGuard<'a, T> {
impl<T> Deref for IrqSafeRwLockReadGuard<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {

@@ -153,13 +153,13 @@ impl<'a, T> Deref for IrqSafeRwLockReadGuard<'a, T> {
}
}
impl<'a, T> Drop for IrqSafeRwLockReadGuard<'a, T> {
impl<T> Drop for IrqSafeRwLockReadGuard<'_, T> {
fn drop(&mut self) {
unsafe { self.lock.release_read() }
}
}
impl<'a, T> IrqSafeRwLockReadGuard<'a, T> {
impl<T> IrqSafeRwLockReadGuard<'_, T> {
pub fn get(guard: &Self) -> *const T {
guard.lock.value.get()
}

@@ -184,7 +184,7 @@ impl<'a, T> IrqSafeRwLockWriteGuard<'a, T> {
}
}
impl<'a, T> Deref for IrqSafeRwLockWriteGuard<'a, T> {
impl<T> Deref for IrqSafeRwLockWriteGuard<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {

@@ -192,13 +192,13 @@ impl<'a, T> Deref for IrqSafeRwLockWriteGuard<'a, T> {
}
}
impl<'a, T> DerefMut for IrqSafeRwLockWriteGuard<'a, T> {
impl<T> DerefMut for IrqSafeRwLockWriteGuard<'_, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.lock.value.get() }
}
}
impl<'a, T> Drop for IrqSafeRwLockWriteGuard<'a, T> {
impl<T> Drop for IrqSafeRwLockWriteGuard<'_, T> {
fn drop(&mut self) {
unsafe { self.lock.release_write() }
}
@@ -65,14 +65,14 @@ impl DerefMut for Cpu {
}
}
impl<'a> LocalCpu<'a> {
impl LocalCpu<'_> {
/// Converts the local CPU handle into its IRQ guard
pub fn into_guard(self) -> IrqGuard {
self.0.into_guard()
}
}
impl<'a> Deref for LocalCpu<'a> {
impl Deref for LocalCpu<'_> {
type Target = CpuImpl<CpuQueue>;
#[inline]

@@ -81,7 +81,7 @@ impl<'a> Deref for LocalCpu<'a> {
}
}
impl<'a> DerefMut for LocalCpu<'a> {
impl DerefMut for LocalCpu<'_> {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
self.0.deref_mut()

@@ -21,7 +21,8 @@
arbitrary_self_types,
slice_split_once,
arbitrary_self_types_pointers,
result_flattening
result_flattening,
negative_impls
)]
extern crate alloc;

@@ -74,3 +74,6 @@ impl UserspaceMutex {
self.queue.wake_all();
}
}
unsafe impl Send for UserspaceMutex {}
unsafe impl Sync for UserspaceMutex {}
@@ -384,6 +384,7 @@ impl Process {
let mut inner = self.inner.write();
match inner.mutexes.entry(address) {
btree_map::Entry::Vacant(slot) => {
#[allow(clippy::arc_with_non_send_sync)]
let mutex = Arc::new(UserspaceMutex::new(&space, address)?);
slot.insert(mutex.clone());
Ok(mutex)

@@ -128,15 +128,13 @@ pub fn with_timeout<'a, T: 'a, F: Future<Output = T> + Send + 'a>(
})
}
pub fn maybe_timeout<'a, T: 'a, F: Future<Output = T> + Send + 'a>(
pub async fn maybe_timeout<T, F: Future<Output = T> + Send>(
fut: F,
timeout: Option<Duration>,
) -> impl Future<Output = Result<T, Error>> + Send + 'a {
async move {
match timeout {
Some(timeout) => with_timeout(fut, timeout).await,
None => Ok(fut.await),
}
) -> Result<T, Error> {
match timeout {
Some(timeout) => with_timeout(fut, timeout).await,
None => Ok(fut.await),
}
}

@@ -25,6 +25,9 @@ pub struct CpuQueue {
last_stats_measure: Cell<Option<Duration>>,
}
// TODO sort out Cell<...>
unsafe impl Sync for CpuQueue {}
#[derive(Default)]
pub struct SchedulerStats {
pub idle: AtomicU64,
@@ -39,7 +39,7 @@ pub struct MutexGuard<'a, T> {
lock: &'a ThreadedMutexInner,
}
pub struct MappedAsyncMutexGuard<'a, U, T> {
pub struct MappedAsyncMutexGuard<'a, U: ?Sized, T> {
mutex: &'a AsyncMutex<T>,
value: *mut U,
}

@@ -85,10 +85,11 @@ impl<T> AsyncMutex<T> {
}
}
unsafe impl<T> Sync for AsyncMutex<T> {}
unsafe impl<T: Send> Send for AsyncMutex<T> {}
unsafe impl<T: Send> Sync for AsyncMutex<T> {}
unsafe impl<'a, T> Send for AsyncMutexGuard<'a, T> {}
unsafe impl<'a, T> Sync for AsyncMutexGuard<'a, T> {}
impl<T> !Send for AsyncMutexGuard<'_, T> {}
unsafe impl<T: Sync> Sync for AsyncMutexGuard<'_, T> {}
impl<'a, T> AsyncMutexGuard<'a, T> {
pub async fn try_map_guard_async<

@@ -108,7 +109,7 @@ impl<'a, T> AsyncMutexGuard<'a, T> {
}
}
impl<'a, T> Deref for AsyncMutexGuard<'a, T> {
impl<T> Deref for AsyncMutexGuard<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {

@@ -116,13 +117,13 @@ impl<'a, T> Deref for AsyncMutexGuard<'a, T> {
}
}
impl<'a, T> DerefMut for AsyncMutexGuard<'a, T> {
impl<T> DerefMut for AsyncMutexGuard<'_, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.mutex.get() }
}
}
impl<'a, T> Drop for AsyncMutexGuard<'a, T> {
impl<T> Drop for AsyncMutexGuard<'_, T> {
fn drop(&mut self) {
unsafe {
self.mutex.force_unlock();

@@ -130,10 +131,10 @@ impl<'a, T> Drop for AsyncMutexGuard<'a, T> {
}
}
unsafe impl<'a, U, T> Send for MappedAsyncMutexGuard<'a, U, T> {}
unsafe impl<'a, U, T> Sync for MappedAsyncMutexGuard<'a, U, T> {}
impl<U: ?Sized, T> !Send for MappedAsyncMutexGuard<'_, U, T> {}
unsafe impl<U: Sync + ?Sized, T: Sync> Sync for MappedAsyncMutexGuard<'_, U, T> {}
impl<'a, U, T> Deref for MappedAsyncMutexGuard<'a, U, T> {
impl<U: ?Sized, T> Deref for MappedAsyncMutexGuard<'_, U, T> {
type Target = U;
fn deref(&self) -> &Self::Target {

@@ -141,13 +142,13 @@ impl<'a, U, T> Deref for MappedAsyncMutexGuard<'a, U, T> {
}
}
impl<'a, U, T> DerefMut for MappedAsyncMutexGuard<'a, U, T> {
impl<U: ?Sized, T> DerefMut for MappedAsyncMutexGuard<'_, U, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.value }
}
}
impl<'a, U, T> Drop for MappedAsyncMutexGuard<'a, U, T> {
impl<U: ?Sized, T> Drop for MappedAsyncMutexGuard<'_, U, T> {
fn drop(&mut self) {
unsafe {
self.mutex.force_unlock();

@@ -179,7 +180,11 @@ impl ThreadedMutexInner {
}
impl<'q> LockMethod<'q> for ThreadedMutexInner {
type Guard<'a> = () where 'a: 'q, Self: 'a;
type Guard<'a>
= ()
where
'a: 'q,
Self: 'a;
fn lock(&self) -> Result<(), Error> {
// Fast-path

@@ -223,7 +228,11 @@ impl<T> Mutex<T> {
}
impl<'q, T> LockMethod<'q> for Mutex<T> {
type Guard<'a> = MutexGuard<'a, T> where 'a: 'q, Self: 'a;
type Guard<'a>
= MutexGuard<'a, T>
where
'a: 'q,
Self: 'a;
fn lock(&'q self) -> Result<Self::Guard<'q>, Error> {
self.lock.lock()?;

@@ -238,9 +247,10 @@ impl<'q, T> LockMethod<'q> for Mutex<T> {
}
}
unsafe impl<T> Sync for Mutex<T> {}
unsafe impl<T: Send> Sync for Mutex<T> {}
unsafe impl<T: Send> Send for Mutex<T> {}
impl<'a, T> Deref for MutexGuard<'a, T> {
impl<T> Deref for MutexGuard<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {

@@ -248,13 +258,13 @@ impl<'a, T> Deref for MutexGuard<'a, T> {
}
}
impl<'a, T> DerefMut for MutexGuard<'a, T> {
impl<T> DerefMut for MutexGuard<'_, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.value.get() }
}
}
impl<'a, T> Drop for MutexGuard<'a, T> {
impl<T> Drop for MutexGuard<'_, T> {
fn drop(&mut self) {
unsafe { self.lock.release() }
}
@@ -64,7 +64,7 @@ impl<'a, D: NgBlockDevice + 'a> NgBlockDeviceWrapper<'a, D> {
fn lba_range(&self, byte_range: Range<u64>) -> (Range<u64>, usize) {
let lba_start = byte_range.start / self.block_size as u64;
let lba_end = (byte_range.end + self.block_size as u64 - 1) / self.block_size as u64;
let lba_end = byte_range.end.div_ceil(self.block_size as u64);
let lba_off = (byte_range.start % self.block_size as u64) as usize;
(lba_start..lba_end, lba_off)

@@ -1,5 +1,5 @@
use core::{
pin::Pin,
future::poll_fn,
sync::atomic::{AtomicU32, Ordering},
task::{Context, Poll},
};

@@ -10,7 +10,7 @@ use alloc::{
string::String,
sync::Arc,
};
use futures_util::{task::AtomicWaker, Future};
use futures_util::task::AtomicWaker;
use libk_util::sync::{IrqSafeSpinlock, LockMethod};
use yggdrasil_abi::{
error::Error,

@@ -160,33 +160,17 @@ impl Channel {
}
impl Subscription {
fn receive_message_async(&self) -> impl Future<Output = Result<Arc<Message>, Error>> + '_ {
struct F<'f> {
rx: &'f Subscription,
}
impl<'f> Future for F<'f> {
type Output = Result<Arc<Message>, Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut lock = self.rx.queue.lock()?;
if let Some(msg) = lock.pop_front() {
return Poll::Ready(Ok(msg));
}
drop(lock);
self.rx.notify.register(cx.waker());
let mut lock = self.rx.queue.lock()?;
if let Some(msg) = lock.pop_front() {
Poll::Ready(Ok(msg))
} else {
Poll::Pending
}
async fn receive_message_async(&self) -> Result<Arc<Message>, Error> {
poll_fn(|cx| {
let mut lock = self.queue.lock()?;
if let Some(msg) = lock.pop_front() {
Poll::Ready(Ok(msg))
} else {
self.notify.register(cx.waker());
Poll::Pending
}
}
F { rx: self }
})
.await
}
fn receive_message_inner(&self) -> Result<Arc<Message>, Error> {
@@ -1,12 +1,10 @@
use core::{
future::poll_fn,
pin::Pin,
sync::atomic::{AtomicBool, Ordering},
task::{Context, Poll},
};
use alloc::{sync::Arc, vec, vec::Vec};
use futures_util::Future;
use libk_util::{
sync::{IrqSafeSpinlock, IrqSafeSpinlockGuard},
waker::QueueWaker,

@@ -99,33 +97,23 @@ impl Pipe {
self.write_notify.wake_all();
}
pub fn blocking_write(&self, val: u8) -> impl Future<Output = Result<(), Error>> + '_ {
struct F<'a> {
pipe: &'a Pipe,
val: u8,
}
impl<'a> Future for F<'a> {
type Output = Result<(), Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut lock = self.pipe.inner.lock();
if self.pipe.shutdown.load(Ordering::Acquire) {
self.pipe.write_notify.remove(cx.waker());
Poll::Ready(Err(Error::ReadOnly))
} else if lock.try_write(self.val) {
self.pipe.write_notify.remove(cx.waker());
self.pipe.read_notify.wake_one();
Poll::Ready(Ok(()))
} else {
self.pipe.write_notify.register(cx.waker());
Poll::Pending
}
pub async fn blocking_write(&self, val: u8) -> Result<(), Error> {
poll_fn(|cx| {
let mut lock = self.inner.lock();
if self.shutdown.load(Ordering::Acquire) {
self.write_notify.remove(cx.waker());
// TODO broken pipe
Poll::Ready(Err(Error::ReadOnly))
} else if lock.try_write(val) {
self.write_notify.remove(cx.waker());
self.read_notify.wake_one();
Poll::Ready(Ok(()))
} else {
self.write_notify.register(cx.waker());
Poll::Pending
}
}
F { pipe: self, val }
})
.await
}
fn poll_read_end(

@@ -45,7 +45,7 @@ impl Read for TimerFile {
todo!()
} else {
let inner = self.inner.lock()?;
if let Some(_) = inner.as_ref().and_then(SleepFuture::remaining) {
if inner.as_ref().and_then(SleepFuture::remaining).is_some() {
return Err(Error::WouldBlock);
}
@@ -26,7 +26,7 @@ use crate::{
unsafe fn pre_init_mmu() {
if ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran4::NotSupported) {
// TODO early panic
loop {}
ArchitectureImpl::halt();
}
MAIR_EL1.write(

@@ -44,7 +44,7 @@ pub struct MultibootModuleEntry {
_pad: u32,
}
impl<'a> Iterator for MultibootMemoryMapIter<'a> {
impl Iterator for MultibootMemoryMapIter<'_> {
type Item = PhysicalMemoryRegion;
fn next(&mut self) -> Option<Self::Item> {

@@ -150,3 +150,5 @@ fn convert_color(c: ColorAttribute) -> u8 {
ColorAttribute::White => 7,
}
}
unsafe impl Sync for TextFramebuffer {}

@@ -42,7 +42,7 @@ pub enum LogLevel {
}
/// Generic interface for debug output
pub trait DebugSink {
pub trait DebugSink: Sync {
/// Sends a single byte to the output
fn putc(&self, c: u8) -> Result<(), Error>;

@@ -195,7 +195,7 @@ impl ConsoleChar {
}
}
impl<'a> RowIter<'a> {
impl RowIter<'_> {
/// Returns the next dirty row
pub fn next_dirty(&mut self) -> Option<(u32, &[ConsoleChar])> {
loop {
@@ -4,7 +4,6 @@ use abi::error::Error;
use alloc::borrow::ToOwned;
use kernel_fs::devfs;
use libk::{
module::load_kernel_symbol_table,
random,
task::{binary::LoadOptions, process::Process, runtime, thread::Thread},
vfs::{impls::fn_symlink, IoContext, NodeRef},

@@ -73,6 +72,8 @@ pub fn kinit() -> Result<(), Error> {
// attempt to load a module?
#[cfg(not(target_arch = "aarch64"))]
{
use libk::module::load_kernel_symbol_table;
load_kernel_symbol_table(&mut ioctx, "/kernel.sym")?;
}

@@ -17,7 +17,6 @@ extern crate alloc;
extern crate rustc_std_alloc as alloc;
pub(crate) mod macros;
pub mod pass;
pub mod util;
mod generated {
@@ -1,116 +0,0 @@
//! Interfaces for passing stuff between the kernel and the userspace
use core::ptr::NonNull;
use crate::error::Error;
/// Interface for a "writer", producing objects "owned" by the userspace.
///
/// # Safety
///
/// The trait is marked unsafe as it is the implementor's concern that the produced pointers can
/// actually hold the requested values and are properly aligned for that.
pub unsafe trait Placer {
/// Converts a kernel-space reference into a pointer that can be safely dereferenced in
/// userspace
fn place_ref<T: Place>(&mut self, r: &T) -> Result<NonNull<T::Output>, Error>;
/// Converts a kernel-space slice reference into a slice pointer, elements of which are placed
/// in safely-accessible memory by userspace
fn place_slice<T: Place>(&mut self, r: &[T]) -> Result<NonNull<[T::Output]>, Error>;
}
/// Interface for "transferring ownership" of an object to userspace.
///
/// # Safety
///
/// The trait is marked unsafe because it may produce arbitrary pointer, so it's the implementor's
/// concern that they are actually correct.
pub unsafe trait Place {
/// Conversion result type, must be safely accessible from userspace
type Output: 'static;
/// Creates a userspace-reachable variant of the internal value
fn place<P: Placer>(&self, placer: &mut P) -> Result<Self::Output, Error>;
/// Converts the internal value using [Place::place] and returns a safe userspace reference
fn place_ref<P: Placer>(&self, placer: &mut P) -> Result<&'static Self::Output, Error>
where
Self: Sized,
{
let ptr = placer.place_ref(self)?;
unsafe { Ok(ptr.as_ref()) }
}
}
macro_rules! impl_primitive {
($($ty:ty),+) => {
$(
unsafe impl $crate::pass::Place for $ty {
type Output = $ty;
fn place<P: $crate::pass::Placer>(&self, _placer: &mut P) -> Result<$ty, Error> {
Ok(*self)
}
}
)+
};
}
impl_primitive!(u8, u16, u32, u64, i8, i16, i32, i64, bool, char);
unsafe impl<'s> Place for &'s str {
type Output = &'static str;
fn place<P: Placer>(&self, placer: &mut P) -> Result<Self::Output, Error> {
let data = placer.place_slice(self.as_bytes())?;
// Safety: safe, object passed in was already a proper &str
Ok(unsafe { core::str::from_utf8_unchecked(data.as_ref()) })
}
}
unsafe impl<T: Place> Place for [T] {
type Output = &'static [T::Output];
fn place<P: Placer>(&self, placer: &mut P) -> Result<Self::Output, Error> {
let data = placer.place_slice(self)?;
Ok(unsafe { data.as_ref() })
}
}
/// Automatically implements the [Place] interface for a struct, properly handling any nested
/// references
#[macro_export]
macro_rules! impl_place_lifetime_struct {
(
$( #[ $struct_meta:meta ] )*
$struct_vis:vis struct $struct_name:ident<$struct_lifetime:lifetime> {
$(
$( #[ $field_meta:meta ] )*
$field_vis:vis $field_name:ident: $field_ty:ty,
)*
}
) => {
$( #[$struct_meta] )*
$struct_vis struct $struct_name<$struct_lifetime> {
$(
$( #[$field_meta] )*
$field_vis $field_name: $field_ty
),*
}
unsafe impl<$struct_lifetime> $crate::pass::Place for $struct_name<$struct_lifetime> {
type Output = $struct_name<'static>;
fn place<P: $crate::pass::Placer>(&self, placer: &mut P)
-> Result<Self::Output, $crate::error::Error> {
$(
let $field_name = self.$field_name.place(placer)?;
)*
Ok($struct_name {
$($field_name),*
})
}
}
};
}
@@ -124,7 +124,7 @@ impl<P: PageProvider> BucketAllocator<P> {
size if size <= 256 => self.buckets_256.allocate(),
size if size <= 512 => self.buckets_512.allocate(),
size if size <= 1024 => self.buckets_1024.allocate(),
size => P::map_pages((size + sys::PAGE_SIZE - 1) / sys::PAGE_SIZE),
size => P::map_pages(size.div_ceil(sys::PAGE_SIZE)),
}
}

@@ -143,7 +143,7 @@ impl<P: PageProvider> BucketAllocator<P> {
size if size <= 1024 => self.buckets_1024.free(ptr),
size => {
assert_eq!(usize::from(ptr.addr()) % sys::PAGE_SIZE, 0);
P::unmap_pages(ptr, (size + sys::PAGE_SIZE - 1) / sys::PAGE_SIZE);
P::unmap_pages(ptr, size.div_ceil(sys::PAGE_SIZE));
}
}
}

@@ -2,12 +2,12 @@ use core::ptr::NonNull;
pub const PAGE_SIZE: usize = 0x1000;
pub trait PageProvider {
pub trait PageProvider: Sync {
fn map_pages(count: usize) -> Option<NonNull<u8>>;
fn unmap_pages(address: NonNull<u8>, count: usize);
}
#[cfg(not(feature = "dep-of-kernel"))]
#[cfg(any(not(feature = "dep-of-kernel"), rust_analyzer))]
pub struct OsPageProvider;
#[cfg(any(all(unix, not(feature = "dep-of-kernel")), rust_analyzer))]
@@ -90,11 +90,11 @@ pub fn connect_tcp(
remote: SocketAddr,
timeout: Option<Duration>,
) -> Result<(SocketAddr, RawFd), Error> {
connect_inner(&mut SocketConnect::Tcp(remote.into(), timeout))
connect_inner(&mut SocketConnect::Tcp(remote, timeout))
}
/// "Connect" an UDP socket
pub fn connect_udp(socket_fd: RawFd, remote: SocketAddr) -> Result<(), Error> {
connect_inner(&mut SocketConnect::Udp(socket_fd, remote.into()))?;
connect_inner(&mut SocketConnect::Udp(socket_fd, remote))?;
Ok(())
}

@@ -59,8 +59,7 @@ fn terminate_by_signal(signal: Signal) -> ! {
/// crate.
pub unsafe fn set_handler(signal: Signal, handler: SignalHandler) -> SignalHandler {
let entry = &mut TABLE[signal.into_raw() as usize];
let old_handler = core::mem::replace(entry, handler);
old_handler
core::mem::replace(entry, handler)
}
/// Sets the stack that will be used to handle signals **on this thread**.
@@ -10,6 +10,11 @@ pub fn get_thread_pointer() -> usize {
tp
}
/// Writes `value` into `tpidr_el0` register.
///
/// # Safety
///
/// Usual pointer rules apply.
pub unsafe fn set_thread_pointer(value: usize) -> Result<(), Error> {
unsafe {
core::arch::asm!("msr tpidr_el0, {0}", in(reg) value);

@@ -10,6 +10,12 @@ pub fn get_thread_pointer() -> usize {
tp
}
/// Sets the %gs register base to `value` for this thread.
///
/// # Safety
///
/// `value` must hold an address to a structure, first element of which is a pointer to itself.
/// Usual pointer safety requirements apply.
pub unsafe fn set_thread_pointer(value: usize) -> Result<(), Error> {
crate::sys::set_thread_option(&ThreadOption::ThreadPointer(value))
}

@@ -131,7 +131,7 @@ impl Dtv {
}
#[inline]
fn get_key(list: &Vec<*mut c_void>, key: usize) -> *mut c_void {
fn get_key(list: &[*mut c_void], key: usize) -> *mut c_void {
if key == 0 || key > list.len() {
panic!("Out-of-bounds TLS key: {key}");
}

@@ -139,7 +139,7 @@ impl Dtv {
}
#[inline]
fn set_key(list: &mut Vec<*mut c_void>, key: usize, value: *mut c_void) {
fn set_key(list: &mut [*mut c_void], key: usize, value: *mut c_void) {
if key == 0 || key > list.len() {
panic!("Out-of-bounds TLS key: {key}")
}

@@ -169,8 +169,7 @@ impl Dtv {
/// Allocates a new thread-specific key in the DTV, filling it with a NULL value.
pub fn new_specific(&mut self) -> usize {
self.specific.push(null_mut());
let key = self.specific.len();
key
self.specific.len()
}
/// Returns a value associated with a given key.

@@ -225,7 +224,7 @@ pub fn init_tls(image: Option<&TlsImage>, force: bool) -> Result<(), Error> {
if force || !image.already_initialized {
let (base, tp) = clone_tls(image)?;
unsafe { set_thread_pointer(tp) }?;
setup_dtv(&image, base)?;
setup_dtv(image, base)?;
}
Ok(())
@@ -31,7 +31,7 @@ pub fn clone_tls(image: &TlsImage) -> Result<(usize, usize), Error> {
let align = image.align;
let aligned_size = (image.full_size + tcb_size + align - 1) & !(align - 1);
let page_aligned_size = (aligned_size + 0xFFf) & !0xFFF;
let page_aligned_size = (aligned_size + 0xFFF) & !0xFFF;
let base = unsafe {
crate::sys::map_memory(

@@ -10,6 +10,12 @@ pub fn get_thread_pointer() -> usize {
tp
}
/// Sets the %fs register base to `value` for this thread.
///
/// # Safety
///
/// `value` must hold an address to a structure, first element of which is a pointer to itself.
/// Usual pointer safety requirements apply.
pub unsafe fn set_thread_pointer(value: usize) -> Result<(), Error> {
crate::sys::set_thread_option(&ThreadOption::ThreadPointer(value))
}
@@ -1,5 +1,4 @@
/// i686 implementation of a system call macro
#[macro_export]
macro_rules! syscall {
($num:expr $(,)?) => {{

@@ -1,12 +1,12 @@
//! System call implementations
#[cfg(target_arch = "aarch64")]
#[cfg(any(target_arch = "aarch64", rust_analyzer))]
#[macro_use]
mod aarch64;
#[cfg(target_arch = "x86_64")]
#[cfg(any(target_arch = "x86_64", rust_analyzer))]
#[macro_use]
mod x86_64;
#[cfg(target_arch = "x86")]
#[cfg(any(target_arch = "x86", rust_analyzer))]
#[macro_use]
mod i686;
@@ -19,7 +19,7 @@ pub struct Display<'a> {
pub struct Point<T>(pub T, pub T);
impl<'a> Display<'a> {
impl Display<'_> {
pub fn open() -> Result<Self, Error> {
let file = OpenOptions::new().open("/dev/fb0")?;

@@ -141,7 +141,7 @@ impl Row {
}
}
impl<'a, 'd> Server<'a, 'd> {
impl<'a> Server<'a, '_> {
pub fn new() -> Result<Self, Error> {
let mut poll = PollChannel::new()?;

@@ -52,9 +52,9 @@ pub fn generate_rsa(
bits: usize,
) -> Result<(), Error> {
if rsa.format == KeyFormat::Pkcs1Der && options.public_key.is_none() {
return Err(Error::IncompatibleOptions(format!(
"DER output format requires -o/--public-key"
)));
return Err(Error::IncompatibleOptions(
"DER output format requires -o/--public-key".into(),
));
}
println!("Generating a new RSA keypair, bits={}", bits);
@@ -2,7 +2,7 @@
// Has to be naked: need to prevent the function from clobbering any registers besides x0
#[cfg(any(target_arch = "aarch64", rust_analyzer))]
#[naked]
pub unsafe extern "C" fn tlsdesc_resolve_static(argument: *const usize) -> usize {
pub(crate) unsafe extern "C" fn tlsdesc_resolve_static(argument: *const usize) -> usize {
// x0 -- pointer to tlsdesc struct:
// [0]: this function pointer
// [1]: static offset to return

@@ -1,7 +1,10 @@
use bytemuck::Pod;
use yggdrasil_rt::{mem::MappingFlags, process::{auxv, AuxValue, ProgramArgumentInner}};
use yggdrasil_rt::{
mem::MappingFlags,
process::{auxv, AuxValue},
};
use crate::{error::Error, mapping::Mapping, thread_local::{TlsImage, TlsLayout}};
use crate::{error::Error, mapping::Mapping};
struct ArgPlacer<'a> {
buffer: &'a mut [u8],

@@ -70,7 +73,6 @@ impl<'a> ArgPlacer<'a> {
}
}
pub fn build_argument(args: &[String], auxv: &[AuxValue]) -> Result<usize, Error> {
let mut buffer = Mapping::new(0x1000, MappingFlags::WRITE)?;
let arg_base = buffer.as_ptr().addr();

@@ -85,7 +87,7 @@ pub fn build_argument(args: &[String], auxv: &[AuxValue]) -> Result<usize, Error
// TODO env
let argv = placer.put_ptr_array(&argv)? + arg_base;
let envp = placer.put_ptr_array(&[])? + arg_base;
let auxv = placer.put_aux_array(&auxv)? + arg_base;
let auxv = placer.put_aux_array(auxv)? + arg_base;
let argument = placer.position + arg_base;
@ -1,4 +1,12 @@
#![feature(yggdrasil_os, never_type, map_try_insert, slice_ptr_get, iter_chain, naked_functions)]
#![feature(
yggdrasil_os,
never_type,
map_try_insert,
slice_ptr_get,
iter_chain,
naked_functions
)]
#![allow(clippy::new_without_default, clippy::missing_transmute_annotations)]
use std::process::ExitCode;
@ -1,5 +1,7 @@
use std::{
mem, ops::{Deref, DerefMut, Range}, ptr::NonNull
mem,
ops::{Deref, DerefMut, Range},
ptr::NonNull,
};
use yggdrasil_rt::mem::{MappingFlags, MappingSource};
@ -13,8 +15,9 @@ pub struct Mapping {
impl Mapping {
pub fn new(size: usize, flags: MappingFlags) -> Result<Mapping, Error> {
let size = (size + 0xFFF) & !0xFFF;
let base = unsafe { yggdrasil_rt::sys::map_memory(None, size, flags, &MappingSource::Anonymous) }
.map_err(|e| Error::MemoryMap(e))?;
let base =
unsafe { yggdrasil_rt::sys::map_memory(None, size, flags, &MappingSource::Anonymous) }
.map_err(Error::MemoryMap)?;
let base_ptr =
unsafe { NonNull::new_unchecked(core::ptr::with_exposed_provenance_mut(base)) };
let data = NonNull::slice_from_raw_parts(base_ptr, size);
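
Aside: an illustrative sketch (types invented) of the clippy::redundant_closure fix applied above. A tuple-variant constructor is itself a function, so it can be passed to map_err directly.

    enum Error {
        MemoryMap(i32),
    }

    fn map_memory(size: usize) -> Result<usize, i32> {
        if size == 0 { Err(22) } else { Ok(0x1000) }
    }

    fn new_mapping(size: usize) -> Result<usize, Error> {
        // `.map_err(|e| Error::MemoryMap(e))` and `.map_err(Error::MemoryMap)`
        // are equivalent; the variant name already has type fn(i32) -> Error.
        map_memory(size).map_err(Error::MemoryMap)
    }
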
@ -14,10 +14,9 @@ use elf::{
DT_INIT_ARRAYSZ, DT_NEEDED, DT_PREINIT_ARRAY, DT_PREINIT_ARRAYSZ, ET_DYN, ET_EXEC,
PT_DYNAMIC, PT_GNU_EH_FRAME, PT_GNU_RELRO, PT_GNU_STACK, PT_INTERP, PT_LOAD, PT_NOTE,
PT_NULL, PT_PHDR, PT_TLS, SHN_UNDEF, SHT_REL, SHT_RELA, STB_GLOBAL, STB_LOCAL, STB_WEAK,
STT_TLS,
},
endian::AnyEndian,
parse::{ParseAt, ParsingIterator},
parse::ParsingIterator,
segment::ProgramHeader,
symbol::Symbol,
ElfStream,
@ -27,7 +26,7 @@ use yggdrasil_rt::mem::MappingFlags;
use crate::{
error::Error,
mapping::Mapping,
relocation::{Relocation, RelocationValue, SymbolRelocation},
relocation::{RelocationValue, SymbolRelocation},
state::{ExportedNormalSymbol, ExportedSymbol, ExportedTlsSymbol, State},
};
@ -154,7 +153,7 @@ impl Object {
if tls_segments.len() > 1 {
todo!("Support objects with more than one TLS segment");
}
let tls = tls_segments.get(0).map(|s| **s);
let tls = tls_segments.first().map(|s| **s);
Ok(Self {
path,
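
Aside: a tiny sketch of the clippy::get_first fix applied above; slice::first() expresses "first element, if any" more directly than .get(0). The function is invented.

    fn first_segment(segments: &[u64]) -> Option<u64> {
        // `segments.get(0)` behaves identically, but `.first()` states the
        // intent and satisfies clippy::get_first.
        segments.first().copied()
    }
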
@ -331,12 +330,20 @@ impl Object {
}
}
/// # Safety
///
/// Only allowed to be called once. The object must be loaded and relocated before calling
/// this function.
pub unsafe fn call_early_constructors(&mut self) {
if let Some(pre_init_array) = self.pre_init_array.as_ref() {
Self::call_constructor_list(pre_init_array.clone());
}
}
/// # Safety
///
/// Only allowed to be called once. The object must be loaded and relocated before calling
/// this function.
pub unsafe fn call_constructors(&mut self) {
if let Some(init_array) = self.init_array.as_ref() {
Self::call_constructor_list(init_array.clone());
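
Aside: a minimal, self-contained example of the clippy::missing_safety_doc convention followed above; every public unsafe fn documents the caller's obligations under a # Safety heading. The function below is invented for illustration.

    /// Copies `len` bytes from `src` into `dst`.
    ///
    /// # Safety
    ///
    /// Both pointers must be valid for `len` bytes and the regions must not overlap.
    pub unsafe fn copy_bytes(dst: *mut u8, src: *const u8, len: usize) {
        core::ptr::copy_nonoverlapping(src, dst, len);
    }
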
@ -32,7 +32,8 @@ fn make_tlsdesc_relocation(
panic!("TLSDESC relocation against unknown module_id={module_id}, offset={symbol_offset}");
};
RelaValue::DQWord(builtins::tlsdesc_resolve_static as i64, offset as i64)
let resolver = builtins::tlsdesc_resolve_static as usize;
RelaValue::DQWord(resolver as i64, offset as i64)
}
impl Relocation for Rel {
@ -59,8 +60,8 @@ impl Relocation for Rela {
symbol: &ResolvedSymbol,
load_base: usize,
) -> Result<Option<Self::Value>, Error> {
match symbol {
&ResolvedSymbol::Tls(tls) => match self.r_type {
match *symbol {
ResolvedSymbol::Tls(tls) => match self.r_type {
// See make_tlsdesc_relocation()
R_AARCH64_TLSDESC => Ok(Some(make_tlsdesc_relocation(
false,
@ -76,7 +77,7 @@ impl Relocation for Rela {
}
_ => todo!("Unsupported relocation against TLS symbol: {}", self.r_type),
},
&ResolvedSymbol::Null(object_id) => match self.r_type {
ResolvedSymbol::Null(object_id) => match self.r_type {
// See make_tlsdesc_relocation()
R_AARCH64_TLSDESC => Ok(Some(make_tlsdesc_relocation(false, state, object_id, 0))),
// B + A
@ -87,7 +88,7 @@ impl Relocation for Rela {
),
},
_ => {
let s = symbol.value() as i64;
let s = symbol.value();
if s == 0 {
todo!()
}
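
Aside: an invented-enum sketch of the clippy::match_ref_pats rewrite used above; dereferencing the scrutinee once removes the leading & from every arm.

    #[derive(Clone, Copy)]
    enum ResolvedKind {
        Tls(u64),
        Null(u32),
        Other,
    }

    fn describe(kind: &ResolvedKind) -> &'static str {
        // Matching on `*kind` instead of `kind` lets each pattern drop its `&`.
        match *kind {
            ResolvedKind::Tls(_) => "tls",
            ResolvedKind::Null(_) => "null",
            ResolvedKind::Other => "other",
        }
    }
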
@ -1,5 +1,3 @@
use std::path::Path;
use elf::{
abi::{
R_X86_64_64, R_X86_64_DTPMOD64, R_X86_64_DTPOFF64, R_X86_64_GLOB_DAT, R_X86_64_JUMP_SLOT,
@ -8,11 +6,7 @@ use elf::{
relocation::{Rel, Rela},
};
use crate::{
error::Error,
object::{DynamicSymbol, ResolvedSymbol},
state::State,
};
use crate::{error::Error, object::ResolvedSymbol, state::State};
use super::{RelValue, RelaValue, Relocation};
@ -56,7 +50,7 @@ impl Relocation for Rela {
Ok(Some(RelaValue::QWord(offset_from_tp)))
}
_ => todo!("Unsupported relocation against TLS symbol: {}", self.r_type)
_ => todo!("Unsupported relocation against TLS symbol: {}", self.r_type),
},
&ResolvedSymbol::Null(object_id) => match self.r_type {
// TLS module ID
@ -64,10 +58,13 @@ impl Relocation for Rela {
// B + A
R_X86_64_RELATIVE => Ok(Some(RelaValue::QWord(load_base as i64 + self.r_addend))),
_ => todo!("Unsupported relocation against NULL symbol: {}", self.r_type),
_ => todo!(
"Unsupported relocation against NULL symbol: {}",
self.r_type
),
},
_ => {
let s = symbol.value() as i64;
let s = symbol.value();
if s == 0 {
todo!()
}
@ -158,7 +158,7 @@ impl State {
}
}
pub fn undefined(&mut self, source: impl AsRef<Path>, sym: &Rc<str>) {
pub fn undefined(&mut self, _source: impl AsRef<Path>, _sym: &Rc<str>) {
todo!()
// let source = source.as_ref();
// let list = if let Some(list) = self.undefined_references.get_mut(source) {
@ -1,7 +1,5 @@
use std::collections::HashMap;
use elf::segment::ProgramHeader;
use crate::object::Object;
use super::{TlsLayout, TlsLayoutBuilder, TlsSegment};
@ -35,7 +33,6 @@ impl TlsLayoutBuilder for TlsLayoutImpl {
if let Some(tls) = root.tls.as_ref() {
let size = tls.p_memsz as usize;
let aligned_size = (size + align - 1) & !(align - 1);
let pad = aligned_size - size;
// module_id, offset from tp, full size
modules.push((0, size, aligned_size));
@ -48,7 +48,7 @@ pub struct Window<'a> {
on_focus_changed: Box<dyn OnFocusChanged>,
}
impl<'a> Window<'a> {
impl Window<'_> {
pub fn new(application: &Application) -> Result<Self, Error> {
let mut connection = application.connection.lock().unwrap();
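
Aside: a toy example (struct invented) of the lifetime elision applied above; when a named lifetime parameter on an impl block is never referred to again inside the block, newer clippy/rustc warnings suggest writing it as '_.

    struct Title<'a> {
        text: &'a str,
    }

    // `impl<'a> Title<'a>` works too; `'_` avoids naming a lifetime that
    // carries no extra information here.
    impl Title<'_> {
        fn byte_len(&self) -> usize {
            self.text.len()
        }
    }
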
@ -10,10 +10,16 @@ use std::{
pub struct RawMode(TerminalOptions);
impl RawMode {
/// # Safety
///
/// May leave the terminal in broken state, unsafe.
pub unsafe fn enter(stdin: &Stdin) -> io::Result<Self> {
update_terminal_options(stdin, |_| TerminalOptions::raw_input()).map(RawMode)
}
/// # Safety
///
/// May leave the terminal in broken state, unsafe.
pub unsafe fn leave(&self, stdin: &Stdin) {
update_terminal_options(stdin, |_| self.0).ok();
}
@ -103,7 +103,7 @@ impl ArcWake for Task {
}
thread_local! {
static RUNTIME: OnceCell<Arc<RuntimeInner>> = OnceCell::new();
static RUNTIME: OnceCell<Arc<RuntimeInner>> = const { OnceCell::new() };
}
impl RuntimeInner {
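
Aside: a self-contained sketch of the const thread-local initializer adopted above. Clippy suggests a const block when the initializer can be evaluated at compile time, which lets the runtime skip the lazy-init check; this uses std's OnceCell rather than the project's type.

    use std::cell::OnceCell;

    thread_local! {
        // `OnceCell::new()` is a const fn, so the slot needs no lazy initialization.
        static COUNTER: OnceCell<u32> = const { OnceCell::new() };
    }

    fn main() {
        COUNTER.with(|cell| {
            cell.get_or_init(|| 42);
            assert_eq!(cell.get().copied(), Some(42));
        });
    }
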
@ -51,7 +51,7 @@ fn generate_header(config_path: impl AsRef<Path>, header_output: impl AsRef<Path
config
.export
.rename
.extend(RENAMES.into_iter().map(|&(x, y)| (x.into(), y.into())));
.extend(RENAMES.iter().map(|&(x, y)| (x.into(), y.into())));
config.cpp_compat = true;
cbindgen::Builder::new()
@ -188,9 +188,8 @@ fn main() {
fs::read_dir("src/headers")
.unwrap()
.into_iter()
.filter_map(Result::ok)
.filter(|d| include_dir(d))
.filter(include_dir)
.map(|d| d.path().as_path().join("cbindgen.toml"))
.filter(|p| p.exists())
.for_each(|p| {
@ -7,14 +7,17 @@ use core::{
};
use alloc::{borrow::ToOwned, ffi::CString, vec::Vec};
use yggdrasil_rt::process::{thread_local, ProgramArgumentInner};
// use yggdrasil_rt::process::ProgramArgumentInner;
use yggdrasil_rt::process::ProgramArgumentInner;
use crate::{
allocator::{c_alloc, c_free}, error::EResult, headers::{
allocator::{c_alloc, c_free},
error::EResult,
headers::{
errno,
string::{mem::memcpy, str::strlen},
}, thread::{self, tls}, util::PointerExt
},
thread,
util::PointerExt,
};
#[no_mangle]
@ -155,6 +158,7 @@ pub unsafe fn remove_env(name: &[u8]) -> EResult<bool> {
reclaim_env()?;
#[allow(clippy::needless_range_loop)]
for i in 0..shadow.len() {
let Some(entry) = NonNull::new(shadow[i]) else {
continue;
@ -144,7 +144,7 @@ impl Errno {
}
pub fn from_c_int(v: c_int) -> Option<Self> {
if v < 0 || v > MAX_ERROR.0 {
if !(0..=MAX_ERROR.0).contains(&v) {
None
} else {
Some(Self(v))
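
Aside: a tiny sketch of the clippy::manual_range_contains rewrite used above; a RangeInclusive states the bounds once. The constant and its value here are made up.

    const MAX_ERROR: i32 = 133;

    fn is_valid_errno(v: i32) -> bool {
        // Equivalent to `v >= 0 && v <= MAX_ERROR`, without repeating `v`.
        (0..=MAX_ERROR).contains(&v)
    }
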
@ -140,9 +140,7 @@ unsafe extern "C" fn fcntl(fd: c_int, cmd: c_int, _args: ...) -> CIntCountResult
// TODO kernel support for fcntl
let _file = RawFile::e_try_from(fd)?;
match cmd {
_ => todo!("fcntl({}, {}, ...)", fd, cmd),
}
todo!("fcntl({}, {}, ...)", fd, cmd);
}
unsafe fn vopenat(atfd: c_int, pathname: *const c_char, opts: c_int, mut ap: VaList) -> CFdResult {
@ -1,32 +1,13 @@
use core::{
ffi::{c_int, c_void},
mem::MaybeUninit,
ptr::null_mut,
sync::atomic::{AtomicPtr, Ordering},
};
use core::ffi::c_int;
use alloc::{boxed::Box, collections::BTreeMap, sync::Arc};
use yggdrasil_rt::{
mem::{MappingFlags, MappingSource},
process::{ThreadId, ThreadSpawnOptions},
};
use crate::{
error::{EResult, ResultExt},
headers::errno,
sync::Mutex,
};
use super::sys_types::{pthread_attr_t, pthread_t};
mod attr;
mod barrier;
mod cond;
mod mutex;
mod rwlock;
mod spin;
mod thread;
mod tls;
pub mod attr;
pub mod barrier;
pub mod cond;
pub mod mutex;
pub mod rwlock;
pub mod spin;
pub mod thread;
pub mod tls;
// PTHREAD_CANCEL_ASYNCHRONOUS
// PTHREAD_CANCEL_ENABLE
@ -38,21 +19,15 @@ mod tls;
// PTHREAD_EXPLICIT_SCHED
// PTHREAD_INHERIT_SCHED
// PTHREAD_ONCE_INIT
// PTHREAD_PRIO_INHERIT
// PTHREAD_PRIO_NONE
// PTHREAD_PRIO_PROTECT
// PTHREAD_PROCESS_SHARED
// PTHREAD_PROCESS_PRIVATE
// PTHREAD_SCOPE_PROCESS
// PTHREAD_SCOPE_SYSTEM
//
pub const PTHREAD_PRIO_NONE: c_int = 0;
pub const PTHREAD_PRIO_INHERIOT: c_int = 1;
pub const PTHREAD_PRIO_INHERIT: c_int = 1;
pub const PTHREAD_PRIO_PROTECT: c_int = 2;
pub const PTHREAD_PROCESS_PRIVATE: c_int = 0;
pub const PTHREAD_PROCESS_SHARED: c_int = 1;
// int pthread_once(pthread_once_t *, void (*)(void));
@ -1,4 +1,4 @@
use core::ffi::{c_int, c_void};
use core::{ffi::{c_int, c_void}, ptr::NonNull};
use crate::{
error::CIntZeroResult,
@ -75,7 +75,9 @@ unsafe extern "C" fn pthread_getschedparam(
#[no_mangle]
unsafe extern "C" fn pthread_join(thread: pthread_t, result: *mut *mut c_void) -> CIntZeroResult {
let thread = Thread::join(thread)?;
// TODO write result
if let Some(result) = NonNull::new(result) {
result.write(thread.result());
}
CIntZeroResult::SUCCESS
}
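
Aside: an illustrative sketch (not the libc's real signature) of the nullable out-pointer pattern used in pthread_join above; NonNull::new makes the null check explicit before writing.

    use core::ptr::NonNull;

    /// Writes `value` through `out` if `out` is non-null, a common shape for
    /// optional C out-parameters.
    unsafe fn write_out(out: *mut u64, value: u64) {
        if let Some(out) = NonNull::new(out) {
            out.write(value);
        }
    }
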
@ -229,7 +229,7 @@ unsafe extern "C" fn sigismember(_mask: *const sigset_t, _signum: c_int) -> c_in
#[no_mangle]
unsafe extern "C" fn signal(handler: sig_handler_t, signum: c_int) -> sig_handler_t {
// Validate handler
let address: usize = core::mem::transmute(handler);
let address = handler as usize;
// NULL or SIG_ERR
if address == 0 || address == 1 {
yggdrasil_rt::debug_trace!("libc: signal() was passed an invalid handler");
@ -27,9 +27,6 @@ unsafe extern "C" fn fwrite(
nmemb: usize,
fp: *mut FILE,
) -> CUsizeResult {
if fp.is_null() {
loop {}
}
let size = size.checked_mul(nmemb).expect("size * nmemb too large");
let buf = buf.cast::<u8>().ensure_slice(size);
let fp = fp.ensure_mut();
@ -46,8 +46,8 @@ fn fmt_float_exp<W: Write + fmt::Write>(
let f_len = output.write(string.as_bytes())?;
let _e_len = match write!(output, "{}{:+03}", exp_fmt as char, exp) {
Ok(count) => count,
match write!(output, "{}{:+03}", exp_fmt as char, exp) {
Ok(()) => (),
// TODO proper error code
Err(_) => return EResult::Err(errno::EINVAL),
};
@ -28,7 +28,7 @@ struct StringWriter {
trait FmtWriter: Write + fmt::Write {}
impl<'w, W: Write> fmt::Write for FileWriter<'w, W> {
impl<W: Write> fmt::Write for FileWriter<'_, W> {
fn write_str(&mut self, s: &str) -> fmt::Result {
self.0
.write_all(s.as_bytes())
@ -36,7 +36,7 @@ impl<'w, W: Write> fmt::Write for FileWriter<'w, W> {
}
}
impl<'w, W: Write> Write for FileWriter<'w, W> {
impl<W: Write> Write for FileWriter<'_, W> {
fn write(&mut self, data: &[u8]) -> EResult<usize> {
self.0.write(data)
}
@ -46,7 +46,7 @@ impl<'w, W: Write> Write for FileWriter<'w, W> {
}
}
impl<'w, W: Write> FmtWriter for FileWriter<'w, W> {}
impl<W: Write> FmtWriter for FileWriter<'_, W> {}
impl StringWriter {
pub fn new(buffer: NonNull<c_char>, capacity: usize) -> Self {
@ -89,7 +89,7 @@ impl Write for StringWriter {
impl FmtWriter for StringWriter {}
fn printf_inner<W: FmtWriter>(output: &mut W, format: &[u8], mut ap: VaList) -> EResult<usize> {
let mut fmt = format.into_iter();
let mut fmt = format.iter();
let mut count = 0;
while let Some(&c) = fmt.next() {
@ -23,7 +23,7 @@ impl ScanCharSet {
let mut start: Option<u8> = None;
let mut set = ScanCharSet::new();
while let Some(&ch) = it.next() {
for &ch in it.by_ref() {
if ch == b'^' && maybe_close_bracket {
// "[^...]"
set.set_invert();
@ -75,11 +75,9 @@ impl ScanCharSet {
} else {
return Err(FormatError);
}
} else {
if let Some(_start) = start.replace(ch) {
// "c" without range
set.insert(ch);
}
} else if let Some(_start) = start.replace(ch) {
// "c" without range
set.insert(ch);
}
}
@ -112,4 +110,3 @@ impl ScanCharSet {
self.invert ^ self.contains(ch)
}
}
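
Aside: a small illustration of the clippy::while_let_on_iterator fix used above; a for loop over iter.by_ref() behaves the same and keeps the iterator usable after the loop. The function is invented.

    fn take_digits(input: &[u8]) -> (Vec<u8>, Vec<u8>) {
        let mut it = input.iter().copied();
        let mut digits = Vec::new();

        // Same behaviour as `while let Some(ch) = it.next()`.
        for ch in it.by_ref() {
            if !ch.is_ascii_digit() {
                break;
            }
            digits.push(ch);
        }

        // Whatever `it` has not yet yielded is still available here.
        (digits, it.collect())
    }
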
@ -32,7 +32,7 @@ fn scanf_inner<G: GetChar>(
format: &[u8],
mut ap: VaList,
) -> EResult<usize> {
let mut it = format.into_iter();
let mut it = format.iter();
let mut matched = 0;
let mut skip_space = false;
@ -200,7 +200,7 @@ unsafe extern "C" fn vsscanf(
) -> CEofResult {
let input = input.ensure_cstr();
let format = format.ensure_cstr();
let mut str_it = input.to_bytes().into_iter();
let mut str_it = input.to_bytes().iter();
let mut reader = ScanReader::new(&mut str_it);
let count = scanf_inner(&mut reader, format.to_bytes(), args)?;
CEofResult::success(count.try_into().unwrap())
@ -18,9 +18,8 @@ unsafe extern "C" fn calloc(size: usize, nmemb: usize) -> CPtrResult<c_void> {
#[no_mangle]
unsafe extern "C" fn free(ptr: *mut c_void) {
match NonNull::new(ptr) {
Some(ptr) => allocator::c_free(ptr),
None => (),
if let Some(ptr) = NonNull::new(ptr) {
allocator::c_free(ptr);
}
}
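
Aside: a minimal sketch of the clippy::single_match cleanup shown above; a match whose second arm does nothing reads better as if let.

    fn release(slot: Option<Box<u32>>) {
        // Previously: `match slot { Some(v) => drop(v), None => () }`.
        if let Some(value) = slot {
            drop(value);
        }
    }
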
Some files were not shown because too many files have changed in this diff