x86_64: enable PDPE1GB support

Mark Poliakov 2024-12-28 12:52:24 +02:00
parent 46854c0f81
commit 37f8182eae
2 changed files with 92 additions and 69 deletions


@@ -95,10 +95,17 @@ cpuid_features! {
     ]
 }
 
+cpuid_features! {
+    pub ExtEdxFeatures: u32 [
+        PDPE1GB: 26
+    ]
+}
+
 #[derive(Clone, Copy, Debug)]
 pub struct CpuFeatures {
     pub ecx: EcxFeatures,
     pub edx: EdxFeatures,
+    pub ext_edx: ExtEdxFeatures,
 }
 
 impl CpuFeatures {
@@ -106,6 +113,7 @@ impl CpuFeatures {
         Self {
             ecx: EcxFeatures::empty(),
             edx: EdxFeatures::empty(),
+            ext_edx: ExtEdxFeatures::empty(),
         }
     }
@@ -120,6 +128,7 @@ impl CpuFeatures {
         Err(Self {
             ecx: features.ecx & !self.ecx,
             edx: features.edx & !self.edx,
+            ext_edx: features.ext_edx & !self.ext_edx,
         })
     }
 }
@@ -132,6 +141,7 @@ impl BitAnd<CpuFeatures> for CpuFeatures {
         Self {
             ecx: self.ecx & rhs.ecx,
             edx: self.edx & rhs.edx,
+            ext_edx: self.ext_edx & rhs.ext_edx,
         }
     }
 }
@@ -143,6 +153,7 @@ impl BitOr<CpuFeatures> for CpuFeatures {
         Self {
             ecx: self.ecx | rhs.ecx,
             edx: self.edx | rhs.edx,
+            ext_edx: self.ext_edx | rhs.ext_edx,
         }
     }
 }
@@ -151,8 +162,9 @@ impl CpuFeatureSet for CpuFeatures {
     fn iter(&self) -> impl Iterator<Item = &'static str> {
         let ecx = self.ecx.iter().map(|e| e.as_str());
         let edx = self.edx.iter().map(|e| e.as_str());
+        let ext_edx = self.ext_edx.iter().map(|e| e.as_str());
 
-        core::iter::chain(ecx, edx)
+        core::iter::chain(core::iter::chain(ecx, edx), ext_edx)
     }
 }
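
The new line nests the core::iter::chain free function. On stable Rust, the same three-way concatenation is usually written with the Iterator::chain method; a standalone sketch with toy feature names, purely to show the equivalence:

fn main() {
    let ecx = ["sse3", "xsave"].into_iter();
    let edx = ["pge", "pse"].into_iter();
    let ext_edx = ["pdpe1gb"].into_iter();
    // Same result as core::iter::chain(core::iter::chain(ecx, edx), ext_edx).
    let all: Vec<&str> = ecx.chain(edx).chain(ext_edx).collect();
    assert_eq!(all, ["sse3", "xsave", "pge", "pse", "pdpe1gb"]);
}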
@@ -190,20 +202,26 @@ unsafe fn raw_cpuid(eax: u32, result: &mut [u32]) {
     );
 }
 
-fn cpuid_features() -> (EcxFeatures, EdxFeatures) {
+fn cpuid_features() -> (EcxFeatures, EdxFeatures, ExtEdxFeatures) {
     let mut raw = [0; 3];
 
     unsafe {
         raw_cpuid(0x1, &mut raw);
     }
 
-    (
-        EcxFeatures::from_bits_truncate(raw[2]),
-        EdxFeatures::from_bits_truncate(raw[1]),
-    )
+    let ecx = EcxFeatures::from_bits_truncate(raw[2]);
+    let edx = EdxFeatures::from_bits_truncate(raw[1]);
+
+    unsafe {
+        raw_cpuid(0x80000001, &mut raw);
+    }
+
+    let ext_edx = ExtEdxFeatures::from_bits_truncate(raw[1]);
+
+    (ecx, edx, ext_edx)
 }
 
-fn enable_features(ecx: EcxFeatures, edx: EdxFeatures) {
+fn enable_features(ecx: EcxFeatures, edx: EdxFeatures, _ext_edx: ExtEdxFeatures) {
     if ecx.contains(EcxFeatures::XSAVE) {
         CR4.modify(CR4::OSXSAVE::SET);
     }
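
For context: raw_cpuid executes CPUID for the given EAX leaf, and the new call queries extended leaf 0x80000001, whose EDX bit 26 is PDPE1GB (1 GiB page support). Outside the kernel, the same probe can be sketched with the core::arch intrinsic; this is an illustration, not the kernel's code path:

#[cfg(target_arch = "x86_64")]
fn detect_pdpe1gb() -> bool {
    use core::arch::x86_64::__cpuid;
    // Confirm the extended leaf exists before querying it.
    let max_ext = unsafe { __cpuid(0x8000_0000) }.eax;
    if max_ext < 0x8000_0001 {
        return false;
    }
    // PDPE1GB: leaf 0x80000001, EDX bit 26 — the bit behind the new flag above.
    unsafe { __cpuid(0x8000_0001) }.edx & (1 << 26) != 0
}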
@@ -238,8 +256,8 @@ fn enable_features(ecx: EcxFeatures, edx: EdxFeatures) {
 }
 
 fn read_features() -> CpuFeatures {
-    let (ecx, edx) = cpuid_features();
-    CpuFeatures { ecx, edx }
+    let (ecx, edx, ext_edx) = cpuid_features();
+    CpuFeatures { ecx, edx, ext_edx }
 }
 
 pub fn setup_features(
@@ -253,7 +271,7 @@ pub fn setup_features(
         return (have_features, Err(missing_features));
     }
 
-    enable_features(will_features.ecx, will_features.edx);
+    enable_features(will_features.ecx, will_features.edx, will_features.ext_edx);
 
     (have_features, Ok(will_features))
 }
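
Taken together, the setup_features flow is plain bit masking: the CPU's reported features are intersected with what the kernel asks for, and required bits the CPU lacks become the Err value via the features & !self expression from an earlier hunk. A toy model of that masking, with made-up flag values:

fn missing(required: u32, have: u32) -> u32 {
    // Bits that were requested but not reported by CPUID.
    required & !have
}

fn main() {
    let have = 0b1010; // reported by the CPU
    let required = 0b1110; // needed by the kernel
    // 0b0100 is required but absent, so feature setup would fail.
    assert_eq!(missing(required, have), 0b0100);
}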


@@ -8,7 +8,7 @@ use alloc::{boxed::Box, sync::Arc};
 use apic::{ioapic::IoApic, local::LocalApic};
 use device_api::device::Device;
 use kernel_arch_x86::{
-    cpuid::{self, CpuFeatures, EcxFeatures, EdxFeatures},
+    cpuid::{self, CpuFeatures, EcxFeatures, EdxFeatures, ExtEdxFeatures},
     gdt,
 };
 use kernel_arch_x86_64::{
@@ -136,6 +136,7 @@ impl X86_64 {
         it: I,
         _memory_start: PhysicalAddress,
         memory_end: PhysicalAddress,
+        have_1gib_pages: bool,
     ) -> Result<(), Error> {
         let end_l1i = memory_end
             .into_usize()
@@ -151,69 +152,69 @@ impl X86_64 {
         MEMORY_LIMIT.store(memory_end.into_usize(), Ordering::Release);
 
-        // Check if 1GiB pages are supported
-        // TODO
-        // if PROCESSOR_FEATURES
-        //     .get()
-        //     .contains(ProcessorFeatures::PDPE1GB)
-        // {
-        //     // Just map gigabytes of RAM
-        //     for l1i in 0..end_l1i {
-        //         // TODO NX
-        //         unsafe {
-        //             RAM_MAPPING_L1[l1i] = PageEntry::<L1>::block(
-        //                 PhysicalAddress::from_usize(l1i * L1::SIZE),
-        //                 PageAttributes::WRITABLE,
-        //             );
-        //         }
-        //     }
-        // } else {
-        // Allocate the intermediate tables first
-        let l2_tables_start = phys::find_contiguous_region(it, end_l1i)
-            .expect("Could not allocate the memory for RAM mapping L2 tables");
-        reserve_region(
-            "ram-l2-tables",
-            PhysicalMemoryRegion {
-                base: l2_tables_start,
-                size: end_l1i * L3::SIZE,
-            },
-        );
-        // Fill in the tables
-        for l1i in 0..end_l1i {
-            let l2_phys_addr = l2_tables_start.add(l1i * L3::SIZE);
-            // Safety: ok, the mapping is done to the memory obtained from
-            // find_contiguous_region()
-            let mut l2_data = unsafe { EarlyMapping::<[PageEntry<L2>; 512]>::map(l2_phys_addr)? };
-            // Safety: ok, the slice comes from EarlyMapping of a page-aligned region
-            let l2 = unsafe { PageTable::from_raw_slice_mut(l2_data.deref_mut()) };
-            for l2i in 0..512 {
+        if have_1gib_pages {
+            // Map RAM a gigabyte at a time
+            for l1i in 0..end_l1i {
                 // TODO NX
-                l2[l2i] = PageEntry::<L2>::block(
-                    PhysicalAddress::from_usize((l1i * L1::SIZE) | (l2i * L2::SIZE)),
-                    PageAttributes::WRITABLE,
-                );
+                unsafe {
+                    RAM_MAPPING_L1[l1i] = PageEntry::<L1>::block(
+                        PhysicalAddress::from_usize(l1i * L1::SIZE),
+                        PageAttributes::WRITABLE,
+                    );
+                    flush_tlb_entry(RAM_MAPPING_OFFSET + (l1i << L1::SHIFT));
+                }
             }
+        } else {
+            // Allocate the intermediate tables first
+            let l2_tables_start = phys::find_contiguous_region(it, end_l1i)
+                .expect("Could not allocate the memory for RAM mapping L2 tables");
-            // Point the L1 entry to the L2 table
-            unsafe {
-                RAM_MAPPING_L1[l1i] = PageEntry::<L1>::table(l2_phys_addr, PageAttributes::WRITABLE)
-            };
+            reserve_region(
+                "ram-l2-tables",
+                PhysicalMemoryRegion {
+                    base: l2_tables_start,
+                    size: end_l1i * L3::SIZE,
+                },
+            );
-            unsafe { flush_tlb_entry(RAM_MAPPING_OFFSET + (l1i << L1::SHIFT)) };
-            intrinsics::flush_cpu_cache();
-            // The EarlyMapping is then dropped
+            // Fill in the tables
+            for l1i in 0..end_l1i {
+                let l2_phys_addr = l2_tables_start.add(l1i * L3::SIZE);
+                // Safety: ok, the mapping is done to the memory obtained from
+                // find_contiguous_region()
+                let mut l2_data =
+                    unsafe { EarlyMapping::<[PageEntry<L2>; 512]>::map(l2_phys_addr)? };
+                // Safety: ok, the slice comes from EarlyMapping of a page-aligned region
+                let l2 = unsafe { PageTable::from_raw_slice_mut(l2_data.deref_mut()) };
+                for l2i in 0..512 {
+                    // TODO NX
+                    l2[l2i] = PageEntry::<L2>::block(
+                        PhysicalAddress::from_usize((l1i * L1::SIZE) | (l2i * L2::SIZE)),
+                        PageAttributes::WRITABLE,
+                    );
+                }
+                // Point the L1 entry to the L2 table
+                unsafe {
+                    RAM_MAPPING_L1[l1i] =
+                        PageEntry::<L1>::table(l2_phys_addr, PageAttributes::WRITABLE)
+                };
+                unsafe { flush_tlb_entry(RAM_MAPPING_OFFSET + (l1i << L1::SHIFT)) };
+                intrinsics::flush_cpu_cache();
+                // The EarlyMapping is then dropped
+            }
+        }
-        // }
 
         Ok(())
     }
 
-    unsafe fn init_physical_memory_from_yboot(data: &LoadProtocolV1) -> Result<(), Error> {
+    unsafe fn init_physical_memory_from_yboot(
+        data: &LoadProtocolV1,
+        have_1gib_pages: bool,
+    ) -> Result<(), Error> {
         let mmap = EarlyMapping::<AvailableMemoryRegion>::map_slice(
             PhysicalAddress::from_u64(data.memory_map.address),
             data.memory_map.len as usize,
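
The two branches above differ only in mapping granularity: with PDPE1GB, each RAM_MAPPING_L1 entry maps a whole 1 GiB block, while the fallback fills a 512-entry L2 table with 2 MiB blocks covering the same gigabyte. Assuming the conventional x86_64 shifts (L1::SHIFT = 30, L2::SHIFT = 21; the diff itself does not show the constants), the base-address math works out like this:

const L1_SHIFT: u32 = 30; // 1 GiB per L1 entry (assumed)
const L2_SHIFT: u32 = 21; // 2 MiB per L2 entry (assumed)

// Physical base of 2 MiB block `l2i` inside gigabyte `l1i`, mirroring
// `(l1i * L1::SIZE) | (l2i * L2::SIZE)` in the fallback path.
fn block_base(l1i: usize, l2i: usize) -> usize {
    (l1i << L1_SHIFT) | (l2i << L2_SHIFT)
}

fn main() {
    // 512 blocks of 2 MiB tile one 1 GiB L1 entry exactly.
    assert_eq!(block_base(0, 512), block_base(1, 0));
    assert_eq!(block_base(2, 5), (2usize << 30) | (5 << 21));
}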
@@ -224,11 +225,13 @@ impl X86_64 {
                 base: PhysicalAddress::from_u64(reg.start_address),
                 size: reg.page_count as usize * L3::SIZE,
             }),
-            Self::map_physical_memory,
+            |it, start, end| Self::map_physical_memory(it, start, end, have_1gib_pages),
         )
     }
 
-    unsafe fn init_memory_management(&self) -> Result<(), Error> {
+    unsafe fn init_memory_management(&self, enabled_features: &CpuFeatures) -> Result<(), Error> {
+        let have_1gib_pages = enabled_features.ext_edx.contains(ExtEdxFeatures::PDPE1GB);
+
         init_fixed_tables();
 
         // Reserve lower 4MiB just in case
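
A small side effect of the extra parameter is visible above: Self::map_physical_memory no longer fits the callback slot as a bare function item, so a closure captures have_1gib_pages and forwards the rest. The general pattern, with invented signatures:

fn for_each_region(callback: impl Fn(u64, u64)) {
    // Hypothetical iteration over (base, size) pairs.
    callback(0x0, 0x1_0000);
}

fn map_region(base: u64, size: u64, large_pages: bool) {
    let _ = (base, size, large_pages);
}

fn main() {
    let large_pages = true;
    // `map_region` itself is not a Fn(u64, u64); the closure adapts it
    // by capturing the extra flag from the environment.
    for_each_region(|base, size| map_region(base, size, large_pages));
}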
@@ -241,7 +244,7 @@ impl X86_64 {
         );
 
         match self.boot_data.get() {
-            &BootData::YBoot(data) => Self::init_physical_memory_from_yboot(data)?,
+            &BootData::YBoot(data) => Self::init_physical_memory_from_yboot(data, have_1gib_pages)?,
         }
 
         Ok(())
@@ -252,7 +255,7 @@ impl X86_64 {
         if cpu_id == 0 {
             PLATFORM
-                .init_memory_management()
+                .init_memory_management(&enabled_features)
                 .expect("Could not initialize memory management");
         }
@@ -305,6 +308,7 @@ impl X86_64 {
                     | EcxFeatures::SSE4_2
                     | EcxFeatures::AVX,
                 edx: EdxFeatures::empty(),
+                ext_edx: ExtEdxFeatures::PDPE1GB,
             },
             CpuFeatures {
                 ecx: EcxFeatures::XSAVE,
@@ -313,6 +317,7 @@ impl X86_64 {
                     | EdxFeatures::PGE
                     | EdxFeatures::PSE
                     | EdxFeatures::FPU,
+                ext_edx: ExtEdxFeatures::empty(),
             },
         );
         let will_features = will_features.expect("Could not initialize CPU features");
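
Since PDPE1GB is grouped with the SSE/AVX extras rather than with the hard requirements (FPU, PSE, PGE), boot evidently proceeds on CPUs without 1 GiB pages, and init_memory_management falls back to the 2 MiB path. Downstream, the check is ordinary flag containment; a self-contained sketch of the same shape using the bitflags crate (the kernel uses its own cpuid_features! macro, so this is only an approximation):

use bitflags::bitflags; // bitflags = "2" in Cargo.toml

bitflags! {
    #[derive(Clone, Copy)]
    pub struct ExtEdxFeatures: u32 {
        const PDPE1GB = 1 << 26; // CPUID 0x80000001, EDX bit 26
    }
}

fn main() {
    let negotiated = ExtEdxFeatures::from_bits_truncate(1 << 26);
    // Same shape as the check in init_memory_management above.
    let have_1gib_pages = negotiated.contains(ExtEdxFeatures::PDPE1GB);
    assert!(have_1gib_pages);
}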