From 3d72dd1bc0429a1918770a03e308f5a3b19fadcf Mon Sep 17 00:00:00 2001
From: HoShiMin
Date: Mon, 8 Jun 2020 00:08:03 +0300
Subject: [PATCH] Extremely fast & unsafe physmem resolver

---
 CommonTypes/PTE.h                           |   6 +-
 Kernel-Bridge/API/Hypervisor.cpp            | 254 +++++++++++++++++---
 Kernel-Bridge/API/PteUtils.cpp              |  67 ++++--
 Kernel-Bridge/API/PteUtils.h                |   3 +-
 Kernel-Bridge/API/Stopwatch.cpp             |  46 ++++
 Kernel-Bridge/API/Stopwatch.h               |  19 ++
 Kernel-Bridge/API/VMM.asm                   |   3 +
 Kernel-Bridge/Kernel-Bridge.vcxproj         |   2 +
 Kernel-Bridge/Kernel-Bridge.vcxproj.filters |   6 +
 Kernel-Tests/Main.cpp                       |  11 +-
 10 files changed, 352 insertions(+), 65 deletions(-)
 create mode 100644 Kernel-Bridge/API/Stopwatch.cpp
 create mode 100644 Kernel-Bridge/API/Stopwatch.h

diff --git a/CommonTypes/PTE.h b/CommonTypes/PTE.h
index e118581..f2397ca 100644
--- a/CommonTypes/PTE.h
+++ b/CommonTypes/PTE.h
@@ -197,7 +197,7 @@ union PDPE {
         unsigned long long PCD : 1; // Page-Level Cache Disable
         unsigned long long A : 1; // Accessed
         unsigned long long Reserved0 : 1;
-        unsigned long long PS : 1; // PageSize == 0
+        unsigned long long PS : 1; // PageSize
         unsigned long long Reserved1 : 1;
         unsigned long long AVL : 3; // Available to software
         unsigned long long Reserved3 : 51;
@@ -299,7 +299,7 @@ union PDE {
         unsigned long long PCD : 1; // Page-Level Cache Disable
         unsigned long long A : 1; // Accessed
         unsigned long long Reserved0 : 1;
-        unsigned long long PS : 1; // PageSize == 0
+        unsigned long long PS : 1; // PageSize
         unsigned long long Reserved1 : 1;
         unsigned long long AVL : 3; // Available to software
         unsigned long long Reserved2 : 51;
@@ -349,7 +349,7 @@ union PDE {
         unsigned long long PCD : 1; // Page-Level Cache Disable
         unsigned long long A : 1; // Accessed
         unsigned long long Reserved0 : 1;
-        unsigned long long PS : 1; // PageSize == 0
+        unsigned long long PS : 1; // PageSize
         unsigned long long Reserved1 : 1;
         unsigned long long AVL : 3; // Available to software
         unsigned long long Reserved2 : 51;
diff --git a/Kernel-Bridge/API/Hypervisor.cpp b/Kernel-Bridge/API/Hypervisor.cpp
index 7f2809d..0d4ecf1 100644
--- a/Kernel-Bridge/API/Hypervisor.cpp
+++ b/Kernel-Bridge/API/Hypervisor.cpp
@@ -93,6 +93,67 @@ namespace Supplementation {
         PhysicalMemory::FreePhysicalMemory(Memory);
     }
+
+    namespace FastPhys
+    {
+        // As is from ntoskrnl.exe disassembly (VirtualAddress may be unaligned):
+        inline static unsigned long long MiGetPteAddress(unsigned long long VirtualAddress)
+        {
+            return 0xFFFFF680'00000000ull + ((VirtualAddress >> 9ull) & 0x7FFFFFFFF8ull);
+        }
+
+        // To fixup differences between different kernels:
+        static const unsigned long long g_PteCorrective = []() -> unsigned long long
+        {
+            unsigned long long TestVa = reinterpret_cast<unsigned long long>(&g_PteCorrective);
+
+            /* Manual traversal to obtain a valid PTE pointer in system memory */
+
+            VIRTUAL_ADDRESS Va = { TestVa };
+
+            auto Pml4ePhys = PFN_TO_PAGE(CR3{ __readcr3() }.x64.Bitmap.PML4) + Va.x64.NonPageSize.Generic.PageMapLevel4Offset * sizeof(PML4E);
+            const PML4E* Pml4e = reinterpret_cast<const PML4E*>(MmGetVirtualForPhysical(PHYSICAL_ADDRESS{ .QuadPart = static_cast<long long>(Pml4ePhys) }));
+
+            auto PdpePhys = PFN_TO_PAGE(Pml4e->x64.Generic.PDP) + Va.x64.NonPageSize.Generic.PageDirectoryPointerOffset * sizeof(PDPE);
+            const PDPE* Pdpe = reinterpret_cast<const PDPE*>(MmGetVirtualForPhysical(PHYSICAL_ADDRESS{ .QuadPart = static_cast<long long>(PdpePhys) }));
+
+            auto PdePhys = PFN_TO_PAGE(Pdpe->x64.NonPageSize.Generic.PD) + Va.x64.NonPageSize.Generic.PageDirectoryOffset * sizeof(PDE);
+            const PDE* Pde = reinterpret_cast<const PDE*>(MmGetVirtualForPhysical(PHYSICAL_ADDRESS{ .QuadPart = static_cast<long long>(PdePhys) }));
+
+            auto PtePhys = PFN_TO_PAGE(Pde->x64.Page4Kb.PT) + Va.x64.NonPageSize.Page4Kb.PageTableOffset * sizeof(PTE);
+            const PTE* Pte = reinterpret_cast<const PTE*>(MmGetVirtualForPhysical(PHYSICAL_ADDRESS{ .QuadPart = static_cast<long long>(PtePhys) }));
+
+            /* Then get a PTE pointer by MiGetPteAddress and calculate a difference */
+
+            unsigned long long PteByMi = MiGetPteAddress(TestVa & 0xFFFFFFFFFFFFF000ull);
+
+            return reinterpret_cast<unsigned long long>(Pte) - PteByMi;
+        }();
+
+        inline unsigned long long GetPhysAddressFast4KbUnsafe(unsigned long long Va)
+        {
+            return PFN_TO_PAGE(reinterpret_cast<const PTE*>(MiGetPteAddress(Va) + g_PteCorrective)->x64.Page4Kb.PhysicalPageFrameNumber) + (Va & 0xFFF);
+        }
+
+        unsigned long long GetPhysAddressFast4Kb(unsigned long long Cr3, unsigned long long VirtualAddress)
+        {
+            VIRTUAL_ADDRESS Va = { VirtualAddress };
+
+            auto Pml4ePhys = PFN_TO_PAGE(CR3{ Cr3 }.x64.Bitmap.PML4) + Va.x64.NonPageSize.Generic.PageMapLevel4Offset * sizeof(PML4E);
+            const PML4E* Pml4e = reinterpret_cast<const PML4E*>(MmGetVirtualForPhysical(PHYSICAL_ADDRESS{ .QuadPart = static_cast<long long>(Pml4ePhys) }));
+
+            auto PdpePhys = PFN_TO_PAGE(Pml4e->x64.Generic.PDP) + Va.x64.NonPageSize.Generic.PageDirectoryPointerOffset * sizeof(PDPE);
+            const PDPE* Pdpe = reinterpret_cast<const PDPE*>(MmGetVirtualForPhysical(PHYSICAL_ADDRESS{ .QuadPart = static_cast<long long>(PdpePhys) }));
+
+            auto PdePhys = PFN_TO_PAGE(Pdpe->x64.NonPageSize.Generic.PD) + Va.x64.NonPageSize.Generic.PageDirectoryOffset * sizeof(PDE);
+            const PDE* Pde = reinterpret_cast<const PDE*>(MmGetVirtualForPhysical(PHYSICAL_ADDRESS{ .QuadPart = static_cast<long long>(PdePhys) }));
+
+            auto PtePhys = PFN_TO_PAGE(Pde->x64.Page4Kb.PT) + Va.x64.NonPageSize.Page4Kb.PageTableOffset * sizeof(PTE);
+            const PTE* Pte = reinterpret_cast<const PTE*>(MmGetVirtualForPhysical(PHYSICAL_ADDRESS{ .QuadPart = static_cast<long long>(PtePhys) }));
+
+            return PFN_TO_PAGE(Pte->x64.Page4Kb.PhysicalPageFrameNumber) + Va.x64.NonPageSize.Page4Kb.PageOffset;
+        }
+    }
 }
 
 namespace VMX
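(Context note, not part of the patch: the hard-coded base 0xFFFFF680'00000000 in MiGetPteAddress is the PTE mapping produced by the historical self-reference PML4 slot 0x1ED; kernels that randomize that slot shift the whole PTE region by a constant, and that shift is what g_PteCorrective measures once at startup. A minimal sketch of the relationship — PteBaseForSelfRefIndex and PteAddressFor are illustrative helpers, not patch code:)

```cpp
#include <cstdint>

// Virtual address of the PTE array exposed through a self-referencing PML4 slot:
// the slot index goes into bits 39..47, and the result is sign-extended to canonical form.
constexpr uint64_t PteBaseForSelfRefIndex(uint64_t SelfRefIndex)
{
    return 0xFFFF000000000000ull | (SelfRefIndex << 39);
}

// Same shape as MiGetPteAddress above, but parameterized by the PTE base.
constexpr uint64_t PteAddressFor(uint64_t PteBase, uint64_t VirtualAddress)
{
    return PteBase + ((VirtualAddress >> 9) & 0x7FFFFFFFF8ull);
}

static_assert(PteBaseForSelfRefIndex(0x1ED) == 0xFFFFF680'00000000ull,
              "0x1ED is the historical, non-randomized self-map slot");
```

(If the only per-kernel difference is the base itself, g_PteCorrective reduces to PteBaseForSelfRefIndex(ActualSlot) - PteBaseForSelfRefIndex(0x1ED).)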
@@ -586,7 +647,7 @@ namespace VMX
         // For the first 1 megabyte of the physical address space:
         union
         {
-            MTRR_FIXED_GENERIC Generic[10];
+            MTRR_FIXED_GENERIC Generic[11];
             struct {
                 // 512-Kbyte range:
                 IA32_MTRR_FIX64K RangeFrom00000To7FFFF;
@@ -805,11 +866,14 @@ namespace VMX
            if (AreRangesIntersects(PhysRange, FixedRanges[i]))
            {
                MTRR_MEMORY_TYPE FixedMemType = CalcMemoryTypeByFixedMtrr(MtrrFixedGeneric, FixedRanges[i], PhysRange);
-                if (FixedMemType == MTRR_MEMORY_TYPE::Uncacheable) return MemType;
+                if (FixedMemType == MTRR_MEMORY_TYPE::Uncacheable) return FixedMemType;

                if (IsMemTypeInitialized)
                {
                    bool IsMixed = MixMtrrTypes(MemType, FixedMemType, OUT MemType);
-                    if (!IsMixed) return MTRR_MEMORY_TYPE::Uncacheable;
+                    if (!IsMixed)
+                    {
+                        return MTRR_MEMORY_TYPE::Uncacheable;
+                    }
                }
                else
                {
@@ -890,6 +954,7 @@
        DECLSPEC_ALIGN(PAGE_SIZE) EPT_PML4E Pml4e;
        DECLSPEC_ALIGN(PAGE_SIZE) EPT_PDPTE Pdpte[512];
        DECLSPEC_ALIGN(PAGE_SIZE) EPT_PDE Pde[512][512];
+        DECLSPEC_ALIGN(PAGE_SIZE) EPT_PTE PteForFirstLargePage[2 * 1048576 / 4096];
    };

    struct EPT_ENTRIES
@@ -927,16 +992,13 @@
        PAGE_HANDLER Handlers;
    };

-    static void InitializeEptTables(__out EPT_TABLES* Ept, __out EPTP* Eptp)
+    static void InitializeEptTables(__in const MTRR_INFO* MtrrInfo, __out EPT_TABLES* Ept, __out EPTP* Eptp)
    {
        using namespace PhysicalMemory;

        memset(Ept, 0, sizeof(EPT_TABLES));
        memset(Eptp, 0, sizeof(EPTP));

-        MTRR_INFO MtrrInfo = {};
-        InitMtrr(&MtrrInfo);
-
        PVOID64 Pml4ePhys = GetPhysicalAddress(&Ept->Pml4e);
        Eptp->Bitmap.EptMemoryType = static_cast<unsigned char>(MTRR_MEMORY_TYPE::WriteBack);
        Eptp->Bitmap.PageWalkLength = 3;
@@ -959,21 +1021,47 @@
            for (unsigned int j = 0; j < _ARRAYSIZE(Ept->Pde[i]); ++j)
            {
-                unsigned long long PagePfn = i * _ARRAYSIZE(Ept->Pde[i]) + j;
-                constexpr unsigned long long PageSize = 2 * 1048576; // 2 Mb
-
-                MTRR_MEMORY_TYPE MemType = MTRR_MEMORY_TYPE::Uncacheable;
-                if (MtrrInfo.IsSupported)
+                if (i == 0 && j == 0)
                {
-                    MemType = GetMtrrMemoryType(&MtrrInfo, PFN_TO_LARGE_PAGE(PagePfn), PageSize);
+                    PVOID64 PtePhys = GetPhysicalAddress(Ept->PteForFirstLargePage);
+                    Ept->Pde[i][j].Page4Kb.ReadAccess = TRUE;
+                    Ept->Pde[i][j].Page4Kb.WriteAccess = TRUE;
+                    Ept->Pde[i][j].Page4Kb.ExecuteAccess = TRUE;
+                    Ept->Pde[i][j].Page4Kb.EptPtePhysicalPfn = PAGE_TO_PFN(reinterpret_cast<unsigned long long>(PtePhys));
+
+                    for (unsigned int k = 0; k < _ARRAYSIZE(Ept->PteForFirstLargePage); ++k)
+                    {
+                        MTRR_MEMORY_TYPE MemType = MTRR_MEMORY_TYPE::Uncacheable;
+                        if (MtrrInfo->IsSupported)
+                        {
+                            MemType = GetMtrrMemoryType(MtrrInfo, PFN_TO_PAGE(static_cast<unsigned long long>(k)), PAGE_SIZE);
+                        }
+
+                        Ept->PteForFirstLargePage[k].Page4Kb.ReadAccess = TRUE;
+                        Ept->PteForFirstLargePage[k].Page4Kb.WriteAccess = TRUE;
+                        Ept->PteForFirstLargePage[k].Page4Kb.ExecuteAccess = TRUE;
+                        Ept->PteForFirstLargePage[k].Page4Kb.Type = static_cast<unsigned char>(MemType);
+                        Ept->PteForFirstLargePage[k].Page4Kb.PagePhysicalPfn = k;
+                    }
                }
+                else
+                {
+                    unsigned long long PagePfn = i * _ARRAYSIZE(Ept->Pde[i]) + j;
+                    constexpr unsigned long long LargePageSize = 2 * 1048576; // 2 Mb
+
+                    MTRR_MEMORY_TYPE MemType = MTRR_MEMORY_TYPE::Uncacheable;
+                    if (MtrrInfo->IsSupported)
+                    {
+                        MemType = GetMtrrMemoryType(MtrrInfo, PFN_TO_LARGE_PAGE(PagePfn), LargePageSize);
+                    }

-                Ept->Pde[i][j].Page2Mb.ReadAccess = TRUE;
-                Ept->Pde[i][j].Page2Mb.WriteAccess = TRUE;
-                Ept->Pde[i][j].Page2Mb.ExecuteAccess = TRUE;
-                Ept->Pde[i][j].Page2Mb.Type = static_cast<unsigned char>(MemType);
-                Ept->Pde[i][j].Page2Mb.LargePage = TRUE;
-                Ept->Pde[i][j].Page2Mb.PagePhysicalPfn = PagePfn;
+                    Ept->Pde[i][j].Page2Mb.ReadAccess = TRUE;
+                    Ept->Pde[i][j].Page2Mb.WriteAccess = TRUE;
+                    Ept->Pde[i][j].Page2Mb.ExecuteAccess = TRUE;
+                    Ept->Pde[i][j].Page2Mb.Type = static_cast<unsigned char>(MemType);
+                    Ept->Pde[i][j].Page2Mb.LargePage = TRUE;
+                    Ept->Pde[i][j].Page2Mb.PagePhysicalPfn = PagePfn;
+                }
            }
        }
    }
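(Context note, not part of the patch: the first 2 MB large page is the only one split into 4 KB EPT PTEs because the fixed-range MTRRs type the first megabyte of physical memory at 64K/16K/4K granularity, which a single 2 MB mapping cannot express. The sketch below shows how a sub-1MB physical address maps onto the fixed-range MTRR MSRs; the MSR numbers come from the Intel SDM, and LookupFixedMtrrSlot is an illustrative helper, not patch code:)

```cpp
#include <cstdint>

struct FixedMtrrSlot
{
    uint32_t Msr;   // which IA32_MTRR_FIX* register covers the address
    uint32_t Field; // which of the 8 per-range type bytes inside that register
};

// Fixed-range MTRRs cover physical 0x00000..0xFFFFF only; anything at or above
// 1 MB is typed by the variable-range MTRRs instead (caller must check that).
inline FixedMtrrSlot LookupFixedMtrrSlot(uint64_t Pa)
{
    if (Pa < 0x80000) // IA32_MTRR_FIX64K_00000 (0x250): eight 64 KB ranges
    {
        return { 0x250u, static_cast<uint32_t>(Pa / 0x10000) };
    }
    if (Pa < 0xC0000) // IA32_MTRR_FIX16K_80000/A0000 (0x258/0x259): 16 KB ranges
    {
        uint64_t Granule = (Pa - 0x80000) / 0x4000;
        return { static_cast<uint32_t>(0x258 + Granule / 8), static_cast<uint32_t>(Granule % 8) };
    }
    // IA32_MTRR_FIX4K_C0000..F8000 (0x268..0x26F): 4 KB ranges
    uint64_t Granule = (Pa - 0xC0000) / 0x1000;
    return { static_cast<uint32_t>(0x268 + Granule / 8), static_cast<uint32_t>(Granule % 8) };
}
```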
@@ -1394,6 +1482,7 @@
    struct VCPU_INFO
    {
        PRIVATE_VM_DATA* VmData;
+        MTRR_INFO* MtrrInfo;
        VMX::VM_INSTRUCTION_ERROR Error;
        bool Status;
    };
@@ -1806,7 +1895,7 @@
        __vmx_vmwrite(VMX::VMCS_FIELD_HOST_IDTR_BASE, Idtr.BaseAddress);

        EPTP Eptp = {};
-        InitializeEptTables(OUT &Private->Ept, OUT &Eptp);
+        InitializeEptTables(IN Shared->Processors[CurrentProcessor].MtrrInfo, OUT &Private->Ept, OUT &Eptp);
        __vmx_vmwrite(VMX::VMCS_FIELD_EPT_POINTER_FULL, Eptp.Value);

        Private->EptInterceptor->CompleteInitialization(Eptp);
@@ -2136,7 +2225,6 @@
        VMX::EXIT_QUALIFICATION Info = { vmread(VMX::VMCS_FIELD_EXIT_QUALIFICATION) };

        unsigned long long AccessedPa = vmread(VMX::VMCS_FIELD_GUEST_PHYSICAL_ADDRESS_FULL);
-        unsigned long long AccessedVa = vmread(VMX::VMCS_FIELD_GUEST_LINEAR_ADDRESS);

        if (!(Info.EptViolations.GuestPhysicalReadable || Info.EptViolations.GuestPhysicalExecutable))
        {
@@ -2146,11 +2234,16 @@
        }

        bool Handled = false;
-        bool InterceptedPageSelfAccess = ALIGN_DOWN_BY(AccessedVa, PAGE_SIZE) == ALIGN_DOWN_BY(Rip, PAGE_SIZE);
-
+
        if (Info.EptViolations.AccessedRead)
        {
-            if (InterceptedPageSelfAccess)
+            unsigned long long HostCr3 = __readcr3();
+            unsigned long long GuestCr3 = vmread(VMX::VMCS_FIELD_GUEST_CR3);
+            __writecr3(GuestCr3);
+            unsigned long long RipPa = Supplementation::FastPhys::GetPhysAddressFast4KbUnsafe(Rip);
+            __writecr3(HostCr3);
+
+            if (ALIGN_DOWN_BY(AccessedPa, PAGE_SIZE) == ALIGN_DOWN_BY(RipPa, PAGE_SIZE))
            {
                unsigned long long InstructionLength = vmread(VMX::VMCS_FIELD_VMEXIT_INSTRUCTION_LENGTH);
                Handled = Private->EptInterceptor->HandleExecuteRead(AccessedPa, reinterpret_cast<void*>(Rip + InstructionLength));
@@ -2170,7 +2263,13 @@
            {
                unsigned long long InstructionLength = vmread(VMX::VMCS_FIELD_VMEXIT_INSTRUCTION_LENGTH);

-                if (InterceptedPageSelfAccess)
+                unsigned long long HostCr3 = __readcr3();
+                unsigned long long GuestCr3 = vmread(VMX::VMCS_FIELD_GUEST_CR3);
+                __writecr3(GuestCr3);
+                unsigned long long RipPa = Supplementation::FastPhys::GetPhysAddressFast4KbUnsafe(Rip);
+                __writecr3(HostCr3);
+
+                if (ALIGN_DOWN_BY(AccessedPa, PAGE_SIZE) == ALIGN_DOWN_BY(RipPa, PAGE_SIZE))
                {
                    Handled = Private->EptInterceptor->HandleExecuteWrite(AccessedPa, reinterpret_cast<void*>(Rip + InstructionLength));
                }
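(Refactoring note, not part of the patch: the guest-CR3 switch around the RIP translation appears twice in the handler above and could be hoisted into a helper along these lines. The sketch assumes VMX-root context with interrupts disabled — which the 'cli' added in VMM.asm guarantees — and, like GetPhysAddressFast4KbUnsafe itself, that the guest RIP is backed by a present 4 KB mapping:)

```cpp
static unsigned long long TranslateGuestRipPa(unsigned long long Rip)
{
    unsigned long long HostCr3 = __readcr3();
    __writecr3(vmread(VMX::VMCS_FIELD_GUEST_CR3)); // walk the guest's page tables
    unsigned long long RipPa = Supplementation::FastPhys::GetPhysAddressFast4KbUnsafe(Rip);
    __writecr3(HostCr3);                           // restore the host address space
    return RipPa;
}
```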
@@ -2501,13 +2600,10 @@
    }

    _IRQL_requires_same_
+    _IRQL_requires_min_(HIGH_LEVEL)
    extern "C" VMM_STATUS VmxVmexitHandler(PRIVATE_VM_DATA* Private, __inout GUEST_CONTEXT* Context)
    {
-        KIRQL Irql = KeGetCurrentIrql();
-        if (Irql < DISPATCH_LEVEL)
-        {
-            Irql = KeRaiseIrqlToDpcLevel();
-        }
+        /* Interrupts are locked */

        unsigned long long Rip = vmread(VMX::VMCS_FIELD_GUEST_RIP);

@@ -2525,12 +2621,92 @@
            __vmx_vmwrite(VMX::VMCS_FIELD_GUEST_RIP, Rip);
        }

-        if (Irql < DISPATCH_LEVEL)
+        return Status;
+    }
+
+    static void DbgPrintMtrrEptCacheLayout(__in const EPT_TABLES* Ept, __in const MTRR_INFO* MtrrInfo)
+    {
+        auto MemTypeToStr = [](MTRR_MEMORY_TYPE MemType) -> const char*
+        {
+            switch (MemType)
+            {
+            case MTRR_MEMORY_TYPE::Uncacheable: return "Uncacheable (0)";
+            case MTRR_MEMORY_TYPE::WriteCombining: return "WriteCombining (1)";
+            case MTRR_MEMORY_TYPE::WriteThrough: return "WriteThrough (4)";
+            case MTRR_MEMORY_TYPE::WriteProtected: return "WriteProtected (5)";
+            case MTRR_MEMORY_TYPE::WriteBack: return "WriteBack (6)";
+            default:
+                return "Unknown";
+            }
+        };
+
+        MTRR_MEMORY_TYPE CurrentRangeType = MTRR_MEMORY_TYPE::Uncacheable;
+        unsigned long long RangeBeginning = 0;
+        for (unsigned int i = 0; i < _ARRAYSIZE(Ept->Pdpte); ++i)
        {
-            KeLowerIrql(Irql);
+            for (unsigned int j = 0; j < _ARRAYSIZE(Ept->Pde[i]); ++j)
+            {
+                if (i == 0 && j == 0)
+                {
+                    for (unsigned int k = 0; k < _ARRAYSIZE(Ept->PteForFirstLargePage); ++k)
+                    {
+                        auto Page = Ept->PteForFirstLargePage[k].Page4Kb;
+                        MTRR_MEMORY_TYPE MemType = static_cast<MTRR_MEMORY_TYPE>(Page.Type);
+                        unsigned long long PagePa = Page.PagePhysicalPfn * PAGE_SIZE;
+                        if (MemType != CurrentRangeType)
+                        {
+                            if ((PagePa - RangeBeginning) > 0)
+                            {
+                                DbgPrint("Physical range [%p..%p]: %s\r\n", reinterpret_cast<void*>(RangeBeginning), reinterpret_cast<void*>(PagePa - 1), MemTypeToStr(CurrentRangeType));
+                            }
+                            CurrentRangeType = MemType;
+                            RangeBeginning = PagePa;
+                        }
+                    }
+                }
+                else
+                {
+                    constexpr unsigned long long PageSize = 2 * 1048576; // 2 Mb
+
+                    auto Page = Ept->Pde[i][j].Page2Mb;
+                    MTRR_MEMORY_TYPE MemType = static_cast<MTRR_MEMORY_TYPE>(Page.Type);
+                    unsigned long long PagePa = Page.PagePhysicalPfn * PageSize;
+                    if (MemType != CurrentRangeType)
+                    {
+                        if ((PagePa - RangeBeginning) > 0)
+                        {
+                            DbgPrint("Physical range [%p..%p]: %s\r\n", reinterpret_cast<void*>(RangeBeginning), reinterpret_cast<void*>(PagePa - 1), MemTypeToStr(CurrentRangeType));
+                        }
+                        CurrentRangeType = MemType;
+                        RangeBeginning = PagePa;
+                    }
+                }
+            }
        }

-        return Status;
+        DbgPrint("%p..%p: %s\r\n", reinterpret_cast<void*>(RangeBeginning), reinterpret_cast<void*>(512ull * 1024ull * 1048576ull - 1ull), MemTypeToStr(CurrentRangeType));
+
+        DbgPrint("EptVpidCap : 0x%I64X\r\n", MtrrInfo->EptVpidCap.Value);
+        DbgPrint("MaxPhysAddrBits : 0x%I64X\r\n", MtrrInfo->MaxPhysAddrBits);
+        DbgPrint("MtrrCap : 0x%I64X\r\n", MtrrInfo->MtrrCap.Value);
+        DbgPrint("MtrrDefType : 0x%I64X\r\n", MtrrInfo->MtrrDefType.Value);
+        DbgPrint("PhysAddrMask : 0x%I64X\r\n", MtrrInfo->PhysAddrMask);
+        DbgPrint("MTRR.Fixed [00000..7FFFF]: 0x%I64X\r\n", MtrrInfo->Fixed.Ranges.RangeFrom00000To7FFFF.Value);
+        DbgPrint("MTRR.Fixed [80000..9FFFF]: 0x%I64X\r\n", MtrrInfo->Fixed.Ranges.RangeFrom80000To9FFFF.Value);
+        DbgPrint("MTRR.Fixed [A0000..BFFFF]: 0x%I64X\r\n", MtrrInfo->Fixed.Ranges.RangeFromA0000ToBFFFF.Value);
+        DbgPrint("MTRR.Fixed [C0000..C7FFF]: 0x%I64X\r\n", MtrrInfo->Fixed.Ranges.RangeFromC0000ToC7FFF.Value);
+        DbgPrint("MTRR.Fixed [C8000..CFFFF]: 0x%I64X\r\n", MtrrInfo->Fixed.Ranges.RangeFromC8000ToCFFFF.Value);
+        DbgPrint("MTRR.Fixed [D0000..D7FFF]: 0x%I64X\r\n", MtrrInfo->Fixed.Ranges.RangeFromD0000ToD7FFF.Value);
+        DbgPrint("MTRR.Fixed [D8000..DFFFF]: 0x%I64X\r\n", MtrrInfo->Fixed.Ranges.RangeFromD8000ToDFFFF.Value);
+        DbgPrint("MTRR.Fixed [E0000..E7FFF]: 0x%I64X\r\n", MtrrInfo->Fixed.Ranges.RangeFromE0000ToE7FFF.Value);
+        DbgPrint("MTRR.Fixed [E8000..EFFFF]: 0x%I64X\r\n", MtrrInfo->Fixed.Ranges.RangeFromE8000ToEFFFF.Value);
+        DbgPrint("MTRR.Fixed [F0000..F7FFF]: 0x%I64X\r\n", MtrrInfo->Fixed.Ranges.RangeFromF0000ToF7FFF.Value);
+        DbgPrint("MTRR.Fixed [F8000..FFFFF]: 0x%I64X\r\n", MtrrInfo->Fixed.Ranges.RangeFromF8000ToFFFFF.Value);
+
+        for (unsigned int i = 0; i < _ARRAYSIZE(MtrrInfo->Variable); ++i)
+        {
+            DbgPrint("MTRR.Variable[%u]: Base: 0x%I64X, Mask: 0x%I64X\r\n", i, MtrrInfo->Variable[i].PhysBase.Value, MtrrInfo->Variable[i].PhysMask.Value);
+        }
    }

    static bool VirtualizeAllProcessors()
@@ -2549,6 +2725,11 @@
        CPUID::Intel::VIRTUAL_AND_PHYSICAL_ADDRESS_SIZES MaxAddrSizes = {};
        __cpuid(MaxAddrSizes.Regs.Raw, CPUID::Intel::CPUID_VIRTUAL_AND_PHYSICAL_ADDRESS_SIZES);

+        // Initializing MTRRs shared between all processors:
+        MTRR_INFO MtrrInfo;
+        memset(&MtrrInfo, 0, sizeof(MtrrInfo));
+        InitMtrr(&MtrrInfo);
+
        ULONG ProcessorsCount = KeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS);
        Shared->Processors = VirtualMemory::AllocArray<VCPU_INFO>(ProcessorsCount);
        for (ULONG i = 0; i < ProcessorsCount; ++i)
@@ -2566,6 +2747,7 @@
                }
                return false;
            }
+            Proc->MtrrInfo = &MtrrInfo;
            Proc->VmData->EptInterceptor = new EptHandler(&Proc->VmData->Ept);
        }

@@ -2588,7 +2770,11 @@
            }
        }

-        if (!Status)
+        if (Status)
+        {
+            DbgPrintMtrrEptCacheLayout(&Shared->Processors[0].VmData->Ept, Shared->Processors[0].MtrrInfo);
+        }
+        else
        {
            DevirtualizeAllProcessors();
            VirtualMemory::FreePoolMemory(Shared->Processors);
diff --git a/Kernel-Bridge/API/PteUtils.cpp b/Kernel-Bridge/API/PteUtils.cpp
index 3f25fda..cc1579c 100644
--- a/Kernel-Bridge/API/PteUtils.cpp
+++ b/Kernel-Bridge/API/PteUtils.cpp
@@ -16,7 +16,8 @@
 #endif

    _IRQL_requires_max_(APC_LEVEL)
-    BOOLEAN GetPageTables(PVOID Address, OUT PAGE_TABLES_INFO* Info) {
+    BOOLEAN GetPageTables(PVOID Address, OUT PAGE_TABLES_INFO* Info)
+    {
        if (!Info) return FALSE;
        *Info = {};

@@ -40,20 +41,24 @@
        PVOID64 PdpePhys = reinterpret_cast<PVOID64>(PFN_TO_PAGE(Info->Pml4e->x64.Generic.PDP) + Va.x64.Generic.PageDirectoryPointerOffset * sizeof(PDPE::x64));
        Info->Pdpe = reinterpret_cast<PDPE*>(GetVirtualForPhysical(PdpePhys));
        if (!Info->Pdpe) return FALSE;
-        if
(Info->Pdpe->x64.Generic.PS) { + if (Info->Pdpe->x64.Generic.PS) + { // Page size = 1 Gb: if (!Info->Pdpe->x64.PageSize.Page1Gb.P) return FALSE; Info->Type = PAGE_TABLES_INFO::pt64Page1Gb; } - else { + else + { PVOID64 PdePhys = reinterpret_cast(PFN_TO_PAGE(Info->Pdpe->x64.NonPageSize.Generic.PD) + Va.x64.NonPageSize.Generic.PageDirectoryOffset * sizeof(PDE::x64)); Info->Pde = reinterpret_cast(GetVirtualForPhysical(PdePhys)); if (!Info->Pde) return FALSE; - if (Info->Pde->x64.Generic.PS) { + if (Info->Pde->x64.Generic.PS) + { // Page size = 2 Mb: Info->Type = PAGE_TABLES_INFO::pt64Page2Mb; } - else { + else + { // Page size = 4 Kb: Info->Type = PAGE_TABLES_INFO::pt64Page4Kb; @@ -63,7 +68,8 @@ namespace Pte { } } #else - if (Cr4.x32.Bitmap.PAE) { + if (Cr4.x32.Bitmap.PAE) + { PVOID64 PdpePhys = reinterpret_cast(PFN_TO_PDP_PAE(Cr3.x32.Pae.PDP) + Va.x32.Pae.Generic.PageDirectoryPointerOffset * sizeof(PDPE::x32)); Info->Pdpe = reinterpret_cast(GetVirtualForPhysical(PdpePhys)); if (!Info->Pdpe) return FALSE; @@ -72,11 +78,13 @@ namespace Pte { PVOID64 PdePhys = reinterpret_cast(PFN_TO_PAGE(Info->Pdpe->x32.Pae.Generic.PD) + Va.x32.Pae.Generic.PageDirectoryOffset * sizeof(PDE::x32)); Info->Pde = reinterpret_cast(GetVirtualForPhysical(PdePhys)); if (!Info->Pde) return FALSE; - if (!Info->Pde->x32.Pae.Generic.PS) { + if (!Info->Pde->x32.Pae.Generic.PS) + { // Page size = 2 Mb: Info->Type = PAGE_TABLES_INFO::pt32PaePage2Mb; } - else { + else + { // Page size = 4 Kb: Info->Type = PAGE_TABLES_INFO::pt32PaePage4Kb; @@ -86,17 +94,21 @@ namespace Pte { if (!Info->Pte) return FALSE; } } - else { - if (Cr4.x32.Bitmap.PSE) { + else + { + if (Cr4.x32.Bitmap.PSE) + { PVOID64 PdePhys = reinterpret_cast(PFN_TO_PAGE(Cr3.x32.NonPae.PD) + Va.x32.NonPae.Page4Kb.PageDirectoryOffset * sizeof(PDE::x32)); Info->Pde = reinterpret_cast(GetVirtualForPhysical(PdePhys)); if (!Info->Pde) return FALSE; - if (Info->Pde->x32.NonPae.Generic.PS) { + if (Info->Pde->x32.NonPae.Generic.PS) + { // Page size = 4 Mb: Info->Type = PAGE_TABLES_INFO::pt32NonPaePage4Mb; } - else { + else + { // Page size = 4 Kb: Info->Type = PAGE_TABLES_INFO::pt32NonPaePage4Kb; @@ -107,7 +119,8 @@ namespace Pte { if (!Info->Pte) return FALSE; } } - else { + else + { // Page size = 4 Kb: Info->Type = PAGE_TABLES_INFO::pt32NonPaePage4Kb; @@ -126,7 +139,8 @@ namespace Pte { } _IRQL_requires_max_(APC_LEVEL) - BOOLEAN TriggerCopyOnWrite(OPTIONAL PEPROCESS Process, PVOID Address, OPTIONAL OUT PULONG PageSize) { + BOOLEAN TriggerCopyOnWrite(OPTIONAL PEPROCESS Process, PVOID Address, OPTIONAL OUT PULONG PageSize) + { BOOLEAN NeedToAttach = Process && Process != PsGetCurrentProcess(); KAPC_STATE ApcState; if (NeedToAttach) @@ -135,7 +149,8 @@ namespace Pte { BOOLEAN Status = FALSE; PAGE_TABLES_INFO Info = {}; Status = GetPageTables(Address, &Info); - if (Status) __try { + if (Status) __try + { // AVL is a 3-bit field: // AVL:CopyOnWrite : 1 // AVL:Unused : 1 @@ -145,7 +160,8 @@ namespace Pte { if (PageSize) *PageSize = 0; - switch (Info.Type) { + switch (Info.Type) + { case PAGE_TABLES_INFO::pt32NonPaePage4Kb: // PDE -> PTE -> PA: if (PageSize) *PageSize = 4096; @@ -193,7 +209,8 @@ namespace Pte { __invlpg(Address); // Reset the TLB *reinterpret_cast(Address) = *reinterpret_cast(Address); } - __except (EXCEPTION_EXECUTE_HANDLER) { + __except (EXCEPTION_EXECUTE_HANDLER) + { Status = FALSE; } @@ -204,11 +221,12 @@ namespace Pte { } _IRQL_requires_max_(APC_LEVEL) - BOOLEAN IsPagePresent(PVOID Address, OPTIONAL OUT PULONG PageSize) { + BOOLEAN IsPagePresent(PVOID 
Address, OPTIONAL OUT PULONG PageSize) + { BOOLEAN IsPresent = FALSE; PAGE_TABLES_INFO Info = {}; - if (GetPageTables(Address, &Info)) __try { - + if (GetPageTables(Address, &Info)) __try + { if (PageSize) *PageSize = 0; switch (Info.Type) { @@ -249,7 +267,8 @@ namespace Pte { break; } } - __except (EXCEPTION_EXECUTE_HANDLER) { + __except (EXCEPTION_EXECUTE_HANDLER) + { IsPresent = FALSE; } @@ -257,7 +276,8 @@ namespace Pte { } _IRQL_requires_max_(APC_LEVEL) - BOOLEAN IsProcessPagePresent(OPTIONAL PEPROCESS Process, PVOID Address, OPTIONAL OUT PULONG PageSize) { + BOOLEAN IsProcessPagePresent(OPTIONAL PEPROCESS Process, PVOID Address, OPTIONAL OUT PULONG PageSize) + { if (!Process || Process == PsGetCurrentProcess()) return IsPagePresent(Address, PageSize); KAPC_STATE ApcState; @@ -268,7 +288,8 @@ namespace Pte { } _IRQL_requires_max_(APC_LEVEL) - BOOLEAN IsMemoryRangePresent(OPTIONAL PEPROCESS Process, PVOID Address, SIZE_T Size) { + BOOLEAN IsMemoryRangePresent(OPTIONAL PEPROCESS Process, PVOID Address, SIZE_T Size) + { if (!Size) return FALSE; BOOLEAN NeedToAttach = Process && Process != PsGetCurrentProcess(); diff --git a/Kernel-Bridge/API/PteUtils.h b/Kernel-Bridge/API/PteUtils.h index 6ede7d3..9cb2a81 100644 --- a/Kernel-Bridge/API/PteUtils.h +++ b/Kernel-Bridge/API/PteUtils.h @@ -1,6 +1,7 @@ #pragma once -namespace Pte { +namespace Pte +{ struct PAGE_TABLES_INFO { PML4E* Pml4e; PDPE* Pdpe; diff --git a/Kernel-Bridge/API/Stopwatch.cpp b/Kernel-Bridge/API/Stopwatch.cpp new file mode 100644 index 0000000..52b6695 --- /dev/null +++ b/Kernel-Bridge/API/Stopwatch.cpp @@ -0,0 +1,46 @@ +#include "Stopwatch.h" + +#include + +Stopwatch::Stopwatch() + : m_begin(0) + , m_end(0) + , m_freq(0) +{ + KeQueryPerformanceCounter(reinterpret_cast(&m_freq)); +} + +Stopwatch::Stopwatch(bool init) + : m_begin(0) + , m_end(0) + , m_freq(0) +{ + if (init) + { + start(); + } +} + +void Stopwatch::reset() +{ + m_begin = 0; + m_end = 0; + KeQueryPerformanceCounter(reinterpret_cast(&m_freq)); +} + +void Stopwatch::start() +{ + m_begin = KeQueryPerformanceCounter(reinterpret_cast(&m_freq)).QuadPart; + m_end = m_begin; +} + +float Stopwatch::stop() +{ + m_end = KeQueryPerformanceCounter(NULL).QuadPart; + return delta(); +} + +float Stopwatch::delta() +{ + return static_cast(m_end - m_begin) / m_freq; +} \ No newline at end of file diff --git a/Kernel-Bridge/API/Stopwatch.h b/Kernel-Bridge/API/Stopwatch.h new file mode 100644 index 0000000..d6502cf --- /dev/null +++ b/Kernel-Bridge/API/Stopwatch.h @@ -0,0 +1,19 @@ +#pragma once + +class Stopwatch +{ +protected: + unsigned long long m_begin, m_end; + unsigned long long m_freq; + +public: + Stopwatch(); + Stopwatch(bool init); + + void reset(); + + void start(); + float stop(); + + float delta(); +}; \ No newline at end of file diff --git a/Kernel-Bridge/API/VMM.asm b/Kernel-Bridge/API/VMM.asm index d675389..9e1c980 100644 --- a/Kernel-Bridge/API/VMM.asm +++ b/Kernel-Bridge/API/VMM.asm @@ -203,6 +203,7 @@ __invvpid PROC PUBLIC __invvpid ENDP VmxVmmRun PROC PUBLIC + cli PUSHAQ mov rcx, [rsp + GPR_CONTEXT_SIZE + 16] mov rdx, rsp @@ -217,6 +218,7 @@ VmxVmmRun PROC PUBLIC jz VmmExit POPAQ + sti vmresume VmmExit: @@ -230,6 +232,7 @@ VmmExit: mov rsp, rcx mov ecx, CPUID_VMM_SHUTDOWN ; Signature that says about the VM shutdown + sti jmp rbx VmxVmmRun ENDP diff --git a/Kernel-Bridge/Kernel-Bridge.vcxproj b/Kernel-Bridge/Kernel-Bridge.vcxproj index 3482bc2..8a2dbb1 100644 --- a/Kernel-Bridge/Kernel-Bridge.vcxproj +++ b/Kernel-Bridge/Kernel-Bridge.vcxproj @@ -37,6 +37,7 @@ + 
@@ -283,6 +284,7 @@ + diff --git a/Kernel-Bridge/Kernel-Bridge.vcxproj.filters b/Kernel-Bridge/Kernel-Bridge.vcxproj.filters index a9dd36d..ce5bd25 100644 --- a/Kernel-Bridge/Kernel-Bridge.vcxproj.filters +++ b/Kernel-Bridge/Kernel-Bridge.vcxproj.filters @@ -110,6 +110,9 @@ API + + API + @@ -226,6 +229,9 @@ HookLib + + API + diff --git a/Kernel-Tests/Main.cpp b/Kernel-Tests/Main.cpp index bb4a4cd..d2464e3 100644 --- a/Kernel-Tests/Main.cpp +++ b/Kernel-Tests/Main.cpp @@ -23,7 +23,8 @@ #include "SymParser.h" #include -#include "Registers.h" +#include +#include #include @@ -279,9 +280,11 @@ void* GetFuncPtr(const void* Func) __declspec(code_seg(".hidden")) unsigned int HiddenFunc() { printf("Called from hidden func!\n"); - auto* Self = reinterpret_cast(GetFuncPtr(HiddenFunc)); - *Self = 0x55; - printf("Self memory: 0x%X\n", static_cast(*Self)); + for (unsigned int i = 0; i < 100; ++i) + { + volatile BYTE* Self = reinterpret_cast(GetFuncPtr(HiddenFunc)); + *Self = 0x55; + } return 0x1EE7C0DE; }
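(Usage note, not part of the patch: a sketch of how the new Stopwatch helper can time the fast resolver. The iteration count, the DbgPrint output, and the placement — somewhere inside Hypervisor.cpp, where both Stopwatch.h and the file-local FastPhys namespace are visible — are assumptions for illustration:)

```cpp
#include "Stopwatch.h"

static void BenchmarkFastPhysResolver(void* NonPagedAddress)
{
    const unsigned long long Va = reinterpret_cast<unsigned long long>(NonPagedAddress);
    constexpr unsigned int Iterations = 1000000;

    Stopwatch Timer(true);              // the bool constructor calls start() immediately
    volatile unsigned long long Pa = 0; // volatile so the loop is not optimized away
    for (unsigned int i = 0; i < Iterations; ++i)
    {
        Pa = Supplementation::FastPhys::GetPhysAddressFast4KbUnsafe(Va);
    }
    float Seconds = Timer.stop();       // QPC tick delta divided by the QPC frequency

    DbgPrint("FastPhys: %u translations in %I64u us, last PA = 0x%I64X\r\n",
             Iterations,
             static_cast<unsigned long long>(Seconds * 1000000.0f),
             static_cast<unsigned long long>(Pa));
}
```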