static_assert(sizeof(MtrrData) == 24, "Size check");
_When_(ept_data == nullptr,
       _IRQL_requires_max_(DISPATCH_LEVEL)) static EptCommonEntry
    *EptpConstructTables(_In_ EptCommonEntry *table, _In_ ULONG table_level,
                         _In_ ULONG64 physical_address,
                         _In_opt_ EptData *ept_data);

static void EptpDestructTables(_In_ EptCommonEntry *table,
                               _In_ ULONG table_level);

_Must_inspect_result_ _When_(ept_data == nullptr,
                             __drv_allocatesMem(Mem) _IRQL_requires_max_(
                                 DISPATCH_LEVEL)) static EptCommonEntry
    *EptpAllocateEptEntry(_In_opt_ EptData *ept_data);

_Must_inspect_result_ __drv_allocatesMem(Mem) _IRQL_requires_max_(
    DISPATCH_LEVEL) static EptCommonEntry *EptpAllocateEptEntryFromPool();

static void EptpInitTableEntry(_In_ EptCommonEntry *Entry,
                               _In_ ULONG table_level,
                               _In_ ULONG64 physical_address);

static EptCommonEntry *EptpGetEptPtEntry(_In_ EptCommonEntry *table,
                                         _In_ ULONG table_level,
                                         _In_ ULONG64 physical_address);

static void EptpFreeUnusedPreAllocatedEntries(
    _Pre_notnull_ __drv_freesMem(Mem) EptCommonEntry **preallocated_entries,
    _In_ long used_count);
#if defined(ALLOC_PRAGMA)
#pragma alloc_text(PAGE, EptIsEptAvailable)
#pragma alloc_text(PAGE, EptInitialization)
#pragma alloc_text(PAGE, EptInitializeMtrrEntries)
#endif

  HYPERPLATFORM_LOG_DEBUG(
      "MTRR Default=%llu, VariableCount=%llu, FixedSupported=%llu, "
      "FixedEnabled=%llu",
      default_type.fields.default_mtemory_type,
      mtrr_capabilities.fields.variable_range_count,
      mtrr_capabilities.fields.fixed_range_supported,
      default_type.fields.fixed_mtrrs_enabled);

  static const auto k64kBase = 0x0;
  static const auto k64kManagedSize = 0x10000;
  static const auto k16kBase = 0x80000;
  static const auto k16kManagedSize = 0x4000;
  static const auto k4kBase = 0xC0000;
  static const auto k4kManagedSize = 0x1000;
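  // The fixed-range MTRRs describe the first 1MB of physical memory.
  // IA32_MTRR_FIX64K_00000 covers 0x00000-0x7FFFF in eight 64K ranges.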
    ULONG64 base = k64kBase + offset;
    offset += k64kManagedSize;
    mtrr_entries[index].enabled = true;
    mtrr_entries[index].range_end = base + k64kManagedSize - 1;
  NT_ASSERT(k64kBase + offset == k16kBase);
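  // IA32_MTRR_FIX16K_80000 and IA32_MTRR_FIX16K_A0000 cover 0x80000-0xBFFFF
  // in sixteen 16K ranges.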
    ULONG64 base = k16kBase + offset;
    offset += k16kManagedSize;
    mtrr_entries[index].enabled = true;
    mtrr_entries[index].range_end = base + k16kManagedSize - 1;
  NT_ASSERT(k16kBase + offset == k4kBase);
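  // IA32_MTRR_FIX4K_C0000 through IA32_MTRR_FIX4K_F8000 cover 0xC0000-0xFFFFF
  // in sixty-four 4K ranges.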
    ULONG64 base = k4kBase + offset;
    offset += k4kManagedSize;
    mtrr_entries[index].enabled = true;
    mtrr_entries[index].range_end = base + k4kManagedSize - 1;
  NT_ASSERT(k4kBase + offset == 0x100000);
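  // Variable-range MTRRs: each IA32_MTRR_PHYSBASEn/PHYSMASKn pair describes a
  // power-of-two sized range. The position of the lowest set bit of the mask
  // gives the range length, so the last covered byte is base + (1ull << length) - 1.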
    ULONG64 end = base + (1ull << length) - 1;
    mtrr_entries[index].enabled = true;
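// Returns a memory type for the physical_address based on the stored MTRRs.
// Precedence follows the MTRR rules: a fixed-range MTRR wins outright, UC wins
// among variable ranges, and the default type is used when no MTRR matches.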
_Use_decl_annotations_ static memory_type EptpGetMemoryType(
    ULONG64 physical_address) {
  // Indicates that no matching MTRR has been found yet
  UCHAR result_type = MAXUCHAR;

  for (const auto mtrr_entry : g_eptp_mtrr_entries) {
    if (!mtrr_entry.enabled) {
      // Reached the end of the stored MTRRs
      break;
    }
    if (!UtilIsInBounds(physical_address, mtrr_entry.range_base,
                        mtrr_entry.range_end)) {
      continue;
    }
    if (mtrr_entry.fixedMtrr) {
      // A fixed-range MTRR takes precedence
      result_type = mtrr_entry.type;
      break;
    }
    if (mtrr_entry.type == static_cast<UCHAR>(memory_type::kUncacheable)) {
      // UC has the highest precedence among variable ranges
      result_type = mtrr_entry.type;
      break;
    }
    result_type = mtrr_entry.type;
  }

  if (result_type == MAXUCHAR) {
    result_type = g_eptp_mtrr_default_type;
  }
  return static_cast<memory_type>(result_type);
}
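// Builds an EPT structure that identity-maps all physical memory reported by
// the OS, and pre-allocates entries for use after initialization.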
_Use_decl_annotations_ EptData *EptInitialization() {
  PAGED_CODE();
  static const auto kEptPageWalkLevel = 4ul;

  // Allocate ept_data
  const auto ept_data = reinterpret_cast<EptData *>(ExAllocatePoolWithTag(
      NonPagedPool, sizeof(EptData), kHyperPlatformCommonPoolTag));
  if (!ept_data) {
    return nullptr;
  }
  RtlZeroMemory(ept_data, sizeof(EptData));

  // Allocate EptPointer
  const auto ept_poiner = reinterpret_cast<EptPointer *>(ExAllocatePoolWithTag(
      NonPagedPool, PAGE_SIZE, kHyperPlatformCommonPoolTag));
  if (!ept_poiner) {
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(ept_poiner, PAGE_SIZE);

  // Allocate EPT_PML4 and initialize EptPointer
  const auto ept_pml4 = reinterpret_cast<EptCommonEntry *>(ExAllocatePoolWithTag(
      NonPagedPool, PAGE_SIZE, kHyperPlatformCommonPoolTag));
  if (!ept_pml4) {
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(ept_pml4, PAGE_SIZE);
  ept_poiner->fields.memory_type =
      static_cast<ULONG64>(EptpGetMemoryType(UtilPaFromVa(ept_pml4)));
  ept_poiner->fields.page_walk_length = kEptPageWalkLevel - 1;
  ept_poiner->fields.pml4_address = UtilPfnFromPa(UtilPaFromVa(ept_pml4));
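  // The EPTP holds the PML4 PFN, the memory type used for the EPT paging
  // structures, and the page-walk length minus one (3 for a 4-level walk).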
  // Initialize EPT entries for all of the physical memory pages
  const auto pm_ranges = UtilGetPhysicalMemoryRanges();
  for (auto run_index = 0ul; run_index < pm_ranges->number_of_runs;
       ++run_index) {
    const auto run = &pm_ranges->run[run_index];
    const auto base_addr = run->base_page * PAGE_SIZE;
    for (auto page_index = 0ull; page_index < run->page_count; ++page_index) {
      const auto indexed_addr = base_addr + page_index * PAGE_SIZE;
      const auto ept_pt_entry =
          EptpConstructTables(ept_pml4, 4, indexed_addr, nullptr);
      if (!ept_pt_entry) {
        EptpDestructTables(ept_pml4, 4);
        ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
        ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
        return nullptr;
      }
    }
  }
  // Allocate pre-allocated entries
  const auto preallocated_entries_size =
      sizeof(EptCommonEntry *) * kEptpNumberOfPreallocatedEntries;
  const auto preallocated_entries = reinterpret_cast<EptCommonEntry **>(
      ExAllocatePoolWithTag(NonPagedPool, preallocated_entries_size,
                            kHyperPlatformCommonPoolTag));
  if (!preallocated_entries) {
    EptpDestructTables(ept_pml4, 4);
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(preallocated_entries, preallocated_entries_size);

  // Fill preallocated_entries with newly created entries
  for (auto i = 0ul; i < kEptpNumberOfPreallocatedEntries; ++i) {
    const auto ept_entry = EptpAllocateEptEntry(nullptr);
    preallocated_entries[i] = ept_entry;
  }

  // Initialization completed
  ept_data->ept_pointer = ept_poiner;
  ept_data->ept_pml4 = ept_pml4;
  ept_data->preallocated_entries = preallocated_entries;
  ept_data->preallocated_entries_count = 0;
  return ept_data;
}
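// Allocates and initializes EPT entries for physical_address, walking down
// from the given table (the PML4 when table_level == 4). When ept_data is
// nullptr new tables come from pool; otherwise they are taken from the
// pre-allocated entries so that mappings can be built while handling a
// VM-exit, where pool allocation is not safe.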
_Use_decl_annotations_ static EptCommonEntry *EptpConstructTables(
    EptCommonEntry *table, ULONG table_level, ULONG64 physical_address,
    EptData *ept_data) {
  switch (table_level) {
    case 4: {
      // table == PML4
      const auto pxe_index = EptpAddressToPxeIndex(physical_address);
      const auto ept_pml4_entry = &table[pxe_index];
      if (!ept_pml4_entry->all) {
        const auto ept_pdpt = EptpAllocateEptEntry(ept_data);
        if (!ept_pdpt) {
          return nullptr;
        }
        EptpInitTableEntry(ept_pml4_entry, table_level, UtilPaFromVa(ept_pdpt));
      }
      return EptpConstructTables(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pml4_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data);
    }
    case 3: {
      // table == PDPT
      const auto ppe_index = EptpAddressToPpeIndex(physical_address);
      const auto ept_pdpt_entry = &table[ppe_index];
      if (!ept_pdpt_entry->all) {
        const auto ept_pdt = EptpAllocateEptEntry(ept_data);
        if (!ept_pdt) {
          return nullptr;
        }
        EptpInitTableEntry(ept_pdpt_entry, table_level, UtilPaFromVa(ept_pdt));
      }
      return EptpConstructTables(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pdpt_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data);
    }
    case 2: {
      // table == PDT
      const auto pde_index = EptpAddressToPdeIndex(physical_address);
      const auto ept_pdt_entry = &table[pde_index];
      if (!ept_pdt_entry->all) {
        const auto ept_pt = EptpAllocateEptEntry(ept_data);
        if (!ept_pt) {
          return nullptr;
        }
        EptpInitTableEntry(ept_pdt_entry, table_level, UtilPaFromVa(ept_pt));
      }
      return EptpConstructTables(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pdt_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data);
    }
    case 1: {
      // table == PT
      const auto pte_index = EptpAddressToPteIndex(physical_address);
      const auto ept_pt_entry = &table[pte_index];
      NT_ASSERT(!ept_pt_entry->all);
      EptpInitTableEntry(ept_pt_entry, table_level, physical_address);
      return ept_pt_entry;
    }
    default:
      HYPERPLATFORM_COMMON_DBG_BREAK();
      return nullptr;
  }
}
        reinterpret_cast<ULONG_PTR>(ept_data), 0);
// Returns a new EPT entry from the pool
_Use_decl_annotations_ static EptCommonEntry *EptpAllocateEptEntryFromPool() {
  static const auto kAllocSize = 512 * sizeof(EptCommonEntry);
  static_assert(kAllocSize == PAGE_SIZE, "Size check");

  const auto entry = reinterpret_cast<EptCommonEntry *>(ExAllocatePoolWithTag(
      NonPagedPool, kAllocSize, kHyperPlatformCommonPoolTag));
  if (!entry) {
    return nullptr;
  }
  RtlZeroMemory(entry, kAllocSize);
  return entry;
}
// Initializes an EPT entry with a "pass through" attribute
_Use_decl_annotations_ static void EptpInitTableEntry(
    EptCommonEntry *entry, ULONG table_level, ULONG64 physical_address) {
  entry->fields.read_access = true;
  entry->fields.write_access = true;
  entry->fields.execute_access = true;
  entry->fields.physial_address = UtilPfnFromPa(physical_address);
  // Only the lowest level (PT) entry carries a memory type, taken from MTRRs
  if (table_level == 1) {
    entry->fields.memory_type =
        static_cast<ULONG64>(EptpGetMemoryType(physical_address));
  }
}
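// Each 9-bit portion of the guest physical address selects an entry at one
// level of the EPT: bits 47:39 (PML4), 38:30 (PDPT), 29:21 (PD) and 20:12 (PT).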
_Use_decl_annotations_ static ULONG64 EptpAddressToPxeIndex(
    ULONG64 physical_address) {
  return (physical_address >> kEptpPxiShift) & kEptpPtxMask;
}

_Use_decl_annotations_ static ULONG64 EptpAddressToPpeIndex(
    ULONG64 physical_address) {
  return (physical_address >> kEptpPpiShift) & kEptpPtxMask;
}

_Use_decl_annotations_ static ULONG64 EptpAddressToPdeIndex(
    ULONG64 physical_address) {
  return (physical_address >> kEptpPdiShift) & kEptpPtxMask;
}

_Use_decl_annotations_ static ULONG64 EptpAddressToPteIndex(
    ULONG64 physical_address) {
  return (physical_address >> kEptpPtiShift) & kEptpPtxMask;
}
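// Handles an EPT violation VM-exit: reads the exit qualification and the
// faulting guest physical address, and, when no EPT entry exists for that
// address (as with device memory not described by the OS), builds the mapping
// on the fly from the pre-allocated entries and invalidates EPT caches.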
_Use_decl_annotations_ void EptHandleEptViolation(EptData *ept_data) {
  const EptViolationQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};
  const auto fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress);
  const auto fault_va = reinterpret_cast<void *>(
      exit_qualification.fields.valid_guest_linear_address
          ? UtilVmRead(VmcsField::kGuestLinearAddress)
          : 0);

  const auto ept_entry = EptGetEptPtEntry(ept_data, fault_pa);
  if (ept_entry && ept_entry->all) {
// Returns true if the physical_address is device memory (which cannot have a
// corresponding PFN entry)
_Use_decl_annotations_ static bool EptpIsDeviceMemory(
    ULONG64 physical_address) {
  const auto pm_ranges = UtilGetPhysicalMemoryRanges();
  for (auto i = 0ul; i < pm_ranges->number_of_runs; ++i) {
    const auto current_run = &pm_ranges->run[i];
    const auto base_addr =
        static_cast<ULONG64>(current_run->base_page) * PAGE_SIZE;
    const auto endAddr = base_addr + current_run->page_count * PAGE_SIZE - 1;
    if (UtilIsInBounds(physical_address, base_addr, endAddr)) {
      return false;
    }
  }
  return true;
}
// Returns an EPT entry corresponding to the physical_address
_Use_decl_annotations_ EptCommonEntry *EptGetEptPtEntry(
    EptData *ept_data, ULONG64 physical_address) {
  return EptpGetEptPtEntry(ept_data->ept_pml4, 4, physical_address);
}

_Use_decl_annotations_ static EptCommonEntry *EptpGetEptPtEntry(
    EptCommonEntry *table, ULONG table_level, ULONG64 physical_address) {
  switch (table_level) {
    case 4: {
      // table == PML4
      const auto pxe_index = EptpAddressToPxeIndex(physical_address);
      const auto ept_pml4_entry = &table[pxe_index];
      if (!ept_pml4_entry->all) {
        return nullptr;
      }
      return EptpGetEptPtEntry(reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn(
                                   ept_pml4_entry->fields.physial_address)),
                               table_level - 1, physical_address);
    }
    case 3: {
      // table == PDPT
      const auto ppe_index = EptpAddressToPpeIndex(physical_address);
      const auto ept_pdpt_entry = &table[ppe_index];
      if (!ept_pdpt_entry->all) {
        return nullptr;
      }
      return EptpGetEptPtEntry(reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn(
                                   ept_pdpt_entry->fields.physial_address)),
                               table_level - 1, physical_address);
    }
    case 2: {
      // table == PDT
      const auto pde_index = EptpAddressToPdeIndex(physical_address);
      const auto ept_pdt_entry = &table[pde_index];
      if (!ept_pdt_entry->all) {
        return nullptr;
      }
      return EptpGetEptPtEntry(reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn(
                                   ept_pdt_entry->fields.physial_address)),
                               table_level - 1, physical_address);
    }
    case 1: {
      // table == PT
      const auto pte_index = EptpAddressToPteIndex(physical_address);
      const auto ept_pt_entry = &table[pte_index];
      return ept_pt_entry;
    }
    default:
      return nullptr;
  }
}
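// Termination support: EptTermination() frees the pre-allocated entries that
// were never consumed and then releases all EPT tables by walking them.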
_Use_decl_annotations_ static void EptpFreeUnusedPreAllocatedEntries(
    EptCommonEntry **preallocated_entries, long used_count) {
  for (auto i = used_count; i < kEptpNumberOfPreallocatedEntries; ++i) {
    if (!preallocated_entries[i]) {
      break;
    }
#pragma warning(push)
#pragma warning(disable : 6001)
    ExFreePoolWithTag(preallocated_entries[i], kHyperPlatformCommonPoolTag);
#pragma warning(pop)
  }
  ExFreePoolWithTag(preallocated_entries, kHyperPlatformCommonPoolTag);
}

// Frees all used EPT entries by walking through the whole EPT structure
_Use_decl_annotations_ static void EptpDestructTables(EptCommonEntry *table,
                                                      ULONG table_level) {
  for (auto i = 0ul; i < 512; ++i) {
    const auto entry = table[i];
    if (entry.fields.physial_address) {
      const auto sub_table = reinterpret_cast<EptCommonEntry *>(
          UtilVaFromPfn(entry.fields.physial_address));
      switch (table_level) {